input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# --------------------------------------------------------
# Pytorch Multi-GPU Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>, <NAME>, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import pickle
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
#from model.nms.nms_wrapper import nms
#from model.roi_layers import nms
from torchvision.ops.boxes import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
# Python 2/3 compatibility: make `xrange` available under both interpreters
# (on Python 3 it simply aliases the built-in `range`).
try:
    xrange          # Python 2
except NameError:
    xrange = range  # Python 3
def parse_args(argv=None):
    """
    Parse command-line arguments for the test script.

    :param list[str] argv: Optional list of argument strings to parse instead
        of ``sys.argv[1:]`` (useful for testing). ``None`` keeps argparse's
        default behaviour, so existing callers are unaffected.
    :return argparse.Namespace: The parsed arguments.
    """
    parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
    parser.add_argument('--dataset', dest='dataset',
                        help='training dataset',
                        default='pascal_voc', type=str)
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file',
                        default='cfgs/res50.yml', type=str)
    parser.add_argument('--net', dest='net',
                        help='vgg16, res50, res101, res152',
                        default='res101', type=str)
    # REMAINDER swallows everything after --set as raw config key/value tokens.
    parser.add_argument('--set', dest='set_cfgs',
                        help='set config keys', default=None,
                        nargs=argparse.REMAINDER)
    parser.add_argument('--load_dir', dest='load_dir',
                        help='directory to load models', default="models",
                        type=str)
    parser.add_argument('--cuda', dest='cuda',
                        help='whether use CUDA',
                        action='store_true')
    parser.add_argument('--ls', dest='large_scale',
                        # typo fix: "imag" -> "image"
                        help='whether use large image scale',
                        action='store_true')
    parser.add_argument('--mGPUs', dest='mGPUs',
                        help='whether use multiple GPUs',
                        action='store_true')
    parser.add_argument('--cag', dest='class_agnostic',
                        help='whether perform class_agnostic bbox regression',
                        action='store_true')
    parser.add_argument('--parallel_type', dest='parallel_type',
                        help='which part of model to parallel, 0: all, 1: model before roi pooling',
                        default=0, type=int)
    parser.add_argument('--checksession', dest='checksession',
                        help='checksession to load model',
                        default=1, type=int)
    parser.add_argument('--checkepoch', dest='checkepoch',
                        help='checkepoch to load network',
                        default=1, type=int)
    parser.add_argument('--checkpoint', dest='checkpoint',
                        help='checkpoint to load network',
                        default=10021, type=int)
    parser.add_argument('--vis', dest='vis',
                        help='visualization mode',
                        action='store_true')
    args = parser.parse_args(argv)
    return args
# Optimizer hyper-parameters read from the global config. They are not used by
# the evaluation loop below; kept for parity with the training script.
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
def bb_overlap(bb1, bb2):
    """Return True when two axis-aligned boxes intersect (touching counts).

    Boxes are (x1, y1, x2, y2) with x1 <= x2 and y1 <= y2.
    """
    # Intersection rectangle: it is non-empty exactly when its right/bottom
    # edges are not to the left/above its left/top edges.
    left = max(bb1[0], bb2[0])
    top = max(bb1[1], bb2[1])
    right = min(bb1[2], bb2[2])
    bottom = min(bb1[3], bb2[3])
    return left <= right and top <= bottom
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
np.random.seed(cfg.RNG_SEED)
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "coco":
args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
args.imdbval_name = "coco_2014_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "imagenet":
args.imdb_name = "imagenet_train"
args.imdbval_name = "imagenet_val"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "vg":
args.imdb_name = "vg_150-50-50_minitrain"
args.imdbval_name = "vg_150-50-50_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
cfg.TRAIN.USE_FLIPPED = False
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)
imdb.competition_mode(on=True)
print('{:d} roidb entries'.format(len(roidb)))
input_dir = args.load_dir + "/" + args.net + "/" + args.dataset
if not os.path.exists(input_dir):
raise Exception('There is no input directory for loading network from ' + input_dir)
load_name = os.path.join(input_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
# initilize the network here.
if args.net == 'vgg16':
fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res101':
fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res50':
fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res152':
fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
print("load checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('load model successfully!')
# initilize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
print(cfg.CUDA)
if args.cuda:
cfg.CUDA = True
if args.cuda:
fasterRCNN.cuda()
start = time.time()
max_per_image = 100
vis = args.vis
if vis:
thresh = 0.0
else:
thresh = 0.0
save_name = 'faster_rcnn_10'
num_images = len(imdb.image_index)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, save_name)
dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \
imdb.num_classes, training=False, normalize = False)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
shuffle=False, num_workers=0,
pin_memory=True)
data_iter = iter(dataloader)
_t = {'im_detect': time.time(), 'misc': time.time()}
det_file = os.path.join(output_dir, 'detections.pkl')
fasterRCNN.eval()
empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))
for i in range(num_images):
data = next(data_iter)
with torch.no_grad():
im_data.resize_(data[0].size()).copy_(data[0])
im_info.resize_(data[1].size()).copy_(data[1])
gt_boxes.resize_(data[2].size()).copy_(data[2])
num_boxes.resize_(data[3].size()).copy_(data[3])
det_tic = time.time()
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, \
rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
scores = cls_prob.data
boxes = rois.data[:, :, 1:5]
#print(scores.shape)
#print(boxes.shape)
#exit()
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred.data
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
if args.class_agnostic:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4)
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
pred_boxes /= data[1][0][2].item()
scores = scores.squeeze()
pred_boxes = pred_boxes.squeeze()
det_toc = time.time()
detect_time = det_toc - det_tic
misc_tic = time.time()
#print(scores.shape)
#print(pred_boxes.shape)
#sys.exit()
new_pred_boxes = torch.cuda.FloatTensor(300, 160).zero_()############################## nms for head & tail in each classes
new_scores = torch.cuda.FloatTensor(300,40).zero_()
for k in range(13):
b = torch.cat((pred_boxes[:,12*k+4:12*k+8],pred_boxes[:,12*k+8:12*k+12]),0)
s = torch.cat((scores[:,3*k+1],scores[:,3*k+2]),0)
keep = nms(b, s, 0.2)
#new head class
idx = [g for g in range(len(keep)) if keep[g] <300]
new_pred_boxes[:len(keep[idx]),12*k+4:12*k+8] = b[keep[idx]]
new_scores[:len(keep[idx]),3*k+1] = s[keep[idx]]
#new tail class
idx = [g for g in range(len(keep)) if keep[g] >=300]
new_pred_boxes[:len(keep[idx]),12*k+8:12*k+12] = b[keep[idx]]
new_scores[:len(keep[idx]),3*k+2] = s[keep[idx]]
#new full length class = original
new_pred_boxes[:,12*k+12:12*k+16] = pred_boxes[:,12*k+12:12*k+16]
new_scores[:,3*k+3] = scores[:,3*k+3]
if vis:
im = cv2.imread(imdb.image_path_at(i))
im2show = np.copy(im)
'''
new_pred_boxes = torch.cuda.FloatTensor(300, 160).zero_()############################## nms for all head, (tail, full-length) classes
new_scores = torch.cuda.FloatTensor(300,40).zero_()
for j in range(3):
b = pred_boxes[:,4*j+4:4*j+8]
s = scores[:,j+1]
#print(b.shape)
for k in range(1, 13):
b = torch.cat((b, pred_boxes[:,12*k+4*j+4:12*k+4*j+8]),0)
s = torch.cat((s ,scores[:,3*k+j+1]),0)
#print(b.shape,s.shape)
#sys.exit()
keep = nms(b, s, 0.5)
#print(keep, len(keep))
for l in range(13):
idx = [g.item() for g in keep if g < (l+1)*300 and g >= l*300]
#print(len(idx), new_pred_boxes[:len(idx),12*l+4*j+4:12*l+4*j+8])
#print(new_pred_boxes[:len(idx),12*l+4*j+4:12*l+4*j+8].shape, b[idx].shape)
new_pred_boxes[:len(idx),12*l+4*j+4:12*l+4*j+8] = b[idx]
new_scores[:len(idx),3*l+j+1] = s[idx]
#print([g for g in s[idx] if g > 0.5])
if vis:
im = cv2.imread(imdb.image_path_at(i))
im2show = np.copy(im)
'''
#new_pred_boxes = pred_boxes
#new_scores = scores
#H_classes = [1,4,7,10,13,16,19,22,25,28,31]#########
#T_classes = [2,5,8,11,14,17,20,23,26,29,32]
#exist_classes = []
#exist_dets = []
for j in xrange(1, imdb.num_classes):
inds = torch.nonzero(new_scores[:,j]>thresh).view(-1)
# if there is det
if inds.numel() > 0:
cls_scores = new_scores[:,j][inds]
_, order = torch.sort(cls_scores, 0, True)
if args.class_agnostic:
cls_boxes = new_pred_boxes[inds, :]
else:
cls_boxes = new_pred_boxes[inds][:, j * 4:(j + 1) * 4]
# print(cls_boxes.shape)
# print(cls_scores.unsqueeze(1).shape)
cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
# cls_dets = torch.cat((cls_boxes, cls_scores), 1)
cls_dets = cls_dets[order]
#print(j, cls_dets)
#sys.exit()
keep = nms(cls_boxes[order, :], cls_scores[order], cfg.TEST.NMS)
cls_dets = cls_dets[keep.view(-1).long()]
'''
#kkk = [cls_dets.cpu().numpy()[k][-1] for k in range(len(cls_dets.cpu().numpy()))]
#print(kkk)
#exit()
if j in H_classes:
disjoint_class_n = j+1
if j in T_classes:
disjoint_class_n = j-1
if disjoint_class_n in exist_classes:
idx = exist_classes.index(disjoint_class_n)
if bb_overlap(exist_dets[idx][0][:4],cls_dets.cpu().numpy()[0][:4]):
if exist_dets[idx][0][-1] > cls_dets.cpu().numpy()[0][-1]:
all_boxes[j][i] = empty_array
disjoint_class_n=0
continue
else:
exist_dets[idx] = cls_dets.cpu().numpy()[0]
all_boxes[exist_classes[idx]][i] = empty_array
exist_classes[idx] = j
disjoint_class_n=0
continue
exist_dets.append(cls_dets.cpu().numpy()[0])
exist_classes.append(j)
'''
if vis:
im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)
all_boxes[j][i] = cls_dets.cpu().numpy()
else:
all_boxes[j][i] = empty_array
#print(exist_classes)
#for k, j in enumerate(exist_classes):
# all_boxes[j][i] = exist_dets[k]
#print(all_boxes)
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
#print(all_boxes[3][i][:,-1])
image_scores = np.hstack([all_boxes[j][i][:,-1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
misc_toc = time.time()
nms_time = misc_toc - misc_tic
sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
.format(i + 1, num_images, detect_time, nms_time))
sys.stdout.flush()
if vis:
| |
import math
import numpy as np
import warnings
from collections import Counter
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import calinski_harabasz_score, davies_bouldin_score
from sklearn.preprocessing import MinMaxScaler
from .metrics import gap_statistic, silhouette_score_block
from ..utils.os_utl import check_types, filter_kwargs
from ..utils.log_utl import print_progress_bar, printv
"""
GapStatistic estimator is the Miles Granger implementation 2016.04.25.1430 shared in
https://anaconda.org/milesgranger/gap-statistic/notebook
"""
class EstimatorMixin:
    """Shared behaviour for the K estimators in this module.

    Concrete estimators fill ``self.metrics`` with an (n, 2) array of
    ``(k, score)`` rows and rank the candidate cluster counts with
    :meth:`_get_best_votes`.
    """

    def __init__(self, cluster_fn=MiniBatchKMeans, **kwargs):
        # K: final estimate; best_votes: candidate Ks ranked best-first;
        # cluster: factory called as cluster(k) to fit the data;
        # metrics: filled by the concrete estimator's fit/fit_s_k.
        self.K = 0
        self.best_votes = list()
        self.cluster = cluster_fn
        self.metrics = None

    @staticmethod
    def _get_best_votes(arr):
        """Return candidate Ks (column 0 of *arr*) sorted by descending score (column 1)."""
        k_best_n = arr[arr[:, 1].argsort()[::-1], 0].astype(int)
        return k_best_n.tolist()

    @staticmethod
    def _get_max_k(x, max_k):
        """Clip *max_k* to the number of samples in *x*, warning when clipped."""
        if max_k > x.shape[0]:
            print(f'Can only use number of clusters lower than number of examples ({x.shape[0]}).')
        return min(max_k, x.shape[0])

    def plot_metric(self, ax=None, show=True, normalise=False, n_votes=3):
        """Plot the estimator's metric curve and highlight the top *n_votes* Ks.

        :param ax: Existing matplotlib axes to draw on; a new figure is created when None.
        :param bool show: Add a legend and call ``plt.show()``.
        :param bool normalise: Min-max scale the metric values before plotting.
        :param int n_votes: Number of top-ranked candidates to mark.
        """
        import matplotlib.pyplot as plt
        if ax is None:
            fig, ax = plt.subplots(figsize=(24, 12))
        # metric
        x, y = self.metrics.T
        if normalise:
            y = MinMaxScaler().fit_transform(y.reshape(-1, 1))
        ax.plot(x, y, label=f'{self.__class__.__qualname__} ({self.best_votes[0]})', linewidth=0.7)
        # votes
        # NOTE(review): votes index y as y[votes - 1], which assumes metrics
        # rows are ordered k = 1..n with no gaps — confirm for estimators that
        # stop exploring early (e.g. AsankaPerera).
        votes = np.array(self.best_votes[:n_votes])
        ax.scatter(votes, y[votes - 1], color=ax.lines[-1].get_color(), alpha=0.3, edgecolors='k',
                   s=np.array(range(25, (n_votes+1)*15 + 1, 15))[::-1])
        if show:
            ax.legend()
            plt.show()
        return

    def __repr__(self):
        return f'{self.__class__.__module__}.{self.__class__.__qualname__}'
class Riddle(EstimatorMixin):
    """
    Riddle K-estimator.

    Picks K with the "reciprocal delta log" rule: the k that maximises the
    increase of 1/inertia between consecutive cluster counts, discounted by
    log(k).
    """

    @check_types(x=np.ndarray, max_k=int)
    def fit(self, x, max_k=50, **kwargs):
        """
        Cluster *x* for every k in [1, max_k] and estimate the best K.

        Extra kwargs are accepted only for interface compatibility with the
        other estimators; they are ignored.

        :param np.ndarray x: Array with the data to cluster
        :param int max_k: Maximum number of clusters to try to find
        :return int: Best number of clusters
        """
        max_k = self._get_max_k(x, max_k)
        inertias = np.array([self.cluster(n).fit(x).inertia_ for n in range(1, max_k + 1)])
        return self.fit_s_k(inertias)

    @check_types(s_k=(np.ndarray, list, tuple))
    def fit_s_k(self, s_k, **kwargs):
        """
        Estimate K from pre-computed clustering inertias.

        Extra kwargs are accepted only for interface compatibility with the
        other estimators; they are ignored.

        :param np.ndarray|list|tuple s_k: inertia for each explored n_clusters (k = 1, 2, ...)
        :return int: Best number of clusters
        """
        if isinstance(s_k, (list, tuple)):
            s_k = np.array(s_k)

        reciprocal = 1 / s_k
        ks = np.arange(1, reciprocal.size + 1)
        # The first delta is undefined; mark it with -inf so it cannot win.
        deltas = np.concatenate(([-np.inf], np.diff(reciprocal)))
        scores = deltas / np.log(ks)  # log(1) == 0 keeps the k=1 slot at -inf
        # Replace the -inf placeholder with the worst finite score.
        scores[scores == -np.inf] = scores[scores != -np.inf].min()

        self.metrics = np.vstack((ks, scores)).T
        self.best_votes = self._get_best_votes(self.metrics)
        self.K = self.best_votes[0]
        return self.K
class AsankaPerera(EstimatorMixin):
    """Estimates the K-Means K hyperparameter through geometrical analysis of the distortion curve"""

    def __init__(self, tolerance=1e-3, **kwargs):
        # tolerance: minimum change in inertia between consecutive k values;
        # exploration stops early once the curve flattens below it.
        super().__init__(**kwargs)
        self.tolerance = tolerance

    @check_types(x=np.ndarray, max_k=int)
    def fit(self, x, max_k=50, **kwargs):
        """
        Directly fit this estimator to original data considering max_k clusters.

        :param np.ndarray x: Array with the data to cluster
        :param int max_k: Maximum number of clusters to try to find
        :return int: Best number of clusters
        """
        max_k = self._get_max_k(x, max_k)
        s_k_list = list()
        sk0 = 0
        for k in range(1, max_k + 1):
            sk1 = self.cluster(k).fit(x).inertia_
            s_k_list.append(sk1)
            # Stop early once the distortion curve has flattened.
            if k > 2 and abs(sk0 - sk1) < self.tolerance:
                break
            sk0 = sk1
        # Pass the line endpoints and find max dist
        # (the "elbow" is the point furthest from the chord joining the ends).
        self.metrics = self.find_distances(np.array(s_k_list), x0=1, y0=s_k_list[0], x1=3 * len(s_k_list), y1=0)
        self.best_votes = self._get_best_votes(self.metrics)
        self.K = self.best_votes[0]
        return self.K

    @check_types(s_k=(np.ndarray, list, tuple))
    def fit_s_k(self, s_k, **kwargs):
        """
        Fit the estimator to the distances of each datapoint to assigned cluster centroid

        :param np.ndarray|list|tuple s_k: Collection of inertias_ for each n_cluster explored
        :param kwargs: Tolerance to be used can be passed as a kwarg
        :return int: Best number of clusters
        """
        if isinstance(s_k, list):
            s_k = np.array(s_k)
        s_k_list = list()
        sk0 = 0
        # NOTE(review): here k comes from enumerate and starts at 0, while fit()
        # starts its counter at 1 — the `k > 2` early-stop therefore triggers one
        # step later than in fit(). Confirm whether that asymmetry is intended.
        for k, sk1 in enumerate(s_k):
            s_k_list.append(sk1)
            if (k > 2) and (abs(sk0 - sk1) < self.tolerance):
                break
            sk0 = sk1
        # Pass the line endpoints and find max dist
        self.metrics = self.find_distances(np.array(s_k_list), x0=1, y0=s_k_list[0], x1=3 * len(s_k_list), y1=0)
        self.best_votes = self._get_best_votes(self.metrics)
        self.K = self.best_votes[0]
        return self.K

    @classmethod
    def find_distances(cls, s_k, x0, y0, x1, y1):
        """
        Find the largest distance from each point in s_k (defined by (x=position in array/list, y=value)) to line
        defined by (x0, y0) and (x1, y1)

        :param np.ndarray|list|tuple s_k: values of y of datapoints to test
        :param int|float x0: Coordinate x of point 0
        :param int|float y0: Coordinate y of point 0
        :param int|float x1: Coordinate x of point 1
        :param int|float y1: Coordinate y of point 1
        :return np.ndarray: Array with (best number of clusters, distance to line)
        """
        k_dist = np.array([[k, cls.distance_to_line(k, s_k[k - 1], x0, y0, x1, y1)] for k in range(1, len(s_k) + 1)])
        # If the furthest point is the last one explored, the elbow may lie
        # beyond the explored range — warn the caller.
        if int(k_dist[k_dist[:, 1].argmax(), 0]) == int(k_dist[-1, 0]):
            print('AsankaPerera: Number of clusters explored is not optimal! Run analysis with higher number of '
                  'clusters.')
        return k_dist

    @staticmethod
    def distance_to_line(x0, y0, x1, y1, x2, y2):
        """
        Calculates the distance from (x0,y0) to the line defined by (x1,y1) and (x2,y2)

        :param int|float x0: Coordinate x of point 0
        :param int|float y0: Coordinate y of point 0
        :param int|float x1: Coordinate x of point 1
        :param int|float y1: Coordinate y of point 1
        :param int|float x2: Coordinate x of point 2
        :param int|float y2: Coordinate y of point 2
        :return float: distance between point0 and the line defined by point1 and point2
        """
        # Standard point-to-line distance: |cross product| / |direction vector|.
        dx = x2 - x1
        dy = y2 - y1
        return abs(dy * x0 - dx * y0 + x2 * y1 - y2 * x1) / math.sqrt(dx * dx + dy * dy)
class PhamDimovNguyen(EstimatorMixin):
    """Estimates the best value for K using Pham-Dimov-Nguyen method"""

    @check_types(x=np.ndarray, max_k=int)
    def fit(self, x, max_k=50, **kwargs):
        """
        Directly fit this estimator to original data considering max_k clusters.
        Kwargs passed are not used. They are included for estimators compatibility only

        :param np.ndarray x: Array with the data to cluster
        :param int max_k: Maximum number of clusters to try to find
        :param kwargs:
        :return int: Best number of clusters
        """
        max_k = self._get_max_k(x, max_k)
        # Within-cluster dispersion (inertia) for every candidate k.
        s_k = np.array([self.cluster(k).fit(x).inertia_ for k in range(1, max_k + 1)])
        return self.fit_s_k(s_k, data_shape=x.shape)

    @check_types(s_k=(np.ndarray, list, tuple), data_shape=(np.ndarray, tuple, list))
    def fit_s_k(self, s_k, data_shape, **kwargs):
        """
        Fit the estimator to the distances of each datapoint to assigned cluster centroid
        Kwargs passed are not used. They are included for estimators compatibility only

        :param np.ndarray|list|tuple s_k: Collection of inertias_ for each n_cluster explored
        :param np.ndarray|list|tuple data_shape: shape of the data used to cluster
        :param kwargs:
        :return int: Best number of clusters
        """
        if isinstance(s_k, list):
            s_k = np.array(s_k)
        # calculate all alphas (weight factors; recurrence in _alpha_k;
        # a_k[0] and a_k[1] stay 0 and are never meaningfully used)
        a_k = np.zeros(len(s_k) + 1)
        for i in range(2, len(s_k) + 1):
            a_k[i] = self._alpha_k(a_k, i, data_shape[1])
        # pad s_k to move them to correct cluster position
        s_k = np.pad(s_k, (1, 0), 'constant', constant_values=0)
        # evaluate function for all positions:
        # f(k) = S_k / (a_k * S_{k-1}); k = 1 divides by the zero pad, giving
        # inf, which is replaced by the largest finite value just below.
        with np.errstate(divide='ignore'):
            f_k = s_k[1:] / (a_k[1:] * s_k[:-1])
        f_k[f_k == np.inf] = f_k[f_k != np.inf].max()
        self.metrics = np.vstack((range(1, len(f_k) + 1), f_k)).T
        # get K from where function result is minimum and if needed correct it
        self.best_votes = self._get_best_votes(self.metrics)
        self.K = self.best_votes[0]
        return self.K

    @staticmethod
    def _alpha_k(a_k, k, dim):
        """Weight-factor recurrence: a_2 = 1 - 3/(4*dim); a_k = a_{k-1} + (1 - a_{k-1})/6."""
        if k == 2:
            ak = 1.0 - 3.0 / (4.0 * dim)
        else:
            ak1 = a_k[k - 1]
            ak = ak1 + (1.0 - ak1) / 6.0
        return ak

    @staticmethod
    def _get_best_votes(arr):
        """Rank Ks: those with f(K) <= 0.85 first (ascending f), then the remainder."""
        _arr = arr[arr[:, 1] <= 0.85]
        k_best_n = _arr[_arr[:, 1].argsort(), 0].astype(int)
        rem = arr[~np.isin(arr[:, 0], k_best_n)]
        rem = rem[rem[:, 1].argsort(), 0].astype(int)
        return list(k_best_n) + list(rem)
class GapStatistic(EstimatorMixin):
"""
Class that implements the GapStatistic estimator as defined by Miles Granger (implementation 2016.04.25.1430)
shared in https://anaconda.org/milesgranger/gap-statistic/notebook
"""
    def __init__(self, n_refs=3, **kwargs):
        # n_refs: number of random reference datasets used by the gap statistic.
        super().__init__(**kwargs)
        self.n_refs = n_refs
    def fit(self, x, max_k=50, **kwargs):
        """
        Directly fit this estimator to original data considering max_k clusters.

        :param np.ndarray x: Array with the data to cluster
        :param int max_k: Maximum number of clusters to try to find
        :param kwargs: additional arguments for cluster function passed, e.g: batch_size
        :return int: Best number of clusters
        """
        max_k = self._get_max_k(x, max_k)
        results = list()
        # NOTE(review): gap_index is unused — only k drives the loop.
        for gap_index, k in enumerate(range(1, max_k + 1)):
            # fit data to cluster function provided and Calculate gap statistic
            orig_disp = self.cluster(k, **kwargs).fit(x).inertia_
            results.append((k, gap_statistic(k, orig_disp, x.shape, self.n_refs, **kwargs)))
        self.metrics = np.array(results)
        self.best_votes = self._get_best_votes(self.metrics)
        self.K = self.best_votes[0]
        return self.K
def fit_s_k(self, s_k, data_shape, **kwargs):
"""
Fit the estimator to the distances of each datapoint | |
"image_uri"
elif key == "problemType":
suggest = "problem_type"
elif key == "containerArguments":
suggest = "container_arguments"
elif key == "containerEntrypoint":
suggest = "container_entrypoint"
elif key == "postAnalyticsProcessorSourceUri":
suggest = "post_analytics_processor_source_uri"
elif key == "recordPreprocessorSourceUri":
suggest = "record_preprocessor_source_uri"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ModelQualityJobDefinitionModelQualityAppSpecification. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        # Warn when a camelCase key is used instead of the snake_case property,
        # then defer to the normal dict item lookup.
        ModelQualityJobDefinitionModelQualityAppSpecification.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        # Same camelCase-key warning as __getitem__, with dict.get semantics.
        ModelQualityJobDefinitionModelQualityAppSpecification.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 image_uri: str,
                 problem_type: 'ModelQualityJobDefinitionProblemType',
                 container_arguments: Optional[Sequence[str]] = None,
                 container_entrypoint: Optional[Sequence[str]] = None,
                 environment: Optional[Any] = None,
                 post_analytics_processor_source_uri: Optional[str] = None,
                 record_preprocessor_source_uri: Optional[str] = None):
        """
        Container image configuration object for the monitoring job.
        :param str image_uri: The container image to be run by the monitoring job.
        :param 'ModelQualityJobDefinitionProblemType' problem_type: The machine learning problem type of the model that the monitoring job monitors.
        :param Sequence[str] container_arguments: An array of arguments for the container used to run the monitoring job.
        :param Sequence[str] container_entrypoint: Specifies the entrypoint for a container used to run the monitoring job.
        :param Any environment: Sets the environment variables in the Docker container
        :param str post_analytics_processor_source_uri: An Amazon S3 URI to a script that is called after analysis has been performed. Applicable only for the built-in (first party) containers.
        :param str record_preprocessor_source_uri: An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into a flatted json so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers
        """
        # Required fields are always stored; optional ones only when provided,
        # so absent keys stay absent from the underlying dict.
        pulumi.set(__self__, "image_uri", image_uri)
        pulumi.set(__self__, "problem_type", problem_type)
        if container_arguments is not None:
            pulumi.set(__self__, "container_arguments", container_arguments)
        if container_entrypoint is not None:
            pulumi.set(__self__, "container_entrypoint", container_entrypoint)
        if environment is not None:
            pulumi.set(__self__, "environment", environment)
        if post_analytics_processor_source_uri is not None:
            pulumi.set(__self__, "post_analytics_processor_source_uri", post_analytics_processor_source_uri)
        if record_preprocessor_source_uri is not None:
            pulumi.set(__self__, "record_preprocessor_source_uri", record_preprocessor_source_uri)
    @property
    @pulumi.getter(name="imageUri")
    def image_uri(self) -> str:
        """
        The container image to be run by the monitoring job.
        """
        # Read-only view of the value stored by __init__ via pulumi.set.
        return pulumi.get(self, "image_uri")
    @property
    @pulumi.getter(name="problemType")
    def problem_type(self) -> 'ModelQualityJobDefinitionProblemType':
        """
        The machine learning problem type of the model that the monitoring job monitors.
        """
        return pulumi.get(self, "problem_type")
    @property
    @pulumi.getter(name="containerArguments")
    def container_arguments(self) -> Optional[Sequence[str]]:
        """
        An array of arguments for the container used to run the monitoring job.
        """
        # Optional: returns None when the key was never set in __init__.
        return pulumi.get(self, "container_arguments")
    @property
    @pulumi.getter(name="containerEntrypoint")
    def container_entrypoint(self) -> Optional[Sequence[str]]:
        """
        Specifies the entrypoint for a container used to run the monitoring job.
        """
        # Optional: returns None when the key was never set in __init__.
        return pulumi.get(self, "container_entrypoint")
    @property
    @pulumi.getter
    def environment(self) -> Optional[Any]:
        """
        Sets the environment variables in the Docker container
        """
        # Optional: returns None when the key was never set in __init__.
        return pulumi.get(self, "environment")
    @property
    @pulumi.getter(name="postAnalyticsProcessorSourceUri")
    def post_analytics_processor_source_uri(self) -> Optional[str]:
        """
        An Amazon S3 URI to a script that is called after analysis has been performed. Applicable only for the built-in (first party) containers.
        """
        # Optional: returns None when the key was never set in __init__.
        return pulumi.get(self, "post_analytics_processor_source_uri")
    @property
    @pulumi.getter(name="recordPreprocessorSourceUri")
    def record_preprocessor_source_uri(self) -> Optional[str]:
        """
        An Amazon S3 URI to a script that is called per row prior to running analysis. It can base64 decode the payload and convert it into a flatted json so that the built-in container can use the converted data. Applicable only for the built-in (first party) containers
        """
        # Optional: returns None when the key was never set in __init__.
        return pulumi.get(self, "record_preprocessor_source_uri")
@pulumi.output_type
class ModelQualityJobDefinitionModelQualityBaselineConfig(dict):
    """
    Baseline configuration used to validate that the data conforms to the specified constraints and statistics.
    """

    @staticmethod
    def __key_warning(key: str):
        # Map camelCase (CloudFormation-style) keys to their snake_case Python
        # property names so mistaken dict access can be pointed at the getter.
        suggest = None
        if key == "baseliningJobName":
            suggest = "baselining_job_name"
        elif key == "constraintsResource":
            suggest = "constraints_resource"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ModelQualityJobDefinitionModelQualityBaselineConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access, then defer to dict lookup.
        ModelQualityJobDefinitionModelQualityBaselineConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn on camelCase access, with dict.get semantics.
        ModelQualityJobDefinitionModelQualityBaselineConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 baselining_job_name: Optional[str] = None,
                 constraints_resource: Optional['outputs.ModelQualityJobDefinitionConstraintsResource'] = None):
        """
        Baseline configuration used to validate that the data conforms to the specified constraints and statistics.
        :param str baselining_job_name: Name of the baselining job (optional).
        :param 'ModelQualityJobDefinitionConstraintsResource' constraints_resource: Constraints resource for the monitoring job (optional).
        """
        # Only store the keys that were actually provided.
        if baselining_job_name is not None:
            pulumi.set(__self__, "baselining_job_name", baselining_job_name)
        if constraints_resource is not None:
            pulumi.set(__self__, "constraints_resource", constraints_resource)

    @property
    @pulumi.getter(name="baseliningJobName")
    def baselining_job_name(self) -> Optional[str]:
        # Optional: returns None when the key was never set in __init__.
        return pulumi.get(self, "baselining_job_name")

    @property
    @pulumi.getter(name="constraintsResource")
    def constraints_resource(self) -> Optional['outputs.ModelQualityJobDefinitionConstraintsResource']:
        # Optional: returns None when the key was never set in __init__.
        return pulumi.get(self, "constraints_resource")
@pulumi.output_type
class ModelQualityJobDefinitionModelQualityJobInput(dict):
    """
    The inputs for a monitoring job.
    """

    @staticmethod
    def __key_warning(key: str):
        # Map camelCase (CloudFormation-style) keys to their snake_case Python
        # property names so mistaken dict access can be pointed at the getter.
        suggest = None
        if key == "endpointInput":
            suggest = "endpoint_input"
        elif key == "groundTruthS3Input":
            suggest = "ground_truth_s3_input"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ModelQualityJobDefinitionModelQualityJobInput. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access, then defer to dict lookup.
        ModelQualityJobDefinitionModelQualityJobInput.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn on camelCase access, with dict.get semantics.
        ModelQualityJobDefinitionModelQualityJobInput.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 endpoint_input: 'outputs.ModelQualityJobDefinitionEndpointInput',
                 ground_truth_s3_input: 'outputs.ModelQualityJobDefinitionMonitoringGroundTruthS3Input'):
        """
        The inputs for a monitoring job.
        :param 'ModelQualityJobDefinitionEndpointInput' endpoint_input: The endpoint whose traffic the monitoring job analyses.
        :param 'ModelQualityJobDefinitionMonitoringGroundTruthS3Input' ground_truth_s3_input: Ground truth input provided in S3.
        """
        # Both inputs are required.
        pulumi.set(__self__, "endpoint_input", endpoint_input)
        pulumi.set(__self__, "ground_truth_s3_input", ground_truth_s3_input)

    @property
    @pulumi.getter(name="endpointInput")
    def endpoint_input(self) -> 'outputs.ModelQualityJobDefinitionEndpointInput':
        return pulumi.get(self, "endpoint_input")

    @property
    @pulumi.getter(name="groundTruthS3Input")
    def ground_truth_s3_input(self) -> 'outputs.ModelQualityJobDefinitionMonitoringGroundTruthS3Input':
        return pulumi.get(self, "ground_truth_s3_input")
@pulumi.output_type
class ModelQualityJobDefinitionMonitoringGroundTruthS3Input(dict):
    """
    Ground truth input provided in S3
    """

    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase key to its snake_case Python property name.
        suggest = None
        if key == "s3Uri":
            suggest = "s3_uri"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ModelQualityJobDefinitionMonitoringGroundTruthS3Input. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access, then defer to dict lookup.
        ModelQualityJobDefinitionMonitoringGroundTruthS3Input.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn on camelCase access, with dict.get semantics.
        ModelQualityJobDefinitionMonitoringGroundTruthS3Input.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 s3_uri: str):
        """
        Ground truth input provided in S3
        :param str s3_uri: A URI that identifies the Amazon S3 storage location where Amazon SageMaker saves the results of a monitoring job.
        """
        pulumi.set(__self__, "s3_uri", s3_uri)

    @property
    @pulumi.getter(name="s3Uri")
    def s3_uri(self) -> str:
        """
        A URI that identifies the Amazon S3 storage location where Amazon SageMaker saves the results of a monitoring job.
        """
        return pulumi.get(self, "s3_uri")
@pulumi.output_type
class ModelQualityJobDefinitionMonitoringOutput(dict):
    """
    The output object for a monitoring job.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase wire key to its snake_case property name.
        suggest = None
        if key == "s3Output":
            suggest = "s3_output"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ModelQualityJobDefinitionMonitoringOutput. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on deprecated camelCase access, then delegate to dict.
        ModelQualityJobDefinitionMonitoringOutput.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Same warning as __getitem__, with dict.get() semantics.
        ModelQualityJobDefinitionMonitoringOutput.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 s3_output: 'outputs.ModelQualityJobDefinitionS3Output'):
        """
        The output object for a monitoring job.
        :param 'ModelQualityJobDefinitionS3Output' s3_output: S3 destination of the monitoring output.
        """
        pulumi.set(__self__, "s3_output", s3_output)

    @property
    @pulumi.getter(name="s3Output")
    def s3_output(self) -> 'outputs.ModelQualityJobDefinitionS3Output':
        # Returns the value stored under "s3_output" in __init__.
        return pulumi.get(self, "s3_output")
@pulumi.output_type
class ModelQualityJobDefinitionMonitoringOutputConfig(dict):
    """
    The output configuration for monitoring jobs.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map each camelCase wire key to its snake_case property name.
        suggest = None
        if key == "monitoringOutputs":
            suggest = "monitoring_outputs"
        elif key == "kmsKeyId":
            suggest = "kms_key_id"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ModelQualityJobDefinitionMonitoringOutputConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on deprecated camelCase access, then delegate to dict.
        ModelQualityJobDefinitionMonitoringOutputConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Same warning as __getitem__, with dict.get() semantics.
        ModelQualityJobDefinitionMonitoringOutputConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 monitoring_outputs: Sequence['outputs.ModelQualityJobDefinitionMonitoringOutput'],
                 kms_key_id: Optional[str] = None):
        """
        The output configuration for monitoring jobs.
        :param Sequence['ModelQualityJobDefinitionMonitoringOutput'] monitoring_outputs: Monitoring outputs for monitoring jobs. This is where the output of the periodic monitoring jobs is uploaded.
        :param str kms_key_id: The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.
        """
        pulumi.set(__self__, "monitoring_outputs", monitoring_outputs)
        # kms_key_id is optional; only store it when supplied so absent keys
        # stay absent in the underlying dict.
        if kms_key_id is not None:
            pulumi.set(__self__, "kms_key_id", kms_key_id)

    @property
    @pulumi.getter(name="monitoringOutputs")
    def monitoring_outputs(self) -> Sequence['outputs.ModelQualityJobDefinitionMonitoringOutput']:
        """
        Monitoring outputs for monitoring jobs. This is where the output of the periodic monitoring jobs is uploaded.
        """
        return pulumi.get(self, "monitoring_outputs")

    @property
    @pulumi.getter(name="kmsKeyId")
    def kms_key_id(self) -> Optional[str]:
        """
        The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.
        """
        return pulumi.get(self, "kms_key_id")
@pulumi.output_type
class ModelQualityJobDefinitionMonitoringResources(dict):
    """
    Identifies the resources to deploy for a monitoring job.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase wire key to its snake_case property name.
        suggest = None
        if key == "clusterConfig":
            suggest = "cluster_config"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ModelQualityJobDefinitionMonitoringResources. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on deprecated camelCase access, then delegate to dict.
        ModelQualityJobDefinitionMonitoringResources.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Same warning as __getitem__, with dict.get() semantics.
        ModelQualityJobDefinitionMonitoringResources.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 cluster_config: 'outputs.ModelQualityJobDefinitionClusterConfig'):
        """
        Identifies the resources to deploy for a monitoring job.
        :param 'ModelQualityJobDefinitionClusterConfig' cluster_config: The cluster used to run the monitoring job.
        """
        pulumi.set(__self__, "cluster_config", cluster_config)

    @property
    @pulumi.getter(name="clusterConfig")
    def cluster_config(self) -> 'outputs.ModelQualityJobDefinitionClusterConfig':
        # Returns the value stored under "cluster_config" in __init__.
        return pulumi.get(self, "cluster_config")
@pulumi.output_type
class ModelQualityJobDefinitionNetworkConfig(dict):
"""
Networking options for a job, such as network traffic encryption between containers, whether to allow inbound and outbound network calls to and from containers, and the VPC subnets and security groups to | |
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
from direct.gui.DirectGui import *
from direct.interval.IntervalGlobal import *
from libotp import WhisperPopup
from panda3d.core import *
from pizzapi import *
from toontown.battle import Fanfare
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import random
# Indices into a deal entry: (code, description) — see generateDeal().
DEAL_CODE = 0
DEAL_DESC = 1
# Title-screen button indices handled by selectOption().
BUTTON_BROWSE = 0
BUTTON_ADDRESS = 1
BUTTON_CLOSE = 2
# Number of entry boxes created on the information screen (see load()).
INFO_COUNT = 4
# States of the information-entry flow (self.infoState starts at STAGE_NAME).
STAGE_NAME = 0
STAGE_ADDRESS = 1
STAGE_CARD = 2
STAGE_FINAL = 3
# Indices into a menu item entry — presumably (code, name, price);
# TODO confirm against the menu-handling code (not visible in this chunk).
ITEM_CODE = 0
ITEM_NAME = 1
ITEM_PRICE = 2
class PizzaGUI(DirectFrame, StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('PizzaGUI')
    def __init__(self):
        """Create the dialog frame, load audio assets and zero all state.

        Widget construction is deferred to load(); this only prepares
        attributes so load()/enter()/unload() can test and fill them.
        """
        DirectFrame.__init__(self, parent=aspect2d, relief=None, image=DGG.getDefaultDialogGeom(), pos=(0.0, 0.0, 0.05),
                             image_scale=(1.8, 1, 1.4), image_pos=(0, 0, -0.05), image_color=ToontownGlobals.GlobalDialogColor,
                             text=TTLocalizer.PizzaMenuTitle, text_scale=0.12, text_pos=(0, 0.5), borderWidth=(0.01, 0.01))
        StateData.StateData.__init__(self, 'pizza-gui-done')
        self.setBin('gui-popup', 0)
        self.initialiseoptions(PizzaGUI)
        # These help us keep track of whether we have loaded/entered the interface
        self.isLoaded = 0
        self.isEntered = 0
        # Some nice music to play when our menu is open
        self.music = base.loader.loadMusic('phase_14.5/audio/bgm/PP_theme.ogg')
        # Sound effects for success and failure
        self.successSfx = base.loader.loadSfx('phase_3.5/audio/sfx/tt_s_gui_sbk_cdrSuccess.ogg')
        self.failureSfx = base.loader.loadSfx('phase_3.5/audio/sfx/tt_s_gui_sbk_cdrFailure.ogg')
        self.purchaseSfx = base.loader.loadSfx('phase_14/audio/sfx/cash.ogg')
        # Non-GUI Info (order state: who is ordering, where to, what's in the cart)
        self.infoState = STAGE_NAME
        self.customer = None
        self.address = None
        self.card = None
        self.store = None
        self.menu = None
        self.productId = None
        self.cart = []
        self.activeDeals = []
        # Used between multiple screens
        self.logo = None
        self.infoLabel = None
        # The information submission screen
        self.infoNode = None
        self.entryLabels = []
        self.entryFrames = []
        self.entryInputs = []
        self.submitButton = None
        # The title screen
        self.titleNode = None
        self.menuButtons = []
        self.dealButton = None
        # Menu screen
        self.menuNode = None
        self.menuEntryFrame = None
        self.menuEntryInput = None
        self.checkoutButton = None
        self.addButton = None
        self.menuList = None
        self.productLabel = None
        self.backButton = None
        # Checkout screen
        self.checkoutNode = None
        self.backToMenuButton = None
        self.removeButton = None
        self.placeOrderButton = None
        self.cartList = None
        self.cartProductLabel = None
        self.totalsLabel = None
def unload(self):
# Only unload if we have the interface loaded
if not self.isLoaded:
return
self.isLoaded = False
# Exit the interface
self.exit()
# Unload the DirectFrame
DirectFrame.destroy(self)
    def load(self):
        """Build every widget of the pizza interface (idempotent).

        Creates four screens, each parented to its own node so they can be
        shown/hidden independently: the information-entry screen (infoNode),
        the title screen (titleNode), the menu screen (menuNode) and the
        checkout screen (checkoutNode). The whole frame is hidden at the end;
        enter() is responsible for showing it.
        """
        # Only load if we haven't yet loaded
        if self.isLoaded:
            return
        self.isLoaded = True
        # Load the Domino's Pizza logo as a texture
        logoTexture = loader.loadTexture('phase_14.5/maps/pp_logo.jpg', 'phase_14.5/maps/pp_logo_a.rgb')
        self.logo = OnscreenImage(image=logoTexture)
        self.logo.setTransparency(TransparencyAttrib.MAlpha)
        self.logo.setScale(0.25)
        self.logo.setPos(-0.35, 0, 0)
        self.logo.reparentTo(self)
        # A general label we use to display information
        self.infoLabel = DirectLabel(parent=self, relief=None, pos=(-0.35, 0.0, -0.55), scale=0.1, text=TTLocalizer.PizzaInfoNameEntry,
                                     text_font=ToontownGlobals.getInterfaceFont())
        # We use this node for our information submission screen
        self.infoNode = self.attachNewNode('infoNode')
        # Generate entry boxes to put info into
        model = loader.loadModel('phase_3.5/models/gui/tt_m_gui_sbk_codeRedemptionGui')
        for x in range(INFO_COUNT):
            # Get the label text and position for this entry box
            text = TTLocalizer.PizzaEntryName[x]
            z = (-0.25 * (x + 1)) + 0.5
            # A label that goes above the entry box
            entryInfo = DirectLabel(parent=self.infoNode, relief=None, pos=(0.4, 0.0, z + 0.1), scale=0.07, text=text,
                                    text_font=ToontownGlobals.getInterfaceFont())
            self.entryLabels.append(entryInfo)
            # The entry box GUI
            entryFrame = DirectFrame(parent=self.infoNode, relief=None, image=model.find('**/tt_t_gui_sbk_cdrCodeBox'),
                                     pos=(0.4, 0.0, z), scale=0.6)
            self.entryFrames.append(entryFrame)
            # A DirectEntry to input information
            entryInput = DirectEntry(parent=self.infoNode, relief=None, text_scale=0.035, width=11.5,
                                     textMayChange=1, pos=(0.2, 0.0, z), text_align=TextNode.ALeft,
                                     backgroundFocus=0, focusInCommand=self.toggleEntryFocus,
                                     focusInExtraArgs=[True], focusOutCommand=self.toggleEntryFocus,
                                     focusOutExtraArgs=[False])
            self.entryInputs.append(entryInput)
        model.removeNode()
        # This button submits the info we entered in the entry boxes
        model = loader.loadModel('phase_3/models/gui/quit_button')
        self.submitButton = DirectButton(parent=self.infoNode, relief=None, scale=1, pos=(-0.35, 0.0, -0.65),
                                         image=(model.find('**/QuitBtn_UP'), model.find('**/QuitBtn_DN'), model.find('**/QuitBtn_RLVR')),
                                         image_scale=(1.25, 1, 1), text=TTLocalizer.PizzaInfoSubmit, text_scale=0.05,
                                         text_pos=TTLocalizer.DSDcancelPos, command=self.submitInformation)
        model.removeNode()
        # We use this node for our menu's title screen
        self.titleNode = self.attachNewNode('titleNode')
        model = loader.loadModel('phase_3/models/gui/quit_button')
        for x in range(len(TTLocalizer.PizzaButtons)):
            text = TTLocalizer.PizzaButtons[x]
            z = (-0.2 * (x + 1)) + 0.4
            # Button index x is passed to selectOption, matching BUTTON_* constants
            button = DirectButton(parent=self.titleNode, relief=None, scale=1.25, pos=(0.45, 0.0, z), image=(model.find('**/QuitBtn_UP'),
                                                                                                            model.find('**/QuitBtn_DN'),
                                                                                                            model.find('**/QuitBtn_RLVR')),
                                  image_scale=(1.25, 1, 1), text=text, text_scale=0.05,
                                  text_pos=TTLocalizer.DSDcancelPos, command=self.selectOption, extraArgs=[x])
            self.menuButtons.append(button)
        model.removeNode()
        # Create a button that you can click to automatically add a daily deal to your cart
        self.dealButton = DirectButton(parent=self.titleNode, relief=None, pos=(0.0, 0, -0.65), scale=0.05, text=TTLocalizer.PizzaNoCoupon,
                                       text_font=ToontownGlobals.getInterfaceFont(), text_align=TextNode.ACenter,
                                       command=self.selectDeal, extraArgs=[])
        # Left side clip plane (the deal text scrolls between the two planes)
        leftClip = PlaneNode("left-clip", Plane(Vec3(1.0, 0.0, 0.0), Point3()))
        leftClip.setClipEffect(1)
        leftClipNode = self.titleNode.attachNewNode(leftClip)
        leftClipNode.setX(-0.8)
        self.dealButton.setClipPlane(leftClipNode)
        # Right side clip plane
        rightClip = PlaneNode("right-clip", Plane(Vec3(-1.0, 0.0, 0.0), Point3()))
        rightClip.setClipEffect(1)
        rightClipNode = self.titleNode.attachNewNode(rightClip)
        rightClipNode.setX(0.8)
        self.dealButton.setClipPlane(rightClipNode)
        # Second to last, a node for our simple menu
        self.menuNode = self.attachNewNode('menuNode')
        # We need a box to search for products with
        model = loader.loadModel('phase_3.5/models/gui/tt_m_gui_sbk_codeRedemptionGui')
        self.menuEntryFrame = DirectFrame(parent=self.menuNode, relief=None, image=model.find('**/tt_t_gui_sbk_cdrCodeBox'),
                                          pos=(-0.35, 0.0, 0.175), scale=0.6)
        self.menuEntryInput = DirectEntry(parent=self.menuNode, relief=None, text_scale=0.035, width=11.5,
                                          textMayChange=1, pos=(-0.55, 0.0, 0.175), text_align=TextNode.ALeft,
                                          backgroundFocus=0, focusInCommand=self.toggleEntryFocus,
                                          focusInExtraArgs=[True], focusOutCommand=self.toggleEntryFocus,
                                          focusOutExtraArgs=[False], command=self.menuSearch)
        model.removeNode()
        # These buttons allow you to add items to your cart and go to checkout
        model = loader.loadModel('phase_3/models/gui/quit_button')
        self.checkoutButton = DirectButton(parent=self.menuNode, relief=None, scale=1, pos=(0.45, 0.0, -0.65),
                                           image=(model.find('**/QuitBtn_UP'), model.find('**/QuitBtn_DN'), model.find('**/QuitBtn_RLVR')),
                                           image_scale=(1.25, 1, 1), text=TTLocalizer.PizzaInfoCheckout, text_scale=0.05,
                                           text_pos=TTLocalizer.DSDcancelPos, command=self.checkout)
        self.addButton = DirectButton(parent=self.menuNode, relief=None, scale=1, pos=(-0.35, 0.0, -0.55),
                                      image=(model.find('**/QuitBtn_UP'), model.find('**/QuitBtn_DN'), model.find('**/QuitBtn_RLVR')),
                                      image_scale=(1.25, 1, 1), text=TTLocalizer.PizzaInfoSelect, text_scale=0.05,
                                      text_pos=TTLocalizer.DSDcancelPos, command=self.selectItem)
        model.removeNode()
        # The actual list of items in our menu
        model = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
        self.menuList = DirectScrolledList(parent=self.menuNode, relief=None, forceHeight=0.07, pos=(0.45, 0, -0.05),
                                           incButton_image=(model.find('**/FndsLst_ScrollUp'),
                                                            model.find('**/FndsLst_ScrollDN'),
                                                            model.find('**/FndsLst_ScrollUp_Rllvr'),
                                                            model.find('**/FndsLst_ScrollUp')), incButton_relief=None,
                                           incButton_scale=(1.3, 1.3, -1.3), incButton_pos=(0.0, 0, -0.5),
                                           incButton_image3_color=Vec4(1, 1, 1, 0.2),
                                           decButton_image=(model.find('**/FndsLst_ScrollUp'),
                                                            model.find('**/FndsLst_ScrollDN'),
                                                            model.find('**/FndsLst_ScrollUp_Rllvr'),
                                                            model.find('**/FndsLst_ScrollUp')), decButton_relief=None,
                                           decButton_scale=(1.3, 1.3, 1.3), decButton_pos=(0.0, 0, 0.47),
                                           decButton_image3_color=Vec4(1, 1, 1, 0.2), itemFrame_pos=(-0.237, 0, 0.41),
                                           itemFrame_scale=1.0, itemFrame_relief=DGG.SUNKEN,
                                           itemFrame_frameSize=(-0.05, 0.56, -0.87, 0.02), itemFrame_frameColor=(0.85, 0.95, 1, 1),
                                           itemFrame_borderWidth=(0.01, 0.01), numItemsVisible=12,
                                           items=[])
        model.removeNode()
        # This label shows product information
        self.productLabel = DirectLabel(parent=self.menuNode, relief=None, pos=(-0.35, 0.0, -0.1), scale=0.075, text='',
                                        text_font=ToontownGlobals.getInterfaceFont(), text_wordwrap=12)
        # A button that allows us to go back to the title screen
        model = loader.loadModel('phase_3/models/gui/tt_m_gui_mat_mainGui')
        image = [model.find('**/tt_t_gui_mat_shuffleArrow' + name) for name in ('Up', 'Down', 'Up', 'Disabled')]
        self.backButton = DirectButton(self.menuNode, relief=None, image=image,
                                       pos=(-0.965, 0.0, 0.0), command=self.closeMenu)
        model.removeNode()
        # And finally, a node for the checkout screen
        self.checkoutNode = self.attachNewNode('checkoutNode')
        # A button that allows us to go back to the menu
        model = loader.loadModel('phase_3/models/gui/tt_m_gui_mat_mainGui')
        image = [model.find('**/tt_t_gui_mat_shuffleArrow' + name) for name in ('Up', 'Down', 'Up', 'Disabled')]
        self.backToMenuButton = DirectButton(self.checkoutNode, relief=None, image=image,
                                             pos=(-0.965, 0.0, 0.0), command=self.returnToCheckout)
        model.removeNode()
        # A remove from cart and place order button
        model = loader.loadModel('phase_3/models/gui/quit_button')
        self.removeButton = DirectButton(parent=self.checkoutNode, relief=None, scale=1, pos=(-0.35, 0.0, -0.55),
                                         image=(model.find('**/QuitBtn_UP'), model.find('**/QuitBtn_DN'), model.find('**/QuitBtn_RLVR')),
                                         image_scale=(1.25, 1, 1), text=TTLocalizer.PizzaRemoveCart, text_scale=0.05,
                                         text_pos=TTLocalizer.DSDcancelPos, command=self.removeItem)
        self.placeOrderButton = DirectButton(parent=self.checkoutNode, relief=None, scale=1, pos=(0.45, 0.0, -0.65),
                                             image=(model.find('**/QuitBtn_UP'), model.find('**/QuitBtn_DN'), model.find('**/QuitBtn_RLVR')),
                                             image_scale=(1.25, 1, 1), text=TTLocalizer.PizzaPlaceOrder, text_scale=0.05,
                                             text_pos=TTLocalizer.DSDcancelPos, command=self.placeOrder)
        model.removeNode()
        # The list of items in our cart
        model = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
        self.cartList = DirectScrolledList(parent=self.checkoutNode, relief=None, forceHeight=0.07, pos=(0.45, 0, -0.05),
                                           incButton_image=(model.find('**/FndsLst_ScrollUp'),
                                                            model.find('**/FndsLst_ScrollDN'),
                                                            model.find('**/FndsLst_ScrollUp_Rllvr'),
                                                            model.find('**/FndsLst_ScrollUp')), incButton_relief=None,
                                           incButton_scale=(1.3, 1.3, -1.3), incButton_pos=(0.0, 0, -0.5),
                                           incButton_image3_color=Vec4(1, 1, 1, 0.2),
                                           decButton_image=(model.find('**/FndsLst_ScrollUp'),
                                                            model.find('**/FndsLst_ScrollDN'),
                                                            model.find('**/FndsLst_ScrollUp_Rllvr'),
                                                            model.find('**/FndsLst_ScrollUp')), decButton_relief=None,
                                           decButton_scale=(1.3, 1.3, 1.3), decButton_pos=(0.0, 0, 0.47),
                                           decButton_image3_color=Vec4(1, 1, 1, 0.2), itemFrame_pos=(-0.237, 0, 0.41),
                                           itemFrame_scale=1.0, itemFrame_relief=DGG.SUNKEN,
                                           itemFrame_frameSize=(-0.05, 0.56, -0.87, 0.02), itemFrame_frameColor=(0.85, 0.95, 1, 1),
                                           itemFrame_borderWidth=(0.01, 0.01), numItemsVisible=12,
                                           items=[])
        model.removeNode()
        # This label shows product information
        self.cartProductLabel = DirectLabel(parent=self.checkoutNode, relief=None, pos=(-0.35, 0.0, -0.1), scale=0.075, text='',
                                            text_font=ToontownGlobals.getInterfaceFont(), text_wordwrap=12)
        # This label shows our totals
        self.totalsLabel = DirectLabel(parent=self.checkoutNode, relief=None, pos=(-0.60, 0.0, 0.3), scale=0.0675, text=TTLocalizer.PizzaTotals,
                                       text_font=ToontownGlobals.getInterfaceFont(), text_align=TextNode.ALeft)
        # Hide the interface because we're only loading it, not entering it
        self.hide()
        # Exit the interface if we get caged
        # Temp commented out because it activates self.exit automatically as soon as you enter the menu
        #self.accept('toon-caged', self.exit)
def selectOption(self, index):
if index == BUTTON_CLOSE:
# Exit the interface
self.exit()
elif index == BUTTON_ADDRESS:
# End the deal generation cycle
taskMgr.remove(self.uniqueName('generateDeal'))
# Open information interface
self.requestInformation()
elif index == BUTTON_BROWSE:
# End the deal generation cycle
taskMgr.remove(self.uniqueName('generateDeal'))
# Open the ordering menu
self.openMenu()
def selectDeal(self, code):
print("Selecting deal with code: {}".format(code))
    def generateDeal(self):
        """Display one active deal on the scrolling deal button and schedule the next.

        The button text starts just off the right clip plane and is lerped past
        the left one; the duration is proportional to the text width, so longer
        descriptions scroll at the same apparent speed. Reschedules itself via
        taskMgr once the current scroll finishes (plus a small gap).
        """
        # Update the deal code and description
        if self.activeDeals:
            deal = random.choice(self.activeDeals)
            code = deal[DEAL_CODE]
            # Clicking the button passes the current deal's code to selectDeal.
            self.dealButton['extraArgs'] = [code]
            desc = deal[DEAL_DESC]
            self.dealButton.setText(desc)
        # Get bounds of the new text and set it's position based on it
        deal = self.dealButton.component('text0')
        bMin, bMax = deal.getTightBounds()
        # bound is the scaled half-width of the text (button scale is 0.05 -> /5
        # presumably compensates for it — TODO confirm the factor).
        bound = bMax.x / 5
        self.dealButton.setPos(Point3(bound, 0.0, -0.65))
        # Make an interval that moves the text along the screen
        duration = bound * 5
        ival = LerpPosInterval(self.dealButton, duration, Point3(-bound, 0.0, -0.65))
        ival.start()
        # Show another deal after this one
        taskMgr.doMethodLater(duration + 0.25, self.generateDeal, self.uniqueName('generateDeal'), extraArgs=[])
def enter(self):
# Only allow us to enter the menu if we aren't already in it
if self.isEntered == 1:
return
self.isEntered = 1
# Lock the Toon down
# TODO: (remove this if you are porting to a non-ttoff source)
base.localAvatar.setLocked(True)
# If we haven't loaded yet for some reason, load the interface
| |
Caso 2: manca il segmento ma ne ho altri. errore.
exit("Error: the linesegments of the door does not belong to the space")
    def set_centroid(self,centroid) :
        '''
        Explicitly override the centroid — e.g. when only the topological map
        is available, or the caller wants a value different from the computed one.
        '''
        if self.centroid :
            # Overwriting an existing centroid is allowed, but flagged.
            print("Warning. Centroid already been set")
        self.centroid = centroid
class floor(object):
    """One floor of a building: boundary points/segments, doors, spaces and
    derived geometry (polygon, area, perimeter, bounds, centroid).

    Geometry relies on shapely's LinearRing/Polygon, assumed to be imported
    at module level.
    """

    def __init__(self, floor_number=None):
        # unique id of the floor
        self.id = uuid.uuid4()
        # floor number; optional, left as None when unknown
        self.floor_number = floor_number
        # boundary points of the floor, in insertion order
        self.points = []
        # boundary linesegments; optional, may stay empty
        # BUG FIX: was misspelled 'self.linesgments' while every other method
        # reads 'self.linesegments', so add_linesegment/toxml always failed.
        self.linesegments = []
        # the doors of the floor
        self.doors = []
        # True once compute() has derived the geometric information
        # BUG FIX: renamed from 'self.compute' — the old attribute shadowed
        # the compute() method, making self.compute() uncallable on instances.
        self.computed = False
        # outer ring of the floor (shapely LinearRing)
        self.bounding_polygon = None
        # polygon of the floor (similar to the outer ring, but a closed area)
        self.polygon = None
        # centroid; for a topological map it is taken from the layout
        self.centroid = None
        # the spaces (rooms) that make up the floor
        self.spaces = []
        # topological map of the floor
        # BUG FIX: was a bare expression 'self.topological_map', which raised
        # AttributeError on every construction; it must be assigned.
        self.topological_map = None
        # cached xml of the floor
        self.xml = None

    def toxml(self, doc, namespace):
        """Serialise the floor (and recursively its segments, doors and
        spaces) to an XML element. The result is cached in self.xml."""
        if self.xml:
            return self.xml
        floor_xml = doc.createElementNS(namespace, "floor")
        floor_xml.setAttribute("id", str(self.id))
        if self.floor_number:
            floor_number_xml = doc.createElementNS(namespace, "floor_number")
            floor_number_xml.appendChild(doc.createTextNode(str(self.floor_number)))
            floor_xml.appendChild(floor_number_xml)
        if self.centroid:
            centroid_xml = doc.createElementNS(namespace, "centroid")
            centroid_xml.setAttribute("x", str(self.centroid.x))
            # BUG FIX: was 'self.self.centroid.y' (AttributeError)
            centroid_xml.setAttribute("y", str(self.centroid.y))
            floor_xml.appendChild(centroid_xml)
        if self.computed:
            # bounding box as (minx, miny, maxx, maxy)
            minx, miny, maxx, maxy = self.bounds
            boundingBox_xml = doc.createElementNS(namespace, "boundingBox")
            # One <point> per corner; the (tag, x, y) triples reproduce the
            # original element order and coordinates exactly.
            # BUG FIX: the original appended the undefined names 'min_xml'
            # and 'mint_xml' for the minx/miny corners (NameError).
            for tag, x, y in (("maxx", maxx, miny), ("maxy", maxx, maxy),
                              ("minx", minx, maxy), ("miny", minx, miny)):
                corner_xml = doc.createElementNS(namespace, tag)
                point_xml = doc.createElementNS(namespace, "point")
                point_xml.setAttribute("x", str(x))
                point_xml.setAttribute("y", str(y))
                corner_xml.appendChild(point_xml)
                boundingBox_xml.appendChild(corner_xml)
            floor_xml.appendChild(boundingBox_xml)
            data_xml = doc.createElementNS(namespace, "data")
            area_xml = doc.createElementNS(namespace, "area")
            area_xml.appendChild(doc.createTextNode(str(self.area)))
            data_xml.appendChild(area_xml)
            perimeter_xml = doc.createElementNS(namespace, "perimeter")
            # BUG FIX: was 'perimeter.appendChild' (NameError)
            perimeter_xml.appendChild(doc.createTextNode(str(self.perimeter)))
            data_xml.appendChild(perimeter_xml)
            floor_xml.appendChild(data_xml)
        if self.linesegments:
            # contour - list of points
            bounding_polygon_xml = doc.createElementNS(namespace, "bounding_polygon")
            for point in self.bounding_polygon.coords:
                point_xml = doc.createElementNS(namespace, "point")
                point_xml.setAttribute("x", str(point[0]))
                point_xml.setAttribute("y", str(point[1]))
                bounding_polygon_xml.appendChild(point_xml)
            floor_xml.appendChild(bounding_polygon_xml)
            # list of linesegments
            space_representation_xml = doc.createElementNS(namespace, "space_representation")
            for line in self.linesegments:
                space_representation_xml.appendChild(line.toxml(doc, namespace))
            floor_xml.appendChild(space_representation_xml)
        portals_xml = doc.createElementNS(namespace, "portals")
        # BUG FIX: iterated 'self.portals', an attribute this class never
        # defines; the doors list is the floor's portal collection.
        for door in self.doors:
            portals_xml.appendChild(door.toxml(doc, namespace))
        floor_xml.appendChild(portals_xml)
        spaces_xml = doc.createElementNS(namespace, "spaces")
        for sp in self.spaces:
            spaces_xml.appendChild(sp.toxml(doc, namespace))
        floor_xml.appendChild(spaces_xml)
        self.xml = floor_xml
        return self.xml

    def compute(self):
        """Derive the geometric information of the floor (outer ring, polygon,
        area, perimeter, bounds, centroid) from its boundary points.

        Requires self.points to be populated; a purely topological floor
        cannot be computed.
        """
        self.computed = True
        if not self.points:
            print('error - empty room')
            exit("Error: No points in the room. This function evaluates geometrical information of the area, and cannot be used only with the topological representation.")
        # outer ring from the (x, y) coordinates of the boundary points
        tmp = [(p.x, p.y) for p in self.points]
        self.bounding_polygon = LinearRing(tmp)
        # corresponding polygon
        self.polygon = Polygon(self.bounding_polygon)
        self.area = self.polygon.area
        # BUG FIX: was 'self.length' (undefined attribute); the perimeter is
        # the length of the outer ring.
        self.perimeter = self.bounding_polygon.length
        # bounding box as (minx, miny, maxx, maxy)
        self.bounds = self.polygon.bounds
        # centroid as a shapely Point
        self.centroid = self.polygon.centroid

    def compute_topological_map(self, colors=None):
        """Build the topological map of the floor from its spaces and the
        doors connecting them, optionally colouring nodes by label."""
        node_id_list = []
        label_list = []
        label_RC_list = []
        edges = []
        # BUG FIX: iterated a bare 'spaces' name (NameError); the spaces
        # live on the floor instance.
        for s in self.spaces:
            node_id_list.append(s.id)
            label_list.append(s.label)
            label_RC_list.append(s.type)
            for d in s.doors:
                edges.append((d.first_id, d.second_id))
        # NOTE(review): the original called 'TopologicalMap' when colors were
        # supplied and 'topologicalMap' otherwise; only the lowercase class
        # exists in this module, so it is used for both paths — confirm that
        # no separate 'TopologicalMap' import was intended.
        self.topological_map = topologicalMap(node_id_list, label_list,
                                              label_RC_list, edges, colors)
        return self.topological_map

    def add_space(self, space):
        """Add a space (room) to the floor; duplicates are a fatal error."""
        if space in self.spaces:
            exit("Error - Space added twice to the same floor")
        self.spaces.append(space)

    def add_linesegment(self, linesegment, door=None):
        """Append a linesegment to the floor contour.

        Order matters: each new segment must start where the previous one
        ended. If the segment is a door ('DOOR' wall type) and a door object
        is supplied, the door is registered too. If the geometry was already
        computed, it is recomputed.
        """
        if linesegment in self.linesegments:
            exit("Error: - linesegment added twice to a space")
        self.linesegments.append(linesegment)
        if not self.points:
            # first segment: record both endpoints
            # BUG FIX: was 'linesegmet.p1' (NameError)
            self.points.append(linesegment.p1)
            self.points.append(linesegment.p2)
        elif self.points[-1].almost_equals(linesegment.p1):
            # append the new endpoint after the previously added ones
            self.points.append(linesegment.p2)
        else:
            exit("Error - Trying to add a linesegment in the bounding polygon but the linesegment is non adjacent to any other segment")
        if linesegment.wp == 'DOOR' and door:
            # BUG FIX: was a bare 'add_door(door)' call (NameError)
            self.add_door(door)
        # if the geometrical info was already computed, redo that step
        # BUG FIX: was 'if compute:' (NameError on the bare name)
        if self.computed:
            self.compute()

    def add_door(self, door):
        """Register a door on the floor.

        Either called from add_linesegment, or called directly AFTER the
        corresponding linesegment was added. For a purely topological floor
        (no linesegments) doors are accepted without a matching segment.
        """
        # BUG FIX: read 'self.boundingPoly_linesegmets', an attribute this
        # class never defines; use the floor's own linesegments.
        linesegments_id = [x.id for x in self.linesegments]
        if door in self.doors:
            # BUG FIX: the original's branches were inverted — it reported
            # "already added" for a door that was NOT in self.doors.
            exit("Error: - the door was already added ")
        if door.id in linesegments_id:
            self.doors.append(door)
        elif not self.linesegments:
            # purely topological map: no segments to match against
            self.doors.append(door)
        else:
            # segments exist, but none matches this door
            exit("Error: the linesegments of the door does not belong to the space")

    def set_centroid(self, centroid):
        """Explicitly override the centroid (e.g. for a purely topological map)."""
        if self.centroid:
            print("Warning. Centroid already set")
        self.centroid = centroid
class building(object):
    """Info about a single building: identity, optional metadata, its floors
    and their topological maps."""

    def __init__(self, name, typeB, topological=True, info=False, address=None, city=None, country=None,
                 construction_date=None, scale_pixel=None, scale_meters=None, other_info=None, color_label_Dict=None):
        """
        Info about a single building.
        """
        self.id = uuid.uuid4()
        # iff topological is True only the topological map of the environment
        # is available (no geometrical features)
        self.topological = topological
        # building type
        self.type = typeB
        self.name = name
        # everything below is optional; 'info' flags that at least some is set
        self.info = info
        # BUG FIX: the parameter was misspelled 'addess' while the body read
        # 'address', so every construction raised NameError.
        self.address = address
        self.city = city
        self.country = country
        self.construction_date = construction_date
        self.scale_pixel = scale_pixel
        self.scale_meters = scale_meters
        self.other_info = other_info
        self.floors = []
        # dictionary mapping every label to a colour; used for plotting
        self.colors = color_label_Dict
        # BUG FIX: was None; compute_topological_maps appends to it, which
        # raised AttributeError.
        self.topological_maps = []
        self.xml = None

    def compute_topological_maps(self):
        """Compute each floor's topological map and collect them."""
        for f in self.floors:
            # colors defaults to None in the floor's method, so passing it
            # unconditionally matches the original if/else.
            f.compute_topological_map(self.colors)
            self.topological_maps.append(f.topological_map)
        return self.topological_maps

    def toxml(self, doc, namespace):
        """Serialise the building (and recursively its floors) to an XML
        element. The result is cached in self.xml."""
        if self.xml:
            return self.xml
        building_xml = doc.createElementNS(namespace, "building")
        building_xml.setAttribute("id", str(self.id))
        # BUG FIX: name_xml was created but never populated or attached.
        name_xml = doc.createElementNS(namespace, "name")
        name_xml.appendChild(doc.createTextNode(str(self.name)))
        building_xml.appendChild(name_xml)
        if self.info:
            if self.scale_meters and self.scale_pixel:
                scale_xml = doc.createElementNS(namespace, "scale")
                represented_xml = doc.createElementNS(namespace, "represented_distance")
                value_xml = doc.createElementNS(namespace, "value")
                um_xml = doc.createElementNS(namespace, "um")
                value_xml.appendChild(doc.createTextNode(str(self.scale_pixel)))
                um_xml.appendChild(doc.createTextNode(str("pixel")))
                represented_xml.appendChild(value_xml)
                represented_xml.appendChild(um_xml)
                scale_xml.appendChild(represented_xml)
                real_xml = doc.createElementNS(namespace, "real_distance")
                value_xml = doc.createElementNS(namespace, "value")
                um_xml = doc.createElementNS(namespace, "um")
                value_xml.appendChild(doc.createTextNode(str(self.scale_meters)))
                # BUG FIX: the real-world distance was labelled "pixel";
                # the value comes from scale_meters, so the unit is meters.
                um_xml.appendChild(doc.createTextNode(str("meters")))
                real_xml.appendChild(value_xml)
                real_xml.appendChild(um_xml)
                scale_xml.appendChild(real_xml)
                building_xml.appendChild(scale_xml)
            info_xml = doc.createElementNS(namespace, "info")
            if self.address or self.city or self.country:
                # BUG FIX: the location element was tagged "info" (copy-paste).
                location_xml = doc.createElementNS(namespace, "location")
                if self.address:
                    address_xml = doc.createElementNS(namespace, "address")
                    address_xml.appendChild(doc.createTextNode(str(self.address)))
                    location_xml.appendChild(address_xml)
                if self.city:
                    city_xml = doc.createElementNS(namespace, "city")
                    city_xml.appendChild(doc.createTextNode(str(self.city)))
                    location_xml.appendChild(city_xml)
                if self.country:
                    country_xml = doc.createElementNS(namespace, "country")
                    country_xml.appendChild(doc.createTextNode(str(self.country)))
                    location_xml.appendChild(country_xml)
                info_xml.appendChild(location_xml)
            if self.construction_date:
                construction_date_xml = doc.createElementNS(namespace, "construction_date")
                construction_date_xml.appendChild(doc.createTextNode(str(self.construction_date)))
                info_xml.appendChild(construction_date_xml)
            # BUG FIX: was 'building.appendChild' — 'building' names the
            # class, not the element (and appendChild does not exist on it).
            building_xml.appendChild(info_xml)
        building_type_xml = doc.createElementNS(namespace, "building_type")
        building_type_xml.appendChild(doc.createTextNode(str(self.type)))
        building_xml.appendChild(building_type_xml)
        for fl in self.floors:
            building_xml.appendChild(fl.toxml(doc, namespace))
        self.xml = building_xml
        return self.xml

    def plot(self, layout=None):
        """Not implemented yet.

        Planned modes: plot the igraph layout; plot the centroid layout;
        plot centroids plus walls; plot walls only. For a purely topological
        building only the igraph mode applies.
        """
        pass

    def add_floor(self, floor):
        """Add a floor to the building; duplicates are a fatal error."""
        # BUG FIX: read and appended 'self.floor', which does not exist; the
        # list attribute is 'self.floors'.
        if floor in self.floors:
            exit("Error - Floor added twice to the same building")
        self.floors.append(floor)
class topologicalMap(object) :
def __init__(self, node_id_list = None, label_list = None, label_RC_list = None, edge_list=None, colors = None) :
# se ho una lista di iid, aggiungo questi. altrimenti aggiungo len(label_list) nodi un numero progressivo come id.
# edge_list e' un array di tuple [ (id1, id2), (id2,id4) ] | |
"""Flexible code for histogramming per-snp and per-replica statistics for selected SNPs in selected replicas in
selected scenarios and/or demographies."""
from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios
from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, \
MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff
from Classes.DotData import DotData
from Operations.Ilya_Operations.PipeRun.python.PipeRun import GetDependsOn
from Operations.Shari_Operations.localize.PopConsts import AllFreqs, AllPops, AllAges, CAUSAL_POS
from Operations.IDotData import IDotData
import operator, os, logging, contextlib, functools, collections, types, ast
from itertools import izip
import itertools, string
from UserDict import DictMixin
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as pp
import numpy as np
import math
import traceback as tb
__all__ = ( 'gatherCausalFreqs', 'DefineRulesTo_gatherCausalFreqs', 'histogramSnpStatistic', 'histogramReplicaStatistic',
'AddUpHistograms', 'GraphHistograms', 'GraphCumulPlots', 'DefineRulesTo_histogramSnpStatistic',
'DefineRulesTo_histogramReplicaStatistic', 'findReplicasMatchingConds', 'findSnpsMatchingConds',
'identifyReplicasMeetingConds', 'splitSnpStatsFile',
'DefineRulesTo_identifyReplicasMeetingCommonConds' )
def gatherCausalFreqs( scen, Ddata, simsOut, thinSfx, thinExt, nreplicas, getio = None ):
    """For all replicas within one scenario, gather some useful summary info for each replica:
    e.g. that replica's modern-day frequency of the causal allele, the genetic map position of the
    causal SNP, number of SNPs in the replica, the range of the genetic map, etc.

    Writes Ddata/replicastats<thinSfx>/<scenDir>/replicaStats.tsv, one row per replica.
    """

    #hm3big/simsOutHm3big/10ky/sel100_1/
    simScenDir = Ddata + '/' + simsOut + thinSfx + '/' + scen.scenDir()
    statScenDir = Ddata + '/replicastats' + thinSfx + '/' + scen.scenDir()

    # .pos files exist only for selection scenarios; neutral scenarios have none.
    posFileNames = [ simScenDir + '/' + '%d_%s.pos-%d%s' % ( replicaNum, scen.scenName(), scen.mutPop, thinExt )
                     for replicaNum in range( nreplicas ) ] if not scen.is_neutral() else []

    replicaInfoFileName = statScenDir + '/replicaStats.tsv'

    # Pipeline protocol: with getio=True, only report the input/output files.
    if getio: return dict( depends_on = posFileNames, creates = replicaInfoFileName,
                           mediumRuleNameSfx = scen )

    causalAlleleFreqs = [ ]
    replicaNums = [ ]
    selpos = 500000   # the causal SNP is placed at position 500kb in these sims

    for replicaNum in range( nreplicas ):
        if scen.is_neutral(): causalFreq = np.nan
        else:
            posFile = DotData( SVPath = posFileNames[ replicaNum ], SVSkipFirstLines = 1, SVHeader = False,
                               names = ['SNP','CHROM', 'CHROM_POS', 'ALLELE1', 'FREQ1', 'ALLELE2', 'FREQ2' ] )
            causalLine = posFile[ posFile.CHROM_POS == selpos ]
            assert len( causalLine ) == 1
            causalFreq = causalLine[0].FREQ1

        causalAlleleFreqs.append( causalFreq )
        replicaNums.append( replicaNum )

    # Bug fix: this previously called scen.isNeutral(), which does not match the
    # scen.is_neutral() accessor used above and would raise AttributeError here.
    DotData( names = [ 'replicaNum', 'causalAlleleFreq', 'targetCausalFreq' ],
             Columns = [ replicaNums, causalAlleleFreqs,
                         (( 0 if scen.is_neutral() else scen.mutFreq),)*nreplicas ] ).saveToSV( replicaInfoFileName )
def gatherReplicaGDstats( scen, Ddata, simsOut, thinSfx, thinExt, nreplicas, getio = None ):
    """For all replicas within each scenario, gather some genetic map-related info for each replica:
    e.g. the genetic map position of the
    causal SNP, the range of the genetic map, etc.

    NOTE(review): this function looks unfinished -- it declares that it creates
    `replicaInfoFileName` but never writes anything, and the trailing locals
    (causalAlleleFreqs, replicaNums, selpos) are unused.
    """

    #hm3big/simsOutHm3big/10ky/sel100_1/
    simScenDir = os.path.join( Ddata, simsOut + thinSfx, scen.scenDir() )
    statScenDir = os.path.join( Ddata, 'replicastats' + thinSfx, scen.scenDir() )

    # .pos files exist only for selection scenarios; neutral scenarios have none.
    posFileNames = [ simScenDir + '/' + '%d_%s.pos-%d%s' % ( replicaNum, scen.scenName(), scen.mutPop, thinExt )
                     for replicaNum in range( nreplicas ) ] if not scen.is_neutral() else []

    replicaInfoFileName = statScenDir + '/replicaStats.tsv'

    # Pipeline protocol: with getio=True, only report the input/output files.
    if getio: return dict( depends_on = posFileNames, creates = replicaInfoFileName,
                           mediumRuleNameSfx = scen )

    causalAlleleFreqs = [ ]
    replicaNums = [ ]
    selpos = 500000
def DefineRulesTo_gatherCausalFreqs( pr, Ddata, simsOut = 'simsOut',
                                     mutAges = AllAges, mutPops = AllPops, mutFreqs = AllFreqs,
                                     thinSfx = '', thinExt = '', nreplicas = 100 ):
    """Define rules to gather per-replica statistics: one invoke-rule per
    selection scenario produced by GetScenarios()."""
    for scen in GetScenarios( mutAges, mutPops, mutFreqs ):
        # Dict( 'names' ) captures the named locals from this frame by name,
        # so the loop variable must keep the exact name `scen`.
        pr.addInvokeRule( invokeFn = gatherReplicaStats,
                          invokeArgs = Dict( 'scen Ddata simsOut thinSfx thinExt nreplicas' ) )
# Backward-compatibility aliases: older pipeline code refers to these names.
gatherReplicaStats = gatherCausalFreqs
DefineRulesTo_gatherReplicaStats = DefineRulesTo_gatherCausalFreqs
def histogramSnpStatistic( Ddata, thinSfx, scenDir, replicaTables, replicaCond, snpTables, snpCond, snpStat,
                           outFile, nreplicas, binSize, binShift = 0.0, sfx = None, scenSfx = None, getio = None ):
    """Compute histogram of $snpStat for snps matching $snpCond in replicas matching $replicaCond in scenario $scenDir.

    Params:

       statTable - the name of the per-snp statistics table.  we assume there is a file called
          Ddata/snpstats/scenDir/statTable_pop.tsv for each scenario.
       statCol - column name to histogram.
    """

    replicaTables = MakeSeq( replicaTables )
    snpTables = MakeSeq( snpTables )

    # Pre-compile the user-supplied filter/statistic expressions once.
    replicaCondExpr = compile_expr( replicaCond )
    snpCondExpr = compile_expr( snpCond )
    snpStatExpr = compile_expr( snpStat )

    outFile = AddFileSfx( outFile, sfx )
    outFileStats = AddFileSfx( outFile, 'stats' )

    if IsSeq( scenSfx ): scenSfx = dict( scenSfx )

    replicaTableFiles = [ os.path.join( Ddata, 'replicastats' + thinSfx, scenDir,
                                        replicaTable + ( '.tsv' if '.' not in replicaTable else '' ) )
                          for replicaTable in replicaTables ]

    snpTableFiles = [ os.path.join( Ddata, 'snpStats' + thinSfx, scenDir,
                                    AddFileSfx( snpTable + ( '.tsv' if '.' not in snpTable else '' ),
                                                scenSfx if isinstance( scenSfx, types.StringTypes )
                                                else scenSfx[ os.path.splitext( snpTable )[0] ] ) )
                      for snpTable in snpTables ]

    # DotData directories are referenced with a trailing slash.
    replicaTableFiles = [ f + '/' if f.endswith('.data') else f for f in replicaTableFiles ]
    snpTableFiles = [ f + '/' if f.endswith('.data') else f for f in snpTableFiles ]

    snpTables = [ os.path.splitext(snpTable)[0] for snpTable in snpTables ]

    # Pipeline protocol: with getio=True, only report inputs/outputs.
    # Consistency fix: reuse the precomputed outFileStats instead of recomputing it.
    if getio: return dict( depends_on = replicaTableFiles + snpTableFiles,
                           creates = ( outFile, outFileStats ),
                           attrs = Dict( 'scenDir snpCond replicaCond snpStat' ),
                           mediumRuleNameSfx = ( scenDir, scenSfx ) )

    replicaTableVals = [ DotData( SVPath = f ) for f in replicaTableFiles ]

    # Evaluate the replica filter once per replica, with each table's row bound
    # to its table name in the expression's namespace.
    replicasToUse = [ eval( replicaCondExpr, globals(), dict( zip( replicaTables, replicaTableRows ) ) )
                      for replicaTableRows in izip( *replicaTableVals ) ]

    snpTableVals = [ IDotData( SVPath = f ) for f in snpTableFiles ]

    histogramBuilder = Histogrammer( binSize = binSize, binShift = binShift )

    lastReplica = np.nan
    for snpTableRows in izip( *snpTableVals ):
        r0 = snpTableRows[ 0 ]
        # All per-snp tables must be aligned on the same (replica, position) rows.
        assert all([ r.Chrom == r0.Chrom for r in snpTableRows ]) or all([ np.isnan( r.Chrom ) for r in snpTableRows ])
        assert all([ r.Pos == r0.Pos for r in snpTableRows ])
        replica = int( r0.Chrom ) if not np.isnan( r0.Chrom ) else -1
        useThisReplica = not replicaTables or replicasToUse[ replica ]
        if replica != lastReplica: dbg( 'replica useThisReplica histogramBuilder.getNumVals()' )
        if useThisReplica:
            snpDict = dict( zip( snpTables, snpTableRows ) )
            if eval( snpCondExpr, globals(), snpDict ):
                val = eval( snpStatExpr, globals(), snpDict )
                histogramBuilder.addVal( val )
        lastReplica = replica

    # Bug fix: logging.info was called as logging.info('saving histogram to ', outFile),
    # passing outFile as a %-argument with no placeholder, so the filename never
    # appeared (and %-formatting could raise). Use lazy %s formatting instead.
    logging.info( 'saving histogram to %s', outFile )
    histogramBuilder.save( outFile )
def histogramReplicaStatistic( Ddata, thinSfx, replicaCond, replicaStat,
                               outFile, nreplicas, binSize, scenCond = 'True',
                               replicaTables = None,
                               scen2sfxs = {}, allScens = GetScenarios(),
                               sfx = None, replicaCondSfx = '',
                               nameSfx = '', getio = None ):
    """Compute histogram of $replicaStat for replicas matching $replicaCond in scenarios matching $scenCond.
    Saves the histogram as well as overall stats about the values of this statistic, e.g. the average.

    Params:

       statTable - the name of the per-snp statistics table.  we assume there is a file called
          Ddata/snpstats/scenDir/statTable_pop.tsv for each scenario.
       statCol - column name to histogram.

    NOTE(review): the defaults scen2sfxs={} and allScens=GetScenarios() are
    evaluated once at import time and shared across calls -- confirm that this
    is intentional before changing them.
    """

    outFile = AddFileSfx( outFile, sfx, replicaCondSfx )
    # NOTE(review): outFileStats is declared among the created files below;
    # presumably histogramBuilder.save() also writes the companion stats file -- confirm.
    outFileStats = AddFileSfx( outFile, 'stats' )

    # Dict( 'names' ) captures these locals by name from this frame.
    args = Dict( 'Ddata thinSfx replicaTables scenCond replicaCond scen2sfxs allScens' )

    # Pipeline protocol: delegate the dependency list to findReplicasMatchingConds.
    if getio: return dict( depends_on =
                               findReplicasMatchingConds( getio = True, **args )[ 'depends_on' ],
                           creates = ( outFile, outFileStats ),
                           mediumRuleNameSfx = sfx, attrs = dict( piperun_short = True ),
                           name = 'histogramReplicaStatistic' + Sfx( nameSfx ) )

    histogramBuilder = Histogrammer( binSize = binSize )
    histogramBuilder.addVals( findReplicasMatchingConds( showHeadings = 'val', showVals = replicaStat, **args ).val )
    histogramBuilder.save( outFile )
def histogramSnpStatistic2( Ddata, thinSfx, snpTables, snpCond, snpCondSfx, replicaTables, replicaCond, replicaStat,
outFile, nreplicas, binSize, scenCond = 'True',
scen2sfxs = {}, allScens = GetScenarios(),
sfx = None, replicaCondSfx = '',
nameSfx = '', getio = None ):
"""Compute histogram of $replicaStat for replicas matching $replicaCond in scenarios matching $scenCond.
Saves the histogram as well as overall stats about the values of this statistic, e.g. the average.
Params:
statTable - the name of the per-snp statistics table. we assume there is a file called
Ddata/snpstats/scenDir/statTable_pop.tsv for each scenario.
statCol - column name to histogram.
"""
outFile = AddFileSfx( outFile, sfx, replicaCondSfx, snpCondSfx )
outFileStats = AddFileSfx( outFile, 'stats' )
args = Dict( 'Ddata thinSfx snpTables snpCond replicaTables scenCond replicaCond scen2sfxs allScens' )
if getio: return dict( depends_on =
finSnpsMatchingConds( getio = True, **args )[ 'depends_on' ],
creates = ( outFile, outFileStats ),
mediumRuleNameSfx = sfx, attrs = dict( piperun_short = True ),
name = 'histogramReplicaStatistic' + Sfx( nameSfx ) )
histogramBuilder = Histogrammer( binSize = binSize | |
<filename>hugdatafast/fastai.py
from functools import partial
from pathlib import Path
import json
from tqdm import tqdm
from torch.nn.utils.rnn import pad_sequence
import datasets
from fastai.text.all import *
@delegates()
class MySortedDL(TfmdDL):
    "A :class:`DataLoader` that do smart batching and dynamic padding. Different from :class:`SortedDL`, it automatically pad every attribute of samples, is able to filter samples, and can be cached to sort/filter only at first time."

    def __init__(self, dataset, srtkey_fc=None, filter_fc=False, pad_idx=None, cache_file=None, **kwargs):
        """
        Args:
            dataset (HF_Dataset): Actually any object implements ``__len__`` and ``__getitem__`` that return a tuple as a sample.
            srtkey_fc (``*args->int``, optional): Get key for descending sorting from a sample .\n
              - If ``None``, sort by length of first element of a sample.
              - If ``False``, not sort.
            filter_fc (``*args->bool``, optional): Return ``True`` to keep the sample.
            pad_idx (``int``, optional): pad each attribute of samples to the max length of its max length within the batch.\n
              - If ``List[int]``, specify pad_idx for each attribute of a sample. e.g. a sample is a tuple (masked_inputs, labels), `pad_idx=[0 ,-100]` pad masked_inputs with 0, labels with -100.
              - If ``False``, do no padding.
              - If ``None``, try ``dataset.pad_idx``, do no padding if no such attribute.
            cache_file (``str``, optional): Path of a json file to cache info for sorting and filtering.
            kwargs: key arguments for `TfmDl` or `DataLoader`
        Example:
            >>> samples = [ (torch.tensor([1]), torch.tensor([7,8]), torch.tensor(1)),
            ...             (torch.tensor([2,3]), torch.tensor([9,10,11]), torch.tensor(2)),
            ...             (torch.tensor([4,5,6]), torch.tensor([11,12,13,14]), torch.tensor(3)), ]
            ... dl = MySortedDL(samples,
            ...                 srtkey_fc=lambda *args: len(args[0]),
            ...                 filter_fc=lambda x1,y1: y1<3,
            ...                 pad_idx=-1,
            ...                 cache_file='/tmp/cache.json', # calls after this will load cache
            ...                 bs=999, # other parameters go to `TfmDL` and `DataLoader`
            ...                 )
            ... dl.one_batch()
            (tensor([[ 2,  3],
                     [ 1, -1]]),
             tensor([[ 9, 10, 11],
                     [ 7,  8, -1]]),
             tensor([2, 1]))
        """
        # Defaults
        # NOTE(review): this replaces ANY caller-supplied srtkey_fc with the default;
        # the docstring implies only ``None`` should trigger the default -- confirm intent.
        if srtkey_fc is not False: srtkey_fc = lambda *x: len(x[0])
        if pad_idx is None: pad_idx = getattr(dataset, 'pad_idx', False)
        # pad_idxs: one padding value per sample attribute (a single int is broadcast).
        if isinstance(pad_idx, int): pad_idxs = [pad_idx] * len(dataset[0])
        elif isinstance(pad_idx, (list, tuple)): pad_idxs = pad_idx
        # NOTE(review): when pad_idx is False, `pad_idxs` is never bound here --
        # verify store_attr below tolerates that (otherwise the no-padding path fails).
        cache_file = Path(cache_file) if cache_file else None
        idmap = list(range(len(dataset)))

        # Save attributes
        super().__init__(dataset, **kwargs)
        store_attr('pad_idxs,srtkey_fc,filter_fc,cache_file,idmap', self)

        # Prepare records for sorting / filtered samples
        if srtkey_fc or filter_fc:
            if cache_file and cache_file.exists():
                # load cache and check
                with cache_file.open(mode='r') as f: cache = json.load(f)
                idmap, srtkeys = cache['idmap'], cache['srtkeys']
                if srtkey_fc:
                    assert srtkeys, "srtkey_fc is passed, but it seems you didn't sort samples when creating cache."
                    self.srtkeys = srtkeys
                if filter_fc:
                    assert idmap, "filter_fc is passed, but it seems you didn't filter samples when creating cache."
                    self.idmap = idmap
            else:
                # overwrite idmap if filter, get sorting keys if sort
                idmap = []; srtkeys = []
                for i in tqdm(range_of(dataset), leave=False):
                    sample = self.do_item(i)
                    if filter_fc and not filter_fc(*sample): continue
                    if filter_fc: idmap.append(i)
                    if srtkey_fc: srtkeys.append(srtkey_fc(*sample))
                if filter_fc: self.idmap = idmap
                if srtkey_fc: self.srtkeys = srtkeys
                # save to cache (best effort: drop a half-written cache file on failure)
                if cache_file:
                    try:
                        with cache_file.open(mode='w+') as f: json.dump({'idmap':idmap,'srtkeys':srtkeys}, f)
                    except: os.remove(str(cache_file))
            # an info for sorting: index of the longest sample (pinned first, see shuffle_fn)
            if srtkey_fc: self.idx_max = np.argmax(self.srtkeys)
            # update number of samples
            # NOTE(review): duplicated assignment; a single `self.n =` suffices.
            if filter_fc: self.n = self.n = len(self.idmap)

    def create_item(self, i): return self.dataset[self.idmap[i]]

    def create_batch(self, samples):
        # NOTE(review): relies on `self.pad_idx`, which store_attr above does not
        # save -- confirm it is provided elsewhere.
        if self.pad_idx is False: return super().create_batch(samples)
        # Pad only multi-element tensor attributes that have a pad value; stack the rest.
        return tuple( pad_sequence(attr, batch_first=True, padding_value=self.pad_idxs[i]) if attr[0].shape and isinstance(self.pad_idxs[i], int) else torch.stack(attr) for i, attr in enumerate(zip(*samples)))

    def get_idxs(self):
        # Shuffled loaders defer ordering to shuffle_fn; otherwise sort by key, largest first.
        idxs = super().get_idxs()
        if self.shuffle: return idxs
        if self.srtkey_fc: return sorted(idxs, key=lambda i: self.srtkeys[i], reverse=True)
        return idxs

    def shuffle_fn(self,idxs):
        # "Sorted shuffle": permute globally, pin the largest-key sample into the
        # first batch, sort within chunks of 50 batches, then shuffle whole batches
        # (keeping the first and last batch in place).
        if not self.srtkey_fc: return super().shuffle_fn(idxs)
        idxs = np.random.permutation(self.n)
        idx_max = np.where(idxs==self.idx_max)[0][0]
        idxs[0],idxs[idx_max] = idxs[idx_max],idxs[0]
        sz = self.bs*50
        chunks = [idxs[i:i+sz] for i in range(0, len(idxs), sz)]
        chunks = [sorted(s, key=lambda i: self.srtkeys[i], reverse=True) for s in chunks]
        sort_idx = np.concatenate(chunks)
        sz = self.bs
        batches = [sort_idx[i:i+sz] for i in range(0, len(sort_idx), sz)]
        # NOTE(review): np.int was removed in NumPy >= 1.24; plain `int` is the drop-in replacement.
        sort_idx = np.concatenate(np.random.permutation(batches[1:-1])) if len(batches) > 2 else np.array([],dtype=np.int)
        sort_idx = np.concatenate((batches[0], sort_idx) if len(batches)==1 else (batches[0], sort_idx, batches[-1]))
        return iter(sort_idx)

    @delegates(TfmdDL.new)
    def new(self, dataset=None, **kwargs):
        # When Learner.get_preds clones the dataloader it passes `get_idxs`;
        # reuse the cache so sorting is not redone.
        if 'get_idxs' in kwargs:
            kwargs['cache_file'] = self.cache_file
        # filter_fc is dropped deliberately: validation/test samples must not be filtered out.
        return super().new(dataset=dataset, pad_idx=self.pad_idx, srtkey_fc=self.srtkey_fc, filter_fc=False, **kwargs)
# =========================
# Titled primitives
# =========================
class _Int(int, ShowPrint):
    "An int that can carry arbitrary display attributes (e.g. a title)."
    def __new__(cls, *args, **kwargs):
        obj = super().__new__(cls, *args)
        for attr_name, attr_value in kwargs.items():
            setattr(obj, attr_name, attr_value)
        return obj
class _Float(float, ShowPrint):
    "A float that can carry arbitrary display attributes (e.g. a title)."
    def __new__(cls, *args, **kwargs):
        obj = super().__new__(cls, *args)
        for attr_name, attr_value in kwargs.items():
            setattr(obj, attr_name, attr_value)
        return obj
class _Str(str, ShowPrint):
    "A str that can carry arbitrary display attributes (e.g. a title)."
    def __new__(cls, *args, **kwargs):
        obj = super().__new__(cls, *args)
        for attr_name, attr_value in kwargs.items():
            setattr(obj, attr_name, attr_value)
        return obj
class _Tuple(fastuple, ShowPrint):
    "A fastuple that can carry arbitrary display attributes (e.g. a title)."
    def __new__(cls, *args, **kwargs):
        obj = super().__new__(cls, *args)
        for attr_name, attr_value in kwargs.items():
            setattr(obj, attr_name, attr_value)
        return obj
class _L(L, ShowPrint):
    "An `L` list that can carry arbitrary display attributes (e.g. a title)."
    def __new__(cls, *args, **kwargs):
        obj = super().__new__(cls, *args)
        for attr_name, attr_value in kwargs.items():
            setattr(obj, attr_name, attr_value)
        return obj
# only change "label" to "title"
def _show_title(o, ax=None, ctx=None, title=None, color='black', **kwargs):
    "Set title of `ax` to `o`, or print `o` if `ax` is `None`"
    ax = ifnone(ax,ctx)
    if ax is None: print(o)
    elif hasattr(ax, 'set_title'):
        # Stack onto any existing axes title so repeated calls accumulate.
        t = ax.title.get_text()
        if len(t) > 0: o = t+'\n'+str(o)
        ax.set_title(o, color=color)
    elif isinstance(ax, pd.Series):
        # Find an unused index label for this value.
        while title in ax: title += '_'
        # Bug fix: Series.append was deprecated in pandas 1.4 and removed in 2.0;
        # pd.concat produces the same concatenated Series.
        ax = pd.concat([ax, pd.Series({title: o})])
    return ax
class _ShowTitle:
    "Mixin routing `show` through `_show_title`, using `self.title` or the class default."
    def show(self, ctx=None, **kwargs):
        if 'title' not in kwargs:
            kwargs['title'] = getattr(self, 'title', self.default_title)
        return _show_title(str(self), ctx=ctx, **kwargs)
# Python's MRO resolves methods from the earlier base class first, so _ShowTitle.show wins.
class _TitledInt(_ShowTitle, _Int):
    # Shown under the title "int" unless an explicit title is attached.
    default_title = 'int'
class _TitledFloat(_ShowTitle, _Float):
    # Shown under the title "float" unless an explicit title is attached.
    default_title = 'float'
# I created it, but it just prints bool like an int; haven't found a way to solve it.
class _TitledBool(_ShowTitle, _Int):  # bool cannot be subclassed, so an int stands in
    default_title = 'bool'
class _TitledStr(_ShowTitle, _Str):
    default_title = 'text'
    def truncate(self, n):
        "Keep only the first `n` space-separated words, preserving any attached title."
        kept = self.split(' ')[:n]
        return _TitledStr(' '.join(kept), title=getattr(self, 'title', 'text'))
class _TitledTuple(_ShowTitle, _Tuple):
    # Shown under the title "list" unless an explicit title is attached.
    default_title = 'list'
class _Category(_ShowTitle, _Str):
    # Shown under the title "label" unless an explicit title is attached.
    default_title = 'label'
class _MultiCategory(_ShowTitle, _L):
    default_title = 'labels'
    def show(self, ctx=None, sep=';', color='black', **kwargs):
        "Join the labels with `sep` and display them under the (default) title."
        if 'title' not in kwargs:
            kwargs['title'] = getattr(self, 'title', self.default_title)
        joined = sep.join(self.map(str))
        return _show_title(joined, ctx=ctx, color=color, **kwargs)
""" Caution !!
These two function is inperfect.
But they cope with mutiple input columns problem (n_inp >1), which cause no df printing but just sequentail print
These will be a problem when you are doing non-text problem with n_inp > 1 (multiple input column),
which shouldn't be the case of huggingface/datasets user.
And I hope fastai come up with a good solution to show_batch multiple inputs problems for text/non-text.
"""
@typedispatch
def show_batch(x:tuple, y, samples, ctxs=None, max_n=9, **kwargs):
    "Render a batch of multi-input samples as a single dataframe."
    if ctxs is None:
        ctxs = get_empty_df(min(len(samples), max_n))
    ctxs = show_batch[object](x, y, samples, max_n=max_n, ctxs=ctxs, **kwargs)
    df = pd.DataFrame(ctxs)
    display_df(df)
    return ctxs
@typedispatch
def show_results(x: tuple, y, samples, outs, ctxs=None, max_n=10, trunc_at=150, **kwargs):
    "Render predictions for multi-input samples as a single dataframe."
    if ctxs is None:
        ctxs = get_empty_df(min(len(samples), max_n))
    ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
    df = pd.DataFrame(ctxs)
    display_df(df)
    return ctxs
class HF_Dataset():
"""A wrapper for :class:`datasets.Dataset`. It will behavior like original :class:`datasets.Dataset`,
but also function as a :class:`fastai.data.core.datasets` that provides samples and decodes."""
def __init__(self, hf_dset, cols=None, hf_toker=None, neat_show=False, n_inp=1):
"""
Args:
hf_dset (:class:`datasets.Dataset`): Prerocessed Hugging Face dataset to be wrapped.
cols (dict, optional): columns of :class:`datasets.Dataset` to be used to construct samples, and (optionally) semantic tensor type for each of those columns to decode.\n
- cols(``Dict[Fastai Semantic Tensor]``): encode/decode column(key) with semantic tensor type(value). If {value} is ``noop``, semantic tensor of the column is by default `TensorTuple`.
- cols(``list[str]``): specify only columns and take default setting for semantic tensor type of them.\n
- if length is 1, regard the 1st element as `TensorText`
- if length is 2, regard the 1st element as `TensorText`, 2nd element as `TensorCategory`
- Otherwise, regard all elements as `TensorTuple`
- cols(None): pass :data:`hf_dset.column_names` (list[str]) as cols.
hf_toker (:class:`transformers.PreTrainedTokenizer`, optional): Hugging Face tokenizer, used in decode and provide ``pad_idx`` for dynamic padding
neat_show (bool, optional): Show the original sentence instead of tokens joined by space.
n_inp (int, optional): take the first ``n_inp`` columns of ``cols`` as x, and the rest as y .
Example:
>>> tokenized_cola_train_set[0]
{'sentence': "Our friends won't buy this analysis, let alone the next one we propose.",
'label': 1,
'idx': 0,
| |
A 470 152.647 104.508 -64.137 1.00 97.78 N
ATOM 31 CA ASP A 470 153.692 105.026 -65.011 1.00 92.19 C
ATOM 32 C ASP A 470 153.455 104.600 -66.454 1.00 84.38 C
ATOM 33 O ASP A 470 153.701 105.375 -67.384 1.00 87.02 O
ATOM 34 CB ASP A 470 155.069 104.557 -64.536 1.00 87.01 C
ATOM 35 CG ASP A 470 156.186 105.479 -64.984 1.00 77.46 C
ATOM 36 OD1 ASP A 470 155.882 106.556 -65.539 1.00 85.03 O
ATOM 37 OD2 ASP A 470 157.367 105.128 -64.780 1.00 71.40 O
ATOM 38 N ILE A 471 152.981 103.370 -66.652 1.00 79.41 N
ATOM 39 CA ILE A 471 152.685 102.866 -67.989 1.00 79.85 C
ATOM 40 C ILE A 471 151.489 103.616 -68.562 1.00 84.73 C
ATOM 41 O ILE A 471 151.480 103.984 -69.743 1.00 88.84 O
ATOM 42 CB ILE A 471 152.431 101.346 -67.963 1.00 74.88 C
ATOM 43 CG1 ILE A 471 153.749 100.589 -67.786 1.00 82.61 C
ATOM 44 CG2 ILE A 471 151.724 100.891 -69.231 1.00 64.21 C
ATOM 45 CD1 ILE A 471 154.742 100.822 -68.904 1.00 74.09 C
ATOM 46 N TYR A 472 150.482 103.863 -67.729 1.00 80.60 N
ATOM 47 CA TYR A 472 149.304 104.610 -68.155 1.00 75.53 C
ATOM 48 C TYR A 472 149.643 106.074 -68.413 1.00 52.97 C
ATOM 49 O TYR A 472 149.190 106.662 -69.396 1.00 41.97 O
ATOM 50 CB TYR A 472 148.195 104.503 -67.106 1.00 73.74 C
ATOM 51 CG TYR A 472 146.901 105.173 -67.512 1.00 67.57 C
ATOM 52 CD1 TYR A 472 146.117 104.650 -68.532 1.00 64.47 C
ATOM 53 CD2 TYR A 472 146.464 106.327 -66.875 1.00 67.13 C
ATOM 54 CE1 TYR A 472 144.934 105.258 -68.907 1.00 69.28 C
ATOM 55 CE2 TYR A 472 145.282 106.942 -67.243 1.00 73.64 C
ATOM 56 CZ TYR A 472 144.521 106.403 -68.259 1.00 73.07 C
ATOM 57 OH TYR A 472 143.344 107.012 -68.629 1.00 67.60 O
TER
"""
tst_04_start_lines = """\
CRYST1 100.000 100.000 100.000 90.00 90.00 90.00 P 1
ATOM 1 N ALA A 21 8.035 20.299 4.150 1.00 33.96 N
ATOM 2 CA ALA A 21 9.085 20.780 5.040 1.00 32.69 C
ATOM 3 C ALA A 21 9.114 19.980 6.338 1.00 32.55 C
ATOM 4 O ALA A 21 8.257 20.152 7.204 1.00 33.56 O
ATOM 5 CB ALA A 21 8.893 22.260 5.334 1.00 33.18 C
ATOM 6 N UNK A 22 10.106 19.104 6.465 1.00 31.24 N
ATOM 7 CA UNK A 22 10.248 18.275 7.655 1.00 30.10 C
ATOM 8 C UNK A 22 11.377 18.783 8.546 1.00 29.32 C
ATOM 9 O UNK A 22 12.549 18.724 8.173 1.00 30.19 O
ATOM 10 CB UNK A 22 10.491 16.826 7.266 1.00 30.34 C
ATOM 11 CG UNK A 22 10.006 16.721 7.661 1.00 20.00 C
ATOM 12 N ALA A 23 11.017 19.283 9.723 1.00 27.76 N
ATOM 13 CA ALA A 23 11.998 19.805 10.666 1.00 26.26 C
ATOM 14 C ALA A 23 11.854 19.144 12.033 1.00 23.38 C
ATOM 15 O ALA A 23 11.027 18.251 12.217 1.00 25.31 O
ATOM 16 CB ALA A 23 11.863 21.315 10.790 1.00 27.05 C
ATOM 17 N ALA A 24 12.663 19.590 12.988 1.00 19.15 N
ATOM 18 CA ALA A 24 12.633 19.039 14.337 1.00 17.40 C
ATOM 19 C ALA A 24 11.922 19.980 15.304 1.00 15.24 C
ATOM 20 O ALA A 24 12.393 21.086 15.567 1.00 15.15 O
ATOM 21 CB ALA A 24 14.044 18.750 14.822 1.00 16.09 C
"""
tst_04_answer_lines = """\
CRYST1 100.000 100.000 100.000 90.00 90.00 90.00 P 1
ATOM 1 N ALA A 21 9.842 22.066 6.712 1.00 33.96 N
ATOM 2 CA ALA A 21 8.824 21.158 6.199 1.00 32.69 C
ATOM 3 C ALA A 21 8.841 19.833 6.953 1.00 32.55 C
ATOM 4 O ALA A 21 7.792 19.247 7.219 1.00 33.56 O
ATOM 5 CB ALA A 21 9.026 20.923 4.710 1.00 33.18 C
ATOM 6 N UNK A 22 10.038 19.366 7.297 1.00 31.24 N
ATOM 7 CA UNK A 22 10.187 18.059 7.925 1.00 30.10 C
ATOM 8 C UNK A 22 11.176 18.063 9.092 1.00 29.32 C
ATOM 9 O UNK A 22 10.991 17.321 10.057 1.00 30.19 O
ATOM 10 CB UNK A 22 10.610 17.019 6.882 1.00 30.34 C
ATOM 11 CG UNK A 22 10.658 15.590 7.400 1.00 20.00 C
ATOM 12 N ALA A 23 12.213 18.891 9.011 1.00 27.76 N
ATOM 13 CA ALA A 23 13.279 18.849 10.010 1.00 26.26 C
ATOM 14 C ALA A 23 12.837 19.350 11.384 1.00 23.38 C
ATOM 15 O ALA A 23 13.231 18.793 12.408 1.00 25.31 O
ATOM 16 CB ALA A 23 14.477 19.653 9.524 1.00 27.05 C
ATOM 17 N ALA A 24 12.018 20.397 11.404 1.00 19.15 N
ATOM 18 CA ALA A 24 11.559 20.982 12.660 1.00 17.40 C
ATOM 19 C ALA A 24 10.649 20.028 13.426 1.00 15.24 C
ATOM 20 O ALA A 24 10.774 19.881 14.642 1.00 15.15 O
ATOM 21 CB ALA A 24 10.842 22.298 12.396 1.00 16.09 C
TER
"""
def get_distances(h, n_neighbours=None):
  """Collect pairwise atom distances from hierarchy `h` into a flex.double.

  For each atom i, distances to every following atom are appended; when
  `n_neighbours` is given, only the next `n_neighbours` atoms are considered.
  """
  d = flex.double()
  atoms = h.atoms()          # hoisted: h.atoms() was re-evaluated per iteration
  n_atoms = len(atoms)
  for i, a in enumerate(atoms):
    if n_neighbours is not None:
      # Bug fix: the window was hard-coded to i+10, silently ignoring the
      # n_neighbours argument (callers pass n_neighbours=20).
      r = range(i, min(n_atoms, i+n_neighbours))
    else:
      r = range(i, n_atoms)
    for j in r:
      d.append(a.distance(atoms[j]))
  return d
# Shared ss_idealization parameter set used by the exercises below:
# idealization enabled, rotamer-outlier fixing disabled.
ssb_params_norot = ssb.master_phil.extract()
ssb_params_norot.ss_idealization.fix_rotamer_outliers=False
ssb_params_norot.ss_idealization.enabled=True
def exercise_pure_polyala_alpha(prefix="tst_2_ex_ppa_"):
"""
Simple polyala one helix case.
"""
h_records = """\
HELIX 1 1 ALA A 1 ALA A 20 1 20
"""
import sys # import dependency
h = ssb.secondary_structure_from_sequence(ssb.alpha_helix_str,"A"*20)
h.write_pdb_file(file_name=prefix+'h0.pdb')
d1 = get_distances(h)
ann = ioss.annotation.from_records(records=h_records.split('\n'))
pdb_inp = iotbx.pdb.input(source_info=None,
lines=h.as_pdb_string())
model = mmtbx.model.manager(
model_input = pdb_inp)
model.process(make_restraints=True)
model.set_ss_annotation(ann)
# test_h = pdb_inp.construct_hierarchy()
for i in range(3):
rm = ssb.substitute_ss(
model = model,
params = ssb_params_norot.ss_idealization)
model.get_hierarchy().write_pdb_file(file_name=prefix+'%d.pdb' % i)
model.get_hierarchy().write_pdb_file(file_name=prefix+'h1.pdb')
d2 = get_distances(model.get_hierarchy())
dist = abs(d2-d1)
dmmm = abs(d2-d1).min_max_mean().as_tuple()
print("minmaxmean sd", dmmm, abs(d2-d1).standard_deviation_of_the_sample())
assert dmmm[1] < 0.65, dmmm[1]
assert dmmm[2] < 0.17, dmmm[2]
assert dist.standard_deviation_of_the_sample() < 0.15, dist.standard_deviation_of_the_sample()
def exercise_00(prefix="tst_2_exercise_00"):
"""
2 alpha helices and loop between them
seems that this test is bad.
"""
h_records = """\
HELIX 1 1 PRO A 3 ALA A 21 1 19
HELIX 2 2 ARG A 23 GLN A 44 1 22
"""
pdb_inp = iotbx.pdb.input(source_info=None,
lines=tst_00_start_lines)
ann = ioss.annotation.from_records(records=h_records.split('\n'))
model = mmtbx.model.manager(
model_input = pdb_inp)
model.process(make_restraints=True)
model.set_ss_annotation(ann)
d1 = get_distances(model.get_hierarchy())
# model.get_hierarchy().write_pdb_file(file_name=prefix+'_initial.pdb')
for i in range(1):
rm = ssb.substitute_ss(
model,
)
d2 = get_distances(model.get_hierarchy())
# model.get_hierarchy().write_pdb_file(file_name=prefix+'_result.pdb')
dist = abs(d2-d1)
dmmm = abs(d2-d1).min_max_mean().as_tuple()
print("minmaxmean sd", dmmm, abs(d2-d1).standard_deviation_of_the_sample())
assert dmmm[1] < 1.5, dmmm[1]
assert dmmm[2] < 0.2, dmmm[2]
assert dist.standard_deviation_of_the_sample() < 0.2
def exercise_01(prefix="tst_2_exercise_01"):
"""
Similar to exercise_00. + Side chains.
"""
h_records = """\
HELIX 1 1 PRO A 3 ALA A 21 1 19
HELIX 2 2 ARG A 23 GLN A 44 1 22
"""
pdb_inp = iotbx.pdb.input(source_info=None,
lines=tst_01_start_lines)
ann = ioss.annotation.from_records(records=h_records.split('\n'))
model = mmtbx.model.manager(
model_input = pdb_inp)
model.process(make_restraints=True)
model.set_ss_annotation(ann)
d1 = get_distances(model.get_hierarchy(), n_neighbours=20)
# model.get_hierarchy().write_pdb_file(file_name=prefix+'_initial.pdb')
for i in range(1):
rm = ssb.substitute_ss(
model,
params = ssb_params_norot.ss_idealization)
d2 = get_distances(model.get_hierarchy(), n_neighbours=20)
# model.get_hierarchy().write_pdb_file(file_name=prefix+'_result.pdb')
dist = abs(d2-d1)
dmmm = abs(d2-d1).min_max_mean().as_tuple()
print("minmaxmean sd", dmmm, abs(d2-d1).standard_deviation_of_the_sample())
# assert dmmm[1] < 0.8
assert dmmm[2] < 0.2, dmmm[2]
# print dist.standard_deviation_of_the_sample()
assert dist.standard_deviation_of_the_sample() < 0.25, dist.standard_deviation_of_the_sample()
def exercise_02(prefix="tst_2_exercise_02"):
"""
Similar to exercise_01. +beta sheets.
"""
h_records = """\
HELIX 1 1 ARG A 65 ARG A 71 1 7
SHEET 1 AA 2 PHE A 42 CYS A 47 0
SHEET 2 AA 2 CYS A 52 GLY A 57 -1 O LYS A 53 N TYR A 46
"""
pdb_inp = iotbx.pdb.input(source_info=None,
lines=tst_02_start_lines)
ann = ioss.annotation.from_records(records=h_records.split('\n'))
model = mmtbx.model.manager(
model_input = pdb_inp)
model.process(make_restraints=True)
model.set_ss_annotation(ann)
d1 = get_distances(model.get_hierarchy(), n_neighbours=20)
# h.write_pdb_file(file_name=prefix+'_initial.pdb')
for i in range(3):
rm = ssb.substitute_ss(
model,
params = ssb_params_norot.ss_idealization)
d2 = get_distances(model.get_hierarchy(), n_neighbours=20)
# h.write_pdb_file(file_name=prefix+'_result.pdb')
dist = abs(d2-d1)
dmmm = abs(d2-d1).min_max_mean().as_tuple()
print("minmaxmean sd", dmmm, abs(d2-d1).standard_deviation_of_the_sample())
#assert dmmm[1] < 0.8
assert dmmm[2] < 0.1
assert dist.standard_deviation_of_the_sample() < 0.2, dist.standard_deviation_of_the_sample()
def exercise_03(prefix="tst_2_exercise_03"):
"""
Mixed order of atoms in pdb file
"""
h_records = """\
HELIX 13 13 SER A 466 TYR A 472 1 7
"""
pdb_inp = iotbx.pdb.input(source_info=None, lines=tst_03_start_lines)
ann = ioss.annotation.from_records(records=h_records.split('\n'))
model = mmtbx.model.manager(
model_input = pdb_inp)
model.process(make_restraints=True)
model.set_ss_annotation(ann)
# model.get_hierarchy().write_pdb_file(file_name="start.pdb")
rm = ssb.substitute_ss(
model,
params = ssb_params_norot.ss_idealization)
rm.run()
# model.get_hierarchy().write_pdb_file(file_name="result.pdb")
| |
if passNum < 3:
std_threshold2 = std_threshold2*0.9
pre_window2 = pre_window2*1.75
passNum = passNum+1
event_time = nextPass
for Sidx,val in enumerate(event_idx):
whole_trace[int(val[0]):int(event_fall[Sidx,1])] = np.nan
else:
keepLokking = False
for passes in results.keys():
for st, en in results[passes]['event_idx']:
caOnset[roi,int(st):int(en)+1] = 1
for st, en in results[passes]['event_fall']:
caFall[roi,int(st):int(en)+1] = 1
return caOnset,caFall
def windowed_view(arr, window, overlap):
    """Return a zero-copy strided view of `arr` split along its last axis into
    windows of length `window` that overlap by `overlap` samples."""
    arr = np.asarray(arr)
    step = window - overlap
    n_windows = (arr.shape[-1] - overlap) // step
    view_shape = arr.shape[:-1] + (n_windows, window)
    view_strides = arr.strides[:-1] + (step * arr.strides[-1],) + arr.strides[-1:]
    return as_strided(arr, shape=view_shape, strides=view_strides)
def getAlignedSpeed(cellType,cre = None, mice = None, period = None, day=None,savePath = '/home/dana_z/ssd_2TB/6OHDA/speed2ca/'):
    """Load event-aligned speed traces for the requested cell types/sessions.

    Inputs:
      cellType - 'MSN', 'CRE', or a list containing either/both.
      cre      - (Optional) restrict to a cre line: 'PV', 'CHI', or 'NA'.
      mice     - (Optional) mouse id or list of ids; default None loads all mice.
      period   - (Optional) one of 'Healthy','Day 1-4','Day 5-12','Day 13-20','One Month'.
      day      - (Optional) callable day -> bool used to select sessions;
                 ignored when `period` is specified.
      savePath - (Optional) directory holding the per-session cached traces and
                 the 'sessions' index CSV.
    Output:
      (data, df) - data is an (80 x nNeurons) array of aligned traces;
                   df is the session table that was actually used.
    Raises:
      ValueError - if cellType contains neither 'MSN' nor 'CRE'.
    """
    dFile = 'FinalData_6OHDA_H.h5'

    # Select sessions from the index CSV according to day/period/cre filters.
    df = pd.read_csv(savePath+'sessions')
    # Idiom fix: compare to None with `is`/`is not`; accept any callable for `day`.
    if period is None and day is not None and callable(day):
        df['keep'] = df.apply(lambda row: day(row.day), axis=1)
        df = df[(df.keep==True)]
    if period in ['Healthy','Day 1-4','Day 5-12','Day 13-20','One Month']:
        df = df[(df.period==period)]
    if cre in ['PV','CHI','NA']:
        df = df[(df.cre==cre)]

    if not isinstance(cellType,list):
        cellType = [cellType]
    cellType = list(set(cellType).intersection({'MSN','CRE'}))
    if len(cellType) == 0:
        raise ValueError('Not a valid cellType value. cellType must be in ["MSN","CRE"]')

    if mice is None:
        mice = getMiceList(dFile)
    elif not isinstance(mice,list):
        mice = [mice]
    if not isinstance(mice[0],str):
        # Fix: build a new list instead of mutating the caller's list in place.
        mice = [str(m) for m in mice]
    df = df[(df.mouse.isin(mice))]

    # Allocate output: one column per neuron that could possibly be included.
    nNeurons = 0
    if 'MSN' in cellType:
        nNeurons = nNeurons + int(df.numMsn.sum()) - int(df.numred.sum())
    if 'CRE' in cellType:
        nNeurons = nNeurons + int(df.numred.sum())
    dResult = np.empty([80,nNeurons],dtype=float)

    ind = 0
    for sess in df.sess.unique():
        if 'MSN' in cellType:
            try:
                # Fix: close the file handle (was pickle.load(open(...)) with no close).
                with open(savePath+'MSN/'+sess,'rb') as f:
                    tempD = pickle.load(f)
            except Exception:   # best effort: skip sessions with no cached MSN trace
                print('ignored ',sess)
                continue
            dResult[:,ind:ind+tempD.shape[1]] = tempD
            ind = ind+tempD.shape[1]
        if 'CRE' in cellType:
            try:
                with open(savePath+'CRE/'+sess,'rb') as f:
                    tempD = pickle.load(f)
            except Exception:   # best effort: skip sessions with no cached CRE trace
                continue
            dResult[:,ind:ind+tempD.shape[1]] = tempD
            ind = ind+tempD.shape[1]

    # Trim to the columns actually filled (some sessions may have been skipped).
    return dResult[:,:ind],df
def getDrugFromSess(sess):
    """Map a session name to its drug label via the session's trailing letter.

    'A' -> 'Amph', 'L' -> 'L-Dopa', 'S' -> 'Saline'; anything else -> 'None'.
    """
    drogas = {'A': 'Amph', 'L': 'L-Dopa', 'S': 'Saline'}
    return drogas.get(sess[-1], 'None')
def getAlignedLFP(cellType, cre=None, mice=None, period=None, day=None, drug=None, drugPeriod='Pre'):
    """Load event-aligned LFP data for the selected neurons.

    Inputs:
      cellType - 'MSN', 'CRE', or a list of both; selects which neuron groups to load.
      cre - (Optional) cre mouse type filter: 'PV', 'CHI' or 'NA'. Default None (no filter).
      mice - (Optional) mouse name or list of names. Default None - all mice.
      period - (Optional) one of 'Healthy','Day 1-4','Day 5-12','Day 13-20','One Month'.
      day - (Optional) callable taking a day number and returning True to keep it.
            Ignored when period is given.
      drug - (Optional) 'Amph', 'L-Dopa', 'Saline' or 'None'.
      drugPeriod - 'Pre' (default) or 'Post'.
    Output:
      (dResult[:, :, :ind], df) - aligned LFP array of shape (12206, 87, nKept)
      and the session dataframe used to select it.
    """
    dFile = 'FinalData_6OHDA_H.h5'
    # NOTE(review): both branches currently point at the same directory; the
    # commented-out paths suggest Pre/Post once differed -- confirm before
    # relying on drugPeriod to change the source data.
    if drugPeriod == 'Post':
        savePath = '/home/dana_z/HD1/lfp2ca_notNormalize/'  # '/home/dana_z/HD1/lfpAligned2Ca/Post/'
    else:
        savePath = '/home/dana_z/HD1/lfp2ca_notNormalize/'  # '/home/dana_z/HD1/lfpAligned2Ca/Pre/'
    df = pd.read_csv(savePath + 'sessions')
    # day-based filtering only applies when no explicit period was requested
    if period is None and day is not None and callable(day):
        df['keep'] = df.apply(lambda row: day(row.day), axis=1)
        df = df[(df.keep == True)]
    if period in ['Healthy', 'Day 1-4', 'Day 5-12', 'Day 13-20', 'One Month']:
        df = df[(df.period == period)]
    if cre in ['PV', 'CHI', 'NA']:
        df = df[(df.cre == cre)]
    if drug in ['Amph', 'L-Dopa', 'Saline', 'None']:
        df = df[(df.drug == drug)]
    if not isinstance(cellType, list):
        cellType = [cellType]
    cellType = list(set(cellType).intersection({'MSN', 'CRE'}))
    if len(cellType) == 0:
        raise ValueError('Not a valid cellType value. cellType must be in ["MSN","CRE"]')
    # normalize the mice selection (was `mice == None` and an in-place
    # mutation of the caller's list; rebuild instead of mutating)
    if mice is None:
        mice = getMiceList(dFile)
    elif not isinstance(mice, list):
        mice = [mice]
    if not isinstance(mice[0], str):
        mice = [str(m) for m in mice]
    df = df[(df.mouse.isin(mice))]
    # allocate output: (time, freq, neurons); 12206 x 87 is fixed by the saved pickles
    nNeurons = 0
    if 'MSN' in cellType:
        # numMsn includes the red (cre) cells, so subtract them out
        nNeurons = nNeurons + int(df.numMsn.sum()) - int(df.numred.sum())
    if 'CRE' in cellType:
        nNeurons = nNeurons + int(df.numred.sum())
    dResult = np.empty([12206, 87, nNeurons], dtype=float)
    ind = 0
    for sess in df.sess.unique():
        if 'MSN' in cellType:
            # use a context manager so the file handle is always closed
            with open(savePath + 'MSN/' + sess, 'rb') as fh:
                tempD = pickle.load(fh)
            # 9999 / -9999 are sentinel values in the saved data -> NaN
            tempD[tempD == 9999] = np.nan
            tempD[tempD == -9999] = np.nan
            dResult[:, :, ind:ind + tempD.shape[2]] = tempD
            ind = ind + tempD.shape[2]
        # for every Cre neuron:
        if 'CRE' in cellType:
            try:
                with open(savePath + 'CRE/' + sess, 'rb') as fh:
                    tempD = pickle.load(fh)
            except Exception:
                # no CRE pickle for this session -- skip it (was a bare except)
                continue
            tempD[tempD == 9999] = np.nan
            tempD[tempD == -9999] = np.nan
            dResult[:, :, ind:ind + tempD.shape[2]] = tempD
            ind = ind + tempD.shape[2]
    # trim to the neurons actually filled (sessions may have been skipped)
    return dResult[:, :, :ind], df
def getRotPeriods(ax,rot,dt,th,lth,dataPoints,Color = {'hiAC':'mediumseagreen','hiC':'limegreen','lo':'tomato'},plt=True):
    """Segment a rotation trace into sustained periods and optionally shade them.

    Inputs:
      ax - matplotlib axes to draw on.
      rot - rotation trace (1-D array-like).
      dt - sample interval in seconds.
      th - threshold for 'high' rotation (clockwise >= th, anticlockwise <= -th).
      lth - threshold for 'low' rotation (|rot| <= lth).
      dataPoints - minimum segment length, in samples.
      Color - span colors per condition.
      plt - when True, shade the detected segments on ax.
    Output:
      {'hiAC'|'hiC'|'lo': {'start': [...], 'end': [...]}} with sample indices.
    """
    Sdata = {}
    # smooth the trace before thresholding movement onset
    lrot = smooth(rot, dataPoints/2)
    # boolean masks per condition (replaces the fragile eval()-based lookups)
    masks = {
        'hiC': lrot >= th,
        'hiAC': lrot <= -th,
        'lo': (lrot <= lth) & (lrot >= -lth),
    }
    segments = {'hiAC':{'start':[],'end':[]},'hiC':{'start':[],'end':[]},'lo':{'start':[],'end':[]}}
    for cond in segments.keys():
        hiRot = masks[cond]
        d = np.diff(1 * hiRot)  # +1 at segment starts, -1 at segment ends
        # a segment may already be in progress at sample 0
        tStart = 0 if hiRot[0] == 1 else None
        for l in range(0, len(d)):
            if d[l] == 1:
                tStart = l
            # keep only segments longer than dataPoints samples
            if d[l] == -1 and l - tStart > dataPoints:
                segments[cond]['start'].append(tStart)
                segments[cond]['end'].append(l)
                tStart = None
        # close a segment still open at the end of the trace
        if tStart is not None and np.sum(hiRot[tStart:]) > dataPoints:
            segments[cond]['start'].append(tStart)
            segments[cond]['end'].append(len(hiRot) - 1)
        Sdata[cond] = segments[cond]
    # plot the raw trace, then shade ALL three period types (the previous
    # version ran this after the loop and only shaded the last condition --
    # the old in-code comment asked for exactly this fix)
    t = np.linspace(0, len(rot) * dt, len(rot))
    ax.plot(t, rot, color='black')
    if plt:
        for cond in segments:
            for s, e in zip(segments[cond]['start'], segments[cond]['end']):
                ax.axvspan(t[s], t[e], color=Color[cond], alpha=0.5)
    return Sdata
def periodCalcOld(day):
    """Map a post-lesion day number to the legacy five-bucket period label."""
    if day == 0:
        return 'Healthy'
    for tope, etiqueta in ((5, 'Day 1-4'), (13, 'Day 5-12'), (21, 'Day 13-20')):
        if day < tope:
            return etiqueta
    return 'One Month'
def periodCalc(day):
    """Map a post-lesion day number to the coarse two-bucket period label."""
    if day == 0:
        return 'Healthy'
    return 'Day 1-13' if day < 13 else 'Day 14-35'
def calcLFPAlign2Mvmt(savePath,mvmt='speed',onsetName='mvmtOnset',norAxis=1):
# function used to calculte LFP aligned to mvmt onset, or rotation onset 6/15/2020
Files = ['FinalData_6OHDA.h5','FinalData_6OHDA_H.h5','FinalData_6OHDA_H_skip.h5','FinalData_6OHDA_skip.h5']
miceList = getMiceList(Files[0])
f = h5py.File('Spectograms.hdf5','r') #LFP coeffs
# constents for analysis:
WinPre = 2 #s
WinPost = 2 #s
df = pd.DataFrame(columns=['mouse','sess','day','period','cre'])
for m in miceList:
data = getData(Files[0],[mvmt,'lfp'],period ='Pre', mice=m)
cre = getCreType(Files[1],m)
for sess in tqdm(data.keys()):
if sess[5] == 'B':
day = 0
else:
day = int(re.findall(r'\d+',sess[5:])[0])
# get data
speedOnset = getOnsetOrPeriod(m,sess,'Pre',onsetName)
if np.sum(speedOnset)==0:
print('no onset in sess: ',sess)
continue
# rotOnset = getOnsetOrPeriod(m,sess,'Pre','rotOnset')
coeff = np.abs(f[m][sess]['Pre']['coeff'].value)
lfpOutliers = removeLFPOutliers(data[sess]['lfp']['lfp'], sess)
try:
coeff[:,(lfpOutliers[:,0]==1)] = np.nan
if norAxis == 1:
coeff = coeff.T/np.nansum(coeff,axis=norAxis) # So that axis[0] is the time axis + normalize power in frequency per sesion
else:
coeff = coeff/np.nansum(coeff,axis=norAxis)
coeff = coeff.T
except:
print(sess)
continue
# add session to df, so can be retrived
dtS = float(1/data[sess][mvmt]['Fs'])
dtL = float(1/data[sess]['lfp']['FS'])
ts = np.arange(0, np.max(speedOnset.shape)) * dtS
tl = np.arange(0, np.max(data[sess]['lfp']['lfp'].shape)) * dtL
tPlot = np.linspace(-WinPre,WinPost,int((WinPre+WinPost)/dtL))
# convert onset to LFP time:
onsets = np.full_like(tl,False)
for si in ts[speedOnset]:
ti = np.argmin(np.abs(tl-si))
onsets[ti] = True
onsets = onsets.astype(bool)
# for every Cre neuron:
al = alignToOnset(coeff,onsets, winPost =WinPost/dtL, winPre =WinPre/dtL)
if al.ndim <3:
try:
al = np.reshape(al,(al.shape[0],al.shape[1],1))
except:
print('no onset, when there should be in sess= | |
import math
import random
import torch.nn as nn
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.models.fsmt.modeling_fsmt import (
SinusoidalPositionalEmbedding,
EncoderLayer,
DecoderLayer,
FSMTModel as _FSMTModel,
FSMTForConditionalGeneration as _FSMTForConditionalGeneration,
)
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
# Seq2SeqLMOutput,
# Seq2SeqModelOutput,
)
from transformers.deepspeed import is_deepspeed_zero3_enabled
def invert_mask(attention_mask):
    """Invert a 2-D attention mask: 1 -> False, 0 -> True (and True/False likewise)."""
    assert attention_mask.dim() == 2
    return attention_mask == 0
class FSMTConfig(FSMTConfig):
    """FSMT configuration extended with pre-layernorm switches for the encoder
    and decoder. The default (False/False) keeps the upstream post-layernorm
    behavior.

    NOTE: this class intentionally shadows the imported transformers
    ``FSMTConfig`` so the rest of this module picks up the extension.
    """
    def __init__(
        self,
        encoder_pre_layernorm=False,
        decoder_pre_layernorm=False,
        **kwargs,
    ):
        # store the new flags before delegating everything else to the base
        # config (base __init__ handles serialization of extra attributes)
        self.encoder_pre_layernorm = encoder_pre_layernorm
        self.decoder_pre_layernorm = decoder_pre_layernorm
        super().__init__(**kwargs)
class FSMTEncoderLayer(EncoderLayer):
    """FSMT encoder layer supporting both post-layernorm (upstream default)
    and pre-layernorm placement, selected by ``config.encoder_pre_layernorm``."""

    def __init__(self, config: FSMTConfig):
        super().__init__(config)
        # True -> layernorm runs before each sub-block instead of after it
        self.pre_layernorm = config.encoder_pre_layernorm

    def forward(self, x, encoder_padding_mask, layer_head_mask, output_attentions=False):
        """Run one encoder layer.

        Args:
            x: input of shape *(seq_len, batch, embed_dim)*.
            encoder_padding_mask: binary tensor *(batch, src_len)* where
                padding positions are marked with 1 (excluded from attention).
            layer_head_mask: per-head mask *(config.encoder_attention_heads,)*.
            output_attentions: also return the attention weights.

        Returns:
            Tuple of (output *(seq_len, batch, embed_dim)*, attention weights).
        """
        # ---- self-attention sub-block ----
        shortcut = x
        if self.pre_layernorm:
            x = self.self_attn_layer_norm(x)
        x, attn_weights = self.self_attn(
            query=x,
            key=x,
            key_padding_mask=encoder_padding_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        x = shortcut + nn.functional.dropout(x, p=self.dropout, training=self.training)
        if not self.pre_layernorm:
            x = self.self_attn_layer_norm(x)

        # ---- feed-forward sub-block ----
        shortcut = x
        if self.pre_layernorm:
            x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training)
        x = nn.functional.dropout(self.fc2(x), p=self.dropout, training=self.training)
        x = shortcut + x
        if not self.pre_layernorm:
            x = self.final_layer_norm(x)
        return x, attn_weights
class FSMTDecoderLayer(DecoderLayer):
    """FSMT decoder layer supporting both post-layernorm (upstream default)
    and pre-layernorm placement, selected by ``config.decoder_pre_layernorm``."""
    def __init__(self, config: FSMTConfig):
        super().__init__(config)
        # True -> layernorm runs before each sub-block instead of after it
        self.pre_layernorm = config.decoder_pre_layernorm
    def forward(
        self,
        x,
        encoder_hidden_states,
        encoder_attn_mask=None,
        layer_state=None,
        causal_mask=None,
        layer_head_mask=None,
        cross_attn_layer_head_mask=None,
        decoder_padding_mask=None,
        output_attentions=False,
    ):
        """Run one decoder layer: self-attention, cross-attention, then FFN,
        each with a residual connection and layernorm placed per
        ``self.pre_layernorm``. ``layer_state`` is the per-layer generation
        cache and is mutated in place by both attention calls.

        Returns (output, self-attention weights, layer_state, cross-attention weights).
        """
        residual = x
        if layer_state is None:
            layer_state = {}
        if self.pre_layernorm:
            x = self.self_attn_layer_norm(x)
        # Self Attention
        x, self_attn_weights = self.self_attn(
            query=x,
            key=x,
            layer_state=layer_state,  # adds keys to layer state
            key_padding_mask=decoder_padding_mask,
            attn_mask=causal_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        x = nn.functional.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        if not self.pre_layernorm:
            x = self.self_attn_layer_norm(x)
        # Cross attention
        residual = x
        # self- and cross-attention must write to different cache slots
        assert self.encoder_attn.cache_key != self.self_attn.cache_key
        if self.pre_layernorm:
            x = self.encoder_attn_layer_norm(x)
        x, cross_attn_weights = self.encoder_attn(
            query=x,
            key=encoder_hidden_states,
            key_padding_mask=encoder_attn_mask,
            layer_state=layer_state,  # mutates layer state
            layer_head_mask=cross_attn_layer_head_mask,
            output_attentions=output_attentions,
        )
        x = nn.functional.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        if not self.pre_layernorm:
            x = self.encoder_attn_layer_norm(x)
        # Fully Connected
        residual = x
        if self.pre_layernorm:
            x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = nn.functional.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        if not self.pre_layernorm:
            x = self.final_layer_norm(x)
        return (
            x,
            self_attn_weights,
            layer_state,
            cross_attn_weights,
        )  # layer_state = cache for decoding
class FSMTEncoder(nn.Module):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`EncoderLayer`].

    Supports the optional pre-layernorm mode: when `config.encoder_pre_layernorm`
    is True, a final `layer_norm` is applied after the layer stack.

    Args:
        config: FSMTConfig
        embed_tokens: shared input embedding (provides padding_idx and embed dim)
    """
    def __init__(self, config: FSMTConfig, embed_tokens):
        super().__init__()
        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop
        self.padding_idx = embed_tokens.padding_idx
        self.embed_tokens = embed_tokens
        embed_dim = embed_tokens.embedding_dim
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        self.embed_positions = SinusoidalPositionalEmbedding(
            config.max_position_embeddings + self.padding_idx + 1,
            embed_dim,
            self.padding_idx,
        )
        self.layers = nn.ModuleList(
            [FSMTEncoderLayer(config) for _ in range(config.encoder_layers)]
        )
        self.pre_layernorm = config.encoder_pre_layernorm
        if self.pre_layernorm:
            # pre-layernorm stacks need one final norm after the last layer
            self.layer_norm = nn.LayerNorm(embed_dim)
    def forward(
        self,
        input_ids,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """
        Args:
            input_ids (`torch.LongTensor`): tokens in the source language of shape
                *(batch, src_len)*
            attention_mask (`torch.LongTensor`): indicating which indices are padding tokens
            head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
        Returns:
            BaseModelOutput or Tuple comprised of:
                - **x** (`torch.Tensor`): the last encoder layer's output of shape *(src_len, batch, embed_dim)*
                - **encoder_states** (`Tuple(torch.FloatTensor`)): all intermediate hidden states of shape *(src_len,
                  batch, embed_dim)*. Only populated if *output_hidden_states:* is True.
                - **all_attentions** (`Tuple(torch.FloatTensor`)): Attention weights for each layer.
                During training might not be of length n_layers because of layer dropout.
        """
        # check attention mask and invert
        if attention_mask is not None:
            attention_mask = invert_mask(attention_mask)
        inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        embed_pos = self.embed_positions(input_ids)
        x = inputs_embeds + embed_pos
        x = nn.functional.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            assert head_mask.size()[0] == (
                len(self.layers)
            ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                # hidden states are collected in batch-first layout
                x = x.transpose(0, 1)  # T x B x C -> B x T x C
                encoder_states += (x,)
                x = x.transpose(0, 1)  # B x T x C -> T x B x C
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if self.training and (
                dropout_probability < self.layerdrop
            ):  # skip the layer
                attn = None
            else:
                x, attn = encoder_layer(
                    x,
                    attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    output_attentions=output_attentions,
                )
            if output_attentions:
                all_attentions = all_attentions + (attn,)
        if self.pre_layernorm:
            x = self.layer_norm(x)
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        if output_hidden_states:
            encoder_states += (x,)
        if not return_dict:
            return tuple(
                v for v in [x, encoder_states, all_attentions] if v is not None
            )
        return BaseModelOutput(
            last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions
        )
class FSMTDecoder(nn.Module):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DecoderLayer`]
Args:
config: FSMTConfig
embed_tokens (nn.Embedding): output embedding
"""
    def __init__(self, config: FSMTConfig, embed_tokens: nn.Embedding):
        """Build the decoder stack; `embed_tokens` is the shared output embedding."""
        super().__init__()
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = embed_tokens.padding_idx
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        self.embed_tokens = embed_tokens
        embed_dim = embed_tokens.embedding_dim
        self.embed_positions = SinusoidalPositionalEmbedding(
            config.max_position_embeddings + self.padding_idx + 1,
            embed_dim,
            self.padding_idx,
        )
        self.layers = nn.ModuleList(
            [FSMTDecoderLayer(config) for _ in range(config.decoder_layers)]
        )
        self.pre_layernorm = config.decoder_pre_layernorm
        if self.pre_layernorm:
            # pre-layernorm stacks need one final norm after the last layer
            self.layer_norm = nn.LayerNorm(embed_dim)
        if is_deepspeed_zero3_enabled():
            import deepspeed
            # under ZeRO-3 the weight is partitioned; gather it to read its shape
            with deepspeed.zero.GatheredParameters(
                self.embed_tokens.weight, modifier_rank=None
            ):
                embed_tokens_weight_shape = self.embed_tokens.weight.shape
        else:
            embed_tokens_weight_shape = self.embed_tokens.weight.shape
        self.output_projection = nn.Linear(
            embed_tokens_weight_shape[1], embed_tokens_weight_shape[0], bias=False
        )
        # tie the output projection to the input embedding (weight sharing)
        self.output_projection.weight = self.embed_tokens.weight
def forward(
self,
input_ids,
encoder_hidden_states,
encoder_padding_mask,
decoder_padding_mask,
decoder_causal_mask,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=False,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
"""
Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al.,
EMNLP 2019).
Args:
input_ids (`torch.LongTensor` of shape `(batch, tgt_len)`):
previous decoder outputs for teacher forcing
encoder_hidden_states: output from the encoder, used for
encoder-side attention
encoder_padding_mask: for ignoring pad tokens
past_key_values (dict or None): dictionary used for storing state during generation
head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
Returns:
BaseModelOutputWithPast or tuple:
- the decoder's features of shape *(batch, tgt_len, embed_dim)*
- the cache
- hidden states
- attentions
"""
# check attention mask and invert
if encoder_padding_mask is not None:
encoder_padding_mask = invert_mask(encoder_padding_mask)
# embed positions
positions = self.embed_positions(input_ids) # , use_cache=use_cache)
if use_cache:
input_ids = input_ids[:, -1:]
positions = positions[:, -1:] # happens after we embed them
# assert input_ids.ne(self.padding_idx).any()
x = self.embed_tokens(input_ids) * self.embed_scale
x += positions
x = nn.functional.dropout(x, p=self.dropout, training=self.training)
# Convert to FSMT output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
x = x.transpose(0, 1)
encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attns = () if output_attentions else None
next_decoder_cache = []
# check if head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip(
[head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]
):
if attn_mask is not None:
assert attn_mask.size()[0] == len(self.layers), (
f"The `{mask_name}` should be specified for {len(self.layers)} layers, "
f"but it is for {head_mask.size()[0]}."
)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
x = x.transpose(0, 1)
all_hidden_states += (x,)
x = x.transpose(0, 1)
dropout_probability = random.uniform(0, | |
<gh_stars>0
input = """bot 119 gives low to bot 18 and high to bot 3
bot 69 gives low to bot 47 and high to bot 172
bot 51 gives low to output 6 and high to bot 174
bot 11 gives low to bot 94 and high to bot 165
bot 42 gives low to bot 0 and high to bot 95
bot 195 gives low to bot 69 and high to bot 99
bot 96 gives low to bot 169 and high to bot 85
bot 24 gives low to bot 119 and high to bot 44
bot 114 gives low to output 5 and high to bot 43
bot 13 gives low to bot 203 and high to bot 197
bot 20 gives low to bot 146 and high to bot 164
bot 127 gives low to bot 132 and high to bot 11
bot 50 gives low to bot 80 and high to bot 148
bot 77 gives low to bot 35 and high to bot 67
bot 151 gives low to output 16 and high to bot 68
bot 72 gives low to bot 130 and high to bot 88
bot 89 gives low to bot 184 and high to bot 170
bot 158 gives low to bot 178 and high to bot 29
bot 85 gives low to bot 93 and high to bot 191
bot 6 gives low to bot 159 and high to bot 7
bot 156 gives low to bot 25 and high to bot 16
bot 177 gives low to output 8 and high to bot 157
bot 26 gives low to bot 131 and high to bot 149
value 61 goes to bot 119
bot 18 gives low to bot 161 and high to bot 118
bot 53 gives low to bot 128 and high to bot 74
bot 110 gives low to bot 126 and high to bot 90
bot 95 gives low to bot 52 and high to bot 62
bot 67 gives low to bot 121 and high to bot 156
bot 138 gives low to bot 151 and high to bot 171
bot 173 gives low to bot 137 and high to bot 15
bot 175 gives low to bot 136 and high to bot 117
bot 28 gives low to bot 120 and high to bot 92
bot 143 gives low to bot 70 and high to bot 26
bot 78 gives low to bot 90 and high to bot 63
bot 113 gives low to bot 73 and high to bot 193
bot 207 gives low to bot 23 and high to bot 125
bot 116 gives low to bot 100 and high to bot 158
bot 172 gives low to bot 10 and high to bot 61
bot 64 gives low to bot 24 and high to bot 41
bot 16 gives low to bot 147 and high to bot 106
value 11 goes to bot 2
bot 203 gives low to bot 125 and high to bot 197
value 5 goes to bot 18
bot 49 gives low to bot 160 and high to bot 83
bot 112 gives low to bot 39 and high to bot 32
bot 129 gives low to bot 186 and high to bot 112
bot 68 gives low to output 7 and high to bot 87
bot 201 gives low to bot 56 and high to bot 124
bot 56 gives low to bot 83 and high to bot 207
bot 38 gives low to bot 89 and high to bot 136
bot 194 gives low to bot 40 and high to bot 105
bot 15 gives low to bot 1 and high to bot 50
bot 47 gives low to bot 6 and high to bot 10
bot 63 gives low to bot 205 and high to bot 139
bot 176 gives low to bot 30 and high to bot 138
value 41 goes to bot 24
bot 71 gives low to bot 165 and high to bot 185
bot 84 gives low to bot 209 and high to bot 129
bot 204 gives low to bot 61 and high to bot 91
bot 152 gives low to bot 163 and high to bot 166
bot 139 gives low to bot 45 and high to bot 5
bot 100 gives low to bot 55 and high to bot 178
bot 81 gives low to bot 95 and high to bot 188
bot 163 gives low to output 12 and high to bot 114
bot 133 gives low to bot 124 and high to bot 13
bot 206 gives low to bot 37 and high to bot 21
bot 23 gives low to bot 116 and high to bot 82
bot 134 gives low to bot 195 and high to bot 202
bot 73 gives low to bot 54 and high to bot 22
bot 99 gives low to bot 172 and high to bot 204
bot 192 gives low to bot 34 and high to bot 115
bot 8 gives low to bot 171 and high to bot 60
bot 142 gives low to bot 199 and high to bot 28
bot 41 gives low to bot 44 and high to bot 140
bot 0 gives low to bot 71 and high to bot 52
bot 154 gives low to bot 148 and high to bot 33
bot 62 gives low to bot 66 and high to bot 65
bot 160 gives low to bot 175 and high to bot 153
value 73 goes to bot 111
bot 164 gives low to bot 141 and high to bot 9
bot 202 gives low to bot 99 and high to bot 19
bot 12 gives low to bot 46 and high to bot 48
bot 170 gives low to bot 190 and high to bot 194
bot 27 gives low to bot 101 and high to bot 200
bot 148 gives low to bot 127 and high to bot 182
bot 147 gives low to bot 57 and high to bot 96
bot 65 gives low to bot 78 and high to bot 46
bot 125 gives low to bot 82 and high to bot 79
bot 86 gives low to output 10 and high to output 18
bot 105 gives low to bot 108 and high to bot 98
bot 103 gives low to output 15 and high to bot 199
bot 14 gives low to bot 65 and high to bot 12
bot 132 gives low to bot 113 and high to bot 94
value 53 goes to bot 102
bot 136 gives low to bot 170 and high to bot 196
bot 55 gives low to bot 105 and high to bot 145
bot 36 gives low to bot 67 and high to bot 126
bot 92 gives low to bot 177 and high to bot 101
bot 161 gives low to output 17 and high to bot 208
bot 32 gives low to bot 180 and high to bot 123
bot 179 gives low to output 19 and high to bot 86
bot 90 gives low to bot 76 and high to bot 205
bot 21 gives low to bot 103 and high to bot 142
bot 174 gives low to output 11 and high to bot 30
bot 46 gives low to bot 63 and high to bot 48
bot 94 gives low to bot 193 and high to bot 77
bot 70 gives low to bot 21 and high to bot 131
bot 29 gives low to bot 181 and high to bot 144
bot 82 gives low to bot 158 and high to bot 135
bot 33 gives low to bot 182 and high to bot 0
bot 144 gives low to bot 192 and high to bot 141
bot 197 gives low to bot 79 and high to bot 20
value 17 goes to bot 2
bot 137 gives low to bot 41 and high to bot 1
bot 121 gives low to bot 8 and high to bot 25
bot 40 gives low to bot 15 and high to bot 108
bot 83 gives low to bot 153 and high to bot 23
value 2 goes to bot 64
value 47 goes to bot 184
bot 188 gives low to bot 62 and high to bot 14
bot | |
dibujarGorila(supPant, x+47, y, BRAZO_IZQ_ARRIBA)
pygame.display.update()
time.sleep(2)
dibujarGorila(supPant, x-13, y, BRAZO_IZQ_ARRIBA)
dibujarGorila(supPant, x+47, y, BRAZO_DER_ARRIBA)
pygame.display.update()
time.sleep(2)
for i in range(4):
dibujarGorila(supPant, x-13, y, BRAZO_IZQ_ARRIBA)
dibujarGorila(supPant, x+47, y, BRAZO_DER_ARRIBA)
pygame.display.update()
time.sleep(0.3)
dibujarGorila(supPant, x-13, y, BRAZO_DER_ARRIBA)
dibujarGorila(supPant, x+47, y, BRAZO_IZQ_ARRIBA)
pygame.display.update()
time.sleep(0.3)
def obtenerTiro(supPant, j1nombre, j2nombre, númJugador):
    """Prompt the given player for an angle and velocity.

    Draws both player names in the HUD, reads two numeric inputs, erases the
    prompts, and returns (ángulo, velocidad). Player 2's angle is mirrored
    (180 - ángulo) so both players enter angles relative to their own facing.
    """
    # clear both players' HUD areas before re-drawing the names
    pygame.draw.rect(supPant, COLOR_CIELO, (0, 0, 200, 50))
    # FIX: width here was `00` -- a zero-width rect cleared nothing;
    # presumably it should mirror player 1's 200px area (confirm on screen)
    pygame.draw.rect(supPant, COLOR_CIELO, (550, 0, 200, 50))
    dibujarTexto(j1nombre, supPant, 2, 2, COLOR_BLANCO, COLOR_CIELO)
    dibujarTexto(j2nombre, supPant, 538, 2, COLOR_BLANCO, COLOR_CIELO)
    # input column: left edge for player 1, right side for player 2
    if númJugador == 1:
        x = 2
    else:
        x = 538
    ángulo = ''
    while ángulo == '':
        ángulo = modoEntrada('Angle: ', supPant, x, 18, COLOR_BLANCO, COLOR_CIELO, longmax=3, permitidos='0123456789')
        if ángulo is None: terminar()
    ángulo = int(ángulo)
    velocidad = ''
    while velocidad == '':
        velocidad = modoEntrada('Velocity: ', supPant, x, 34, COLOR_BLANCO, COLOR_CIELO, longmax=3, permitidos='0123456789')
        if velocidad is None: terminar()
    velocidad = int(velocidad)
    # Erase the input prompts by re-drawing them in the sky color.
    # FIX: these were drawn at y=2 (over the names, not the prompts) and the
    # second line printed str(ángulo) instead of str(velocidad); draw at the
    # prompts' real positions (y=18 / y=34) with matching text so the sky-
    # colored background of the rendered text actually covers them.
    dibujarTexto('Angle: ' + str(ángulo), supPant, x, 18, COLOR_CIELO, COLOR_CIELO)
    dibujarTexto('Velocity: ' + str(velocidad), supPant, x, 34, COLOR_CIELO, COLOR_CIELO)
    pygame.display.update()
    # player 2 throws right-to-left, so mirror the angle
    if númJugador == 2:
        ángulo = 180 - ángulo
    return (ángulo, velocidad)
def mostrarPuntaje(supPant, puntajeUno, puntajeDos):
    """Render the score line ("<p1>>Score<<p2>") near the bottom of supPant."""
    marcador = '%s>Score<%s' % (puntajeUno, puntajeDos)
    dibujarTexto(marcador, supPant, 270, 310, COLOR_BLANCO, COLOR_CIELO, pos='izq')
def dibujarTiro(supPant, supPaisajeUrbano, ángulo, velocidad, númJugador, viento, gravedad, gor1, gor2):
    """Animate one banana throw and report what it hit.

    gor1/gor2 are the (x, y) top-left corners of the two gorilla sprites.
    Returns 'gorila1', 'gorila2', 'edificio' (building) or 'errado' (missed).
    Relies on module-level sprites/constants and on the global `solRect`
    (the sun's rect -- assumed defined elsewhere in this file).
    """
    # convert the angle to radians and split the launch velocity into components
    ángulo = ángulo / 180.0 * math.pi
    velInicX = math.cos(ángulo) * velocidad
    velInicY = math.sin(ángulo) * velocidad
    anchoGor, alturaGor = GOR_ABAJO_SUP.get_size()
    rectGor1 = pygame.Rect(gor1[0], gor1[1], anchoGor, alturaGor)
    rectGor2 = pygame.Rect(gor2[0], gor2[1], anchoGor, alturaGor)
    # player 1 (left side) throws with the left arm, player 2 with the right
    if númJugador == 1:
        imgGor = BRAZO_IZQ_ARRIBA
    else:
        imgGor = BRAZO_DER_ARRIBA
    if númJugador == 1:
        xinicio = gor1[0]
        yinicio = gor1[1]
    elif númJugador == 2:
        xinicio = gor2[0]
        yinicio = gor2[1]
    # throwing animation: arm up, then both arms down
    dibujarGorila(supPant, xinicio, yinicio, imgGor)
    pygame.display.update()
    time.sleep(0.3)
    dibujarGorila(supPant, xinicio, yinicio, AMBOS_BRAZOS_ABAJO)
    pygame.display.update()
    formaBanana = ARRIBA
    if númJugador == 2:
        # launch from the far (left) edge of player 2's sprite, above it
        xinicio += GOR_ABAJO_SUP.get_size()[0]
        yinicio -= obtenerRectBanana(0, 0, formaBanana).height + BAN_ARRIBA_SUP.get_size()[1]
    impacto = False
    bananaEnJuego = True
    t = 1.0
    impactoSol = False
    while not impacto and bananaEnJuego:
        # projectile arc: constant wind acceleration in x, gravity in y
        x = xinicio + (velInicX * t) + (0.5 * (viento / 5) * t**2)
        y = yinicio + ((-1 * (velInicY * t)) + (0.5 * gravedad * t**2))
        # off either side or below the screen -> the throw missed
        if x >= ANCHO_PNT - 10 or x <= 3 or y >= ALTURA_PNT:
            bananaEnJuego = False
        rectBanana = obtenerRectBanana(x, y, formaBanana)
        if formaBanana == ARRIBA:
            supBanana = BAN_ARRIBA_SUP
            rectBanana.left -= 2
            rectBanana.top += 2
        elif formaBanana == ABAJO:
            supBanana = BAN_ABAJO_SUP
            rectBanana.left -= 2
            rectBanana.top += 2
        elif formaBanana == IZQ:
            supBanana = BAN_IZQ_SUP
        elif formaBanana == DER:
            supBanana = BAN_DER_SUP
        # spin the banana for the next frame
        formaBanana = siguienteFormaBanana(formaBanana)
        arregloPíxFuente = pygame.PixelArray(supPaisajeUrbano)
        if bananaEnJuego and y > 0:
            if solRect.collidepoint(x, y):
                # the banana hit the sun: switch to the "surprised" face
                impactoSol = True
            dibujarSol(supPant, sorprendido=impactoSol)
            if rectBanana.colliderect(rectGor1):
                # the banana hit player 1. Explosions are drawn on the screen
                # (supPant) AND on the standalone cityscape surface
                # (supPaisajeUrbano) so collision checks against the cityscape
                # don't mistake sun/text pixels for buildings, and the
                # cityscape keeps track of what is still standing.
                hacerExplosión(supPant, supPaisajeUrbano, rectBanana.centerx, rectBanana.centery, tamañoExplosión=int(TAMAÑO_EXPLOSIÓN_GOR*2/3), velocidad=0.005)
                hacerExplosión(supPant, supPaisajeUrbano, rectBanana.centerx, rectBanana.centery, tamañoExplosión=TAMAÑO_EXPLOSIÓN_GOR, velocidad=0.005)
                # FIX: erase the banana here too -- the gorila2/edificio
                # branches did this but this branch left the banana on screen
                supPant.fill(COLOR_CIELO, rectBanana)  # erase banana
                dibujarSol(supPant)
                return 'gorila1'
            elif rectBanana.colliderect(rectGor2):
                # the banana hit player 2
                hacerExplosión(supPant, supPaisajeUrbano, rectBanana.centerx, rectBanana.centery, tamañoExplosión=int(TAMAÑO_EXPLOSIÓN_GOR*2/3), velocidad=0.005)
                hacerExplosión(supPant, supPaisajeUrbano, rectBanana.centerx, rectBanana.centery, tamañoExplosión=TAMAÑO_EXPLOSIÓN_GOR, velocidad=0.005)
                supPant.fill(COLOR_CIELO, rectBanana)  # erase banana
                dibujarSol(supPant)
                return 'gorila2'
            elif chocaContraSinColor(arregloPíxFuente, supPant, rectBanana, COLOR_CIELO):
                # the banana hit a building
                hacerExplosión(supPant, supPaisajeUrbano, rectBanana.centerx, rectBanana.centery)
                supPant.fill(COLOR_CIELO, rectBanana)  # erase banana
                dibujarSol(supPant)
                return 'edificio'
        # pygame forbids blitting to a surface while a PixelArray of it
        # exists, so release it before drawing
        del arregloPíxFuente
        supPant.blit(supBanana, (rectBanana.topleft))
        pygame.display.update()
        time.sleep(0.02)
        supPant.fill(COLOR_CIELO, rectBanana)  # erase banana
        t += 0.1  # advance the trajectory
    dibujarSol(supPant)
    return 'errado'
def danzaVictoria(supPant, x, y):
    """Victory dance: given the sprite's top-left (x, y), the winning gorilla
    alternates its raised-arm sprites four times, waving its arms in the air."""
    for _ in range(4):
        for sprite in (GOR_IZQ_SUP, GOR_DER_SUP):
            supPant.blit(sprite, (x, y))
            pygame.display.update()
            time.sleep(0.3)
def chocaContraSinColor(arrPíx, objSup, rect, color):
    """Return True if the area described by *rect* within *arrPíx* (a pixel
    array over *objSup*) contains any pixel whose color differs from *color*.
    Used to detect whether the banana touched anything that is not sky
    (i.e. a gorilla or a building). The rect is clipped to the screen."""
    xmax = min(rect.right, ANCHO_PNT)
    ymax = min(rect.bottom, ALTURA_PNT)
    return any(
        objSup.unmap_rgb(arrPíx[px][py]) != color
        for px in range(rect.left, xmax)
        for py in range(rect.top, ymax)
    )
def obtenerRectBanana(x, y, shape):
    """Return a Rect at (x, y) sized for the banana sprite of *shape*
    (ARRIBA/ABAJO/IZQ/DER); None for an unrecognized shape, as before."""
    sprites = {
        ARRIBA: BAN_ARRIBA_SUP,
        ABAJO: BAN_ABAJO_SUP,
        IZQ: BAN_IZQ_SUP,
        DER: BAN_DER_SUP,
    }
    sup = sprites.get(shape)
    if sup is None:
        return None
    return pygame.Rect((x, y), sup.get_size())
def obtenerViento():
    """Randomly pick this round's wind: magnitude 5-15, either direction."""
    magnitud = random.randint(5, 15)
    invertir = random.randint(0, 1)
    return -magnitud if invertir else magnitud
def dibujarViento(supPant, wind):
    """Draw the wind arrow at the bottom of the screen.

    ``wind`` comes from obtenerViento(); zero wind draws nothing. The arrow
    length is the wind value scaled by 3, and the head points in the wind's
    direction.
    """
    if wind == 0:
        return  # calm round: no arrow
    largo = wind * 3
    base = (int(ANCHO_PNT / 2), ALTURA_PNT - 5)
    punta = (int(ANCHO_PNT / 2) + largo, ALTURA_PNT - 5)
    pygame.draw.line(supPant, COLOR_EXPLOSIÓN, base, punta)
    # Arrow-head strokes angle back against the direction of travel.
    dirFlecha = -2 if largo > 0 else 2
    pygame.draw.line(supPant, COLOR_EXPLOSIÓN, punta, (punta[0] + dirFlecha, punta[1] - 2))
    pygame.draw.line(supPant, COLOR_EXPLOSIÓN, punta, (punta[0] + dirFlecha, punta[1] + 2))
def hacerExplosión(supPant, supPaisajeUrbano, x, y, tamañoExplosión=TAMAÑO_EXPLOSIÓN_EDIF, velocidad=0.05):
    """Animate an explosion centred at (x, y).

    Circles are drawn on both the visible screen and the persistent
    cityscape surface so the crater survives later redraws. The blast grows
    to ``tamañoExplosión`` then shrinks back, sleeping ``velocidad`` seconds
    per frame.
    """
    def círculo(color, radio):
        # Draw on both surfaces so the damage persists in the backdrop.
        pygame.draw.circle(supPant, color, (x, y), radio)
        pygame.draw.circle(supPaisajeUrbano, color, (x, y), radio)

    # Expanding phase.
    for radio in range(1, tamañoExplosión):
        círculo(COLOR_EXPLOSIÓN, radio)
        pygame.display.update()
        time.sleep(velocidad)
    # Contracting phase: wipe the largest circle with sky, redraw smaller.
    for radio in range(tamañoExplosión, 1, -1):
        círculo(COLOR_CIELO, tamañoExplosión)
        círculo(COLOR_EXPLOSIÓN, radio)
        pygame.display.update()
        time.sleep(velocidad)
    círculo(COLOR_CIELO, 2)
    pygame.display.update()
def main():
supVentana = pygame.display.set_mode((ANCHO_PNT, ALTURA_PNT), 0, 32)
"""supVentana, siendo el objeto devuelto por pygame.display.set_mode(), será dibujado sobre la pantalla
cada vez que se llame a pygame.display.update()."""
# Descomentar cualquiera de las siguientes líneas para poner el juego en modo pantalla completa.
##supVentana = pygame.display.set_mode((SCR_WIDTH, SCR_HEIGHT), pygame.FULLSCREEN, 32)
##pygame.display.toggle_fullscreen()
pygame.display.set_caption('Gorilas.py')
mostrarPantallaInicio(supVentana)
while True:
# comenzar un nuevo juego
j1nombre, j2nombre, puntosVictoria, gravedad, pantallaSiguiente = mostrarPantallaConfiguración(supVentana)
if pantallaSiguiente == 'v':
mostrarPantallaIntro(supVentana, j1nombre, j2nombre)
# Reiniciar los puntajes y dar el turno al primer jugador.
j1puntos = 0
j2puntos = 0
turno = 1
nuevaRonda = True
while j1puntos < puntosVictoria and j2puntos < puntosVictoria:
if nuevaRonda:
# Al comienzo de una nueva ronda, crear un nuevo paisaje urbano, colocar los gorilas y obtener la velocidad del viento.
supPaisajeUrbano, coordsEdif = crearPaisajeUrbano() # Notar que el paisaje urbano se almacena en supPaisajeUrbano, no en supVentana.
posGor = ubicarGorilas(coordsEdif)
viento = obtenerViento()
nuevaRonda = False
# Hacer el dibujo completo.
supVentana.blit(supPaisajeUrbano, (0,0))
dibujarGorila(supVentana, posGor[0][0], posGor[0][1], 0)
dibujarGorila(supVentana, posGor[1][0], posGor[1][1], 0)
dibujarViento(supVentana, viento)
dibujarSol(supVentana)
mostrarPuntaje(supVentana, j1puntos, j2puntos)
pygame.display.update()
ángulo, velocidad = obtenerTiro(supVentana, j1nombre, j2nombre, turno)
if turno == 1:
gorx, gory = posGor[0][0], posGor[0][1]
elif turno == 2:
gorx, gory = posGor[1][0], posGor[1][1]
result = dibujarTiro(supVentana, supPaisajeUrbano, ángulo, velocidad, turno, viento, 9.8, posGor[0], posGor[1])
if | |
(0x218, 'M', u'ș'),
(0x219, 'V'),
(0x21A, 'M', u'ț'),
(0x21B, 'V'),
(0x21C, 'M', u'ȝ'),
(0x21D, 'V'),
(0x21E, 'M', u'ȟ'),
(0x21F, 'V'),
(0x220, 'M', u'ƞ'),
(0x221, 'V'),
(0x222, 'M', u'ȣ'),
(0x223, 'V'),
(0x224, 'M', u'ȥ'),
(0x225, 'V'),
(0x226, 'M', u'ȧ'),
(0x227, 'V'),
(0x228, 'M', u'ȩ'),
(0x229, 'V'),
(0x22A, 'M', u'ȫ'),
(0x22B, 'V'),
(0x22C, 'M', u'ȭ'),
(0x22D, 'V'),
(0x22E, 'M', u'ȯ'),
(0x22F, 'V'),
(0x230, 'M', u'ȱ'),
(0x231, 'V'),
(0x232, 'M', u'ȳ'),
(0x233, 'V'),
(0x23A, 'M', u'ⱥ'),
(0x23B, 'M', u'ȼ'),
(0x23C, 'V'),
(0x23D, 'M', u'ƚ'),
(0x23E, 'M', u'ⱦ'),
(0x23F, 'V'),
(0x241, 'M', u'ɂ'),
(0x242, 'V'),
(0x243, 'M', u'ƀ'),
(0x244, 'M', u'ʉ'),
(0x245, 'M', u'ʌ'),
(0x246, 'M', u'ɇ'),
(0x247, 'V'),
(0x248, 'M', u'ɉ'),
(0x249, 'V'),
(0x24A, 'M', u'ɋ'),
(0x24B, 'V'),
(0x24C, 'M', u'ɍ'),
(0x24D, 'V'),
(0x24E, 'M', u'ɏ'),
(0x24F, 'V'),
(0x2B0, 'M', u'h'),
(0x2B1, 'M', u'ɦ'),
(0x2B2, 'M', u'j'),
(0x2B3, 'M', u'r'),
(0x2B4, 'M', u'ɹ'),
(0x2B5, 'M', u'ɻ'),
(0x2B6, 'M', u'ʁ'),
(0x2B7, 'M', u'w'),
(0x2B8, 'M', u'y'),
(0x2B9, 'V'),
(0x2D8, '3', u' ̆'),
(0x2D9, '3', u' ̇'),
(0x2DA, '3', u' ̊'),
(0x2DB, '3', u' ̨'),
(0x2DC, '3', u' ̃'),
(0x2DD, '3', u' ̋'),
(0x2DE, 'V'),
(0x2E0, 'M', u'ɣ'),
(0x2E1, 'M', u'l'),
(0x2E2, 'M', u's'),
(0x2E3, 'M', u'x'),
(0x2E4, 'M', u'ʕ'),
(0x2E5, 'V'),
(0x340, 'M', u'̀'),
(0x341, 'M', u'́'),
(0x342, 'V'),
(0x343, 'M', u'̓'),
(0x344, 'M', u'̈́'),
(0x345, 'M', u'ι'),
(0x346, 'V'),
(0x34F, 'I'),
(0x350, 'V'),
(0x370, 'M', u'ͱ'),
(0x371, 'V'),
(0x372, 'M', u'ͳ'),
(0x373, 'V'),
(0x374, 'M', u'ʹ'),
(0x375, 'V'),
(0x376, 'M', u'ͷ'),
(0x377, 'V'),
]
def _seg_6():
    """Return one segment of the generated code-point status table.

    Rows are ``(codepoint, status)`` or ``(codepoint, status, mapping)``;
    presumably IDNA / UTS #46 normalization data split into segments to keep
    each function small -- TODO confirm. Generated data: do not edit by hand.
    """
    return [
    (0x378, 'X'),
    (0x37A, '3', u' ι'),
    (0x37B, 'V'),
    (0x37E, '3', u';'),
    (0x37F, 'M', u'ϳ'),
    (0x380, 'X'),
    (0x384, '3', u' ́'),
    (0x385, '3', u' ̈́'),
    (0x386, 'M', u'ά'),
    (0x387, 'M', u'·'),
    (0x388, 'M', u'έ'),
    (0x389, 'M', u'ή'),
    (0x38A, 'M', u'ί'),
    (0x38B, 'X'),
    (0x38C, 'M', u'ό'),
    (0x38D, 'X'),
    (0x38E, 'M', u'ύ'),
    (0x38F, 'M', u'ώ'),
    (0x390, 'V'),
    (0x391, 'M', u'α'),
    (0x392, 'M', u'β'),
    (0x393, 'M', u'γ'),
    (0x394, 'M', u'δ'),
    (0x395, 'M', u'ε'),
    (0x396, 'M', u'ζ'),
    (0x397, 'M', u'η'),
    (0x398, 'M', u'θ'),
    (0x399, 'M', u'ι'),
    (0x39A, 'M', u'κ'),
    (0x39B, 'M', u'λ'),
    (0x39C, 'M', u'μ'),
    (0x39D, 'M', u'ν'),
    (0x39E, 'M', u'ξ'),
    (0x39F, 'M', u'ο'),
    (0x3A0, 'M', u'π'),
    (0x3A1, 'M', u'ρ'),
    (0x3A2, 'X'),
    (0x3A3, 'M', u'σ'),
    (0x3A4, 'M', u'τ'),
    (0x3A5, 'M', u'υ'),
    (0x3A6, 'M', u'φ'),
    (0x3A7, 'M', u'χ'),
    (0x3A8, 'M', u'ψ'),
    (0x3A9, 'M', u'ω'),
    (0x3AA, 'M', u'ϊ'),
    (0x3AB, 'M', u'ϋ'),
    (0x3AC, 'V'),
    (0x3C2, 'D', u'σ'),
    (0x3C3, 'V'),
    (0x3CF, 'M', u'ϗ'),
    (0x3D0, 'M', u'β'),
    (0x3D1, 'M', u'θ'),
    (0x3D2, 'M', u'υ'),
    (0x3D3, 'M', u'ύ'),
    (0x3D4, 'M', u'ϋ'),
    (0x3D5, 'M', u'φ'),
    (0x3D6, 'M', u'π'),
    (0x3D7, 'V'),
    (0x3D8, 'M', u'ϙ'),
    (0x3D9, 'V'),
    (0x3DA, 'M', u'ϛ'),
    (0x3DB, 'V'),
    (0x3DC, 'M', u'ϝ'),
    (0x3DD, 'V'),
    (0x3DE, 'M', u'ϟ'),
    (0x3DF, 'V'),
    (0x3E0, 'M', u'ϡ'),
    (0x3E1, 'V'),
    (0x3E2, 'M', u'ϣ'),
    (0x3E3, 'V'),
    (0x3E4, 'M', u'ϥ'),
    (0x3E5, 'V'),
    (0x3E6, 'M', u'ϧ'),
    (0x3E7, 'V'),
    (0x3E8, 'M', u'ϩ'),
    (0x3E9, 'V'),
    (0x3EA, 'M', u'ϫ'),
    (0x3EB, 'V'),
    (0x3EC, 'M', u'ϭ'),
    (0x3ED, 'V'),
    (0x3EE, 'M', u'ϯ'),
    (0x3EF, 'V'),
    (0x3F0, 'M', u'κ'),
    (0x3F1, 'M', u'ρ'),
    (0x3F2, 'M', u'σ'),
    (0x3F3, 'V'),
    (0x3F4, 'M', u'θ'),
    (0x3F5, 'M', u'ε'),
    (0x3F6, 'V'),
    (0x3F7, 'M', u'ϸ'),
    (0x3F8, 'V'),
    (0x3F9, 'M', u'σ'),
    (0x3FA, 'M', u'ϻ'),
    (0x3FB, 'V'),
    (0x3FD, 'M', u'ͻ'),
    (0x3FE, 'M', u'ͼ'),
    (0x3FF, 'M', u'ͽ'),
    (0x400, 'M', u'ѐ'),
    (0x401, 'M', u'ё'),
    (0x402, 'M', u'ђ'),
    ]
def _seg_7():
    """Return one segment of the generated code-point status table.

    Rows are ``(codepoint, status)`` or ``(codepoint, status, mapping)``;
    presumably IDNA / UTS #46 normalization data -- TODO confirm.
    Generated data: do not edit by hand.
    """
    return [
    (0x403, 'M', u'ѓ'),
    (0x404, 'M', u'є'),
    (0x405, 'M', u'ѕ'),
    (0x406, 'M', u'і'),
    (0x407, 'M', u'ї'),
    (0x408, 'M', u'ј'),
    (0x409, 'M', u'љ'),
    (0x40A, 'M', u'њ'),
    (0x40B, 'M', u'ћ'),
    (0x40C, 'M', u'ќ'),
    (0x40D, 'M', u'ѝ'),
    (0x40E, 'M', u'ў'),
    (0x40F, 'M', u'џ'),
    (0x410, 'M', u'а'),
    (0x411, 'M', u'б'),
    (0x412, 'M', u'в'),
    (0x413, 'M', u'г'),
    (0x414, 'M', u'д'),
    (0x415, 'M', u'е'),
    (0x416, 'M', u'ж'),
    (0x417, 'M', u'з'),
    (0x418, 'M', u'и'),
    (0x419, 'M', u'й'),
    (0x41A, 'M', u'к'),
    (0x41B, 'M', u'л'),
    (0x41C, 'M', u'м'),
    (0x41D, 'M', u'н'),
    (0x41E, 'M', u'о'),
    (0x41F, 'M', u'п'),
    (0x420, 'M', u'р'),
    (0x421, 'M', u'с'),
    (0x422, 'M', u'т'),
    (0x423, 'M', u'у'),
    (0x424, 'M', u'ф'),
    (0x425, 'M', u'х'),
    (0x426, 'M', u'ц'),
    (0x427, 'M', u'ч'),
    (0x428, 'M', u'ш'),
    (0x429, 'M', u'щ'),
    (0x42A, 'M', u'ъ'),
    (0x42B, 'M', u'ы'),
    (0x42C, 'M', u'ь'),
    (0x42D, 'M', u'э'),
    (0x42E, 'M', u'ю'),
    (0x42F, 'M', u'я'),
    (0x430, 'V'),
    (0x460, 'M', u'ѡ'),
    (0x461, 'V'),
    (0x462, 'M', u'ѣ'),
    (0x463, 'V'),
    (0x464, 'M', u'ѥ'),
    (0x465, 'V'),
    (0x466, 'M', u'ѧ'),
    (0x467, 'V'),
    (0x468, 'M', u'ѩ'),
    (0x469, 'V'),
    (0x46A, 'M', u'ѫ'),
    (0x46B, 'V'),
    (0x46C, 'M', u'ѭ'),
    (0x46D, 'V'),
    (0x46E, 'M', u'ѯ'),
    (0x46F, 'V'),
    (0x470, 'M', u'ѱ'),
    (0x471, 'V'),
    (0x472, 'M', u'ѳ'),
    (0x473, 'V'),
    (0x474, 'M', u'ѵ'),
    (0x475, 'V'),
    (0x476, 'M', u'ѷ'),
    (0x477, 'V'),
    (0x478, 'M', u'ѹ'),
    (0x479, 'V'),
    (0x47A, 'M', u'ѻ'),
    (0x47B, 'V'),
    (0x47C, 'M', u'ѽ'),
    (0x47D, 'V'),
    (0x47E, 'M', u'ѿ'),
    (0x47F, 'V'),
    (0x480, 'M', u'ҁ'),
    (0x481, 'V'),
    (0x48A, 'M', u'ҋ'),
    (0x48B, 'V'),
    (0x48C, 'M', u'ҍ'),
    (0x48D, 'V'),
    (0x48E, 'M', u'ҏ'),
    (0x48F, 'V'),
    (0x490, 'M', u'ґ'),
    (0x491, 'V'),
    (0x492, 'M', u'ғ'),
    (0x493, 'V'),
    (0x494, 'M', u'ҕ'),
    (0x495, 'V'),
    (0x496, 'M', u'җ'),
    (0x497, 'V'),
    (0x498, 'M', u'ҙ'),
    (0x499, 'V'),
    (0x49A, 'M', u'қ'),
    (0x49B, 'V'),
    (0x49C, 'M', u'ҝ'),
    (0x49D, 'V'),
    ]
def _seg_8():
    """Return one segment of the generated code-point status table.

    Rows are ``(codepoint, status)`` or ``(codepoint, status, mapping)``;
    presumably IDNA / UTS #46 normalization data -- TODO confirm.
    Generated data: do not edit by hand.
    """
    return [
    (0x49E, 'M', u'ҟ'),
    (0x49F, 'V'),
    (0x4A0, 'M', u'ҡ'),
    (0x4A1, 'V'),
    (0x4A2, 'M', u'ң'),
    (0x4A3, 'V'),
    (0x4A4, 'M', u'ҥ'),
    (0x4A5, 'V'),
    (0x4A6, 'M', u'ҧ'),
    (0x4A7, 'V'),
    (0x4A8, 'M', u'ҩ'),
    (0x4A9, 'V'),
    (0x4AA, 'M', u'ҫ'),
    (0x4AB, 'V'),
    (0x4AC, 'M', u'ҭ'),
    (0x4AD, 'V'),
    (0x4AE, 'M', u'ү'),
    (0x4AF, 'V'),
    (0x4B0, 'M', u'ұ'),
    (0x4B1, 'V'),
    (0x4B2, 'M', u'ҳ'),
    (0x4B3, 'V'),
    (0x4B4, 'M', u'ҵ'),
    (0x4B5, 'V'),
    (0x4B6, 'M', u'ҷ'),
    (0x4B7, 'V'),
    (0x4B8, 'M', u'ҹ'),
    (0x4B9, 'V'),
    (0x4BA, 'M', u'һ'),
    (0x4BB, 'V'),
    (0x4BC, 'M', u'ҽ'),
    (0x4BD, 'V'),
    (0x4BE, 'M', u'ҿ'),
    (0x4BF, 'V'),
    (0x4C0, 'X'),
    (0x4C1, 'M', u'ӂ'),
    (0x4C2, 'V'),
    (0x4C3, 'M', u'ӄ'),
    (0x4C4, 'V'),
    (0x4C5, 'M', u'ӆ'),
    (0x4C6, 'V'),
    (0x4C7, 'M', u'ӈ'),
    (0x4C8, 'V'),
    (0x4C9, 'M', u'ӊ'),
    (0x4CA, 'V'),
    (0x4CB, 'M', u'ӌ'),
    (0x4CC, 'V'),
    (0x4CD, 'M', u'ӎ'),
    (0x4CE, 'V'),
    (0x4D0, 'M', u'ӑ'),
    (0x4D1, 'V'),
    (0x4D2, 'M', u'ӓ'),
    (0x4D3, 'V'),
    (0x4D4, 'M', u'ӕ'),
    (0x4D5, 'V'),
    (0x4D6, 'M', u'ӗ'),
    (0x4D7, 'V'),
    (0x4D8, 'M', u'ә'),
    (0x4D9, 'V'),
    (0x4DA, 'M', u'ӛ'),
    (0x4DB, 'V'),
    (0x4DC, 'M', u'ӝ'),
    (0x4DD, 'V'),
    (0x4DE, 'M', u'ӟ'),
    (0x4DF, 'V'),
    (0x4E0, 'M', u'ӡ'),
    (0x4E1, 'V'),
    (0x4E2, 'M', u'ӣ'),
    (0x4E3, 'V'),
    (0x4E4, 'M', u'ӥ'),
    (0x4E5, 'V'),
    (0x4E6, 'M', u'ӧ'),
    (0x4E7, 'V'),
    (0x4E8, 'M', u'ө'),
    (0x4E9, 'V'),
    (0x4EA, 'M', u'ӫ'),
    (0x4EB, 'V'),
    (0x4EC, 'M', u'ӭ'),
    (0x4ED, 'V'),
    (0x4EE, 'M', u'ӯ'),
    (0x4EF, 'V'),
    (0x4F0, 'M', u'ӱ'),
    (0x4F1, 'V'),
    (0x4F2, 'M', u'ӳ'),
    (0x4F3, 'V'),
    (0x4F4, 'M', u'ӵ'),
    (0x4F5, 'V'),
    (0x4F6, 'M', u'ӷ'),
    (0x4F7, 'V'),
    (0x4F8, 'M', u'ӹ'),
    (0x4F9, 'V'),
    (0x4FA, 'M', u'ӻ'),
    (0x4FB, 'V'),
    (0x4FC, 'M', u'ӽ'),
    (0x4FD, 'V'),
    (0x4FE, 'M', u'ӿ'),
    (0x4FF, 'V'),
    (0x500, 'M', u'ԁ'),
    (0x501, 'V'),
    (0x502, 'M', u'ԃ'),
    ]
def _seg_9():
return [
(0x503, 'V'),
(0x504, 'M', u'ԅ'),
(0x505, 'V'),
(0x506, 'M', u'ԇ'),
(0x507, 'V'),
(0x508, 'M', u'ԉ'),
(0x509, 'V'),
(0x50A, 'M', u'ԋ'),
(0x50B, 'V'),
(0x50C, 'M', u'ԍ'),
(0x50D, 'V'),
(0x50E, 'M', u'ԏ'),
(0x50F, 'V'),
(0x510, 'M', u'ԑ'),
(0x511, 'V'),
(0x512, 'M', u'ԓ'),
(0x513, 'V'),
(0x514, 'M', u'ԕ'),
(0x515, 'V'),
(0x516, 'M', u'ԗ'),
(0x517, 'V'),
(0x518, 'M', u'ԙ'),
(0x519, 'V'),
(0x51A, 'M', u'ԛ'),
(0x51B, 'V'),
(0x51C, 'M', u'ԝ'),
(0x51D, 'V'),
(0x51E, 'M', u'ԟ'),
(0x51F, 'V'),
(0x520, 'M', u'ԡ'),
(0x521, 'V'),
(0x522, 'M', u'ԣ'),
(0x523, 'V'),
(0x524, 'M', u'ԥ'),
(0x525, 'V'),
(0x526, 'M', u'ԧ'),
(0x527, 'V'),
(0x528, 'M', u'ԩ'),
(0x529, 'V'),
(0x52A, 'M', u'ԫ'),
(0x52B, 'V'),
(0x52C, 'M', u'ԭ'),
(0x52D, 'V'),
(0x52E, 'M', u'ԯ'),
(0x52F, 'V'),
(0x530, 'X'),
(0x531, 'M', u'ա'),
(0x532, 'M', u'բ'),
(0x533, 'M', u'գ'),
(0x534, 'M', u'դ'),
(0x535, 'M', u'ե'),
(0x536, 'M', u'զ'),
(0x537, 'M', u'է'),
(0x538, 'M', u'ը'),
(0x539, 'M', u'թ'),
(0x53A, 'M', u'ժ'),
(0x53B, 'M', u'ի'),
(0x53C, 'M', u'լ'),
(0x53D, 'M', u'խ'),
(0x53E, 'M', u'ծ'),
(0x53F, 'M', u'կ'),
(0x540, 'M', u'հ'),
(0x541, 'M', u'ձ'),
(0x542, 'M', u'ղ'),
(0x543, 'M', u'ճ'),
(0x544, 'M', u'մ'),
(0x545, 'M', u'յ'),
(0x546, 'M', u'ն'),
(0x547, 'M', u'շ'),
(0x548, 'M', u'ո'),
(0x549, 'M', u'չ'),
(0x54A, 'M', u'պ'),
(0x54B, 'M', u'ջ'),
(0x54C, 'M', u'ռ'),
(0x54D, 'M', u'ս'),
(0x54E, 'M', u'վ'),
(0x54F, 'M', u'տ'),
(0x550, 'M', u'ր'),
(0x551, 'M', u'ց'),
(0x552, 'M', u'ւ'),
(0x553, 'M', u'փ'),
(0x554, 'M', u'ք'),
(0x555, 'M', u'օ'),
| |
# <gh_stars>1-10
import os.path as osp
import glob
import pickle
import random
import glob
import pickle
import torch
import os.path as osp
import numpy as np
import networkx as nx
import torch_geometric as tg
from torch_geometric.data import Data
from torch_geometric.utils import degree
from torch_scatter import scatter
import torch
import torch.nn.functional as F
from rdkit import Chem
from rdkit.Chem.rdchem import ChiralType
from rdkit.Chem.rdchem import HybridizationType
from rdkit.Chem.rdchem import BondType as BT
from rdkit.Chem.rdchem import ChiralType
# Run on GPU when available; the module-level lookup tensors below live on this device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Row i (indexed by neighbor count, see batch_angles_from_coords) marks which of
# the 6 neighbor-pair combinations in angle_combos are valid (1) vs padding (0).
angle_mask_ref = torch.LongTensor([[0, 0, 0, 0, 0, 0],
                                   [0, 0, 0, 0, 0, 0],
                                   [1, 0, 0, 0, 0, 0],
                                   [1, 1, 1, 0, 0, 0],
                                   [1, 1, 1, 1, 1, 1]]).to(device)
# All unordered pairs drawn from up to 4 neighbor slots; used to enumerate angles.
angle_combos = torch.LongTensor([[0, 1],
                                 [0, 2],
                                 [1, 2],
                                 [0, 3],
                                 [1, 3],
                                 [2, 3]]).to(device)
def align_coords_Kabsch(p_cycle_coords, q_cycle_coords, p_mask, q_mask=None):
    """Align p_cycle_coords onto q_cycle_coords with the Kabsch algorithm.

    The RMSD-minimising rotation and translation are fitted over the atoms
    selected by the masks, then applied to ALL atoms.

    :param p_cycle_coords: coordinates to align, shape (P, Q, n_atoms, 3)
    :param q_cycle_coords: reference coordinates, shape (Q, n_atoms, 3)
    :param p_mask: index list / tensor over p's atom dimension used for the fit
    :param q_mask: index list / tensor over q's atom dimension; defaults to p_mask
    :return: aligned copy of p_cycle_coords, shape (P, Q, n_atoms, 3)
    """
    # Bug fix: the original tested ``if not q_mask``, which raises
    # "Boolean value of Tensor with more than one element is ambiguous" when a
    # tensor mask is passed (and misfires on an empty list). Check for None.
    if q_mask is None:
        q_mask = p_mask
    # Center both point sets on the centroid of the masked (fit) atoms.
    q_cycle_coords_centered = q_cycle_coords[:, q_mask] - q_cycle_coords[:, q_mask].mean(dim=1, keepdim=True)
    p_cycle_coords_centered = p_cycle_coords[:, :, p_mask] - p_cycle_coords[:, :, p_mask].mean(dim=2, keepdim=True)
    # Cross-covariance and its SVD give the optimal rotation.
    H = torch.matmul(p_cycle_coords_centered.permute(0, 1, 3, 2), q_cycle_coords_centered.unsqueeze(0))
    u, s, v = torch.svd(H)
    # Flip the last singular axis when det < 0 to keep a proper rotation
    # (avoid reflections).
    d = torch.sign(torch.det(torch.matmul(v, u.permute(0, 1, 3, 2))))
    R_1 = torch.diag_embed(torch.ones([p_cycle_coords.size(0), q_cycle_coords.size(0), 3]))
    R_1[:, :, 2, 2] = d
    R = torch.matmul(v, torch.matmul(R_1, u.permute(0, 1, 3, 2)))
    # Translation maps the rotated p centroid onto the q centroid.
    b = q_cycle_coords[:, q_mask].mean(dim=1) - torch.matmul(R, p_cycle_coords[:, :, p_mask].mean(dim=2).unsqueeze(
        -1)).squeeze(-1)
    p_cycle_coords_aligned = torch.matmul(R, p_cycle_coords.permute(0, 1, 3, 2)).permute(0, 1, 3, 2) + b.unsqueeze(2)
    return p_cycle_coords_aligned
def get_neighbor_ids(data):
    """Merge the per-molecule neighbor dicts of a batch into one dict keyed
    by batch-level atom indices.

    Note: ``data.neighbors`` is consumed destructively (the first dict is
    popped off), matching the original behaviour. Only atoms with degree > 1
    are present in the per-molecule dicts.
    """
    merged = data.neighbors.pop(0)  # molecule 0 already uses batch-level ids
    atoms_per_mol = data.batch.bincount()
    offset = 0
    # Remaining dicts use molecule-local indices; shift keys and neighbor
    # tensors by the cumulative atom count of all preceding molecules.
    for mol_idx, mol_neighbors in enumerate(data.neighbors):
        offset += atoms_per_mol[mol_idx].item()
        merged.update({k + offset: v + offset for k, v in mol_neighbors.items()})
    return merged
def get_neighbor_bonds(edge_index, bond_type):
    """Map each source atom index to the bond types of its outgoing edges.

    Atoms with a single edge (degree 1) are omitted.
    NOTE(review): torch.unique returns sorted values and the bond tensor is
    split sequentially, so this assumes edges are grouped by ascending source
    index -- confirm with the graph builder.
    """
    source = edge_index[0]
    atom_ids, edge_counts = torch.unique(source, return_counts=True)
    per_atom_bonds = torch.split_with_sizes(bond_type, tuple(edge_counts))
    result = {}
    for atom, bonds in zip(atom_ids, per_atom_bonds):
        if len(bonds) > 1:
            result[atom.item()] = bonds
    return result
def get_leaf_hydrogens(neighbors, x):
    """For each atom, flag which of its neighbors are (leaf) hydrogens.

    Returns {atom index: boolean tensor over that atom's neighbor list}.
    NOTE(review): the original docstring claimed the 5th feature column holds
    the atomic number, but the code reads column 0 -- confirm the feature
    layout against the featurizer before relying on either claim.
    """
    is_hydrogen = x[:, 0] == 1
    return {atom: is_hydrogen[nbrs] for atom, nbrs in neighbors.items()}
def get_dihedral_pairs(edge_index, data):
    """
    Given edge indices, return pairs of indices that we must calculate dihedrals for

    Only edges whose both endpoints have degree > 1 can carry a dihedral.
    Ring edges are expanded via the graph's cycle basis so rings are handled
    as whole cycles; relies on the helper ``get_current_cycle_indices``
    (defined elsewhere in this module) and the module-level ``device``.
    Returns a (2, n_kept_edges) LongTensor.
    """
    start, end = edge_index
    degrees = degree(end)
    # Keep only edges where both endpoints have degree > 1.
    dihedral_pairs_true = torch.nonzero(torch.logical_and(degrees[start] > 1, degrees[end] > 1))
    dihedral_pairs = edge_index[:, dihedral_pairs_true].squeeze(-1)
    # # first method which removes one (pseudo) random edge from a cycle
    # Deduplicate the two directed copies of each undirected edge: keep the
    # orientation whose smaller endpoint comes first after sorting.
    dihedral_idxs = torch.nonzero(dihedral_pairs.sort(dim=0).indices[0, :] == 0).squeeze().detach().cpu().numpy()
    # prioritize rings for assigning dihedrals
    dihedral_pairs = dihedral_pairs.t()[dihedral_idxs]
    G = nx.to_undirected(tg.utils.to_networkx(data))
    cycles = nx.cycle_basis(G)
    keep, sorted_keep = [], []
    # A single surviving pair loses its leading dim after indexing; restore it.
    if len(dihedral_pairs.shape) == 1:
        dihedral_pairs = dihedral_pairs.unsqueeze(0)
    for pair in dihedral_pairs:
        x, y = pair
        # Skip pairs already covered by an earlier cycle expansion.
        if sorted(pair) in sorted_keep:
            continue
        y_cycle_check = [y in cycle for cycle in cycles]
        x_cycle_check = [x in cycle for cycle in cycles]
        # Both endpoints in a ring: replace the pair with the whole cycle's edges.
        if any(x_cycle_check) and any(y_cycle_check):
            cycle_indices = get_current_cycle_indices(cycles, x_cycle_check, x)
            keep.extend(cycle_indices)
            sorted_keep.extend([sorted(c) for c in cycle_indices])
            continue
        # Only y in a ring: keep the pair AND append the ring's edges.
        if any(y_cycle_check):
            cycle_indices = get_current_cycle_indices(cycles, y_cycle_check, y)
            keep.append(pair)
            keep.extend(cycle_indices)
            sorted_keep.append(sorted(pair))
            sorted_keep.extend([sorted(c) for c in cycle_indices])
            continue
        # Acyclic edge: keep as-is.
        keep.append(pair)
    keep = [t.to(device) for t in keep]
    return torch.stack(keep).t()
def batch_distance_metrics_from_coords(coords, mask):
    """Compute bond (one-hop) distances and masked two-hop distance matrices
    for local neighborhoods, with the central atom assumed at the origin.

    ``coords`` is 4-D ``(batch, n_neighbors, n_confs, 3)`` or 5-D with one
    extra model dimension; ``mask`` is ``(batch, n_neighbors)``.
    NOTE(review): any other rank leaves ``two_hop`` unbound and raises
    UnboundLocalError, exactly as the original did.
    """
    pair_mask = mask.unsqueeze(1) * mask.unsqueeze(2)
    if coords.dim() == 4:
        diffs = coords.unsqueeze(1) - coords.unsqueeze(2) + 1e-10
        two_hop = diffs.square().sum(dim=-1).sqrt() * pair_mask.unsqueeze(-1)
        # Distance of each neighbor from the origin (the central atom).
        one_hop = torch.linalg.norm(torch.zeros_like(coords[0]).unsqueeze(0) - coords, dim=-1)
    elif coords.dim() == 5:
        diffs = coords.unsqueeze(2) - coords.unsqueeze(3) + 1e-10
        two_hop = diffs.square().sum(dim=-1).sqrt() * pair_mask.unsqueeze(-1).unsqueeze(1)
        one_hop = torch.linalg.norm(torch.zeros_like(coords[0]).unsqueeze(0) - coords, dim=-1)
    return one_hop, two_hop
def batch_angle_between_vectors(a, b):
    """Return the cosine of the angle between each pair of vectors in a and b
    (last dimension is the vector dimension)."""
    dot = (a * b).sum(dim=-1)
    # Small epsilon keeps the division defined for (near-)null vectors.
    norm_product = torch.linalg.norm(a, dim=-1) * torch.linalg.norm(b, dim=-1)
    return dot / (norm_product + 1e-10)
def batch_angles_from_coords(coords, mask):
    """Compute cosines of every neighbor-pair angle in each local neighborhood.

    Uses the module-level ``angle_combos`` pair table; invalid pairs (beyond
    the actual neighbor count, looked up in ``angle_mask_ref``) are zeroed.
    ``coords`` may be 4-D or 5-D (extra leading model dimension).
    """
    valid = angle_mask_ref[mask.sum(dim=1).long()]
    if coords.dim() == 4:
        pair_coords = coords[:, angle_combos]
        first, second = pair_coords.split(1, dim=2)
        angles = batch_angle_between_vectors(first.squeeze(2), second.squeeze(2)) * valid.unsqueeze(-1)
    elif coords.dim() == 5:
        pair_coords = coords[:, :, angle_combos]
        first, second = pair_coords.split(1, dim=3)
        angles = batch_angle_between_vectors(first.squeeze(3), second.squeeze(3)) * valid.unsqueeze(-1).unsqueeze(-1)
    return angles
def batch_local_stats_from_coords(coords, mask):
    """Convenience wrapper: bond distances, two-hop distance matrix and
    neighbor-pair angle cosines for each local neighborhood (the central
    atom is assumed to sit at the origin)."""
    distances = batch_distance_metrics_from_coords(coords, mask)
    return (*distances, batch_angles_from_coords(coords, mask))
def batch_dihedrals(p0, p1, p2, p3, angle=False):
    """Dihedral angle defined by the point batches p0-p1-p2-p3.

    If ``angle`` is True, returns the signed angle in radians; otherwise
    returns the (sin, cos) pair normalised by the plane-normal magnitudes.
    """
    b1, b2, b3 = p1 - p0, p2 - p1, p3 - p2
    # Normals of the two planes spanned by consecutive bond vectors.
    n1 = torch.cross(b1, b2, dim=-1)
    n2 = torch.cross(b2, b3, dim=-1)
    unnorm_sin = torch.linalg.norm(b2, dim=-1) * torch.sum(b1 * n2, dim=-1)
    unnorm_cos = torch.sum(n1 * n2, dim=-1)
    if angle:
        # Epsilon keeps atan2 defined when both terms vanish.
        return torch.atan2(unnorm_sin, unnorm_cos + 1e-10)
    norm_prod = torch.linalg.norm(n1, dim=-1) * torch.linalg.norm(n2, dim=-1) + 1e-10
    return unnorm_sin / norm_prod, unnorm_cos / norm_prod
def batch_vector_angles(xn, x, y, yn):
    """Cosine of the angle between the vectors (xn - x) and (yn - y),
    flattened to rows of nine.

    NOTE(review): the reshape to (-1, 9) presumably groups the 3x3 neighbor
    combinations of a dihedral pair -- confirm with the caller.
    """
    v1 = xn.view(-1, 3) - x.view(-1, 3)
    v2 = yn.view(-1, 3) - y.view(-1, 3)
    # Row-wise dot product (equivalent to the original bmm formulation).
    dots = (v1 * v2).sum(dim=-1)
    norms = torch.linalg.norm(v1, dim=-1) * torch.linalg.norm(v2, dim=-1) + 1e-10
    return (dots / norms).view(-1, 9)
def von_Mises_loss(a, b, a_sin=None, b_sin=None):
    """Cosine of an angular difference: cos(x - y) = cos x cos y + sin x sin y.

    :param a: cos of first angle
    :param b: cos of second angle
    :param a_sin: optional sin of first angle (tensor); if absent, both sines
        are recovered as sqrt(1 - cos^2 + 1e-5), which assumes non-negative sine
    :param b_sin: optional sin of second angle
    :return: difference of cosines
    """
    if not torch.is_tensor(a_sin):
        # Recover sines from cosines; the epsilon guards sqrt near |cos| = 1.
        a_sin = torch.sqrt(1 - a ** 2 + 1e-5)
        b_sin = torch.sqrt(1 - b ** 2 + 1e-5)
    return a * b + a_sin * b_sin
def rotation_matrix(neighbor_coords, neighbor_mask, neighbor_map, mu=None):
    """
    Given predicted neighbor coordinates from model, return rotation matrix
    :param neighbor_coords: neighbor coordinates for each edge as defined by dihedral_pairs
        (n_dihedral_pairs, 4, n_generated_confs, 3)
    :param neighbor_mask: mask describing which atoms are present (n_dihedral_pairs, 4)
    :param neighbor_map: mask describing which neighbor corresponds to the other central dihedral atom
        (n_dihedral_pairs, 4) each entry in neighbor_map should have one TRUE entry with the rest as FALSE
    :param mu: optional precomputed mean direction; when not a tensor (e.g. the
        default None), it is derived from the non-partner neighbors below
    :return: rotation matrix (n_dihedral_pairs, n_model_confs, 3, 3)
    """
    if not torch.is_tensor(mu):
        # Mean of the 3 non-partner neighbor slots; padded slots contribute
        # zeros, hence dividing by (neighbor count - 1) with an epsilon.
        mu_num = neighbor_coords[~neighbor_map.bool()].view(neighbor_coords.size(0), 3, neighbor_coords.size(2), -1).sum(dim=1)
        mu_den = (neighbor_mask.sum(dim=-1, keepdim=True).unsqueeze(-1) - 1 + 1e-10)
        mu = mu_num / mu_den
        mu = mu.squeeze(1)
    # p_Y: coordinates of the partner (other central dihedral) atom.
    p_Y = neighbor_coords[neighbor_map.bool(), :]
    # h1: unit vector toward the partner atom.
    h1 = p_Y / (torch.linalg.norm(p_Y, dim=-1, keepdim=True) + 1e-10)
    # h3: unit normal of the plane spanned by p_Y and mu.
    h3_1 = torch.cross(p_Y, mu, dim=-1)
    h3 = h3_1 / (torch.linalg.norm(h3_1, dim=-1, keepdim=True) + 1e-10)
    # h2 completes the right-handed orthonormal frame.
    h2 = -torch.cross(h1, h3, dim=-1)
    # Stack the three axes as rows of the rotation matrix.
    H = torch.cat([h1.unsqueeze(-2),
                   h2.unsqueeze(-2),
                   h3.unsqueeze(-2)], dim=-2)
    return H
def rotation_matrix_v2(neighbor_coords, neighbor_mask, neighbor_map):
    """Build a right-handed orthonormal frame per dihedral pair.

    The first axis points along the partner neighbor p_Y (selected by
    ``neighbor_map``); the remaining axes are fixed by a random direction
    projected orthogonal to p_Y (one Gram-Schmidt step).
    ``neighbor_mask`` is accepted for signature compatibility but unused.
    :return: rotation matrix with rows (h1, h2, h3), shape (..., 3, 3)
    """
    p_Y = neighbor_coords[neighbor_map.bool(), :]
    # Random direction, then remove its component along p_Y.
    noise = torch.rand_like(p_Y)
    proj_coeff = torch.sum(noise * p_Y, dim=-1, keepdim=True) / (torch.linalg.norm(p_Y, dim=-1, keepdim=True) ** 2 + 1e-10)
    orth = noise - proj_coeff * p_Y
    eta = orth / torch.linalg.norm(orth, dim=-1, keepdim=True)
    # h1 along the partner atom; h3 normal to the (p_Y, eta) plane; h2 completes
    # the right-handed frame.
    h1 = p_Y / (torch.linalg.norm(p_Y, dim=-1, keepdim=True) + 1e-10)
    h3_raw = torch.cross(p_Y, eta, dim=-1)
    h3 = h3_raw / (torch.linalg.norm(h3_raw, dim=-1, keepdim=True) + 1e-10)
    h2 = -torch.cross(h1, h3, dim=-1)
    return torch.cat([h1.unsqueeze(-2), h2.unsqueeze(-2), h3.unsqueeze(-2)], dim=-2)
def signed_volume(local_coords):
"""
Compute signed volume given ordered neighbor local coordinates
:param local_coords: (n_tetrahedral_chiral_centers, 4, n_generated_confs, 3)
:return: signed volume of each tetrahedral center (n_tetrahedral_chiral_centers, n_generated_confs)
"""
v1 = local_coords[:, 0] - local_coords[:, 3]
v2 = local_coords[:, | |
import logging
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
from pathlib import Path
logger = logging.getLogger(__name__)
class CustomPreprocessor():
def __init__(
self,
df: pd.DataFrame,
cfg: object
):
"""CustomPreprocessor customizes checks and cleans VAERS data from merged DataFrame.
Generates clean dataframe for VAERS dataset for modelling and EDA visualization purposes. Includes:
- custom preprocessing
- feature engineering
Attributes:
df (pd.DataFrame): raw input dataframe
cfg (module/object): configuration file specific to the dataset. Includes:
- DATA_DICT (dict): data types for each column
- VISUALIZE_DATA_DICT (dict) : data types to convert to for visualization purposes
- DROP_COLS (list): columns to drop to obtain modelling dataset
- CHECK_ERROR (dict): columns to check for errors
- IMPUTE_NA (dict): column to fill NaN with a specified value
- DROP_NA (list): list of columns of which rows with NA will be dropped.
- DATE_DIFF (dict): dictionary with new column name as key, list as value to calculate difference
in days. Difference in days calculated using list element 0 - list element 1.
- CLIP_UPPER (dict): Dictionary of key-value pairs with column names as keys and
upper clip threshold limit as values
- REPLACE_VALUES (dict): Dictionary of key-value pairs with column names as keys and
dictionary of replacement values for the column.
- REPLACE_NONE_SYNONYMS (dict): Dictionary of key-value pairs with feature names
as keys and list of none synonym strings to match as values
- HISTORY_SEARCH (dict): Dictionary of key-value pairs with new column names
as keys and list of substrings to match in HISTORY column as values
- SYMP_ERROR (list): list of substrings to search for in SYMP. A match (True) indicates that the error
term is present in SYMP. The substrings stated in symp_error are medical or administrative errors
which should not have occurred in the first place or no adverse event and are therefore excluded
from modelling.
- SYMP_SEARCH (dict): Dictionary of key-value pairs with new column names
as keys and list of substrings to match in SYMP column as values
- TARGET_COMPOSITE (list): list of binary features which indicate
type of serious adverse side effect experienced
- FILEPATH (dict): details on relative input and output filepaths.
- output_filepath (str): relative file path to folder to save processed DataFrame as csv
"""
self.df = df
self.check_error = getattr(cfg, "CHECK_ERROR", None)
self.data_dict = getattr(cfg, "DATA_DICT", None)
self.visualize_data_dict = getattr(cfg, "VISUALIZE_DATA_DICT", None)
self.drop_cols = getattr(cfg, "DROP_COLS", None)
self.drop_na = getattr(cfg, "DROP_NA", None)
self.impute_na = getattr(cfg, "IMPUTE_NA", None)
self.date_diff = getattr(cfg, "DATE_DIFF", None)
self.clip_upper = getattr(cfg, "CLIP_UPPER", None)
self.replace_values = getattr(cfg, "REPLACE_VALUES", None)
self.replace_none_synonyms = getattr(
cfg,
"REPLACE_NONE_SYNONYMS",
None
)
self.history_search = getattr(cfg, "HISTORY_SEARCH", None)
self.symp_error = getattr(cfg, "SYMP_ERROR", None)
self.symp_search = getattr(cfg, "SYMP_SEARCH", None)
self.target_composite_list = getattr(cfg, "TARGET_COMPOSITE", None)
self.filepath = getattr(cfg, "FILEPATH", None)
self.clean_filepath = self.filepath.get("clean_filepath")
self.model_filepath = self.filepath.get("model_filepath")
###########
# Helpers
###########
def get_bool_error_rows(
self,
df: pd.DataFrame,
col: str,
criteria: list
) -> pd.Series:
"""Get a boolean series indicating whether the row value of the column (col)
is less than the sane value as stated in the criteria list.
Args:
df (pd.DataFrame): main DataFrame
col (str): column name to check for erroneous rows
criteria (list): list with the first (0) element indicating the condition to
check for (either "<" or ">"), and second element (1) indicating the sane value.
Returns:
pd.Series: boolean series indicating whether the row value of the column (col)
is less than the sane value stated in the criteria list
"""
condition = criteria[0]
sane_value = criteria[1]
if condition == "<":
row_condition = df[col] < sane_value
elif condition == ">":
row_condition = df[col] > sane_value
return row_condition
def date_diff_days(
self,
df: pd.DataFrame,
new_col: str,
date_col1: str,
date_col2: str
) -> pd.DataFrame:
"""Creates a new column calculated from the difference in days between 2 date columns
(date_col1 - date_col2)
Args:
df (pd.DataFrame): pandas DataFrame
new_col (str): name of new column to create
date_col1 (str): name of the first date column to calculate the date difference from
date_col2 (str): name of date column to subtract date_col1 with
Returns:
df (pd.DataFrame): pandas DataFrame with new column calculated from
the difference in days between 2 date columns (date_col1 - date_col2)
"""
df[new_col] = (df[date_col1] - df[date_col2]).dt.days.astype(int)
return df
def col_matched_substring(
self,
df: pd.DataFrame,
col: str,
searchfor:list,
new_col: str
) -> pd.DataFrame:
"""Creates a new binary column in the dataframe based on matches of a specified substring
- If the row of the column contains any of the substring, value will be 1
- If the row of the column does not contain any of the substring, value will be 0
Args:
df (pd.DataFrame): pandas DataFrame
col (str): name of column to match substrings
searchfor (list): list of strings or substrings to search for
new_col (str): name of the new column to create in the dataframe
Returns:
df (pd.DataFrame): pandas dataframe with newly created columns
"""
df_mask = df[col].str.contains('|'.join(searchfor), na=False)
df.loc[df_mask == True, new_col] = 1
df.loc[df_mask == False, new_col] = 0
return df
def replace_less_than(
self,
df: pd.DataFrame,
col:str,
ref_value: str,
replace_col: str
) -> pd.DataFrame:
"""Replace values in a column of a dataframe that are less than the
reference value (col_values < ref value)
- corresponding values from replace_col will be used as replacement value
Args:
df (pd.DataFrame): pandas DataFrame
col (str): column name
ref_value (str or pd.Series): reference value to check against.
replace_col (str): Column name to replace corresponding values with.
Returns:
df (pd.DataFrame): pandas DataFrame with column values replaced
with another replace_col value if rows are less than the reference
value (col_values < ref value)
"""
condition = df[col] < ref_value
logger.info(
f"Replacing {condition.sum()} rows in {col} with values from {replace_col}"
)
df[col] = np.where(condition, df[replace_col], df[col])
return df
#################
# Core Functions
#################
def drop_error_rows(
self,
df: pd.DataFrame,
check_error: dict
) -> pd.DataFrame:
"""Drops erroneous rows for a stated column, condition and sane value as specified
in the config variable CHECK_ERROR.
Args:
df (pd.DataFrame): main DataFrame
check_error (dict): dictionary with the following key-value pairs:
- key: column name to check erroneous row values
- value: list with the first (0) element indicating the reference condition to
check (either "<" or ">"), and second element (1) indicating the sane value to check against.
Returns:
df (pd.DataFrame): main DataFrame with erroneous rows dropped for selected columns
"""
for key, value in check_error.items():
row_condition = self.get_bool_error_rows(
df = df,
col = key,
criteria = value
)
total_drop = row_condition.sum()
percent_drop = round(total_drop/len(df)*100, 2)
logger.info(f"Dropping {total_drop} ({percent_drop}%) for {key} {value}")
df = df.loc[~row_condition, :]
return df
def drop_error_symp(
self,
df: pd.DataFrame,
symp_error: list
) -> pd.DataFrame:
"""Drops rows if SYMP values match any of the substrings stated in the symp_error list specified in the config.
Args:
df (pd.DataFrame): main DataFrame
symp_error (list): list of substrings to search for in SYMP. A match (True) indicates that the error term is
present in SYMP. The substrings stated in symp_error are medical or administrative errors which should
not have occurred in the first place or no adverse event and are therefore excluded from modelling.
(example: Product administered to patient of inappropriate age)
Returns:
df (pd.DataFrame): main DataFrame with erroneous rows dropped for SYMP column
"""
symp_error = [string.lower() for string in symp_error]
error_mask = df['SYMP'].str.contains('|'.join(symp_error), na=False)
logger.info(
f"Dropping {error_mask.sum()} ({round(error_mask.sum()/len(df)*100, 2)}%) error rows in SYMP"
)
df = df.loc[error_mask == False, :]
return df
def impute_missing(
self,
df: pd.DataFrame,
impute_na: dict
) -> pd.DataFrame:
"""Impute missing values with a specified value as stated in the configuration file.
Args:
df (pd.DataFrame): main DataFrame
impute_na | |
return merge_latency[ss]
snodes = state2nset(ss, idn)
if len(stage_seqs) == 1:
assert len(snodes) == 1
merge_latency[ss] = float(
np.mean(cost_model.get_stage_latency([[snodes[0]]], batch_size, warmup, number, repeat)))
else:
convs = [nd for nd in snodes if isinstance(nd, Conv)]
assert len(convs) == len(snodes)
terms = get_input(convs, block, nid, idn)
out_channels = sum(nd.out_channels for nd in convs)
kernel = (max(nd.kernel[0] for nd in convs), max(nd.kernel[1] for nd in convs))
stride = (convs[0].stride[0], convs[0].stride[1])
padding = (max(nd.padding[0] for nd in convs), max(nd.padding[1] for nd in convs))
groups = convs[0].groups
act = convs[0].act
conv = Conv('c', '', inputs=terms, out_channels=out_channels, kernel=kernel, stride=stride, padding=padding,
groups=groups, act=act, output_shape=None)
merge_latency[ss] = float(
np.mean(cost_model.get_stage_latency([[conv]], batch_size, warmup, number, repeat)))
return merge_latency[ss]
elif qtype == 'parallel':
if ss in parallel_latency:
return parallel_latency[ss]
stage_seqs_nodes = []
for seq in stage_seqs:
seq_nodes = []
for uid in seq:
seq_nodes.append(idn[uid])
stage_seqs_nodes.append(seq_nodes)
parallel_latency[ss] = float(
np.mean(cost_model.get_stage_latency(stage_seqs_nodes, batch_size, warmup, number, repeat)))
return parallel_latency[ss]
else:
raise ValueError
def build_graph(all_nodes: List[Node], nid):
    """
    Build a graph over the given operators, using each operator's global index
    from nid as its vertex identifier. Edges point from an operator to the
    operators that use its output, restricted to the given node set.
    """
    adjacency: Dict[int, List[int]] = {}
    for node in all_nodes:
        # A set deduplicates repeated edges to the same target; only users that
        # belong to the node set become edges.
        targets = {nid[use[0]] for use in node.uses if use[0] in all_nodes}
        adjacency[nid[node]] = list(targets)
    return adjacency
def topological_order(graph: IGraph) -> List[int]:
    """
    Produce one valid topological ordering of the vertices of the given DAG.

    Vertices with zero remaining in-degree are held on a LIFO stack, so the
    specific order matches a depth-last Kahn traversal. Asserts the graph is
    acyclic (a cycle would leave vertices unordered).
    """
    remaining = {v: 0 for v in graph}
    for src in graph:
        for dst in graph[src]:
            remaining[dst] += 1
    stack = [v for v in graph if remaining[v] == 0]
    ordering: List[int] = []
    while stack:
        v = stack.pop()
        ordering.append(v)
        for w in graph[v]:
            remaining[w] -= 1
            if remaining[w] == 0:
                stack.append(w)
    assert len(ordering) == len(graph)  # no loop
    return ordering
def graph_transitive_closure(graph: IGraph, include_self=False) -> Dict[int, Set[int]]:
    """
    Compute, for every vertex, the set of vertices reachable from it.

    Vertices are processed in reverse topological order so that each
    successor's reachable set is already complete when it is folded in.
    When include_self is True, each vertex is included in its own set.
    """
    reachable: Dict[int, Set[int]] = {v: set() for v in graph}
    for v in reversed(topological_order(graph)):
        if include_self:
            reachable[v].add(v)
        for succ in graph[v]:
            reachable[v].add(succ)
            reachable[v] |= reachable[succ]
    return reachable
def transitive_closure_to_graph(tc):
    """
    Convert a transitive closure (vertex -> set of reachable vertices) into
    the adjacency-list IGraph representation (vertex -> list of targets).
    """
    return {vertex: list(targets) for vertex, targets in tc.items()}
def longest_chain(graph: IGraph) -> List[int]:
    """
    Return the longest chain (path) in the directed acyclic graph (DAG).

    depth[v] is the number of vertices on the longest path starting at v;
    next_on_chain[v] records which successor realizes that depth. The chain is
    then read off from the deepest vertex by following the recorded links.
    """
    depth = {v: 1 for v in graph}
    next_on_chain = {v: None for v in graph}
    for v in reversed(topological_order(graph)):
        for succ in graph[v]:
            if depth[succ] + 1 > depth[v]:
                depth[v] = depth[succ] + 1
                next_on_chain[v] = succ
    cur = max(depth.keys(), key=lambda v: depth[v])
    chain: List[int] = []
    while cur is not None:
        chain.append(cur)
        cur = next_on_chain[cur]
    return chain
def sub_graph(graph: IGraph, uset) -> IGraph:
    """
    Restrict the graph to the vertices in uset, keeping only the edges whose
    both endpoints survive the restriction.
    """
    return {
        vertex: [target for target in targets if target in uset]
        for vertex, targets in graph.items()
        if vertex in uset
    }
def graph_chain_decomposition(graph: IGraph) -> List[List[int]]:
    """
    Decompose the graph into chains: working on the transitive closure,
    repeatedly split off a longest chain and drop its vertices, until no
    vertices remain.
    """
    closure_graph = transitive_closure_to_graph(graph_transitive_closure(graph))
    chains: List[List[int]] = []
    while closure_graph:
        chain = longest_chain(closure_graph)
        chains.append(chain)
        closure_graph = sub_graph(closure_graph, set(closure_graph.keys()) - set(chain))
    return chains
def ending_iterator(
        s: int,
        chains: List[List[int]],
        nid: Dict[Node, int],
        idn: Dict[int, Node],
        max_group_size: int,
        max_num_groups: int):
    """
    Enumerate endings of a set of operators. An ending of operator set S is defined as a subset S' of S, such that all
    edges between S-S' and S' are from S-S' to S'.

    Each yielded value is a list of operator groups (one group per selected chain
    ending), where a group is the sorted set of operators that must stay together
    with the chosen ending element. Groups larger than max_group_size and endings
    with more than max_num_groups groups are filtered out.
    """
    iset = state2iset(s)
    begins = []
    ends = []
    lengths = []
    # get the range for each chain:
    # `end` is the length of the maximal prefix of the chain still inside S;
    # `begin` limits how far back an ending element may be chosen (group size cap).
    for ichain, chain in enumerate(chains):
        end = 0
        for iu, u in enumerate(chain):
            if u in iset:
                end = iu + 1
            else:
                break
        begin = max(0, end - max_group_size)
        begins.append(begin)
        ends.append(end)
        lengths.append(end - begin)
    # Mixed-radix enumeration: digit i ranges over lengths[i]+1 values
    # (one per candidate ending element of chain i, plus "no element").
    bases = [length + 1 for length in lengths]
    strides = list(itertools.accumulate(bases, operator.mul))
    total = strides[-1]
    # build sub graph and transitive closure (include_self=True so tc[u] is the
    # full group that must accompany ending element u)
    tc = graph_transitive_closure(build_graph(state2nset(s, idn), nid), include_self=True)
    # enumerate endings: w encodes one digit per chain
    for w in range(total):
        end_list = []
        for i, chain in enumerate(chains):
            div = strides[i - 1] if i >= 1 else 1
            idx = (w % strides[i]) // div
            if idx == lengths[i]:  # empty
                continue
            end_list.append(chain[begins[i] + idx])
        if len(end_list) == 0:
            continue
        if len(end_list) > max_num_groups:
            continue
        # Groups must be pairwise disjoint: overlapping reachable sets would put
        # the same operator in two groups.
        isdisjoint = True
        for i in range(len(end_list)):
            for j in range(i + 1, len(end_list)):
                if not tc[end_list[i]].isdisjoint(tc[end_list[j]]):
                    isdisjoint = False
                    break
            if not isdisjoint:
                break
        if isdisjoint:
            groups = [sorted(tc[u]) for u in end_list]
            if any(len(group) > max_group_size for group in groups):
                continue
            yield groups
def dop(s: int,
        block, chains, on_debug, debug_dp_info, idn, nid, dp, ep, opt_type, max_group_size, max_num_groups,
        merge_latency, parallel_latency, cost_model, batch_size, warmup, number, repeat, bar_state) -> float:
    """
    The main dynamic programming progress.

    The state s is a bitmask over operator indices: bit u is set while operator u
    still has to be scheduled. dp memoizes the optimal remaining latency per state,
    and ep records the stage (operator groups plus stage type) chosen for that
    state, from which the final schedule is reconstructed by get_stage_list.
    """
    if s == 0:
        # every operator scheduled: no remaining latency
        return 0.0
    if s in dp:
        return dp[s]
    if on_debug:
        debug_dp_info['#states'][-1] += 1
        debug_dp_info['meta'][-1][s] = 0
    iset = state2iset(s)
    # successor_dict[u]: operators of this state reachable from u (u included)
    successor_dict: Dict[int, Set] = {u: set() for u in iset}
    for u in reversed(iset):
        successors = successor_dict[u]
        successors.add(u)
        for use in idn[u].uses:
            if use[0] in nid and nid[use[0]] in iset:
                successors.update(successor_dict[nid[use[0]]])
    dpv = 1e19
    # s1: operators with no remaining successors in this state other than
    # themselves — the only candidates for a merge stage
    s1 = sum(1 << u for u in iset if len(successor_dict[u]) == 1)
    if "merge" in opt_type:
        for ss in iter_subset(s1):
            if check_merge(ss, idn):
                stage = [[u] for u in state2iset(ss)], 'merge'
                val1 = dop(s - ss, block, chains, on_debug, debug_dp_info, idn, nid, dp, ep, opt_type, max_group_size,
                           max_num_groups, merge_latency, parallel_latency, cost_model, batch_size, warmup, number,
                           repeat, bar_state)
                val2 = latency(stage, block, merge_latency, parallel_latency, cost_model, idn, nid, batch_size, warmup,
                               number, repeat)
                val = val1 + val2
                if on_debug:
                    debug_dp_info['#transitions'][-1] += 1
                    debug_dp_info['meta'][-1][s] += debug_dp_info['meta'][-1][s - ss]
                if val < dpv:
                    dpv = val
                    ep[s] = stage
    # NOTE: an earlier implementation enumerated all subsets of operators whose
    # successor sets were small enough; it was inefficient and has been replaced
    # by the ending_iterator-based enumeration below.
    if "parallel" in opt_type:
        for groups in ending_iterator(s, chains, nid, idn, max_group_size, max_num_groups):
            stage = groups, 'parallel'
            consumed = sum(1 << u for u in itertools.chain(*stage[0]))
            val1 = dop(s - consumed, block, chains, on_debug, debug_dp_info, idn, nid, dp, ep, opt_type, max_group_size,
                       max_num_groups, merge_latency, parallel_latency, cost_model, batch_size, warmup, number, repeat, bar_state)
            val2 = latency(stage, block, merge_latency, parallel_latency, cost_model, idn, nid, batch_size, warmup,
                           number, repeat)
            val = val1 + val2
            if on_debug:
                debug_dp_info['#transitions'][-1] += 1
                debug_dp_info['meta'][-1][s] += debug_dp_info['meta'][-1][s - consumed]
                debug_dp_info['width'][-1] = max(debug_dp_info['width'][-1], len(stage[0]))
            if val < dpv:
                dpv = val
                ep[s] = stage
    dp[s] = dpv
    if bar_state is not None:
        # progress bar: one tick per solved state
        bar_state.update(1)
    return dpv
def get_stage_list(ep, s):
    """
    Reconstruct the ordered list of stages from the per-state choices in ep.

    Starting from the full state bitmask s, repeatedly look up the stage chosen
    for the current state, strip that stage's operators from the bitmask, and
    continue until the state is empty. The stages are collected backwards and
    reversed at the end.
    """
    reversed_stages = []
    while s != 0:
        chosen = ep[s]
        reversed_stages.append(chosen)
        # Clear the bit of every operator consumed by the chosen stage.
        consumed = sum(1 << u for seq in chosen[0] for u in seq)
        s -= consumed
    return list(reversed(reversed_stages))
def construct(stage_list: List[Tuple[List[List[int]], str]], block, constructed_blocks, graph_enter, idn, nid,
compute_weight) -> Block:
"""
Construct the optimized computation graph.
"""
inner_nodes = []
stages = []
if len(constructed_blocks) == 0:
new_enter_node = graph_enter
else:
new_enter_node = constructed_blocks[-1].exit_node
out_dict = {block.enter_node: (new_enter_node, 0, new_enter_node.output_shape[0])}
def merge_inputs(inputs: List[List[Value]]):
merge_inputs_flag = True
if merge_inputs_flag:
while True: | |
# <gh_stars>1-10
# Non-local block using embedded gaussian
# DyReG
import torch
from torch import nn
from torch.nn import functional as F
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import sys
import os
sys.path.append(os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..')))
import time
from opts import parser
from ops.rstg import *
from ops.models_utils import *
global args, best_prec1
from opts import parse_args
args = parser.parse_args()
args = parse_args()
import pickle
import pdb
class DynamicGraph(nn.Module):
    def __init__(self,
                 backbone_dim=1024,
                 H=14, W=14, oH=7, oW=7, iH=None, iW=None,
                 h=3, w=3,
                 node_dim=512, out_num_ch = 2048, project_i3d=False, name=''):
        """Dynamic graph-of-regions module wrapping an RSTG graph network.

        Args:
            backbone_dim: channel count of the backbone feature map fed in.
            H, W: spatial size of the input feature map.
            oH, oW: spatial size of the pooled output feature map.
            iH, iW: training-time input size, used for resizing during
                full-resolution evaluation.
            h, w: node-grid dimensions; the graph has h*w region nodes.
            node_dim: feature dimension of each graph node.
            out_num_ch: channel count targeted by the skip-connection projection.
            project_i3d: whether to project backbone features to node_dim.
            name: identifier for this block instance.
        """
        super(DynamicGraph, self).__init__()
        self.params = args
        # the dimension of the input used at training time
        # to be used for resize for fullsize evaluation
        self.iH = iH
        self.iW = iW
        self.name = name
        self.rstg = RSTG(backbone_dim=backbone_dim, node_dim=node_dim, project_i3d=project_i3d)
        # B: per-process batch size. In the non-distributed case the global batch is
        # divided across visible GPUs and multiplied by crops / twice-sampling.
        if not args.distributed:
            self.B = (self.params.test_crops * self.params.batch_size
                      // max(1, torch.cuda.device_count())
                      * (2 if self.params.twice_sample else 1)
                      )
        else:
            self.B = self.params.batch_size
        # self.T = self.params.rstg_time_steps
        self.T = self.params.num_segments
        self.BT = self.B * self.T
        self.backbone_dim = backbone_dim
        # C: channel count the graph operates on (node_dim when projecting at input).
        if self.params.project_at_input:
            self.C = self.rstg.node_dim
        else:
            self.C = backbone_dim
        # self.W = self.H = 14
        self.out_num_ch = out_num_ch
        self.H = H
        self.W = W
        self.oH = oH
        self.oW = oW
        self.h = h
        self.w = w
        self.num_nodes = self.h * self.w
        self.project_i3d = project_i3d
        # self.initial_offsets = torch.zeros(self.BT, self.h, self.w, 4)
        # get anchors (fixed region centers) and register them as buffers so they
        # move with the module between devices
        ph, pw = self.compute_anchors()
        self.register_buffer('ph_buf', ph)
        self.register_buffer('pw_buf', pw)
        self.norm_dict = nn.ModuleDict({})
        # input projection
        if self.project_i3d and self.params.project_at_input:
            self.project_i3d_linear = nn.Conv2d(backbone_dim, self.C, [1, 1])
            if self.params.remap_first == False:
                self.project_back_i3d_linear = nn.Conv1d(self.C, backbone_dim, [1])
            else:
                self.project_back_i3d_linear = nn.Conv2d(self.C, backbone_dim, [1, 1])
        if self.params.rstg_skip_connection:
            self.project_skip_graph = nn.Conv2d(node_dim,
                                                self.out_num_ch // args.ch_div, [1, 1]
                                                )
            self.norm_dict[f'skip_conn'] = LayerNormAffine2D(
                self.out_num_ch // args.ch_div,
                (self.out_num_ch // args.ch_div, self.H, self.W)
            )
        self.avg_pool_7 = nn.AdaptiveAvgPool2d((self.oH, self.oW))
        # not use otherwise but created already so this is to avoid resume errors
        if self.params.dynamic_regions == 'constrained_fix_size':
            # create kernel constants
            # const_dh_ones = torch.ones(self.BT, self.num_nodes, 1)
            # const_dw_ones = torch.ones(self.BT, self.num_nodes, 1)
            const_dh_ones = torch.ones(1, self.num_nodes, 1)
            const_dw_ones = torch.ones(1, self.num_nodes, 1)
            self.register_buffer('const_dh_ones_buf', const_dh_ones)
            self.register_buffer('const_dw_ones_buf', const_dw_ones)
        if self.params.dynamic_regions == 'GloRe':
            # GloRe-style mask prediction head (see get_glore_kernel)
            self.mask_fishnet = Fishnet(
                input_channels=self.C, input_height=self.H, keep_spatial_size=True
            )
            self.mask_pred = nn.Conv2d(self.C, self.num_nodes, [1, 1])
        self.create_offset_modules()
        # project kernel
        if args.full_res in [
            "resize_offset_generator_output", "resize_offset_generator_input",
        ]:
            self.kernel_projection1 = torch.nn.Linear(self.iH * self.iW, self.C)
            self.kernel_location_pool = nn.AdaptiveAvgPool2d((self.iH, self.iW))
        else:
            self.kernel_projection1 = torch.nn.Linear(self.H * self.W, self.C)
        if not self.params.combine_by_sum:
            self.kernel_projection2 = torch.nn.Linear(2 * self.C, self.C)
        # B*T x 1 x H
        arange_h = (torch.arange(0, self.H)
                    .unsqueeze(0).unsqueeze(0)
                    .to(self.kernel_projection1.weight.device)
                    )
        # # B*T x 1 x W
        arange_w = (torch.arange(0, self.W)
                    .unsqueeze(0).unsqueeze(0)
                    .to(self.kernel_projection1.weight.device)
                    )
        self.register_buffer('arange_h_buf', arange_h)
        self.register_buffer('arange_w_buf', arange_w)
        # norm layers used by get_offsets (before and after the temporal LSTM)
        self.norm_dict[f'offset_ln_feats_coords'] = LayerNormAffineXC(
            self.params.offset_lstm_dim,
            (self.num_nodes, self.params.offset_lstm_dim)
        )
        self.norm_dict[f'offset_ln_lstm'] = LayerNormAffineXC(
            self.params.offset_lstm_dim,
            (self.num_nodes, self.params.offset_lstm_dim)
        )
        if args.tmp_norm_before_dynamic_graph:
            self.norm_dict[f'before_dynamic_graph'] = LayerNormAffine2D(
                backbone_dim,
                (backbone_dim, self.H, self.W)
            )
        if (self.project_i3d and self.params.project_at_input
                and args.tmp_norm_after_project_at_input):
            self.norm_dict[f'dynamic_graph_projection'] = LayerNormAffine2D(
                self.C, (self.C, self.H, self.W) )
        if self.params.fix_offsets:
            fix_offsets = torch.zeros(size=[self.BT, self.num_nodes, 4], requires_grad=False)
            self.register_buffer('fix_offsets', fix_offsets)
        # fix kernel: precomputed resize filters loaded from disk
        if self.params.dynamic_regions == 'none':
            # if True:
            fix_kernel = self.get_fix_kernel()
            self.register_buffer('fix_kernel', fix_kernel)
            self.fix_kernel = self.fix_kernel.unsqueeze(0).repeat(self.BT, 1, 1, 1, 1)
        if self.params.node_confidence == 'pool_feats':
            self.node_confidence = nn.Conv1d(self.C, 1, [1])
        if self.params.contrastive_mlp == True:
            self.contrastive_mlp = nn.Sequential(
                nn.Linear(self.C, self.C),
                nn.ReLU(),
                nn.Linear(self.C, self.C)
            )
        # auxiliary tensors stored during forward passes — presumably for
        # logging/visualization; confirm against callers
        self.aux_feats = {}
def get_norm(self, input, name, zero_init=False):
# input: B*T x N x C
return self.norm_dict[name](input)
    def create_offset_modules(self):
        """Build the sub-modules that predict per-node region offsets: a spatial
        feature extractor (fishnet / glore / fishnet-glore), a temporal recurrent
        net, and the final linear offset head with its learnable scaling alpha.
        """
        # for fullresolution evaluation: pool the offset_generation input
        offset_input_size = self.H
        if self.params.full_res == 'resize_offset_generator_input':
            # TODO: remove this hardcoding
            self.params.input_size = 256
            self.input_pooling = nn.AdaptiveAvgPool2d((self.iH, self.iW))
            offset_input_size = self.iH
        if self.params.offset_generator == 'fishnet':
            self.fishnet = Fishnet(input_channels=self.C, input_height=offset_input_size)
            full_res_pad = 0
            if args.full_res == 'resize_offset_generator_output':
                full_res_pad = 1
            # TODO: hack, at training time the global conv acts as a fully connected
            # for fullsize evaluation it should be the same size as at training
            global_conv_size = self.fishnet.norm_size[-1] - full_res_pad
            self.global_conv = nn.Sequential(
                nn.Conv2d(self.fishnet.offset_channels[-1], self.num_nodes * self.params.offset_lstm_dim,
                          [global_conv_size, global_conv_size]),
                nn.AdaptiveAvgPool2d((1, 1))
            )
        elif self.params.offset_generator == 'glore':
            self.glore = GloRe(input_channels=self.C, input_height=self.H, h=self.h, w=self.w)
        elif self.params.offset_generator == 'fishnet-glore':
            self.fishnet = Fishnet(input_channels=self.C, input_height=self.H)
            if self.H < 14:
                glore_input_height = 5
            else:
                glore_input_height = 6
            self.glore = GloRe(input_channels=16, input_height=glore_input_height, h=self.h, w=self.w)
        # temporal model over per-node features (see get_offsets for the reshaping)
        self.offset_lstm = recurrent_net(batch_first=True,
                                         input_size=self.params.offset_lstm_dim, hidden_size=self.params.offset_lstm_dim)
        if self.params.node_confidence == 'offset':
            # the first 4 offsets are used for region location and size
            # the 5-th offset is used as region confidence
            self.offset_pred = torch.nn.Linear(self.params.offset_lstm_dim, 5)
            # alpha scales the predicted offsets: zeros for the 4 geometry terms
            # (regions start exactly at their anchors), one for the confidence term
            init_alpha1 = torch.zeros(size=[4], requires_grad=True)
            init_alpha2 = torch.ones(size=[1], requires_grad=True)
            init_alpha = torch.cat((init_alpha1, init_alpha2), dim=-1)
            self.offset_alpha = torch.nn.Parameter(init_alpha)
        else:
            self.offset_pred = torch.nn.Linear(self.params.offset_lstm_dim, 4)
            self.offset_alpha = torch.nn.Parameter(torch.zeros(size=[4], requires_grad=True))
def compute_anchors(self):
if self.params.init_regions == 'center':
ph = self.H / 2 * torch.ones(1,self.h, 1)
pw = self.H / 2 * torch.ones(1,self.w, 1)
# ph = ph.repeat(self.BT, 1, self.w).view(self.BT, self.h * self.w, 1)
# # ph: BT x (h * w) x 1
# pw = pw.repeat(self.BT, self.h, 1)
ph = ph.repeat(1, 1, self.w).view(1, self.h * self.w, 1)
# ph: BT x (h * w) x 1
pw = pw.repeat(1, self.h, 1)
elif self.params.init_regions == 'grid':
ph = torch.linspace(0, self.H, 2 * self.h + 1)[1::2]
ph = ph.unsqueeze(-1).unsqueeze(0)
pw = torch.linspace(0, self.W, 2 * self.w + 1)[1::2]
pw = pw.unsqueeze(-1).unsqueeze(0)
ph = ph.repeat(1, 1, self.w).view(1, self.h * self.w, 1)
#ph: BT x (h * w) x 1
pw = pw.repeat(1, self.h, 1)
else:
print(f'init_regions: center or grid')
sys.exit()
return ph, pw
    def get_offsets(self, input_features, name='offset'):
        """Predict per-node region offsets from backbone features.

        Args:
            input_features: B*T x C x H x W backbone feature map.
            name: prefix used to select the normalization layers.

        Returns:
            offsets: B*T x num_nodes x 4 (or x 5 with node confidence), scaled by
            the learnable offset_alpha (initialized to zeros in
            create_offset_modules, so offsets start at 0).
        """
        # resize the input for the offset generation functions
        if self.params.full_res == 'resize_offset_generator_input':
            input_features = self.input_pooling(input_features)
        # input_features: B * T, C, H, W
        if self.params.offset_generator == 'fishnet':
            global_features = self.fishnet(input_features)
            assert global_features.shape[-1] == self.fishnet.norm_size[-1] and global_features.shape[-2] == self.fishnet.norm_size[-1]
            # assert global_features.shape[-1] == 5 and global_features.shape[-2] == 5
            global_features = self.global_conv(global_features)
            global_features = global_features.view(self.BT, self.num_nodes, self.params.offset_lstm_dim)
        elif self.params.offset_generator == 'glore':
            global_features = self.glore(input_features)
            global_features = global_features.permute(0, 2, 1)
        elif self.params.offset_generator == 'fishnet-glore':
            # print(f"Input fihsnet {input_features.shape}")
            global_features = self.fishnet(input_features)
            # print(f"Output fishnet {global_features.shape}")
            global_features = self.glore(global_features)
            global_features = global_features.permute(0, 2, 1)
        global_features = self.get_norm(global_features, f'{name}_ln_feats_coords')
        # Regroup to (B*num_nodes, T, dim) so the recurrent net runs over time
        # independently for each node.
        global_features = global_features.view(self.B, self.T, self.num_nodes, self.params.offset_lstm_dim)
        global_features = global_features.permute(0, 2, 1, 3).contiguous()
        global_features = global_features.view(self.B * self.num_nodes, self.T, self.params.offset_lstm_dim)
        self.offset_lstm.flatten_parameters()
        offset_feats, _ = self.offset_lstm(global_features)
        # Regroup back to (B*T, num_nodes, dim) for normalization and prediction.
        offset_feats = offset_feats.view(self.B, self.num_nodes, self.T, self.params.offset_lstm_dim)
        offset_feats = offset_feats.permute(0, 2, 1, 3).contiguous()
        offset_feats = offset_feats.view(self.BT, self.num_nodes, self.params.offset_lstm_dim)
        offset_feats = self.get_norm(offset_feats, f'{name}_ln_lstm')
        offsets = self.offset_pred(offset_feats)
        offsets = offsets * self.offset_alpha
        # if args.freeze_regions in ['offsets_input', 'just_offsets'] :
        #     offsets = offsets.detach()
        return offsets
def get_fix_kernel(self):
import numpy as np
saved_filters_name = f'/root/experimantal/dynamic_graphs_5aug/dynamic_graph_regions/util_pickles/image_resize_area_align_false_filters_HWhw_{self.H}_{self.W}_{self.h}_{self.w}.pickle'
print(saved_filters_name)
if os.path.exists(saved_filters_name):
with open(saved_filters_name, 'rb') as handle:
saved_filters = pickle.load(handle)
filters = saved_filters['filters'].astype(np.float32)
kernel = torch.tensor(filters, requires_grad=False)
return kernel
def get_glore_kernel(self, input_features):
global_features = self.mask_fishnet(input_features)
mask = self.mask_pred(global_features)
mask = mask.view(mask.shape[0], self.h, self.h, *mask.shape[2:])
return mask
def get_dynamic_kernel(self, offsets):
offsets = offsets.view(offsets.shape[0],-1,4)
# create the fixed anchors
# ph = self.ph
# pw = self.pw
ph = self.ph_buf
pw = self.pw_buf
# ph = self.ph_buf.repeat((self.BT,1,1))
# pw = self.pw_buf.repeat((self.BT,1,1))
self.arange_h = self.arange_h_buf
self.arange_w = self.arange_w_buf
# pw: BT x (h * w) x 1
# scale the offsets so that they always stay in the input region
if self.params.use_detector:
# TODO: poate separat pe H, W
# print(f'input_size: {self.params.input_size}')
offsets = offsets / self.params.input_size * self.H
# offsets: w1 h1 w2 h2
# offsets: h1 w1 h2 w2
regions_h = (offsets[:,:,2].unsqueeze(-1) - offsets[:,:,0].unsqueeze(-1) ) / 2.0
regions_w = (offsets[:,:,3].unsqueeze(-1) - offsets[:,:,1].unsqueeze(-1) ) / 2.0
regions_dh = (offsets[:,:,0].unsqueeze(-1) + offsets[:,:,2].unsqueeze(-1) ) / 2.0 - ph
regions_dw = (offsets[:,:,1].unsqueeze(-1) + offsets[:,:,3].unsqueeze(-1) ) / 2.0 - pw
if self.params.gt_detection_mult > 1.0:
regions_h = regions_h * self.params.gt_detection_mult
regions_w = regions_w * self.params.gt_detection_mult
elif self.params.dynamic_regions == 'constrained':
regions_h = torch.exp(offsets[:,:,0]).unsqueeze(-1)
regions_w = torch.exp(offsets[:,:,1]).unsqueeze(-1)
regions_dh = torch.tanh(offsets[:,:,2].unsqueeze(-1) + atanh( 2 * ph / self.H - 1))
regions_dw = torch.tanh(offsets[:,:,3].unsqueeze(-1) + atanh( 2 * pw / self.W - 1))
if args.tmp_init_different != 0.0:
h_init = self.H / (args.tmp_init_different*self.h)
w_init = self.W / (args.tmp_init_different*self.w)
else:
h_init = self.H / (2 * self.h) + 1
w_init = self.W / (2 * self.w) + 1
regions_h = regions_h * h_init
regions_w = regions_w * w_init
regions_dh = regions_dh * (self.H / 2) + self.H / 2 - ph
regions_dw = regions_dw * (self.W / 2) + self.W / 2 - pw
if self.params.kernel_type == 'gaussian':
regions_h = | |
# <gh_stars>1-10
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.utils import iteritems
import json
import os
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import log
from twisted.spread import pb
from buildbot import pbutil
from buildbot.process.properties import Properties
from buildbot.schedulers import base
from buildbot.util import ascii2unicode
from buildbot.util import bytes2NativeString
from buildbot.util import netstrings
from buildbot.util.maildir import MaildirService
class TryBase(base.BaseScheduler):

    """Common base for 'try' schedulers: validates requested builder names."""

    def filterBuilderList(self, builderNames):
        """
        Validate a user-supplied list of builder names against the configured
        C{self.builderNames}.

        An empty request means "build on every configured builder"; a request
        containing any unknown builder is rejected wholesale by returning an
        empty list.

        @returns: list of builder names to build on
        """
        if not builderNames:
            # No explicit request: fall back to everything configured.
            return self.builderNames
        for requested in builderNames:
            if requested not in self.builderNames:
                log.msg("%s got with builder %s" % (self, requested))
                log.msg(" but that wasn't in our list: %s"
                        % (self.builderNames,))
                return []
        return builderNames
class BadJobfile(Exception):

    """Raised when a submitted try jobfile cannot be parsed."""
class JobdirService(MaildirService):

    """Maildir watcher that feeds incoming jobfiles to its scheduler.

    NOTE: tightly coupled with Try_Jobdir, below. We used to track it as a
    "parent" via the MultiService API, but now we just track it as the member
    "self.scheduler".
    """

    def __init__(self, scheduler, basedir=None):
        self.scheduler = scheduler
        MaildirService.__init__(self, basedir)

    def messageReceived(self, filename):
        # Hand the open jobfile to the scheduler while the maildir entry is
        # moved into 'cur'.
        with self.moveToCurDir(filename) as jobfile:
            result = self.scheduler.handleJobFile(filename, jobfile)
        return result
class Try_Jobdir(TryBase):

    """'Try' scheduler that watches a maildir-style job directory for jobfiles."""

    compare_attrs = ('jobdir',)

    def __init__(self, name, builderNames, jobdir, **kwargs):
        TryBase.__init__(self, name, builderNames, **kwargs)
        self.jobdir = jobdir
        self.watcher = JobdirService(scheduler=self)

    # TryBase used to be a MultiService and managed the JobdirService via a parent/child
    # relationship. We stub out the addService/removeService and just keep track of
    # JobdirService as self.watcher. We'll refactor these things later and remove
    # the need for this.
    def addService(self, child):
        pass

    def removeService(self, child):
        pass

    # activation handlers

    @defer.inlineCallbacks
    def activate(self):
        """Start watching the jobdir, creating the maildir subdirectories if needed."""
        yield TryBase.activate(self)
        if not self.enabled:
            return
        # set the watcher's basedir now that we have a master
        jobdir = os.path.join(self.master.basedir, self.jobdir)
        self.watcher.setBasedir(jobdir)
        for subdir in "cur new tmp".split():
            if not os.path.exists(os.path.join(jobdir, subdir)):
                os.mkdir(os.path.join(jobdir, subdir))
        # bridge the activate/deactivate to a startService/stopService on the
        # child service
        self.watcher.startService()

    @defer.inlineCallbacks
    def deactivate(self):
        """Stop watching the jobdir."""
        yield TryBase.deactivate(self)
        if not self.enabled:
            return
        # bridge the activate/deactivate to a startService/stopService on the
        # child service
        self.watcher.stopService()

    def parseJob(self, f):
        """Parse an open jobfile into a dict of build-request fields.

        Raises BadJobfile when the file is oversized, is not valid netstrings,
        or declares an unknown format version.
        """
        # jobfiles are serialized build requests. Each is a list of
        # serialized netstrings, in the following order:
        #  format version number:
        #  "1" the original
        #  "2" introduces project and repository
        #  "3" introduces who
        #  "4" introduces comment
        #  "5" introduces properties and JSON serialization of values after
        #      version
        #  jobid: arbitrary string, used to find the buildSet later
        #  branch: branch name, "" for default-branch
        #  baserev: revision, "" for HEAD
        #  patch_level: usually "1"
        #  patch_body: patch to be applied for build
        #  repository
        #  project
        #  who: user requesting build
        #  comment: comment from user about diff and/or build
        #  builderNames: list of builder names
        #  properties: dict of build properties
        p = netstrings.NetstringParser()
        f.seek(0, 2)
        if f.tell() > basic.NetstringReceiver.MAX_LENGTH:
            raise BadJobfile(
                "The patch size is greater that NetStringReceiver.MAX_LENGTH. Please Set this higher in the master.cfg")
        f.seek(0, 0)
        try:
            p.feed(f.read())
        except basic.NetstringParseError:
            raise BadJobfile("unable to parse netstrings")
        if not p.strings:
            raise BadJobfile("could not find any complete netstrings")
        ver = bytes2NativeString(p.strings.pop(0))
        v1_keys = ['jobid', 'branch', 'baserev', 'patch_level', 'patch_body']
        v2_keys = v1_keys + ['repository', 'project']
        v3_keys = v2_keys + ['who']
        v4_keys = v3_keys + ['comment']
        keys = [v1_keys, v2_keys, v3_keys, v4_keys]
        # v5 introduces properties and uses JSON serialization
        parsed_job = {}

        def extract_netstrings(p, keys):
            # copy each positional netstring into parsed_job under its key name
            for i, key in enumerate(keys):
                parsed_job[key] = bytes2NativeString(p.strings[i])

        def postprocess_parsed_job():
            # apply defaults and handle type casting
            parsed_job['branch'] = parsed_job['branch'] or None
            parsed_job['baserev'] = parsed_job['baserev'] or None
            parsed_job['patch_level'] = int(parsed_job['patch_level'])
            for key in 'repository project who comment'.split():
                parsed_job[key] = parsed_job.get(key, '')
            parsed_job['properties'] = parsed_job.get('properties', {})

        if ver <= "4":
            # versions 1-4: fixed positional fields, then builder names
            i = int(ver) - 1
            extract_netstrings(p, keys[i])
            parsed_job['builderNames'] = [bytes2NativeString(s)
                                          for s in p.strings[len(keys[i]):]]
            postprocess_parsed_job()
        elif ver == "5":
            # version 5: a single JSON document follows the version netstring
            try:
                data = bytes2NativeString(p.strings[0])
                parsed_job = json.loads(data)
            except ValueError:
                raise BadJobfile("unable to parse JSON")
            postprocess_parsed_job()
        else:
            raise BadJobfile("unknown version '%s'" % ver)
        return parsed_job

    def handleJobFile(self, filename, f):
        """Parse a jobfile and submit a buildset for it.

        Bad jobfiles and disallowed builder lists are logged and ignored
        (a Deferred firing with None is returned in either case).
        """
        try:
            parsed_job = self.parseJob(f)
            builderNames = parsed_job['builderNames']
        except BadJobfile:
            log.msg("%s reports a bad jobfile in %s" % (self, filename))
            log.err()
            return defer.succeed(None)
        # Validate/fixup the builder names.
        builderNames = self.filterBuilderList(builderNames)
        if not builderNames:
            log.msg(
                "incoming Try job did not specify any allowed builder names")
            return defer.succeed(None)
        who = ""
        if parsed_job['who']:
            who = parsed_job['who']
        comment = ""
        if parsed_job['comment']:
            comment = parsed_job['comment']
        sourcestamp = dict(branch=parsed_job['branch'],
                           codebase='',
                           revision=parsed_job['baserev'],
                           patch_body=parsed_job['patch_body'],
                           patch_level=parsed_job['patch_level'],
                           patch_author=who,
                           patch_comment=comment,
                           # TODO: can't set this remotely - #1769
                           patch_subdir='',
                           project=parsed_job['project'],
                           repository=parsed_job['repository'])
        reason = u"'try' job"
        if parsed_job['who']:
            reason += u" by user %s" % ascii2unicode(parsed_job['who'])
        properties = parsed_job['properties']
        requested_props = Properties()
        requested_props.update(properties, "try build")
        return self.addBuildsetForSourceStamps(
            sourcestamps=[sourcestamp],
            reason=reason,
            external_idstring=ascii2unicode(parsed_job['jobid']),
            builderNames=builderNames,
            properties=requested_props)
class RemoteBuildSetStatus(pb.Referenceable):

    """Remote-referenceable view of a buildset, exposed to PB try clients."""

    def __init__(self, master, bsid, brids):
        self.master = master
        self.bsid = bsid
        self.brids = brids

    @defer.inlineCallbacks
    def remote_getBuildRequests(self):
        """Return (builder name, RemoteBuildRequest) pairs for this buildset."""
        by_name = {}
        for builderid, brid in iteritems(self.brids):
            # Resolve each builder id to its human-readable name via the data API.
            builder = yield self.master.data.get(('builders', builderid))
            by_name[builder['name']] = brid
        requests = [(name, RemoteBuildRequest(self.master, name, brid))
                    for name, brid in iteritems(by_name)]
        defer.returnValue(requests)
class RemoteBuildRequest(pb.Referenceable):

    """Remote-referenceable view of one build request for PB try clients."""

    def __init__(self, master, builderName, brid):
        self.master = master
        self.builderName = builderName
        self.brid = brid
        self.consumer = None  # active MQ consumer while a client is subscribed

    @defer.inlineCallbacks
    def remote_subscribe(self, subscriber):
        """Notify `subscriber` (via callRemote('newbuild', ...)) of every build
        belonging to this request, both future and already-existing ones."""
        brdict = yield self.master.data.get(('buildrequests', self.brid))
        if not brdict:
            return
        builderId = brdict['builderid']
        # make sure we aren't double-reporting any builds
        reportedBuilds = set([])

        # subscribe to any new builds..
        def gotBuild(key, msg):
            # only 'new' events for this specific build request
            if msg['buildrequestid'] != self.brid or key[-1] != 'new':
                return
            if msg['buildid'] in reportedBuilds:
                return
            reportedBuilds.add(msg['buildid'])
            return subscriber.callRemote('newbuild',
                                         RemoteBuild(
                                             self.master, msg, self.builderName),
                                         self.builderName)
        self.consumer = yield self.master.mq.startConsuming(
            gotBuild, ('builders', str(builderId), 'builds', None, None))
        subscriber.notifyOnDisconnect(lambda _:
                                      self.remote_unsubscribe(subscriber))
        # and get any existing builds
        # (subscribing first, then listing, ensures no build is missed; the
        # reportedBuilds set deduplicates any overlap between the two)
        builds = yield self.master.data.get(('buildrequests', self.brid, 'builds'))
        for build in builds:
            if build['buildid'] in reportedBuilds:
                continue
            reportedBuilds.add(build['buildid'])
            yield subscriber.callRemote('newbuild',
                                        RemoteBuild(
                                            self.master, build, self.builderName),
                                        self.builderName)

    def remote_unsubscribe(self, subscriber):
        """Stop delivering build notifications to the subscriber."""
        if self.consumer:
            self.consumer.stopConsuming()
            self.consumer = None
class RemoteBuild(pb.Referenceable):

    """Remote-referenceable view of a single build, exposed to PB try clients."""

    def __init__(self, master, builddict, builderName):
        self.master = master
        self.builddict = builddict
        self.builderName = builderName
        self.consumer = None  # active MQ consumer while a client is subscribed

    @defer.inlineCallbacks
    def remote_subscribe(self, subscriber, interval):
        """Stream step started/finished events for this build to the subscriber.

        `interval` is accepted for protocol compatibility but unused here.
        """
        # subscribe to any new steps..
        def stepChanged(key, msg):
            log.msg("SC")
            if key[-1] == 'started':
                return subscriber.callRemote('stepStarted',
                                             self.builderName, self, msg['name'], None)
            elif key[-1] == 'finished':
                return subscriber.callRemote('stepFinished',
                                             self.builderName, self, msg['name'], None, msg['results'])
        self.consumer = yield self.master.mq.startConsuming(
            stepChanged,
            ('builds', str(self.builddict['buildid']), 'steps', None, None))
        subscriber.notifyOnDisconnect(lambda _:
                                      self.remote_unsubscribe(subscriber))

    def remote_unsubscribe(self, subscriber):
        """Stop streaming step events to the subscriber."""
        if self.consumer:
            self.consumer.stopConsuming()
            self.consumer = None

    @defer.inlineCallbacks
    def remote_waitUntilFinished(self):
        """Fire (asynchronously) once this build finishes; the result is self."""
        d = defer.Deferred()

        def buildEvent(key, msg):
            log.msg("BE")
            if key[-1] == 'finished':
                d.callback(None)
        consumer = yield self.master.mq.startConsuming(
            buildEvent,
            ('builds', str(self.builddict['buildid']), None))
        yield d  # wait for event
        consumer.stopConsuming()
        defer.returnValue(self)  # callers expect result=self

    @defer.inlineCallbacks
    def remote_getResults(self):
        """Return the numeric results code of this build."""
        buildid = self.builddict['buildid']
        builddict = yield self.master.data.get(('builds', buildid))
        defer.returnValue(builddict['results'])

    @defer.inlineCallbacks
    def remote_getText(self):
        """Return a one-element list containing the build's state string."""
        buildid = self.builddict['buildid']
        builddict = yield self.master.data.get(('builds', buildid))
        defer.returnValue([builddict['state_string']])
class Try_Userpass_Perspective(pbutil.NewCredPerspective):
    def __init__(self, scheduler, username):
        # scheduler: the try scheduler used to filter builder lists
        #            (see perspective_try, which calls filterBuilderList)
        # username: authenticated user name, used when logging requests
        self.scheduler = scheduler
        self.username = username
@defer.inlineCallbacks
def perspective_try(self, branch, revision, patch, repository, project,
builderNames, who="", comment="", properties=None):
log.msg("user %s requesting build on builders %s" % (self.username,
builderNames))
if properties is None:
properties = {}
# build the intersection of the request and our configured list
builderNames = self.scheduler.filterBuilderList(builderNames)
if not builderNames:
return
reason = u"'try' job"
if who:
reason += u" by user %s" % ascii2unicode(who)
if comment:
reason += u" (%s)" % ascii2unicode(comment)
sourcestamp = dict(
branch=branch, revision=revision, | |
the intensity of the first 50 frames as the meta-representation
im_stack = np.zeros((min(50, frame_num), self.height, self.width))
for i in range(min(50, frame_num)):
im_i = pl.imread(self.file_list_trench_detect[i])
if np.max(im_i) > 255:
im_i = self.to_8_bit(im_i)
if self.drift_correct == 1:
# correct for drift
move_x = self.drift_x[i]
temp = np.zeros((self.height, self.width))
if move_x > 0:
temp[:, :self.width-move_x] = im_i[:,move_x:]
else:
temp[:, (-move_x):] = im_i[:, :self.width+move_x]
im_i = temp
im_stack[i] = im_i
perc = np.percentile(im_stack, 85, axis=0).astype(np.uint8)
out_file = "perc_85_frame_50.tiff"
# convert to 8-bit, using the imageJ way
out = PIL.Image.frombytes("L", (self.width, self.height), perc.tobytes())
out.save(out_file)
# identify tops & bottoms
if self.spatial != 2:
intensity_scan = np.percentile(perc, 90, axis=1)
# intensity_scan = np.max(perc,axis=1)
intensity_scan = intensity_scan / float(sum(intensity_scan))
# normalize intensity
im_min = intensity_scan.min()
im_max = intensity_scan.max()
scaling_factor = (im_max - im_min)
intensity_scan = (intensity_scan - im_min)
intensity_scan = (intensity_scan / scaling_factor)
else:
perc_top = perc[:int(self.height/2),:]
perc_bot = perc[int(self.height/2):,:]
intensity_scan_top = np.percentile(perc_top, 90, axis=1)
# intensity_scan_top = np.max(perc_top,axis=1)
intensity_scan_top = intensity_scan_top / float(sum(intensity_scan_top))
# normalize intensity
im_min_top = intensity_scan_top.min()
im_max_top = intensity_scan_top.max()
scaling_factor_top = (im_max_top - im_min_top)
intensity_scan_top = (intensity_scan_top - im_min_top)
intensity_scan_top = (intensity_scan_top / scaling_factor_top)
intensity_scan_bot = np.percentile(perc_bottom, 90, axis=1)
# intensity_scan_bot = np.max(perc_bot, axis=1)
intensity_scan_bot = intensity_scan_bot / float(sum(intensity_scan_bot))
# normalize intensity
im_min_bot = intensity_scan_bot.min()
im_max_bot = intensity_scan_bot.max()
scaling_factor_bot = (im_max_bot - im_min_bot)
intensity_scan_bot = (intensity_scan_bot - im_min_bot)
intensity_scan_bot = (intensity_scan_bot / scaling_factor_bot)
pl.plot(intensity_scan_bot)
pl.show()
pl.plot(intensity_scan_top)
pl.show()
if self.spatial == 0: # top
top = max(0, np.where(intensity_scan > 0.2)[0][0] - 30)
bottom = top + self.trench_length + 60
self.tops.append(top)
self.bottoms.append(bottom)
elif self.spatial == 1: # bottom
bottom = min(self.height,np.where(intensity_scan > 0.2)[0][-1] + 30)
top = bottom - self.trench_length - 60
self.tops.append(top)
self.bottoms.append(bottom)
else: # both
# top one
top = max(0, np.where(intensity_scan_top > 0.2)[0][0] - 30)
bottom = top + self.trench_length + 60
self.tops.append(top)
self.bottoms.append(bottom)
# bottom one
bottom = min(self.height,np.where(intensity_scan_bot > 0.2)[0][-1] + 30 + int(self.height/2))
top = bottom - self.trench_length - 60
self.tops.append(top)
self.bottoms.append(bottom)
# identify trenches
peak_ind_dict = {}
if self.spatial == 2:
for i in range(2):
im_trenches = perc[self.tops[i]:self.bottoms[i]]
im_trenches_perc = np.percentile(im_trenches, 80, axis=0)
# normalize intensity
im_min = im_trenches_perc.min()
im_max = im_trenches_perc.max()
scaling_factor = (im_max - im_min)
im_trenches_perc = (im_trenches_perc - im_min)
im_trenches_perc = (im_trenches_perc / scaling_factor)
peak_ind = self.detect_peaks(im_trenches_perc, mph=0.35, mpd=trench_width)
# corrected
peak_ind = np.array(self.peak_correct(peak_ind, im_trenches_perc))
if peak_ind[0] < (self.trench_length / 2):
peak_ind = peak_ind[1:]
if (self.width - peak_ind[-1]) < (self.trench_length / 2):
peak_ind = peak_ind[:-1]
left_ind = np.array(peak_ind) - int(self.trench_width / 2)
right_ind = peak_ind + int(self.trench_width / 2)
ind_list = list(zip(left_ind, right_ind))
ind_list = np.array(ind_list)
peak_ind_dict[i] = ind_list
else:
im_trenches = perc[self.tops[0]:self.bottoms[0]]
im_trenches_perc = np.percentile(im_trenches, 80, axis=0)
# normalize intensity
im_min = im_trenches_perc.min()
im_max = im_trenches_perc.max()
scaling_factor = (im_max - im_min)
im_trenches_perc = (im_trenches_perc - im_min)
im_trenches_perc = (im_trenches_perc / scaling_factor)
peak_ind = self.detect_peaks(im_trenches_perc, mph=0.35, mpd=trench_width)
if peak_ind[0] < (self.trench_length / 2):
peak_ind = peak_ind[1:]
if (self.width - peak_ind[-1]) < (self.trench_length / 2):
peak_ind = peak_ind[:-1]
left_ind = peak_ind - int(self.trench_width / 2)
right_ind = peak_ind + int(self.trench_width / 2)
ind_list = list(zip(left_ind, right_ind))
ind_list = np.array(ind_list)
peak_ind_dict[0] = ind_list
self.box_info = []
if self.spatial == 2:
print(len(peak_ind_dict[0]), len(peak_ind_dict[1]))
h5_name_top = "Lane_" + str(self.lane).zfill(2) + "_pos_" + str(self.pos).zfill(3) + "_top.h5"
self.box_info.append(h5_name_top)
hf_t = h5py.File(h5_name_top, 'w')
hf_t.create_dataset('box', data=peak_ind_dict[0])
hf_t.create_dataset('upper_index', data=self.tops[0])
hf_t.create_dataset('lower_index', data=self.bottoms[0])
hf_t.close()
h5_name_bottom = "Lane_" + str(self.lane).zfill(2) + "_pos_" + str(self.pos).zfill(3) + "_bottom.h5"
self.box_info.append(h5_name_bottom)
hf_b = h5py.File(h5_name_bottom, 'w')
hf_b.create_dataset('box', data=peak_ind_dict[1])
hf_b.create_dataset('upper_index', data=self.tops[1])
hf_b.create_dataset('lower_index', data=self.bottoms[1])
hf_b.close()
# print(peak_ind_dict)
else:
local = ['top', 'bottom']
h5_name = "Lane_" + str(self.lane).zfill(2) + "_pos_" + str(self.pos).zfill(3) + "_" + local[
self.spatial] + ".h5"
self.box_info.append(h5_name)
hf = h5py.File(h5_name, 'w')
hf.create_dataset('box', data=peak_ind_dict[0])
hf.create_dataset('upper_index', data=self.tops[0])
hf.create_dataset('lower_index', data=self.bottoms[0])
hf.close()
return
    def kymograph(self):
        """Cut every detected trench out of each frame and write kymographs.
        Reads trench bounding boxes from the .h5 files listed in self.box_info
        (reconstructing the expected file names when box_info is unset), then
        for each trench writes a per-frame 16-bit stack TIFF and a horizontally
        concatenated 16-bit kymograph TIFF into <file_path>/Kymographs.
        """
        if self.box_info == None:  # NOTE(review): prefer `is None` over `== None`
            self.box_info = []
            # reconstruct the box file names written by get_trenches
            if self.spatial == 2:
                h5_name_top = "Lane_" + str(self.lane).zfill(2) + "_pos_" + str(self.pos).zfill(3) + "_top.h5"
                self.box_info.append(h5_name_top)
                h5_name_bottom = "Lane_" + str(self.lane).zfill(2) + "_pos_" + str(self.pos).zfill(3) + "_bottom.h5"
                self.box_info.append(h5_name_bottom)
            else:
                local = ['top', 'bottom']
                h5_name = "Lane_" + str(self.lane).zfill(2) + "_pos_" + str(self.pos).zfill(3) + "_" + local[
                    self.spatial] + ".h5"
                self.box_info.append(h5_name)
        os.chdir(self.file_path)
        kymo_path = self.file_path + '/Kymographs'
        if not os.path.exists(kymo_path):
            os.makedirs(kymo_path)
        # one iteration per box file: i == 0 -> "top" names, i == 1 -> "bottom"
        for i in range(len(self.box_info)):
            hf = h5py.File(self.box_info[i], 'r')
            # NOTE(review): Dataset.value is deprecated and removed in h5py 3;
            # hf['box'][()] is the modern equivalent
            ind_list = hf.get('box').value
            upper_index = hf.get('upper_index').value
            # lower_index = hf.get('lower_index').value + 20
            lower_index = hf.get('lower_index').value
            hf.close()
            trench_num = len(ind_list)
            if trench_num > 0:
                # all_kymo[t_i] accumulates one (frames, height, width) stack per trench
                all_kymo = {}
                for t_i in range(trench_num):
                    all_kymo[t_i] = np.zeros((len(self.file_list), lower_index - upper_index, self.trench_width))
                # file_list = ori_files[self.frame_start:self.frame_limit]
                for f_i in range(len(self.file_list)):
                    try:
                        file_i = self.file_list[f_i]
                    except:  # NOTE(review): bare except; only IndexError seems plausible here
                        print("something is wrong")
                        continue
                    im_t = pl.imread(file_i)
                    if self.drift_correct == 1:
                        # correct for drift
                        move_x = self.drift_x[f_i]
                        move_y = self.drift_y[f_i]
                    else:
                        move_x = 0
                        move_y = 0
                    for t_i in range(trench_num):
                        trench_left, trench_right = ind_list[t_i]  # trench_right is unused
                        trench = np.zeros((lower_index - upper_index, self.trench_width))
                        # NOTE(review): the destination slice is wider than `trench`
                        # whenever trench_left+move_x > 0; numpy clips it to the array
                        # width, so the whole row is assigned — but a trench near the
                        # right image edge yields a narrower source and would raise a
                        # shape mismatch. Confirm intended.
                        trench[:,:max(0, trench_left+move_x)+self.trench_width] = im_t[upper_index+move_y:lower_index+move_y, max(0, trench_left+move_x):max(0, trench_left+move_x)+self.trench_width]
                        all_kymo[t_i][f_i] = trench.astype(np.uint16)
                for t_i in range(trench_num):
                    if i == 0:
                        trench_name = kymo_path + "/Lane_" + str(self.lane).zfill(
                            2) + "_pos_" + str(
                            self.pos).zfill(3) + "_trench_" + str(t_i + 1).zfill(2) + "_top_c_" + self.channel+".tiff"
                        trench_name_stack = kymo_path + "/Stack_Lane_" + str(self.lane).zfill(
                            2) + "_pos_" + str(
                            self.pos).zfill(3) + "_trench_" + str(t_i + 1).zfill(2) + "_top_c_" + self.channel + ".tiff"
                    else:
                        trench_name = kymo_path + "/Lane_" + str(self.lane).zfill(
                            2) + "_pos_" + str(
                            self.pos).zfill(3) + "_trench_" + str(t_i + 1).zfill(2) + "_bottom_c_"+ self.channel+".tiff"
                        trench_name_stack = kymo_path + "/Stack_Lane_" + str(self.lane).zfill(
                            2) + "_pos_" + str(
                            self.pos).zfill(3) + "_trench_" + str(t_i + 1).zfill(
                            2) + "_bottom_c_" + self.channel + ".tiff"
                    imsave(trench_name_stack,all_kymo[t_i].astype(np.uint16))
                    # concatenate frames side by side into one long kymograph image,
                    # then release the stack to bound peak memory
                    this_kymo = np.concatenate(all_kymo[t_i], axis=1).astype(np.uint16)
                    all_kymo[t_i] = None
                    out = PIL.Image.frombytes("I;16", (this_kymo.shape[1], this_kymo.shape[0]), this_kymo.tobytes())
                    out.save(trench_name)
            else:
                print("no trenches detected")
        return
def run_kymo(self):
self.get_file_list()
# identify
if self.find_correct == 1:
self.find_drift()
else:
print('wtf')
if self.drift_correct == 1:
self.read_drift()
if self.channel == seg_channel:
self.get_trenches()
self.kymograph()
else:
self.kymograph()
return
@staticmethod
def to_8_bit(im):
im_min = im.min()
im_max = im.max()
scaling_factor = (im_max - im_min)
im = (im - im_min)
im = (im * 255. / scaling_factor).astype(np.uint8)
return im
@staticmethod
def moveImage(im, move_x, move_y, pad=0):
"""
Moves the image without changing frame dimensions, and
pads the edges with given value (default=0).
"""
im_new = np.ones((im.shape[0], im.shape[1]), dtype=np.uint16) * pad
xbound = im.shape[1]
ybound = im.shape[0]
if move_x >= 0:
im_new[:, move_x:] = im[:, :xbound - move_x]
else:
im_new[:, :xbound + move_x] = im[:, -move_x:]
if move_y >= 0:
im_new[move_y:, :] = im[:ybound - move_y, :]
else:
im_new[:ybound + move_y, :] = im[-move_y:, :]
return im_new
@staticmethod
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='both', kpsh=False, valley=False, show=False, ax=None):
"""Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indeces of the peaks in `x`.
"""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, | |
indoor temperature above which ventilation is shutoff.
| Units: C
| Default value: 100.0
| value >= -100.0
| value <= 100.0
Args:
value (float): value for IDD Field `Maximum Indoor Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_indoor_temperature` or None if not set
"""
return self["Maximum Indoor Temperature"]
    # ------------------------------------------------------------------
    # The properties below read/write IDD fields through the DataObject
    # mapping interface (self["Field Name"]).  The defaults and limits
    # quoted in each docstring come from the IDD definition.
    # ------------------------------------------------------------------
    @maximum_indoor_temperature.setter
    def maximum_indoor_temperature(self, value=100.0):
        """Corresponds to IDD field `Maximum Indoor Temperature`"""
        self["Maximum Indoor Temperature"] = value
    @property
    def maximum_indoor_temperature_schedule_name(self):
        """field `Maximum Indoor Temperature Schedule Name`
        | This schedule contains the indoor temperature versus time above which
        | ventilation is shutoff.
        Args:
            value (str): value for IDD Field `Maximum Indoor Temperature Schedule Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `maximum_indoor_temperature_schedule_name` or None if not set
        """
        return self["Maximum Indoor Temperature Schedule Name"]
    @maximum_indoor_temperature_schedule_name.setter
    def maximum_indoor_temperature_schedule_name(self, value=None):
        """Corresponds to IDD field `Maximum Indoor Temperature Schedule
        Name`"""
        self["Maximum Indoor Temperature Schedule Name"] = value
    @property
    def delta_temperature(self):
        """field `Delta Temperature`
        | This is the temperature differential between indoor and outdoor below
        | which ventilation is shutoff.
        | Units: deltaC
        | Default value: -100.0
        | value >= -100.0
        Args:
            value (float): value for IDD Field `Delta Temperature`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            float: the value of `delta_temperature` or None if not set
        """
        return self["Delta Temperature"]
    @delta_temperature.setter
    def delta_temperature(self, value=-100.0):
        """Corresponds to IDD field `Delta Temperature`"""
        self["Delta Temperature"] = value
    @property
    def delta_temperature_schedule_name(self):
        """field `Delta Temperature Schedule Name`
        | This schedule contains the temperature differential between indoor and outdoor
        | versus time below which ventilation is shutoff.
        Args:
            value (str): value for IDD Field `Delta Temperature Schedule Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `delta_temperature_schedule_name` or None if not set
        """
        return self["Delta Temperature Schedule Name"]
    @delta_temperature_schedule_name.setter
    def delta_temperature_schedule_name(self, value=None):
        """Corresponds to IDD field `Delta Temperature Schedule Name`"""
        self["Delta Temperature Schedule Name"] = value
    @property
    def minimum_outdoor_temperature(self):
        """field `Minimum Outdoor Temperature`
        | This is the outdoor temperature below which ventilation is shutoff.
        | Units: C
        | Default value: -100.0
        | value >= -100.0
        | value <= 100.0
        Args:
            value (float): value for IDD Field `Minimum Outdoor Temperature`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            float: the value of `minimum_outdoor_temperature` or None if not set
        """
        return self["Minimum Outdoor Temperature"]
    @minimum_outdoor_temperature.setter
    def minimum_outdoor_temperature(self, value=-100.0):
        """Corresponds to IDD field `Minimum Outdoor Temperature`"""
        self["Minimum Outdoor Temperature"] = value
    @property
    def minimum_outdoor_temperature_schedule_name(self):
        """field `Minimum Outdoor Temperature Schedule Name`
        | This schedule contains the outdoor temperature versus time below which
        | ventilation is shutoff.
        Args:
            value (str): value for IDD Field `Minimum Outdoor Temperature Schedule Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `minimum_outdoor_temperature_schedule_name` or None if not set
        """
        return self["Minimum Outdoor Temperature Schedule Name"]
    @minimum_outdoor_temperature_schedule_name.setter
    def minimum_outdoor_temperature_schedule_name(self, value=None):
        """Corresponds to IDD field `Minimum Outdoor Temperature Schedule
        Name`"""
        self["Minimum Outdoor Temperature Schedule Name"] = value
    @property
    def maximum_outdoor_temperature(self):
        """field `Maximum Outdoor Temperature`
        | This is the outdoor temperature above which ventilation is shutoff.
        | Units: C
        | Default value: 100.0
        | value >= -100.0
        | value <= 100.0
        Args:
            value (float): value for IDD Field `Maximum Outdoor Temperature`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            float: the value of `maximum_outdoor_temperature` or None if not set
        """
        return self["Maximum Outdoor Temperature"]
    @maximum_outdoor_temperature.setter
    def maximum_outdoor_temperature(self, value=100.0):
        """Corresponds to IDD field `Maximum Outdoor Temperature`"""
        self["Maximum Outdoor Temperature"] = value
    @property
    def maximum_outdoor_temperature_schedule_name(self):
        """field `Maximum Outdoor Temperature Schedule Name`
        | This schedule contains the outdoor temperature versus time above which
        | ventilation is shutoff.
        Args:
            value (str): value for IDD Field `Maximum Outdoor Temperature Schedule Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `maximum_outdoor_temperature_schedule_name` or None if not set
        """
        return self["Maximum Outdoor Temperature Schedule Name"]
    @maximum_outdoor_temperature_schedule_name.setter
    def maximum_outdoor_temperature_schedule_name(self, value=None):
        """Corresponds to IDD field `Maximum Outdoor Temperature Schedule
        Name`"""
        self["Maximum Outdoor Temperature Schedule Name"] = value
    @property
    def maximum_wind_speed(self):
        """field `Maximum Wind Speed`
        | This is the outdoor wind speed above which ventilation is shutoff.
        | Units: m/s
        | Default value: 40.0
        | value <= 40.0
        Args:
            value (float): value for IDD Field `Maximum Wind Speed`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            float: the value of `maximum_wind_speed` or None if not set
        """
        return self["Maximum Wind Speed"]
    @maximum_wind_speed.setter
    def maximum_wind_speed(self, value=40.0):
        """Corresponds to IDD field `Maximum Wind Speed`"""
        self["Maximum Wind Speed"] = value
class ZoneAirBalanceOutdoorAir(DataObject):
    """ Corresponds to IDD object `ZoneAirBalance:OutdoorAir`
        Provide a combined zone outdoor air flow by including interactions between
        mechanical ventilation, infiltration and duct leakage.
        This object will combine outdoor flows from all ZoneInfiltration and
        ZoneVentilation objects in the same zone. Balanced flows will be summed, while
        unbalanced flows will be added in quadrature.
    """
    # Machine-generated field schema mirroring the IDD object definition;
    # field values are accessed through the DataObject mapping interface
    # (self["Field Name"]) in the properties below.
    _schema = {'extensible-fields': OrderedDict(),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'zone name',
                                       {'name': u'Zone Name',
                                        'pyname': u'zone_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'}),
                                      (u'air balance method',
                                       {'name': u'Air Balance Method',
                                        'pyname': u'air_balance_method',
                                        'default': u'Quadrature',
                                        'required-field': False,
                                        'autosizable': False,
                                        'accepted-values': [u'Quadrature',
                                                            u'None'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'induced outdoor air due to unbalanced duct leakage',
                                       {'name': u'Induced Outdoor Air Due to Unbalanced Duct Leakage',
                                        'pyname': u'induced_outdoor_air_due_to_unbalanced_duct_leakage',
                                        'default': 0.0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'm3/s'}),
                                      (u'induced outdoor air schedule name',
                                       {'name': u'Induced Outdoor Air Schedule Name',
                                        'pyname': u'induced_outdoor_air_schedule_name',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'})]),
               'format': None,
               'group': u'Zone Airflow',
               'min-fields': 0,
               'name': u'ZoneAirBalance:OutdoorAir',
               'pyname': u'ZoneAirBalanceOutdoorAir',
               'required-object': False,
               'unique-object': False}
    @property
    def name(self):
        """field `Name`
        Args:
            value (str): value for IDD Field `Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]
    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value
    @property
    def zone_name(self):
        """field `Zone Name`
        Args:
            value (str): value for IDD Field `Zone Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `zone_name` or None if not set
        """
        return self["Zone Name"]
    @zone_name.setter
    def zone_name(self, value=None):
        """Corresponds to IDD field `Zone Name`"""
        self["Zone Name"] = value
    @property
    def air_balance_method(self):
        """field `Air Balance Method`
        | None: Only perform simple calculations without using a combined zone outdoor air.
        | Quadrature: A combined outdoor air is used in the quadrature sum.
        | Default value: Quadrature
        Args:
            value (str): value for IDD Field `Air Balance Method`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `air_balance_method` or None if not set
        """
        return self["Air Balance Method"]
    @air_balance_method.setter
    def air_balance_method(self, value="Quadrature"):
        """Corresponds to IDD field `Air Balance Method`"""
        self["Air Balance Method"] = value
    @property
    def induced_outdoor_air_due_to_unbalanced_duct_leakage(self):
        """field `Induced Outdoor Air Due to Unbalanced Duct Leakage`
        | Units: m3/s
        Args:
            value (float): value for IDD Field `Induced Outdoor Air Due to Unbalanced Duct Leakage`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            float: the value of `induced_outdoor_air_due_to_unbalanced_duct_leakage` or None if not set
        """
        return self["Induced Outdoor Air Due to Unbalanced Duct Leakage"]
    @induced_outdoor_air_due_to_unbalanced_duct_leakage.setter
    def induced_outdoor_air_due_to_unbalanced_duct_leakage(self, value=None):
        """Corresponds to IDD field `Induced Outdoor Air Due to Unbalanced Duct
        Leakage`"""
        self["Induced Outdoor Air Due to Unbalanced Duct Leakage"] = value
    @property
    def induced_outdoor_air_schedule_name(self):
        """field `Induced Outdoor Air Schedule Name`
        | This schedule contains the fraction values applied to the Induced Outdoor Air given in the
        | previous input field (0.0 - 1.0).
        Args:
            value (str): value for IDD Field `Induced Outdoor Air Schedule Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `induced_outdoor_air_schedule_name` or None if not set
        """
        return self["Induced Outdoor Air Schedule Name"]
    @induced_outdoor_air_schedule_name.setter
    def induced_outdoor_air_schedule_name(self, value=None):
        """Corresponds to IDD field `Induced Outdoor Air Schedule Name`"""
        self["Induced Outdoor Air Schedule Name"] = value
class ZoneMixing(DataObject):
"""Corresponds to IDD | |
import keras
from keras import layers
import torch
import torch.nn as nn
import torchmetrics
from sklearn.preprocessing import *
import numpy as np
import aiqc
from aiqc import *
# Still required even with `*` above.
from aiqc import datum
name = "tests"  # module-level tag for this test-queue module; presumably consumed by aiqc — TODO confirm
def list_test_queues(format:str=None):
    """Return the catalog of runnable test queues.

    Args:
        format: None, 'pandas', 'df', or 'dataframe' for a pandas DataFrame;
            'list' or 'l' for the raw list of dicts.

    Raises:
        ValueError: if `format` is not one of the recognized values.

    Returns:
        pandas.DataFrame or list[dict]: one record per test queue.
    """
    queues = [
        {
            'queue_name': 'keras_multiclass'
            , 'data_type': 'tabular'
            , 'supervision': 'supervised'
            , 'analysis': 'classification'
            , 'sub_analysis': 'multi label'
            , 'datum': 'iris.tsv'
        },
        {
            'queue_name': 'keras_binary'
            , 'data_type': 'tabular'
            , 'supervision': 'supervised'
            , 'analysis': 'classification'
            , 'sub_analysis': 'binary'
            , 'datum': 'sonar.csv'
        },
        {
            'queue_name': 'keras_regression'
            , 'data_type': 'tabular'
            , 'supervision': 'supervised'
            , 'analysis': 'regression'
            , 'sub_analysis': None
            , 'datum': 'houses.csv'
        },
        {
            'queue_name': 'keras_image_binary'
            , 'data_type': 'image'
            , 'supervision': 'supervised'
            , 'analysis': 'classification'
            , 'sub_analysis': 'binary'
            , 'datum': 'brain_tumor.csv'
        },
        {
            'queue_name': 'keras_sequence_multiclass'
            , 'data_type': 'sequence'
            , 'supervision': 'supervised'
            , 'analysis': 'classification'
            , 'sub_analysis': 'binary'
            , 'datum': 'epilepsy.parquet'
        },
        {
            'queue_name': 'keras_tabular_forecast'
            , 'data_type': 'tabular'
            , 'supervision': 'unsupervised'
            , 'analysis': 'regression'
            , 'sub_analysis': 'windowed'
            , 'datum': 'dehli_climate.parquet'
        },
        {
            'queue_name': 'keras_image_forecast'
            , 'data_type': 'image'
            , 'supervision': 'unsupervised'
            , 'analysis': 'regression'
            , 'sub_analysis': 'windowed'
            , 'datum': 'liberty_moon.csv'
        },
        {
            'queue_name': 'pytorch_multiclass'
            , 'data_type': 'tabular'
            , 'supervision': 'supervised'
            , 'analysis': 'classification'
            , 'sub_analysis': 'multi label'
            , 'datum': 'iris.tsv'
        },
        {
            'queue_name': 'pytorch_binary'
            , 'data_type': 'tabular'
            , 'supervision': 'supervised'
            , 'analysis': 'classification'
            , 'sub_analysis': 'binary'
            , 'datum': 'sonar.csv'
        },
        {
            'queue_name': 'pytorch_regression'
            , 'data_type': 'tabular'
            , 'supervision': 'supervised'
            , 'analysis': 'regression'
            , 'sub_analysis': None
            , 'datum': 'houses.csv'
        },
        {
            'queue_name': 'pytorch_image_binary'
            , 'data_type': 'image'
            , 'supervision': 'supervised'
            , 'analysis': 'classification'
            , 'sub_analysis': 'binary'
            , 'datum': 'brain_tumor.csv'
        }
    ]
    formats_df = [None, 'pandas', 'df', 'dataframe']
    formats_lst = ['list', 'l']
    if (format in formats_df):
        # pandas is not imported at module level in this file's visible
        # imports, so bind it here where it is actually needed.
        import pandas as pd
        # fixed option name: 'display.max_column' only worked via pandas'
        # fuzzy option matching
        pd.set_option('display.max_columns', 100)
        pd.set_option('display.max_colwidth', 500)
        df = pd.DataFrame.from_records(queues)
        return df
    elif (format in formats_lst):
        return queues
    else:
        raise ValueError(f"\nYikes - The format you provided <{format}> is not one of the following:{formats_df} or {formats_lst}\n")
"""
Remember, `pickle` does not accept nested functions.
So the model_build and model_train functions must be defined outside of the function that accesses them.
For example when creating an `def test_method()... Algorithm.fn_build`
Each test takes a slightly different approach to `fn_optimizer`.
"""
# ------------------------ KERAS TABULAR MULTICLASS ------------------------
def keras_multiclass_fn_build(features_shape, label_shape, **hp):
    """Build the multiclass tabular classifier: two hidden relu layers
    (with dropout after the first) and a softmax output."""
    stack = [
        layers.Dense(units=features_shape[0], activation='relu', kernel_initializer='he_uniform'),
        layers.Dropout(0.2),
        layers.Dense(units=hp['neuron_count'], activation='relu', kernel_initializer='he_uniform'),
        layers.Dense(units=label_shape[0], activation='softmax'),
    ]
    model = keras.models.Sequential()
    for layer in stack:
        model.add(layer)
    return model
def keras_multiclass_fn_optimize(**hp):
    """Return an Adamax optimizer configured with the trial's learning rate."""
    return keras.optimizers.Adamax(hp['learning_rate'])
def keras_multiclass_fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp):
    """Compile and fit the multiclass model; returns the trained model."""
    model.compile(loss=loser, optimizer=optimizer, metrics=['accuracy'])
    validation_pair = (samples_evaluate["features"], samples_evaluate["labels"])
    model.fit(
        samples_train["features"],
        samples_train["labels"],
        validation_data=validation_pair,
        verbose=0,
        batch_size=hp['batch_size'],
        epochs=hp['epoch_count'],
        callbacks=[keras.callbacks.History()],
    )
    return model
def make_test_queue_keras_multiclass(repeat_count:int=1, fold_count:int=None):
    """Assemble an aiqc Queue for the keras multiclass (iris) test.
    Args:
        repeat_count: repeats per hyperparameter combination.
        fold_count: when set, use cross-validation folds (and the larger
            iris_10x dataset); otherwise a plain train/validation/test split.
    Returns:
        the constructed aiqc Queue.
    """
    hyperparameters = {
        "neuron_count": [9, 12]
        , "batch_size": [3]
        , "learning_rate": [0.03, 0.05]
        , "epoch_count": [30, 60]
    }
    # folded runs use the 10x-replicated iris so every fold has enough rows
    if fold_count is not None:
        file_path = datum.get_path('iris_10x.tsv')
    else:
        file_path = datum.get_path('iris.tsv')
    dataset = Dataset.Tabular.from_path(
        file_path = file_path
        , source_file_format = 'tsv'
        , dtype = None
    )
    label_column = 'species'
    label = dataset.make_label(columns=[label_column])
    feature = dataset.make_feature(exclude_columns=[label_column])
    # validation split is only used in the non-folded configuration
    if (fold_count is not None):
        size_test = 0.25
        size_validation = None
    elif (fold_count is None):
        size_test = 0.18
        size_validation = 0.14
    splitset = Splitset.make(
        feature_ids = [feature.id]
        , label_id = label.id
        , size_test = size_test
        , size_validation = size_validation
    )
    if fold_count is not None:
        foldset = splitset.make_foldset(
            fold_count = fold_count
        )
        foldset_id = foldset.id
    else:
        foldset_id = None
    encoderset = feature.make_encoderset()
    # NOTE(review): `sparse=` was renamed to `sparse_output=` in scikit-learn 1.2;
    # this call breaks on newer sklearn — confirm the pinned version.
    label.make_labelcoder(
        sklearn_preprocess = OneHotEncoder(sparse=False)
    )
    encoderset.make_featurecoder(
        sklearn_preprocess = StandardScaler(copy=False)
        , columns = ['petal_width']
    )
    encoderset.make_featurecoder(
        sklearn_preprocess = StandardScaler(copy=False)
        , dtypes = ['float64']
    )
    algorithm = Algorithm.make(
        library = "keras"
        , analysis_type = "classification_multi"
        , fn_build = keras_multiclass_fn_build
        , fn_optimize = keras_multiclass_fn_optimize
        , fn_train = keras_multiclass_fn_train
    )
    hyperparamset = algorithm.make_hyperparamset(
        hyperparameters = hyperparameters
    )
    queue = algorithm.make_queue(
        splitset_id = splitset.id
        , foldset_id = foldset_id
        , hyperparamset_id = hyperparamset.id
        , repeat_count = repeat_count
    )
    return queue
# ------------------------ KERAS TABULAR BINARY ------------------------
def keras_binary_fn_build(features_shape, label_shape, **hp):
    """Build the binary tabular classifier: three hidden relu layers
    (dropout after the first two) and a sigmoid output."""
    model = keras.models.Sequential()
    for hidden_idx in range(3):
        model.add(layers.Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform'))
        if hidden_idx < 2:
            model.add(layers.Dropout(0.30))
    model.add(layers.Dense(units=label_shape[0], activation='sigmoid', kernel_initializer='glorot_uniform'))
    return model
def keras_binary_fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp):
    """Compile and fit the binary model (fixed batch size of 3)."""
    model.compile(loss=loser, optimizer=optimizer, metrics=['accuracy'])
    model.fit(
        samples_train['features'], samples_train['labels'],
        validation_data=(samples_evaluate['features'], samples_evaluate['labels']),
        verbose=0,
        batch_size=3,
        epochs=hp['epochs'],
        callbacks=[keras.callbacks.History()],
    )
    return model
def make_test_queue_keras_binary(repeat_count:int=1, fold_count:int=None):
    """Assemble an aiqc Queue for the keras binary (sonar) test.
    Args:
        repeat_count: repeats per hyperparameter combination.
        fold_count: when set, use cross-validation folds instead of a
            train/validation/test split.
    Returns:
        the constructed aiqc Queue.
    """
    hyperparameters = {
        "neuron_count": [25, 50]
        , "epochs": [75, 150]
    }
    file_path = datum.get_path('sonar.csv')
    # NOTE(review): '<NAME>' looks like an unfilled template placeholder
    dataset = Dataset.Tabular.from_path(
        file_path = file_path
        , source_file_format = 'csv'
        , name = '<NAME>'
        , dtype = None
    )
    label_column = 'object'
    label = dataset.make_label(columns=[label_column])
    feature = dataset.make_feature(exclude_columns=[label_column])
    # validation split is only used in the non-folded configuration
    if (fold_count is not None):
        size_test = 0.25
        size_validation = None
    elif (fold_count is None):
        size_test = 0.18
        size_validation = 0.14
    splitset = Splitset.make(
        feature_ids = [feature.id]
        , label_id = label.id
        , size_test = size_test
        , size_validation = size_validation
    )
    if (fold_count is not None):
        foldset = splitset.make_foldset(
            fold_count = fold_count
        )
        foldset_id = foldset.id
    else:
        foldset_id = None
    label.make_labelcoder(
        sklearn_preprocess = LabelBinarizer(sparse_output=False)
    )
    encoderset = feature.make_encoderset()
    encoderset.make_featurecoder(
        sklearn_preprocess = PowerTransformer(method='yeo-johnson', copy=False)
        , dtypes = ['float64']
    )
    algorithm = Algorithm.make(
        library = "keras"
        , analysis_type = "classification_binary"
        , fn_build = keras_binary_fn_build
        , fn_train = keras_binary_fn_train
    )
    hyperparamset = algorithm.make_hyperparamset(
        hyperparameters = hyperparameters
    )
    queue = algorithm.make_queue(
        splitset_id = splitset.id
        , foldset_id = foldset_id
        , hyperparamset_id = hyperparamset.id
        , repeat_count = repeat_count
    )
    return queue
def make_test_queue_keras_text_binary(repeat_count:int=1, fold_count:int=None):
    """Assemble a binary text-classification test Queue on the spam dataset.

    Features are bag-of-words counts over the 'TextData' column; with
    `fold_count` set, folds replace the validation split.
    """
    search_space = {"neuron_count": [25, 50], "epochs": [75, 150]}
    csv_path = datum.get_path('spam.csv')
    text_dataset = Dataset.Text.from_path(
        file_path=csv_path,
        source_file_format='csv',
        name='text test dataset',
        dtype=None,
    )
    target = 'label'
    label = text_dataset.make_label(columns=[target])
    feature = text_dataset.make_feature(exclude_columns=[target])
    if fold_count is not None:
        size_test, size_validation = 0.25, None
    else:
        size_test, size_validation = 0.18, 0.14
    splitset = Splitset.make(
        feature_ids=[feature.id],
        label_id=label.id,
        size_test=size_test,
        size_validation=size_validation,
    )
    if fold_count is not None:
        foldset_id = splitset.make_foldset(fold_count=fold_count).id
    else:
        foldset_id = None
    label.make_labelcoder(sklearn_preprocess=LabelBinarizer(sparse_output=False))
    encoderset = feature.make_encoderset()
    encoderset.make_featurecoder(
        sklearn_preprocess=CountVectorizer(max_features=200),
        columns=['TextData'],
    )
    algorithm = Algorithm.make(
        library="keras",
        analysis_type="classification_binary",
        fn_build=keras_binary_fn_build,
        fn_train=keras_binary_fn_train,
    )
    hyperparamset = algorithm.make_hyperparamset(hyperparameters=search_space)
    return algorithm.make_queue(
        splitset_id=splitset.id,
        foldset_id=foldset_id,
        hyperparamset_id=hyperparamset.id,
        repeat_count=repeat_count,
    )
# ------------------------ KERAS TABULAR REGRESSION ------------------------
def keras_regression_fn_build(features_shape, label_shape, **hp):
    """Build an uncompiled dense regression network (hp uses 'neuron_count')."""
    hidden = hp['neuron_count']
    net = keras.models.Sequential()
    net.add(layers.Dense(units=hidden, kernel_initializer='normal', activation='relu'))
    net.add(layers.Dropout(0.15))
    net.add(layers.Dense(units=hidden, kernel_initializer='normal', activation='relu'))
    # Linear output layer sized to the label width.
    net.add(layers.Dense(units=label_shape[0], kernel_initializer='normal'))
    return net
def keras_regression_fn_optimize(**hp):
    """Return the optimizer for regression training (RMSprop with defaults)."""
    return keras.optimizers.RMSprop()
def keras_regression_fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp):
    """Compile and fit the regression model, then return it."""
    model.compile(loss=loser, optimizer=optimizer, metrics=['mean_squared_error'])
    validation_pair = (samples_evaluate['features'], samples_evaluate['labels'])
    model.fit(
        samples_train['features'],
        samples_train['labels'],
        validation_data=validation_pair,
        verbose=0,
        batch_size=3,
        epochs=hp['epochs'],
        callbacks=[keras.callbacks.History()],
    )
    return model
def make_test_queue_keras_regression(repeat_count:int=1, fold_count:int=None):
    """Assemble a regression test Queue on the houses dataset.

    NaNs are injected into the label and feature columns to exercise the
    Labelpolater / Featurepolater interpolation machinery.
    """
    hyperparameters = {
        "neuron_count": [24, 48],
        "epochs": [50, 75],
    }
    df = datum.to_pandas('houses.csv')
    # testing Labelpolater (we don't have a regression-sequence example yet).
    # FIX: use .loc instead of chained indexing (`df['price'][0] = ...`);
    # chained assignment raises SettingWithCopyWarning and is a silent no-op
    # under pandas copy-on-write. Also `np.nan` replaces `np.NaN`, which was
    # removed in NumPy 2.0.
    df.loc[[0, 5, 10], 'price'] = np.nan
    # testing Featurepolater 2D.
    df.loc[5, 'nox'] = np.nan
    df.loc[10, 'indus'] = np.nan
    df.loc[19, 'age'] = np.nan
    dataset = Dataset.Tabular.from_pandas(dataframe=df)
    label_column = 'price'
    label = dataset.make_label(columns=[label_column])
    label.make_labelpolater(
        interpolate_kwargs = dict(
            method = 'linear'
            , limit_direction = 'both'
            , limit_area = None
            , axis = 0
            , order = 1
        )
    )
    feature = dataset.make_feature(exclude_columns=[label_column])
    interpolaterset = feature.make_interpolaterset()
    interpolaterset.make_featurepolater(columns='nox')
    interpolaterset.make_featurepolater(dtypes='float64')
    if (fold_count is not None):
        size_test = 0.25
        size_validation = None
    else:
        size_test = 0.18
        size_validation = 0.14
    # bin_count stratifies continuous labels for splitting/folding.
    splitset = Splitset.make(
        feature_ids = [feature.id]
        , label_id = label.id
        , size_test = size_test
        , size_validation = size_validation
        , bin_count = 3
    )
    if fold_count is not None:
        foldset = splitset.make_foldset(
            fold_count = fold_count
            , bin_count = 3
        )
        foldset_id = foldset.id
    else:
        foldset_id = None
    label.make_labelcoder(
        sklearn_preprocess = PowerTransformer(method='box-cox', copy=False)
    )
    encoderset = feature.make_encoderset()
    encoderset.make_featurecoder(
        include = False
        , dtypes = ['int64']
        , sklearn_preprocess = MinMaxScaler(copy=False)
    )
    # We expect double None (dtypes,columns) to use all columns because nothing is excluded.
    encoderset.make_featurecoder(
        include = False
        , dtypes = None
        , columns = None
        , sklearn_preprocess = OrdinalEncoder()
    )
    algorithm = Algorithm.make(
        library = "keras"
        , analysis_type = "regression"
        , fn_build = keras_regression_fn_build
        , fn_train = keras_regression_fn_train
        , fn_optimize = keras_regression_fn_optimize
    )
    hyperparamset = algorithm.make_hyperparamset(
        hyperparameters = hyperparameters
    )
    queue = algorithm.make_queue(
        splitset_id = splitset.id
        , foldset_id = foldset_id
        , hyperparamset_id = hyperparamset.id
        , repeat_count = repeat_count
    )
    return queue
# ------------------------ KERAS IMAGE BINARY ------------------------
def keras_image_binary_fn_build(features_shape, label_shape, **hp):
    """Build a 1D-CNN binary classifier for image input.

    incoming features_shape = channels * rows * columns; the leading Reshape
    drops the channel axis so Conv1D receives `(steps, input_dim)`.
    """
    model = keras.models.Sequential()
    # https://keras.io/api/layers/reshaping_layers/reshape/
    # https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv1D
    # Conv1D shape = `batch_shape + (steps, input_dim)`
    model.add(layers.Reshape(
        (features_shape[1],features_shape[2])#,features_shape[0])#dropping
        , input_shape=features_shape)
    )
    # NOTE(review): `input_shape` on this Conv1D is redundant — only the first
    # layer's input_shape is used by Sequential; kept to avoid churn.
    model.add(layers.Conv1D(128*hp['neuron_multiply'], kernel_size=hp['kernel_size'], input_shape=features_shape, padding='same', activation='relu', kernel_initializer=hp['cnn_init']))
    model.add(layers.MaxPooling1D(pool_size=hp['pool_size']))
    model.add(layers.Dropout(hp['dropout']))
    model.add(layers.Conv1D(256*hp['neuron_multiply'], kernel_size=hp['kernel_size'], padding='same', activation='relu', kernel_initializer=hp['cnn_init']))
    model.add(layers.MaxPooling1D(pool_size=hp['pool_size']))
    model.add(layers.Dropout(hp['dropout']))
    model.add(layers.Flatten())
    model.add(layers.Dense(hp['dense_neurons']*hp['neuron_multiply'], activation='relu'))
    model.add(layers.Dropout(0.2))
    # FIX: truthiness instead of `== True` (PEP 8); behavior unchanged for the
    # boolean hyperparameter values used here.
    if hp['include_2nd_dense']:
        model.add(layers.Dense(hp['2nd_dense_neurons'], activation='relu'))
    model.add(layers.Dense(units=label_shape[0], activation='sigmoid'))
    return model
def keras_image_binary_fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp):
    """Compile and fit with metric-cutoff early stopping; return the model."""
    model.compile(optimizer=optimizer, loss=loser, metrics=['accuracy'])
    # Stop training once all of these metric thresholds are satisfied.
    cutoff_specs = [
        {"metric":"val_accuracy", "cutoff":0.70, "above_or_below":"above"},
        {"metric":"accuracy", "cutoff":0.70, "above_or_below":"above"},
        {"metric":"val_loss", "cutoff":0.50, "above_or_below":"below"},
        {"metric":"loss", "cutoff":0.50, "above_or_below":"below"},
    ]
    cutoff_callback = aiqc.TrainingCallback.Keras.MetricCutoff(cutoff_specs)
    model.fit(
        samples_train["features"],
        samples_train["labels"],
        validation_data=(samples_evaluate["features"], samples_evaluate["labels"]),
        verbose=0,
        batch_size=hp['batch_size'],
        callbacks=[keras.callbacks.History(), cutoff_callback],
        epochs=hp['epoch_count'],
    )
    return model
def make_test_queue_keras_image_binary(repeat_count:int=1, fold_count:int=None):
    """Assemble a binary image-classification test Queue (brain tumor data).

    Labels come from a tabular manifest; features come from the remote
    images referenced by the same manifest.
    """
    search_space = {
        "include_2nd_dense": [True],
        "neuron_multiply": [1.0],
        "epoch_count": [250],
        "learning_rate": [0.01],
        "pool_size": [2],
        "dropout": [0.4],
        "batch_size": [8],
        "kernel_size": [3],
        "dense_neurons": [64],
        "2nd_dense_neurons": [24, 16],
        "cnn_init": ['he_normal', 'he_uniform'],
    }
    manifest_df = datum.to_pandas(name='brain_tumor.csv')
    # Dataset.Tabular supplies the labels.
    dataset_tabular = Dataset.Tabular.from_pandas(dataframe=manifest_df)
    label = dataset_tabular.make_label(columns=['status'])
    # Dataset.Image supplies the features.
    image_urls = datum.get_remote_urls(manifest_name='brain_tumor.csv')
    dataset_image = Dataset.Image.from_urls_pillow(urls=image_urls)
    feature = dataset_image.make_feature()
    if fold_count is not None:
        size_test, size_validation = 0.25, None
    else:
        size_test, size_validation = 0.18, 0.14
    splitset = Splitset.make(
        feature_ids=[feature.id],
        label_id=label.id,
        size_test=size_test,
        size_validation=size_validation,
    )
    if fold_count is not None:
        foldset_id = splitset.make_foldset(fold_count=fold_count).id
    else:
        foldset_id = None
    algorithm = Algorithm.make(
        library="keras",
        analysis_type="classification_binary",
        fn_build=keras_image_binary_fn_build,
        fn_train=keras_image_binary_fn_train,
    )
    hyperparamset = algorithm.make_hyperparamset(hyperparameters=search_space)
    return algorithm.make_queue(
        splitset_id=splitset.id,
        foldset_id=foldset_id,
        hyperparamset_id=hyperparamset.id,
        repeat_count=repeat_count,
    )
# ------------------------ KERAS SEQUENCE BINARY ------------------------
def keras_sequence_binary_fn_build(features_shape, label_shape, **hp):
    """Build an LSTM binary classifier over sequence input."""
    net = keras.models.Sequential()
    net.add(keras.layers.LSTM(
        hp['neuron_count'],
        input_shape=(features_shape[0], features_shape[1]),
    ))
    net.add(keras.layers.Dense(units=label_shape[0], activation='sigmoid'))
    return net
def keras_sequence_binary_fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp):
    """Compile and fit the sequence classifier; return the trained model."""
    model.compile(loss=loser, optimizer=optimizer, metrics=['accuracy'])
    holdout = (samples_evaluate['features'], samples_evaluate['labels'])
    model.fit(
        samples_train['features'],
        samples_train['labels'],
        validation_data=holdout,
        verbose=0,
        batch_size=hp['batch_size'],
        epochs=hp['epochs'],
        callbacks=[keras.callbacks.History()],
    )
    return model
def make_test_queue_keras_sequence_binary(repeat_count:int=1, fold_count:int=None):
    """Assemble a binary sequence-classification test Queue (epilepsy data).

    NaNs are injected into sensor columns to exercise 3D Featurepolater
    interpolation before the data is reshaped into (samples, steps, dims).
    """
    df = datum.to_pandas('epilepsy.parquet')
    # testing Featurepolater 3D.
    # FIX: use .loc instead of chained indexing (`df['sensor_1'][999] = ...`);
    # chained assignment raises SettingWithCopyWarning and is a silent no-op
    # under pandas copy-on-write. Also `np.nan` replaces `np.NaN`, which was
    # removed in NumPy 2.0.
    df.loc[[999, 0], 'sensor_1'] = np.nan
    df.loc[130, 'sensor_150'] = np.nan
    df.loc[22, 'sensor_152'] = np.nan
    df.loc[0, 'sensor_170'] = np.nan
    label_df = df[['seizure']]
    dataset_tab = aiqc.Dataset.Tabular.from_pandas(label_df)
    label = dataset_tab.make_label(columns='seizure')
    # 1000 samples x 178 timesteps x 1 dimension.
    sensor_arr3D = df.drop(columns=['seizure']).to_numpy().reshape(1000,178,1).astype('float64')
    sensor_dataset = aiqc.Dataset.Sequence.from_numpy(sensor_arr3D)
    feature = sensor_dataset.make_feature()
    interpolaterset = feature.make_interpolaterset()
    interpolaterset.make_featurepolater(dtypes="float64")
    encoderset = feature.make_encoderset()
    encoderset.make_featurecoder(
        sklearn_preprocess = StandardScaler()
        , columns = ['0']
    )
    if (fold_count is not None):
        size_test = 0.25
        size_validation = None
    else:
        size_test = 0.22
        size_validation = 0.12
    splitset = Splitset.make(
        feature_ids = [feature.id]
        , label_id = label.id
        , size_test = size_test
        , size_validation = size_validation
    )
    if (fold_count is not None):
        foldset = splitset.make_foldset(
            fold_count = fold_count
        )
        foldset_id = foldset.id
    else:
        foldset_id = None
    algorithm = aiqc.Algorithm.make(
        library = "keras"
        , analysis_type = "classification_binary"
        , fn_build = keras_sequence_binary_fn_build
        , fn_train = keras_sequence_binary_fn_train
    )
    hyperparameters = {
        "neuron_count": [25]
        , "batch_size": [8]
        , "epochs": [5]
    }
    hyperparamset = algorithm.make_hyperparamset(
        hyperparameters = hyperparameters
    )
    queue = algorithm.make_queue(
        splitset_id = splitset.id
        , hyperparamset_id = hyperparamset.id
        , repeat_count = repeat_count
        , foldset_id = foldset_id
    )
    return queue
# ------------------------ KERAS TABULAR FORECAST ------------------------
def keras_tabular_forecast_fn_build(features_shape, label_shape, **hp):
    """Build a GRU forecaster whose output is reshaped back to 3D windows."""
    flat_width = label_shape[0] * label_shape[1]
    net = keras.models.Sequential()
    net.add(keras.layers.GRU(
        hp['neuron_count'],
        input_shape=(features_shape[0], features_shape[1]),
        return_sequences=False,
        activation='tanh',
    ))
    # Dense layers flatten automatically.
    net.add(keras.layers.Dense(flat_width * hp['dense_multiplier'], activation='tanh'))
    net.add(keras.layers.Dropout(0.3))
    net.add(keras.layers.Dense(flat_width, activation='tanh'))
    net.add(keras.layers.Dropout(0.3))
    # Restore the (timesteps, features) window shape.
    net.add(keras.layers.Reshape((label_shape[0], label_shape[1])))
    return net
def keras_tabular_forecast_fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp):
model.compile(
loss=loser
, optimizer=optimizer
, metrics=['mean_squared_error']
)
model.fit(
samples_train['features'], samples_train['features']
, | |
# repository: kufusha/cabot
#!/usr/bin/env python3
# Copyright (c) 2021 IBM Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import json
import os
import sys
from matplotlib import pyplot as plt
from matplotlib.path import Path
import math
import numpy as np
from geometry_msgs.msg import Point, Point32, Polygon, PolygonStamped, Pose, PoseStamped
from people_msgs.msg import People, Person
from queue_msgs.msg import Queue
import rospy
from std_msgs.msg import Header, String
import tf
import tf2_ros
import tf2_geometry_msgs
from visualization_msgs.msg import Marker, MarkerArray
from queue_utils_py import geometry_utils
from queue_utils_py import navigate_utils
from queue_utils_py import file_utils
class DetectQueuePeople():
    """Tracks people waiting in a single annotated queue.

    Given a queue annotation (an expected queue path and an obstacle polygon,
    both in this queue's frame), this class filters detected people down to
    those actually standing in the queue, sorts them by distance from the
    queue head, and adjusts the expected queue path toward where people are
    really standing.
    """

    def __init__(self, queue_name, frame_id, queue_annotation, queue_velocity_threshold, queue_distance_threshold, queue_adjust_tolerance, dist_interval_queue_navigate_path):
        # Identity and raw annotation for this queue.
        self.queue_name = queue_name
        self.frame_id = frame_id
        self.queue_annotation = queue_annotation
        # queue_velocity_threshold: max speed not explained by motion along the path.
        # queue_distance_threshold: max distance from the expected path.
        # queue_adjust_tolerance: minimum move before the path is adjusted.
        self.queue_velocity_threshold = queue_velocity_threshold
        self.queue_distance_threshold = queue_distance_threshold
        self.queue_adjust_tolerance = queue_adjust_tolerance
        self.dist_interval_queue_navigate_path = dist_interval_queue_navigate_path
        rospy.loginfo("initialized DetectQueuePeople, frame_id = " + str(self.frame_id) + ", queue_annotation = " + str(queue_annotation))

    def update_frame_id(self, update_frame_id):
        """Activate the annotation when the node's frame matches this queue's
        frame; otherwise clear all derived state."""
        if self.frame_id == update_frame_id:
            self.queue_expected_path_pose_array = self.queue_annotation["queue_expected_path"]
            self.adjusted_queue_expected_path_pose_array = copy.deepcopy(self.queue_expected_path_pose_array)
            self.queue_obstacle_point_array = self.queue_annotation["queue_obstacle_polygon"]
            self.queue_obstacle_path = Path(self.queue_obstacle_point_array)
            self.queue_obstacle_polygon_msg = Polygon()
            for point in self.queue_obstacle_point_array:
                self.queue_obstacle_polygon_msg.points.append(Point32(x=point[0], y=point[1], z=0.0))
            # create line segments for queue expected path
            self.queue_expected_path_line_segments = []
            self.queue_expected_path_line_segments_length = []
            self.queue_expected_path_line_segments_norm_vec = []
            for idx, queue_expected_path_pose in enumerate(self.queue_expected_path_pose_array):
                if idx < len(self.queue_expected_path_pose_array) - 1:
                    point1 = np.array([self.queue_expected_path_pose_array[idx].position.x, self.queue_expected_path_pose_array[idx].position.y])
                    point2 = np.array([self.queue_expected_path_pose_array[idx+1].position.x, self.queue_expected_path_pose_array[idx+1].position.y])
                    self.queue_expected_path_line_segments.append((point1, point2))
                    self.queue_expected_path_line_segments_length.append(np.linalg.norm(point2-point1))
                    self.queue_expected_path_line_segments_norm_vec.append((point2-point1)/np.linalg.norm(point2-point1))
            # Adjusted copies start identical and are moved toward people in people_cb.
            self.adjusted_queue_expected_path_line_segments = copy.deepcopy(self.queue_expected_path_line_segments)
            self.adjusted_queue_expected_path_line_segments_length = copy.deepcopy(self.queue_expected_path_line_segments_length)
            self.adjusted_queue_expected_path_line_segments_norm_vec = copy.deepcopy(self.queue_expected_path_line_segments_norm_vec)
        else:
            self.queue_expected_path_pose_array = None
            self.adjusted_queue_expected_path_pose_array = None
            self.queue_obstacle_point_array = None
            self.queue_obstacle_path = None
            self.queue_obstacle_polygon_msg = None
            self.queue_expected_path_line_segments = None
            self.queue_expected_path_line_segments_length = None
            self.queue_expected_path_line_segments_norm_vec = None
            self.adjusted_queue_expected_path_line_segments = None
            self.adjusted_queue_expected_path_line_segments_length = None
            self.adjusted_queue_expected_path_line_segments_norm_vec = None

    # get head-tail queue position in map frame
    def get_head_tail_from_queue(self, sorted_queue_people_position_list):
        """Return [head_position, tail_position]; empty list if queue is empty."""
        head_tail_position = []
        if len(sorted_queue_people_position_list) == 1:
            # get same position as head and tail person position
            for idx in range(2):
                head_tail_position.append(sorted_queue_people_position_list[0])
        elif len(sorted_queue_people_position_list) > 1:
            for idx in range(2):
                if idx == 0:
                    # get head person position
                    person_idx = 0
                else:
                    # get tail person position
                    person_idx = -1
                head_tail_position.append(sorted_queue_people_position_list[person_idx])
        return head_tail_position

    def people_cb(self, people, map_people_pose_stamped_list):
        """Classify detected people into the queue and update queue geometry.

        Returns a 5-tuple: (sorted names, sorted positions, [head, tail]
        positions, navigate poses for the annotated path, navigate poses for
        the people-adjusted path). Returns None if annotation is inactive.
        """
        if self.queue_expected_path_pose_array is None or self.adjusted_queue_expected_path_pose_array is None:
            rospy.logerr("floor queue data is not specified.")
            return
        # step1 : find people in queue
        # select people who are close enough to queue expected path
        queue_people_name_list = []
        queue_people_position_list = []
        queue_closest_path_segment_idx_list = []
        queue_closest_path_point_list = []
        for person, map_person_pose_stamped in zip(people, map_people_pose_stamped_list):
            map_person_pos = np.array([map_person_pose_stamped.pose.position.x, map_person_pose_stamped.pose.position.y])
            # confirm person is not in obstacle area
            if not self.queue_obstacle_path.contains_point(map_person_pos):
                # find the closest queue expected path segment
                min_dist = None
                min_dist_segment_idx = None
                min_dist_closest_path_point = None
                for segment_idx in range(len(self.queue_expected_path_line_segments)):
                    line_segment = self.queue_expected_path_line_segments[segment_idx]
                    # find the closest point from person to queue expected path segments
                    closest_path_point = geometry_utils.get_closest_point_to_line(map_person_pos, line_segment[0], line_segment[1])
                    # confirm that line from person to the closest point does not intersect with queue obstacle area
                    person_to_line_point_array = []
                    person_to_line_point_array.append(map_person_pos.tolist())
                    person_to_line_point_array.append(closest_path_point.tolist())
                    person_to_line_path = Path(person_to_line_point_array)
                    if not self.queue_obstacle_path.intersects_path(person_to_line_path, filled=False):
                        # get distance from person to queue expected path segments
                        dist = np.linalg.norm(closest_path_point - map_person_pos)
                        if (dist < self.queue_distance_threshold) and ((min_dist is None) or (dist < min_dist)):
                            min_dist = dist
                            min_dist_segment_idx = segment_idx
                            min_dist_closest_path_point = closest_path_point
                # queue person is found
                if (min_dist is not None) and (min_dist_segment_idx is not None) and (min_dist_closest_path_point is not None):
                    # project person velocity vector to closest queue expected path segment
                    vec_person_vel = np.array([person.velocity.x, person.velocity.y])
                    norm_vec_min_dist_line_segment = self.queue_expected_path_line_segments_norm_vec[min_dist_segment_idx]
                    projected_vec_person_vel = np.dot(vec_person_vel, norm_vec_min_dist_line_segment)
                    # speed not explained by motion along the queue path
                    sub_projected_vec_person_vel = np.linalg.norm(vec_person_vel) - projected_vec_person_vel
                    # check if person is moving fast in a different direction to queue path
                    if sub_projected_vec_person_vel < self.queue_velocity_threshold:
                        queue_people_name_list.append(person.name)
                        queue_people_position_list.append(map_person_pose_stamped.pose.position)
                        queue_closest_path_segment_idx_list.append(min_dist_segment_idx)
                        queue_closest_path_point_list.append(min_dist_closest_path_point)
                    else:
                        rospy.loginfo("person is not moving along queue path, name = " + person.name + ", velocity = " + str(sub_projected_vec_person_vel))
                else:
                    rospy.loginfo("person is not close to queue expected path, name = " + person.name)
            else:
                rospy.loginfo("person is in obstacle area, name = " + person.name)
        # step2 : sort people in queue
        # sort people by distance between people's position and queue head point
        if len(queue_people_name_list) > 0 and self.queue_expected_path_pose_array:
            dist_from_start_list = []
            for person_idx in range(len(queue_people_name_list)):
                # each person's position is mapped to the closest point in queue expected path
                dist = geometry_utils.get_distance_to_queue_head(queue_closest_path_segment_idx_list[person_idx], queue_closest_path_point_list[person_idx],
                                                                 self.queue_expected_path_line_segments, self.queue_expected_path_line_segments_length)
                dist_from_start_list.append(dist)
            sort_queue_people_idx_list = np.argsort(np.array(dist_from_start_list))
            sorted_queue_people_name_list = [queue_people_name_list[idx] for idx in sort_queue_people_idx_list]
            sorted_queue_people_position_list = [queue_people_position_list[idx] for idx in sort_queue_people_idx_list]
            sorted_queue_closest_path_segment_idx_list = [queue_closest_path_segment_idx_list[idx] for idx in sort_queue_people_idx_list]
        else:
            sorted_queue_people_name_list = queue_people_name_list
            sorted_queue_people_position_list = queue_people_position_list
            sorted_queue_closest_path_segment_idx_list = queue_closest_path_segment_idx_list
        # step3 : calculate head tail position
        head_tail_position = self.get_head_tail_from_queue(sorted_queue_people_position_list)
        # step4 : calculate pose list for navigating the annotated queue path
        navigate_pose_list = navigate_utils.calc_navigate_pose_list(self.queue_expected_path_pose_array, self.dist_interval_queue_navigate_path)
        # step5 : calculate key queue navigate poses using queue people's positions
        if len(sorted_queue_people_name_list) > 0:
            # bucket people by the path segment they are closest to
            path_segment_idx_people_name_dict = {}
            path_segment_idx_people_position_dict = {}
            for (person_name, person_position, path_segment_idx) in zip(sorted_queue_people_name_list, sorted_queue_people_position_list, sorted_queue_closest_path_segment_idx_list):
                if path_segment_idx not in path_segment_idx_people_name_dict:
                    path_segment_idx_people_name_dict[path_segment_idx] = [person_name]
                    path_segment_idx_people_position_dict[path_segment_idx] = [person_position]
                else:
                    path_segment_idx_people_name_dict[path_segment_idx].append(person_name)
                    path_segment_idx_people_position_dict[path_segment_idx].append(person_position)
            if len(sorted_queue_closest_path_segment_idx_list) > 0:
                # select the path segment where tail person is close
                segment_idx = sorted_queue_closest_path_segment_idx_list[-1]
                # if the selected path segment is not the last segment (not close to the cashier), move it using people's positions
                if (segment_idx < len(self.adjusted_queue_expected_path_line_segments)-1) and (segment_idx in path_segment_idx_people_name_dict) and (segment_idx in path_segment_idx_people_position_dict):
                    people_position_list = path_segment_idx_people_position_dict[segment_idx]
                    # calculate average pose diff
                    diff_pose_mat = None
                    for person_position in people_position_list:
                        line_segment = self.adjusted_queue_expected_path_line_segments[segment_idx]
                        perpendicular_point = geometry_utils.get_perpendicular_point_to_line(np.array([person_position.x, person_position.y]), line_segment[0], line_segment[1])
                        diff_pose_vec = np.array([[person_position.x-perpendicular_point[0], person_position.y-perpendicular_point[1]]])
                        if diff_pose_mat is None:
                            diff_pose_mat = diff_pose_vec
                        else:
                            diff_pose_mat = np.vstack((diff_pose_mat, diff_pose_vec))
                    if diff_pose_mat.shape[0] == 1:
                        pose_move_vec = diff_pose_mat[0]
                    else:
                        # BUG FIX: average across people (rows), i.e. axis=0.
                        # The previous axis=1 produced one scalar per person
                        # (shape (N,)) instead of a 2D move vector, and the
                        # np.dot projections below would fail for N != 2.
                        pose_move_vec = np.mean(diff_pose_mat, axis=0)
                    # confirm adjusted path segments will move larger than queue_adjust_tolerance
                    if np.linalg.norm(pose_move_vec) > self.queue_adjust_tolerance:
                        if segment_idx == 0:
                            proj_pose_move_vec = pose_move_vec
                        else:
                            # constrain the segment-start move to the direction of the previous segment
                            prev_line_segment = self.adjusted_queue_expected_path_line_segments[segment_idx-1]
                            vec_prev_line_segment = np.array(prev_line_segment[1]) - np.array(prev_line_segment[0])
                            norm_vec_prev_line_segment = np.linalg.norm(vec_prev_line_segment)
                            projection = np.dot(pose_move_vec, vec_prev_line_segment)/norm_vec_prev_line_segment
                            proj_pose_move_vec = vec_prev_line_segment * (projection/norm_vec_prev_line_segment)
                        moved_pose_position = [self.adjusted_queue_expected_path_pose_array[segment_idx].position.x+proj_pose_move_vec[0], self.adjusted_queue_expected_path_pose_array[segment_idx].position.y+proj_pose_move_vec[1]]
                        # move poses for the path segment where tail person is close
                        next_line_segment = self.adjusted_queue_expected_path_line_segments[segment_idx+1]
                        vec_next_line_segment = np.array(next_line_segment[1]) - np.array(next_line_segment[0])
                        norm_vec_next_line_segment = np.linalg.norm(vec_next_line_segment)
                        projection = np.dot(pose_move_vec, vec_next_line_segment)/norm_vec_next_line_segment
                        proj_pose_move_vec = vec_next_line_segment * (projection/norm_vec_next_line_segment)
                        moved_next_pose_position = [self.adjusted_queue_expected_path_pose_array[segment_idx+1].position.x+proj_pose_move_vec[0], self.adjusted_queue_expected_path_pose_array[segment_idx+1].position.y+proj_pose_move_vec[1]]
                        moved_path_point_array = []
                        moved_path_point_array.append(moved_pose_position)
                        moved_path_point_array.append(moved_next_pose_position)
                        moved_path = Path(moved_path_point_array)
                        # confirm that moved path segments does not intersect with queue obstacle area
                        if not self.queue_obstacle_path.intersects_path(moved_path, filled=False):
                            self.adjusted_queue_expected_path_pose_array[segment_idx].position.x = moved_pose_position[0]
                            self.adjusted_queue_expected_path_pose_array[segment_idx].position.y = moved_pose_position[1]
                            self.adjusted_queue_expected_path_pose_array[segment_idx+1].position.x = moved_next_pose_position[0]
                            self.adjusted_queue_expected_path_pose_array[segment_idx+1].position.y = moved_next_pose_position[1]
            # calculate queue path segments for adjusted queue expected pose lists
            for idx, pose in enumerate(self.adjusted_queue_expected_path_pose_array):
                if idx < len(self.adjusted_queue_expected_path_pose_array)-1:
                    point1 = np.array([self.adjusted_queue_expected_path_pose_array[idx].position.x, self.adjusted_queue_expected_path_pose_array[idx].position.y])
                    point2 = np.array([self.adjusted_queue_expected_path_pose_array[idx+1].position.x, self.adjusted_queue_expected_path_pose_array[idx+1].position.y])
                    self.adjusted_queue_expected_path_line_segments[idx] = (point1, point2)
                    self.adjusted_queue_expected_path_line_segments_length[idx] = np.linalg.norm(point2-point1)
            # update key pose orientation as they matches to moved pose lists
            for pose_idx, pose in enumerate(self.adjusted_queue_expected_path_pose_array):
                if pose_idx < len(self.adjusted_queue_expected_path_pose_array)-1:
                    next_pose = self.adjusted_queue_expected_path_pose_array[pose_idx+1]
                    pose_orientation = math.atan2(next_pose.position.y-pose.position.y, next_pose.position.x-pose.position.x)
                    pose_orientation_quat = tf.transformations.quaternion_from_euler(0.0, 0.0, pose_orientation)
                    pose.orientation.x = pose_orientation_quat[0]
                    pose.orientation.y = pose_orientation_quat[1]
                    pose.orientation.z = pose_orientation_quat[2]
                    pose.orientation.w = pose_orientation_quat[3]
        # step6 : calculate adjusted pose list for navigating queue
        adjusted_navigate_pose_list = navigate_utils.calc_navigate_pose_list(self.adjusted_queue_expected_path_pose_array, self.dist_interval_queue_navigate_path)
        return sorted_queue_people_name_list, sorted_queue_people_position_list, head_tail_position, navigate_pose_list, adjusted_navigate_pose_list
class DetectQueuePeopleNode():
def __init__(self, detect_queue_people_list, debug_without_mf_localization, debug_queue_annotation_map_frame, n_colors=100):
self.detect_queue_people_list = detect_queue_people_list
self.debug_without_mf_localization = debug_without_mf_localization
self.debug_queue_annotation_map_frame = debug_queue_annotation_map_frame
# start initialization
rospy.init_node('detect_queue_people_py', anonymous=True)
if self.debug_without_mf_localization:
# when debug without multi floor localization is set, load specified queue annotation
rospy.loginfo("debug_queue_annotation_map_frame = " + debug_queue_annotation_map_frame)
for detect_queue_people in self.detect_queue_people_list:
detect_queue_people.update_frame_id(debug_queue_annotation_map_frame)
self.current_frame = | |
# encoding=utf-8
r"""
The :code:`api.py` file stores all the :code:`Petfinder` class and all associated functions and methods for
interacting with the Petfinder API. Before getting started with :code:`petpy`, please be sure to obtain an
API and secret key from Petfinder by registering for an account on the Petfinder developers page at
https://www.petfinder.com/developers/
"""
from pandas import DataFrame
from pandas.io.json import json_normalize
import requests
from urllib.parse import urljoin
#################################################################################################################
#
# Petfinder Class
#
#################################################################################################################
class Petfinder(object):
r"""
Wrapper class for the PetFinder API.
Attributes
----------
host : str
The base URL of the Petfinder API.
key : str
The key from the Petfinder API passed when the :code:`Petfinder` class is initialized.
secret : str
The secret key obtained from the Petfinder API passed when the :code:`Petfinder` class is initialized.
auth : str
The authorization token string returned when the connection to the Petfinder API is made with the specified
:code:`key` and :code:`secret`.
Methods
-------
animal_types(types, return_df=False)
Returns data on an animal type, or types, available from the Petfinder API.
breeds(types=None, return_df=False, raw_results=False)
Returns available breeds of specified animal type(s) from the Petfinder API.
animals(animal_id=None, animal_type=None, breed=None, size=None, gender=None, age=None, color=None,
coat=None, status=None, name=None, organization_id=None, location=None, distance=None,
sort=None, pages=None, results_per_page=20, return_df=False)
Finds adoptable animals based on given criteria.
organizations(organization_id=None, name=None, location=None, distance=None, state=None, country=None,
query=None, sort=None, results_per_page=20, pages=None, return_df=False)
Finds animal organizations based on specified criteria in the Petfinder API database.
"""
def __init__(self, key, secret):
    r"""
    Initialization method of the :code:`Petfinder` class.

    Parameters
    ----------
    key : str
        API key given after `registering on the PetFinder site <https://www.petfinder.com/developers/api-key>`_
    secret : str
        Secret API key given in addition to general API key. The secret key is required as of V2 of
        the PetFinder API and is obtained from the Petfinder website at the same time as the access key.
    """
    self.key = key
    self.secret = secret
    # FIX: the Petfinder v2 API is served over TLS only; the previous
    # 'http://' base caused the OAuth token POST to be redirected/rejected.
    self._host = 'https://api.petfinder.com/v2/'
    # Fetch the OAuth2 bearer token immediately (valid for one hour).
    self._auth = self._authenticate()
def _authenticate(self):
    r"""
    Internal function for authenticating users to the Petfinder API.

    Raises
    ------
    PetfinderInvalidCredentials
        If the API responds with HTTP 401 (bad key/secret).

    Returns
    -------
    str
        Access token granted by the Petfinder API. The access token stays live for 3600 seconds,
        or one hour, at which point the user must reauthenticate.
    """
    token_url = urljoin(self._host, 'oauth2/token')
    payload = {
        'grant_type': 'client_credentials',
        'client_id': self.key,
        'client_secret': self.secret,
    }
    response = requests.post(token_url, data=payload)
    if response.status_code == 401:
        raise PetfinderInvalidCredentials(message=response.reason, err='Invalid Credentials')
    return response.json()['access_token']
def animal_types(self, types=None):
r"""
Returns data on an animal type, or types available from the Petfinder API. This data includes the
available type's coat names and colors, gender and other specific information relevant to the
specified type(s). The animal type must be of 'dog', 'cat', 'rabbit', 'small-furry', 'horse', 'bird',
'scales-fins-other', 'barnyard'.
Parameters
----------
types : str, list or tuple, optional
Specifies the animal type or types to return. Can be a string representing a single animal type, or a
tuple or list of animal types if more than one type is desired. If not specified, all animal types are
returned.
Raises
------
ValueError
Raised when the :code:`types` parameter receives an invalid animal type.
TypeError
If the :code:`types` is not given either a str, list or tuple, or None, a :code:`TypeError` will be
raised.
Returns
-------
dict
Dictionary object representing JSON data returned from the Petfinder API.
"""
if types is not None:
type_check = types
if isinstance(types, str):
type_check = [types]
diff = set(type_check).difference(('dog', 'cat', 'rabbit', 'small-furry', 'horse', 'bird',
'scales-fins-other', 'barnyard'))
if len(diff) > 0:
raise ValueError("animal types must be of the following 'dog', 'cat', 'rabbit', "
"'small-furry', 'horse', 'bird', 'scales-fins-other', 'barnyard'")
if types is None:
url = urljoin(self._host, 'types')
r = _get_result(url,
headers={
'Authorization': 'Bearer ' + self._auth
})
result = r.json()
elif isinstance(types, str):
url = urljoin(self._host, 'types/{type}'.format(type=types))
r = _get_result(url,
headers={
'Authorization': 'Bearer ' + self._auth
})
result = r.json()
elif isinstance(types, (tuple, list)):
types_collection = []
for type in types:
url = urljoin(self._host, 'types/{type}'.format(type=type))
r = _get_result(url,
headers={
'Authorization': 'Bearer ' + self._auth
})
types_collection.append(r.json()['type'])
result = {'types': types_collection}
else:
raise TypeError('types parameter must be either None, str, list or tuple')
return result
def breeds(self, types=None, return_df=False, raw_results=False):
r"""
Returns breed names of specified animal type, or types.
Parameters
----------
types : str, list or tuple, optional
String representing a single animal type or a list or tuple of a collection of animal types. If not
specified, all available breeds for each animal type is returned. The animal type must be of 'dog',
'cat', 'rabbit', 'small-furry', 'horse', 'bird', 'scales-fins-other', 'barnyard'.
return_df : boolean, default False
If :code:`True`, the result set will be coerced into a pandas :code:`DataFrame` with two columns,
breed and name. If :code:`return_df` is set to :code:`True`, it will override the :code:`raw_result`
parameter if it is also set to :code:`True` and return a pandas :code:`DataFrame`.
raw_results: boolean, default False
The PetFinder API :code:`breeds` endpoint returns some extraneous data in its result set along with the
breed names of the specified animal type(s). If :code:`raw_results` is :code:`False`, the method will
return a cleaner JSON object result set with the extraneous data removed. This parameter can be set to
:code:`True` for those interested in retrieving the entire result set. If the parameter :code:`return_df`
is set to :code:`True`, a pandas :code:`DataFrame` will be returned regardless of the value specified for
the :code:`raw_result` parameter.
Raises
------
ValueError
Raised when the :code:`types` parameter receives an invalid animal type.
TypeError
If the :code:`types` is not given either a str, list or tuple, or None, a :code:`TypeError` will be
raised.
Returns
-------
dict or pandas DataFrame
If the parameter :code:`return_df` is :code:`False`, a dictionary object representing the JSON data
returned from the Petfinder API is returned. If :code:`return_df=True`, the resulting dictionary is
coerced into a pandas DataFrame. Note if :code:`return_df=True`, the parameter :code:`raw_results` is
overridden.
"""
if types is not None:
type_check = types
if isinstance(types, str):
type_check = [types]
diff = set(type_check).difference(('dog', 'cat', 'rabbit', 'small-furry', 'horse', 'bird',
'scales-fins-other', 'barnyard'))
if len(diff) > 0:
raise ValueError("animal types must be of the following 'dog', 'cat', 'rabbit', "
"'small-furry', 'horse', 'bird', 'scales-fins-other', 'barnyard'")
if types is None or isinstance(types, (list, tuple)):
breeds = []
if types is None:
types = ('dog', 'cat', 'rabbit', 'small-furry',
'horse', 'bird', 'scales-fins-other', 'barnyard')
for t in types:
url = urljoin(self._host, 'types/{type}/breeds'.format(type=t))
r = _get_result(url,
headers={
'Authorization': 'Bearer ' + self._auth
})
breeds.append({t: r.json()})
result = {'breeds': breeds}
elif isinstance(types, str):
url = urljoin(self._host, 'types/{type}/breeds'.format(type=types))
r = _get_result(url,
headers={
'Authorization': 'Bearer ' + self._auth
})
result = r.json()
else:
raise TypeError('types parameter must be either None, str, list or tuple')
if return_df:
raw_results = True
df_results = DataFrame()
if isinstance(types, (tuple, list)):
for t in range(0, len(types)):
df_results = df_results.append(json_normalize(result['breeds'][t][types[t]]['breeds']))
else:
df_results = df_results.append(json_normalize(result['breeds']))
df_results.rename(columns={'_links.type.href': 'breed'}, inplace=True)
df_results['breed'] = df_results['breed'].str.replace('/v2/types/', '').str.capitalize()
result = df_results
if not raw_results:
json_result = {
'breeds': {
}
}
if isinstance(types, (tuple, list)):
for t in range(0, len(types)):
json_result['breeds'][types[t]] = []
for breed in result['breeds'][t][types[t]]['breeds']:
json_result['breeds'][types[t]].append(breed['name'])
else:
json_result['breeds'][types] = []
for breed in result['breeds']:
json_result['breeds'][types].append(breed['name'])
result = json_result
return result
def animals(self, animal_id=None, animal_type=None, breed=None, size=None, gender=None,
age=None, color=None, coat=None, status=None, name=None, organization_id=None,
location=None, distance=None, sort=None, pages=1, results_per_page=20, return_df=False):
r"""
Returns adoptable animal data from Petfinder based on specified criteria.
Parameters
----------
animal_id : int, tuple or list of int, optional
Integer or list or tuple of integers representing animal IDs obtained from Petfinder. When
:code:`animal_id` is specified, the other function parameters are overridden. If :code:`animal_id`
is not specified, a search of animals on Petfinder matching given criteria is performed.
animal_type : {'dog', 'cat', 'rabbit', 'small-furry', 'horse', 'bird', 'scales-fins-other', 'barnyard'}, str, optional
String representing desired animal type to search. Must be one of 'dog', 'cat', 'rabbit', 'small-furry',
'horse', 'bird', 'scales-fins-other', or 'barnyard'.
breed: str, tuple or list of str, optional
String or tuple or list of strings of desired animal type breed to search. Available animal breeds in
the Petfinder database can be found using the :code:`breeds()` method.
size: {'small', 'medium', 'large', 'xlarge'}, str, tuple or | |
from printind.printind_decorators import printi_all_method_calls as printidc
from printind.printind_function import printi, printiv
from tensorforce.environments import Environment
import tensorforce
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
# a bit hacky, but meeehh... FIXME!!
import sys
import os
cwd = os.getcwd()
sys.path.append(cwd + "/../Simulation/")
from dolfin import Expression, File, plot
from probes import PenetratedDragProbeANN, PenetratedLiftProbeANN, PressureProbeANN, VelocityProbeANN, RecirculationAreaProbe
from generate_msh import generate_mesh
from flow_solver import FlowSolver
from msh_convert import convert
from dolfin import *
import numpy as np
import os
import random as random
import pickle
import time
import math
import csv
import shutil
# TODO: check that right types etc from tensorforce examples
# typically:
# from tensorforce.contrib.openai_gym import OpenAIGym
# environment = OpenAIGym('MountainCarContinuous-v0', visualize=False)
# printiv(environment.states)
# environment.states = {'shape': (2,), 'type': 'float'}
# printiv(environment.actions)
# environment.actions = {'max_value': 1.0, 'shape': (1,), 'min_value': -1.0, 'type': 'float'}
def constant_profile(mesh, degree):
    '''
    Time independent inflow profile.
    '''
    # Channel extents in the y direction, read off the mesh coordinates.
    y_min = mesh.coordinates().min(axis=0)[1]
    y_max = mesh.coordinates().max(axis=0)[1]
    channel_height = y_max - y_min

    peak_velocity = 1.5

    # Parabolic (Poiseuille-like) profile that vanishes at both walls.
    return Expression(('-4*Um*(x[1]-bot)*(x[1]-top)/H/H',
                       '0'),
                      bot=y_min, top=y_max, H=channel_height,
                      Um=peak_velocity, degree=degree, time=0)
class RingBuffer():
    "A 1D ring buffer using numpy arrays"

    def __init__(self, length):
        # Fixed-size float32 storage; `index` is the next write position.
        self.data = np.zeros(length, dtype='f')
        self.index = 0

    def extend(self, x):
        "adds array x to ring buffer"
        # Accept scalars, 0-d arrays and sequences alike.
        x = np.asarray(x)
        if x.size == 0:
            # Nothing to write; avoids IndexError on x_index[-1] below.
            return
        x_index = (self.index + np.arange(x.size)) % self.data.size
        self.data[x_index] = x
        self.index = x_index[-1] + 1

    def get(self):
        "Returns the first-in-first-out data in the ring buffer"
        idx = (self.index + np.arange(self.data.size)) % self.data.size
        return self.data[idx]
# @printidc()
class Env2DCylinder(Environment):
"""Environment for 2D flow simulation around a cylinder."""
def __init__(self, path_root, geometry_params, flow_params, solver_params, output_params,
optimization_params, inspection_params, n_iter_make_ready=None, verbose=0, size_history=2000,
reward_function='plain_drag', size_time_state=50, number_steps_execution=1, simu_name="Simu"):
"""
"""
# TODO: should actually save the dicts in to double check when loading that using compatible simulations together
printi("--- call init ---")
self.path_root = path_root
self.flow_params = flow_params
self.geometry_params = geometry_params
self.solver_params = solver_params
self.output_params = output_params
self.optimization_params = optimization_params
self.inspection_params = inspection_params
self.verbose = verbose
self.n_iter_make_ready = n_iter_make_ready
self.size_history = size_history
self.reward_function = reward_function
self.size_time_state = size_time_state
self.number_steps_execution = number_steps_execution
self.simu_name = simu_name
#Relatif a l'ecriture des .csv
name="output.csv"
last_row = None
if(os.path.exists("saved_models/"+name)):
with open("saved_models/"+name, 'r') as f:
for row in reversed(list(csv.reader(f, delimiter=";", lineterminator="\n"))):
last_row = row
break
if(not last_row is None):
self.episode_number = int(last_row[0])
self.last_episode_number = int(last_row[0])
else:
self.last_episode_number = 0
self.episode_number = 0
self.episode_drags = np.array([])
self.episode_areas = np.array([])
self.episode_lifts = np.array([])
self.initialized_visualization = False
self.start_class(complete_reset=True)
printi("--- done init ---")
    def start_class(self, complete_reset=True):
        """
        Initialize or re-initialize the simulation state.

        Parameters
        ----------
        complete_reset : bool, default True
            If False, only the solver step counter is reset and the existing
            flow objects and history buffers are kept. If True, everything is
            rebuilt: history buffers, mesh (optionally remeshed), flow solver,
            probes, and the initial flow field (either loaded from disk or
            computed over ``n_iter_make_ready`` iterations and saved).
        """
        if complete_reset == False:
            # Partial reset: keep buffers/flow objects, restart the step counter.
            self.solver_step = 0
        else:
            self.solver_step = 0
            self.accumulated_drag = 0
            self.accumulated_lift = 0

            self.initialized_output = False

            self.resetted_number_probes = False

            self.area_probe = None

            # Ring buffers holding the recent history of controls, probes and forces.
            self.history_parameters = {}

            for crrt_jet in range(len(self.geometry_params["jet_positions"])):
                self.history_parameters["jet_{}".format(crrt_jet)] = RingBuffer(self.size_history)

            self.history_parameters["number_of_jets"] = len(self.geometry_params["jet_positions"])

            # One buffer per pressure probe, or two (u, v) per velocity probe.
            for crrt_probe in range(len(self.output_params["locations"])):
                if self.output_params["probe_type"] == 'pressure':
                    self.history_parameters["probe_{}".format(crrt_probe)] = RingBuffer(self.size_history)
                elif self.output_params["probe_type"] == 'velocity':
                    self.history_parameters["probe_{}_u".format(crrt_probe)] = RingBuffer(self.size_history)
                    self.history_parameters["probe_{}_v".format(crrt_probe)] = RingBuffer(self.size_history)

            self.history_parameters["number_of_probes"] = len(self.output_params["locations"])

            self.history_parameters["drag"] = RingBuffer(self.size_history)
            self.history_parameters["lift"] = RingBuffer(self.size_history)
            self.history_parameters["recirc_area"] = RingBuffer(self.size_history)

            # ------------------------------------------------------------------------
            # remesh if necessary
            h5_file = '.'.join([self.path_root, 'h5'])
            msh_file = '.'.join([self.path_root, 'msh'])
            self.geometry_params['mesh'] = h5_file

            # Regenerate mesh?
            if self.geometry_params['remesh']:

                if self.verbose > 0:
                    printi("Remesh")
                    printi("generate_mesh start...")

                generate_mesh(self.geometry_params, template=self.geometry_params['template'])

                if self.verbose > 0:
                    printi("generate_mesh done!")

                assert os.path.exists(msh_file)

                convert(msh_file, h5_file)
                assert os.path.exists(h5_file)

            # ------------------------------------------------------------------------
            # if necessary, load initialization fields
            # (n_iter_make_ready is None means "resume from fields saved on disk")
            if self.n_iter_make_ready is None:
                if self.verbose > 0:
                    printi("Load initial flow")

                self.flow_params['u_init'] = 'mesh/u_init.xdmf'
                self.flow_params['p_init'] = 'mesh/p_init.xdmf'

                if self.verbose > 0:
                    printi("Load buffer history")

                with open('mesh/dict_history_parameters.pkl', 'rb') as f:
                    self.history_parameters = pickle.load(f)

                # Backwards compatibility with buffers saved by older versions
                # that lack some of the keys.
                if not "number_of_probes" in self.history_parameters:
                    self.history_parameters["number_of_probes"] = 0

                if not "number_of_jets" in self.history_parameters:
                    self.history_parameters["number_of_jets"] = len(self.geometry_params["jet_positions"])
                    printi("Warning!! The number of jets was not set in the loaded hdf5 file")

                if not "lift" in self.history_parameters:
                    self.history_parameters["lift"] = RingBuffer(self.size_history)
                    printi("Warning!! No value for the lift founded")

                if not "recirc_area" in self.history_parameters:
                    self.history_parameters["recirc_area"] = RingBuffer(self.size_history)
                    printi("Warning!! No value for the recirculation area founded")

                # if not the same number of probes, reset
                if not self.history_parameters["number_of_probes"] == len(self.output_params["locations"]):
                    for crrt_probe in range(len(self.output_params["locations"])):
                        if self.output_params["probe_type"] == 'pressure':
                            self.history_parameters["probe_{}".format(crrt_probe)] = RingBuffer(self.size_history)
                        elif self.output_params["probe_type"] == 'velocity':
                            self.history_parameters["probe_{}_u".format(crrt_probe)] = RingBuffer(self.size_history)
                            self.history_parameters["probe_{}_v".format(crrt_probe)] = RingBuffer(self.size_history)

                    self.history_parameters["number_of_probes"] = len(self.output_params["locations"])

                    printi("Warning!! Number of probes was changed! Probes buffer content reseted")

                    # Remember to refill the fresh buffers at the end of this method.
                    self.resetted_number_probes = True

            # ------------------------------------------------------------------------
            # create the flow simulation object
            self.flow = FlowSolver(self.flow_params, self.geometry_params, self.solver_params)

            # ------------------------------------------------------------------------
            # Setup probes
            if self.output_params["probe_type"] == 'pressure':
                self.ann_probes = PressureProbeANN(self.flow, self.output_params['locations'])

            elif self.output_params["probe_type"] == 'velocity':
                self.ann_probes = VelocityProbeANN(self.flow, self.output_params['locations'])
            else:
                raise RuntimeError("unknown probe type")

            # Setup drag measurement
            self.drag_probe = PenetratedDragProbeANN(self.flow)
            self.lift_probe = PenetratedLiftProbeANN(self.flow)

            # ------------------------------------------------------------------------
            # No flux from jets for starting
            self.Qs = np.zeros(len(self.geometry_params['jet_positions']))

            # ------------------------------------------------------------------------
            # prepare the arrays for plotting positions
            self.compute_positions_for_plotting()

            # ------------------------------------------------------------------------
            # if necessary, make converge: run the uncontrolled flow for
            # n_iter_make_ready steps to reach an established vortex-shedding state
            if self.n_iter_make_ready is not None:
                self.u_, self.p_ = self.flow.evolve(self.Qs)

                path=''
                if "dump" in self.inspection_params:
                    path = 'results/area_out.pvd'
                self.area_probe = RecirculationAreaProbe(self.u_, 0, store_path=path)

                if self.verbose > 0:
                    printi("Compute initial flow")
                    printiv(self.n_iter_make_ready)

                for _ in range(self.n_iter_make_ready):
                    self.u_, self.p_ = self.flow.evolve(self.Qs)

                    self.probes_values = self.ann_probes.sample(self.u_, self.p_).flatten()
                    self.drag = self.drag_probe.sample(self.u_, self.p_)
                    self.lift = self.lift_probe.sample(self.u_, self.p_)
                    self.recirc_area = self.area_probe.sample(self.u_, self.p_)

                    self.write_history_parameters()
                    self.visual_inspection()
                    self.output_data()

                    self.solver_step += 1

            # After convergence, save the fields and buffers so later runs can
            # resume with n_iter_make_ready=None.
            if self.n_iter_make_ready is not None:
                encoding = XDMFFile.Encoding_HDF5
                # NOTE(review): convert() is called again here although the mesh
                # was already converted above when remeshing -- presumably to get
                # the mesh object back for its MPI communicator; confirm.
                mesh = convert(msh_file, h5_file)
                comm = mesh.mpi_comm()

                # save field data
                XDMFFile(comm, 'mesh/u_init.xdmf').write_checkpoint(self.u_, 'u0', 0, encoding)
                XDMFFile(comm, 'mesh/p_init.xdmf').write_checkpoint(self.p_, 'p0', 0, encoding)

                # save buffer dict
                with open('mesh/dict_history_parameters.pkl', 'wb') as f:
                    pickle.dump(self.history_parameters, f, pickle.HIGHEST_PROTOCOL)

            # ----------------------------------------------------------------------
            # if reading from disk, show to check everything ok
            if self.n_iter_make_ready is None:
                # Let's start in a random position of the vortex shading
                if self.optimization_params["random_start"]:
                    rd_advancement = np.random.randint(650)
                    for j in range(rd_advancement):
                        self.flow.evolve(self.Qs)
                    print("Simulated {} iterations before starting the control".format(rd_advancement))

                self.u_, self.p_ = self.flow.evolve(self.Qs)
                path=''
                if "dump" in self.inspection_params:
                    path = 'results/area_out.pvd'
                self.area_probe = RecirculationAreaProbe(self.u_, 0, store_path=path)

                self.probes_values = self.ann_probes.sample(self.u_, self.p_).flatten()
                self.drag = self.drag_probe.sample(self.u_, self.p_)
                self.lift = self.lift_probe.sample(self.u_, self.p_)
                self.recirc_area = self.area_probe.sample(self.u_, self.p_)

                self.write_history_parameters()
                # self.visual_inspection()
                # self.output_data()

                # self.solver_step += 1

                # time.sleep(10)

            # ----------------------------------------------------------------------
            # if necessary, fill the probes buffer
            if self.resetted_number_probes:
                printi("Need to fill again the buffer; modified number of probes")

                for _ in range(self.size_history):
                    self.execute()

            # ----------------------------------------------------------------------
            # ready now

            # Initialization of the recirculation-area probe (kept for reference)
            #path=''
            #if "dump" in self.inspection_params:
            #    path = 'results/area_out.pvd'
            #self.area_probe = RecirculationAreaProbe(self.u_, 0, store_path=path)

            self.ready_to_use = True
def write_history_parameters(self):
for crrt_jet in range(len(self.geometry_params["jet_positions"])):
self.history_parameters["jet_{}".format(crrt_jet)].extend(self.Qs[crrt_jet])
if self.output_params["probe_type"] == 'pressure':
for crrt_probe in range(len(self.output_params["locations"])):
self.history_parameters["probe_{}".format(crrt_probe)].extend(self.probes_values[crrt_probe])
elif self.output_params["probe_type"] == 'velocity':
for crrt_probe in range(len(self.output_params["locations"])):
self.history_parameters["probe_{}_u".format(crrt_probe)].extend(self.probes_values[2 * crrt_probe])
self.history_parameters["probe_{}_v".format(crrt_probe)].extend(self.probes_values[2 * crrt_probe + 1])
self.history_parameters["drag"].extend(np.array(self.drag))
self.history_parameters["lift"].extend(np.array(self.lift))
self.history_parameters["recirc_area"].extend(np.array(self.recirc_area))
def compute_positions_for_plotting(self):
# where the pressure probes are
self.list_positions_probes_x = []
self.list_positions_probes_y = []
total_number_of_probes = len(self.output_params['locations'])
printiv(total_number_of_probes)
# get the positions
for crrt_probe in self.output_params['locations']:
if self.verbose > 2:
printiv(crrt_probe)
self.list_positions_probes_x.append(crrt_probe[0])
self.list_positions_probes_y.append(crrt_probe[1])
# where the jets are
radius_cylinder = self.geometry_params['cylinder_size'] / 2.0 / self.geometry_params['clscale']
self.list_positions_jets_x = []
self.list_positions_jets_y = []
# compute the positions
for crrt_jet_angle in self.geometry_params['jet_positions']:
crrt_jet_angle_rad = math.pi / 180.0 * crrt_jet_angle
crrt_x = radius_cylinder * math.cos(crrt_jet_angle_rad)
crrt_y = radius_cylinder * math.sin(crrt_jet_angle_rad)
self.list_positions_jets_x.append(crrt_x)
self.list_positions_jets_y.append(1.1 * crrt_y)
def show_flow(self):
plt.figure()
plot(self.u_)
plt.scatter(self.list_positions_probes_x, self.list_positions_probes_y, c='k', marker='o')
plt.scatter(self.list_positions_jets_x, self.list_positions_jets_y, c='r', marker='o')
plt.xlim([-self.geometry_params['front_distance'], self.geometry_params['length'] - self.geometry_params['front_distance']])
plt.ylim([-self.geometry_params['bottom_distance'], self.geometry_params['width'] - self.geometry_params['bottom_distance']])
plt.ylabel("Y")
plt.xlabel("X")
plt.show()
plt.figure()
p = plot(self.p_)
cb = plt.colorbar(p, fraction=0.1, shrink=0.3)
plt.scatter(self.list_positions_probes_x, self.list_positions_probes_y, c='k', marker='o')
plt.scatter(self.list_positions_jets_x, self.list_positions_jets_y, c='r', marker='o')
plt.xlim([-self.geometry_params['front_distance'], self.geometry_params['length'] - self.geometry_params['front_distance']])
plt.ylim([-self.geometry_params['bottom_distance'], self.geometry_params['width'] - self.geometry_params['bottom_distance']])
plt.ylabel("Y")
plt.xlabel("X")
plt.tight_layout()
cb.set_label("P")
plt.show()
def show_control(self):
plt.figure()
linestyles = ['-', '--', ':', '-.']
for crrt_jet in range(len(self.geometry_params["jet_positions"])):
crrt_jet_data = self.history_parameters["jet_{}".format(crrt_jet)].get()
plt.plot(crrt_jet_data, label="jet {}".format(crrt_jet), linestyle=linestyles[crrt_jet], linewidth=1.5)
plt.legend(loc=2)
plt.ylabel("control Q")
plt.xlabel("actuation step")
plt.tight_layout()
plt.pause(1.0)
plt.savefig("saved_figures/control_episode_{}.pdf".format(self.episode_number))
plt.show()
plt.pause(2.0)
def show_drag(self):
plt.figure()
crrt_drag = self.history_parameters["drag"].get()
plt.plot(crrt_drag, label="episode drag", linewidth=1.2)
plt.plot([0, self.size_history - 1], [self.inspection_params['line_drag'], self.inspection_params['line_drag']], label="mean drag no control", linewidth=2.5, linestyle="--")
plt.ylabel("measured drag D")
plt.xlabel("actuation step")
range_drag_plot = self.inspection_params["range_drag_plot"]
plt.legend(loc=2)
plt.ylim(range_drag_plot)
plt.tight_layout()
plt.pause(1.0)
plt.savefig("saved_figures/drag_episode_{}.pdf".format(self.episode_number))
plt.show()
plt.pause(2.0)
def visual_inspection(self):
total_number_subplots = 5
crrt_subplot = 1
if(not self.initialized_visualization and self.inspection_params["plot"] != False):
plt.ion()
plt.subplots(total_number_subplots, 1)
# ax.set_xlim([0, self.nbr_points_animate_plot])
# ax.set_ylim([0, 1024])
self.initialized_visualization = True
if("plot" in self.inspection_params and self.inspection_params["plot"] != False):
modulo_base = self.inspection_params["plot"]
if self.solver_step % modulo_base == 0:
plt.subplot(total_number_subplots, 1, crrt_subplot)
plot(self.u_)
plt.scatter(self.list_positions_probes_x, self.list_positions_probes_y, c='k', marker='o')
plt.scatter(self.list_positions_jets_x, self.list_positions_jets_y, c='r', marker='o')
plt.xlim([-self.geometry_params['front_distance'], self.geometry_params['length'] - self.geometry_params['front_distance']])
plt.ylim([-self.geometry_params['bottom_distance'], self.geometry_params['width'] - self.geometry_params['bottom_distance']])
plt.ylabel("V")
crrt_subplot += 1
plt.subplot(total_number_subplots, 1, crrt_subplot)
plot(self.p_)
plt.scatter(self.list_positions_probes_x, self.list_positions_probes_y, c='k', marker='o')
plt.scatter(self.list_positions_jets_x, self.list_positions_jets_y, c='r', marker='o')
plt.xlim([-self.geometry_params['front_distance'], self.geometry_params['length'] - self.geometry_params['front_distance']])
plt.ylim([-self.geometry_params['bottom_distance'], self.geometry_params['width'] - self.geometry_params['bottom_distance']])
plt.ylabel("P")
crrt_subplot += 1
plt.subplot(total_number_subplots, 1, crrt_subplot)
plt.cla()
for crrt_jet in range(len(self.geometry_params["jet_positions"])):
crrt_jet_data = self.history_parameters["jet_{}".format(crrt_jet)].get()
plt.plot(crrt_jet_data, label="jet {}".format(crrt_jet))
plt.legend(loc=6)
plt.ylabel("M.F.R.")
crrt_subplot += 1
# plt.subplot(total_number_subplots, 1, crrt_subplot)
# plt.cla()
# for crrt_probe in range(len(self.output_params["locations"])):
# if self.output_params["probe_type"] == 'pressure':
# crrt_probe_data = self.history_parameters["probe_{}".format(crrt_probe)].get()
# plt.plot(crrt_probe_data, label="probe {}".format(crrt_probe))
# elif self.output_params["probe_type"] == 'velocity':
# crrt_probe_data = self.history_parameters["probe_{}_u".format(crrt_probe)].get()
# plt.plot(crrt_probe_data, label="probe {}".format(crrt_probe))
# crrt_probe_data = self.history_parameters["probe_{}_v".format(crrt_probe)].get()
# plt.plot(crrt_probe_data, label="probe {}".format(crrt_probe))
# # plt.legend(loc=6)
# if self.output_params["probe_type"] == "pressure":
# plt.ylabel("pressure")
# elif | |
# Mika "AgenttiX" Mäki & <NAME>, 2017
## @package toolbox_2
# This program serves as a collection of supplementary functions for data_analysis.py
# pylint: disable=wrong-import-order
# To prevent Matlab from loading, comment this line and uncomment the line in simulate_extinction()
import toolbox
from math import factorial
import numpy as np
## From http://scipy.github.io/old-wiki/pages/Cookbook/SavitzkyGolay
# This function is thereby NOT covered by our licensing
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    The Savitzky-Golay filter removes high frequency noise from data.
    It has the advantage of preserving the original shape and
    features of the signal better than other types of filtering
    approaches, such as moving averages techniques.

    Parameters
    ----------
    y : array_like, shape (N,)
        the values of the time history of the signal.
    window_size : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering.
        Must be less then `window_size` - 1.
    deriv: int
        the order of the derivative to compute (default = 0 means only smoothing)

    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or it's n-th derivative).

    Notes
    -----
    The Savitzky-Golay is a type of low-pass filter, particularly
    suited for smoothing noisy data. The main idea behind this
    approach is to make for each point a least-square fit with a
    polynomial of high order over a odd-sized window centered at
    the point.

    References
    ----------
    .. [1] Savitzky, Golay, Smoothing and Differentiation of
       Data by Simplified Least Squares Procedures. Analytical
       Chemistry, 1964, 36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
       Cambridge University Press ISBN-13: 9780521880688
    """
    try:
        # Built-in int()/abs() instead of np.int/np.abs: the np.int alias was
        # removed in NumPy 1.24, which made the original a hard crash.
        window_size = abs(int(window_size))
        order = abs(int(order))
    except (ValueError, TypeError) as err:
        raise ValueError("window_size and order have to be of type int") from err
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # precompute coefficients (plain ndarray; np.mat is deprecated)
    b = np.array([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate ** deriv * factorial(deriv)
    # pad the signal at the extremes with
    # values taken from the signal itself (mirrored around the end points)
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
def remove_noise(data, noise_reduction_number):
    """
    Applies the Savitzky-Golay filter to suppress noise in the given data.
    :param data: numpy array to be filtered
    :param noise_reduction_number: filter level; 0 returns the data unchanged
    :return: filtered data
    """
    if noise_reduction_number < 0:
        raise ValueError("Noise reduction can't be negative")
    if noise_reduction_number == 0:
        return data
    # Window size grows with the filter level; polynomial order is fixed at 3.
    window_size = noise_reduction_number * 50 + 1
    return savitzky_golay(data, window_size, 3)
def flip_and_normalize(data):
    """
    Flips the signal and scales it by the pre-drop baseline level.

    The baseline is the mean over a window well before the pressure drop
    (located with find_drop_index), so the result is ~0 at the baseline
    and grows as the signal falls below it.
    :param data: numpy array (same kind of signal find_drop_index expects)
    :return: 1 - data / baseline
    """
    drop_index = find_drop_index(data)
    baseline = np.mean(data[drop_index - 10000:drop_index - 6000])
    return 1 - data / baseline
def find_drop_index(data):
    """
    Finds the index of beginning of the largest decline in vector.
    Intended for data=measurement.p_diff, in which the biggest
    difference drop indicates the moment of pressure release.
    :param data: type of array, works best for measurement.p_diff
    :return: index of the beginning of the pressure drop
    :raises UserWarning: if the fine-tuning step cannot locate the drop start
    """
    # This function finds the coarse index of the biggest drop first,
    # and then fine-tunes the index to the beginning of the drop.

    # Coarse pass: evaluate differences on a sparse grid of indexes.
    index_skip = 200     # every single index is not tested for performance reasons
    index_spacing = 500  # half-width of the interval over which the difference is tested
    # Crop the ends of the array so index +- index_spacing stays in range.
    # Built-in int() replaces np.int, which was removed in NumPy 1.24.
    border_margin = int(index_spacing / index_skip) + 1
    greatest_difference = -1
    index_of_greatest_diff = -1
    # i is a sparse index: for every i there are {index_skip=200} normal indexes
    for i in range(border_margin, (int(data.size / index_skip) - border_margin)):
        index = i * index_skip
        # finds the difference of data around the index
        difference = data[index - index_spacing] - data[index + index_spacing]
        if difference > greatest_difference:
            greatest_difference = difference
            index_of_greatest_diff = index

    # Fine pass: walk forward until the signal falls below a noise-aware threshold.
    # Minimum and maximum of data before the drop occurs; deviation is random noise.
    minium_before_drop = np.amin(data[index_of_greatest_diff - 4000: index_of_greatest_diff - 2000])
    maxium_before_drop = np.amax(data[index_of_greatest_diff - 4000: index_of_greatest_diff - 2000])
    threshold = minium_before_drop - (maxium_before_drop - minium_before_drop)
    fine_tuned_index_of_drop = -1
    for i in range(index_of_greatest_diff - 2000, index_of_greatest_diff + 1000):
        if data[i] < threshold:
            fine_tuned_index_of_drop = i
            break
    if fine_tuned_index_of_drop == -1:
        raise UserWarning("Something went wrong in fine-tuning the index of pressure drop!")
    fine_tuned_index_of_drop -= 150  # approximation that sets index somewhat just before the drop starts
    return fine_tuned_index_of_drop
def get_pressure_change(measurement):
    """
    Returns initial and final pressures.
    :param measurement: must be instance of class Measurement
    :return: initial_pressure, final_pressure
    """
    drop_index = find_drop_index(measurement.p_diff)

    # Final absolute pressure: averaged well after the drop has settled.
    final_pressure = np.mean(measurement.p_abs[drop_index + 6000: drop_index + 16000])

    # Height of the drop in the differential-pressure signal.
    level_before = np.mean(measurement.p_diff[drop_index - 6000: drop_index - 4000])
    level_after = np.mean(measurement.p_diff[drop_index + 4000: drop_index + 6000])
    initial_pressure = final_pressure + (level_before - level_after)

    # Arrays store pressures in kPa; convert to Pa on return.
    return initial_pressure * 1000, final_pressure * 1000
def simulate_extinction(particle_size, p_i, p_f, particle_dens, tmax=10, saturation=1.0):
    """
    Simulates particle growth extinction with given parameters. Growing particles are all the same size.

    :param particle_size: size of particle
    :param p_i: initial pressure
    :param p_f: final pressure
    :param particle_dens: the number of particles in unit volume (#/m^3)
    :param tmax: the maximum time up which to compute to (s)
    :param saturation: proportional fraction of partial water pressure from its maximum value, a.k.a vapor quality
    :return: extinction array, time array
    """
    # To avoid Matlab from loading until executing this function, uncomment
    # the next line and comment the line in beginning of file
    # import toolbox

    # Physical constants for water / air at room temperature.
    temp_i = 296.15                # (K), 23 deg C, from instructions
    diff = 0.282e-4                # Diffusion coefficient for water (m^2/s)
    surface_tension = 72.8e-3      # (N/m), From example code
    m_mol = 18.016e-3              # Molar mass of water (kg/mol)
    rho_wat = 998.20               # Density of water (kg/m^3)
    evap_e = 2260e3                # Evaporation energy of water (J/kg)
    thermal_con_air = 0.0257       # Thermal conductivity of air (W/(m*K))
    heat_capacity_ratio = 1.4      # For (dry) air, from https://en.wikipedia.org/wiki/Heat_capacity_ratio
    m = 1.33 + 0.001               # Refractive index of water
    wavelength = 635               # Wavelength of our laser
    length = 1                     # (m)

    # Partial water pressure after adiabatic expansion.
    partial_pressure = (p_f / p_i) * toolbox.water_pvap(temp_i) * saturation
    # Temperature after adiabatic expansion.
    temp_f = toolbox.final_temp(temp_i, p_f, p_i, heat_capacity_ratio)
    # Grow the droplets, then convert droplet size history to optical extinction.
    t, dp, pw = toolbox.solve_growth(temp_f, diff, m_mol, evap_e, thermal_con_air, rho_wat, surface_tension,
                                     particle_dens, tmax, particle_size, partial_pressure)
    q_ext = toolbox.q_ext(dp, m, wavelength)
    sigma_ext = toolbox.extinction_factor(particle_dens, dp, q_ext)
    ext = toolbox.extinction(sigma_ext, length)
    return ext, t
def minimum_particle_diameter(p_i, p_f, saturation=1.0):
    """
    Returns the smallest growing particle size.

    :param p_i: initial pressure
    :param p_f: final pressure
    :param saturation: vapor quality, fraction of the maximum partial water pressure
    :return: smallest particle diameter that still grows
    """
    # Constants for water and (dry) air.
    temp_i = 296.15              # (K), 23 deg C, from instructions
    heat_capacity_ratio = 1.4    # For (dry) air, from https://en.wikipedia.org/wiki/Heat_capacity_ratio
    water_a = 10.23              # Antoine-style vapor pressure coefficients
    water_b = 1750
    water_c = 38
    m_mol = 18.016e-3            # Molar mass of water (kg/mol)
    surface_tension = 72.8e-3    # (N/m), From example code
    rho_wat = 998.20             # Density of water (kg/m^3)

    # Temperature after adiabatic expansion.
    temp_f = toolbox.final_temp(temp_i, p_f, p_i, heat_capacity_ratio)
    return toolbox.minimum_particle_diameter_2(p_i, p_f, temp_f, heat_capacity_ratio,
                                               water_a, water_b, water_c, m_mol, surface_tension,
                                               rho_wat, saturation)
def extinction_factor(extinction_fraction):
    """
    Calculates the extinction factor from equation: extinction_fraction = exp( -extinction_factor * L)
    Symbol: sigma_ext

    :param extinction_fraction: (1-I/I0), falls in range 0-1
    :return: extinction factor (1/m)
    """
    tube_length = 1  # length of tube (m)
    transmitted_fraction = 1 - extinction_fraction
    return -np.log(transmitted_fraction) / tube_length
def particle_count(sigma_ext, p_size, q_ext):
    """
    Returns the particle concentration in #/m^3
    Symbol N

    :param sigma_ext: extinction factor (1/m)
    :param p_size: particle size (m)
    :param q_ext: extinction efficiency "Ekstinktiotehokkuus"
    :return: number concentration N (#/m^3)
    """
    # N = sigma_ext / (geometric cross section * extinction efficiency)
    cross_section_area = np.pi * p_size ** 2 / 4
    return sigma_ext / (cross_section_area * q_ext)
def particle_count_2(extinction_fraction):
"""
Returns particle count. Combines two functions for better code appereance.
:param extinction_fraction: (1-I/I0), falls in range 0-1
:return: N
"""
p_size = 1.76e-6 # Particle size at the beginning of first wrinkle. Project_mod.py's 5th plot.
q_ext = 2.8 | |
Delete the connections going into the blendshape.
"""
outputs = attr.get_attribute_outputs('%s.weight' % self.pose_control)
removed_already = False
if outputs:
for output in outputs:
removed_already = False
if cmds.nodeType(output) == 'multiplyDivide':
node = output.split('.')
found = None
if len(node) == 2:
node = node[0]
found = node
if found:
output_value = attr.get_attribute_outputs('%s.outputX' % found)
if output_value and len(output_value) == 1:
output = output_value[0]
if output_value and len(output_value) > 1:
for this_output in output_value:
split_output = this_output.split('.')
blend = blendshape.BlendShape(split_output[0])
blend.remove_target(split_output[1])
removed_already = True
if cmds.nodeType(output) == 'blendShape' and not removed_already:
split_output = output.split('.')
blend = blendshape.BlendShape(split_output[0])
blend.remove_target(split_output[1])
def get_blendshape(self, mesh_index = None):
"""
Get the blendshape.
Args:
mesh_index (int): Work with the mesh at the index.
"""
mesh = None
if mesh_index == None:
return
if mesh_index != None:
mesh = self.get_mesh(mesh_index)
if not mesh:
return
target_mesh = self.get_target_mesh(mesh)
if not target_mesh or not cmds.objExists(target_mesh):
return
blendshape_node = self._get_blendshape(target_mesh)
return blendshape_node
#--- attributes
def disconnect_weight_outputs(self):
"""
Disconnect outputs from the pose.weight attribute.
"""
self.disconnected_attributes = None
outputs = attr.get_attribute_outputs('%s.weight' % self.pose_control)
if not outputs:
return
for output in outputs:
node = output.split('.')[0]
if cmds.nodeType(node) == 'multiplyDivide' and cmds.isConnected('%s.enable' % self.pose_control, '%s.input2X' % node):
continue
attr.disconnect_attribute(output)
return outputs
def reconnect_weight_outputs(self, outputs):
"""
Connect outputs from pose.weight attr.
"""
if not outputs:
return
for attribute in outputs:
if not cmds.objExists(attribute):
continue
input_value = attr.get_attribute_input(attribute)
if not input_value:
cmds.connectAttr('%s.weight' % self.pose_control, attribute)
def set_enable(self, value):
cmds.setAttr('%s.enable' % self.pose_control, value)
class PoseNoReader(PoseBase):
    """
    This type of pose does not read anything in a rig unless an input is specified.
    """

    def _pose_type(self):
        return 'no reader'

    def _create_pose_control(self):
        # The pose control is an empty transform carrying the pose attributes.
        pose_control = cmds.group(em = True, n = self._get_name())
        attr.hide_keyable_attributes(pose_control)
        self.pose_control = pose_control
        self._create_attributes(pose_control)
        return pose_control

    def _create_attributes(self, control):
        super(PoseNoReader, self)._create_attributes(control)
        # weightInput stores the node.attribute name that drives this pose.
        pose_input = attr.MayaStringVariable('weightInput')
        pose_input.create(control)

    def _multiply_weight(self, destination):
        # Route weight * enable into destination through one multiplyDivide,
        # reusing the node from a previous run when it still exists.
        multiply = self._get_named_message_attribute('multiplyDivide1')
        if not multiply:
            multiply = self._create_node('multiplyDivide')
        if not cmds.isConnected('%s.weight' % self.pose_control, '%s.input1X' % multiply):
            cmds.connectAttr('%s.weight' % self.pose_control, '%s.input1X' % multiply)
        if not cmds.isConnected('%s.enable' % self.pose_control, '%s.input2X' % multiply):
            cmds.connectAttr('%s.enable' % self.pose_control, '%s.input2X' % multiply)
        attr.disconnect_attribute(destination)
        cmds.connectAttr('%s.outputX' % multiply, destination)

    def _connect_weight_input(self, attribute):
        weight_attr = '%s.weight' % self.pose_control
        input_attr = attr.get_attribute_input(weight_attr)
        # Already driven by the requested attribute; nothing to do.
        if attribute == input_attr:
            return
        cmds.connectAttr(attribute, weight_attr)

    def create_blend(self, mesh_index, goto_pose = True, sub_poses = True):
        """
        Create or update the blendshape target for the mesh at mesh_index.

        Args:
            mesh_index (int): Work with the mesh at the index.
            goto_pose (bool): Move the rig to the stored pose before extracting.
            sub_poses (bool): Also rebuild the sub poses on the target mesh.
        """
        mesh = self._get_current_mesh(mesh_index)
        target_mesh = self.get_target_mesh(mesh)
        sub_pass_mesh = target_mesh
        if not mesh:
            return
        manager = PoseManager()
        manager.set_weights_to_zero()
        this_index = mesh_index
        if mesh_index == None:
            return
        # Remove any delta stored by a previous run for this mesh slot.
        old_delta = self._get_named_message_attribute('delta%s' % (this_index + 1))
        if old_delta:
            cmds.delete(old_delta)
        target_mesh = self.get_target_mesh(mesh)
        if not target_mesh:
            # NOTE(review): the exception is instantiated but never raised;
            # the original silent-return behavior is preserved on purpose.
            RuntimeError('Mesh index %s, has no target mesh' % mesh_index)
            return
        if goto_pose:
            self.goto_pose()
        self.disconnect_blend(this_index)
        blend = self._initialize_blendshape_node(target_mesh)
        nicename = core.get_basename(self.pose_control, remove_namespace = True)
        # Extract the corrective shape with this target's influence turned off.
        blend.set_weight(nicename, 0)
        offset = deform.chad_extract_shape(target_mesh, mesh)
        blend.set_weight(nicename, 1)
        self.connect_blend(this_index)
        if blend.is_target(nicename):
            blend.replace_target(nicename, offset)
        if not blend.is_target(nicename):
            blend.create_target(nicename, offset)
        blend_attr = '%s.%s' % (blend.blendshape, nicename)
        self._multiply_weight(blend_attr)
        cmds.delete(offset)
        if sub_poses:
            self.create_sub_poses(sub_pass_mesh)

    def set_input(self, attribute):
        """
        Set the input into the weightInput of the no reader.
        No readers need to have a connection specified that tells the pose when to turn on.

        Args:
            attribute (str): The node.attribute name of a connection to feed into the no reader.
        """
        self.weight_input = attribute
        if not cmds.objExists('%s.weightInput' % self.pose_control):
            cmds.addAttr(self.pose_control, ln = 'weightInput', dt = 'string')
        if not attribute:
            attribute = ''
        cmds.setAttr('%s.weightInput' % self.pose_control, attribute, type = 'string')
        # Only numeric attributes can be wired directly into the weight.
        if not attr.is_attribute_numeric(attribute):
            attr.disconnect_attribute('%s.weight' % self.pose_control)
            return
        self._connect_weight_input(attribute)

    def get_input(self):
        """
        Get the connection into the weightInput attribute of a no reader.
        No readers need to have a connection specified that tells the pose when to turn on.

        Returns:
            str: node.attribute name
        """
        attribute = attr.get_attribute_input('%s.weight' % self.pose_control)
        if attribute:
            return attribute
        return cmds.getAttr('%s.weightInput' % self.pose_control)

    def attach(self, outputs = None):
        """
        Reattach the pose: restore the weight input, outputs and sub poses.

        Args:
            outputs (list): The attributes previously returned by detach().
        """
        super(PoseNoReader, self).attach(outputs)
        attribute = self.get_input()
        self.set_input(attribute)
        if outputs:
            self.reconnect_weight_outputs(outputs)
        self._hide_meshes()
        if self.sub_detach_dict:
            for key in self.sub_detach_dict:
                pose = get_pose_instance(key)
                # Bug fix: the stored outputs are keyed by `key`; the original
                # indexed with the freshly created pose instance, raising KeyError.
                pose.attach(self.sub_detach_dict[key])
            self.sub_detach_dict = {}

    def detach(self):
        """
        Detach the pose and return the disconnected weight outputs.
        """
        super(PoseNoReader, self).detach()
        input_value = self.get_input()
        outputs = self.disconnect_weight_outputs()
        attr.disconnect_attribute('%s.weight' % self.pose_control)
        # Remember the driving attribute as a string so attach() can restore it.
        cmds.setAttr('%s.weightInput' % self.pose_control, input_value, type = 'string')
        self._show_meshes()
        return outputs

    def mirror(self):
        """
        Mirror a pose to a corresponding R side pose.

        For example
        If self.pose_control = pose_arm_L, there must be a corresponding pose_arm_R.
        The pose at pose_arm_R must be a mirrored pose of pose_arm_L.
        """
        self.other_pose_exists = False
        other_pose_instance = self._get_mirror_pose_instance()
        other_target_meshes = []
        input_meshes = {}
        for inc in range(0, self._get_mesh_count()):
            mesh = self.get_mesh(inc)
            target_mesh = self.get_target_mesh(mesh)
            if target_mesh == None:
                continue
            other_target_mesh, other_target_mesh_duplicate = self._create_mirror_mesh(target_mesh)
            if other_target_mesh == None:
                continue
            input_meshes[other_target_mesh] = other_target_mesh_duplicate
            other_target_meshes.append(other_target_mesh)
        if not self.other_pose_exists:
            # Build the mirrored pose from mirrored control data.
            store = rigs_util.StoreControlData(self.pose_control)
            if self.left_right:
                side = 'L'
            if not self.left_right:
                side = 'R'
            store.eval_mirror_data(side)
            other_pose_instance.create()
        if self.other_pose_exists:
            other_pose_instance.goto_pose()
        #cmds.setAttr('%s.weight' % self.pose_control, 0)
        for mesh in other_target_meshes:
            # Bug fix: check/add the current loop mesh; the original reused the
            # stale `other_target_mesh` variable left over from the loop above,
            # so only the last mirrored mesh was ever added.
            index = other_pose_instance.get_target_mesh_index(mesh)
            if index == None:
                other_pose_instance.add_mesh(mesh, toggle_vis = False)
        for mesh in other_target_meshes:
            index = other_pose_instance.get_target_mesh_index(mesh)
            if index == None:
                continue
            input_mesh = other_pose_instance.get_mesh(index)
            if not input_mesh:
                continue
            fix_mesh = input_meshes[mesh]
            # Snap the mirrored duplicate onto the pose mesh before blending.
            cmds.blendShape(fix_mesh, input_mesh, foc = True, w = [0, 1])
            other_pose_instance.create_blend(index, False)
            cmds.delete(input_mesh, ch = True)
            cmds.delete(fix_mesh)
        return other_pose_instance.pose_control

    def set_weight(self, value):
        """
        Set the weight attribute of the no reader.
        No readers have connections specified.
        If no connection is specified and connected, this can set the weight.

        Args:
            value (float): The value to set the weight to.
        """
        input_attr = attr.get_attribute_input('%s.weight' % self.pose_control)
        if not input_attr:
            try:
                cmds.setAttr('%s.weight' % self.pose_control, value)
            except Exception:
                # Attribute may be locked or otherwise unsettable; keep the
                # original best-effort behavior (was a bare except).
                pass
        # Propagate the weight to all child poses of this pose group.
        manager = PoseManager()
        manager.set_pose_group(self.pose_control)
        children = manager.get_poses()
        if children:
            for child in children:
                child_instance = manager.get_pose_instance(child)
                child_instance.set_weight(value)
class PoseCombo(PoseNoReader):
def _pose_type(self):
return 'combo'
    def _create_attributes(self, control):
        # NOTE: super() is intentionally given PoseNoReader (not this class),
        # which skips PoseNoReader._create_attributes and runs the grandparent
        # version instead -- combo poses must not get a weightInput string
        # attribute, as the commented-out lines below document.
        super(PoseNoReader, self)._create_attributes(control)
        #pose_input = attr.MayaStringVariable('weightInput')
        #pose_input.create(control)
def _remove_empty_multiply_attributes(self):
attributes = self._get_message_attribute_with_prefix('multiply')
for attribute in attributes:
input_value = attr.get_attribute_input('%s.%s' % (self.pose_control, attribute))
if not input_value:
cmds.deleteAttr('%s.%s' % (self.pose_control, attribute))
def _get_pose_string_attributes(self):
return self._get_string_attribute_with_prefix('pose')
def _get_empty_pose_string_index(self):
strings = self._get_pose_string_attributes()
inc = 1
for string in strings:
value = cmds.getAttr('%s.%s' % (self.pose_control, string))
if not value:
break
inc+=1
return inc
def _connect_pose(self, pose):
index = self.get_pose_index(pose)
if index != None:
return
empty_index = self._get_empty_pose_string_index()
self._set_string_node(pose, 'pose', empty_index)
def _get_pose_count(self):
attrs = self._get_pose_string_attributes()
return len(attrs)
    def _connect_multiplies(self):
        """
        Chain the weights of all member poses into this combo's weight.

        Each member pose's weight is multiplied into the running product via
        attr.connect_multiply, so the combo only reaches full weight when all
        member poses are active. The last multiply is gated by enable.
        """
        poses = self.get_poses()
        multiply = None
        # A combo only makes sense with at least two member poses.
        if len(poses) > 1:
            for pose in poses:
                if not pose:
                    continue
                namespace = core.get_namespace(self.pose_control)
                if namespace:
                    pose = '%s:%s' % (namespace, pose)
                output = '%s.weight' % pose
                # First pose feeds the combo weight directly; each later pose
                # feeds input2X of the multiply created for the previous pose.
                if not multiply:
                    input_value = '%s.weight' % self.pose_control
                if multiply:
                    input_value = '%s.input2X' % multiply
                if cmds.objExists(output):
                    multiply = attr.connect_multiply(output, input_value)
            if multiply:
                cmds.connectAttr('%s.enable' % self.pose_control, '%s.input2X' % multiply)
def _disconnect_multiplies(self):
multiplies = self._find_multiplies()
if multiplies:
cmds.delete(multiplies)
def _find_multiplies(self):
input_value = attr.get_attribute_input('%s.weight' % self.pose_control, node_only = True)
multi = []
multiplies = []
if cmds.nodeType(input_value) == 'multiplyDivide':
multi = [input_value]
while multi:
multiplies += multi
new_multi = []
for m in multi:
input_value = attr.get_attribute_input('%s.input1X' % m, node_only = True)
if cmds.nodeType(input_value) == 'multiplyDivide':
new_multi.append(input_value)
input_value = attr.get_attribute_input('%s.input2X' % m, node_only = True)
if | |
CantSellMoreThanOne
else:
await self.sell_1(ctx, ctx.author, "guitar", 75000)
if item.lower() == "drum":
if amt > 1:
raise CantSellMoreThanOne
else:
await self.sell_1(ctx, ctx.author, "drum", 100000)
if item.lower() == "lotterytk":
if amt > 1:
raise CantSellMoreThanOne
else:
await self.sell_1(ctx, ctx.author, "ltk", 2500)
if item.lower() == "alcohol":
await self.sell_inr(ctx, ctx.author, "alcohol", 8500, amt)
return
if item.lower() == "life_saver":
await self.sell_inr(ctx, ctx.author, "life_saver", 25000, amt)
return
if item.lower() == "Noramedal":
await self.sell_inr(ctx, ctx.author, "nr_medal", 10000000, amt)
return
if item.lower() == "Noratrophy":
await self.sell_inr(ctx, ctx.author, "nr_trophy", 50000000, amt)
return
if item.lower() == "lotterytk":
await self.sell_inr(ctx, ctx.author, "ltk", 2500, amt)
return
    @commands.command(aliases=["store"])
    async def shop(self, ctx, page=None):
        """
        Show the item store, one page at a time.

        Args:
            page: Page number as a string ("1", "2" or "3"); defaults to "1".
                Any other value silently shows nothing (no error message).
        """
        # Blacklisted users are refused before any shop page is built.
        bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
        if bl_users:
            e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
            await ctx.send(embed=e)
            return
        if page == None:
            page = "1"
        # Page 1: cheap starter items.
        if page == "1":
            e = discord.Embed(title="Welcome to the Nora store", description="Use `nr.buy <itemname>` to buy something. The `<itemname>` must match the given `key` or some other secret keys!. Your `[itemamount]` should also follow the given `limit`", timestamp=datetime.datetime.utcnow(), color=discord.Color.dark_blue())
            e.add_field(name="<:lottery_ticket:847447419498790952> **Lottery Ticket** — 〄 2,500", value="Key: `lotterytk`\nDescription: Buy this and get a higher chance of winning the lottery, nr.lottery for inro on te next, comming or present lottery. Join the support server for 15% more!(Note)\nUsage: `None - Read description`\nLimit: `1`", inline=False)
            e.add_field(name="<a:pickaxe:836163392796229642> **Pickaxe** — 〄 7,500", value="Key: `pick`\nDescription: Go mining for epic noranics ore!\nUsage: `nr.mine`\nLimit: `1`", inline=False)
            e.add_field(name="<:fishing_pole:835185055433097329> **Fishing Rod** — 〄 8,500", value="Key: `fishing_rod`\nDescription: Go out to your nearest lake chill, fish and sell them for sweet money!\nUsage: `nr.fish`\nLimit: `1`", inline=False)
            e.add_field(name="<:alcohol:836203535728771092> **Alcohol** — 〄 8,500", value="Key: `alcohol`\nDescription: Drink it and you might get lucky, just maybe\nUsage: `nr.use alcohol`\nLimit: `None`", inline=False)
            e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
            e.set_footer(text="Page Index 1/3")
            await ctx.send(embed=e)
            return
        # Page 2: mid-tier tools.
        if page == "2":
            e = discord.Embed(title="Welcome to the Nora store", description="Use `nr.buy <itemname>` to buy something. The `<itemname>` must match the given `key` or some other secret keys!. Your `[itemamount]` should also follow the given `limit`", timestamp=datetime.datetime.utcnow(), color=discord.Color.dark_blue())
            e.add_field(name="<:laptop:835185820230615091> **Laptop** — 〄 10,000", value="Key: `lp`\nDescription: Post meme's for the ad cents.\nUsage: `nr.pm`\nLimit: `1`", inline=False)
            e.add_field(name="<:lifesaver:836562944950796309> **Life Saver** — 〄 25,000", value="Key: `life_saver`\nDescription: If a life saver is in your inventory at the time of death, this item will be consumed and prevent you from dying! You will keep your coins and items.\nUsage: `None - Read description`\nLimit: `None`", inline=False)
            e.add_field(name="<:hunting_rifle:835185487542747225> **Hunting Rifle** — 〄 50,000", value="Key: `hr`\nDescription: Go and hunt for animals!\nUsage: `nr.hunt`\nLimit: `1`", inline=False)
            e.add_field(name="<:classical_guitar:836802689635450880> **Guitar** — 〄 75,000", value="Key: `guitar`\nDescription: Go busking(term for: Street performance)\nUsage: `nr.use guitar`\nLimit: `1`", inline=False)
            e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
            e.set_footer(text="Page Index 2/3")
            await ctx.send(embed=e)
            return
        # Page 3: prestige items.
        if page == "3":
            e = discord.Embed(title="Welcome to the Nora store", description="Use `nr.buy <itemname>` to buy something. The `<itemname>` must match the given `key` or some other secret keys!. Your `[itemamount]` should also follow the given `limit`", timestamp=datetime.datetime.utcnow(), color=discord.Color.dark_blue())
            e.add_field(name="<:drums:836815664189669427> **Drums** — 〄 100,000", value="Key: `drum`\nDescription: Perform in the public!\nUsage: `nr.use drum`\nLimit: `1`", inline=False)
            e.add_field(name="<:noramedal:836832817307844618> **Nora Medal** — 〄 10,000,000", value="Key: `noramedal`\nDescription: A medal only the top 1% of players have!\nUsage: `Show-off`\nLimit: `None`", inline=False)
            e.add_field(name="<:noratrophy:836834560556662784> **Nora Trophy** — 〄 50,000,000", value="Key: `noratrophy`\nDescription: Literally only the richest of the richest of the richest of the richest of the richest of the rich will hold these beloved trophies.\nUsage: `None - Read description`\nLimit: `None`", inline=False)
            e.set_footer(text="Page Index 3/3")
            e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
            await ctx.send(embed=e)
            return
@commands.command(aliases=["inv"])
async def inventory(self, ctx, *, member: discord.Member = None):
bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
if bl_users:
e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
member = member or ctx.author
accounts = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", member.id)
inve = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", member.id)
if not accounts:
await ctx.send(f"{member} has no balance")
return
if not inve:
e = discord.Embed(title=f"{member} has no inventory", timestamp=datetime.datetime.utcnow(), color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
ltk = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "ltk")
fr = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "fishing_rod")
hr = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "h_rifle")
lp = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "laptop")
pick = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "pickaxe")
drums = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "drum")
guitar = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "guitar")
alcohol = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "alcohol")
alcohol_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "alcohol")
life_saver = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "life_saver")
lifesaver_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "life_saver")
Nora_medal = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "nr_medal")
Nora_medal_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "nr_medal")
Nora_trophy = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "nr_trophy")
Nora_trophy_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "nr_trophy")
e = discord.Embed(title=f"{member}'s inventory", timestamp=datetime.datetime.utcnow(), color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
if ltk:
ltk_amt = Nora_trophy_data["amt"]
e.add_field(name="<:lottery_ticket:847447419498790952> Lottery Ticket", value=f"Amount: 1")
if fr:
e.add_field(name="<:fishing_pole:835185055433097329> Fishing Rod", value="Amount: 1")
if hr:
e.add_field(name="<:hunting_rifle:835185487542747225> Hunting Rifle", value="Amount: 1")
if lp:
e.add_field(name="<:laptop:835185820230615091> Laptop", value="Amount: 1")
if pick:
e.add_field(name="<a:pickaxe:836163392796229642> Pickaxe", value="Amount: 1")
if drums:
e.add_field(name="<:drums:836815664189669427> Drums", value="Amount: 1")
if guitar:
e.add_field(name="<:classical_guitar:836802689635450880> Guitar", value="Amount: 1")
if alcohol and alcohol_data["amt"] > 0:
alcohol_amt = alcohol_data["amt"]
e.add_field(name="<:alcohol:836203535728771092> Alcohol", value=f"Amount: {alcohol_amt}")
if life_saver and lifesaver_data["amt"] > 0:
life_saver_amt = lifesaver_data["amt"]
e.add_field(name="<:lifesaver:836562944950796309> Life Saver", value=f"Amount: {life_saver_amt}")
if Nora_medal and Nora_medal_data["amt"] > 0:
Nora_medal_amt = Nora_medal_data["amt"]
e.add_field(name="<:noramedal:836832817307844618> Nora Medal", value=f"Amount: {Nora_medal_amt}")
if Nora_trophy and Nora_trophy_data["amt"] > 0:
Nora_trophy_amt = Nora_trophy_data["amt"]
e.add_field(name="<:noratrophy:836834560556662784> Nora Trophy", value=f"Amount: {Nora_trophy_amt}")
await ctx.send(embed=e)
@commands.command()
async def petshop(self, ctx):
e = discord.Embed(title="PET SHOP", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
e.add_field(name="<:spotted_dog:839739554886058004> Dog — 50,000", value='Key: `dog`\nAdvantages: + 10% while fishing', inline=False)
e.add_field(name="<:brown_cat:839739869692428288> Cat — 75,000", value='Key: `cat`\nAdvantages: + 20% while fishing', inline=False)
e.add_field(name="<:blue_yellow_orange_parrot:839740445574692884> Parrot — 100,000", value='Key: `parrot`\nAdvantages: + 30% while fishing and +10% while hunting', inline=False)
e.add_field(name="<:red_yellow_dragon:839742030930509827> Dragon — 5,000,000", value='Key: `dragon`\nAdvantages: + 40% while fishing and +20% while hunting', inline=False)
await ctx.send(embed=e)
@commands.command()
@commands.cooldown(1, 60, commands.BucketType.user)
async def busk(self, ctx, item_to_busk):
if item_to_busk.lower() == "drum":
await self.busk_drum(ctx, ctx.author)
elif item_to_busk.lower() == "guitar":
await self.busk_guitar(ctx, ctx.author)
@command.command()
@commands.cooldown(1, 40, commands.BucketType)
async def drink(self, ctx, item_to_drink):
if item_to_drink.lower() == "alcohol":
await self.drink_alcohol(ctx, ctx.author)
else:
await ctx.send("That item does not exist")
@commands.command()
async def pet(self, ctx):
pet = await self.bot.db.fetch("SELECT * FROM pets WHERE user_id = $1", ctx.author.id)
if not pet:
await ctx.send("You dont have a pet to check the shop type `nr.petshop`")
return
if pet:
pet_data = await self.bot.db.fetchrow("SELECT * FROM pets WHERE user_id = $1", ctx.author.id)
pet_name = pet_data["name"]
pet_nick = pet_data["nick"]
e = discord.Embed(title=f"{ctx.author.name}'s Pet", description=f"Your pet is a {pet_name}\nNickname: {pet_nick}", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
@commands.command()
async def petnick(self, ctx, *, nick):
pet = await self.bot.db.fetch("SELECT * FROM pets WHERE user_id = $1", ctx.author.id)
if not pet:
await ctx.send("You dont have a pet to check the shop type `nr.petshop`")
return
if pet:
await self.bot.db.execute("UPDATE pets SET nick = $1 WHERE user_id = $2", nick, ctx.author.id)
await ctx.reply(f"Your pet's nickname is now {nick}")
return
else:
await ctx.send("There was an error in command petnick: Something went wrong.")
@commands.command()
async def | |
self.draw_app.ui.slot_axis_radio.set_value('A')
elif self.draw_app.ui.slot_axis_radio.get_value() == 'A':
self.draw_app.ui.slot_axis_radio.set_value('X')
# ## Utility geometry (animated)
self.draw_app.update_utility_geometry(data=(self.draw_app.snap_x, self.draw_app.snap_y))
def clean_up(self):
self.draw_app.selected = []
self.draw_app.ui.tools_table_exc.clearSelection()
self.draw_app.plot_all()
try:
self.draw_app.app.jump_signal.disconnect()
except (TypeError, AttributeError):
pass
class SlotArray(FCShapeTool):
"""
Resulting type: MultiPolygon
"""
    def __init__(self, draw_app):
        """
        Start the slot-array tool: show its UI frames, resolve the selected
        tool diameter, set the custom cursor and draw the initial preview.

        :param draw_app: the Excellon editor instance that owns this tool.
        """
        DrawTool.__init__(self, draw_app)
        self.name = 'slot_array'
        self.draw_app = draw_app

        # Show the tool's parameter panels in the editor UI.
        self.draw_app.ui.slot_frame.show()
        self.draw_app.ui.slot_array_frame.show()
        self.selected_dia = None
        try:
            self.draw_app.app.inform.emit(_("Click to place ..."))
            self.selected_dia = self.draw_app.tool2tooldia[self.draw_app.last_tool_selected]
            # as a visual marker, select again in tooltable the actual tool that we are using
            # remember that it was deselected when clicking on canvas
            item = self.draw_app.ui.tools_table_exc.item((self.draw_app.last_tool_selected - 1), 1)
            self.draw_app.ui.tools_table_exc.setCurrentItem(item)
        except KeyError:
            # No tool selected in the tool table -> abort tool start.
            self.draw_app.app.inform.emit('[WARNING_NOTCL] %s' %
                                          _("To add an Slot Array first select a tool in Tool Table"))
            return
        try:
            QtGui.QGuiApplication.restoreOverrideCursor()
        except Exception:
            pass
        # Replace the cursor with the array-placement icon.
        self.cursor = QtGui.QCursor(QtGui.QPixmap(self.draw_app.app.resource_location + '/aero_array.png'))
        QtGui.QGuiApplication.setOverrideCursor(self.cursor)
        self.steps_per_circ = self.draw_app.app.defaults["geometry_circle_steps"]
        self.half_width = 0.0
        self.half_height = 0.0
        self.radius = float(self.selected_dia / 2.0)
        # Working state; populated from the UI in utility_geometry().
        self.slot_axis = 'X'
        self.slot_array = 'linear'  # 'linear'
        self.slot_array_size = None
        self.slot_pitch = None
        self.slot_linear_angle = None
        self.slot_angle = None
        self.slot_direction = None
        self.slot_radius = None
        # Circular-array click state: origin is set on the first click.
        self.origin = None
        self.destination = None
        self.flag_for_circ_array = None
        self.last_dx = 0
        self.last_dy = 0
        self.pt = []
        # Draw the initial (static) preview at the current snap position.
        geo = self.utility_geometry(data=(self.draw_app.snap_x, self.draw_app.snap_y), static=True)
        if isinstance(geo, DrawToolShape) and geo.geo is not None:
            self.draw_app.draw_utility_geometry(geo=geo)
        self.draw_app.app.inform.emit(_("Click on target location ..."))
        self.draw_app.app.jump_signal.connect(lambda x: self.draw_app.update_utility_geometry(data=x))
        # Switch notebook to Properties page
        self.draw_app.app.ui.notebook.setCurrentWidget(self.draw_app.app.ui.properties_tab)
def click(self, point):
if self.slot_array == 'linear': # 'Linear'
self.make()
return
else: # 'Circular'
if self.flag_for_circ_array is None:
self.draw_app.in_action = True
self.pt.append(point)
self.flag_for_circ_array = True
self.set_origin(point)
self.draw_app.app.inform.emit(_("Click on the Slot Circular Array Start position"))
else:
self.destination = point
self.make()
self.flag_for_circ_array = None
return
    def set_origin(self, origin):
        # Remember the center point of a circular slot array.
        self.origin = origin
    def utility_geometry(self, data=None, static=None):
        """
        Build the live preview geometry for the slot array as the mouse moves.

        :param data: (x, y) tuple of the current snapped mouse position.
        :param static: when truthy, the linear preview is not translated from
            the last drawn position (used for the initial draw).
        :return: DrawToolUtilityShape with the preview, or None when the UI
            entry values cannot be parsed.
        """
        # Re-read all UI parameters so changes take effect immediately.
        self.slot_axis = self.draw_app.ui.slot_array_axis_radio.get_value()
        self.slot_direction = self.draw_app.ui.slot_array_direction_radio.get_value()
        self.slot_array = self.draw_app.ui.slot_array_type_radio.get_value()
        try:
            self.slot_array_size = int(self.draw_app.ui.slot_array_size_entry.get_value())
            try:
                self.slot_pitch = float(self.draw_app.ui.slot_array_pitch_entry.get_value())
                self.slot_linear_angle = float(self.draw_app.ui.slot_array_linear_angle_spinner.get_value())
                self.slot_angle = float(self.draw_app.ui.slot_array_angle_entry.get_value())
            except TypeError:
                self.draw_app.app.inform.emit('[ERROR_NOTCL] %s' %
                                              _("The value is not Float. Check for comma instead of dot separator."))
                return
        except Exception:
            self.draw_app.app.inform.emit('[ERROR_NOTCL] %s' % _("The value is mistyped. Check the value."))
            return
        if self.slot_array == 'linear':  # 'Linear'
            # Fall back to the stored mouse position when no data is supplied.
            if data[0] is None and data[1] is None:
                dx = self.draw_app.x
                dy = self.draw_app.y
            else:
                dx = data[0]
                dy = data[1]
            geo_el_list = []
            geo_el = []
            self.points = [dx, dy]
            # One slot per array element, spaced along X, Y or an angled axis.
            for item in range(self.slot_array_size):
                if self.slot_axis == 'X':
                    geo_el = self.util_shape(((dx + (self.slot_pitch * item)), dy))
                if self.slot_axis == 'Y':
                    geo_el = self.util_shape((dx, (dy + (self.slot_pitch * item))))
                if self.slot_axis == 'A':
                    x_adj = self.slot_pitch * math.cos(math.radians(self.slot_linear_angle))
                    y_adj = self.slot_pitch * math.sin(math.radians(self.slot_linear_angle))
                    geo_el = self.util_shape(
                        ((dx + (x_adj * item)), (dy + (y_adj * item)))
                    )
                # Shift the preview relative to the last drawn position.
                if static is None or static is False:
                    geo_el = affinity.translate(geo_el, xoff=(dx - self.last_dx), yoff=(dy - self.last_dy))
                geo_el_list.append(geo_el)
            self.last_dx = dx
            self.last_dy = dy
            return DrawToolUtilityShape(geo_el_list)
        else:  # 'Circular'
            if data[0] is None and data[1] is None:
                cdx = self.draw_app.x
                cdy = self.draw_app.y
            else:
                cdx = data[0]
                cdy = data[1]
            # if len(self.pt) > 0:
            #     temp_points = [x for x in self.pt]
            #     temp_points.append([cdx, cdy])
            #     return DrawToolUtilityShape(LineString(temp_points))
            utility_list = []
            try:
                radius = distance((cdx, cdy), self.origin)
            except Exception:
                # Origin not yet set (first click pending).
                radius = 0
            if radius == 0:
                self.draw_app.delete_utility_geometry()
            if len(self.pt) >= 1 and radius > 0:
                try:
                    # Flip the radius sign when the cursor is left of the
                    # origin so asin() places the start angle correctly.
                    if cdx < self.origin[0]:
                        radius = -radius
                    # draw the temp geometry
                    initial_angle = math.asin((cdy - self.origin[1]) / radius)
                    temp_circular_geo = self.circular_util_shape(radius, initial_angle)
                    # draw the line
                    temp_points = [x for x in self.pt]
                    temp_points.append([cdx, cdy])
                    temp_line = LineString(temp_points)
                    for geo_shape in temp_circular_geo:
                        utility_list.append(geo_shape.geo)
                    utility_list.append(temp_line)
                    return DrawToolUtilityShape(utility_list)
                except Exception as e:
                    log.debug("SlotArray.utility_geometry -- circular -> %s" % str(e))
def circular_util_shape(self, radius, angle):
self.slot_direction = self.draw_app.ui.slot_array_direction_radio.get_value()
self.slot_angle = self.draw_app.ui.slot_array_angle_entry.get_value()
self.slot_array_size = self.draw_app.ui.slot_array_size_entry.get_value()
circular_geo = []
if self.slot_direction == 'CW':
for i in range(self.slot_array_size):
angle_radians = math.radians(self.slot_angle * i)
x = self.origin[0] + radius * math.cos(-angle_radians + angle)
y = self.origin[1] + radius * math.sin(-angle_radians + angle)
geo_sol = self.util_shape((x, y))
geo_sol = affinity.rotate(geo_sol, angle=(math.pi - angle_radians + angle), use_radians=True)
circular_geo.append(DrawToolShape(geo_sol))
else:
for i in range(self.slot_array_size):
angle_radians = math.radians(self.slot_angle * i)
x = self.origin[0] + radius * math.cos(angle_radians + angle)
y = self.origin[1] + radius * math.sin(angle_radians + angle)
geo_sol = self.util_shape((x, y))
geo_sol = affinity.rotate(geo_sol, angle=(angle_radians + angle - math.pi), use_radians=True)
circular_geo.append(DrawToolShape(geo_sol))
return circular_geo
    def util_shape(self, point):
        """Build a single slot polygon (an obround / "stadium" shape) at *point*.

        The slot is a rectangle capped with two semicircular arcs. Its width
        comes from the currently selected tool diameter, its length and angle
        from the editor UI. With the slot-axis radio set to 'A' the polygon is
        rotated by the UI angle around its own center.

        :param point: (x, y) tuple; if both entries are None the current
            mouse position stored on the draw app is used instead.
        :return: shapely Polygon, or None when a UI value cannot be parsed
            (a warning is emitted to the app in that case).
        """
        # updating values here allows us to change the aperture on the fly, after the Tool has been started
        self.selected_dia = self.draw_app.tool2tooldia[self.draw_app.last_tool_selected]
        self.radius = float(self.selected_dia / 2.0)
        # number of segments used to approximate each semicircular cap
        self.steps_per_circ = self.draw_app.app.defaults["geometry_circle_steps"]
        try:
            slot_length = float(self.draw_app.ui.slot_length_entry.get_value())
        except ValueError:
            # try to convert comma to decimal point. if it's still not working error message and return
            try:
                slot_length = float(self.draw_app.ui.slot_length_entry.get_value().replace(',', '.'))
                # normalize the UI entry to the parsed (dot-decimal) value
                self.draw_app.ui.slot_length_entry.set_value(slot_length)
            except ValueError:
                self.draw_app.app.inform.emit('[WARNING_NOTCL] %s' %
                                              _("Value is missing or wrong format. Add it and retry."))
                return
        try:
            slot_angle = float(self.draw_app.ui.slot_angle_spinner.get_value())
        except ValueError:
            self.draw_app.app.inform.emit('[WARNING_NOTCL] %s' %
                                          _("Value is missing or wrong format. Add it and retry."))
            return
        # Axis 'X' lays the slot horizontally (length along X); any other
        # value ('Y' or 'A') starts from a vertical slot.
        if self.draw_app.ui.slot_axis_radio.get_value() == 'X':
            self.half_width = slot_length / 2.0
            self.half_height = self.radius
        else:
            self.half_width = self.radius
            self.half_height = slot_length / 2.0
        if point[0] is None and point[1] is None:
            point_x = self.draw_app.x
            point_y = self.draw_app.y
        else:
            point_x = point[0]
            point_y = point[1]
        geo = []
        if self.half_height > self.half_width:
            # Vertical slot: straight left/right sides, arc caps at the
            # bottom and top. Corner points are where sides meet the caps.
            p1 = (point_x - self.half_width, point_y - self.half_height + self.half_width)
            p2 = (point_x + self.half_width, point_y - self.half_height + self.half_width)
            p3 = (point_x + self.half_width, point_y + self.half_height - self.half_width)
            p4 = (point_x - self.half_width, point_y + self.half_height - self.half_width)
            down_center = [point_x, point_y - self.half_height + self.half_width]
            d_start_angle = math.pi
            d_stop_angle = 0.0
            down_arc = arc(down_center, self.half_width, d_start_angle, d_stop_angle, 'ccw', self.steps_per_circ)
            up_center = [point_x, point_y + self.half_height - self.half_width]
            u_start_angle = 0.0
            u_stop_angle = math.pi
            up_arc = arc(up_center, self.half_width, u_start_angle, u_stop_angle, 'ccw', self.steps_per_circ)
            # Assemble the outline in order: p1 -> bottom cap -> p2 -> p3 ->
            # top cap -> p4 (the point order matters for a valid polygon).
            geo.append(p1)
            for pt in down_arc:
                geo.append(pt)
            geo.append(p2)
            geo.append(p3)
            for pt in up_arc:
                geo.append(pt)
            geo.append(p4)
        else:
            # Horizontal slot: straight bottom/top sides, arc caps at the
            # left and right ends.
            p1 = (point_x - self.half_width + self.half_height, point_y - self.half_height)
            p2 = (point_x + self.half_width - self.half_height, point_y - self.half_height)
            p3 = (point_x + self.half_width - self.half_height, point_y + self.half_height)
            p4 = (point_x - self.half_width + self.half_height, point_y + self.half_height)
            left_center = [point_x - self.half_width + self.half_height, point_y]
            d_start_angle = math.pi / 2
            d_stop_angle = 1.5 * math.pi
            left_arc = arc(left_center, self.half_height, d_start_angle, d_stop_angle, 'ccw', self.steps_per_circ)
            right_center = [point_x + self.half_width - self.half_height, point_y]
            u_start_angle = 1.5 * math.pi
            u_stop_angle = math.pi / 2
            right_arc = arc(right_center, self.half_height, u_start_angle, u_stop_angle, 'ccw', self.steps_per_circ)
            # Outline order: p1 -> p2 -> right cap -> p3 -> p4 -> left cap.
            geo.append(p1)
            geo.append(p2)
            for pt in right_arc:
                geo.append(pt)
            geo.append(p3)
            geo.append(p4)
            for pt in left_arc:
                geo.append(pt)
        # this function return one slot in the slot array and the following will rotate that one slot around it's
        # center if the radio value is "A".
        if self.draw_app.ui.slot_axis_radio.get_value() == 'A':
            return affinity.rotate(Polygon(geo), -slot_angle)
        else:
            return Polygon(geo)
def make(self):
self.geometry = []
geo = None
try:
QtGui.QGuiApplication.restoreOverrideCursor()
except Exception:
pass
# add the point to slots if the diameter is a key in the dict, if not, create it add the drill location
# to the value, as a list of itself
if self.selected_dia not in self.draw_app.slot_points_edit:
self.draw_app.slot_points_edit[self.selected_dia] = []
for i in range(self.slot_array_size):
self.draw_app.slot_points_edit[self.selected_dia].append(self.points)
self.draw_app.current_storage = self.draw_app.storage_dict[self.selected_dia]
if self.slot_array == 'linear': # 'Linear'
for item in range(self.slot_array_size):
if self.slot_axis == 'X':
geo = self.util_shape(((self.points[0] + (self.slot_pitch * item)), self.points[1]))
if self.slot_axis == 'Y':
geo = self.util_shape((self.points[0], (self.points[1] + (self.slot_pitch * item))))
if self.slot_axis == 'A':
x_adj = self.slot_pitch * math.cos(math.radians(self.slot_linear_angle))
y_adj = self.slot_pitch * math.sin(math.radians(self.slot_linear_angle))
geo = self.util_shape(
((self.points[0] + (x_adj * item)), (self.points[1] + (y_adj * item)))
)
self.geometry.append(DrawToolShape(geo))
else: # 'Circular'
if (self.slot_angle * self.slot_array_size) > 360:
self.draw_app.app.inform.emit('[WARNING_NOTCL] %s' %
_("Too many items for the selected spacing angle."))
self.draw_app.app.jump_signal.disconnect()
return
# radius = distance(self.destination, self.origin)
# initial_angle = math.asin((self.destination[1] - self.origin[1]) / radius)
# for i in range(self.slot_array_size):
# angle_radians = math.radians(self.slot_angle * i)
# if self.slot_direction == 'CW':
# x = self.origin[0] + radius * math.cos(-angle_radians + initial_angle)
# y = self.origin[1] + radius * math.sin(-angle_radians + initial_angle)
# else:
# x = self.origin[0] + radius * math.cos(angle_radians + initial_angle)
# y = self.origin[1] + radius * math.sin(angle_radians + initial_angle)
#
# geo = self.util_shape((x, y))
# if | |
# Source: icanbwell/SparkFhirSchemas -- spark_fhir_schemas/stu3/complex_types/claim.py
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class ClaimSchema:
"""
A provider issued list of services and products provided, or to be provided,
to a patient which is provided to an insurer for payment recovery.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
A provider issued list of services and products provided, or to be provided,
to a patient which is provided to an insurer for payment recovery.
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content may not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource, and may be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
resourceType: This is a Claim resource
identifier: The business identifier for the instance: claim number, pre-determination or
pre-authorization number.
status: The status of the resource instance.
type: The category of claim, eg, oral, pharmacy, vision, insitutional, professional.
subType: A finer grained suite of claim subtype codes which may convey Inpatient vs
Outpatient and/or a specialty service. In the US the BillType.
use: Complete (Bill or Claim), Proposed (Pre-Authorization), Exploratory (Pre-
determination).
patient: Patient Resource.
billablePeriod: The billable period for which charges are being submitted.
created: The date when the enclosed suite of services were performed or completed.
enterer: Person who created the invoice/claim/pre-determination or pre-authorization.
insurer: The Insurer who is target of the request.
provider: The provider which is responsible for the bill, claim pre-determination, pre-
authorization.
organization: The organization which is responsible for the bill, claim pre-determination,
pre-authorization.
priority: Immediate (STAT), best effort (NORMAL), deferred (DEFER).
fundsReserve: In the case of a Pre-Determination/Pre-Authorization the provider may request
that funds in the amount of the expected Benefit be reserved ('Patient' or
'Provider') to pay for the Benefits determined on the subsequent claim(s).
'None' explicitly indicates no funds reserving is requested.
related: Other claims which are related to this claim such as prior claim versions or
for related services.
prescription: Prescription to support the dispensing of Pharmacy or Vision products.
originalPrescription: Original prescription which has been superceded by this prescription to
support the dispensing of pharmacy services, medications or products. For
example, a physician may prescribe a medication which the pharmacy determines
is contraindicated, or for which the patient has an intolerance, and therefor
issues a new precription for an alternate medication which has the same
theraputic intent. The prescription from the pharmacy becomes the
'prescription' and that from the physician becomes the 'original
prescription'.
payee: The party to be reimbursed for the services.
referral: The referral resource which lists the date, practitioner, reason and other
supporting information.
facility: Facility where the services were provided.
careTeam: The members of the team who provided the overall service as well as their role
and whether responsible and qualifications.
information: Additional information codes regarding exceptions, special considerations, the
condition, situation, prior or concurrent issues. Often there are mutiple
jurisdiction specific valuesets which are required.
diagnosis: List of patient diagnosis for which care is sought.
procedure: Ordered list of patient procedures performed to support the adjudication.
insurance: Financial instrument by which payment information for health care.
accident: An accident which resulted in the need for healthcare services.
employmentImpacted: The start and optional end dates of when the patient was precluded from
working due to the treatable condition(s).
hospitalization: The start and optional end dates of when the patient was confined to a
treatment center.
item: First tier of goods and services.
total: The total value of the claim.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.stu3.complex_types.period import PeriodSchema
from spark_fhir_schemas.stu3.complex_types.claim_related import (
Claim_RelatedSchema,
)
from spark_fhir_schemas.stu3.complex_types.claim_payee import Claim_PayeeSchema
from spark_fhir_schemas.stu3.complex_types.claim_careteam import (
Claim_CareTeamSchema,
)
from spark_fhir_schemas.stu3.complex_types.claim_information import (
Claim_InformationSchema,
)
from spark_fhir_schemas.stu3.complex_types.claim_diagnosis import (
Claim_DiagnosisSchema,
)
from spark_fhir_schemas.stu3.complex_types.claim_procedure import (
Claim_ProcedureSchema,
)
from spark_fhir_schemas.stu3.complex_types.claim_insurance import (
Claim_InsuranceSchema,
)
from spark_fhir_schemas.stu3.complex_types.claim_accident import (
Claim_AccidentSchema,
)
from spark_fhir_schemas.stu3.complex_types.claim_item import Claim_ItemSchema
from spark_fhir_schemas.stu3.complex_types.money import MoneySchema
if (
max_recursion_limit and nesting_list.count("Claim") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["Claim"]
schema = StructType(
[
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the resource. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content may not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content.
StructField("implicitRules", StringType(), True),
# The base language in which the resource is written.
StructField("language", StringType(), True),
# A human-readable narrative that contains a summary of the resource, and may be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
| |
grouping of information about the connection to the remote resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] group_ids: The ID(s) of the group(s) obtained from the remote resource that this private endpoint should connect to. Required on PUT (CreateOrUpdate) requests.
:param pulumi.Input[str] private_link_service_id: The resource id of the private link service. Required on PUT (CreateOrUpdate) requests.
:param pulumi.Input[str] request_message: A message passed to the owner of the remote resource with this connection request. Restricted to 140 chars.
"""
if group_ids is not None:
pulumi.set(__self__, "group_ids", group_ids)
if private_link_service_id is not None:
pulumi.set(__self__, "private_link_service_id", private_link_service_id)
if request_message is not None:
pulumi.set(__self__, "request_message", request_message)
    # Pulumi maps the snake_case Python attribute to the camelCase API field
    # named in each @pulumi.getter; the setters store into the same slot.
    @property
    @pulumi.getter(name="groupIds")
    def group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The ID(s) of the group(s) obtained from the remote resource that this private endpoint should connect to. Required on PUT (CreateOrUpdate) requests.
        """
        return pulumi.get(self, "group_ids")

    @group_ids.setter
    def group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "group_ids", value)

    @property
    @pulumi.getter(name="privateLinkServiceId")
    def private_link_service_id(self) -> Optional[pulumi.Input[str]]:
        """
        The resource id of the private link service. Required on PUT (CreateOrUpdate) requests.
        """
        return pulumi.get(self, "private_link_service_id")

    @private_link_service_id.setter
    def private_link_service_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_link_service_id", value)

    @property
    @pulumi.getter(name="requestMessage")
    def request_message(self) -> Optional[pulumi.Input[str]]:
        """
        A message passed to the owner of the remote resource with this connection request. Restricted to 140 chars.
        """
        # NOTE(review): the 140-char limit is enforced by the service, not
        # validated here -- confirm before relying on it client-side.
        return pulumi.get(self, "request_message")

    @request_message.setter
    def request_message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "request_message", value)
@pulumi.input_type
class ReferenceInputPropertiesArgs:
    def __init__(__self__, *,
                 datasource: Optional[pulumi.Input['BlobReferenceInputDataSourceArgs']] = None,
                 serialization: Optional[pulumi.Input[Union['AvroSerializationArgs', 'CsvSerializationArgs', 'JsonSerializationArgs']]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        The properties that are associated with an input containing reference data.
        :param pulumi.Input['BlobReferenceInputDataSourceArgs'] datasource: Describes an input data source that contains reference data. Required on PUT (CreateOrReplace) requests.
        :param pulumi.Input[Union['AvroSerializationArgs', 'CsvSerializationArgs', 'JsonSerializationArgs']] serialization: Describes how data from an input is serialized or how data is serialized when written to an output. Required on PUT (CreateOrReplace) requests.
        :param pulumi.Input[str] type: Indicates whether the input is a source of reference data or stream data. Required on PUT (CreateOrReplace) requests.
               Expected value is 'Reference'.
        """
        # Forward only the arguments that were actually supplied; an absent
        # field is different from an explicit None for the resource provider.
        for field_name, field_value in (("datasource", datasource),
                                        ("serialization", serialization)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)
        # 'type' is a discriminator: whatever non-None value is passed, the
        # stored value is the fixed variant tag 'Reference'.
        if type is not None:
            pulumi.set(__self__, "type", 'Reference')

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Indicates whether the input is a source of reference data or stream data. Required on PUT (CreateOrReplace) requests.
        Expected value is 'Reference'.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter
    def datasource(self) -> Optional[pulumi.Input['BlobReferenceInputDataSourceArgs']]:
        """
        Describes an input data source that contains reference data. Required on PUT (CreateOrReplace) requests.
        """
        return pulumi.get(self, "datasource")

    @datasource.setter
    def datasource(self, value: Optional[pulumi.Input['BlobReferenceInputDataSourceArgs']]):
        pulumi.set(self, "datasource", value)

    @property
    @pulumi.getter
    def serialization(self) -> Optional[pulumi.Input[Union['AvroSerializationArgs', 'CsvSerializationArgs', 'JsonSerializationArgs']]]:
        """
        Describes how data from an input is serialized or how data is serialized when written to an output. Required on PUT (CreateOrReplace) requests.
        """
        return pulumi.get(self, "serialization")

    @serialization.setter
    def serialization(self, value: Optional[pulumi.Input[Union['AvroSerializationArgs', 'CsvSerializationArgs', 'JsonSerializationArgs']]]):
        pulumi.set(self, "serialization", value)
@pulumi.input_type
class ScalarFunctionPropertiesArgs:
    def __init__(__self__, *,
                 binding: Optional[pulumi.Input[Union['AzureMachineLearningWebServiceFunctionBindingArgs', 'JavaScriptFunctionBindingArgs']]] = None,
                 inputs: Optional[pulumi.Input[Sequence[pulumi.Input['FunctionInputArgs']]]] = None,
                 output: Optional[pulumi.Input['FunctionOutputArgs']] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        The properties that are associated with a scalar function.
        :param pulumi.Input[Union['AzureMachineLearningWebServiceFunctionBindingArgs', 'JavaScriptFunctionBindingArgs']] binding: The physical binding of the function. For example, in the Azure Machine Learning web service’s case, this describes the endpoint.
        :param pulumi.Input[Sequence[pulumi.Input['FunctionInputArgs']]] inputs: A list of inputs describing the parameters of the function.
        :param pulumi.Input['FunctionOutputArgs'] output: The output of the function.
        :param pulumi.Input[str] type: Indicates the type of function.
               Expected value is 'Scalar'.
        """
        # Only store fields the caller actually provided.
        for field_name, field_value in (("binding", binding),
                                        ("inputs", inputs),
                                        ("output", output)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)
        # 'type' is a discriminator fixed to 'Scalar' for this shape,
        # regardless of the non-None value that was passed in.
        if type is not None:
            pulumi.set(__self__, "type", 'Scalar')

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Indicates the type of function.
        Expected value is 'Scalar'.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter
    def binding(self) -> Optional[pulumi.Input[Union['AzureMachineLearningWebServiceFunctionBindingArgs', 'JavaScriptFunctionBindingArgs']]]:
        """
        The physical binding of the function. For example, in the Azure Machine Learning web service’s case, this describes the endpoint.
        """
        return pulumi.get(self, "binding")

    @binding.setter
    def binding(self, value: Optional[pulumi.Input[Union['AzureMachineLearningWebServiceFunctionBindingArgs', 'JavaScriptFunctionBindingArgs']]]):
        pulumi.set(self, "binding", value)

    @property
    @pulumi.getter
    def inputs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FunctionInputArgs']]]]:
        """
        A list of inputs describing the parameters of the function.
        """
        return pulumi.get(self, "inputs")

    @inputs.setter
    def inputs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FunctionInputArgs']]]]):
        pulumi.set(self, "inputs", value)

    @property
    @pulumi.getter
    def output(self) -> Optional[pulumi.Input['FunctionOutputArgs']]:
        """
        The output of the function.
        """
        return pulumi.get(self, "output")

    @output.setter
    def output(self, value: Optional[pulumi.Input['FunctionOutputArgs']]):
        pulumi.set(self, "output", value)
@pulumi.input_type
class ServiceBusQueueOutputDataSourceArgs:
    def __init__(__self__, *,
                 property_columns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 queue_name: Optional[pulumi.Input[str]] = None,
                 service_bus_namespace: Optional[pulumi.Input[str]] = None,
                 shared_access_policy_key: Optional[pulumi.Input[str]] = None,
                 shared_access_policy_name: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        Describes a Service Bus Queue output data source.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] property_columns: A string array of the names of output columns to be attached to Service Bus messages as custom properties.
        :param pulumi.Input[str] queue_name: The name of the Service Bus Queue. Required on PUT (CreateOrReplace) requests.
        :param pulumi.Input[str] service_bus_namespace: The namespace that is associated with the desired Event Hub, Service Bus Queue, Service Bus Topic, etc. Required on PUT (CreateOrReplace) requests.
        :param pulumi.Input[str] shared_access_policy_key: The shared access policy key for the specified shared access policy. Required on PUT (CreateOrReplace) requests.
        :param pulumi.Input[str] shared_access_policy_name: The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. Required on PUT (CreateOrReplace) requests.
        :param pulumi.Input[str] type: Indicates the type of data source output will be written to. Required on PUT (CreateOrReplace) requests.
               Expected value is 'Microsoft.ServiceBus/Queue'.
        """
        # Only set the fields the caller supplied; absent and None differ
        # for the resource provider.
        if property_columns is not None:
            pulumi.set(__self__, "property_columns", property_columns)
        if queue_name is not None:
            pulumi.set(__self__, "queue_name", queue_name)
        if service_bus_namespace is not None:
            pulumi.set(__self__, "service_bus_namespace", service_bus_namespace)
        if shared_access_policy_key is not None:
            # NOTE(review): this is a secret credential -- confirm it is
            # marked/treated as such by the consuming stack configuration.
            pulumi.set(__self__, "shared_access_policy_key", shared_access_policy_key)
        if shared_access_policy_name is not None:
            pulumi.set(__self__, "shared_access_policy_name", shared_access_policy_name)
        # 'type' is a discriminator: any non-None value is replaced with the
        # fixed variant tag 'Microsoft.ServiceBus/Queue'.
        if type is not None:
            pulumi.set(__self__, "type", 'Microsoft.ServiceBus/Queue')

    @property
    @pulumi.getter(name="propertyColumns")
    def property_columns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A string array of the names of output columns to be attached to Service Bus messages as custom properties.
        """
        return pulumi.get(self, "property_columns")

    @property_columns.setter
    def property_columns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "property_columns", value)

    @property
    @pulumi.getter(name="queueName")
    def queue_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Service Bus Queue. Required on PUT (CreateOrReplace) requests.
        """
        return pulumi.get(self, "queue_name")

    @queue_name.setter
    def queue_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "queue_name", value)

    @property
    @pulumi.getter(name="serviceBusNamespace")
    def service_bus_namespace(self) -> Optional[pulumi.Input[str]]:
        """
        The namespace that is associated with the desired Event Hub, Service Bus Queue, Service Bus Topic, etc. Required on PUT (CreateOrReplace) requests.
        """
        return pulumi.get(self, "service_bus_namespace")

    @service_bus_namespace.setter
    def service_bus_namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_bus_namespace", value)

    @property
    @pulumi.getter(name="sharedAccessPolicyKey")
    def shared_access_policy_key(self) -> Optional[pulumi.Input[str]]:
        """
        The shared access policy key for the specified shared access policy. Required on PUT (CreateOrReplace) requests.
        """
        return pulumi.get(self, "shared_access_policy_key")

    @shared_access_policy_key.setter
    def shared_access_policy_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "shared_access_policy_key", value)

    @property
    @pulumi.getter(name="sharedAccessPolicyName")
    def shared_access_policy_name(self) -> Optional[pulumi.Input[str]]:
        """
        The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. Required on PUT (CreateOrReplace) requests.
        """
        return pulumi.get(self, "shared_access_policy_name")

    @shared_access_policy_name.setter
    def shared_access_policy_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "shared_access_policy_name", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Indicates the type of data source output will be written to. Required on PUT (CreateOrReplace) requests.
        Expected value is 'Microsoft.ServiceBus/Queue'.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ServiceBusTopicOutputDataSourceArgs:
def __init__(__self__, *,
property_columns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service_bus_namespace: Optional[pulumi.Input[str]] = None,
shared_access_policy_key: Optional[pulumi.Input[str]] = None,
shared_access_policy_name: Optional[pulumi.Input[str]] = None,
topic_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
Describes a Service Bus Topic output data source.
:param pulumi.Input[Sequence[pulumi.Input[str]]] property_columns: A string array of the names of output columns to be attached to Service Bus messages as custom properties.
:param pulumi.Input[str] service_bus_namespace: The namespace that is associated with the desired Event Hub, Service Bus Queue, Service Bus Topic, etc. Required on PUT (CreateOrReplace) requests.
:param pulumi.Input[str] shared_access_policy_key: The shared access policy key for the specified shared access policy. Required on PUT (CreateOrReplace) requests.
:param pulumi.Input[str] shared_access_policy_name: The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. Required on PUT (CreateOrReplace) requests.
:param pulumi.Input[str] topic_name: The name of the Service Bus Topic. Required on PUT (CreateOrReplace) requests.
:param pulumi.Input[str] type: Indicates the | |
import json
from django.core.urlresolvers import reverse
from django.shortcuts import render, get_object_or_404
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.http import HttpResponseRedirect, HttpResponse, Http404, JsonResponse
from django.contrib.auth.decorators import permission_required
from survey.forms.filters import QuestionFilterForm
from survey.models import Question, QuestionSet
from survey.models import QuestionFlow
from survey.models import QuestionSet
from survey.models import QuestionTemplate
from survey.models import TemplateOption
from survey.models import QuestionLoop
from survey.models import QuestionOption
from survey.models import Answer
from survey.models import ResponseValidation
from survey.forms.question import get_question_form, BatchQuestionForm, QuestionForm # , QuestionFlowForm
from survey.services.export_questions import get_question_as_dump
from survey.utils.query_helper import get_filterset
from survey.views.custom_decorators import not_allowed_when_batch_is_open
from survey.forms.logic import LogicForm, LoopingForm
from survey.forms.response_validation import ResponseValidationForm
# User-facing error messages emitted when attempting to mutate a batch that
# is currently open. Written as plain single-line literals: the previous
# backslash line-continuations ran the continuation INSIDE the string, so the
# source indentation (or lack of a separating space) leaked into the message
# text shown to users.
ADD_LOGIC_ON_OPEN_BATCH_ERROR_MESSAGE = (
    "Logics cannot be added while the batch is open.")
ADD_SUBQUESTION_ON_OPEN_BATCH_ERROR_MESSAGE = (
    "Subquestions cannot be added while batch is open.")
REMOVE_QUESTION_FROM_OPEN_BATCH_ERROR_MESSAGE = (
    "Question cannot be removed from a batch while the batch is open.")
@permission_required('auth.can_view_batches')
def index(request, qset_id):
    """List the questions of a question set, with optional filtering.

    Supports a free-text filter (``q``, matched against identifier/text) and
    a question-type filter (``question_types``); filter parameters are read
    from GET or POST depending on the request method. Renders the
    ``set_questions/index.html`` template.

    :raises Http404: when no QuestionSet with ``qset_id`` exists.
    """
    # Historical naming: a QuestionSet is referred to as a "batch"
    # throughout this view.
    try:
        batch = QuestionSet.get(pk=qset_id)
    except QuestionSet.DoesNotExist:
        raise Http404("No QuestionSet Model matches the given query.")
    questions = batch.questions_inline()
    request_data = request.GET if request.method == 'GET' else request.POST
    question_filter_form = QuestionFilterForm(data=request_data, qset=batch)
    search_fields = ['identifier', 'text', ]
    qset_questions = batch.questions.all()
    # NOTE(review): the 'q' branch replaces the inline listing with a filter
    # over ALL batch questions, while 'question_types' narrows whatever
    # 'questions' currently holds -- confirm this interplay is intended.
    if 'q' in request_data:
        questions = get_filterset(qset_questions, request_data['q'], search_fields)
    if 'question_types' in request_data:
        relevant_ids = list(question_filter_form.filter(
            qset_questions).values_list('id', flat=True))
        questions = [q for q in questions if q.id in relevant_ids]
    # Attach navigation breadcrumbs for this question set, when available.
    breadcrumbs = Question.index_breadcrumbs(qset=batch)
    if breadcrumbs:
        request.breadcrumbs(breadcrumbs)
    context = {'questions': questions, 'request': request, 'batch': batch,
               'question_filter_form': question_filter_form,
               'placeholder': 'identifier, text',
               'template_file': 'interviews/answer.html',
               'is_preview': True
               # is_preview=True renders answers in preview mode (no ODK
               # access required).
               }
    return render(request, 'set_questions/index.html', context)
@permission_required('auth.can_view_batches')
def new_subquestion(request, batch_id):
    """Create a new subquestion for question set `batch_id` (see _save_subquestion)."""
    return _save_subquestion(request, batch_id)
def _save_subquestion(request, batch_id, instance=None):
    """Create (instance=None) or edit an existing subquestion.

    Subquestions are saved as "zombies" (questions not yet bound into the
    inline flow) when newly created.  AJAX POSTs get a JSON summary of the
    saved question; AJAX GETs get the bare form fragment; non-AJAX requests
    are redirected back to the question listing.
    """
    batch = QuestionSet.get(pk=batch_id)
    # Shadows the module-level QuestionForm import on purpose: each
    # QuestionSet subtype has its own concrete question model and form.
    QuestionForm = get_question_form(batch.question_model())
    questionform = QuestionForm(batch, instance=instance)
    if request.method == 'POST':
        questionform = QuestionForm(batch, data=request.POST, instance=instance)
        if questionform.is_valid():
            if instance:
                zombify = False
            else:
                # Brand-new subquestions start unattached to the flow.
                zombify = True
            question = questionform.save(zombie=zombify)
            if request.is_ajax():
                return HttpResponse(
                    json.dumps(
                        {
                            'id': question.pk,
                            'text': question.text,
                            'identifier': question.identifier}),
                    content_type='application/json')
            messages.info(request, 'Sub Question saved')
    if instance:
        heading = 'Edit Subquestion'
    else:
        heading = 'New Subquestion'
    context = {
        'questionform': questionform,
        'button_label': 'Create',
        'id': 'add-sub_question-form',
        'USSD_MAX_CHARS': settings.USSD_MAX_CHARS,
        'save_url': reverse(
            '%s_home' %
            batch.resolve_tag()),
        'cancel_url': reverse(
            'qset_questions_page',
            args=(
                batch.pk,
            )),
        'class': 'question-form',
        'heading': heading}
    breadcrumbs = Question.edit_breadcrumbs(qset=batch)
    if breadcrumbs:
        request.breadcrumbs(breadcrumbs)
    template_name = 'set_questions/new.html'
    if request.is_ajax():
        template_name = 'set_questions/_add_question.html'
        return render(request, template_name, context)
    else:
        return HttpResponseRedirect(
            reverse(
                'qset_questions_page',
                args=(
                    batch.pk,
                )))
def get_sub_questions_for_question(request, question_id):
    """Return a JSON list of the zombie (unattached) questions in this question's set."""
    parent = Question.objects.get(id=question_id)
    zombies = Question.zombies(parent.qset)
    return _create_question_hash_response(zombies)
def get_prev_questions_for_question(request, question_id):
    """Return a JSON list of the inline questions preceding this one."""
    current = Question.objects.get(id=question_id)
    return _create_question_hash_response(current.previous_inlines())
def get_questions_for_batch(request, batch_id, question_id):
    """Return a JSON list of the batch's inline questions, excluding `question_id`.

    BUGFIX: the exclusion used `is not` on ints.  Identity comparison only
    coincides with equality for CPython's cached small ints (-5..256), so the
    question silently stopped being excluded once its pk exceeded 256.
    """
    batch = QuestionSet.get(id=batch_id)
    questions = batch.questions_inline()
    questions = [q for q in questions if int(q.pk) != int(question_id)]
    return _create_question_hash_response(questions)
def _create_question_hash_response(questions):
    """Serialize questions to a JSON array of {id, text, identifier} objects.

    BUGFIX: used map(), which returns a lazy iterator on Python 3 that
    json.dumps cannot serialize; a list comprehension works on both 2 and 3.
    """
    questions_to_display = [
        {'id': str(question.id),
         'text': question.text,
         'identifier': question.identifier}
        for question in questions]
    return HttpResponse(
        json.dumps(questions_to_display),
        content_type='application/json')
@permission_required('auth.can_view_batches')
def edit_subquestion(request, question_id, batch_id=None):
    """Edit an existing subquestion via the shared _save_subquestion handler.

    NOTE(review): batch_id defaults to None but _save_subquestion does
    QuestionSet.get(pk=batch_id) -- callers presumably always supply it;
    confirm against the URLconf.
    """
    question = Question.objects.get(pk=question_id)
    return _save_subquestion(request, batch_id, instance=question)
@permission_required('auth.can_view_batches')
def delete(request, question_id):
    """Delete a question; delegates to the module-level _remove helper (defined elsewhere in this file)."""
    return _remove(request, question_id)
@permission_required('auth.can_view_batches')
def add_logic(request, qset_id, question_id):
    """Attach skip/flow logic to a question within a question set.

    On a valid POST the logic is saved and the user is redirected to the
    question listing; otherwise the logic page is (re-)rendered with any
    form errors.
    """
    question = Question.get(id=question_id)
    batch = QuestionSet.get(id=qset_id)
    QuestionForm = get_question_form(batch.question_model())
    response = None
    cancel_url = '../'
    logic_form = LogicForm(question)
    question_rules_for_batch = {}
    # question_rules_for_batch[question] = question.rules_for_batch(batch)
    if request.method == "POST":
        logic_form = LogicForm(question, data=request.POST)
        if logic_form.is_valid():
            logic_form.save()
            messages.success(request, 'Logic successfully added.')
            response = HttpResponseRedirect(
                reverse('qset_questions_page', args=(batch.pk, )))
    breadcrumbs = Question.edit_breadcrumbs(qset=batch)
    if breadcrumbs:
        request.breadcrumbs(breadcrumbs)
        # Cancel returns to the nearest breadcrumb target.
        cancel_url = breadcrumbs[-1][1]
    context = {
        'logic_form': logic_form,
        'button_label': 'Save',
        'question': question,
        'USSD_MAX_CHARS': settings.USSD_MAX_CHARS,
        'rules_for_batch': question_rules_for_batch,
        # Embedded form for creating a subquestion from the logic modal.
        'questionform': QuestionForm(
            batch,
            parent_question=question),
        'modal_action': reverse(
            'add_qset_subquestion_page',
            args=(
                batch.pk,
            )),
        'class': 'question-form',
        'batch_id': qset_id,
        'batch': batch,
        'cancel_url': cancel_url}
    return response or render(request, "set_questions/logic.html", context)
def manage_loop(request, question_id):
    """Create or edit the loop that starts at the given question."""
    question = Question.get(id=question_id)
    batch = QuestionSet.get(pk=question.qset.pk)
    cancel_url = '../'
    # Reuse the loop this question already starts (if any) so the form edits
    # rather than duplicates it.
    existing_loop = getattr(question, 'loop_started', None)
    looping_form = LoopingForm(question, instance=existing_loop)
    if request.method == "POST":
        looping_form = LoopingForm(
            question,
            instance=existing_loop,
            data=request.POST)
        if looping_form.is_valid():
            looping_form.save()
            messages.success(request, 'Loop Logic successfully added.')
            return HttpResponseRedirect(
                reverse(
                    'qset_questions_page',
                    args=(
                        batch.pk,
                    )))
    breadcrumbs = Question.edit_breadcrumbs(qset=batch)
    if breadcrumbs:
        request.breadcrumbs(breadcrumbs)
        cancel_url = breadcrumbs[-1][1]
    context = {
        'loop_form': looping_form,
        'button_label': 'Save',
        'question': question,
        'cancel_url': cancel_url}
    return render(request, "set_questions/loop.html", context)
@permission_required('auth.can_view_batches')
def delete_logic(request, flow_id):
    """Delete a question flow (logic rule) and clean up orphaned zombie questions."""
    flow = get_object_or_404(QuestionFlow, pk=flow_id)
    qset = flow.question.qset
    flow.delete()
    # Questions left dangling by the removed flow are purged here.
    _kill_zombies(qset.zombie_questions())
    messages.success(request, "Logic successfully deleted.")
    return HttpResponseRedirect(
        reverse('qset_questions_page', args=(qset.pk, )))
@permission_required('auth.can_view_batches')
def edit(request, question_id):
    """Render (or process) the edit form for an existing question."""
    question = Question.get(id=question_id)
    batch = QuestionSet.get(pk=question.qset.pk)
    response, context = _render_question_view(
        request, batch, instance=question)
    # BUGFIX: the key was 'page_title ' (trailing space); Django template
    # variable lookup resolves {{ page_title }} against the exact key, so
    # the page title could never render.
    context['page_title'] = 'Edit Question'
    return response or render(request, 'set_questions/new.html', context)
@permission_required('auth.can_view_batches')
def new(request, qset_id):
    """Render (or process) the creation form for a new question."""
    batch = QuestionSet.get(pk=qset_id)  # can be listing or actual batch
    response, context = _render_question_view(request, batch)
    # BUGFIX: the key was 'page_title ' (trailing space), which Django
    # templates cannot resolve, so the title never showed.
    context['page_title'] = 'Add Question'
    return response or render(request, 'set_questions/new.html', context)
@permission_required('auth.can_view_batches')
def insert(request, prev_quest_id):
    """Render/process the form for inserting a question right after `prev_quest_id`."""
    prev_question = Question.get(pk=prev_quest_id)
    qset = QuestionSet.get(pk=prev_question.qset.pk)
    response, context = _render_question_view(
        request, qset, prev_question=prev_question)
    context['prev_question'] = prev_question
    return response or render(request, 'set_questions/new.html', context)
@permission_required('auth.can_view_batches')
def json_create_response_validation(request):
    """Create a ResponseValidation from a POSTed form and report the result as JSON.

    :param request:
    :return:
    """
    if request.method != 'POST':
        return JsonResponse({})
    form = ResponseValidationForm(data=request.POST)
    if not form.is_valid():
        return JsonResponse({'success': False, 'error': form.errors})
    validation = form.save()
    created = {'id': validation.id, 'text': str(validation)}
    return JsonResponse({'success': True, 'created': created})
@permission_required('auth.can_view_batches')
def get_response_validations(request):
    """Return (as JSON) the ids of ResponseValidations applicable to an answer type."""
    params = request.GET if request.method == 'GET' else request.POST
    answer_type = params.get('answer_type')
    answer_class = Answer.get_class(answer_type)
    names = [validator.__name__ for validator in answer_class.validators()]
    ids = ResponseValidation.objects.filter(
        validation_test__in=names).values_list('id', flat=True)
    return JsonResponse(list(ids), safe=False)
def get_answer_validations(request):
    """Return (as JSON) the validator names supported by the given answer type."""
    params = request.GET if request.method == 'GET' else request.POST
    answer_class = Answer.get_class(params.get('answer_type'))
    names = [validator.__name__ for validator in answer_class.validators()]
    return JsonResponse(names, safe=False)
def _process_question_form(request, batch, response, question_form):
    """Validate and persist a question form; optionally copy it to the library.

    Returns (response, question_form): `response` is a redirect on success,
    or the caller's original value when validation failed (the form then
    carries the errors).
    """
    instance = question_form.instance
    action_str = 'edit' if instance else 'add'
    if question_form.is_valid():
        question = question_form.save(**request.POST)
        module = getattr(question, 'module', None)
        if 'add_to_lib_button' in request.POST:
            # Copy the question (and its options) into the template library.
            qt = QuestionTemplate.objects.create(
                identifier=question.identifier,
                text=question.text,
                answer_type=question.answer_type,
                module=module)
            options = question.options.all()
            if options:
                topts = [TemplateOption(question=qt, text=option.text,
                                        order=option.order)
                         for option in options]
                TemplateOption.objects.bulk_create(topts)
            messages.success(request, 'Question successfully %sed. to library' % action_str)
        messages.success(request, 'Question successfully Saved.')
        if 'add_more_button' in request.POST:
            # Empty URL redirects back to the current page for rapid entry.
            redirect_url = ''
        else:
            redirect_url = reverse('qset_questions_page', args=(batch.pk, ))
        response = HttpResponseRedirect(redirect_url)
    else:
        # BUGFIX: dict.values() is a non-indexable view on Python 3;
        # materialize it before picking the first error message.
        first_error = list(question_form.errors.values())[0][0]
        messages.error(request, 'Question was not %sed: %s' % (action_str, first_error))
    return response, question_form
def _render_question_view(request, batch, instance=None, prev_question=None):
    """Build the (response, context) pair shared by the new/edit/insert views.

    Returns a redirect response on a successful POST (else None) together
    with the template context for set_questions/new.html.
    """
    # Default the insertion point: append after the last inline question when
    # creating, or after the edited question's predecessor when editing.
    if instance is None and prev_question is None:
        prev_question = batch.last_question_inline()
    elif prev_question is None:
        try:
            prev_inlines = instance.previous_inlines()
            if prev_inlines:
                prev_question = prev_inlines[-1]
        except ValidationError:
            pass
    button_label = 'Create'
    options = None
    response = None
    QuestionForm = get_question_form(batch.question_model())
    if instance:
        button_label = 'Save'
        options = instance.options.all().order_by('order')
        # options = [option.text.strip()\
        #for option in options] if options else None
    if request.method == 'POST':
        question_form = QuestionForm(
            batch,
            data=request.POST,
            instance=instance,
            prev_question=prev_question)
        response, question_form = _process_question_form(request, batch, response, question_form)
    else:
        question_form = QuestionForm(
            batch, instance=instance, prev_question=prev_question)
    context = {'button_label': button_label,
               'id': 'add-question-form',
               'instance':instance,
               'request': request,
               'class': 'question-form',
               'USSD_MAX_CHARS': settings.USSD_MAX_CHARS,
               'batch': batch,
               'prev_question': prev_question,
               # 'prev_question': prev_question,
               'cancel_url': reverse('qset_questions_page', args=(batch.pk, )),
               'questionform': question_form,
               'response_validation_form': ResponseValidationForm(),
               'model_name' : batch.__class__.__name__
               }
    if options:
        #options = filter(lambda text: text.strip(),
        #list(OrderedDict.fromkeys(options)))
        # options = map(lambda option: re.sub("[%s]" % \
        #settings.USSD_IGNORED_CHARACTERS, '', option), options)
        # map(lambda option: re.sub(" ", ' ', option), options)
        context['options'] = options
    breadcrumbs = Question.edit_breadcrumbs(qset=batch)
    if breadcrumbs:
        request.breadcrumbs(breadcrumbs)
    return response, context
@permission_required('auth.can_view_batches')
def assign(request, qset_id):
    """Assign library (template) questions to a question set.

    GET renders the assignment page; POST copies each selected
    QuestionTemplate (and its options) into the set, chaining each new
    question onto the inline flow.  Refuses to modify a set on which
    interviews have already been conducted.
    """
    batch = QuestionSet.get(id=qset_id)
    if batch.interviews.count():
        error_message = "Questions cannot be assigned \
interviews has already been conducted: %s." % \
            batch.name.capitalize()
        messages.error(request, error_message)
        return HttpResponseRedirect(
            reverse(
                'qset_questions_page',
                args=(
                    batch.pk,
                )))
    if request.method == 'POST':
        data = dict(request.POST)
        last_question = batch.last_question_inline()
        lib_questions = QuestionTemplate.objects.filter(
            identifier__in=data.get('identifier', ''))
        if lib_questions:
            for lib_question in lib_questions:
                question = Question.objects.create(
                    identifier=lib_question.identifier,
                    text=lib_question.text,
                    answer_type=lib_question.answer_type,
                    qset=batch,
                )
                # assign the options
                for option in lib_question.options.all():
                    QuestionOption.objects.create(
                        question=question,
                        text=option.text,
                        order=option.order)
                # Chain onto the inline flow, or make it the start question
                # when the set was previously empty.
                if last_question:
                    QuestionFlow.objects.create(
                        question=last_question, next_question=question)
                else:
                    batch.start_question = question
                    batch.save()
                last_question = question
        #batch_questions_form = BatchQuestionsForm(batch=batch,\
        #\data=request.POST, instance=batch)
        success_message = "Questions successfully assigned to %s: %s." % (
            batch.verbose_name(), batch.name.capitalize())
        messages.success(request, success_message)
        return HttpResponseRedirect(
            reverse(
                'qset_questions_page',
                args=(
                    batch.pk,
                )))
    # GET: offer only library questions whose identifiers are not yet used.
    used_identifiers = [
        question.identifier for question in batch.questions.all()]
    library_questions = QuestionTemplate.objects.exclude(
        identifier__in=used_identifiers).order_by('identifier')
    question_filter_form = QuestionFilterForm()
    # library_questions = question_filter_form.filter(library_questions)
    breadcrumbs = Question.edit_breadcrumbs(qset=batch)
    page_name = ''
    if breadcrumbs:
        if breadcrumbs[0][0] == 'Listing Form':
            page_name = 'Listing'
        else:
            page_name = 'Batch'
        request.breadcrumbs(breadcrumbs)
    context = {
        'batch_questions_form': QuestionForm(batch),
        'batch': batch,
        'button_label': 'Save',
        'id': 'assign-question-to-batch-form',
        'library_questions': library_questions,
        'question_filter_form': question_filter_form,
        'page_name': page_name,
        'redirect_url': '/qsets/%s/questions/' % qset_id}
    return render(request, 'set_questions/assign.html',
                  context)
@permission_required('auth.can_view_batches')
def update_orders(request, qset_id):
| |
"""
`dumpsqsh` - a tool for viewing or extracting SquashFS contents
Author: (C) 2019 <NAME> <<EMAIL>>
"""
import struct
from binascii import b2a_hex
import datetime
import os
import os.path
import lzma
import zlib
try:
import lzo
except ImportError:
lzo = False
try:
import lz4.block
except ImportError:
lz4 = False
try:
import zstd
except ImportError:
zstd = False
# dir entry types (basic inode / directory-entry type codes)
SQUASHFS_DIR_TYPE = 1
SQUASHFS_REG_TYPE = 2
SQUASHFS_SYMLINK_TYPE = 3
SQUASHFS_BLKDEV_TYPE = 4
SQUASHFS_CHRDEV_TYPE = 5
SQUASHFS_FIFO_TYPE = 6
SQUASHFS_SOCKET_TYPE = 7
# inode types (extended variants; basic type + 7, same order)
SQUASHFS_LDIR_TYPE = 8
SQUASHFS_LREG_TYPE = 9
SQUASHFS_LSYMLINK_TYPE = 10
SQUASHFS_LBLKDEV_TYPE = 11
SQUASHFS_LCHRDEV_TYPE = 12
SQUASHFS_LFIFO_TYPE = 13
SQUASHFS_LSOCKET_TYPE = 14
# indexed directly by inode type; slot 0 ("NUL") is unused
itypenames = [ "NUL", "DIR", "REG", "SYM", "BLK", "CHR", "FIFO", "SOCK",
        "LDIR", "LREG", "LSYM", "LBLK", "LCHR", "LFIFO", "LSOCK" ]
# filesystem flags (bit numbers in the superblock flags field)
SQUASHFS_NOI = 0             # -noI do not compress inode table
SQUASHFS_NOD = 1             # -noD do not compress data blocks
SQUASHFS_CHECK = 2
SQUASHFS_NOF = 3             # -noF do not compress fragment blocks
SQUASHFS_NO_FRAG = 4         # -no-fragments do not use fragments
SQUASHFS_ALWAYS_FRAG = 5     # -always-use-fragments use fragment blocks for files larger than block size
SQUASHFS_DUPLICATE = 6       # -no-duplicates do not perform duplicate checking
SQUASHFS_EXPORT = 7          # -no-exports don't make the filesystem exportable via NFS
SQUASHFS_NOX = 8             # -noX do not compress extended attributes
SQUASHFS_NO_XATTR = 9        # -no-xattrs don't store extended attributes
SQUASHFS_COMP_OPT = 10
SQUASHFS_NOID = 11
# indexed by flag bit number
fsflagnames = [ "NOI", "NOD", "CHECK", "NOF", "NO_FRAG", "ALWAYS_FRAG",
        "DUPLICATE", "EXPORT", "NOX", "NO_XATTR", "COMP_OPT", "NOID" ]
# xattr types
SQUASHFS_XATTR_USER = 0
SQUASHFS_XATTR_TRUSTED = 1
SQUASHFS_XATTR_SECURITY = 2
xatypenames = [ "USER", "TRUSTED", "SECURITY" ]
# compression types (ids start at 1; display name is compnames[id - 1])
ZLIB_COMPRESSION = 1
LZMA_COMPRESSION = 2
LZO_COMPRESSION = 3
XZ_COMPRESSION = 4
LZ4_COMPRESSION = 5
ZSTD_COMPRESSION = 6
compnames = [ "ZLIB", "LZMA", "LZO", "XZ", "LZ4", "ZSTD" ]
def log(*args):
    """Debug trace hook: currently a no-op that swallows its arguments."""
    # Re-enable for verbose parsing output:
    # print("##", *args)
def timestr(t):
    """Format a UNIX timestamp as a UTC 'YYYY-mm-dd HH:MM:SS' string.

    datetime.utcfromtimestamp() is deprecated since Python 3.12; use an
    explicitly timezone-aware conversion that yields the same text.
    """
    return datetime.datetime.fromtimestamp(t, datetime.timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
class OffsetReader:
    """File-like wrapper that re-bases position 0 to a fixed base offset.

    Seeking to `pos` actually seeks to `baseofs + pos` in the underlying
    handle; the constructor positions the handle at the base offset.
    """
    def __init__(self, fh, ofs):
        self.baseofs = ofs
        self.fh = fh
        fh.seek(ofs)

    def read(self, size):
        return self.fh.read(size)

    def seek(self, pos):
        return self.fh.seek(self.baseofs + pos)
class InodeHeader:
    """Common 16-byte header shared by every SquashFS inode.

    Fields: type, mode, uid, gid, mtime, inode_idx; uid/gid are indexes
    into fs.idlist.
    """
    MINSIZE = 16
    def __init__(self, fs, data):
        self.fs = fs
        (
            self.type,
            self.mode,
            self.uid,
            self.gid,
            self.mtime,
            self.inode_idx,
        ) = struct.unpack(fs.byteorder + "4H2L", data[:self.MINSIZE])
    def size(self):
        return self.MINSIZE
    def modebits(self):
        """Render self.mode ls-style, e.g. '-rwxr-sr-x'."""
        def perms(x, specialchar, specialflag):
            # x carries the 3-bit r/w/x group; specialflag is setuid/setgid/sticky.
            return "%s%s%s" % (
                "-r"[x>>2],
                "-w"[(x>>1)&1],
                ("-x"+specialchar)[(x&1) + 2*specialflag]
            )
        typechar = "d-lbcps"
        mode = self.mode
        # BUGFIX: the masks were '&6', which dropped the execute bit, so
        # 'x'/'s'/'t' could never be displayed; a 3-bit group needs '&7'.
        return typechar[(self.type-1)%7] \
            + perms((mode>>6)&7, "Ss", (mode>>11)&1) \
            + perms((mode>>3)&7, "Ss", (mode>>10)&1) \
            + perms((mode>>0)&7, "Tt", (mode>>9)&1)
    def idstring(self):
        # Resolve uid/gid indexes through the filesystem's id table.
        return "%5d %5d" % (self.fs.idlist[self.uid],self.fs.idlist[self.gid])
    def oneline(self):
        return "%s %s %s" % (self.modebits(), self.idstring(), timestr(self.mtime))
    def __str__(self):
        return "[%s, %s, %04d:%04d, (%s), #%04x]" % (itypenames[self.type], self.modebits(), self.uid, self.gid, timestr(self.mtime), self.inode_idx)
class LDirectoryNode:
    """Extended directory inode (SQUASHFS_LDIR_TYPE == 8)."""
    MINSIZE = 24

    def __init__(self, fs, data):
        fields = struct.unpack(fs.byteorder + "4L2HL", data[:self.MINSIZE])
        (self.nlink,
         self.file_size,
         self.start_block,    # relative to directory_table_start
         self.parent_inode,
         self.i_count,
         self.offset,         # offset inside the directory block
         self.xattr) = fields

    def size(self):
        return self.MINSIZE

    def oneline(self, name):
        return "%s %3d %s %10d %s %s/" % (self.hdr.modebits(), self.nlink, self.hdr.idstring(), self.file_size, timestr(self.hdr.mtime), name)

    def __str__(self):
        values = (self.nlink, self.file_size, self.start_block,
                  self.parent_inode, self.i_count, self.offset, self.xattr)
        return "n%d, s%08x, blk%06x, parent:#%04x, cnt:%d, off:%05x, xa:%d" % values
class LRegularNode:
    """Extended regular-file inode (SQUASHFS_LREG_TYPE == 9).

    File data layout: full blocks start at 'start_block'; any tail shorter
    than a block lives at 'offset' inside fragment number 'fragment'.
    """
    MINSIZE = 40

    def __init__(self, fs, data):
        fields = struct.unpack(fs.byteorder + "3Q4L", data[:self.MINSIZE])
        (self.start_block, self.file_size, self.sparse, self.nlink,
         self.fragment, self.offset, self.xattr) = fields
        # One size-list entry per full block; with no fragment (0xFFFFFFFF)
        # a partial tail occupies one extra block.
        nblocks, tail = divmod(self.file_size, fs.block_size)
        if self.fragment == 0xFFFFFFFF and tail:
            nblocks += 1
        self.block_size_list = struct.unpack(
            fs.byteorder + "%dL" % nblocks,
            data[self.MINSIZE:self.MINSIZE + 4*nblocks])

    def size(self):
        return self.MINSIZE + 4 * len(self.block_size_list)

    def oneline(self, name):
        return "%s %3d %s %10d %s %s" % (self.hdr.modebits(), self.nlink, self.hdr.idstring(), self.file_size, timestr(self.hdr.mtime), name)

    def __str__(self):
        return "n%d, s%08x, blk%06x, sprs:%d, frag:%d, off:%05x, xa:%d {%s}" % (
            self.nlink, self.file_size, self.start_block,
            self.sparse, self.fragment, self.offset, self.xattr,
            ",".join("%06x" % sz for sz in self.block_size_list))
class LDeviceNode:
    """Extended block/char device inode (SQUASHFS_LBLKDEV_TYPE / SQUASHFS_LCHRDEV_TYPE)."""
    MINSIZE = 12

    def __init__(self, fs, data):
        self.nlink, self.rdev, self.xattr = struct.unpack(
            fs.byteorder + "3L", data[:self.MINSIZE])

    def size(self):
        return self.MINSIZE

    def oneline(self, name):
        # rdev packs major/minor as (major << 8) | minor
        return "%s %3d %s %3d,%3d %s %s" % (self.hdr.modebits(), self.nlink, self.hdr.idstring(), self.rdev>>8, self.rdev&0xFF, timestr(self.hdr.mtime), name)

    def __str__(self):
        return "n%d r%04x, xa:%d" % (self.nlink, self.rdev, self.xattr)
class LIpcNode:
    """Extended FIFO/socket inode (SQUASHFS_LFIFO_TYPE / SQUASHFS_LSOCKET_TYPE)."""
    MINSIZE = 8

    def __init__(self, fs, data):
        self.nlink, self.xattr = struct.unpack(
            fs.byteorder + "2L", data[:self.MINSIZE])

    def size(self):
        return self.MINSIZE

    def oneline(self, name):
        return "%s %3d %s %s %s" % (self.hdr.modebits(), self.nlink, self.hdr.idstring(), timestr(self.hdr.mtime), name)

    def __str__(self):
        return "n%d xa:%d" % (self.nlink, self.xattr)
class DirectoryNode:
    """Basic directory inode (SQUASHFS_DIR_TYPE == 1)."""
    MINSIZE = 16

    def __init__(self, fs, data):
        (self.start_block,    # relative to directory_table_start
         self.nlink,
         self.file_size,
         self.offset,         # offset inside the directory block
         self.parent_inode) = struct.unpack(
            fs.byteorder + "2L2HL", data[:self.MINSIZE])

    def size(self):
        return self.MINSIZE

    def oneline(self, name):
        return "%s %3d %s %10d %s %s/" % (self.hdr.modebits(), self.nlink, self.hdr.idstring(), self.file_size, timestr(self.hdr.mtime), name)

    def __str__(self):
        return "blk%06x, n%d, s%08x, off:%08x, parent:#%04x" % (self.start_block, self.nlink, self.file_size, self.offset, self.parent_inode)
class RegularNode:
    """Basic regular-file inode (SQUASHFS_REG_TYPE == 2).

    File data layout: full blocks start at 'start_block'; any tail shorter
    than a block lives at 'offset' inside fragment number 'fragment'.
    """
    MINSIZE = 16

    def __init__(self, fs, data):
        (self.start_block, self.fragment, self.offset,
         self.file_size) = struct.unpack(fs.byteorder + "4L", data[:self.MINSIZE])
        # One size-list entry per full block; with no fragment (0xFFFFFFFF)
        # a partial tail occupies one extra block.
        nblocks, tail = divmod(self.file_size, fs.block_size)
        if self.fragment == 0xFFFFFFFF and tail:
            nblocks += 1
        self.block_size_list = struct.unpack(
            fs.byteorder + "%dL" % nblocks,
            data[self.MINSIZE:self.MINSIZE + 4*nblocks])

    def size(self):
        return self.MINSIZE + 4 * len(self.block_size_list)

    def oneline(self, name):
        # basic regular inodes carry no link count; display 1
        return "%s %3d %s %10d %s %s" % (self.hdr.modebits(), 1, self.hdr.idstring(), self.file_size, timestr(self.hdr.mtime), name)

    def __str__(self):
        return "s%08x, blk%06x, off:%05x frag:%08x {%s}" % (
            self.file_size, self.start_block, self.offset, self.fragment,
            ",".join("%06x" % sz for sz in self.block_size_list))
class SymlinkNode:
    """Symlink inode (SQUASHFS_SYMLINK_TYPE == 3 / SQUASHFS_LSYMLINK_TYPE == 10)."""
    MINSIZE = 8

    def __init__(self, fs, data):
        self.nlink, target_len = struct.unpack(
            fs.byteorder + "2L", data[:self.MINSIZE])
        self.symlink = data[self.MINSIZE:self.MINSIZE + target_len].decode('utf-8')
        # TODO: which is the zero DWORD after the symlink string ?

    def size(self):
        # NOTE(review): counts decoded characters, not bytes -- differs for
        # non-ASCII targets; confirm against multi-byte UTF-8 names.
        return self.MINSIZE + len(self.symlink)

    def oneline(self, name):
        return "%s %3d %s %s %s -> %s" % (self.hdr.modebits(), self.nlink, self.hdr.idstring(), timestr(self.hdr.mtime), name, self.symlink)

    def __str__(self):
        return "n%d -> %s" % (self.nlink, self.symlink)
class DeviceNode:
    """Basic block/char device inode (SQUASHFS_BLKDEV_TYPE / SQUASHFS_CHRDEV_TYPE)."""
    MINSIZE = 8

    def __init__(self, fs, data):
        self.nlink, self.rdev = struct.unpack(
            fs.byteorder + "2L", data[:self.MINSIZE])

    def size(self):
        return self.MINSIZE

    def oneline(self, name):
        # rdev packs major/minor as (major << 8) | minor
        return "%s %3d %s %3d,%3d %s %s" % (self.hdr.modebits(), self.nlink, self.hdr.idstring(), self.rdev>>8, self.rdev&0xFF, timestr(self.hdr.mtime), name)

    def __str__(self):
        return "n%d r%04x" % (self.nlink, self.rdev)
class IpcNode:
    """Basic FIFO/socket inode (SQUASHFS_FIFO_TYPE / SQUASHFS_SOCKET_TYPE)."""
    MINSIZE = 4

    def __init__(self, fs, data):
        (self.nlink,) = struct.unpack(fs.byteorder + "L", data[:self.MINSIZE])

    def size(self):
        return self.MINSIZE

    def oneline(self, name):
        return "%s %3d %s %s %s" % (self.hdr.modebits(), self.nlink, self.hdr.idstring(), timestr(self.hdr.mtime), name)

    def __str__(self):
        return "n%d" % (self.nlink)
class DirHeader:
    """Header that precedes a run of RelativeDirEntry records in the directory table."""
    MINSIZE = 12

    def __init__(self, fs, data):
        self.count, self.inode_block, self.index_base = struct.unpack(
            fs.byteorder + "3L", data[:self.MINSIZE])
        # 'count' is stored as (number of entries - 1).

    def size(self):
        return self.MINSIZE

    def __str__(self):
        return "n=%d, i:%08x, #:%04x" % (self.count, self.inode_block, self.index_base)
class RelativeDirEntry:
"""
the file_size of a dir entry is always 3 bytes larger than the actual size
of the combined dir_headers + dir_entries
This is because the linux driver uses the offset into the node as the 'current' entry,
when doing 'readdir', entries 0 and 1 are translated to "." and "..", while 2
is not used, and 3 and larger map to direntries.
"""
MINSIZE = 8
def __init__(self, fs, data):
(
self.inode_offset, # offset into the block specified in the DirHeader
self.index_delta, # (signed int) added to the inode_number in the DirHeader
self.type, # 1 = dir, 2 = reg, 3 = sym, 4 = blk, 5 = chr, 6 = fifo, 7 = sock
namelen, # name length
) = struct.unpack(fs.byteorder + "HhHH", data[:self.MINSIZE])
self.name = data[self.MINSIZE : self.MINSIZE+namelen+1].decode('utf-8')
def size(self):
return self.MINSIZE + len(self.name)
def __str__(self):
| |
import numpy
import numpy as np
import math
from scipy.interpolate import InterpolatedUnivariateSpline as interpolate
from scipy.integrate import simps
from scipy.integrate import romberg
import sys
#sys.path.append("/global/homes/c/chmodi/Programs/Py_codes/modules/")
import mycosmology as cosmo_lib
class Mass_Func():
    """Analytic halo mass functions and abundance matching.

    Built on a linear power spectrum read from `power_file` (columns: k,
    P(k)).  M and L are Omega_matter and Omega_lambda; densities are kept
    in units of 1e10 Msun/Mpc^3, hence the recurring 10**10 factors.
    """
    def __init__(self, power_file, M, L = None, H0 = 100.):
        self.M = M
        if L is None:
            L = 1- M  # flat universe by default
        self.L = L
        self.ktrue, self.ptrue = numpy.loadtxt(power_file, unpack = True)
        self.H0 = H0
        # critical/matter density in 1e10 Msun/Mpc^3 (G = 43.007 in these units)
        self.rhoc = 3 * H0**2 /(8 * math.pi * 43.007)
        self.rhom = self.rhoc*M
        self.cosmo = cosmo_lib.Cosmology(M= M, L = L, pfile = power_file)
        self.masses = 10**numpy.arange(8, 18, 0.01)
        self.sigma = numpy.zeros_like(self.masses)
        self.calc_sigma()
        # Named mass-function fits (Tinkerf is available directly but not
        # registered here because of its extra `delta` argument).
        self.fmap = {'Mice':self.Micef, 'ST':self.STf, 'Watson':self.Watsonf, 'Press':self.Pressf}

    def calc_sigma(self):
        """Tabulate sigma(M) at z = 0 over self.masses."""
        M = self.masses
        for foo in range(len(M)):
            self.sigma[foo] = self.sigmasq(M[foo])**0.5

    def tophat(self, k, R):
        """Fourier-transformed spherical top-hat window W(kR).

        BUGFIX: the previous guard `if wt is 0: wt = 1` tested object
        identity against the literal 0, which is never true for a float or
        array result, so W(0) was NaN.  W(kR) -> 1 as kR -> 0.
        """
        kr = k*R
        with numpy.errstate(divide='ignore', invalid='ignore'):
            wt = 3 * (numpy.sin(kr)/kr - numpy.cos(kr))/kr**2
        return numpy.where(kr == 0, 1.0, wt)

    def rLtom(self, R):
        """returns Mass in solar mass for smoothing scale in Mpc"""
        m = 4* math.pi*self.rhom * R**3 /3.
        return m*10**10

    def mtorL(self, m):
        """returns lagrangian radius (in Mpc) for Mass in solar mass"""
        rhobar = self.rhom * 10**10
        R3 = m /(4* math.pi*rhobar /3.)
        return R3**(1/3.)

    def mtorE(self, m, ovd = 178):
        """returns Eulerian radius (in Mpc) for Mass in solar mass"""
        rhobar = ovd * self.rhom * 10**10
        R3 = m /(4* math.pi*rhobar /3.)
        return R3**(1/3.)

    def sm_scale(self, M):
        """returns smoothing scale in Mpc for Mass in solar mass"""
        return self.mtorL(M)

    def sigmasq(self, M):
        """returns sigma**2 corresponding to mass in solar mass"""
        R = self.sm_scale(M)
        k = self.ktrue
        p = self.ptrue
        w2 = self.tophat(k, R)**2
        return simps(p * w2 * k**2, k)/2/math.pi**2

    def dlninvsigdM(self, M, sigmaf = None, aa = 1.):
        """returns d(ln(1/sigma))/d(M) for M in solar masses (central difference).

        Can specify redshift with scale factor `aa`."""
        if sigmaf is None:
            sigmaf = self.sigmaf
        dM = 0.001 * M
        Mf = M + dM/2.
        Mb = M - dM/2.
        lnsigf = numpy.log(1/sigmaf(aa)(Mf))
        lnsigb = numpy.log(1/sigmaf(aa)(Mb))
        return (lnsigf - lnsigb)/dM

    def sigmaf(self, aa = 1.):
        """returns interpolating function for sigma. syntax to use - sigmaf(a)(M)"""
        d = self.cosmo.Dgrow(a = aa)
        return interpolate(self.masses, d*self.sigma)

    def Micef(self, M, aa=1., dndlnm = True):
        """MICE mass-function fit; only calibrated at z = 0 and z = 0.5."""
        s = self.sigmaf(aa)(M)
        zz = self.cosmo.atoz(aa)
        if aa== 1 or zz==0:
            A = 0.58
            a = 1.37
            b = 0.3
            c = 1.036
        elif aa==0.66 or zz==0.5:
            # BUGFIX: this branch referenced an undefined name `az`,
            # raising NameError whenever z != 0.
            A = 0.55
            a = 1.29
            b = 0.29
            c = 1.026
        else:
            # BUGFIX: message previously said "0 or 1"; the fit is for z = 0 / 0.5.
            print('Redshift should be 0 or 0.5')
            return None
        f = A *(s ** -a + b) * numpy.exp(- c / s**2)
        if dndlnm: #https://arxiv.org/pdf/0803.2706.pdf, Eq. 2 for conversion
            return f * self.rhom * self.dlninvsigdM(M, self.sigmaf, aa = aa) *10**10
        else:
            return f

    def Tinkerf(self, M, aa=1., dndlnm = True, delta =200.):
        """Tinker et al. fit; calibrated here for overdensity 200 or 300."""
        s = self.sigmaf(aa)(M)
        if delta == 200:
            A, a, b, c = 0.186, 1.47, 2.57, 1.19
        elif delta == 300:
            A, a, b, c = 0.200, 1.52, 2.25, 1.27
        else:
            # BUGFIX: previously fell through and crashed with NameError on A.
            raise ValueError('delta must be 200 or 300')
        f = A*((s/b)**-a + 1)*numpy.exp(-c/s**2.)
        if dndlnm:
            return f * self.rhom * self.dlninvsigdM(M, self.sigmaf, aa = aa) *10**10
        else:
            return f

    def Watsonf(self, M, aa=1., dndlnm = True):
        """Watson et al. universal FOF fit."""
        s = self.sigmaf(aa)(M)
        A = 0.282
        a = 2.163
        g = 1.210
        b = 1.406
        f = A *((b/s) ** a + 1) * numpy.exp(- g/ s**2)
        if dndlnm:
            return f * self.rhom * self.dlninvsigdM(M, self.sigmaf, aa = aa) *10**10
        else:
            return f

    def Pressf(self, M, aa = 1., dndlnm = True):
        """Press-Schechter mass function."""
        delc = 1.686
        nu = delc/self.sigmaf(aa)(M)
        f = numpy.sqrt(2/math.pi) * nu * numpy.exp(- (nu**2) /2.)
        if dndlnm:
            return f * self.rhom * self.dlninvsigdM(M, self.sigmaf, aa = aa) *10**10
        else:
            return f

    def STf(self, M, aa = 1., dndlnm = True):
        """Sheth-Tormen mass function."""
        delc = 1.686
        nu = delc/self.sigmaf(aa)(M)
        a = 0.75
        p = 0.3
        f = 0.3222* numpy.sqrt(2*a/math.pi) * nu * numpy.exp(- a *(nu**2) /2.)*( 1 + 1/(a * nu**2)**p)
        if dndlnm:
            return f * self.rhom * self.dlninvsigdM(M, self.sigmaf, aa = aa) *10**10
        else:
            return f

    def DnDlnm(self, M, mfunc = 'Mice', aa = 1):
        """dN/dln(M) via the named fit.

        BUGFIX: previously passed `dndm=False`, but the fit functions take a
        `dndlnm` keyword, so every call raised TypeError."""
        mf = self.fmap[mfunc]
        return self.rhom * mf(M, aa = aa, dndlnm = False) * self.dlninvsigdM(M, aa = aa) *10**10

    def match_abundance(self, halomass, bs, mfunc = 'Mice', aa = 1, Mmin = 10.**11., Mmax = None):
        '''Returns new halomasses by matching abundance to given mass function'''
        if Mmax is None:
            Mmax = halomass[0]*1.01
        marray = numpy.exp(numpy.arange(numpy.log(Mmin), numpy.log(Mmax), 0.01))
        abund = []
        l = marray.shape[0]
        f = lambda x:self.DnDlnm(numpy.exp(x), mfunc = mfunc, aa = aa)
        # Cumulative abundance above each mass, integrating from the high end.
        for foo in range(0, marray.shape[0]):
            abund.append(romberg(f, numpy.log(marray)[l-foo-1], numpy.log(marray)[-1]))
        abund = numpy.array(abund)
        nexpect = abund*bs**3
        newmass = interpolate(nexpect, marray[::-1])
        halomassnew = newmass(numpy.linspace(1, len(halomass), num = len(halomass), endpoint=True))
        return halomassnew

    def icdf_sampling(self, bs, mfunc='Mice', match_high = True, hmass = None, M0 = None, N0 = None, seed=100, retargs = False, zz = 0., lmmin=10, lmmax=17):
        '''
        Given samples from analytic mass function (dN/dln(M)), find halo masss by matching abundance via
        inverse cdf sampling.
        bs : boxsize
        match_high : if True, Match the highest mass of the catalog to analytic mass.
                     if False, match the lowest mass
        hmass : (Semi-Optional) Halo mass catalog, used to calculate highest/lowest mass
                and number of halos
        M0, N0 : (Semi-optional) If mass catalog not given, M0 and N0 are required to
                 correspond to highest/lowest mass and number of halos
        Returns: Abundance matched halo mass catalog (descending order)
        '''
        Nint = 500  # number of points to interpolate
        aa = self.cosmo.ztoa(zz)
        mf = self.fmap[mfunc]
        mv = np.logspace(lmmin, lmmax, Nint)
        mfv = mf(mv, aa=aa)
        # Interpolate the mass function in log-mass.
        imf = interpolate(mv, mfv, k = 5)
        ilmf = lambda x: imf(np.exp(x))
        # Integrate downward or upward from the matched mass per match_high.
        if N0 is None:
            N0 = hmass.size
        if match_high:
            if M0 is None:
                M0 = hmass.max()
            lmm = np.linspace(np.log(M0), np.log(mv.min()), Nint)
        else:
            if M0 is None:
                M0 = hmass.min()
            lmm = np.linspace(np.log(M0), np.log(mv.max()), Nint)
        # Find the other mass-limit M2 by matching the total halo count.
        ncum = abs(np.array([romberg(ilmf, lmm[0], lmm[i]) for i in range(lmm.size)]))*bs**3
        M2 = np.exp(np.interp(N0, ncum, lmm))
        # Build pdf/cdf of N(M) between M0 and M2.
        lmm2 = np.linspace(np.log(M0), np.log(M2), Nint)
        nbin = abs(np.array([romberg(ilmf, lmm2[i], lmm2[i+1]) for i in range(lmm2.size-1)]))*bs**3
        nprob = nbin/nbin.sum()
        cdf = np.array([nprob[:i+1].sum() for i in range(nprob.size)])
        icdf = interpolate(cdf[:], 0.5*(lmm2[:-1] + lmm2[1:]))
        # Sample uniform deviates and map them through the inverse cdf.
        np.random.seed(seed)
        ran = np.random.uniform(0, 1, N0)
        hmatch = np.exp(icdf(ran))
        hmatch.sort()
        return hmatch[::-1]
class Num_Mass_Func():
def __init__(self, bs, nc, M, L = None, H0 = 100):
self.M = M
if L is None:
L = 1- M
self.L = L
self.H0 = H0
self.bs = float(bs)
self.nc = nc
self.rhoc = 3 * H0**2 /(8 * math.pi * 43.007)
self.rhom = self.rhoc*M
self.mp = self.rhom *(self.bs/self.nc)**3 * 10.**10
self.vol = self.bs**3
self.lMin = numpy.log10(1. * 10**11)
self.lMax = numpy.log10(5. * 10**15)
self.dlM = 0.05
# def calc(self, halofile, lMin = 0, lMax = 0, dlM = 0):
def calc_file(self, halofile, lMin, lMax, dlM, warren = 0):
    '''Calculate numerical mass function by binning in logspace between lMin, lMax with dlm.
    Reads FOF group lengths from the HDF5 catalog (dropping the first entry),
    optionally applies the Warren correction, converts counts to mass via the
    particle mass, then delegates to calc_array.
    Returns mf, counts, Mmean for every bin'''
    import h5py
    # fix: close the HDF5 handle (the original left the file open)
    with h5py.File(halofile, "r") as a:
        halo = a["FOFGroups"][:]
    halomass = halo["Length"][1:]
    if warren:
        # Warren et al. finite-sampling correction on group occupation
        halomass = halomass *(1 - halomass**-0.6)
    halomass = halomass*self.mp
    return self.calc_array(halomass, lMin, lMax, dlM)
def calc_array(self, halomass, lMin, lMax, dlM):
'''Calculate numerical mass function by binning in logspace between lMin, lMax with dlm.
Returns mf, counts, Mmean for every bin'''
nMbins = int((lMax - lMin)/dlM)
lmass = numpy.zeros(nMbins)
for foo in range(nMbins):
lmass[foo] = lMin + foo*dlM
counts = numpy.zeros((nMbins - 1))
Mmean = numpy.zeros((nMbins - 1))
# (corrupted chunk-boundary artifact commented out)
not in self.bombing_agents.keys():
keys_to_add.append( ((r,c), self.bombing_agents[key]) )
#down
r = key[0]+1
c = key[1]
if (r < 11):
if bomb_life_map[r][c] > 0 and (r,c) not in self.bombing_agents.keys():
keys_to_add.append( ((r,c), self.bombing_agents[key]) )
#left
r = key[0]
c = key[1]-1
if (c >= 0):
if bomb_life_map[r][c] > 0 and (r,c) not in self.bombing_agents.keys():
keys_to_add.append( ((r,c), self.bombing_agents[key]) )
#right
r = key[0]
c = key[1] + 1
if (c < 11):
if bomb_life_map[r][c] > 0 and (r,c) not in self.bombing_agents.keys():
keys_to_add.append( ((r,c), self.bombing_agents[key]) )
keys_to_pop.append((key[0],key[1]))
for k in keys_to_pop:
self.bombing_agents.pop(k, None)
for k in keys_to_add:
self.bombing_agents[k[0]] = k[1]
#--------------------------------------
#======================================
#--------------------------------------
@staticmethod
def _djikstra(board, my_position, bombs, enemies, depth=None, exclude=None):
    """Depth-limited Dijkstra (unit edge costs) around my_position.

    Returns (items, dist, prev): positions grouped by board item, shortest
    distances, and predecessor pointers for path reconstruction. Only tiles
    within manhattan `depth` and not holding an `exclude` item are visited.
    """
    assert(depth is not None)
    if exclude is None:
        exclude = [constants.Item.Fog]

    def out_of_range(p1, p2):
        x1, y1 = p1
        x2, y2 = p2
        return depth is not None and abs(y2 - y1) + abs(x2 - x1) > depth

    items = defaultdict(list)
    dist = {}
    prev = {}
    Q = queue.PriorityQueue()
    # fix: push a tuple, not a list — every later entry is a tuple, and
    # heapq cannot order a list against a tuple (latent TypeError)
    Q.put((0, my_position))
    mx, my = my_position
    # NOTE(review): board size 11 is hard-coded here — confirm against config
    for r in range(max(0, mx - depth), min(11, mx + depth)):
        for c in range(max(0, my - depth), min(11, my + depth)):
            position = (r, c)
            if any([
                out_of_range(my_position, position),
                utility.position_in_items(board, position, exclude),
            ]):
                continue
            dist[position] = np.inf
            prev[position] = None
            item = constants.Item(board[position])
            items[item].append(position)
    dist[my_position] = 0
    for bomb in bombs:
        if bomb['position'] == my_position:
            items[constants.Item.Bomb].append(my_position)
    while not Q.empty():
        _, position = Q.get()
        x, y = position
        val = dist[(x, y)] + 1
        for row, col in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
            new_position = (row + x, col + y)
            if utility.position_on_board(board, new_position):
                if all([new_position in dist,
                        utility.position_is_passable(board, new_position, enemies)]):
                    new_val = val
                    if new_val < dist[new_position]:
                        dist[new_position] = new_val
                        prev[new_position] = position
                        Q.put((dist[new_position], new_position))
    return items, dist, prev
def _update_safe_position(self, bombs, board, my_position, items, dist, prev, enemies):
    """Pick the closest reachable safe tile with the fewest bomb threats
    along its escape path; returns (-1, -1) when none exists."""
    # candidates: reachable within 15 steps and passable
    sorted_dist = {k:v for k, v in dist.items() if v < 15 and not helper_func.position_is_not_passible(board, k, enemies)}
    # closest candidates first
    sorted_dist = sorted(sorted_dist, key=lambda position: dist[position])
    safe_positions = queue.PriorityQueue()
    best_dist = 99999
    for position in sorted_dist:
        unsafe_directions = helper_func._directions_in_range_of_bomb(board, position, bombs, dist, bomb_ticking_threshold=15)
        position_is_bad_corner = helper_func.is_bad_corner(board, my_position, position, items, dist, prev, enemies, distance_to_enemies=3, threshold_wall_count = 2)
        if len(unsafe_directions) == 0 and not position_is_bad_corner:
            if dist[position] <= best_dist:
                best_dist = dist[position]
                # calculate threat during escaping: walk the prev-chain back
                # toward my_position, counting tiles in range of a ticking bomb
                num_threats = 0
                curr_position = position
                while prev[curr_position] != my_position:
                    unsafe_dir = helper_func._directions_in_range_of_bomb(board, curr_position, bombs, dist, bomb_ticking_threshold=15)
                    if len(unsafe_dir) != 0:
                        num_threats += 1
                    curr_position = prev[curr_position]
                # append it to the queue (lowest threat count wins)
                safe_positions.put((num_threats, position))
        elif best_dist != 99999:
            # candidates are sorted by distance: once a safe tile exists and a
            # farther unsafe candidate shows up, stop scanning
            break
        # return position
        # elif len(unsafe_directions) == 0 and not position_is_bad_corner:
        #     safe_positions.put((dist[position] + len(unsafe_directions) / 10.0, position))
    #append to safe position
    if not safe_positions.empty():
        position = safe_positions.get()[1]
        helper_func.agent_output(["SAFE POSITION BOARD",
            position, my_position, board])
        return position
    else:
        # if there is no safe position, then signal failure with a sentinel
        return (-1,-1)
def _find_safe_directions(self, board, my_position, unsafe_directions, bombs, enemies):
    """Return a list of directions believed safe to move in.

    When all four directions are threatened, simulates our own bomb on the
    current tile and returns the first direction that does not dead-end;
    otherwise filters out off-board and known-unsafe directions.
    """
    def is_stuck_direction(next_position, bomb_range, next_board, enemies):
        # Probe outward from next_position: we are "stuck" unless we can reach
        # a tile off both the row and column of next_position (off the blast
        # cross) or farther than the bomb range.
        Q = queue.PriorityQueue()
        Q.put((0, next_position))
        seen = set()
        nx, ny = next_position
        is_stuck = True
        while not Q.empty():
            dist, position = Q.get()
            seen.add(position)
            px, py = position
            if nx != px and ny != py:
                # off the blast cross — an escape exists
                is_stuck = False
                break
            if dist > bomb_range:
                is_stuck = False
                break
            for row, col in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
                new_position = (row + px, col + py)
                if new_position in seen:
                    continue
                if not utility.position_on_board(next_board, new_position):
                    continue
                if not utility.position_is_passable(next_board, new_position, enemies):
                    continue
                # NOTE(review): `dist` is rebound to the manhattan distance from
                # next_position, shadowing the popped priority — confirm intended
                dist = abs(row + px - nx) + abs(col + py - ny)
                Q.put((dist, new_position))
        return is_stuck
    # All directions are unsafe. Return a position that won't leave us locked.
    safe = []
    if len(unsafe_directions) == 4:
        next_board = board.copy()
        next_board[my_position] = constants.Item.Bomb.value
        for direction, bomb_range in unsafe_directions.items():
            next_position = utility.get_next_position(my_position, direction)
            nx, ny = next_position
            if not utility.position_on_board(next_board, next_position) or \
               not utility.position_is_passable(next_board, next_position, enemies):
                continue
            if not is_stuck_direction(next_position, bomb_range, next_board, enemies):
                # We found a direction that works. The .items provided
                # a small bit of randomness. So let's go with this one.
                return [direction]
        if not safe:
            safe = [constants.Action.Stop]
        return safe
    x, y = my_position
    disallowed = []  # The directions that will go off the board.
    for row, col in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
        position = (x + row, y + col)
        direction = utility.get_direction(my_position, position)
        # Don't include any direction that will go off of the board.
        if not utility.position_on_board(board, position):
            disallowed.append(direction)
            continue
        # Don't include any direction that we know is unsafe.
        if direction in unsafe_directions:
            continue
        if utility.position_is_passable(board, position, enemies) or utility.position_is_fog(board, position):
            safe.append(direction)
    if not safe:
        # We don't have any safe directions, so return something that is allowed.
        safe = [k for k in unsafe_directions if k not in disallowed]
    if not safe:
        # We don't have ANY directions. So return the stop choice.
        return [constants.Action.Stop]
    return safe
@staticmethod
def _is_adjacent_enemy(items, dist, enemies):
for enemy in enemies:
for position in items.get(enemy, []):
if dist[position] <= 2:
return True
return False
@staticmethod
#Return the enemy ID on board
def _is_adjacent_enemy_target(items, dist, enemies):
for enemy in enemies:
for position in items.get(enemy, []):
if dist[position] <= 3:
return enemy
return None
@staticmethod
def _has_bomb(obs):
return obs['ammo'] >= 1
def _maybe_bomb(self, ammo, blast_strength, items, dist, my_position, board, prev, enemies, bombs, scope="NOTHING"):
    """Returns whether we can safely bomb right now.
    Decides this based on:
    1. Do we have ammo?
    2. If we laid a bomb right now, will we be stuck?
    """
    # Do we have ammo?
    if ammo < 1:
        return False
    # Simulate the bomb we are about to lay on top of the live bombs.
    copy_bombs = copy.deepcopy(self.bombs)
    copy_bombs.append({'position': my_position, 'blast_strength': int(blast_strength), 'bomb_life': 10, 'moving_direction': None})
    # Will we be stuck?
    x, y = my_position
    # fix: items.get(...) without a default returns None when no Passage is in
    # range (dict.get on a defaultdict does not insert), crashing the loop —
    # default to an empty list.
    for position in items.get(constants.Item.Passage, []):
        # reject passages that are too far, occupied, in range of any bomb,
        # in a bad corner, or reachable only through a bombable path
        if dist[position] > 5 or utility.position_is_agent(board,position) \
                or helper_func._directions_in_range_of_bomb(board, position, copy_bombs, dist, consider_bomb_life=False) \
                or helper_func.is_bad_corner(board, my_position, position, items, dist, prev, enemies, distance_to_enemies=3, threshold_wall_count = 3) \
                or self.susceptible_to_path_bombing(copy_bombs, my_position, position, dist, radius=4):
            continue
        # We can reach a passage that's outside of the bomb scope.
        px, py = position
        if px != x and py != y:
            return True
        # Use the manhattan distance when the path itself can be bombed through.
        path_bombable = helper_func.path_is_bombable(board, my_position, position, bombs)
        if path_bombable:
            distance = helper_func.get_manhattan_distance(my_position, position)
        else:
            distance = dist[position]
        # We can reach a passage that's outside of the bomb strength.
        if distance > blast_strength:
            return True
    return False
@classmethod
def _near_wood(cls, my_position, items, dist, prev, radius):
    """Direction moving toward the nearest Wood tile within radius,
    delegating to the class's nearest-position/direction helpers."""
    wood_only = [constants.Item.Wood]
    target = cls._nearest_position(dist, wood_only, items, radius)
    return cls._get_direction_towards_position(my_position, target, prev)
def _near_bomb_item(self, my_position, items, dist, prev, radius):
    """Count how many of the four orthogonally adjacent tiles hold a bomb."""
    adjacent_bombs = 0
    for action in (constants.Action.Up, constants.Action.Down,
                   constants.Action.Left, constants.Action.Right):
        neighbor = utility.get_next_position(my_position, action)
        if utility.position_on_board(self.board, neighbor) \
                and self.board[neighbor] == constants.Item.Bomb.value:
            adjacent_bombs += 1
    return adjacent_bombs
@staticmethod
def _filter_invalid_directions(board, my_position, directions, enemies):
    """Drop directions whose next tile is off-board or not passable."""
    passable = []
    for candidate in directions:
        nxt = utility.get_next_position(my_position, candidate)
        if utility.position_on_board(board, nxt) and utility.position_is_passable(board, nxt, enemies):  # and not helper_func.position_is_skull(board, nxt)
            passable.append(candidate)
    return passable
@classmethod
# NOTE(review): decorated @classmethod but the first parameter is named
# `self` — it actually receives the class object; confirm and rename or
# switch the decorator in a follow-up.
def _filter_unsafe_directions(self, board, my_position, directions, bombs, items, dist, prev, enemies):
    """Drop directions that step into bomb range or flames; directions that
    are only rejected for being a bad corner are kept as a fallback."""
    ret = []
    bad_corner_surving_direction = []
    for direction in directions:
        if not utility.is_valid_direction(board, my_position, direction):
            continue
        x, y = utility.get_next_position(my_position, direction)
        is_bad = False
        unsafe_directions = helper_func._directions_in_range_of_bomb(board, (x,y), bombs, dist)
        is_bad_corner = helper_func.is_bad_corner(board, my_position, (x,y), items, dist, prev, enemies, distance_to_enemies=-1, threshold_wall_count = 4)
        if len(unsafe_directions) != 0:
            # target tile is in range of some bomb
            is_bad = True
        if board[x,y] == constants.Item.Flames.value:
            is_bad = True
        if is_bad_corner and not is_bad:
            # bad corner but otherwise safe: remember as a last resort
            is_bad = True
            bad_corner_surving_direction.append(direction)
        if not is_bad:
            ret.append(direction)
    if not ret:
        # every direction was bad: prefer the bad-corner survivors
        return bad_corner_surving_direction
    else:
        return ret
@staticmethod
def _filter_recently_visited(directions, my_position, recently_visited_positions):
    """Prefer directions that avoid recently visited tiles; when all lead to
    recent tiles, fall back to the original list."""
    fresh = [
        d for d in directions
        if utility.get_next_position(my_position, d) not in recently_visited_positions
    ]
    return fresh if fresh else directions
def update_distance_to_items(self, items, dist, prev, board, enemies):
# (corrupted chunk-boundary artifact commented out)
from .canvas import Canvas, Point, Line
from .command import BaseCommand, PenCommand, MoveCommand, ClearCommand, ColorCommand
from .parser import Parser
class Drawer(Parser):
"""
Abstraction for processing a btye stream and generating draw lines, commands and pen up/down points
on a canvas.
"""
default_canvas = Canvas(-8192, 8191, -8192, 8191)  # shared fallback bounds (full 14-bit signed coordinate range)
def __init__(self, arg_stream=None, draw_file=None, canvas=None):
    """
    :param arg_stream: str: raw un-decoded op codes
    :param draw_file: str: file name to process a byte stream from
    :param canvas: Canvas: support configurable Canvas
    """
    super(Drawer, self).__init__()
    # decoded commands and render output
    self.commands = list()
    self.draw_lines = list()
    self.pen_down_points = list()
    self.pen_up_points = list()
    # drawing state (naive assumptions)
    self.current_point = None
    self.was_drawing = False
    self.drawer_out_of_bounds = False
    self.pen_down = False
    self.canvas = canvas if canvas else Drawer.default_canvas
    # pre-seed the output with the canvas border lines
    self.draw_lines.extend(self.canvas.borders)
    self.color = self.canvas.default_color
    if arg_stream:
        self.input_steam = arg_stream
    else:
        # here would be another place to run validation on the input_file e.g. /n's
        with open(draw_file, "r") as source_file:
            self.input_steam = source_file.readline()
def validate_parameters(self):
    """Raise RuntimeError when no input stream was captured at construction."""
    stream = self.input_steam
    if stream is None:
        raise RuntimeError("invalid input stream for Drawer")
def parse(self):
    """Decode the stream, exposing each command's raw value as the result."""
    self._decode_input_stream()
    self.result = [cmd.raw_command for cmd in self.commands]
def display(self):
    """Print each decoded raw command on its own line."""
    for raw in self.result:
        print(raw)
def _out_of_bounds(self, point):
    """True when the canvas does not contain *point*."""
    inside = self.canvas.contains_point(point)
    return not inside
def _decode_input_stream(self):
    """
    Start of op code processing: determine which command code we are dealing
    with, then delegate to its handler.
    """
    self.raw_op_codes = list(self._get_op_codes())
    self.current_op_code_pointer = 0
    # command-code dispatch table; each handler advances the pointer itself
    handlers = {
        "F0": self._handle_clear_command,
        "A0": self._handle_color_command,
        "80": self._handle_pen_command,
        "C0": self._handle_move_command,
    }
    while self.current_op_code_pointer < len(self.raw_op_codes):
        next_op_code = self.raw_op_codes[self.current_op_code_pointer]
        handler = handlers.get(next_op_code)
        if handler is not None:
            handler()
        else:
            # unrecognized command, ignore and step past it
            self.current_op_code_pointer = self.current_op_code_pointer + 1
def _get_op_codes(self):
    """
    Yield op codes from the given input stream
    """
    # consume two characters at a time; the remainder stays on
    # self.input_steam so the stream is truncated only after each yield
    while self.input_steam:
        next_code = self.input_steam[:2]
        yield next_code
        self.input_steam = self.input_steam[2:]
def _handle_clear_command(self):
    """
    Handle a clear command
    - Create a clear command instance and append its raw command value
    - Update this drawers globals for subsequent commands
    """
    clear_command = ClearCommand()
    self.commands.append(clear_command)
    self.current_op_code_pointer = (
        self.current_op_code_pointer + clear_command.current_point_offset
    )
    # NOTE(review): the rest of this class tracks colour in self.color, not
    # self.current_color, so this reset may never be observed; also the
    # alpha 225 looks like a typo for 255 — confirm intent before changing.
    self.current_color = [0, 0, 0, 225]
    self.current_point = self.canvas.center_point
    self.pen_down = False
def _handle_color_command(self):
    """
    Handle a color command.
    - Grab the next eight op codes (two per channel)
    - Decode into an rgba color and update this drawer's color
    - Update this drawer's globals for subsequent commands
    """
    ptr = self.current_op_code_pointer
    # explicit indexing (not slicing) so a truncated stream still raises
    r_bytes = [self.raw_op_codes[ptr + 1], self.raw_op_codes[ptr + 2]]
    g_bytes = [self.raw_op_codes[ptr + 3], self.raw_op_codes[ptr + 4]]
    b_bytes = [self.raw_op_codes[ptr + 5], self.raw_op_codes[ptr + 6]]
    a_bytes = [self.raw_op_codes[ptr + 7], self.raw_op_codes[ptr + 8]]
    color_command = ColorCommand(r_bytes, g_bytes, b_bytes, a_bytes)
    self.color = color_command.color
    self.commands.append(color_command)
    self.current_op_code_pointer = ptr + color_command.current_point_offset
def _handle_pen_command(self):
    """
    Handle a pen command.
    - Grab the next two op codes and decode into pen up or pen down
    - Raise when PEN DOWN is requested off-canvas or before any point is set
    - Update this drawer's globals for subsequent commands
    """
    ptr = self.current_op_code_pointer
    pen_bytes = [self.raw_op_codes[ptr + 1], self.raw_op_codes[ptr + 2]]
    pen_command = PenCommand(pen_bytes)
    if self.drawer_out_of_bounds and pen_command.is_down:
        raise ValueError(
            "Invalid Drawer Command: Cannot PEN DOWN while drawer is off the canvas."
        )
    if not self.current_point and pen_command.is_down:
        raise ValueError(
            "Invalid Drawer Command: Cannot PEN DOWN before setting an initial point."
        )
    self.commands.append(pen_command)
    self.current_op_code_pointer = ptr + pen_command.current_point_offset
    self.pen_down = pen_command.is_down
    # record where the pen state changed
    target = self.pen_down_points if self.pen_down else self.pen_up_points
    target.append(self.current_point)
def _handle_move_command(self):
    """
    Handle a move command.
    - Build a list of move points based off the next 4 op code bytes for each loop
    - Handle cases for termination
    - run subroutine to update self.commands based off new points list
    """
    new_points = list()
    if self.current_op_code_pointer + 1 < len(self.raw_op_codes):
        # get number of parameters
        #
        move_pointer = self.current_op_code_pointer
        orginal_current_point = self.current_point
        # determine if we keep processing coordinate bytes based off command ops or end of byte stream
        next_move_op = self.raw_op_codes[move_pointer + 1]
        while next_move_op not in ["F0", "A0", "80"] and move_pointer + 1 < len(
            self.raw_op_codes
        ):
            # decode the coordinate and append to moves in this run
            x_axis_bytes = [
                self.raw_op_codes[move_pointer + 1],
                self.raw_op_codes[move_pointer + 2],
            ]
            y_axis_bytes = [
                self.raw_op_codes[move_pointer + 3],
                self.raw_op_codes[move_pointer + 4],
            ]
            # decoded values are deltas relative to the current point
            new_point = Point(
                BaseCommand.decode_bytes(x_axis_bytes[0], x_axis_bytes[1]),
                BaseCommand.decode_bytes(y_axis_bytes[0], y_axis_bytes[1]),
            )
            new_point.x = int(new_point.x + self.current_point.x)
            new_point.y = int(new_point.y + self.current_point.y)
            new_points.append(new_point)
            self.current_point = new_point
            # determine if we have a terminating case for this move command or update pointers
            # Note: this is major assumption not defined, after any move to center is a single parameter move
            # command. This is the only way i could get around ignoring bad parameters e.g. Blue Square
            if move_pointer + 5 == len(self.raw_op_codes) or (
                new_point.x == 0 and new_point.y == 0
            ):
                break
            else:
                move_pointer = move_pointer + 4
                next_move_op = self.raw_op_codes[move_pointer + 1]
        # set pointers for build subroutine: restore the starting point so the
        # build pass re-walks the decoded points itself
        self.current_point = orginal_current_point
        self.current_op_code_pointer = (
            self.current_op_code_pointer + (4 * len(new_points)) + 1
        )
        self._build_move_command(new_points)
def _build_move_command(self, new_points):
    """
    Determine if we have to handle out of bound cases and make sub commands where needed.
    We assume if the pen is down that a move command has set this.current_point
    :param new_points: [Point]: list of move points for this move command. May contain points out of bounds
    """
    valid_moves_points = list()
    for next_point in new_points:
        if self.pen_down:
            # we can assert that the pen will not be down and out of bounds at the same time
            if not self._out_of_bounds(self.current_point) and self._out_of_bounds(
                next_point
            ):
                # in bounds going out
                # execute mv points to edge point
                # execute pen up
                # mark out of bounds
                edge_point = self._build_edge_point(
                    inner_point=self.current_point, outer_point=next_point
                )
                valid_moves_points.append(edge_point)
                move_command = MoveCommand(points=valid_moves_points)
                self.commands.append(move_command)
                pen_up_command = PenCommand(["40", "00"])  # zero for pen down
                self.pen_down = False
                self.commands.append(pen_up_command)
                new_line = Line(self.current_point, edge_point, self.color)
                self.draw_lines.append(new_line)
                self.pen_up_points.append(edge_point)
                self.drawer_out_of_bounds = True
                self.was_drawing = True
                self.current_point = next_point
                # reset valid_move_points
                valid_moves_points = list()
            elif not self._out_of_bounds(
                self.current_point
            ) and not self._out_of_bounds(next_point):
                # normal case make a new line
                valid_moves_points.append(next_point)
                new_line = Line(self.current_point, next_point, self.color)
                self.draw_lines.append(new_line)
                self.current_point = next_point
        else:
            if self.drawer_out_of_bounds:
                # handle the case where multiple moves out of bounds are being done
                # meaning we wont update valid moves until we re-enter the canvas boundaries,
                # but still update the current pointer
                if (
                    self._out_of_bounds(self.current_point)
                    and not self._out_of_bounds(next_point)
                    and self.was_drawing
                ):
                    # out of bounds coming in
                    # execute mv points to this edge point
                    # execute pen down
                    # mark in bounds
                    # move to next point
                    edge_point = self._build_edge_point(
                        inner_point=next_point, outer_point=self.current_point
                    )
                    valid_moves_points.append(edge_point)
                    move_command = MoveCommand(points=valid_moves_points)
                    self.commands.append(move_command)
                    pen_down_command = PenCommand(
                        ["40", "01"]
                    )  # non-zero for pen down
                    self.pen_down = True
                    self.commands.append(pen_down_command)
                    new_line = Line(next_point, edge_point, self.color)
                    self.draw_lines.append(new_line)
                    self.pen_down_points.append(edge_point)
                    self.drawer_out_of_bounds = False
                    self.was_drawing = False
                    self.current_point = next_point
                    # reset valid_move_points
                    valid_moves_points = [next_point]
                elif self._out_of_bounds(
                    self.current_point
                ) and self._out_of_bounds(next_point):
                    # still out of bounds move next_point
                    self.current_point = next_point
            else:
                # normal case make a new line
                valid_moves_points.append(next_point)
                self.current_point = next_point
    # add final move command if any moves
    if valid_moves_points:
        move_command = MoveCommand(points=valid_moves_points)
        self.commands.append(move_command)
def _build_edge_point(self, inner_point, outer_point):
"""
Determine the edge point where this line crosses the drawer's canvas border
Approach:
- Get the slope and y-intercept of the line from the given points via:
m = (y2 - y1)/(x2 - x1)
b = y - mx
- Now that we now we have a line equation y = mx + b, based off the relativity of the points, we can
assert an x/y axis via:
y = mx + b
x = (y - b) / m
or the y-intercept is zero meaning we cross the border at a corner
- Handle all 8 edge cases where {A, B, C, D} represent a quadrant side and { E, F, G, H } are corners
H D E
- - - - -
| | |
| | |
C - - - - - A
| | |
| | |
- - - - -
G B F
Note: we must also account for when the slope is zero when finding for x axis
:param inner_point: Point: point for this line that is within | |
# repository: hamolicious/Red-Dot (scraper artifact commented out so the module parses)
from dearpygui.core import *
from dearpygui.simple import *
import cv2
import json
import os
import sys
from time import time, gmtime
from threading import Thread
from math import sqrt
def print_stamped(text):
    """Log *text* to the report logger, prefixed with the current UTC time.

    Fix: the original computed gmtime(time()) and then discarded it, so the
    "stamped" message carried no stamp at all.
    """
    time_stamp = gmtime(time())
    stamped = f'[{time_stamp.tm_hour:02d}:{time_stamp.tm_min:02d}:{time_stamp.tm_sec:02d}] {text}'
    log_info(stamped, logger='reportLogger')
class Camera:
    """OpenCV capture wrapper driving the calibration viewer and the two
    time-lapse modes (colour-triggered "red dot" and fixed-interval timed),
    each running its loop on a daemon thread."""

    def __init__(self, app):
        # app: owning App instance, used to surface camera errors in the GUI
        self.app = app
        # CAP_DSHOW selects the DirectShow backend for device 0 — presumably
        # Windows-specific; TODO confirm on other platforms
        self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        self.set_resolution(get_value('tupleprevRes'))
        self.validate_open()
        self.frame_count = 0  # frames saved during the current run
        self.last_stamp = -1  # NOTE(review): written here but never read in this class
        self.update_search_settings()
        self.cwd = ''  # per-run output directory, set by create_dir()

    def validate_open(self):
        """Report through the app and release the device when it is busy."""
        if not self.cap.isOpened():
            self.app.create_camera_error('Camera already in use', 'Make sure the camera is not being used by any other programs')
            self.cap.release()

    def update_search_settings(self):
        """Refresh the sampled colour and the search-pixel coordinates
        (xPos/yPos are fractions of the preview resolution)."""
        self.search_color = get_value('sampledColor')
        self.search_pos_x = int(get_value('xPos') * get_value('tupleprevRes')[0])
        self.search_pos_y = int(get_value('yPos') * get_value('tupleprevRes')[1])

    def set_resolution(self, res):
        """Ask the capture device for a (width, height) resolution."""
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, res[0])
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, res[1])

    def reset_frame_count(self):
        """Restart frame numbering for a new time-lapse run."""
        self.frame_count = 0

    def name_frame(self):
        """Build the next frame's file name from the 'txt_frameName' template,
        replacing a run of '#' with the zero-padded frame counter (or
        appending the counter before the extension when no '#' is present)."""
        frame_name = get_value('txt_frameName')
        n_zeros = frame_name.count('#')
        current_count = str(self.frame_count)
        if n_zeros != 0:
            # pad the counter to the template width while it still fits
            d_zeros = n_zeros - len(current_count)
            if d_zeros >= 0:
                num = ('0'*d_zeros) + current_count
            else:
                num = current_count
            # NOTE(review): split('.') assumes exactly one dot in the template
            name, ext = frame_name.split('.')
            name = name.replace('#'*n_zeros, num)
            frame_name = name + '.' + ext
        else:
            name, ext = frame_name.split('.')
            frame_name = name + current_count + '.' + ext
        return frame_name

    def capture_frame(self):
        """Grab one frame at the full image resolution, save it into the run
        directory, then drop the device back to the preview resolution."""
        if not self.cap.isOpened():
            return
        self.set_resolution(get_value('tupleimgRes'))
        _, frame = self.cap.read()
        self.set_resolution(get_value('tupleprevRes'))
        filepath = os.path.join(self.cwd, self.name_frame())
        cv2.imwrite(filepath, frame)
        self.frame_count += 1
        print_stamped(f'Saved: {filepath}')

    def get_image(self):
        """
        saves a still frame from the web cam
        """
        if not self.cap.isOpened():
            return
        # NOTE(review): the read() success flag is captured but ignored
        result, frame = self.cap.read()
        self.set_resolution(get_value('tupleprevRes'))
        cv2.imwrite('save_files/out.jpg', frame)

    def get_current_color(self, frame):
        """Return the [R, G, B] value at the search pixel (the [::-1]
        reverses OpenCV's BGR channel order)."""
        return [int(i) for i in frame[int(self.search_pos_y)][int(self.search_pos_x)][::-1]]

    def get_change(self, current_color):
        """Sum of absolute per-channel differences from the stored sample
        colour (callers compare this against sensitivity**2)."""
        return int(sum((abs(current_color[0] - self.search_color[0]), abs(current_color[1] - self.search_color[1]), abs(current_color[2] - self.search_color[2]))))

    def create_dir(self):
        """Ensure the save root exists and create a fresh timestamped
        sub-directory for this run (stored in self.cwd)."""
        path = get_value('txt_timelapseSavePath')
        if not os.path.exists(path):
            os.mkdir(path)
        self.cwd = os.path.join(path, f'Timelapse-{int(time())}/')
        os.mkdir(self.cwd)

    def hms_to_sec(self, hhmmss):
        """Convert an (hours, minutes, seconds) triple to total seconds."""
        hrs, min, sec = hhmmss
        return (hrs * 60 * 60) + (min * 60) + sec

    def red_dot(self):
        """Launch the red-dot worker loop on a daemon thread."""
        new_thread = Thread(target=self.red_dot_worker)
        new_thread.daemon = True
        new_thread.start()

    def red_dot_worker(self):
        """Capture loop: when the search pixel matches the sampled colour
        (change below sensitivity**2), schedule a frame capture after the
        configured delay; pressing 'q' in the preview window quits."""
        self.create_dir()
        frame_lock = False  # True while a capture is already scheduled
        await_time = -1     # wall-clock time of the pending capture; -1 = none
        while True:
            _, frame = self.cap.read()
            current_color = self.get_current_color(frame)
            change = self.get_change(current_color)
            cv2.imshow('Red-Dot Timelapse', frame)
            if change < get_value('sensitivity')**2:
                if not frame_lock:
                    print_stamped('Starting to take picture')
                    await_time = time() + get_value('pictureDelay')
                    # re-sample the matched colour for subsequent comparisons
                    self.search_color = current_color.copy()
                    frame_lock = True
            else:
                frame_lock = False
            if await_time != -1 and time() > await_time:
                self.capture_frame()
                await_time = -1
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cv2.destroyAllWindows()

    def viewer(self):
        """Launch the calibration viewer loop on a daemon thread."""
        new_thread = Thread(target=self.viewer_worker)
        new_thread.daemon = True
        new_thread.start()

    def viewer_worker(self):
        """Live preview with crosshair, search-pixel marker and colour/change
        read-outs for calibrating the sensitivity; 'q' quits."""
        start_text_y = 20
        self.text_y = start_text_y
        d_text_y = 30
        font_scale = 0.7
        def put_text(text, frame):
            # draw one red line of text and advance the text cursor
            frame = cv2.putText(frame, text, (10, self.text_y), cv2.FONT_HERSHEY_SIMPLEX, font_scale, (0, 0, 255), 2)
            self.text_y += d_text_y
            return frame
        width, height = get_value('tupleprevRes')
        while True:
            _, frame = self.cap.read()
            current_color = self.get_current_color(frame)
            change = self.get_change(current_color)
            # centre crosshair
            cx = int(width / 2)
            cy = int(height / 2)
            cs = 50
            frame = cv2.line(frame, (cx - cs, cy), (cx + cs, cy), (0, 0, 0), 1)
            frame = cv2.line(frame, (cx, cy - cs), (cx, cy + cs), (0, 0, 0), 1)
            self.text_y = start_text_y
            frame = put_text(f'Change: {int(sqrt(change))} | Sensitivity: {get_value("sensitivity")}', frame)
            frame = put_text(f'Current Col: {current_color}', frame)
            frame = put_text(f'Sample Col : {self.search_color}', frame)
            # ring around the search pixel
            frame = cv2.circle(frame, (self.search_pos_x, self.search_pos_y), 5, (0, 0, 0), 2)
            cv2.imshow('Viewer', frame)
            # await a Q press
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cv2.destroyAllWindows()

    def timed(self):
        """Launch the fixed-interval time-lapse worker on a daemon thread."""
        new_thread = Thread(target=self.timed_worker)
        new_thread.daemon = True
        new_thread.start()

    def timed_worker(self):
        """Capture a frame every 'timeBetweenFrames' until 'totalPrintTime'
        elapses; 'q' in the preview window quits early."""
        total_seconds_elapse = self.hms_to_sec(get_value('totalPrintTime'))
        frame_await = self.hms_to_sec(get_value('timeBetweenFrames'))
        end_time = time() + total_seconds_elapse
        next_frame = time() + frame_await
        self.create_dir()
        while True:
            # capture a frame
            _, frame = self.cap.read()
            # display frame
            cv2.imshow('Timed Timelapse', frame)
            if time() > next_frame:
                print_stamped('Starting to capture frame')
                self.capture_frame()
                next_frame = time() + frame_await
            if time() > end_time:
                break
            # await a Q press
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cv2.destroyAllWindows()
class App:
def __init__(self):
    """Build GUI state: defaults, saved settings, the camera, and every window."""
    self.main_menu_btn_width = 300
    self.win_pos = (350, 50)  # default x/y position for child windows
    self.default_timelapse_path = 'Timelapses/'
    self.default_frame_name = 'frame-####.png'
    self.create_default_paths()
    self.create_defaults()
    self.load_settings()  # overrides the defaults when a save file exists
    set_theme(get_value('txt_theme'))
    self.camera = Camera(self)
    self.camera.create_dir()
    # build all windows up front; they are shown/hidden from the main menu
    self.create_main_menu()
    self.create_timelapses_win()
    self.create_red_dot_settings_win()
    self.create_timed_settings_win()
    self.create_path_settings_win()
    self.create_theme_win()
    self.create_logger_win()
    self.create_preview()
def start(self):
    """Size and title the main window, then hand control to the dearpygui
    event loop (blocks until the GUI exits)."""
    set_main_window_size(1000, 600)
    set_main_window_title('Red-Dot')
    start_dearpygui(primary_window='win_mainMenu')
#region SETTINGS
def create_defaults(self):
    """
    create space in memory for settings and assign default values
    """
    # (name, default) pairs registered in dearpygui's value storage
    defaults = (
        ('sampledColor', [0, 0, 0]),
        ('xPos', .5),
        ('yPos', .5),
        ('sensitivity', 10),
        ('pictureDelay', 1.0),
        ('tupleprevRes', [640, 480]),
        ('tupleimgRes', [1280, 720]),
        ('timeBetweenFrames', [0,0,0]),
        ('totalPrintTime', [0,0,0]),
        ('calculatedInfo', ''),
        ('txt_timelapseSavePath', self.default_timelapse_path),
        ('txt_frameName', self.default_frame_name),
        ('txt_theme', 'Default'),
    )
    for name, value in defaults:
        add_value(name, value)
def load_settings(self):
    """
    loads the settings file (if exists) else creates the file and sets default values
    """
    # check if save files dir exists
    if not os.path.exists('save_files'):
        os.mkdir('save_files')
    # see if file exists
    if os.path.exists('save_files/settings.json'):
        # if file exists load it (fix: close the handle instead of leaking it)
        with open('save_files/settings.json', 'r') as fh:
            settings = json.load(fh)
    else:
        # else create the file and fill it with default settings
        with open('save_files/settings.json', 'w') as fh:
            json.dump({}, fh)
        self.save_settings()
        return
    # if file loaded, fill out the memory with settings
    # NOTE(review): settings.get(...) yields None for keys missing from an old
    # save file, which would overwrite the defaults — confirm all keys exist.
    set_value('xPos', settings.get('searchPos')[0])
    set_value('yPos', settings.get('searchPos')[1])
    set_value('sampledColor', settings.get('sampledColor'))
    set_value('sensitivity', settings.get('sensitivity'))
    set_value('pictureDelay', settings.get('pictureDelay'))
    set_value('tupleprevRes', settings.get('tupleprevRes'))
    set_value('tupleimgRes', settings.get('tupleimgRes'))
    set_value('txt_timelapseSavePath', settings.get('txt_timelapseSavePath'))
    set_value('txt_frameName', settings.get('txt_frameName'))
    set_value('txt_theme', settings.get('txt_theme'))
def save_settings(self):
    """
    saves settings to a json file
    """
    self.check_paths()
    try:
        self.camera.update_search_settings()
    except AttributeError:
        # camera not constructed yet (save can run during __init__)
        pass
    settings = {
        "searchPos": [get_value('xPos'), get_value('yPos')],
        "sampledColor": get_value('sampledColor'),
        "sensitivity": get_value('sensitivity'),
        "pictureDelay": get_value('pictureDelay'),
        "tupleprevRes": get_value('tupleprevRes'),
        "tupleimgRes": get_value('tupleimgRes'),
        "txt_timelapseSavePath": get_value('txt_timelapseSavePath'),
        "txt_frameName": get_value('txt_frameName'),
        "txt_theme": get_value('txt_theme'),
    }
    # fix: close the file handle instead of leaking it via json.dump(open(...))
    with open('save_files/settings.json', 'w') as fh:
        json.dump(settings, fh)
    self.unsaved_changes = False
#endregion
#region WINDOWS
def create_main_menu(self):
    """Build the primary window: one show-callback button per child window,
    plus save and exit buttons."""
    # lambdas simply un-hide the pre-built child windows
    show_timelapses_win = lambda : show_item('win_timelapses')
    show_rd_win = lambda : show_item('win_redDotSettings')
    show_ttl_win = lambda : show_item('win_ttlSettings')
    show_path_win = lambda : show_item('win_pathSettings')
    show_theme_win = lambda : show_item('win_themes')
    show_logger_win = lambda : show_item('win_logger')
    with window('win_mainMenu', label='Main Menu', x_pos=self.win_pos[0], y_pos=self.win_pos[1]):
        add_button('showTimelapsesWin', label='Timelapses', width=self.main_menu_btn_width, callback=show_timelapses_win, tip='Show the Timelapses window')
        add_button('showRDWin', label='Red Dot Settings', width=self.main_menu_btn_width, callback=show_rd_win, tip='Show the Red-Dot settings window')
        add_button('showTTLWin', label='Timed Time-lapse Settings', width=self.main_menu_btn_width, callback=show_ttl_win, tip='Show the Timed Timelapse settings window')
        add_button('showLoggerWin', label='Logger', width=self.main_menu_btn_width, callback=show_logger_win, tip='Show the logger')
        add_button('showPathsWin', label='Path Settings', width=self.main_menu_btn_width, callback=show_path_win, tip='Show the path settings window')
        add_button('showThemesWin', label='Themes', width=self.main_menu_btn_width, callback=show_theme_win, tip='Show the theme selection window')
        add_dummy(height=50)
        add_button('saveButton', label='Save Settings', width=self.main_menu_btn_width, callback=self.save_settings, tip='Save all current settings')
        add_dummy(height=200)
        add_button('exitButton', label='Exit', width=self.main_menu_btn_width, callback=self.kill, tip='Perform a clean exit without saving (cleans up some temporary files)')
    def create_timelapses_win(self):
        """Build the (initially hidden) Timelapses window: start buttons for the
        three capture modes plus the preview/image resolution inputs."""
        with window('win_timelapses', label='Timelapses', autosize=True, show=False, x_pos=self.win_pos[0], y_pos=self.win_pos[1]):
            """
            timelapse settings
            """
            # Three entry points into the camera object: live viewer, red-dot
            # triggered capture, and fixed-interval (timed) capture.
            add_button('startViewer', label='Start Viewer', callback=self.camera.viewer, tip='Start the viewer, used to calibrate sensitivity')
            add_button('startRedDot', label='Start Red Dot', callback=self.camera.red_dot, tip='Start the red-dot searching timelapse with current settings')
            add_button('startTimed', label='Start Timed Time-Lapse', callback=self.camera.timed, tip='Start the Timed time-lapse with current settings')
            # Resolutions are bound to the 'tupleprevRes'/'tupleimgRes' value
            # sources, which are persisted by save_settings.
            add_input_int2('prevRes', label='Preview Resolution', source='tupleprevRes', tip='Preview resolution, keep this value very low for best results')
            add_input_int2('imgRes', label='Image Resolution', source='tupleimgRes', tip='Image resolution, if your output is black, it\'s possible your camera does not\nsupport the resolution')
    def create_red_dot_settings_win(self):
        """Build the (initially hidden) Red Dot settings window: search-pixel
        position, sensitivity, capture delay, and the sampled-colour display."""
        show_imgprev_win = lambda : show_item('imgPreview')
        with window('win_redDotSettings', label='Red Dot Settings', show=False, x_pos=self.win_pos[0], y_pos=self.win_pos[1], width=450, height=250, no_resize=True):
            """
            recognition settings
            """
            add_button('btn_showPrev', label='Show Preview', callback=show_imgprev_win, tip='Display a preview of the web cam image with the position of the search pixel overlaid')
            # x/y are normalised to [0, 1]; update_circle_pos redraws the overlay marker.
            add_input_float('xFinder', label='X Search Position', max_value=1, max_clamped=True, min_clamped=True, callback=self.update_circle_pos, source='xPos', step=0.001, tip='X position of the search pixel')
            add_input_float('yFinder', label='Y Search Position', max_value=1, max_clamped=True, min_clamped=True, callback=self.update_circle_pos, source='yPos', step=0.001, tip='Y position of the search pixel')
            add_input_int('sensInp', label='Sensitivity', source='sensitivity', min_value=0, max_value=255, min_clamped=True, max_clamped=True, tip='The sensitivity of the search, decrease value if you are getting false positives\nand increase if software does not recognise the dot')
            add_input_float('picDelay', label='Capture Delay', source='pictureDelay',min_value=0, max_value=5, min_clamped=True, max_clamped=True, tip='The delay (in seconds) between dot recognition and capture of frame, increase if timelapses are jittery')
            add_button('btn_sampleCol', label='Sample Color', callback=self.sample_color, tip='Get the colour to search for')
            # Child container holding a 50x50 swatch of the currently sampled
            # colour plus a textual RGB readout; end() closes the child.
            add_child('colourDisplayHolder', border=False)
            add_drawing('colourDisplayer', width=50, height=50)
            draw_rectangle('colourDisplayer', [0, 0], [50, 50], get_value('sampledColor'), fill=get_value('sampledColor'), tag='rect##dynamic')
            # [0, 0, 0] is the sentinel for "no colour sampled yet".
            text = 'Not Sampled' if get_value('sampledColor') == [0, 0, 0] else str(get_value('sampledColor'))
            add_text('colourDisplayText', default_value=text,parent='colourDisplayHolder')
            end()
    def create_timed_settings_win(self):
        """Build the (initially hidden) Timed Time-lapse settings window:
        total print time, frame interval, and a derived-info readout."""
        with window('win_ttlSettings', label='Timed Time-lapse Settings', autosize=True, show=False, x_pos=self.win_pos[0], y_pos=self.win_pos[1]):
            """
            timed time-lapse settings
            """
            # Both inputs are HH:MM:SS triplets; calculate_per_print refreshes
            # the 'calculatedInfo' text whenever either value changes.
            add_input_int3('printTime', label='Total Print Time', source='totalPrintTime', callback=self.calculate_per_print, tip='How long your print will take (roughly is okay),\nthis value will be used to exit the timelapse after the time has expired HH:MM:SS')
            add_input_int3('timeBetweenFrames', label='Time Between Frames', source='timeBetweenFrames', callback=self.calculate_per_print, tip='Time to wait between each frame HH:MM:SS')
            add_text('timelapseInfoText', default_value='', source='calculatedInfo')
    def create_path_settings_win(self):
        """Build the (initially hidden) Path Settings window: output directory
        and frame file-name pattern for the generated timelapse frames."""
        with window('win_pathSettings', label='Path Settings', autosize=True, show=False, x_pos=self.win_pos[0], y_pos=self.win_pos[1]):
            add_input_text('settingsSavePathInp', label='Timelapse Frames Save Path', source='txt_timelapseSavePath', tip='The directory where the generated timelapse frames will be saved to')
            # '#' characters in the name are substituted with a zero-padded frame counter.
            add_input_text('frameNamingConventionInp', label='Frame Names', source='txt_frameName', tip='The name of frames, use # to substitute a numbering system\nfor example: "frame-####.png" will be saved as "frame-0000.png"')
def create_theme_win(self):
def | |
st))
def select_mother_EBV_top(self, cat, st): # tukaj je trenutna categorija, saj jih doloačs koec generacije
selRow = list(
self.ped.loc[(self.ped.cat == cat), 'EBV'].sort_values(ascending=False)[:st].index) # katere izbereš
return list(self.ped.loc[selRow, 'Indiv'])
    def write_ped(self, path):
        """Write only the newest generation of the pedigree to `path` as a
        headerless CSV with columns Indiv, Father, Mother."""
        pedNB = self.ped[self.ped.Generation == max(self.gens())]
        pedNB.to_csv(path, columns=['Indiv', 'Father', 'Mother'], quoting=None, index=False, header=False)
    def write_pedTotal(self, path):
        """Write the complete pedigree (all generations, all columns) to `path` as CSV with header."""
        self.ped.to_csv(path, quoting=None, index=False, header=True)
    def UpdateIndCat(self, Dir):
        """Append the current generation's category assignment to Dir/IndCat.csv.

        On the first call the file does not exist yet, so a fresh table with a
        'cat0' column is created. On later calls the existing table is
        outer-merged with a new 'cat<gen>' column, so each individual keeps its
        category history across generations (missing generations become NaN).
        """
        if not os.path.isfile(Dir + '/IndCat.csv'):
            # First generation: start a fresh Indiv -> category table.
            self.IndCat = pd.DataFrame()
            self.IndCat['Indiv'] = self.ped.Indiv
            self.IndCat['cat0'] = self.ped.cat
        else:
            # Later generations: add one column per generation via outer merge,
            # keeping individuals that appear in only one of the two tables.
            self.OldIndCat = pd.read_csv(Dir + '/IndCat.csv')
            self.NewIndCat = pd.DataFrame()
            self.NewIndCat['Indiv'] = self.ped.Indiv
            self.NewIndCat['cat' + str(max(self.gens()))] = self.ped.cat
            self.IndCat = pd.merge(self.OldIndCat, self.NewIndCat, on='Indiv', how='outer')
        self.IndCat.to_csv(Dir + '/IndCat.csv', index=None)
    def select_age_0_1(self, categories, nrFn, nrMn, telFn, vhlevljenin, potomciNPn,
                       telMn):  # here you select heifer calves, bull calves and young housed bulls from the newborns, plus natural service 1
        """Age 0->1 selection step: from the newborn animals pick heifer calves
        (telF), housed young bulls (vhlevljeni) and male calves (telM); the rest
        are culled. Category bookkeeping happens in the passed `categories` dict."""
        # FEMALES
        self.set_cat_sex_old("F", "potomciNP", "telF", categories)
        izlF = nrFn - telFn  # number of newborn females to cull
        self.izloci_poEBV("F", izlF, "nr", categories)  # cull them here (function lives in the module)
        self.izberi_poEBV_top("F", (nrFn - izlF), "nr", "telF", categories)  # select the heifers to be inseminated --> future cows
        # MALES
        self.izberi_poEBV_top("M", vhlevljenin, "potomciNP", "vhlevljeni",
                              categories)  # select young bulls - ONLY AT THE START; LATER THE OFFSPRING OF BULL DAMS AND ELITE!
        self.izloci_poEBV("M", int(potomciNPn - vhlevljenin), 'potomciNP', categories)
        self.izberi_random("M", telMn, "nr", "telM", categories)
        self.izloci_random("M", int(nrMn - telMn), "nr", categories)
    def select_age_1_2(self, categories, ptn, mladin, vhlevljenin,
                       bik12n):  # nothing is selected among cows here - unless they are already bull dams; natural service 2, bulls up to 24 months
        """Age 1->2 selection step: heifer calves become pregnant heifers (pt),
        housed bulls split into young test bulls (mladi) and natural service
        (pripust1), male calves into bik12; the remainder is culled."""
        # FEMALES
        self.izberi_poEBV_top("F", ptn, 'telF', 'pt', categories)
        self.izloci_poEBV("F", (len(categories['telF']) - ptn), 'telF', categories)  # heifers move on
        # MALES
        self.izberi_poEBV_top("M", mladin, "vhlevljeni", "mladi", categories)  # select the young test bulls
        self.izberi_poEBV_OdDo("M", mladin, vhlevljenin, "vhlevljeni", "pripust1", categories)  # select the natural-service bulls
        self.izberi_random("M", bik12n, 'telM', 'bik12', categories)
        self.izloci_random("M", (len(categories['telM']) - bik12n), 'telM', categories)
    def select_age_2_3(self, categories, kraveUp, bmOdbira, bmn, bmUp, cak, pripust1n, pripust2n, mladin):
        """Age 2->3 selection step (and beyond): promote pregnant heifers to
        cows, age/cull cows and bull dams (BM), move young bulls to the waiting
        (cak) category, and promote the best waiting bulls to proven bulls (pb)."""
        # FEMALES
        # first add the new cows
        self.set_cat_old('pt', 'k', categories)  # inseminated heifers become cows - we assume all of them
        # then cull the oldest cows - after the 4th lactation
        if ('k' in categories.keys()) and ((kraveUp + 2) in self.age()):  # cull at: number of lactations + 2 years
            self.izloci_age_cat((kraveUp + 2), 'k', categories)
        # move the remaining cows forward as cows - I.E. EXTEND THEIR STATUS!
        self.set_cat_age_old(3, 'k', 'k', categories)
        self.set_cat_age_old(4, 'k', 'k', categories)
        self.set_cat_age_old(5, 'k', 'k', categories)
        # if the cows are old enough, select bull dams (BM)
        # BM are selected after the second lactation - that is age 3 - 4 (age in the pedigree = 3, because age 0 also exists)
        if ('k' in categories.keys()) and ((1 + bmOdbira) in self.age()):
            self.izberi_poEBV_top_age("F", 3, int(bmn / bmUp), 'k', 'pBM', categories)  # select the bull dams
        # and cull the oldest BM, if there are any
        if ('bm' in categories.keys()):
            self.izloci_cat('bm', categories)
        # move the remaining BM forward
        for i in range((1 + bmOdbira + 1), (
                1 + bmOdbira + bmUp)):  # 1 year first insemination, BM selected after 2nd lactation, +1 to start shifting
            self.set_cat_age_old(i, 'pBM', 'pBM', categories)
        self.set_cat_age_old((1 + bmOdbira + bmUp), 'pBM', 'bm',
                             categories)  # change category from breeding BM to bm in the last lactation
        # MALES
        # young bulls become waiting bulls (~1 year to inseminate cows with their semen, i.e. shortly after age 2)
        self.set_cat_old('mladi', 'cak', categories)
        self.set_active_cat('mladi', 2, categories)
        # extend the status of the waiting bulls (until age 5)
        # and at the same time set their status to izl (culled)
        # ped.set_cat_age_old(2, 'cak', 'cak', categories)
        for i in range((2 + 1), (
                2 + cak)):  # 1 year when they start as young test bulls, at 3 they are waiting, +1 to start shifting
            self.set_cat_age_old(i, 'cak', 'cak', categories)
        # average time in natural service - based on that, pick the bulls that survive one more year
        if 'pripust1' in categories.keys():
            self.izberi_random("M", pripust2n, 'pripust1', 'pripust2', categories)
            self.izloci_random("M", (pripust1n - pripust2n), 'pripust1', categories)
        # keep moving the proven bulls forward
        self.set_cat_old('pb', 'pb', categories)
        self.izloci_cat('bik12', categories)
        self.izloci_cat('pripust2', categories)
        if ('cak' in categories.keys()) and ((cak + 2) in self.age()):  # +2 - one year as calves, one year as young test bulls
            self.izberi_poEBV_top_age("M", (cak + 2), int(mladin * 0.5), 'cak', 'pb', categories)
            self.set_active_cat('cak', 2,
                                categories)  # must be set here, because the selection function automatically sets active=1
            self.izloci_poEBV_age("M", (cak + 2), int(mladin * 0.5), 'cak',
                                  categories)  # THE AGE MUST STILL BE GIVEN HERE!!!!!!!!!!!
    def doloci_matere(self, stNB, potomciNPn, ptn, kraveUp):
        """Assign mothers to the newborn animals: bull dams (pBM) mother the
        planned-mating offspring (potomciNP); the best cows mother the rest (nr)."""
        # MOTHERS
        # number of bull-dam mothers: at most potomciNPn * 2, capped by how many pBM exist
        sTbmMother = (potomciNPn * 2) if len(self.catCurrent_indiv('pBM')) >= (potomciNPn * 2) else len(
            self.catCurrent_indiv('pBM'))
        print sTbmMother
        print self.cat()
        print self.active()
        if sTbmMother != 0:
            bmMother = self.select_mother_random('pBM', sTbmMother)
            self.set_mother_catPotomca(bmMother, 'potomciNP')
        #
        if 'k' in self.cat():  # ALL THE MOTHERS ARE NOW ASSIGNED HERE!!!
            mother = self.select_mother_EBV_top('k', int(
                round(ptn * kraveUp * 0.7)))  # select here, excluding the cows reserved for commercial crossing
            if len(mother) >= (
                    stNB - sTbmMother):  # if you already have enough cows, assign mothers to all newborns, i.e. you select mothers because there are too many
                motherOther = random.sample(mother, (stNB - sTbmMother))
                self.set_mother_catPotomca(motherOther, 'nr')  # ALL THE MOTHERS ARE NOW ASSIGNED HERE!!!
            elif len(mother) < (
                    stNB - sTbmMother):  # if there are not enough yet, do not select mothers but use them all MINUS commercial crossings
                self.set_mother_catPotomca(mother, 'nr')
    def doloci_ocete(self, stNB, potomciNPn, cak, pbUp, pripustDoz, mladiDoz, pozitivnoTestDoz):
        """Assign fathers to the newborn animals: proven bulls (the 'virtual
        elite') father the planned-mating offspring; all available semen doses
        from natural-service, tested, young and genomically tested bulls are
        pooled and sampled for the remaining newborns."""
        # FATHERS
        mladiOce = self.catCurrent_indiv('mladi')
        pripustOce = self.catCurrent_indiv('pripust1') + self.catCurrent_indiv('pripust2')
        testiraniOce = list(chain.from_iterable([self.catCurrent_indiv_age('pb', (2 + cak + x)) for x in range(1,
                                                                                                               pbUp + 1)]))  # at the time offspring are assigned, they are already one year older!!!
        gentestiraniOce = list(chain.from_iterable([self.catCurrent_indiv_age('gpb', x) for x in range(1,
                                                                                                       pbUp + 1)]))  # at the time offspring are assigned, they are already one year older!!!
        # number of bull-dam matings (mirrors the cap used in doloci_matere)
        bmMother = (potomciNPn * 2) if len(self.catCurrent_indiv('pBM')) >= (potomciNPn * 2) else len(
            self.catCurrent_indiv('pBM'))
        if 'pb' in self.cat():
            elita = np.random.choice(testiraniOce, bmMother, replace=True)  # the virtual elite
            # pd.Series(elita).value_counts()  # check representation per bull
            # set the elite fathers --> for the BM offspring
            self.set_father_catPotomca(elita, 'potomciNP')
        # each father is repeated once per available semen dose
        ocetje = pripustOce * pripustDoz + testiraniOce * pozitivnoTestDoz + mladiOce * mladiDoz + gentestiraniOce * pozitivnoTestDoz
        if len(ocetje) >= (stNB - potomciNPn * 2):  # if you have enough doses for all newborns
            ocetjeNB = random.sample(ocetje, (stNB - potomciNPn * 2))  # here you pick fathers for all cows - except the BM!
            self.set_father_catPotomca(ocetjeNB, 'nr')
        if len(ocetje) < (stNB - potomciNPn * 2):
            self.set_father_catPotomca(ocetje, 'nr')
def save_cat_DF(self):
categoriesDF = pd.DataFrame.from_dict(self.save_cat(), orient='index').transpose()
categoriesDF.to_csv('Categories_gen' + str(max(self.gens())) + 'DF.csv', index=None)
def save_sex_DF(self):
sexDF = pd.DataFrame.from_dict(self.save_sex(), orient='index').transpose()
sexDF.to_csv('Sex_gen' + str(max(self.gens())) + 'DF.csv', index=None)
def save_active_DF(self):
activeDF = pd.DataFrame.from_dict(self.save_active(), orient='index').transpose()
activeDF.to_csv('Active_gen' + str(max(self.gens())) + 'DF.csv', index=None)
def create_categoriesDict(self, catDFEx):
categories = defaultdict(list)
catDF = pd.read_csv(catDFEx)
for cat in catDF.columns:
values = [int(i) for i in catDF[cat] if not math.isnan(i)]
categories[cat] = values
return categories
def create_sexDict(self, sexDFEx):
sexDict = defaultdict(list)
sexDF = pd.read_csv(sexDFEx)
for sex in sexDF.columns:
values = [int(i) for i in sexDF[sex] if not math.isnan(i)]
sexDict[sex] = values
return sexDict
def create_activeDict(self, activeDFEx):
activeDict = defaultdict(list)
activeDF = pd.read_csv(activeDFEx)
for active in activeDF.columns:
values = [int(i) for i in activeDF[active] if not math.isnan(i)]
activeDict[active] = values
return activeDict
class OrigPed():
    """Wrapper around the AlphaSim pedigree/genetic-values output file."""
    def __init__(self, AlphaSimDir):
        # Path to AlphaSim's whitespace-separated pedigree + true values table.
        self.name = AlphaSimDir + '/SimulatedData/PedigreeAndGeneticValues.txt'
        self.pdPed = pd.read_table(self.name, sep='\s+')
    def computeEBV(self, cor):
        # Compute the EBVs in R and write the pedigree: copy the template R
        # script, substitute the pedigree path and the accuracy/correlation
        # placeholders with sed, then run it with Rscript.
        # NOTE(review): the template path is machine-specific, and self.name /
        # cor are spliced into a shell command unescaped (shell injection risk
        # if the directory name contains sed/shell metacharacters).
        shutil.copy("/home/jana/Genotipi/Genotipi_CODES/Rcorr_PedEBV.R", "Rcorr_PedEBV_ThisGen.R")
        os.system('sed -i "s|AlphaSimPed|' + self.name + '|g" Rcorr_PedEBV_ThisGen.R')
        os.system('sed -i "s|setCor|' + str(cor) + '|g" Rcorr_PedEBV_ThisGen.R')
        call('Rscript Rcorr_PedEBV_ThisGen.R', shell=True)
################################################################
# FUNKCIJE
###################################################################
def selekcija_total(pedFile, **kwargs):
print kwargs
ped = pedigree(pedFile)
# tukaj potem pridobi kategorije - če imaš samo eno burn-in, štartaš iz nule
if max(ped.gen) == 1:
ped.set_cat_gen(max(ped.gen), "nr") # to je samo na prvem loopu
ped.set_sex_list([x for x in range(0, ped.rows()) if x % 2 == 0], "F")
ped.set_sex_list([x for x in range(0, ped.rows()) if x % 2 != 0], "M")
ped.izberi_poEBV_top_catCurrent("F", int(kwargs.get('potomciNPn')), 'nr', 'potomciNP')
ped.izberi_poEBV_top_catCurrent("M", int(kwargs.get('potomciNPn')), 'nr', 'potomciNP')
# global categories #to moraš dat global samo v prvenm loopu, drugje dobiš return
categories = ped.save_cat()
# global sex
sex = ped.save_sex()
active = ped.save_active()
ped = pedigree(pedFile)
elif max(ped.gens()) > 1:
categories = ped.create_categoriesDict('Categories_gen' + str(max(ped.gens())) + 'DF.csv')
sex = ped.create_sexDict('Sex_gen' + str(max(ped.gens())) + 'DF.csv')
active = ped.create_activeDict('Active_gen' + str(max(ped.gens())) + 'DF.csv')
ped.set_sex_prevGen(sex) # add sex information for individuals from prevGen
ped.set_active_prevGen(active) # add active information for individuals from prevGen
# remove category information from the ped itself
for i in ped.gens():
ped.set_cat_gen(i, "")
# transfer culled (izlocene) | |
<filename>preprocess.py
from __future__ import print_function
import numpy as np
import pandas as pd
import random
import argparse
import os
import sys
from shutil import copyfile
import pickle as pkl
def command_parser():
    """Parse the command line and return the argparse Namespace, with an extra
    `dirname` attribute holding the input file's directory (trailing slash)."""
    p = argparse.ArgumentParser()
    p.add_argument('-f', dest='filename', help='Input file', required=True, type=str)
    p.add_argument('--columns', default="uit", type=str,
                   help='Order of the columns in the file (eg: "uirt"), u for user, i for item, t for timestamp, r for rating. If r is not present a default rating of 1 is given to all interaction. If t is not present interactions are assumed to be in chronological order. Extra columns are ignored. Default: uit')
    p.add_argument('--sep', default="\s+", type=str,
                   help='Separator between the column. If unspecified pandas will try to guess the separator')
    p.add_argument('--min_user_activity', default=2, type=int,
                   help='Users with less interactions than this will be removed from the dataset. Default: 2')
    p.add_argument('--min_item_pop', default=5, type=int,
                   help='Items with less interactions than this will be removed from the dataset. Default: 5')
    p.add_argument('--val_size', default=0.1, type=float,
                   help='Number of users to put in the validation set. If in (0,1) it will be interpreted as the fraction of total number of users. Default: 0.1')
    p.add_argument('--test_size', default=0.1, type=float,
                   help='Number of users to put in the test set. If in (0,1) it will be interpreted as the fraction of total number of users. Default: 0.1')
    p.add_argument('--seed', default=1, type=int,
                   help='Seed for the random train/val/test split')
    opts = p.parse_args()
    # All output paths are derived from the input file's directory.
    opts.dirname = os.path.dirname(os.path.abspath(opts.filename)) + "/"
    return opts
def warn_user(dirname):
    '''Ask user if he's sure to create files in that directory.

    Exits the process (status 0) unless the user answers exactly "y".
    '''
    print('This program will create a lot of files and directories in ' + dirname)
    # BUG FIX: `raw_input` only exists on Python 2, so this crashed with
    # NameError on Python 3 even though the file imports print_function for
    # 2/3 compatibility. Alias whichever prompt function is available.
    try:
        prompt = raw_input  # Python 2
    except NameError:
        prompt = input  # Python 3
    answer = prompt('Are you sure that you want to do that ? [y/n]')
    if answer != "y":
        sys.exit(0)
def create_dirs(dirname):
    """Create the data/, models/ and results/ subdirectories of `dirname`
    (which must end with a slash) if they do not already exist."""
    for sub in ("data", "models", "results"):
        target = dirname + sub
        if not os.path.exists(target):
            os.makedirs(target)
def load_data(filename, columns, separator):
    ''' Load the data from filename and sort it according to timestamp.
    Returns a dataframe with 3 columns: user_id, item_id, rating
    '''
    print('Load data...')
    df = pd.read_csv(filename, sep=separator, names=list(columns), index_col=False,
                     usecols=range(len(columns)))
    if 'r' not in columns:
        # No rating column in the file: give every interaction a default rating of 1.
        df['r'] = 1
    if 't' in columns:
        # Normalise the time column to datetimes, then sort chronologically.
        looks_like_epoch = (df['t'].dtype == np.int64)  # probably a unix timestamp
        df['t'] = pd.to_datetime(df['t'], unit='s') if looks_like_epoch else pd.to_datetime(df['t'])
        print('Sort data in chronological order...')
        df.sort_values('t', inplace=True)
    return df
def load_data_split(folder, columns, separator):
    ''' Load pre-split train/test interaction graphs from pickles in `folder`
    and sort each according to timestamp.
    Returns two dataframes (train, test) with user_id, item_id, rating columns.
    '''
    print('Load data...')
    def _read_graph(path):
        # each pickle holds a dict with the interaction rows under 'graph'
        with open(path, 'rb') as f:
            return pkl.load(f)['graph']
    traindata = pd.DataFrame(np.array(_read_graph(folder + '/traindata_small.pkl')), columns=list(columns))
    testdata = pd.DataFrame(np.array(_read_graph(folder + '/testdata_small.pkl')), columns=list(columns))
    if 'r' not in columns:
        # No rating column: give every interaction a default rating of 1.
        traindata['r'] = 1
        testdata['r'] = 1
    if 't' in columns:
        def _chronological(df):
            # normalise to datetimes, then sort in place
            if df['t'].dtype == np.int64:  # probably a timestamp
                df['t'] = pd.to_datetime(df['t'], unit='s')
            else:
                df['t'] = pd.to_datetime(df['t'])
            print('Sort data in chronological order...')
            df.sort_values('t', inplace=True)
        _chronological(traindata)
        _chronological(testdata)
    return traindata, testdata
def remove_rare_elements(data, min_user_activity, min_item_popularity):
    '''Removes user and items that appears in too few interactions.
    min_user_activity is the minimum number of interaction that a user should have.
    min_item_popularity is the minimum number of interaction that an item should have.
    NB: the constraint on item might not be strictly satisfied because rare users and items are removed in alternance,
    and the last removal of inactive users might create new rare items.
    '''
    print('Remove inactive users and rare items...')
    def _keep_frequent(df, col, threshold):
        # keep only rows whose `col` value occurs at least `threshold` times
        counts = df.groupby(col).size()
        return df[df[col].isin(counts[counts >= threshold].index)]
    # first pass on users, then items, then users again (item removal may have
    # pushed some users back below the activity threshold)
    data = _keep_frequent(data, 'u', min_user_activity)
    data = _keep_frequent(data, 'i', min_item_popularity)
    data = _keep_frequent(data, 'u', min_user_activity)
    return data
def save_index_mapping(data, separator, dirname):
    ''' Save the mapping of original user and item ids to numerical consecutive ids in dirname.
    NB: some users and items might have been removed in previous steps and will therefore not appear in the mapping.

    Mutates `data` in place: adds u_original/i_original columns and overwrites
    'u' and 'i' with dense 0-based codes. Returns the mutated dataframe.
    '''
    # NOTE(review): the `separator` argument is immediately overridden, so the
    # mapping files are always tab-separated regardless of the caller's value —
    # confirm this is intentional before removing the override.
    separator = "\t"
    # Pandas categorical type will create the numerical ids we want
    print('Map original users and items ids to consecutive numerical ids...')
    data['u_original'] = data['u'].astype('category')
    data['i_original'] = data['i'].astype('category')
    data['u'] = data['u_original'].cat.codes
    data['i'] = data['i_original'].cat.codes
    print('Save ids mapping to file...')
    # One row per distinct id, sorted by the original id for readability.
    user_mapping = pd.DataFrame({'original_id' : data['u_original'], 'new_id': data['u']})
    user_mapping.sort_values('original_id', inplace=True)
    user_mapping.drop_duplicates(subset='original_id', inplace=True)
    user_mapping.to_csv(dirname+"data/user_id_mapping", sep=separator, index=False)
    item_mapping = pd.DataFrame({'original_id' : data['i_original'], 'new_id': data['i']})
    item_mapping.sort_values('original_id', inplace=True)
    item_mapping.drop_duplicates(subset='original_id', inplace=True)
    item_mapping.to_csv(dirname+"data/item_id_mapping", sep=separator, index=False)
    return data
def split_data(data, nb_val_users, nb_test_users, dirname):
    '''Splits the data set into training, validation and test sets.
    Each user is in one and only one set.
    nb_val_users is the number of users to put in the validation set.
    nb_test_users is the number of users to put in the test set.
    Values in (0, 1) are interpreted as fractions of the total number of users.
    Saves the three sets as tab-separated (u, i, r) triplet files under
    dirname/data/ and returns (train_set, val_set, test_set).
    '''
    nb_users = data['u'].nunique()
    # check if nb_val_user is specified as a fraction
    if nb_val_users < 1:
        nb_val_users = round(nb_val_users * nb_users)
    if nb_test_users < 1:
        nb_test_users = round(nb_test_users * nb_users)
    nb_test_users = int(nb_test_users)
    nb_val_users = int(nb_val_users)
    if nb_users <= nb_val_users+nb_test_users:
        raise ValueError('Not enough users in the dataset: choose less users for validation and test splits')
    def extract_n_users(df, n):
        # Sample exactly n *distinct* users. BUG FIX: np.random.choice defaults
        # to replace=True, which could draw the same user twice and silently
        # produce a split with fewer users than requested.
        users_ids = np.random.choice(df['u'].unique(), n, replace=False)
        n_set = df[df['u'].isin(users_ids)]
        remain_set = df.drop(n_set.index)
        return n_set, remain_set
    print('Split data into training, validation and test sets...')
    test_set, tmp_set = extract_n_users(data, nb_test_users)
    val_set, train_set = extract_n_users(tmp_set, nb_val_users)
    print('Save training, validation and test sets in the triplets format...')
    train_set.to_csv(dirname + "data/train_set_triplets", sep="\t", columns=['u', 'i', 'r'], index=False, header=False)
    val_set.to_csv(dirname + "data/val_set_triplets", sep="\t", columns=['u', 'i', 'r'], index=False, header=False)
    test_set.to_csv(dirname + "data/test_set_triplets", sep="\t", columns=['u', 'i', 'r'], index=False, header=False)
    return train_set, val_set, test_set
def split_data_timeline(traindata, testdata, dirname):
    '''Time-based split: `testdata` is used unchanged as the test set, while
    the last 20% of each user's interactions in `traindata` (by order of
    appearance) become the validation set and the rest the training set.
    Saves the three sets as tab-separated (u, i, r) triplet files under
    dirname/data/ and returns (train_set, val_set, test_set).
    '''
    test_set = testdata
    # per-user interaction counts, broadcast back onto every row
    group_sizes = traindata['u'].map(traindata.groupby('u').apply(len))
    # 1-based position of each interaction within its user's history
    within_user_rank = traindata.groupby('u')['i'].rank(method='first')
    is_tail = (within_user_rank / group_sizes) > 0.8
    val_set = traindata[is_tail]
    train_set = traindata[~is_tail]
    print('Save training, validation and test sets in the triplets format...')
    train_set.to_csv(dirname + "data/train_set_triplets", sep="\t", columns=['u', 'i', 'r'], index=False, header=False)
    val_set.to_csv(dirname + "data/val_set_triplets", sep="\t", columns=['u', 'i', 'r'], index=False, header=False)
    test_set.to_csv(dirname + "data/test_set_triplets", sep="\t", columns=['u', 'i', 'r'], index=False, header=False)
    return train_set, val_set, test_set
# def split_data_timeline(data, nb_val_users, nb_test_users, dirname):
# '''Splits the data set into training, validation and test sets.
# Each user is in one and only one set.
# nb_val_users is the number of users to put in the validation set.
# nb_test_users is the number of users to put in the test set.
# '''
# #Create train and test set by 80-20\% split
# counts = data['u'].map(data.groupby('u').apply(len))
# ranks = data.groupby('u')['i'].rank(method='first')
# mask = (ranks / counts) > 0.8
# test_set = data[mask]
# rem_data = data[~mask]
#
# counts = rem_data['u'].map(rem_data.groupby('u').apply(len))
# ranks = rem_data.groupby('u')['i'].rank(method='first')
# mask = (ranks / counts) > 0.8
# val_set = rem_data[mask]
# train_set = rem_data[~mask]
#
# print('Save training, validation and test sets in the triplets format...')
# train_set.to_csv(dirname + "data/train_set_triplets", sep="\t", columns=['u', 'i', 'r'], index=False, header=False)
# val_set.to_csv(dirname + "data/val_set_triplets", sep="\t", columns=['u', 'i', 'r'], index=False, header=False)
# test_set.to_csv(dirname + "data/test_set_triplets", sep="\t", columns=['u', 'i', 'r'], index=False, header=False)
#
# return train_set, val_set, test_set
def gen_sequences(data, half=False):
    '''Generates sequences of user actions from data.
    each sequence has the format [user_id, first_item_id, first_item_rating, 2nd_item_id, 2nd_item_rating, ...].
    If half is True, cut the sequences to half their true length (useful to produce the extended training set).
    Sequences with fewer than two interactions (len(seq) <= 3) are skipped.
    '''
    # Mergesort is stable, so sorting by user preserves each user's
    # chronological interaction order.
    data = data.sort_values('u', kind="mergesort")
    def _emit(seq):
        # A usable sequence needs >= 2 interactions: [u, i1, r1, i2, r2, ...]
        if len(seq) > 3:
            if half:
                return seq[:1 + 2 * int((len(seq) - 1) / 4)]
            return seq
        return None
    seq = []
    prev_id = -1
    for u, i, r in zip(data['u'], data['i'], data['r']):
        if u != prev_id:
            out = _emit(seq)
            if out is not None:
                yield out
            prev_id = u
            seq = [u]
        seq.extend([i, r])
    # BUG FIX: the trailing sequence was previously yielded without the
    # len > 3 guard applied to every other sequence, so the last user with a
    # single interaction (or an empty dataframe) produced a degenerate
    # sequence. Apply the same filter here.
    out = _emit(seq)
    if out is not None:
        yield out
def make_sequence_format(train_set, val_set, test_set, dirname):
    '''Convert the train/validation/test sets in the sequence format and save them.
    Also create the extended training sequences, which countains the first half of the sequences of users in the validation and test sets.
    '''
    def _dump(path, dataset, mode="w", half=False):
        # one whitespace-separated sequence per line
        with open(path, mode) as f:
            for s in gen_sequences(dataset, half=half):
                f.write(' '.join(map(str, s)) + "\n")
    print('Save the training set in the sequences format...')
    _dump(dirname + "data/train_set_sequences", train_set)
    print('Save the validation set in the sequences format...')
    _dump(dirname + "data/val_set_sequences", val_set)
    print('Save the test set in the sequences format...')
    _dump(dirname + "data/test_set_sequences", test_set)
    # sequences+ contains all the sequences of train_set_sequences plus half the sequences of val and test sets
    print('Save the extended training set in the sequences format...')
    copyfile(dirname + "data/train_set_sequences", dirname + "data/train_set_sequences+")
    _dump(dirname + "data/train_set_sequences+", val_set, mode="a", half=True)
    _dump(dirname + "data/train_set_sequences+", test_set, mode="a", half=True)
def save_data_stats(data, train_set, val_set, test_set, dirname):
print('Save stats...')
def _get_stats(df):
return "\t".join(map(str, [df['u'].nunique(), df['i'].nunique(), len(df.index), df.groupby('u').size().max()]))
with open(dirname+"data/stats", "w") as f:
f.write("set\tn_users\tn_items\tn_interactions\tlongest_sequence\n")
f.write("Full\t"+ _get_stats(data) + "\n")
f.write("Train\t"+ _get_stats(train_set) + "\n")
f.write("Val\t"+ _get_stats(val_set) + "\n")
f.write("Test\t"+ _get_stats(test_set) + | |
<gh_stars>1000+
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import collections
import functools
import itertools
import logging
from dashboard.common import timing
from google.appengine.ext import ndb
from google.appengine.ext import db
# Public API of this module (Evaluate/AppendTasklog are defined further down).
__all__ = (
    'PopulateTaskGraph',
    'TaskGraph',
    'TaskVertex',
    'Dependency',
    'Evaluate',
    'ExtendTaskGraph',
    'UpdateTask',
    'AppendTasklog',
)
# Lightweight value types describing a task graph: vertices carry an id, a
# type tag and an opaque payload; edges point from_ -> to.
TaskVertex = collections.namedtuple('TaskVertex',
                                    ('id', 'vertex_type', 'payload'))
Dependency = collections.namedtuple('Dependency', ('from_', 'to'))
TaskGraph = collections.namedtuple('TaskGraph', ('vertices', 'edges'))
# These InMemoryTask instances are meant to isolate the Task model which is
# actually persisted in Datastore.
InMemoryTask = collections.namedtuple(
    'InMemoryTask', ('id', 'task_type', 'payload', 'status', 'dependencies'))
# Allowed status transitions for a Task; keys are current states, values are
# the states reachable from them (terminal states can be reset to 'pending').
VALID_TRANSITIONS = {
    'pending': {'ongoing', 'completed', 'failed', 'cancelled'},
    'ongoing': {'completed', 'failed', 'cancelled'},
    'cancelled': {'pending'},
    'completed': {'pending'},
    'failed': {'pending'},
}
# Traversal states used in the graph traversal. We use these as marks for when
# vertices are traversed, as how we would implement graph colouring in a graph
# traversal (like Depth First Search).
NOT_EVALUATED, CHILDREN_PENDING, EVALUATION_DONE = (0, 1, 2)
# A loaded task graph: the terminal (sink) tasks plus a lookup of all tasks.
ReconstitutedTaskGraph = collections.namedtuple('ReconstitutedTaskGraph',
                                                ('terminal_tasks', 'tasks'))
class Error(Exception):
  """Base exception for all errors raised by the execution engine."""
  pass
class InvalidAmendment(Error):
  """Raised when an ExtendTaskGraph amendment conflicts with the stored graph."""
  pass
class TaskNotFound(Error):
  """Raised when a referenced task id does not exist for the given job."""
  pass
class InvalidTransition(Error):
  """Raised on a task status change not allowed by VALID_TRANSITIONS."""
  pass
# These are internal-only models, used as an implementation detail of the
# execution engine.
class Task(ndb.Model):
  """A Task associated with a Pinpoint Job.

  Task instances are always associated with a Job. Tasks represent units of work
  that are in well-defined states. Updates to Task instances are transactional
  and need to be.
  """
  # Free-form type tag identifying what kind of work this task performs.
  task_type = ndb.StringProperty(required=True)
  # Current state; constrained to the keys of VALID_TRANSITIONS.
  status = ndb.StringProperty(required=True, choices=VALID_TRANSITIONS.keys())
  # Opaque, compressed JSON payload; not indexed.
  payload = ndb.JsonProperty(compressed=True, indexed=False)
  # Keys of the Task entities this task depends on.
  dependencies = ndb.KeyProperty(repeated=True, kind='Task')
  created = ndb.DateTimeProperty(required=True, auto_now_add=True)
  # NOTE(review): auto_now_add only sets this on creation — presumably callers
  # refresh `updated` explicitly on writes; confirm before relying on it.
  updated = ndb.DateTimeProperty(required=True, auto_now_add=True)
  def ToInMemoryTask(self):
    """Return an immutable InMemoryTask snapshot of this entity."""
    # We isolate the ndb model `Task` from the evaluator, to avoid accidentially
    # modifying the state in datastore.
    return InMemoryTask(
        id=self.key.id(),
        task_type=self.task_type,
        payload=self.payload,
        status=self.status,
        dependencies=[dep.id() for dep in self.dependencies])
class TaskLog(ndb.Model):
  """Log entries associated with Task instances.

  TaskLog instances are always associated with a Task. These entries are
  immutable once created.
  """
  # Creation time, set automatically; not indexed.
  timestamp = ndb.DateTimeProperty(
      required=True, auto_now_add=True, indexed=False)
  # Human-readable log message.
  message = ndb.TextProperty()
  # Optional structured payload accompanying the message.
  payload = ndb.JsonProperty(compressed=True, indexed=False)
@ndb.transactional(propagation=ndb.TransactionOptions.INDEPENDENT, retries=0)
def PopulateTaskGraph(job, graph):
  """Populate the Datastore with Task instances associated with a Job.

  The `graph` argument must have two properties: a collection of `TaskVertex`
  instances named `vertices` and a collection of `Dependency` instances named
  `dependencies`.
  """
  if job is None:
    raise ValueError('job must not be None.')
  parent_key = job.key
  # Materialise one pending Task entity per vertex, keyed under the job.
  task_by_id = {}
  for vertex in graph.vertices:
    task_by_id[vertex.id] = Task(
        key=ndb.Key(Task, vertex.id, parent=parent_key),
        task_type=vertex.vertex_type,
        payload=vertex.payload,
        status='pending')
  # Wire up dependency keys, skipping duplicate edges.
  seen_edges = set()
  for edge in graph.edges:
    if edge in seen_edges:
      continue
    seen_edges.add(edge)
    task_by_id[edge.from_].dependencies.append(
        ndb.Key(Task, edge.to, parent=parent_key))
  ndb.put_multi(task_by_id.values(), use_cache=True)
@ndb.transactional(propagation=ndb.TransactionOptions.INDEPENDENT, retries=0)
def ExtendTaskGraph(job, vertices, dependencies):
  """Add new vertices and dependency links to the graph.

  Args:
    job: a dashboard.pinpoint.model.job.Job instance.
    vertices: an iterable of TaskVertex instances.
    dependencies: an iterable of Dependency instances.

  Raises:
    ValueError: if job is None.
    InvalidAmendment: if a new vertex id already exists in the stored graph, or
        a dependency's `from_` vertex is in neither the stored nor the new set.
  """
  if job is None:
    raise ValueError('job must not be None.')
  if not vertices and not dependencies:
    return
  job_key = job.key
  # New Task entities start in the 'pending' state, keyed under the job.
  amendment_task_graph = {
      v.id: Task(
          key=ndb.Key(Task, v.id, parent=job_key),
          task_type=v.vertex_type,
          status='pending',
          payload=v.payload) for v in vertices
  }
  # Ensure that the keys we're adding are not in the graph yet.
  current_tasks = Task.query(ancestor=job_key).fetch()
  current_task_keys = set(t.key for t in current_tasks)
  new_task_keys = set(t.key for t in amendment_task_graph.values())
  overlap = new_task_keys & current_task_keys
  if overlap:
    raise InvalidAmendment('vertices (%r) already in task graph.' % (overlap,))
  # Then we add the dependencies.
  current_task_graph = {t.key.id(): t for t in current_tasks}
  handled_dependencies = set()
  # Only tasks whose dependency list changed (plus all new tasks) are written.
  update_filter = set(amendment_task_graph)
  for dependency in dependencies:
    dependency_key = ndb.Key(Task, dependency.to, parent=job_key)
    if dependency not in handled_dependencies:
      # The `from_` side may live in the stored graph, the amendment, or both.
      current_task = current_task_graph.get(dependency.from_)
      amendment_task = amendment_task_graph.get(dependency.from_)
      if current_task is None and amendment_task is None:
        raise InvalidAmendment('dependency `from` (%s) not in amended graph.' %
                               (dependency.from_,))
      if current_task:
        current_task_graph[dependency.from_].dependencies.append(dependency_key)
      if amendment_task:
        amendment_task_graph[dependency.from_].dependencies.append(
            dependency_key)
      handled_dependencies.add(dependency)
      update_filter.add(dependency.from_)
  # Persist all new tasks and any existing tasks that gained dependencies.
  ndb.put_multi(
      itertools.chain(
          amendment_task_graph.values(),
          [t for id_, t in current_task_graph.items() if id_ in update_filter]),
      use_cache=True)
@ndb.transactional(propagation=ndb.TransactionOptions.INDEPENDENT, retries=0)
def UpdateTask(job, task_id, new_state=None, payload=None):
  """Transactionally update a task's status and/or payload.

  Status changes are validated against the VALID_TRANSITIONS map; only the
  transitions listed there are allowed. At least one of `new_state` or
  `payload` must be provided.

  Raises:
    ValueError: if neither `new_state` nor `payload` is given.
    InvalidTransition: if `new_state` is unknown or unreachable from the
      task's current status.
    TaskNotFound: if no task with `task_id` exists under `job`.
  """
  if new_state is None and payload is None:
    raise ValueError('Set one of `new_state` or `payload`.')
  if new_state and new_state not in VALID_TRANSITIONS:
    raise InvalidTransition('Unknown state: %s' % (new_state,))
  task = Task.get_by_id(task_id, parent=job.key)
  if not task:
    raise TaskNotFound('Task with id "%s" not found for job "%s".' %
                       (task_id, job.job_id))
  if new_state:
    allowed = VALID_TRANSITIONS.get(task.status)
    if new_state not in allowed:
      raise InvalidTransition(
          'Attempting transition from "%s" to "%s" not in %s; task = %s' %
          (task.status, new_state, allowed, task))
    task.status = new_state
  if payload:
    task.payload = payload
  task.put()
def LogStateTransitionFailures(wrapped_action):
  """Decorator that logs and swallows state transition failures.

  Wraps an action so that InvalidTransition errors and Datastore transaction
  failures are logged instead of propagating; the wrapper returns None in
  those cases.
  """
  @functools.wraps(wrapped_action)
  def _Wrapper(*args, **kwargs):
    try:
      result = wrapped_action(*args, **kwargs)
    except InvalidTransition as error:
      logging.error('State transition failed: %s', error)
      return None
    except db.TransactionFailedError as error:
      logging.error('Transaction failed: %s', error)
      return None
    return result
  return _Wrapper
@ndb.transactional(propagation=ndb.TransactionOptions.INDEPENDENT, retries=0)
def AppendTasklog(job, task_id, message, payload):
  """Transactionally append a TaskLog entry under the given task."""
  parent_key = ndb.Key(Task, task_id, parent=job.key)
  TaskLog(parent=parent_key, message=message, payload=payload).put()
@ndb.transactional(propagation=ndb.TransactionOptions.INDEPENDENT, retries=0)
def _LoadTaskGraph(job):
  """Fetch all tasks for a job and reconstitute them into a task graph.

  A task is considered "terminal" when no other task lists it as a
  dependency.
  """
  with timing.WallTimeLogger('ExecutionEngine:_LoadTaskGraph'):
    tasks = Task.query(ancestor=job.key).fetch()
    # Any key appearing in some dependency list has dependents, so it cannot
    # be terminal.
    keys_with_dependents = set(
        key for task in tasks for key in task.dependencies)
    return ReconstitutedTaskGraph(
        terminal_tasks=[
            task.key for task in tasks if task.key not in keys_with_dependents
        ],
        tasks={task.key: task for task in tasks})
class NoopAction(object):
  """Placeholder action that does nothing when invoked.

  Used to seed the evaluation loop with a first, side-effect-free action.
  """

  def __str__(self):
    return 'NoopAction()'

  def __call__(self, _):
    # Deliberately ignores the accumulator.
    pass
@ndb.non_transactional
@timing.TimeWall('ExecutionEngine:Evaluate')
def Evaluate(job, event, evaluator):
"""Applies an evaluator given a task in the task graph and an event as input.
This function implements a depth-first search traversal of the task graph and
applies the `evaluator` given a task and the event input in post-order
traversal. We start the DFS from the terminal tasks (those that don't have
dependencies) and call the `evaluator` function with a representation of the
task in the graph, an `event` as input, and an accumulator argument.
The `evaluator` must be a callable which accepts three arguments:
- task: an InMemoryTask instance, representing a task in the graph.
- event: an object whose shape/type is defined by the caller of the
`Evaluate` function and that the evaluator can handle.
- accumulator: a dictionary which is mutable which is valid in the scope of
a traversal of the graph.
The `evaluator` must return either None or an iterable of callables which take
a single argument, which is the accumulator at the end of a traversal.
Events are free-form but usually are dictionaries which constitute inputs that
are external to the task graph evaluation. This could model events in an
event-driven evaluation of tasks, or synthetic inputs to the system. It is
more important that the `event` information is known to the evaluator
implementation, and is provided as-is to the evaluator in this function.
The Evaluate function will keep iterating while there are actions still being
produced by the evaluator. When there are no more actions to run, the Evaluate
function will return the most recent traversal's accumulator.
"""
if job is None:
raise ValueError('job must not be None.')
accumulator = {}
actions = [NoopAction()]
while actions:
for action in actions:
logging.debug('Running action: %s', action)
# Each action should be a callable which takes the accumulator as an
# input. We want to run each action in their own transaction as well.
# This must not be called in a transaction.
with timing.WallTimeLogger('ExecutionEngine:ActionRunner<%s>' %
(type(action).__name__,)):
action(accumulator)
# Clear the actions and accumulator for this traversal.
del actions[:]
accumulator.clear()
# Load the graph transactionally.
graph = _LoadTaskGraph(job)
if not graph.tasks:
logging.debug('Task graph empty for job %s', job.job_id)
return
# First get all the "terminal" tasks, and traverse the dependencies in a
# depth-first-search.
task_stack = [graph.tasks[task] for task in graph.terminal_tasks]
# If the stack is empty, we should start at an arbitrary point.
if not task_stack:
task_stack = [graph.tasks.values()[0]]
vertex_states = {}
while task_stack:
task = task_stack[-1]
state = vertex_states.get(task.key, NOT_EVALUATED)
if state == CHILDREN_PENDING:
| |
import os
import subprocess
import numpy as np
import matplotlib.pyplot as pyplot
import matplotlib.cm as cm
from matplotlib.colors import Normalize
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
from simtk import unit
import openmmtools
from cg_openmm.utilities.util import set_box_vectors, get_box_vectors
from simtk.openmm.app.pdbfile import PDBFile
from simtk.openmm.app.dcdfile import DCDFile
from mdtraj.formats import PDBTrajectoryFile
from mdtraj import Topology, Trajectory
from pymbar import timeseries
from scipy.special import erf
from scipy.optimize import minimize_scalar
import time
from openmmtools.multistate import MultiStateReporter, MultiStateSampler, ReplicaExchangeSampler
from openmmtools.multistate import ReplicaExchangeAnalyzer
# Silence the citation banner that openmmtools samplers print on creation.
MultiStateSampler._global_citation_silence = True
# Molar gas constant R expressed in kJ/(K*mol); conventionally named kB here
# since energies are handled in per-mole units.
kB = (unit.MOLAR_GAS_CONSTANT_R).in_units_of(unit.kilojoule / (unit.kelvin * unit.mole))
def make_replica_dcd_files(
    topology, timestep=5*unit.femtosecond, time_interval=200,
    output_dir="output", output_data="output.nc", checkpoint_data="output_checkpoint.nc",
    frame_begin=0, frame_stride=1):
    """
    Make dcd files from replica exchange simulation trajectory data.

    :param topology: OpenMM Topology
    :type topology: Topology
    :param timestep: Time step used in the simulation (default=5*unit.femtosecond)
    :type timestep: Quantity (float * simtk.unit)
    :param time_interval: frequency, in number of time steps, at which positions were recorded (default=200)
    :type time_interval: int
    :param output_dir: path to which we will write the output (default='output')
    :type output_dir: str
    :param output_data: name of output .nc data file (default='output.nc')
    :type output_data: str
    :param checkpoint_data: name of checkpoint .nc data file (default='output_checkpoint.nc')
    :type checkpoint_data: str
    :param frame_begin: Frame at which to start writing the dcd trajectory (default=0)
    :type frame_begin: int
    :param frame_stride: advance by this many time intervals when writing dcd trajectories (default=1)
    :type frame_stride: int

    :returns:
       - file_list ( List( str ) ) - A list of names for the files that were written
    """
    file_list = []
    output_data_path = os.path.join(output_dir, output_data)

    # Get number of replicas:
    reporter = MultiStateReporter(output_data_path, open_mode="r")
    states = reporter.read_thermodynamic_states()[0]
    n_replicas = len(states)

    # Simulation time elapsed between stored frames (ps); loop-invariant.
    time_delta_ps = (timestep*time_interval).value_in_unit(unit.picosecond)

    for replica_index in range(n_replicas):
        replica_positions = extract_trajectory(topology, replica_index=replica_index,
            output_data=output_data_path, checkpoint_data=checkpoint_data,
            frame_begin=frame_begin, frame_stride=frame_stride)

        n_frames_tot = replica_positions.shape[0]

        # Determine simulation time (in ps) for each frame:
        traj_times = np.linspace(
            frame_begin*time_delta_ps,
            (frame_begin+frame_stride*(n_frames_tot-1))*time_delta_ps,
            num=n_frames_tot,
        )

        file_name = f"{output_dir}/replica_{replica_index+1}.dcd"

        # Trajectories are written in nanometers:
        replica_traj = Trajectory(
            replica_positions,
            Topology.from_openmm(topology),
            time=traj_times,
        )
        replica_traj.save_dcd(file_name)

        # Fix: record each written file; previously file_list was always
        # returned empty.
        file_list.append(file_name)

    return file_list
def make_replica_pdb_files(
    topology, output_dir="output", output_data="output.nc", checkpoint_data="output_checkpoint.nc",
    frame_begin=0, frame_stride=1):
    """
    Make pdb files from replica exchange simulation trajectory data.

    :param topology: OpenMM Topology
    :type topology: Topology
    :param output_dir: path to which we will write the output (default='output')
    :type output_dir: str
    :param output_data: name of output .nc data file (default='output.nc')
    :type output_data: str
    :param checkpoint_data: name of checkpoint .nc data file (default='output_checkpoint.nc')
    :type checkpoint_data: str
    :param frame_begin: Frame at which to start writing the pdb trajectory (default=0)
    :type frame_begin: int
    :param frame_stride: advance by this many frames when writing pdb trajectories (default=1)
    :type frame_stride: int

    :returns:
       - file_list ( List( str ) ) - A list of names for the files that were written
    """
    file_list = []
    output_data_path = os.path.join(output_dir, output_data)

    # Get number of replicas:
    reporter = MultiStateReporter(output_data_path, open_mode="r")
    states = reporter.read_thermodynamic_states()[0]
    n_replicas = len(states)

    for replica_index in range(n_replicas):
        replica_positions = extract_trajectory(topology, replica_index=replica_index,
            output_data=output_data_path, checkpoint_data=checkpoint_data,
            frame_begin=frame_begin, frame_stride=frame_stride)

        file_name = f"{output_dir}/replica_{replica_index+1}.pdb"

        # Trajectories are written in nanometers:
        replica_traj = Trajectory(
            replica_positions,
            Topology.from_openmm(topology),
        )
        replica_traj.save_pdb(file_name)

        # Fix: record each written file; previously file_list was always
        # returned empty despite the documented return value.
        file_list.append(file_name)

    return file_list
def make_state_dcd_files(
    topology, timestep=5*unit.femtosecond, time_interval=200,
    output_dir="output", output_data="output.nc", checkpoint_data="output_checkpoint.nc",
    frame_begin=0, frame_stride=1, center=True):
    """
    Make dcd files by state from replica exchange simulation trajectory data.
    Note: these are discontinuous trajectories with constant temperature state.

    :param topology: OpenMM Topology
    :type topology: Topology
    :param timestep: Time step used in the simulation (default=5*unit.femtosecond)
    :type timestep: Quantity (float * simtk.unit)
    :param time_interval: frequency, in number of time steps, at which positions were recorded (default=200)
    :type time_interval: int
    :param output_dir: path to which we will write the output (default='output')
    :type output_dir: str
    :param output_data: name of output .nc data file (default='output.nc')
    :type output_data: str
    :param checkpoint_data: name of checkpoint .nc data file (default='output_checkpoint.nc')
    :type checkpoint_data: str
    :param frame_begin: Frame at which to start writing the dcd trajectory (default=0)
    :type frame_begin: int
    :param frame_stride: advance by this many time intervals when writing dcd trajectories (default=1)
    :type frame_stride: int
    :param center: align the center of mass of each structure in the discontinuous state trajectory (default=True)
    :type center: bool

    :returns:
       - file_list ( List( str ) ) - A list of names for the files that were written
    """
    file_list = []
    output_data_path = os.path.join(output_dir, output_data)

    # Get number of states:
    reporter = MultiStateReporter(output_data_path, open_mode="r")
    states = reporter.read_thermodynamic_states()[0]

    # Simulation time elapsed between stored frames (ps); loop-invariant.
    time_delta_ps = (timestep*time_interval).value_in_unit(unit.picosecond)

    for state_index in range(len(states)):
        state_positions = extract_trajectory(topology, state_index=state_index,
            output_data=output_data_path, checkpoint_data=checkpoint_data,
            frame_begin=frame_begin, frame_stride=frame_stride)

        n_frames_tot = state_positions.shape[0]

        # Determine simulation time (in ps) for each frame:
        traj_times = np.linspace(
            frame_begin*time_delta_ps,
            (frame_begin+frame_stride*(n_frames_tot-1))*time_delta_ps,
            num=n_frames_tot,
        )

        file_name = f"{output_dir}/state_{state_index+1}.dcd"

        # Trajectories are written in nanometers:
        state_traj = Trajectory(
            state_positions,
            Topology.from_openmm(topology),
            time=traj_times,
        )

        if center:
            # Align each frame onto the first frame of this state trajectory
            # (superpose modifies state_traj in place).
            state_traj.superpose(state_traj[0])

        state_traj.save_dcd(file_name)

        # Fix: record each written file; previously file_list was always
        # returned empty.
        file_list.append(file_name)

    return file_list
def make_state_pdb_files(
    topology, output_dir="output", output_data="output.nc", checkpoint_data="output_checkpoint.nc",
    frame_begin=0, frame_stride=1, center=True):
    """
    Make pdb files by state from replica exchange simulation trajectory data.
    Note: these are discontinuous trajectories with constant temperature state.

    :param topology: OpenMM Topology
    :type topology: Topology
    :param output_dir: path to which we will write the output (default='output')
    :type output_dir: str
    :param output_data: name of output .nc data file (default='output.nc')
    :type output_data: str
    :param checkpoint_data: name of checkpoint .nc data file (default='output_checkpoint.nc')
    :type checkpoint_data: str
    :param frame_begin: Frame at which to start writing the pdb trajectory (default=0)
    :type frame_begin: int
    :param frame_stride: advance by this many frames when writing pdb trajectories (default=1)
    :type frame_stride: int
    :param center: align the center of mass of each structure in the discontinuous state trajectory (default=True)
    :type center: bool

    :returns:
       - file_list ( List( str ) ) - A list of names for the files that were written
    """
    file_list = []
    output_data_path = os.path.join(output_dir, output_data)

    # Get number of states:
    reporter = MultiStateReporter(output_data_path, open_mode="r")
    states = reporter.read_thermodynamic_states()[0]

    for state_index in range(len(states)):
        state_positions = extract_trajectory(topology, state_index=state_index,
            output_data=output_data_path, checkpoint_data=checkpoint_data,
            frame_begin=frame_begin, frame_stride=frame_stride)

        file_name = f"{output_dir}/state_{state_index+1}.pdb"

        # Trajectories are written in nanometers:
        state_traj = Trajectory(
            state_positions,
            Topology.from_openmm(topology),
        )

        if center:
            # Align each frame onto the first frame of this state trajectory
            # (superpose modifies state_traj in place).
            state_traj.superpose(state_traj[0])

        state_traj.save_pdb(file_name)

        # Fix: record each written file; previously file_list was always
        # returned empty despite the documented return value.
        file_list.append(file_name)

    return file_list
def extract_trajectory(
    topology, output_data="output/output.nc", checkpoint_data="output_checkpoint.nc",
    state_index=None, replica_index=None,
    frame_begin=0, frame_stride=1, frame_end=-1):
    """
    Internal function for extracting a trajectory (by replica or by state) from a .nc file.
    Based on YANK extract_trajectory code.

    Exactly one of `state_index` or `replica_index` should be given.

    :param topology: OpenMM Topology (unused inside this function; kept for API symmetry)
    :param output_data: path to the analysis .nc data file
    :param checkpoint_data: name of the checkpoint .nc file that stores positions
    :param state_index: extract frames belonging to this thermodynamic state
    :param replica_index: extract frames of this replica
    :param frame_begin: first frame to extract (negative counts from the end)
    :param frame_stride: take every frame_stride-th frame
    :param frame_end: last frame to extract (negative counts from the end)
    :returns: (n_frames, n_atoms, 3) float32 numpy array of positions
    """
    reporter = MultiStateReporter(output_data, open_mode='r', checkpoint_storage=checkpoint_data)
    # Get dimensions
    # Positions live in the checkpoint storage file.
    trajectory_storage = reporter._storage_checkpoint
    n_iterations = reporter.read_last_iteration()  # NOTE(review): unused -- consider removing.
    n_frames = trajectory_storage.variables['positions'].shape[0]
    n_atoms = trajectory_storage.variables['positions'].shape[2]
    # Determine frames to extract.
    # Convert negative indices to last indices.
    if frame_begin < 0:
        frame_begin = n_frames + frame_begin
    if frame_end < 0:
        frame_end = n_frames + frame_end + 1
    frame_indices = range(frame_begin, frame_end, frame_stride)
    if len(frame_indices) == 0:
        raise ValueError('No frames selected')
    # Determine the number of frames that the trajectory will have.
    if state_index is None:
        n_trajectory_frames = len(frame_indices)
    else:
        # With SAMS, an iteration can have 0 or more replicas in a given state.
        # Deconvolute state indices.
        state_indices = [None for _ in frame_indices]
        for i, iteration in enumerate(frame_indices):
            replica_indices = reporter._storage_analysis.variables['states'][iteration, :]
            state_indices[i] = np.where(replica_indices == state_index)[0]
        n_trajectory_frames = sum(len(x) for x in state_indices)
    # Initialize positions and box vectors arrays.
    # MDTraj Cython code expects float32 positions.
    positions = np.zeros((n_trajectory_frames, n_atoms, 3), dtype=np.float32)
    # Extract state positions and box vectors.
    if state_index is not None:
        # Extract state positions
        frame_idx = 0
        for i, iteration in enumerate(frame_indices):
            for replica_index in state_indices[i]:
                positions[frame_idx, :, :] = trajectory_storage.variables['positions'][iteration, replica_index, :, :].astype(np.float32)
                frame_idx += 1
    else: # Extract replica positions
        # NOTE(review): if both state_index and replica_index are None, this
        # indexes storage with replica_index=None and will fail -- callers
        # must supply exactly one of the two.
        for i, iteration in enumerate(frame_indices):
            positions[i, :, :] = trajectory_storage.variables['positions'][iteration, replica_index, :, :].astype(np.float32)
    return positions
def process_replica_exchange_data(
output_data="output/output.nc", output_directory="output", series_per_page=4,
write_data_file=True, plot_production_only=False, print_timing=False,
equil_nskip=1, frame_begin=0, frame_end=-1,
):
"""
Read replica exchange simulation data, detect equilibrium and decorrelation time, and plot replica exchange results.
:param output_data: path to | |
Returns: true if one or more instance of the attributeType parameter is applied to this
member; otherwise, false.
"""
pass
    def ToString(self):
        """
        ToString(self: _MethodBase) -> str

        Provides COM objects with version-independent access to the
        System.Object.ToString method.

        Returns: A string that represents the current System.Object.
        """
        # Generated interop stub: the real implementation is supplied by the
        # CLR at runtime.
        pass
    def __eq__(self, *args): #cannot find CLR method
        """ x.__eq__(y) <==> x==y """
        # Generated placeholder; equality is presumably resolved against the
        # underlying CLR object at runtime.
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __str__(self, *args): #cannot find CLR method
        """Return str(self); generated placeholder resolved by the CLR at runtime."""
        pass
Attributes = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.Attributes property.
Get: Attributes(self: _MethodBase) -> MethodAttributes
"""
CallingConvention = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.CallingConvention property.
Get: CallingConvention(self: _MethodBase) -> CallingConventions
"""
DeclaringType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MemberInfo.DeclaringType property.
Get: DeclaringType(self: _MethodBase) -> Type
"""
IsAbstract = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.IsAbstract property.
Get: IsAbstract(self: _MethodBase) -> bool
"""
IsAssembly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.IsAssembly property.
Get: IsAssembly(self: _MethodBase) -> bool
"""
IsConstructor = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.IsConstructor property.
Get: IsConstructor(self: _MethodBase) -> bool
"""
IsFamily = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.IsFamily property.
Get: IsFamily(self: _MethodBase) -> bool
"""
IsFamilyAndAssembly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.IsFamilyAndAssembly property.
Get: IsFamilyAndAssembly(self: _MethodBase) -> bool
"""
IsFamilyOrAssembly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.IsFamilyOrAssembly property.
Get: IsFamilyOrAssembly(self: _MethodBase) -> bool
"""
IsFinal = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.IsFinal property.
Get: IsFinal(self: _MethodBase) -> bool
"""
IsHideBySig = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.IsHideBySig property.
Get: IsHideBySig(self: _MethodBase) -> bool
"""
IsPrivate = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.IsPrivate property.
Get: IsPrivate(self: _MethodBase) -> bool
"""
IsPublic = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.IsPublic property.
Get: IsPublic(self: _MethodBase) -> bool
"""
IsSpecialName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.IsSpecialName property.
Get: IsSpecialName(self: _MethodBase) -> bool
"""
IsStatic = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.IsStatic property.
Get: IsStatic(self: _MethodBase) -> bool
"""
IsVirtual = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.IsVirtual property.
Get: IsVirtual(self: _MethodBase) -> bool
"""
MemberType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MemberInfo.MemberType property.
Get: MemberType(self: _MethodBase) -> MemberTypes
"""
MethodHandle = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MethodBase.MethodHandle property.
Get: MethodHandle(self: _MethodBase) -> RuntimeMethodHandle
"""
Name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MemberInfo.Name property.
Get: Name(self: _MethodBase) -> str
"""
ReflectedType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MemberInfo.ReflectedType property.
Get: ReflectedType(self: _MethodBase) -> Type
"""
class _MethodBuilder:
    """ Exposes the System.Reflection.Emit.MethodBuilder class to unmanaged code. """
    # Generated interop stub: method bodies are placeholders; the real
    # implementations are supplied by the CLR at runtime.
    def GetIDsOfNames(self, riid, rgszNames, cNames, lcid, rgDispId):
        """
        GetIDsOfNames(self: _MethodBuilder, riid: Guid, rgszNames: IntPtr, cNames: UInt32, lcid: UInt32, rgDispId: IntPtr) -> Guid

        Maps a set of names to a corresponding set of dispatch identifiers.

        riid: Reserved for future use. Must be IID_NULL.
        rgszNames: An array of names to be mapped.
        cNames: The count of the names to be mapped.
        lcid: The locale context in which to interpret the names.
        rgDispId: An array allocated by the caller that receives the identifiers corresponding to
        the names.
        """
        pass
    def GetTypeInfo(self, iTInfo, lcid, ppTInfo):
        """
        GetTypeInfo(self: _MethodBuilder, iTInfo: UInt32, lcid: UInt32, ppTInfo: IntPtr)

        Retrieves the type information for an object, which can be used to get the type
        information for an interface.

        iTInfo: The type information to return.
        lcid: The locale identifier for the type information.
        ppTInfo: A pointer to the requested type information object.
        """
        pass
    def GetTypeInfoCount(self, pcTInfo):
        """
        GetTypeInfoCount(self: _MethodBuilder) -> UInt32

        Retrieves the number of type information interfaces that an object provides
        (either 0 or 1).
        """
        pass
    def Invoke(self, dispIdMember, riid, lcid, wFlags, pDispParams, pVarResult, pExcepInfo, puArgErr):
        """
        Invoke(self: _MethodBuilder, dispIdMember: UInt32, riid: Guid, lcid: UInt32, wFlags: Int16, pDispParams: IntPtr, pVarResult: IntPtr, pExcepInfo: IntPtr, puArgErr: IntPtr) -> Guid

        Provides access to properties and methods exposed by an object.

        dispIdMember: An identifier of a member.
        riid: Reserved for future use. Must be IID_NULL.
        lcid: The locale context in which to interpret arguments.
        wFlags: Flags describing the context of the call.
        pDispParams: A pointer to a structure containing an array of arguments, an array of argument
        DISPIDs for named arguments, and counts for the number of elements in the
        arrays.
        pVarResult: A pointer to the location where the result will be stored.
        pExcepInfo: A pointer to a structure that contains exception information.
        puArgErr: The index of the first argument that has an error.
        """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class _MethodInfo:
""" Exposes the public members of the System.Reflection.MethodInfo class to unmanaged code. """
    def Equals(self, other):
        """
        Equals(self: _MethodInfo, other: object) -> bool

        Provides COM objects with version-independent access to the
        System.Object.Equals(System.Object) method.

        other: The System.Object to compare with the current System.Object.

        Returns: true if the specified System.Object is equal to the current System.Object;
        otherwise, false.
        """
        # Generated interop stub: the real implementation is supplied by the
        # CLR at runtime.
        pass
    def GetBaseDefinition(self):
        """
        GetBaseDefinition(self: _MethodInfo) -> MethodInfo

        Provides COM objects with version-independent access to the
        System.Reflection.MethodInfo.GetBaseDefinition method.

        Returns: A System.Reflection.MethodInfo object for the first implementation of this
        method.
        """
        # Generated interop stub: the real implementation is supplied by the
        # CLR at runtime.
        pass
    def GetCustomAttributes(self, *__args):
        """
        Two overloads (dispatched by argument count/types):

        GetCustomAttributes(self: _MethodInfo, inherit: bool) -> Array[object]

        Provides COM objects with version-independent access to the
        System.Reflection.MemberInfo.GetCustomAttributes(System.Boolean) method.

        inherit: true to search this member's inheritance chain to find the attributes;
        otherwise, false.

        Returns: An array that contains all the custom attributes, or an array with zero (0)
        elements if no attributes are defined.

        GetCustomAttributes(self: _MethodInfo, attributeType: Type, inherit: bool) -> Array[object]

        Provides COM objects with version-independent access to the
        System.Reflection.MemberInfo.GetCustomAttributes(System.Type,System.Boolean)
        method.

        attributeType: The type of attribute to search for. Only attributes that are assignable to
        this type are returned.
        inherit: true to search this member's inheritance chain to find the attributes;
        otherwise, false.

        Returns: An array of custom attributes applied to this member, or an array with zero (0)
        elements if no attributes have been applied.
        """
        # Generated interop stub: the real implementation is supplied by the
        # CLR at runtime.
        pass
def GetHashCode(self):
"""
GetHashCode(self: _MethodInfo) -> int
Provides COM objects with version-independent access to the
System.Object.GetHashCode method.
Returns: The hash code for the current | |
from typing import List, Optional, Tuple, Union
import numpy as np
from transformers import *
from summarizer.bert_parent import BertParent
from summarizer.cluster_features import ClusterFeatures
from summarizer.sentence_handler import SentenceHandler
class ModelProcessor(object):
    """Parent extractive summarizer.

    Embeds each sentence with a BERT-family model, clusters the sentence
    embeddings, and picks the sentences closest to the cluster centroids as
    the summary. New summarization models should inherit from this class.
    """

    # Reducers for ``run_embeddings(aggregate=...)``; each is applied along
    # axis 0 of the matrix of summarized sentence embeddings.
    aggregate_map = {
        'mean': np.mean,
        'min': np.min,
        'median': np.median,
        'max': np.max
    }

    def __init__(
        self,
        model: str = 'bert-large-uncased',
        custom_model: PreTrainedModel = None,
        custom_tokenizer: PreTrainedTokenizer = None,
        hidden: Union[List[int], int] = -2,
        reduce_option: str = 'mean',
        sentence_handler: SentenceHandler = SentenceHandler(),
        random_state: int = 12345,
        hidden_concat: bool = False
    ):
        """
        This is the parent Bert Summarizer model. New methods should implement this class.

        :param model: This parameter is associated with the inherit string parameters from the transformers library.
        :param custom_model: If you have a pre-trained model, you can add the model class here.
        :param custom_tokenizer: If you have a custom tokenizer, you can add the tokenizer here.
        :param hidden: This signifies which layer(s) of the BERT model you would like to use as embeddings.
        :param reduce_option: Given the output of the bert model, this param determines how you want to reduce results.
        :param sentence_handler: The handler to process sentences. If want to use coreference, instantiate and pass a
            CoreferenceHandler instance.
        :param random_state: The random state to reproduce summarizations.
        :param hidden_concat: Whether or not to concat multiple hidden layers.
        """
        # Seed numpy so clustering (and anything downstream using np.random)
        # is reproducible for a given random_state.
        np.random.seed(random_state)
        self.model = BertParent(model, custom_model, custom_tokenizer)
        self.hidden = hidden
        self.reduce_option = reduce_option
        self.sentence_handler = sentence_handler
        self.random_state = random_state
        self.hidden_concat = hidden_concat

    def _embed_sentences(self, body: str, min_length: int, max_length: int):
        """Split *body* into candidate sentences and embed them; returns (sentences, hidden)."""
        sentences = self.sentence_handler(body, min_length, max_length)
        hidden = self.model(sentences, self.hidden, self.reduce_option, hidden_concat=self.hidden_concat)
        return sentences, hidden

    def cluster_runner(
        self,
        content: List[str],
        ratio: float = 0.2,
        algorithm: str = 'kmeans',
        use_first: bool = True,
        num_sentences: int = None
    ) -> Tuple[List[str], np.ndarray]:
        """
        Runs the cluster algorithm based on the hidden state. Returns both the embeddings and sentences.

        :param content: Content list of sentences.
        :param ratio: The ratio to use for clustering.
        :param algorithm: Type of algorithm to use for clustering.
        :param use_first: Whether to use first sentence (helpful for news stories, etc).
        :param num_sentences: Number of sentences to use for summarization (overrides ratio).
        :return: A tuple of summarized sentences and embeddings.
        """
        hidden = self.model(content, self.hidden, self.reduce_option, hidden_concat=self.hidden_concat)
        hidden_args = ClusterFeatures(hidden, algorithm, random_state=self.random_state).cluster(ratio, num_sentences)

        # Optionally force the first sentence into the summary; useful for
        # news-style texts where the lead sentence carries the gist.
        if use_first:
            if not hidden_args:
                hidden_args.append(0)
            elif hidden_args[0] != 0:
                hidden_args.insert(0, 0)

        sentences = [content[j] for j in hidden_args]
        embeddings = np.asarray([hidden[j] for j in hidden_args])
        return sentences, embeddings

    def __run_clusters(
        self,
        content: List[str],
        ratio: float = 0.2,
        algorithm: str = 'kmeans',
        use_first: bool = True,
        num_sentences: int = None
    ) -> List[str]:
        """
        Runs clusters and returns sentences.

        :param content: The content of sentences.
        :param ratio: Ratio to use for clustering.
        :param algorithm: Algorithm selection for clustering.
        :param use_first: Whether to use first sentence.
        :param num_sentences: Number of sentences. Overrides ratio.
        :return: summarized sentences
        """
        sentences, _ = self.cluster_runner(content, ratio, algorithm, use_first, num_sentences)
        return sentences

    def __retrieve_summarized_embeddings(
        self,
        content: List[str],
        ratio: float = 0.2,
        algorithm: str = 'kmeans',
        use_first: bool = True,
        num_sentences: int = None
    ) -> np.ndarray:
        """
        Retrieves embeddings of the summarized sentences.

        :param content: The content of sentences.
        :param ratio: Ratio to use for clustering.
        :param algorithm: Algorithm selection for clustering.
        :param use_first: Whether to use first sentence.
        :param num_sentences: Number of sentences. Overrides ratio.
        :return: Summarized embeddings
        """
        _, embeddings = self.cluster_runner(content, ratio, algorithm, use_first, num_sentences)
        return embeddings

    def calculate_elbow(
        self,
        body: str,
        algorithm: str = 'kmeans',
        min_length: int = 40,
        max_length: int = 600,
        k_max: int = None,
    ) -> List[float]:
        """
        Calculates elbow across the clusters.

        :param body: The input body to summarize.
        :param algorithm: The algorithm to use for clustering.
        :param min_length: The min length to use.
        :param max_length: The max length to use.
        :param k_max: The maximum number of clusters to search.
        :return: List of elbow inertia values.
        """
        sentences, hidden = self._embed_sentences(body, min_length, max_length)

        if k_max is None:
            k_max = len(sentences) - 1

        return ClusterFeatures(hidden, algorithm, random_state=self.random_state).calculate_elbow(k_max)

    def calculate_optimal_k(
        self,
        body: str,
        algorithm: str = 'kmeans',
        min_length: int = 40,
        max_length: int = 600,
        k_max: int = None
    ):
        """
        Calculates the optimal Elbow K.

        :param body: The input body to summarize.
        :param algorithm: The algorithm to use for clustering.
        :param min_length: The min length to use.
        :param max_length: The max length to use.
        :param k_max: The maximum number of clusters to search.
        :return: The optimal cluster count found by the elbow search.
        """
        sentences, hidden = self._embed_sentences(body, min_length, max_length)

        if k_max is None:
            k_max = len(sentences) - 1

        return ClusterFeatures(hidden, algorithm, random_state=self.random_state).calculate_optimal_cluster(k_max)

    def run_embeddings(
        self,
        body: str,
        ratio: float = 0.2,
        min_length: int = 40,
        max_length: int = 600,
        use_first: bool = True,
        algorithm: str = 'kmeans',
        num_sentences: int = None,
        aggregate: str = None
    ) -> Optional[np.ndarray]:
        """
        Preprocesses the sentences, runs the clusters to find the centroids, then combines the embeddings.

        :param body: The raw string body to process
        :param ratio: Ratio of sentences to use
        :param min_length: Minimum length of sentence candidates to utilize for the summary.
        :param max_length: Maximum length of sentence candidates to utilize for the summary
        :param use_first: Whether or not to use the first sentence
        :param algorithm: Which clustering algorithm to use. (kmeans, gmm)
        :param num_sentences: Number of sentences to use. Overrides ratio.
        :param aggregate: One of mean, median, max, min. Applied on zero axis
        :return: A summary embedding, or None when no sentence passed the length filters.
        """
        sentences = self.sentence_handler(body, min_length, max_length)

        if sentences:
            embeddings = self.__retrieve_summarized_embeddings(sentences, ratio, algorithm, use_first, num_sentences)

            if aggregate is not None:
                assert aggregate in ['mean', 'median', 'max', 'min'], "aggregate must be mean, min, max, or median"
                embeddings = self.aggregate_map[aggregate](embeddings, axis=0)

            return embeddings

        return None

    def run(
        self,
        body: str,
        ratio: float = 0.2,
        min_length: int = 40,
        max_length: int = 600,
        use_first: bool = True,
        algorithm: str = 'kmeans',
        num_sentences: int = None,
        return_as_list: bool = False
    ) -> Union[List, str]:
        """
        Preprocesses the sentences, runs the clusters to find the centroids, then combines the sentences.

        :param body: The raw string body to process
        :param ratio: Ratio of sentences to use
        :param min_length: Minimum length of sentence candidates to utilize for the summary.
        :param max_length: Maximum length of sentence candidates to utilize for the summary
        :param use_first: Whether or not to use the first sentence
        :param algorithm: Which clustering algorithm to use. (kmeans, gmm)
        :param num_sentences: Number of sentences to use (overrides ratio).
        :param return_as_list: Whether or not to return sentences as list.
        :return: A summary sentence (or a list of sentences when return_as_list is True).
        """
        sentences = self.sentence_handler(body, min_length, max_length)

        if sentences:
            sentences = self.__run_clusters(sentences, ratio, algorithm, use_first, num_sentences)

        if return_as_list:
            return sentences

        return ' '.join(sentences)

    def __call__(
        self,
        body: str,
        ratio: float = 0.2,
        min_length: int = 40,
        max_length: int = 600,
        use_first: bool = True,
        algorithm: str = 'kmeans',
        num_sentences: int = None,
        return_as_list: bool = False,
    ) -> Union[List, str]:
        """
        (utility that wraps around the run function)

        Preprocesses the sentences, runs the clusters to find the centroids, then combines the sentences.

        :param body: The raw string body to process.
        :param ratio: Ratio of sentences to use.
        :param min_length: Minimum length of sentence candidates to utilize for the summary.
        :param max_length: Maximum length of sentence candidates to utilize for the summary.
        :param use_first: Whether or not to use the first sentence.
        :param algorithm: Which clustering algorithm to use. (kmeans, gmm)
        :param num_sentences: Number of sentences to use (overrides ratio).
        :param return_as_list: Whether or not to return sentences as list.
        :return: A summary sentence (or a list of sentences when return_as_list is True).
        """
        return self.run(
            body, ratio, min_length, max_length, algorithm=algorithm, use_first=use_first, num_sentences=num_sentences,
            return_as_list=return_as_list
        )
class Summarizer(ModelProcessor):
def __init__(
self,
model: str = 'bert-large-uncased',
custom_model: PreTrainedModel = None,
custom_tokenizer: PreTrainedTokenizer = None,
hidden: Union[List[int], int] = -2,
reduce_option: str = 'mean',
sentence_handler: SentenceHandler = SentenceHandler(),
random_state: int = 12345,
hidden_concat: bool = False
):
"""
This is the main Bert Summarizer class.
:param model: This parameter is associated with the inherit string parameters from the transformers library.
:param custom_model: If you have a pre-trained model, you can add the model class here.
:param custom_tokenizer: If you have | |
<reponame>dorinapetra/probing
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2019 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
import os
import gzip
import logging
import numpy as np
import unidecode
from transformers import AutoTokenizer
from probing.data.base_data import BaseDataset, DataFields
class WLSTMFields(DataFields):
    """Field layout for word-level LSTM probing samples: the probe input is
    the tokenized target word itself."""
    _fields = (
        'probe_target', 'label', 'probe_target_len', 'target_idx',
        'raw_idx', 'raw_target', 'raw_sentence',)
    # Generic names used by the training loop, mapped onto concrete fields.
    _alias = {
        'input': 'probe_target',
        'input_len': 'probe_target_len',
    }
    # Fields for which BaseDataset builds a vocabulary / applies padding.
    needs_vocab = ('probe_target', 'label')
    needs_padding = ('probe_target', )
class Word2vecProberFields(DataFields):
    """Field layout for static-embedding (word2vec/fasttext) probing samples;
    the input is a precomputed word vector, so only labels need a vocab."""
    _fields = (
        'sentence', 'probe_target', 'probe_target_idx', 'label')
    # Generic name used by the training loop, mapped onto the target field.
    _alias = {
        'input': 'probe_target',
    }
    # Only the label needs a vocabulary; the input is already a float vector.
    needs_vocab = ('label',)
class TokenInSequenceProberFields(DataFields):
    """Field layout for probing one token inside a subword-tokenized
    sentence (used by SentenceProberDataset)."""
    _fields = (
        'raw_sentence', 'raw_target', 'raw_idx', 'label',
        'subword_tokens', 'input_len', 'probe_target', 'token_starts',
        'probe_target_idx',
    )
    # Generic name used by the training loop, mapped onto the subword field.
    _alias = {
        'input': 'subword_tokens'
    }
    # Fields for which BaseDataset builds vocabularies, applies padding, and
    # adds constant symbols (special tokens).
    needs_vocab = ('subword_tokens', 'label')
    needs_padding = ('subword_tokens', )
    needs_constants = ('subword_tokens', )
class SLSTMFields(DataFields):
    """Field layout for sentence-level LSTM probing samples: the input is the
    whole sentence plus the index of the probed position."""
    _fields = (
        'raw_sentence', 'raw_target', 'raw_idx',
        'input', 'input_len', 'target_idx', 'label',
    )
    # Fields for which BaseDataset builds vocabularies, adds constant symbols
    # (special tokens), and applies padding.
    needs_vocab = ('input', 'label', )
    needs_constants = ('input', )
    needs_padding = ('input', )
class SequenceClassificationWithSubwordsDataFields(DataFields):
    """Field layout for sequence tagging over subword-tokenized sentences:
    one label per original token, inputs are subword ids."""
    _fields = (
        'raw_sentence', 'labels',
        'sentence_len', 'tokens', 'sentence_subword_len', 'token_starts',
    )
    # Generic names used by the training loop, mapped onto concrete fields.
    _alias = {
        'input': 'tokens',
        'input_len': 'sentence_subword_len',
        'label': 'labels',
    }
    # Fields for which BaseDataset builds vocabularies, applies padding, and
    # adds constant symbols (special tokens).
    needs_vocab = ('tokens', 'labels')
    needs_padding = ('tokens', )
    needs_constants = ('tokens', )
class Embedding:
    """Word-embedding matrix loaded from a text file in word2vec format
    (one ``word v1 v2 ...`` entry per line, optional ``count dim`` header).

    :param embedding_file: Path to the vectors file; ``.gz`` files are
        decompressed transparently.
    :param filter: Optional collection of words; when given, only vectors for
        these words are loaded (saves memory). Note: shadows the builtin
        ``filter`` — kept for backward compatibility with callers using the
        keyword.
    """

    def __init__(self, embedding_file, filter=None):
        self.filter_ = filter
        # Single code path for gzipped and plain files; both are opened in
        # text mode. UTF-8 is made explicit so loading does not depend on the
        # platform's default encoding.
        opener = gzip.open if embedding_file.endswith('.gz') else open
        with opener(embedding_file, 'rt', encoding='utf-8') as f:
            self.load_stream(f)

    def load_stream(self, stream):
        """Populate ``self.vocab`` (word -> row) and ``self.mtx`` from a
        text stream of word2vec-format lines."""
        vectors = []
        self.vocab = {}
        for line in stream:
            fd = line.strip().split(" ")
            # A two-field line is the word2vec header (vocab size, dim).
            if len(fd) == 2:
                continue
            word = fd[0]
            if self.filter_ and word not in self.filter_:
                continue
            self.vocab[word] = len(vectors)
            vectors.append([float(v) for v in fd[1:]])
        self.mtx = np.array(vectors)

    def __len__(self):
        """Number of loaded word vectors."""
        return self.mtx.shape[0]

    def __getitem__(self, key):
        """Return the vector for *key*; unknown words fall back to row 0."""
        if key not in self.vocab:
            return self.mtx[0]
        return self.mtx[self.vocab[key]]

    @property
    def embedding_dim(self):
        """Dimensionality of the loaded vectors."""
        return self.mtx.shape[1]
class Word2vecProberDataset(BaseDataset):
    """Probing dataset that represents each target word by a static
    (word2vec/fasttext-style) embedding vector instead of token ids."""

    # Field container used by BaseDataset for raw samples and batches.
    datafield_class = Word2vecProberFields

    def to_idx(self):
        """Build model inputs: look up each target word's embedding vector
        and map labels to vocabulary indices."""
        # Restrict the embedding table to words that actually occur.
        vocab = set(r.probe_target for r in self.raw)
        if self.config.embedding == 'discover':
            # Derive the language from the train-file path
            # (.../<language>/<file>) and point at the shared fasttext
            # vectors under $HOME; the resolved path is written back into
            # the config so it is saved with the experiment.
            language = self.config.train_file.split("/")[-2]
            emb_fn = os.path.join(os.environ['HOME'], 'resources',
                                  'fasttext', language, 'common.vec')
            self.config.embedding = emb_fn
        else:
            emb_fn = self.config.embedding
        self.embedding = Embedding(emb_fn, filter=vocab)
        self.embedding_size = self.embedding.embedding_dim
        word_vecs = []
        labels = []
        for r in self.raw:
            word_vecs.append(self.embedding[r.probe_target])
            if r.label:
                labels.append(self.vocabs.label[r.label])
            else:
                # Unlabeled sample (e.g. test data to be decoded later).
                labels.append(None)
        self.mtx = self.datafield_class(
            probe_target=word_vecs,
            label=labels
        )

    def extract_sample_from_line(self, line):
        """Parse one TSV line: sentence, target word, target index[, label]."""
        fd = line.rstrip("\n").split("\t")
        sent, target, idx = fd[:3]
        if len(fd) > 3:
            label = fd[3]
        else:
            label = None
        return self.datafield_class(
            sentence=sent,
            probe_target=target,
            probe_target_idx=int(idx),
            label=label
        )

    def print_sample(self, sample, stream):
        """Write one sample back in the input TSV format."""
        stream.write("{}\t{}\t{}\t{}\n".format(
            sample.sentence, sample.probe_target,
            sample.probe_target_idx, sample.label
        ))

    def decode(self, model_output):
        """Overwrite each raw sample's label with the argmax prediction."""
        for i, sample in enumerate(self.raw):
            output = model_output[i].argmax().item()
            sample.label = self.vocabs.label.inv_lookup(output)
class WLSTMDataset(BaseDataset):
    """Word-level LSTM probing dataset: the probe input is the target word
    itself, tokenized into subwords (HuggingFace tokenizer) or characters."""

    # Field container used by BaseDataset for raw samples and batches.
    datafield_class = WLSTMFields

    def __init__(self, config, stream_or_file, **kwargs):
        if config.external_tokenizer:
            # NOTE(review): the flag checked is ``external_tokenizer`` but
            # the tokenizer is loaded from ``model_name``; SLSTMDataset uses
            # ``external_tokenizer`` for both — confirm this asymmetry is
            # intentional.
            lower = 'uncased' in config.model_name
            self.tokenizer = AutoTokenizer.from_pretrained(
                config.model_name, do_lower_case=lower)
        else:
            self.tokenizer = None
        super().__init__(config, stream_or_file, **kwargs)

    def extract_sample_from_line(self, line):
        """Parse one TSV line (sentence, target, index[, label]) and
        tokenize the target word into subwords or characters."""
        fd = line.rstrip("\n").split("\t")
        if len(fd) > 3:
            sent, target, idx, label = fd[:4]
        else:
            sent, target, idx = fd[:3]
            label = None
        idx = int(idx)
        if self.tokenizer:
            tokens = self.tokenizer.tokenize(target)
        else:
            # No external tokenizer: fall back to character tokens.
            tokens = list(target)
        # Probe either the first or the last token of the target word.
        if self.config.probe_first:
            target_idx = 0
        else:
            target_idx = len(tokens) - 1
        return self.datafield_class(
            raw_sentence=sent,
            probe_target=tokens,
            target_idx=target_idx,
            raw_idx=idx,
            raw_target=target,
            input_len=len(tokens),
            label=label,
        )

    def print_sample(self, sample, stream):
        """Write one sample back in the input TSV format."""
        stream.write("{}\t{}\t{}\t{}\n".format(
            sample.raw_sentence, sample.raw_target, sample.raw_idx, sample.label
        ))

    def decode(self, model_output):
        """Overwrite each raw sample's label with the argmax prediction."""
        for i, sample in enumerate(self.raw):
            output = model_output[i].argmax().item()
            sample.label = self.vocabs.label.inv_lookup(output)
class SLSTMDataset(BaseDataset):
    """Sentence-level LSTM probing dataset: the input is the whole sentence
    (subwords or characters) plus the index of the probed position."""

    # Field container used by BaseDataset for raw samples and batches.
    datafield_class = SLSTMFields

    def __init__(self, config, stream_or_file, **kwargs):
        # Optional HuggingFace tokenizer; otherwise character tokenization.
        if config.external_tokenizer:
            lower = 'uncased' in config.external_tokenizer
            self.tokenizer = AutoTokenizer.from_pretrained(
                config.external_tokenizer, do_lower_case=lower)
        else:
            self.tokenizer = None
        super().__init__(config, stream_or_file, **kwargs)

    def extract_sample_from_line(self, line):
        """Parse one TSV line (sentence, target, word index[, label]) and
        compute the probe position inside the tokenized sentence.

        NOTE(review): ``target_idx`` is only assigned when ``raw_idx`` is a
        valid word index of the sentence; an out-of-range index would raise
        ``UnboundLocalError`` — confirm inputs are always well-formed.
        """
        fd = line.rstrip("\n").split("\t")
        raw_sent, raw_target, raw_idx = fd[:3]
        if len(fd) > 3:
            label = fd[3]
        else:
            label = None
        raw_idx = int(raw_idx)
        if self.tokenizer:
            words = raw_sent.split(' ')
            subwords = []
            for idx, word in enumerate(words):
                if self.config.probe_first:
                    # Probe the first subword of the target word.
                    if idx == raw_idx:
                        target_idx = len(subwords)
                    subwords.extend(self.tokenizer.tokenize(word))
                else:
                    # Probe the last subword of the target word.
                    subwords.extend(self.tokenizer.tokenize(word))
                    if idx == raw_idx:
                        target_idx = len(subwords) - 1
            input = subwords
        else:
            # Character tokenization: each separating space is one character,
            # hence the ``+ raw_idx`` offset for the preceding spaces.
            input = list(raw_sent)
            words = raw_sent.split(' ')
            if self.config.probe_first:
                target_idx = sum(len(w) for w in words[:raw_idx]) + raw_idx
            else:
                target_idx = sum(len(w) for w in words[:raw_idx]) + raw_idx + len(raw_target) - 1
        return self.datafield_class(
            raw_sentence=raw_sent,
            raw_target=raw_target,
            raw_idx=raw_idx,
            input=input,
            input_len=len(input),
            target_idx=target_idx,
            label=label
        )

    def to_idx(self):
        """Numericalize, then shift positions/lengths by the leading and
        trailing constant symbols (presumably BOS/EOS added for the
        ``needs_constants`` field — confirm in BaseDataset)."""
        super().to_idx()
        self.mtx.target_idx = np.array(self.mtx.target_idx) + 1
        self.mtx.input_len = np.array(self.mtx.input_len) + 2

    def decode(self, model_output):
        """Overwrite each raw sample's label with the argmax prediction."""
        for i, sample in enumerate(self.raw):
            output = np.argmax(model_output[i])
            self.raw[i].label = self.vocabs.label.inv_lookup(output)

    def print_sample(self, sample, stream):
        """Write one sample back in the input TSV format."""
        stream.write("{}\t{}\t{}\t{}\n".format(
            sample.raw_sentence, sample.raw_target, sample.raw_idx, sample.label
        ))
class SequenceClassificationWithSubwords(BaseDataset):
    """Sequence-tagging dataset over subword-tokenized sentences, read from
    CoNLL-style input (one ``token\\tlabel`` line per token, blank line
    between sentences)."""

    # Field container used by BaseDataset for raw samples and batches.
    datafield_class = SequenceClassificationWithSubwordsDataFields

    def __init__(self, config, stream_or_file, max_samples=None,
                 share_vocabs_with=None, is_unlabeled=False):
        # Cache the tokenizer in globals() so repeated dataset instantiations
        # for the same model do not reload it.
        global_key = f'{config.model_name}_tokenizer'
        if global_key in globals():
            self.tokenizer = globals()[global_key]
        else:
            lower = 'uncased' in config.model_name
            self.tokenizer = AutoTokenizer.from_pretrained(
                config.model_name, do_lower_case=lower)
            globals()[global_key] = self.tokenizer
        super().__init__(config, stream_or_file, max_samples, share_vocabs_with, is_unlabeled)

    def load_or_create_vocabs(self):
        """Create vocabs, then replace the token vocab with the pretrained
        tokenizer's vocabulary and special symbols, frozen against growth."""
        super().load_or_create_vocabs()
        self.vocabs.tokens.vocab = self.tokenizer.get_vocab()
        self.vocabs.tokens.pad_token = self.tokenizer.pad_token
        self.vocabs.tokens.bos_token = self.tokenizer.cls_token
        self.vocabs.tokens.eos_token = self.tokenizer.sep_token
        self.vocabs.tokens.unk_token = self.tokenizer.unk_token
        self.vocabs.tokens.frozen = True

    def load_stream(self, stream):
        """Read blank-line-separated sentences, honoring ``max_samples``
        and skipping over-long samples via ``ignore_sample``."""
        self.raw = []
        sent = []
        for line in stream:
            if not line.strip():
                # Blank line terminates the current sentence.
                if sent:
                    sample = self.create_sentence_from_lines(sent)
                    if not self.ignore_sample(sample):
                        self.raw.append(sample)
                        if self.max_samples and len(self.raw) >= self.max_samples:
                            break
                sent = []
            else:
                sent.append(line.rstrip("\n"))
        # Flush a trailing sentence with no terminating blank line.
        if sent:
            if self.max_samples is None or len(self.raw) < self.max_samples:
                sample = self.create_sentence_from_lines(sent)
                if not self.ignore_sample(sample):
                    self.raw.append(sample)

    def create_sentence_from_lines(self, lines):
        """Build one sample from ``token[\\tlabel]`` lines, recording where
        each original token starts in the subword sequence."""
        sent = []
        labels = []
        token_starts = []
        subwords = []
        for line in lines:
            fd = line.rstrip("\n").split("\t")
            sent.append(fd[0])
            if len(fd) > 1:
                labels.append(fd[1])
            token_starts.append(len(subwords))
            token = fd[0]
            if self.config.remove_diacritics:
                token = unidecode.unidecode(token)
            pieces = self.tokenizer.tokenize(token)
            subwords.extend(pieces)
        # Sentinel entry: start offset one past the last subword.
        token_starts.append(len(subwords))
        if len(labels) == 0:
            # Unlabeled input (e.g. test data to be decoded later).
            labels = None
        return self.datafield_class(
            raw_sentence=sent, labels=labels,
            sentence_len=len(sent),
            tokens=subwords,
            sentence_subword_len=len(subwords),
            token_starts=token_starts,
        )

    def ignore_sample(self, sample):
        """Drop sentences longer than 500 subwords (model length limit)."""
        return sample.sentence_subword_len > 500

    def to_idx(self):
        """Numericalize, then shift token starts by one for the prepended
        special token and append the final sentence position."""
        super().to_idx()
        prefixed_token_starts = []
        for ti, tokstarts in enumerate(self.mtx.token_starts):
            tokstarts = [t+1 for t in tokstarts]
            token_starts = [0] + tokstarts + [len(self.mtx.tokens[ti]) + 1]
            prefixed_token_starts.append(token_starts)
        self.mtx.token_starts = prefixed_token_starts

    def batched_iter(self, batch_size):
        """Yield batches with ``token_starts`` padded to a rectangle (pad
        value 1000, an out-of-range position) and labels flattened."""
        for batch in super().batched_iter(batch_size):
            padded_token_starts = []
            maxlen = max(len(t) for t in batch.token_starts)
            pad = 1000
            for sample in batch.token_starts:
                padded = sample + [pad] * (maxlen - len(sample))
                padded_token_starts.append(padded)
            batch.token_starts = np.array(padded_token_starts)
            if batch.labels:
                batch.labels = np.concatenate(batch.labels)
            yield batch

    def decode(self, model_output):
        """Assign per-token argmax predictions back to each raw sample;
        *model_output* is flat across all tokens of all sentences."""
        offset = 0
        for si, sample in enumerate(self.raw):
            labels = []
            for ti in range(sample.sentence_len):
                label_idx = model_output[offset + ti].argmax()
                labels.append(self.vocabs.labels.inv_lookup(label_idx))
            sample.labels = labels
            offset += sample.sentence_len

    def print_sample(self, sample, stream):
        """Write one sentence in CoNLL format (token TAB label per line)."""
        stream.write("\n".join(
            "{}\t{}".format(sample.raw_sentence[i], sample.labels[i])
            for i in range(sample.sentence_len)
        ))
        stream.write("\n")

    def print_raw(self, stream):
        """Write all sentences, blank-line separated."""
        for si, sample in enumerate(self.raw):
            self.print_sample(sample, stream)
            if si < len(self.raw) - 1:
                stream.write("\n")
class SentenceProberDataset(BaseDataset):
datafield_class = TokenInSequenceProberFields
def __init__(self, config, stream_or_file, max_samples=None,
share_vocabs_with=None, is_unlabeled=False):
global_key = f'{config.model_name}_tokenizer'
if global_key in globals():
self.tokenizer = globals()[global_key]
else:
lower = 'uncased' in config.model_name
self.tokenizer = AutoTokenizer.from_pretrained(
config.model_name, do_lower_case=lower)
globals()[global_key] = self.tokenizer
self.MASK = self.tokenizer.mask_token
self.mask_positions = set(config.mask_positions)
if config.use_character_tokenization:
if not config.model_name.startswith('bert-'):
raise ValueError("Character tokenization is only "
"supported for BERT models.")
logging.info("Using character tokenization.")
super().__init__(config, stream_or_file, max_samples, share_vocabs_with, is_unlabeled)
def load_or_create_vocabs(self):
super().load_or_create_vocabs()
self.vocabs.subword_tokens.vocab = self.tokenizer.get_vocab()
self.vocabs.subword_tokens.pad_token = self.tokenizer.pad_token
self.vocabs.subword_tokens.bos_token = self.tokenizer.cls_token
self.vocabs.subword_tokens.eos_token = self.tokenizer.sep_token
self.vocabs.subword_tokens.unk_token = self.tokenizer.unk_token
self.vocabs.subword_tokens.frozen = True
def to_idx(self):
super().to_idx()
prefixed_token_starts = []
for ti, tokstarts in enumerate(self.mtx.token_starts):
tokstarts = [t+1 for t in tokstarts]
token_starts = [0] + tokstarts + [len(self.mtx.subword_tokens[ti]) - 1]
prefixed_token_starts.append(token_starts)
self.mtx.token_starts = prefixed_token_starts
self.mtx.probe_target_idx = np.array(self.mtx.probe_target_idx) + 1
self.mtx.input_len = np.array(self.mtx.input_len) + 2
def batched_iter(self, batch_size):
for batch in super().batched_iter(batch_size):
padded_token_starts = []
maxlen = max(len(t) for t in batch.token_starts)
pad = 1000
for sample in batch.token_starts:
padded = sample + [pad] * (maxlen - len(sample))
padded_token_starts.append(padded)
batch.token_starts = np.array(padded_token_starts)
yield batch
def extract_sample_from_line(self, line):
fd = line.rstrip("\n").split("\t")
raw_sent, raw_target, raw_idx = fd[:3]
if len(fd) > 3:
label = fd[3]
else:
label = None
raw_idx = int(raw_idx)
# Only include the target from the sentence.
if self.config.target_only:
if self.config.remove_diacritics:
target = unidecode.unidecode(raw_target)
else:
target = raw_target
tokenized = [self.tokenizer.tokenize(target)]
target_idx = 0
# Build a list-of-lists from the tokenized words.
# This allows shuffling it later.
else:
tokenized = []
for ti, token in enumerate(raw_sent.split(" ")):
if ti - raw_idx in self.mask_positions:
pieces = [self.MASK]
else:
if self.config.remove_diacritics:
token = unidecode.unidecode(token)
if self.config.use_character_tokenization == 'full':
pieces = [token[0]]
pieces.extend(f'##{c}' for c in token[1:])
elif self.config.use_character_tokenization == 'target_only':
if ti == raw_idx:
pieces = [token[0]]
pieces.extend(f'##{c}' for c in token[1:])
else:
pieces = self.tokenizer.tokenize(token)
else:
pieces = self.tokenizer.tokenize(token)
tokenized.append(pieces)
# Add [SEP] token start.
# Perform BOW.
if self.config.bow:
all_idx = np.arange(len(tokenized))
np.random.shuffle(all_idx)
tokenized = [tokenized[i] for i in all_idx]
target_map = np.argsort(all_idx)
target_idx | |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from tqdm import tqdm
from torch._C import _nvtx
import sys
sys.path.insert(0, "./Swin-Transformer-Quantization")
# from config_modified_int8 import get_config
from SwinTransformer.config import get_config
from models import build_model
from data import build_val_loader
from SwinTransformer.optimizer import build_optimizer
from SwinTransformer.logger import create_logger
from SwinTransformer.utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
from SwinTransformerINT8Weight import SwinTransformerINT8Weight
import quant_utils
def saveToTxt(x, name, clean=False):
    """Dump an array/tensor to ``tmp2/<name>`` as one value per line (debug aid).

    :param x: Array-like exposing ``reshape`` and ``shape`` (numpy array or
        torch tensor).
    :param name: File name inside the hard-coded ``tmp2/`` debug directory
        (the directory must already exist).
    :param clean: When True, truncate the file and write values only.
        When False, append a header (rank, then the shape on one line)
        followed by the values, so successive calls accumulate records.
    """
    flat = x.reshape([-1])
    mode = 'w' if clean else 'a'  # write-only is enough; no need for '+' modes
    with open("tmp2/" + name, mode) as fout:
        if not clean:
            # Header: rank on one line, then the shape dimensions.
            shape = x.shape
            fout.write("{}\n".format(len(shape)))
            fout.write(" ".join(str(s) for s in shape) + "\n")
        for v in flat:
            fout.write("{}\n".format(v))
# apex (NVIDIA mixed-precision library) is optional: when it is missing,
# ``amp`` stays None and the O1/O2 AMP paths below must not be taken.
try:
    # noinspection PyUnresolvedReferences
    from apex import amp
except ImportError:
    amp = None
def parse_option():
    """Parse command-line options and build the experiment config.

    Returns:
        (args, config): the parsed argparse namespace — including the
        quantization flags installed by ``quant_utils`` — and the config
        object produced by ``get_config``.
    """
    parser = argparse.ArgumentParser('Swin Transformer training and evaluation script', add_help=False)
    parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
    parser.add_argument(
        "--opts",
        help="Modify config options by adding 'KEY VALUE' pairs. ",
        default=None,
        nargs='+',
    )

    # easy config modification
    parser.add_argument('--th-path', type=str, help='path to pytorch library')
    parser.add_argument('--batch-size', type=int, default=32, help="batch size for single GPU")
    parser.add_argument('--data-path', type=str, help='path to dataset')
    parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
    parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
                        help='no: no cache, '
                             'full: cache all data, '
                             'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
    parser.add_argument('--pretrained',
                        help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
    parser.add_argument('--resume', help='resume from checkpoint')
    parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
    parser.add_argument('--use-checkpoint', action='store_true',
                        help="whether to use gradient checkpointing to save memory")
    parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
                        help='mixed precision opt level, if O0, no amp is used')
    parser.add_argument('--output', default='output', type=str, metavar='PATH',
                        help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
    parser.add_argument('--tag', help='tag of experiment')
    parser.add_argument('--profile', action='store_true', help='Perform profiling only, with some random data')
    parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--throughput', action='store_true', help='Test throughput only')
    parser.add_argument('--calib', action='store_true', help='Perform calibration only')
    parser.add_argument('--train', action='store_true', help='Perform training only')
    parser.add_argument('--int8-mode', type=int, help='int8 mode', choices=[1, 2])
    parser.add_argument('--num-calib-batch', type=int, default=4, help='Number of batches for calibration. 0 will disable calibration.')

    # distributed training
    parser.add_argument("--local_rank", type=int, default=0, help='local rank for DistributedDataParallel')

    # Register the quantization-specific flags before parsing.
    quant_utils.add_arguments(parser)
    args, unparsed = parser.parse_known_args()
    # Translate quant flags and install default quantizer descriptors.
    if args.quant_mode is not None:
        args = quant_utils.set_args(args)
    quant_utils.set_default_quantizers(args)

    config = get_config(args)

    return args, config
def main(args, config):
    """Build the (quantized) Swin model, load a checkpoint, and dispatch to
    profiling or evaluation according to ``args``.

    Relies on the module-level ``logger`` created elsewhere in this script.
    """
    logger.info(f"Creating model:{config.MODEL.TYPE}/{config.MODEL.NAME}")
    model = build_model(config)
    model.cuda()
    # logger.info(str(model))
    # quant_utils.print_quant_summary(model)

    optimizer = build_optimizer(config, model)
    if config.AMP_OPT_LEVEL != "O0":
        # Requires apex; ``amp`` is None when apex is not installed.
        model, optimizer = amp.initialize(model, optimizer, opt_level=config.AMP_OPT_LEVEL)
    # model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False)
    model_without_ddp = model

    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logger.info(f"number of params: {n_parameters}")
    if hasattr(model_without_ddp, 'flops'):
        flops = model_without_ddp.flops()
        logger.info(f"number of GFLOPs: {flops / 1e9}")

    # Training/auto-resume setup kept for reference; this script only
    # profiles or evaluates.
    '''
    lr_scheduler = build_scheduler(config, optimizer, len(data_loader_train))
    if config.AUG.MIXUP > 0.:
        # smoothing is handled with mixup label transform
        criterion = SoftTargetCrossEntropy()
    elif config.MODEL.LABEL_SMOOTHING > 0.:
        criterion = LabelSmoothingCrossEntropy(smoothing=config.MODEL.LABEL_SMOOTHING)
    else:
        criterion = torch.nn.CrossEntropyLoss()
    max_accuracy = 0.0
    if config.TRAIN.AUTO_RESUME:
        resume_file = auto_resume_helper(config.OUTPUT)
        if resume_file:
            if config.MODEL.RESUME:
                logger.warning(f"auto-resume changing resume file from {config.MODEL.RESUME} to {resume_file}")
            config.defrost()
            config.MODEL.RESUME = resume_file
            config.freeze()
            logger.info(f'auto resuming from {resume_file}')
        else:
            logger.info(f'no checkpoint found in {config.OUTPUT}, ignoring auto resume')
    '''
    lr_scheduler = None

    if config.MODEL.RESUME:
        # max_accuracy = load_checkpoint(config, model_without_ddp, optimizer, lr_scheduler, logger)
        checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')
        # Accept both raw state dicts and {'model': state_dict} checkpoints.
        msg = model_without_ddp.load_state_dict(checkpoint['model'] if 'model' in checkpoint.keys() else checkpoint, strict=False)
        logger.info(msg)
        del checkpoint

    if args.profile:
        quant_utils.configure_model(model, args, calib=False)
        validate_with_random_data(config, args, model_without_ddp)
        return

    if args.eval:
        dataset_val, data_loader_val = build_val_loader(config)
        quant_utils.configure_model(model, args, calib=False)
        # validate_with_random_data(config, model_without_ddp)
        acc1, acc5, loss = validate(config, args, data_loader_val, model_without_ddp)
        logger.info(f"Accuracy of resumed network on the {len(dataset_val)} test images: {acc1:.1f}%")
        return
@torch.no_grad()
def validate(config, args, data_loader, model):
    """Evaluate the FasterTransformer INT8 Swin op on a validation loader.

    Instantiates the custom ``SwinTransformerINT8`` TorchScript class from
    the PyTorch model's state dict, runs it on half-precision inputs,
    applies the original classification head (``model.head``), and reports
    top-1/top-5 accuracy and loss. Relies on the module-level ``logger``.

    :param config: Experiment config (Swin hyper-parameters, print freq).
    :param args: CLI args: ``th_path`` (op library path) and ``int8_mode``.
    :param data_loader: Validation loader yielding ``(images, target)``.
    :param model: PyTorch Swin model; used for its state dict and head.
    :return: Tuple ``(avg_acc1, avg_acc5, avg_loss)``.
    """
    criterion = torch.nn.CrossEntropyLoss()
    model.eval()

    batch_time = AverageMeter()
    loss_meter = AverageMeter()
    acc1_meter = AverageMeter()
    acc5_meter = AverageMeter()

    # Gather the Swin hyper-parameters the custom op constructor expects.
    th_path = args.th_path
    depths = config.MODEL.SWIN.DEPTHS
    depths_tensor = torch.tensor(depths, dtype=torch.int)
    num_heads = config.MODEL.SWIN.NUM_HEADS
    num_heads_tensor = torch.tensor(num_heads, dtype=torch.int)
    layer_num = len(depths)
    window_size = config.MODEL.SWIN.WINDOW_SIZE
    max_batch = config.DATA.BATCH_SIZE
    img_size = config.DATA.IMG_SIZE
    patch_size = config.MODEL.SWIN.PATCH_SIZE
    in_chans = config.MODEL.SWIN.IN_CHANS
    embed_dim = config.MODEL.SWIN.EMBED_DIM
    ape = config.MODEL.SWIN.APE
    patch_norm = config.MODEL.SWIN.PATCH_NORM
    mlp_ratio = config.MODEL.SWIN.MLP_RATIO
    qkv_bias = config.MODEL.SWIN.QKV_BIAS
    int8_mode = args.int8_mode
    if config.MODEL.SWIN.QK_SCALE is not None:
        qk_scale = config.MODEL.SWIN.QK_SCALE
    else:
        qk_scale = 1.0

    sw_weights = SwinTransformerINT8Weight(layer_num, window_size, depths, num_heads, th_path, model.state_dict())
    # sw_weights.to_half()
    # sw_weights.to_cuda()

    torch.classes.load_library(th_path)
    try:
        swin_transformer = torch.classes.SwinTransformerINT8.Class(sw_weights.weights, int8_mode, depths_tensor, num_heads_tensor, max_batch, img_size, patch_size, in_chans, embed_dim, window_size, ape, patch_norm, layer_num, mlp_ratio, qkv_bias, qk_scale)
    except Exception:
        # legacy ths for 20.03 image (narrowed from a bare except so that
        # KeyboardInterrupt/SystemExit still propagate)
        swin_transformer = torch.classes.SwinTransformerINT8Class(sw_weights.weights, int8_mode, depths_tensor, num_heads_tensor, max_batch, img_size, patch_size, in_chans, embed_dim, window_size, ape, patch_norm, layer_num, mlp_ratio, qkv_bias, qk_scale)

    end = time.time()
    for idx, (images, target) in enumerate(data_loader):
        # Cast to fp16 for the custom op with Tensor.half() instead of the
        # deprecated torch.tensor(tensor, ...) copy-construct pattern.
        images_half = images.half()
        images_half = images_half.cuda(non_blocking=True)
        images = images.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)

        # compute output
        # output_th = model(images)
        swin_transformer_output = swin_transformer.forward(images_half)
        output = model.head(swin_transformer_output)
        # diff = output - output_th
        # print(diff.mean(), diff.max(), diff.min())

        # measure accuracy and record loss
        loss = criterion(output, target)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))

        loss_meter.update(loss.item(), target.size(0))
        acc1_meter.update(acc1.item(), target.size(0))
        acc5_meter.update(acc5.item(), target.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if idx % config.PRINT_FREQ == 0:
            memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
            logger.info(
                f'Test: [{idx}/{len(data_loader)}]\t'
                f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
                f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
                f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
                f'Mem {memory_used:.0f}MB')
    logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
    return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
@torch.no_grad()
def run_swintransformernv_op(config, args, model, images, use_fp16):
    """Benchmark the FasterTransformer Swin INT8 custom op on `images`.

    Builds the INT8 plugin from the weights held by `model`, runs
    `warmup_time` untimed passes (the classification output of the last
    warm-up pass is what gets returned), then times `test_time` passes of
    the transformer body alone — the classification head is deliberately
    excluded from the timed loop.

    Args:
        config:   experiment config (MODEL.SWIN.* and DATA.* entries read).
        args:     must provide `th_path` (plugin .so path) and `int8_mode`.
        model:    the PyTorch Swin model; supplies weights and the head.
        images:   input batch already placed on the GPU.
        use_fp16: kept for interface compatibility; the timed path is INT8
                  either way.

    Returns:
        numpy array of classifier logits from the last warm-up pass.
    """
    th_path = args.th_path
    depths = config.MODEL.SWIN.DEPTHS
    depths_tensor = torch.tensor(depths, dtype=torch.int)
    num_heads = config.MODEL.SWIN.NUM_HEADS
    num_heads_tensor = torch.tensor(num_heads, dtype=torch.int)
    layer_num = len(depths)
    window_size = config.MODEL.SWIN.WINDOW_SIZE
    max_batch = config.DATA.BATCH_SIZE
    img_size = config.DATA.IMG_SIZE
    patch_size = config.MODEL.SWIN.PATCH_SIZE
    in_chans = config.MODEL.SWIN.IN_CHANS
    embed_dim = config.MODEL.SWIN.EMBED_DIM
    ape = config.MODEL.SWIN.APE
    patch_norm = config.MODEL.SWIN.PATCH_NORM
    mlp_ratio = config.MODEL.SWIN.MLP_RATIO
    qkv_bias = config.MODEL.SWIN.QKV_BIAS
    int8_mode = args.int8_mode
    # The plugin needs a concrete float scale; fall back to 1.0 when unset.
    if config.MODEL.SWIN.QK_SCALE is not None:
        qk_scale = config.MODEL.SWIN.QK_SCALE
    else:
        qk_scale = 1.0
    torch.classes.load_library(th_path)
    sw_weights = SwinTransformerINT8Weight(layer_num, window_size, depths, num_heads, th_path, model.state_dict())
    test_time = 100
    warmup_time = 10
    try:
        swin_transformer = torch.classes.SwinTransformerINT8.Class(sw_weights.weights, int8_mode, depths_tensor, num_heads_tensor, max_batch, img_size, patch_size, in_chans, embed_dim, window_size, ape, patch_norm, layer_num, mlp_ratio, qkv_bias, qk_scale)
    except Exception:
        # Legacy ths for the 20.03 image registers the class under a flat
        # name.  Narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit still propagate.
        swin_transformer = torch.classes.SwinTransformerINT8Class(sw_weights.weights, int8_mode, depths_tensor, num_heads_tensor, max_batch, img_size, patch_size, in_chans, embed_dim, window_size, ape, patch_norm, layer_num, mlp_ratio, qkv_bias, qk_scale)
    # Warm up; op_output from the final pass is the function's return value.
    for i in range(warmup_time):
        op_embedding = swin_transformer.forward(images)
        op_output = model.head(op_embedding)
    torch.cuda.synchronize()
    op_begin = time.time()
    for i in range(test_time):
        _nvtx.rangePushA("op {}".format(i))
        op_embedding = swin_transformer.forward(images)
        _nvtx.rangePop()
    torch.cuda.synchronize()
    op_end = time.time()
    op_output = op_output.cpu().numpy()
    # Bug fix: the original branched on use_fp16 but printed the identical
    # message in both branches; collapsed to a single print.
    print("INT8 op time : ", (op_end - op_begin)/test_time*1000.0, "ms")
    return op_output
@torch.no_grad()
def validate_with_random_data(config, args, model):
model.eval()
max_batch = config.DATA.BATCH_SIZE
img_size = config.DATA.IMG_SIZE
in_chans = config.MODEL.SWIN.IN_CHANS
image = np.random.rand(1, in_chans, img_size, img_size)
images = np.repeat(image, max_batch, axis=0)
print(images.shape)
images_half = torch.tensor(images, dtype=torch.half)
images_float = torch.tensor(images, dtype=torch.float)
images_half = images_half.cuda(non_blocking=True)
images_float = images_float.cuda(non_blocking=True)
##run original swin-transformer
test_time = 100
warmup_time = 10
'''
# warm up
for i in range(warmup_time):
output = model(images_float)
torch_end = time.time()
for i in range(test_time):
FP32_torch_output = model(images_float)
FP32_torch_output = FP32_torch_output.cpu().numpy()
print("FP32 input torch time : ", (time.time() - torch_end)/test_time*1000.0, "ms")
'''
# warm up
# for i in range(warmup_time):
# output = model(images_half)
# torch.cuda.synchronize()
# torch_start = time.time()
# for i in range(test_time):
INT8_torch_output = model(images_half)
# torch.cuda.synchronize()
# torch_end = time.time()
INT8_torch_output = INT8_torch_output.cpu().numpy()
# print("FP16 input torch time : ", (torch_end - torch_start)/test_time*1000.0, "ms")
'''
diff = abs(FP32_torch_output - FP16_torch_output)
print("FP32_torch_output vs FP16_torch_output , avg diff : ", diff.mean(), "max diff : ", diff.max())
| |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import json
import time
import pdb
import platform
import string
import random
# This script and create_script.sh are maintained by the algorithm team;
# bump this version number whenever the code changes.
code_version="1.0"
def create_hccl_mindspore():
    """Build and write the hccl_ms.json rank table for a MindSpore NPU job.

    Polls (once per second) until every worker pod has written its
    npu_<idx>.info file under the per-job NPU directory, parses each file's
    device id/ip pairs and host ip, assigns consecutive global rank ids,
    and dumps the aggregated topology to <npu_dir>/hccl_ms.json.

    Reads DLWS_WORKER_NUM, DLWS_JOB_ID and DLWS_USER_NAME from the
    environment; blocks indefinitely if a worker never produces its file.
    """
    done = 0       # number of npu_<idx>.info files processed so far
    rank_id = 0    # global device rank, incremented once per device
    hccl_data = {}
    # for test only
    #os.environ['DLWS_WORKER_NUM'] = "2"
    #os.environ['DLWS_JOB_ID'] = "test_npu_device"
    #os.environ['DLWS_USER_NAME'] = "bifeng.peng"
    #
    ## Single-node job; checking DLWS_PS_NUM == 0 would be the better test
    if "DLWS_WORKER_NUM" not in os.environ:
        os.environ['DLWS_WORKER_NUM'] = "1"
    else:
        pass
    worker_num = int(os.environ['DLWS_WORKER_NUM'])
    job_id = os.environ['DLWS_JOB_ID']
    user_name = os.environ['DLWS_USER_NAME']
    # 1) hccl files and related scripts all live in this directory
    # 2) the files are job-specific, so different jobs are stored separately
    npu_dir = '/home/%s/.npu/%s/' % (user_name, job_id)
    # The following fields are hard-coded constants
    hccl_data["board_id"] = "0x0020"
    hccl_data["chip_info"] = "910"
    hccl_data["deploy_mode"] = "lab"
    hccl_data["group_count"] = "1"
    hccl_data["para_plane_nic_location"] = "device"
    hccl_data["para_plane_nic_name"] = [
        "eth0",
        "eth1",
        "eth2",
        "eth3",
        "eth4",
        "eth5",
        "eth6",
        "eth7"
    ]
    hccl_data["para_plane_nic_num"] = "8"
    hccl_data["status"] = "completed"
    hccl_data["group_list"] = []
    group = {}
    # Provisional counts (8 devices per worker); corrected after parsing.
    group["device_num"] = str(worker_num * 8)
    group["server_num"] = str(worker_num)
    group["group_name"] = "test"
    group["instance_count"] = group["device_num"]
    group["instance_list"] = []
    ## Consume the npu_<idx>.info files;
    ## there is exactly one file per worker
    while True:
        PATH = npu_dir + ('/npu_%d.info' % (done))
        if os.path.isfile(PATH) and os.access(PATH, os.R_OK):
            with open(PATH, "r") as f:
                ips = ""
                host_ip = ""
                # File format:
                # ip=id1:ip1,id2:ip2
                # host=xxx
                for line in f:
                    print(line)
                    if "ip=" in line:
                        _, ips = line.strip().split("=")
                    elif "host=" in line:
                        _, host_ip = line.strip().split("=")
                ip_list = ips.split(",")
                ip_list = sorted(ip_list)
                for ip_elem in ip_list:
                    # device id and ip
                    device_id, device_ip = ip_elem.split(":")
                    ## set up group list
                    device_item = {} # item of instance list
                    device_item["devices"] = [{
                        "device_id" : device_id,
                        "device_ip" : device_ip
                    }]
                    device_item["rank_id"] = str(rank_id)
                    device_item["server_id"] = str(host_ip)
                    #pdb.set_trace()
                    rank_id = rank_id + 1
                    group["instance_list"].append(device_item)
                f.close()  # redundant: the with-block closes the file
            done = done + 1
        else:
            pass
        if done == worker_num:
            break
        else:
            pass
        time.sleep(1)  # file not there yet — poll again in a second
    # Fix up the counts with the number of devices actually discovered
    group["instance_count"] = group["device_num"] = str(len(group["instance_list"]))
    print("succ!")
    hccl_data["group_list"].append(group)
    # dump to json file
    with open(npu_dir + '/hccl_ms.json', 'w') as fp:
        json.dump(hccl_data, fp)
    return
def create_hccl_tensorflow():
    """Build and write the hccl_tf.json rank table for a TensorFlow NPU job.

    Same polling scheme as create_hccl_mindspore(): waits until every worker
    pod has written its npu_<idx>.info file, parses device id/ip pairs and
    host ip from each, and dumps the topology to <npu_dir>/hccl_tf.json.
    The TF layout groups devices per pod instance rather than per rank.
    """
    done = 0 # worker node to process
    rank_id = 0 # equals to device count
    hccl_data = {}
    # for test only
    #os.environ['DLWS_WORKER_NUM'] = "2"
    #os.environ['DLWS_JOB_ID'] = "test_npu_device"
    #os.environ['DLWS_USER_NAME'] = "bifeng.peng"
    #
    ## non distributed job
    if "DLWS_WORKER_NUM" not in os.environ:
        os.environ['DLWS_WORKER_NUM'] = "1"
    else:
        pass
    worker_num = int(os.environ['DLWS_WORKER_NUM'])
    job_id = os.environ['DLWS_JOB_ID']
    pod_name = os.environ['POD_NAME']
    user_name = os.environ['DLWS_USER_NAME']
    # A job is distributed when it has at least one parameter server
    distributing_job= False
    if "DLWS_NUM_PS" in os.environ:
        if int(os.environ["DLWS_NUM_PS"]) > 0:
            distributing_job = True
        else:
            pass
    else:
        pass
    # 1) hccl files and related scripts all live in this directory
    # 2) the files are job-specific, so different jobs are stored separately
    npu_dir = '/home/%s/.npu/%s/' % (user_name, job_id)
    hccl_data["group_count"] = "1"
    hccl_data["status"] = "completed"
    hccl_data["group_list"] = []
    group = {}
    #group["device_count"] = worker_num * 8
    group["instance_count"] = str(worker_num)
    group["group_name"] = "test"
    group["instance_list"] = []
    ## Consume the npu_<idx>.info files;
    ## there is exactly one file per worker
    while True:
        PATH = npu_dir + ('/npu_%d.info' % (done))
        if os.path.isfile(PATH) and os.access(PATH, os.R_OK):
            with open(PATH, "r") as f:
                ips = ""
                host_ip = ""
                # File format:
                # ip=id1:ip1,id2:ip2
                # host=xxx
                for line in f:
                    print(line)
                    if "ip=" in line:
                        _, ips = line.strip().split("=")
                    elif "host=" in line:
                        _, host_ip = line.strip().split("=")
                instance_item = {} # item of instance list
                # Distributed jobs derive the worker pod name from the job
                # id; single-pod jobs use this pod's own name.
                if distributing_job is True:
                    instance_item["pod_name"] = job_id + "-worker-" + str(done)
                else:
                    instance_item["pod_name"] = pod_name
                instance_item["server_id"] = host_ip
                instance_item["devices"] = []
                # parse string to get all device ips
                ip_list = ips.split(",")
                ip_list = sorted(ip_list)
                for ip_elem in ip_list:
                    # one device
                    device_id, device_ip = ip_elem.split(":")
                    ## set up group list
                    device_item = {
                        "device_id" : device_id,
                        "device_ip" : device_ip
                    }
                    # append to instance list
                    rank_id = rank_id + 1
                    instance_item["devices"].append(device_item)
                #pdb.set_trace()
                group["instance_list"].append(instance_item)
                f.close()  # redundant: the with-block closes the file
            done = done + 1
        else:
            pass
        if done == worker_num:
            break
        else:
            pass
        time.sleep(1)  # file not there yet — poll again in a second
    # Fix up counts with what was actually discovered
    group["device_count"] = str(rank_id)
    group["instance_count"] = str(len(group["instance_list"]))
    hccl_data["group_list"].append(group)
    print("succ!")
    # dump to json file
    with open(npu_dir + '/hccl_tf.json', 'w') as fp:
        json.dump(hccl_data, fp)
    return
# Load environment variables from /pod.env
def load_env(file_path):
    """Parse a shell-style env file ("export KEY=VALUE" lines) into a dict.

    Blank lines and lines without '=' are skipped.  This is the inverse of
    add_env(), which writes one "export KEY=VALUE" line per entry.

    Args:
        file_path: path of the env file to read.

    Returns:
        dict mapping variable names to their (string) values.
    """
    envs = {}
    with open(file_path, "r") as f:
        for line in f:
            line = line.strip()
            # Drop an optional "export" prefix.  The previous
            # lstrip("export") stripped a *character set*, which could
            # mangle keys made of the letters e/x/p/o/r/t.
            if line.startswith("export"):
                line = line[len("export"):].strip()
            if line != "" and "=" in line:
                # Split on the first '=' only, so values that themselves
                # contain '=' survive the round-trip intact.
                key, value = line.split("=", 1)
                envs[key] = value
    return envs
# Write environment variables to /pod.env.
# If a key already exists it is overwritten.
def add_env(path, envs):
    """Merge *envs* into the env file at *path* and rewrite it.

    Keys already in the file keep their value unless overridden by *envs*;
    every entry is written back as an "export KEY=VALUE" line.

    Args:
        path: env file to update (created/truncated on write).
        envs: dict of variables to add or override.
    """
    # Same-key entries are overwritten; keys only present in the file survive.
    envs_orig = load_env(path)
    envs_orig.update(envs)
    # (Removed a redundant f.close() — the with-block closes the file.)
    with open(path, "w") as f:
        for k, v in envs_orig.items():
            f.write("export %s=%s\n" % (k, v))
    return
def get_os_flag():
    """Return the architecture tag used by the algorithm components.

    "arm64" on aarch64 machines, "x86_64" everywhere else.
    """
    return "arm64" if platform.machine() == "aarch64" else "x86_64"
# The architecture tag in the GNU install directories differs from the one
# used by the algorithm components, so it is handled separately.
def get_gnu_arch_flag():
    """Return the architecture tag as used in GNU install paths.

    GNU toolchain directories use "aarch64" where the algorithm components
    use "arm64" — hence this separate helper.
    """
    if platform.machine() == "aarch64":
        return "aarch64"
    return "x86_64"
def get_random_num(length):
    """Return a string of *length* random decimal digits."""
    return "".join(random.choices(string.digits, k=length))
# Write the environment update into the given user's shell startup file
def set_bashrc(username):
    """Append a snippet to *username*'s ~/.bashrc that sources /pod.env.

    Root's rc file lives at /root/.bashrc; everyone else's under /home.
    The snippet is appended unconditionally on every call.
    """
    rc_path = "/root/.bashrc" if username == "root" else "/home/" + username + "/.bashrc"
    cmd = '''
if [ -f "/pod.env" ]; then
. /pod.env
fi
'''
    with open(rc_path, "a") as f:
        f.write(cmd + "\n")
    return
# Prepare the MindSpore environment:
# 1) assemble environment variables and write them to /pod.env
# 2) create the training shell script required by the algorithm side
# 3) create the hccl rank-table file required by the algorithm side
def handle_mindspore():
    """Prepare the MindSpore NPU runtime environment for this pod.

    Steps: (1) render the MindSpore env-var templates and merge them into
    /pod.env; (2) generate the training shell script via create_script.py;
    (3) create the hccl rank table when needed; (4) for distributed jobs,
    synchronize setup between the ps pod (notify) and worker pods (wait).
    """
    path = "/pod.env"
    envs = load_env(path) # env vars already created during platform bootstrap
    envs_to_add= {}
    envs_to_add["DEVICE_ID"] = "0"
    # Parse GPU/NPU device ids (strip escape backslashes)
    if "VISIBLE_IDS" in envs:
        envs["VISIBLE_IDS"] = envs["VISIBLE_IDS"].replace("\\","")
        envs_to_add["VISIBLE_IDS"] = envs["VISIBLE_IDS"]
    else:
        pass
    # Parse NPU device IPs
    if "NPU_IPS" in envs:
        envs["NPU_IPS"] = envs["NPU_IPS"].replace("\\","")
        envs_to_add["NPU_IPS"] = envs["NPU_IPS"]
    else:
        pass
    ## Merge the variables already in /pod.env with the current process
    ## environment; the combined set (in `envs`) feeds template rendering.
    for k, v in os.environ.items():
        if k not in envs:
            envs[k] = v
        else:
            pass
    ## No device-id parsing needed here.
    ## Random parameters required by the algorithm side.
    envs["RANDOM"] = get_random_num(6)
    envs["osflag"] = get_os_flag()
    envs["gnu_arch"] = get_gnu_arch_flag()
    # MindSpore environment-variable templates ("KEY=VALUE" with ${...}
    # placeholders substituted from `envs`)
    mindspore_envs = [
        "PYTHONPATH=/usr/local/lib/python3.7/site-packages/mindspore/lib:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/${osflag}-linux/opp/op_impl/built-in/ai_core/tbe:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/pyACL/python/site-packages/acl:${PYTHONPATH}",
        "LD_LIBRARY_PATH=/usr/lib/${gnu_arch}-linux-gnu/hdf5/serial:/usr/local/Ascend/add-ons/:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/fwkacllib/lib64:/usr/local/Ascend/add-ons:/home/HwHiAiUser/Ascend/nnae/latest/fwkacllib/lib64:/usr/local/Ascend/driver/lib64/common/:/usr/local/Ascend/driver/lib64/driver/:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe/op_tiling:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/${osflag}-linux/atc/lib64:/usr/local/Ascend/fwkacllib/lib64/:/usr/local/lib/python3.7/site-packages/mindspore/lib/:/usr/local/lib/python3.7/site-packages/torch/lib:/usr/local/lib:/home/clang+llvm/lib/:$LD_LIBRARY_PATH",
        "TBE_IMPL_PATH=/home/HwHiAiUser/Ascend/ascend-toolkit/latest/${osflag}-linux/opp/op_impl/built-in/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe",
        "PATH=$PATH:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/${osflag}-linux/fwkacllib/ccec_compiler/bin/:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/fwkacllib/ccec_compiler/bin/:/home/clang+llvm/bin/:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/atc/bin",
        "ASCEND_OPP_PATH=/home/HwHiAiUser/Ascend/ascend-toolkit/latest/opp",
        "LLVM_CONFIG=/home/clang+llvm/bin/llvm-config",
        "SOC_VERSION=Ascend910",
        "POD_NAME=${DLWS_JOB_ID}",
        "JOB_ID=${RANDOM}",
        "RANK_SIZE=1",
        "ASCEND_GLOBAL_LOG_LEVEL=3",
        "ASCEND_GLOBAL_EVENT_ENABLE=0"
    ]
    # Template rendering
    # NOTE(review): split("=") keeps only the first '='-separated segment
    # as the value, so rendered template values must not contain '='.
    for item in mindspore_envs:
        tpl = string.Template(item)
        new_item = tpl.safe_substitute(envs)
        if "=" in new_item:
            key_val = new_item.strip().split("=")
            k = key_val[0]
            v = key_val[1]
            envs_to_add[k] = v
        else:
            pass
    # 1) Update /pod.env with the assembled variables
    add_env(path, envs_to_add)
    # 2) Generate the shell training script
    pod_cmd = os.environ["DLWS_LAUNCH_CMD"]
    npu_info_dir = "/home/" + os.environ["DLWS_USER_NAME"] + "/.npu/" + os.environ["DLWS_JOB_ID"] + "/train.sh"
    cmd = 'python /pod/scripts/create_script.py --type mindspore --command "%s" --out %s'% (pod_cmd, npu_info_dir)
    os.system(cmd)
    os.system("chmod 777 " + npu_info_dir)
    # Write the environment update into root's shell startup file
    set_bashrc("root")
    ## 3) Generate the hccl rank-table json
    if need_create_hccl() is True:
        create_hccl_mindspore()
    else:
        pass
    # 4) Distributed job: synchronize environment setup across pods
    if is_distributed_job() is True and is_ps_pod() is True:
        notify()
    elif is_distributed_job() is True and is_worker_pod() is True:
        wait()
    else:
        pass
    return
# Prepare the TensorFlow environment:
# 1) assemble environment variables and write them to /pod.env
# 2) create the training shell script required by the algorithm side
# 3) create the hccl rank-table file required by the algorithm side
def handle_tensorflow():
    """Prepare the TensorFlow NPU runtime environment for this pod.

    Steps: (1) render the TensorFlow env-var templates (plus DEVICE_ID /
    DEVICE_INDEX derived from VISIBLE_IDS) and merge them into /pod.env;
    (2) generate the training shell script via create_script.py; (3) create
    the hccl rank table when needed; (4) for distributed jobs, synchronize
    setup between the ps pod (notify) and worker pods (wait).
    """
    # 1) Assemble environment variables and write them to /pod.env
    path = "/pod.env"
    envs = load_env(path) # env vars already created during platform bootstrap
    envs_to_add= {}
    # Parse GPU/NPU device ids (strip escape backslashes)
    if "VISIBLE_IDS" in envs:
        envs["VISIBLE_IDS"] = envs["VISIBLE_IDS"].replace("\\","")
        envs_to_add["VISIBLE_IDS"] = envs["VISIBLE_IDS"]
    else:
        pass
    if "NPU_IPS" in envs:
        envs["NPU_IPS"] = envs["NPU_IPS"].replace("\\","")
        envs_to_add["NPU_IPS"] = envs["NPU_IPS"]
    else:
        pass
    ## Merge the variables already in /pod.env with the current process
    ## environment; the combined set (in `envs`) feeds template rendering.
    for k, v in os.environ.items():
        if k not in envs:
            envs[k] = v
        else:
            pass
    ## First device id from VISIBLE_IDS (defaults to "0")
    device_id="0"
    device_index="0"
    if "VISIBLE_IDS" in envs:
        devid = envs["VISIBLE_IDS"].split(",")[0].strip()
        if len(devid) > 0:
            device_id = devid
        else:
            pass
    else:
        pass
    device_index = device_id
    ## Random parameters required by the algorithm side
    envs["RANDOM"] = get_random_num(6)
    envs["osflag"] = get_os_flag()
    envs["gnu_arch"] = get_gnu_arch_flag()
    # TensorFlow environment-variable templates ("KEY=VALUE" with ${...}
    # placeholders substituted from `envs`)
    tensorflow_envs = [
        "PYTHONPATH=/usr/local/lib/python3.7/site-packages/mindspore/lib:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/${osflag}-linux/opp/op_impl/built-in/ai_core/tbe:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/pyACL/python/site-packages/acl:${PYTHONPATH}",
        "LD_LIBRARY_PATH=/usr/lib/${gnu_arch}-linux-gnu/hdf5/serial:/usr/local/Ascend/add-ons/:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/fwkacllib/lib64:/usr/local/Ascend/add-ons:/home/HwHiAiUser/Ascend/nnae/latest/fwkacllib/lib64:/usr/local/Ascend/driver/lib64/common/:/usr/local/Ascend/driver/lib64/driver/:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe/op_tiling:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/${osflag}-linux/atc/lib64:/usr/local/Ascend/fwkacllib/lib64/:/usr/local/lib/python3.7/site-packages/mindspore/lib/:/usr/local/lib/python3.7/site-packages/torch/lib:/usr/local/lib:/home/clang+llvm/lib/:$LD_LIBRARY_PATH",
        "TBE_IMPL_PATH=/home/HwHiAiUser/Ascend/ascend-toolkit/latest/${osflag}-linux/opp/op_impl/built-in/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe",
        "PATH=$PATH:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/${osflag}-linux/fwkacllib/ccec_compiler/bin/:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/fwkacllib/ccec_compiler/bin/:/home/clang+llvm/bin/:/home/HwHiAiUser/Ascend/ascend-toolkit/latest/atc/bin",
        "ASCEND_OPP_PATH=/home/HwHiAiUser/Ascend/ascend-toolkit/latest/opp",
        "LLVM_CONFIG=/home/clang+llvm/bin/llvm-config",
        "SOC_VERSION=Ascend910",
        "POD_NAME=${DLWS_JOB_ID}",
        "JOB_ID=${RANDOM}",
        "RANK_SIZE=1",
        "ASCEND_GLOBAL_LOG_LEVEL=3",
        "ASCEND_GLOBAL_EVENT_ENABLE=0"
    ]
    envs_to_add["DEVICE_ID"] = device_id
    envs_to_add["DEVICE_INDEX"] = device_index
    # Template rendering
    # NOTE(review): split("=") keeps only the first '='-separated segment
    # as the value, so rendered template values must not contain '='.
    for item in tensorflow_envs:
        tpl = string.Template(item)
        new_item = tpl.safe_substitute(envs)
        if "=" in new_item:
            key_val = new_item.strip().split("=")
            k = key_val[0]
            v = key_val[1]
            envs_to_add[k] = v
        else:
            pass
    # 1) Update /pod.env with the assembled variables
    add_env(path, envs_to_add)
    ## 2) Generate the shell training script
    pod_cmd = os.environ["DLWS_LAUNCH_CMD"]
    npu_info_dir = "/home/" + os.environ["DLWS_USER_NAME"] + "/.npu/" + os.environ["DLWS_JOB_ID"] + "/train.sh"
    cmd = 'python /pod/scripts/create_script.py --type tensorflow --command "%s" --out %s'% (pod_cmd, npu_info_dir)
    print(cmd, "==========================")
    os.system(cmd)
    os.system("chmod 777 " + npu_info_dir)
    # Write the environment update into root's shell startup file
    set_bashrc("root")
    # 3) Generate hccl_tf.json
    if need_create_hccl() is True:
        create_hccl_tensorflow()
    else:
        pass
    # 4) Distributed job: synchronize environment setup across pods
    if is_distributed_job() is True and is_ps_pod() is True:
        notify()
    elif is_distributed_job() is True and is_worker_pod() is True:
        wait()
    else:
        pass
    return
# Is this a distributed training job?
def is_distributed_job():
    """Return True when the job runs with at least one parameter server.

    The platform exposes the ps count via DLWS_NUM_PS; a missing or empty
    value counts as non-distributed.
    """
    num_ps = os.environ.get("DLWS_NUM_PS", "").strip().lower()
    if num_ps and int(num_ps) > 0:
        print("is_distributed_job return true")
        return True
    return False
# Is this the master (ps) node?
def is_ps_pod():
    """Return True when this pod has the "ps" role.

    "ps" marks the parameter-server pod of a multi-node, multi-device job.
    """
    role = os.environ.get("DLWS_ROLE_NAME", "").strip().lower()
    return role == "ps"
# Is this a worker node?
def is_worker_pod():
    """Return True when this pod has the "worker" role.

    Workers are the non-ps pods of a multi-node, multi-device job.
    """
    role = os.environ.get("DLWS_ROLE_NAME", "").strip().lower()
    return role == "worker"
# Distributed training jobs:
# once the ps node finishes environment setup it creates the
# setup_environment_done file as the "environment ready" marker.
def notify():
    """Publish the "environment ready" flag (ps side of a distributed job).

    Creates the setup_environment_done file that worker pods poll for; a
    single-pod job returns immediately since no coordination is needed.
    """
    # A single-pod training job has nothing to coordinate.
    if not is_distributed_job():
        return
    flag_path = "/home/" + os.environ["DLWS_USER_NAME"] + "/.npu/" + os.environ["DLWS_JOB_ID"] + "/setup_environment_done"
    # Multi-node training: the ps pod creates the flag exactly once.
    if not os.path.exists(flag_path):
        open(flag_path, 'a').close()
    return
# Distributed training jobs:
# worker nodes check for the setup_environment_done file to decide
# whether environment preparation has finished.
def wait():
| |
self.apiclient.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = self.acldata["vmD1A"]["name"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
self.vmdata["displayname"] = self.acldata["vmD1A"]["displayname"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
try:
vm = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_domain_with_subdomain_d11.id,
accountid=self.account_d1a.name,
domainid=self.account_d1a.domainid
)
self.fail(" Domain admin is able to deploy a VM for regular user in parent domain in a shared network with scope=Domain and subdomain access")
except Exception as e:
self.debug("When a user from parent domain deploys a VM in a shared network with scope=domain with subdomain access %s" % e)
if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
self.fail(
"Error message validation failed when Domain admin tries to deploy a VM for regular user in parent domain in a shared network with scope=Domain and subdomain access")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_domain_withsubdomainaccess_parentdomainadminuser(self):
    """
    Validate that Domain admin is NOT able to deploy a VM for admin user in parent domain in a shared network with scope=Domain and subdomain access
    """
    # Deploy VM as an admin user in parentdomain of a domain that has shared network with subdomain access
    # Switch API credentials to the D1 domain-admin user
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    self.vmdata["name"] = self.acldata["vmD1"]["name"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
    self.vmdata["displayname"] = self.acldata["vmD1"]["displayname"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
    try:
        vm = VirtualMachine.create(
            self.apiclient,
            self.vmdata,
            zoneid=self.zone.id,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id,
            networkids=self.shared_network_domain_with_subdomain_d11.id,
            accountid=self.account_d1.name,
            domainid=self.account_d1.domainid
        )
        # Reaching here means the ACL check failed to block the deployment
        self.fail("Domain admin is able to deploy a VM for admin user in parent domain in a shared network with scope=Domain and subdomain access")
    except Exception as e:
        self.debug("When an admin user from parent domain deploys a VM in a shared network with scope=domain with subdomain access %s" % e)
        # The failure must be the expected ACL error, not an unrelated one
        if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
            self.fail(
                "Error message validation failed when Domain admin tries to deploy a VM for admin user in parent domain in a shared network with scope=Domain and subdomain access")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_domain_withsubdomainaccess_ROOTuser(self):
    """
    Validate that Domain admin is NOT able to deploy a VM for user in ROOT domain in a shared network with scope=Domain and subdomain access
    """
    # Deploy VM as user in ROOT domain
    # Switch API credentials to the D1 domain-admin user
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    self.vmdata["name"] = self.acldata["vmROOTA"]["name"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
    self.vmdata["displayname"] = self.acldata["vmROOTA"]["displayname"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
    try:
        vm = VirtualMachine.create(
            self.apiclient,
            self.vmdata,
            zoneid=self.zone.id,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id,
            networkids=self.shared_network_domain_with_subdomain_d11.id,
            accountid=self.account_roota.name,
            domainid=self.account_roota.domainid
        )
        # Reaching here means the ACL check failed to block the deployment
        self.fail("Domain admin is able to deploy a VM for user in ROOT domain in a shared network with scope=Domain and subdomain access")
    except Exception as e:
        self.debug("When a user from ROOT domain deploys a VM in a shared network with scope=domain with subdomain access %s" % e)
        # The failure must be the expected permission error, not an unrelated one
        if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN):
            self.fail(
                "Error message validation failed when Domain admin tries to deploy a VM for user in ROOT domain in a shared network with scope=Domain and subdomain access")
## Test cases relating to deploying Virtual Machine as Domain admin for other users in shared network with scope=account
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_account_domainuser(self):
    """
    Validate that Domain admin is NOT able to deploy a VM for user in the same domain but belonging to a different account in a shared network with scope=account
    """
    # Deploy VM as user in a domain under the same domain but different account from the acount that has a shared network with scope=account
    # Switch API credentials to the D1 domain-admin user
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    self.vmdata["name"] = self.acldata["vmD111B"]["name"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
    self.vmdata["displayname"] = self.acldata["vmD111B"]["displayname"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
    try:
        vm = VirtualMachine.create(
            self.apiclient,
            self.vmdata,
            zoneid=self.zone.id,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id,
            networkids=self.shared_network_account_d111a.id,
            accountid=self.account_d111b.name,
            domainid=self.account_d111b.domainid
        )
        # Reaching here means the ACL check failed to block the deployment
        self.fail("Domain admin is able to deploy a VM for user in the same domain but belonging to a different account in a shared network with scope=account")
    except Exception as e:
        self.debug("When a user from same domain but different account deploys a VM in a shared network with scope=account %s" % e)
        # The failure must be the expected network-use error, not an unrelated one
        if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.UNABLE_TO_USE_NETWORK):
            self.fail(
                "Error message validation failed when Domain admin tries to deploy a VM for user in the same domain but belonging to a different account in a shared network with scope=account")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_account_domainadminuser(self):
    """
    Validate that Domain admin is NOT able to deploy a VM for an admin user in the same domain but belonging to a different account in a shared network with scope=account
    """
    # Deploy VM as admin user for a domain that has an account with shared network with scope=account
    # Switch API credentials to the D1 domain-admin user
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    self.vmdata["name"] = self.acldata["vmD111"]["name"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
    self.vmdata["displayname"] = self.acldata["vmD111"]["displayname"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
    try:
        vm = VirtualMachine.create(
            self.apiclient,
            self.vmdata,
            zoneid=self.zone.id,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id,
            networkids=self.shared_network_account_d111a.id,
            accountid=self.account_d111.name,
            domainid=self.account_d111.domainid
        )
        # Reaching here means the ACL check failed to block the deployment
        self.fail("Domain admin is able to deploy a VM for user in the same domain but belonging to a different account in a shared network with scope=account")
    except Exception as e:
        self.debug("When a user from same domain but different account deploys a VM in a shared network with scope=account %s" % e)
        # The failure must be the expected network-use error, not an unrelated one
        if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.UNABLE_TO_USE_NETWORK):
            self.fail(
                "Error message validation failed when Domain admin tries to deploy a VM for user in the same domain but belonging to a different account in a shared network with scope=account")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_account_user(self):
    """
    Validate that Domain admin is able to deploy a VM for a regular user in a shared network with scope=account
    """
    # Deploy VM as account with shared network with scope=account
    # Switch API credentials to the D1 domain-admin user
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    self.vmdata["name"] = self.acldata["vmD111A"]["name"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
    self.vmdata["displayname"] = self.acldata["vmD111A"]["displayname"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
    # Positive case: this deployment is expected to succeed
    vm = VirtualMachine.create(
        self.apiclient,
        self.vmdata,
        zoneid=self.zone.id,
        serviceofferingid=self.service_offering.id,
        templateid=self.template.id,
        networkids=self.shared_network_account_d111a.id,
        accountid=self.account_d111a.name,
        domainid=self.account_d111a.domainid
    )
    # The VM must be running AND owned by the intended account/domain
    self.assertEqual(vm.state == "Running" and vm.account == self.account_d111a.name and vm.domainid == self.account_d111a.domainid,
                     True,
                     "Domain admin is not able to deploy a VM for an regular user in a shared network with scope=account")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_account_differentdomain(self):
    """
    Validate that Domain admin is NOT able to deploy a VM for a regular user from a different domain in a shared network with scope=account
    """
    # Deploy VM as an admin user in a subdomain under ROOT
    # Switch API credentials to the D1 domain-admin user
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    self.vmdata["name"] = self.acldata["vmD2A"]["name"] + "-shared-scope-account-domain-admin"
    self.vmdata["displayname"] = self.acldata["vmD2A"]["displayname"] + "-shared-scope-account-domain-admin"
    try:
        vm = VirtualMachine.create(
            self.apiclient,
            self.vmdata,
            zoneid=self.zone.id,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id,
            networkids=self.shared_network_account_d111a.id,
            accountid=self.account_d2a.name,
            domainid=self.account_d2a.domainid
        )
        # Reaching here means the ACL check failed to block the deployment
        self.fail("Domain admin is able able to deploy a VM for an regular user from a differnt domain in a shared network with scope=account")
    except Exception as e:
        self.debug("When a user from different domain deploys a VM in a shared network with scope=account %s" % e)
        # The failure must be the expected permission error, not an unrelated one
        if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN):
            self.fail(
                "Error message validation failed when Domain admin tries to deploy a VM for an regular user from a differnt domain in a shared network with scope=account")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_account_ROOTuser(self):
    """
    Validate that Domain admin is NOT able to deploy a VM for a regular user in ROOT domain in a shared network with scope=account
    """
    # Deploy VM as user in ROOT domain
    # Switch API credentials to the D1 domain-admin user
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    self.vmdata["name"] = self.acldata["vmROOTA"]["name"] + "-shared-scope-account-domain-admin"
    self.vmdata["displayname"] = self.acldata["vmROOTA"]["displayname"] + "-shared-scope-account-domain-admin"
    try:
        vm = VirtualMachine.create(
            self.apiclient,
            self.vmdata,
            zoneid=self.zone.id,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id,
            networkids=self.shared_network_account_d111a.id,
            accountid=self.account_roota.name,
            domainid=self.account_roota.domainid
        )
        # Reaching here means the ACL check failed to block the deployment
        self.fail("Domain admin is able to deploy a VM for an regular user in ROOT domain in a shared network with scope=account")
    except Exception as e:
        self.debug("When a user from ROOT domain deploys a VM in a shared network with scope=account %s" % e)
        # The failure must be the expected permission error, not an unrelated one
        if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN):
            self.fail("Error message validation failed when Domain admin tries to deploy a VM for an regular user in ROOT domain in a shared network with scope=account")
## Test cases relating to deploying Virtual Machine as Regular user for other users in shared network with scope=all
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_regularuser_scope_all_anotherusersamedomain(self):
"""
Valiate that regular user is able NOT able to deploy a VM for another user in the same domain in a shared network with scope=all
"""
# Deploy VM for a user in a domain under ROOT as admin
self.apiclient.connection.apiKey = self.user_d11a_apikey
self.apiclient.connection.securityKey = self.user_d11a_secretkey
self.vmdata["name"] = self.acldata["vmD11A"]["name"] + "-shared-scope-all-domain-admin"
self.vmdata["displayname"] = self.acldata["vmD11A"]["displayname"] + "-shared-scope-all-domain-admin"
try:
vm_d1a = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_all.id,
accountid=self.account_d12a.name,
domainid=self.account_d12a.domainid
)
self.fail("Regular user is allowed to deploy a VM for another user in the same domain in a shared network | |
"order_id" field but a maker/taker_order_id field
if 'maker_order_id' in message:
if message['maker_order_id'] == self.currentOrderId:
print("GDAX - Current order msg: %s" % message)
if message['type'] == 'match' and 'size' in message:
# To preserve buy price calculation integrity,
# matched order must be processed once (but it appears both in user and full channels)
# If this matched message is not processed yet
if self.matchOrderProcessedSequenceId != message['sequence']:
print("GDAX - on_message: current order has been matched")
newFillAverageInFiat = (self.currentOrderAverageFilledPriceInFiat*self.currentOrderFilledSizeInCrypto + float(message['size']) * float(message['price'])) / (self.currentOrderFilledSizeInCrypto + float(message['size']))
self.currentOrderFilledSizeInCrypto += float(message['size'])
print("GDAX - on_message: average order fill price updated from %s to %s" % (self.currentOrderAverageFilledPriceInFiat, newFillAverageInFiat))
print("GDAX - on_message: current order total fill quantity updated to %s" % self.currentOrderFilledSizeInCrypto)
self.currentOrderAverageFilledPriceInFiat = newFillAverageInFiat
self.matchOrderProcessedSequenceId = message['sequence']
self.currentOrderState = "MATCHED"
# Order book has been updated, retrieve best bid and ask
self.liveBestBidPrice = self.get_bid()
# print("Bid %s" % self.liveBestBidPrice)
self.liveBestAskPrice = self.get_ask()
# print("Ask %s" % self.liveBestAskPrice)
self.webSocketLock.release()
def on_close(self):
print("GDAX - WebSocket connexion closed (callback)")
self.webSocketIsOpened = False
if self.isRunning: # If we are not exiting app
if self.IsConnectedAndOperational != "Requested" and self.IsConnectedAndOperational != "Ongoing": # If we are not re-initializing connexion (like settings apply)
print("GDAX - Unexpected close of websocket. Trying to restart.")
while self.isRunning and not self.webSocketIsOpened:
print("GDAX - Restarting Websocket in 10 seconds...")
time.sleep(10)
self.startWebSocketFeed()
print("GDAX - End of on_close()")
def GDAX_GetLiveBestBidPrice(self):
self.webSocketLock.acquire()
liveBestBidPriceToReturn = self.liveBestBidPrice
self.webSocketLock.release()
return liveBestBidPriceToReturn
def GDAX_GetLiveBestAskPrice(self):
self.webSocketLock.acquire()
liveBestAskPriceToReturn = self.liveBestAskPrice
self.webSocketLock.release()
return liveBestAskPriceToReturn
    def updateRealTimePriceInBackground(self):
        """Background polling loop (runs until self.isRunning goes False).

        Each iteration: performs a connection-initialization attempt if one was
        requested, then polls the level-1 order book to refresh the tick
        bid/ask and mid-market price, and periodically refreshes account
        balances when the controller is authenticated. On shutdown, resets the
        connection text in the UI.
        """
        while self.isRunning:
            # Attempt a GDAX Initialization if requested
            if self.IsConnectedAndOperational == "Requested":
                self.IsConnectedAndOperational = "Ongoing"
                self.PerformConnexionInitializationAttempt()
            time.sleep(1) # Don't poll GDAX API too much
            self.backgroundOperationsCounter = self.backgroundOperationsCounter + 1
            # Get Middle Market Price
            # Order book level 1 : Just the highest bid and lowest sell proposal
            result = ''
            try:
                result = self.clientPublic.get_product_order_book(self.productStr, 1)
                self.tickBestBidPrice = float(result['bids'][0][0])
                self.tickBestAskPrice = float(result['asks'][0][0])
                self.midMarketPrice = (self.tickBestBidPrice + self.tickBestAskPrice) / 2
                # DEBUG
                # print("GDAX - Highest Bid: %s" % self.tickBestBidPrice)
                # print("GDAX - Lowest Ask: %s" % self.tickBestAskPrice)
                # NOTE(review): bid - ask is <= 0 in a normal book; the usual
                # spread convention is ask - bid — confirm consumers expect this sign.
                self.PriceSpread = self.tickBestBidPrice - self.tickBestAskPrice
                # print("GDAX - MiddleMarket price: %s" % self.tickBestBidPrice)
                self.theUIGraph.UIGR_updateConnexionText("Price data received from Coinbase Pro server")
                # Refresh account balances
                # Only do it if GDAX controller is OK in authenticated mode
                if self.IsConnectedAndOperational == "True":
                    # Every ~20 iterations, or immediately when explicitly requested
                    if self.backgroundOperationsCounter % 20 == 0 or self.requestAccountsBalanceUpdate:
                        self.requestAccountsBalanceUpdate = False
                        if self.IsConnectedAndOperational == "True":
                            self.refreshAccounts()
            except BaseException as e:
                # Broad catch keeps the polling thread alive through any API /
                # parsing failure; the error is only logged.
                print("GDAX - Error retrieving level 1 order book or account data")
                print("GDAX - Exception : " + str(e))
                print(result)
                self.requestAccountsBalanceUpdate = False
            # Get current Orders
            # Short sleep in 0.1 s slices so a pending balance-update request
            # can cut the wait short.
            for x in range(0, 5):
                if not self.requestAccountsBalanceUpdate:
                    time.sleep(0.1)
        self.theUIGraph.UIGR_resetConnexionText()
        # Final interruptible wait before the thread exits
        for x in range(0, 15):
            if not self.requestAccountsBalanceUpdate:
                time.sleep(0.1)
def GDAX_closeBackgroundOperations(self):
self.isRunning = False
if (self.webSocketIsOpened == True):
print("GDAX - Closing Websocket...")
self.close()
def GDAX_GetRealTimePriceInEUR(self):
return self.midMarketPrice
def GDAX_GetCurrentLimitOrderState(self):
self.webSocketLock.acquire()
currentState = self.currentOrderState
if (currentState == "FILLED"):
self.currentOrderState = "NONE"
self.webSocketLock.release()
return currentState
def GDAX_GetAveragePriceInFiatAndSizeFilledInCrypto(self):
print("GDAX - GDAX_GetAveragePriceInFiatAndSizeFilledInCrypto : AverageFilledPrice = %s, currentOrderFilledSizeInCrypo = %s" % (self.currentOrderAverageFilledPriceInFiat, self.currentOrderFilledSizeInCrypto))
return [self.currentOrderAverageFilledPriceInFiat, self.currentOrderFilledSizeInCrypto]
    def GDAX_PlaceLimitBuyOrder(self, amountToBuyInCrypto, buyPriceInFiat):
        """Place a post-only limit buy order, or simulate an immediate fill.

        Holds webSocketLock for the whole operation so the websocket callback
        cannot observe half-updated order state.

        Args:
            amountToBuyInCrypto: Order size in crypto units (rounded to 8 decimals).
            buyPriceInFiat: Limit price in the quote currency; floored to the
                product's price increment.

        Returns:
            True if the order was accepted (or simulated), False if rejected.
        """
        self.webSocketLock.acquire()
        if (theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET == True):
            print("GDAX - GDAX_PlaceLimitBuyOrder")
            # First, cancel ongoing order if any
            if (self.currentOrderState != "NONE"):
                self.INTERNAL_CancelOngoingLimitOrder()
            # Send Limit order
            amountToBuyInCrypto = round(amountToBuyInCrypto, 8)
            # Don't use round because order could be placed on the other side of the spread -> rejected
            # Price quoted in BTC: flooring step depends on the crypto
            if (self.productFiatStr == "BTC"):
                if (self.productCryptoStr == "LTC"):
                    buyPriceInFiat = math.floor(buyPriceInFiat*1000000)/1000000 # Floor to 0.000001
                else:
                    buyPriceInFiat = math.floor(buyPriceInFiat*100000)/100000 # Floor to 0.00001
            else: # Price quoted in fiat, floored to 0.01
                buyPriceInFiat = math.floor(buyPriceInFiat*100)/100
            buyRequestReturn = self.clientAuth.buy(price=str(buyPriceInFiat), size=str(amountToBuyInCrypto), product_id=self.productStr, order_type='limit', post_only=True) # with Post Only
            print("GDAX - Actual buy sent with LIMIT order set to %s. Amount is %s Crypto" % (buyPriceInFiat, amountToBuyInCrypto))
            print("GDAX - Limit order placing sent. Request return is: %s" % buyRequestReturn)
            # A post-only order can come back with an id *and* a reject_reason
            # (it would have crossed the spread), so both are checked.
            if ('id' in buyRequestReturn):
                if (not 'reject_reason' in buyRequestReturn):
                    self.currentOrderId = buyRequestReturn['id']
                    self.currentOrderState = "SUBMITTED"
                    self.currentOrderInitialSizeInCrypto = amountToBuyInCrypto
                    self.currentOrderFilledSizeInCrypto = 0
                    self.currentOrderAverageFilledPriceInFiat = 0
                    print("GDAX - Limit order state set to SUBMITTED")
                    self.webSocketLock.release()
                    return True
                else:
                    print("GDAX - Buy limit order has been interpreted as rejected. Reason: %s" % buyRequestReturn['reject_reason'])
                    self.webSocketLock.release()
                    return False
            else:
                print("GDAX - Buy limit order has been interpreted as rejected")
                self.webSocketLock.release()
                return False
        else:
            # Simulation mode: simulate immediate order fill
            self.currentOrderId = -1
            self.currentOrderFilledSizeInCrypto = float(amountToBuyInCrypto)
            self.currentOrderAverageFilledPriceInFiat = float(buyPriceInFiat)
            print("GDAX - Limit buy simulated, buy price: %s, amountToBuyInCrypto: %s" % (round(float(buyPriceInFiat), 2), float(amountToBuyInCrypto)))
            self.currentOrderState = "FILLED"
            self.webSocketLock.release()
            return True
def GDAX_PlaceLimitSellOrder(self, amountToSellInCrypto, sellPriceInFiat):
if (theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET == True):
self.webSocketLock.acquire()
# First, cancel ongoing order if any
if (self.currentOrderState != "NONE"):
self.INTERNAL_CancelOngoingLimitOrder()
# Send Limit order
amountToSellInCrypto = round(amountToSellInCrypto, 8)
# Don't use round because order could be placed on the other side of the spread -> rejected
# Prix exprimé en BTC, arrondi variable
if (self.productFiatStr == "BTC"):
if (self.productCryptoStr == "LTC"):
sellPriceInFiat = math.floor(sellPriceInFiat*1000000)/1000000 # Floor à 0.000001
else:
sellPriceInFiat = math.floor(sellPriceInFiat*100000)/100000 # Floor à 0.00001
else: # Prix exprimé en Fiat, arrondi à 0.01
sellPriceInFiat = math.floor(sellPriceInFiat*100)/100
sellRequestReturn = self.clientAuth.sell(price=str(sellPriceInFiat), size=str(amountToSellInCrypto), product_id=self.productStr, order_type='limit', post_only=True) # with Post Only
print("GDAX - Actual sell sent with LIMIT order set to %s. Amount is %s Crypto" % (sellPriceInFiat, amountToSellInCrypto))
print("GDAX - Limit order placing sent. Request return is: %s" % sellRequestReturn)
if ('id' in sellRequestReturn):
self.currentOrderId = sellRequestReturn['id']
self.currentOrderState = "SUBMITTED"
self.currentOrderInitialSizeInCrypto = amountToSellInCrypto
self.currentOrderFilledSizeInCrypto = 0
self.currentOrderAverageFilledPriceInFiat = 0
self.webSocketLock.release()
return True
else:
print("GDAX - Sell limit order has been interpreted as rejected")
self.webSocketLock.release()
return False
else:
# Simulation mode: simulate immediate order fill
self.currentOrderFilledSizeInCrypto = amountToSellInCrypto
self.currentOrderAverageFilledPriceInFiat = sellPriceInFiat
self.currentOrderState = "FILLED"
self.webSocketLock.release()
return True
# Include thread safe protection: shall be called from outside
def GDAX_CancelOngoingLimitOrder(self):
self.webSocketLock.acquire()
if (self.currentOrderId != 0):
self.currentOrderId = 0 # So that websocket won't get the cancel notification
self.currentOrderState = "NONE"
self.currentOrderInitialSizeInCrypto = 0
self.currentOrderFilledSizeInCrypto = 0
self.currentOrderAverageFilledPriceInFiat = 0
cancelAllReturn = self.clientAuth.cancel_all(self.productStr)
print("GDAX - GDAX_CancelOngoingLimitOrder: Ongoing order canceled. Request return is: %s" % cancelAllReturn)
else:
print("GDAX - GDAX_CancelOngoingLimitOrder: No order to cancel! Just filled?")
self.webSocketLock.release()
# Does not include thread safe protection: shall not be called from outside
def INTERNAL_CancelOngoingLimitOrder(self):
if (self.currentOrderId != 0):
self.currentOrderId = 0 # So that websocket won't get the cancel notification
self.currentOrderState = "NONE"
self.currentOrderInitialSizeInCrypto = 0
self.currentOrderFilledSizeInCrypto = 0
self.currentOrderAverageFilledPriceInFiat = 0
cancelAllReturn = self.clientAuth.cancel_all(self.productStr)
print("GDAX - INTERNAL_CancelOngoingLimitOrder: Ongoing order canceled. Request return is: %s" % cancelAllReturn)
else:
print("GDAX - INTERNAL_CancelOngoingLimitOrder: No order to cancel! Just filled?")
def GDAX_SendBuyOrder(self, amountToBuyInBTC):
if theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET:
if theConfig.CONFIG_ENABLE_REAL_TRANSACTIONS:
# Prepare the right amount to buy precision. Smallest GDAX unit is 0.00000001
amountToBuyInBTC = round(amountToBuyInBTC, 8)
# Send Market order
buyRequestReturn = self.clientAuth.buy(size=amountToBuyInBTC, product_id=self.productStr, order_type='market')
print("GDAX - Actual buy sent with MARKET order. Amount is %s BTC" % amountToBuyInBTC)
print("GDAX - Buy Request return is : \n %s \nGDAX - End of Request Return" % buyRequestReturn)
self.requestAccountsBalanceUpdate = True
# Check if order was successful or not depending on existence of an order ID in the request response
if 'id' in buyRequestReturn:
print("GDAX - Buy order has been interpreted as successful")
return True
else:
print("GDAX - Buy order has been interpreted as failed")
return False
def GDAX_SendSellOrder(self, amountToSellInBTC):
if theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET:
if theConfig.CONFIG_ENABLE_REAL_TRANSACTIONS:
# Prepare the right amount to sell precision. Smallest GDAX unit is 0.00000001
amountToSellInBTC = round(amountToSellInBTC, 8)
# Send Market order
sellRequestReturn = self.clientAuth.sell(size=amountToSellInBTC, product_id=self.productStr, order_type='market')
print("Actual sell sent with MARKET order. Amount is %s" % amountToSellInBTC)
print("GDAX - Sell Request return is : \n %s \nGDAX - | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
import pandas as pd
import pytest
import pytz
from eeweather import (
ISDStation,
get_isd_station_metadata,
get_isd_filenames,
get_gsod_filenames,
get_isd_file_metadata,
fetch_isd_raw_temp_data,
fetch_isd_hourly_temp_data,
fetch_isd_daily_temp_data,
fetch_gsod_raw_temp_data,
fetch_gsod_daily_temp_data,
fetch_tmy3_hourly_temp_data,
fetch_cz2010_hourly_temp_data,
get_isd_hourly_temp_data_cache_key,
get_isd_daily_temp_data_cache_key,
get_gsod_daily_temp_data_cache_key,
get_tmy3_hourly_temp_data_cache_key,
get_cz2010_hourly_temp_data_cache_key,
cached_isd_hourly_temp_data_is_expired,
cached_isd_daily_temp_data_is_expired,
cached_gsod_daily_temp_data_is_expired,
validate_isd_hourly_temp_data_cache,
validate_isd_daily_temp_data_cache,
validate_gsod_daily_temp_data_cache,
validate_tmy3_hourly_temp_data_cache,
validate_cz2010_hourly_temp_data_cache,
serialize_isd_hourly_temp_data,
serialize_isd_daily_temp_data,
serialize_gsod_daily_temp_data,
serialize_tmy3_hourly_temp_data,
serialize_cz2010_hourly_temp_data,
deserialize_isd_hourly_temp_data,
deserialize_isd_daily_temp_data,
deserialize_gsod_daily_temp_data,
deserialize_tmy3_hourly_temp_data,
deserialize_cz2010_hourly_temp_data,
read_isd_hourly_temp_data_from_cache,
read_isd_daily_temp_data_from_cache,
read_gsod_daily_temp_data_from_cache,
read_tmy3_hourly_temp_data_from_cache,
read_cz2010_hourly_temp_data_from_cache,
write_isd_hourly_temp_data_to_cache,
write_isd_daily_temp_data_to_cache,
write_gsod_daily_temp_data_to_cache,
write_tmy3_hourly_temp_data_to_cache,
write_cz2010_hourly_temp_data_to_cache,
destroy_cached_isd_hourly_temp_data,
destroy_cached_isd_daily_temp_data,
destroy_cached_gsod_daily_temp_data,
destroy_cached_tmy3_hourly_temp_data,
destroy_cached_cz2010_hourly_temp_data,
load_isd_hourly_temp_data_cached_proxy,
load_isd_daily_temp_data_cached_proxy,
load_gsod_daily_temp_data_cached_proxy,
load_tmy3_hourly_temp_data_cached_proxy,
load_cz2010_hourly_temp_data_cached_proxy,
load_isd_hourly_temp_data,
load_isd_daily_temp_data,
load_gsod_daily_temp_data,
load_tmy3_hourly_temp_data,
load_cz2010_hourly_temp_data,
load_cached_isd_hourly_temp_data,
load_cached_isd_daily_temp_data,
load_cached_gsod_daily_temp_data,
load_cached_tmy3_hourly_temp_data,
load_cached_cz2010_hourly_temp_data,
)
from eeweather.exceptions import (
UnrecognizedUSAFIDError,
ISDDataNotAvailableError,
GSODDataNotAvailableError,
TMY3DataNotAvailableError,
CZ2010DataNotAvailableError,
NonUTCTimezoneInfoError,
)
from eeweather.testing import (
MockNOAAFTPConnectionProxy,
MockKeyValueStoreProxy,
mock_request_text_tmy3,
mock_request_text_cz2010,
)
@pytest.fixture
def monkeypatch_noaa_ftp(monkeypatch):
    """Patch the NOAA FTP connection proxy so tests never touch the real FTP server."""
    monkeypatch.setattr(
        "eeweather.connections.noaa_ftp_connection_proxy", MockNOAAFTPConnectionProxy()
    )
@pytest.fixture
def monkeypatch_tmy3_request(monkeypatch):
    """Serve canned TMY3 response text instead of issuing HTTP requests."""
    monkeypatch.setattr("eeweather.mockable.request_text", mock_request_text_tmy3)
@pytest.fixture
def monkeypatch_cz2010_request(monkeypatch):
    """Serve canned CZ2010 response text instead of issuing HTTP requests."""
    monkeypatch.setattr("eeweather.mockable.request_text", mock_request_text_cz2010)
@pytest.fixture
def monkeypatch_key_value_store(monkeypatch):
    """Replace the key-value cache with an in-memory mock; returns the mock store."""
    key_value_store_proxy = MockKeyValueStoreProxy()
    monkeypatch.setattr(
        "eeweather.connections.key_value_store_proxy", key_value_store_proxy
    )
    return key_value_store_proxy.get_store()
# -- ISD station metadata ----------------------------------------------------
def test_get_isd_station_metadata():
    """Metadata lookup for a known USAF ID returns the raw string-valued record."""
    assert get_isd_station_metadata("722874") == {
        "ba_climate_zone": "Hot-Dry",
        "ca_climate_zone": "CA_08",
        "elevation": "+0054.6",
        "icao_code": "KCQT",
        "iecc_climate_zone": "3",
        "iecc_moisture_regime": "B",
        "latitude": "+34.024",
        "longitude": "-118.291",
        "name": "DOWNTOWN L.A./USC CAMPUS",
        "quality": "high",
        "recent_wban_id": "93134",
        "state": "CA",
        "usaf_id": "722874",
        "wban_ids": "93134",
    }
def test_isd_station_no_load_metadata():
    """With load_metadata=False every metadata attribute stays unset (None/empty)."""
    station = ISDStation("722880", load_metadata=False)
    assert station.usaf_id == "722880"
    assert station.iecc_climate_zone is None
    assert station.iecc_moisture_regime is None
    assert station.ba_climate_zone is None
    assert station.ca_climate_zone is None
    assert station.elevation is None
    assert station.latitude is None
    assert station.longitude is None
    assert station.coords is None
    assert station.name is None
    assert station.quality is None
    assert station.wban_ids is None
    assert station.recent_wban_id is None
    assert station.climate_zones == {}
    assert str(station) == "722880"
    assert repr(station) == "ISDStation('722880')"
def test_isd_station_no_load_metadata_invalid():
    """An unknown USAF ID is rejected even when metadata loading is skipped."""
    with pytest.raises(UnrecognizedUSAFIDError):
        station = ISDStation("FAKE", load_metadata=False)
def test_isd_station_with_load_metadata():
    """With load_metadata=True metadata is parsed into typed attributes."""
    station = ISDStation("722880", load_metadata=True)
    assert station.usaf_id == "722880"
    assert station.iecc_climate_zone == "3"
    assert station.iecc_moisture_regime == "B"
    assert station.ba_climate_zone == "Hot-Dry"
    assert station.ca_climate_zone == "CA_09"
    assert station.elevation == 236.2
    assert station.icao_code == "KBUR"
    assert station.latitude == 34.201
    assert station.longitude == -118.358
    assert station.coords == (34.201, -118.358)
    assert station.name == "<NAME>"
    assert station.quality == "high"
    assert station.wban_ids == ["23152", "99999"]
    assert station.recent_wban_id == "23152"
    assert station.climate_zones == {
        "ba_climate_zone": "Hot-Dry",
        "ca_climate_zone": "CA_09",
        "iecc_climate_zone": "3",
        "iecc_moisture_regime": "B",
    }
def test_isd_station_json():
    """json() serializes the typed metadata, nesting the climate zones."""
    station = ISDStation("722880", load_metadata=True)
    assert station.json() == {
        "elevation": 236.2,
        "icao_code": "KBUR",
        "latitude": 34.201,
        "longitude": -118.358,
        "name": "<NAME>",
        "quality": "high",
        "recent_wban_id": "23152",
        "wban_ids": ["23152", "99999"],
        "climate_zones": {
            "ba_climate_zone": "Hot-Dry",
            "ca_climate_zone": "CA_09",
            "iecc_climate_zone": "3",
            "iecc_moisture_regime": "B",
        },
    }
def test_isd_station_unrecognized_usaf_id():
    """An unknown USAF ID raises when metadata loading is requested."""
    with pytest.raises(UnrecognizedUSAFIDError):
        station = ISDStation("FAKE", load_metadata=True)
# -- ISD / GSOD filename construction ----------------------------------------
def test_get_isd_filenames_bad_usaf_id():
    """Unknown USAF ID raises, carrying the offending ID on the exception."""
    with pytest.raises(UnrecognizedUSAFIDError) as excinfo:
        get_isd_filenames("000000", 2007)
    assert excinfo.value.value == "000000"
def test_get_isd_filenames_single_year(snapshot):
    """ISD filenames for one year match the stored snapshot."""
    filenames = get_isd_filenames("722860", 2007)
    snapshot.assert_match(filenames, "filenames")
def test_get_isd_filenames_multiple_year(snapshot):
    """Omitting the year yields filenames for all available years."""
    filenames = get_isd_filenames("722860")
    snapshot.assert_match(filenames, "filenames")
def test_get_isd_filenames_future_year():
    """A future year still produces a well-formed (speculative) path."""
    filenames = get_isd_filenames("722860", 2050)
    assert filenames == ["/pub/data/noaa/2050/722860-23119-2050.gz"]
def test_get_isd_filenames_with_host():
    """with_host=True prefixes the NOAA FTP host."""
    filenames = get_isd_filenames("722860", 2017, with_host=True)
    assert filenames == [
        "ftp://ftp.ncdc.noaa.gov/pub/data/noaa/2017/722860-23119-2017.gz"
    ]
def test_isd_station_get_isd_filenames(snapshot):
    """ISDStation method mirrors the module-level multi-year lookup."""
    station = ISDStation("722860")
    filenames = station.get_isd_filenames()
    snapshot.assert_match(filenames, "filenames")
def test_isd_station_get_isd_filenames_with_year(snapshot):
    """ISDStation method mirrors the module-level single-year lookup."""
    station = ISDStation("722860")
    filenames = station.get_isd_filenames(2007)
    snapshot.assert_match(filenames, "filenames")
def test_isd_station_get_isd_filenames_with_host():
    """ISDStation method honors with_host=True."""
    station = ISDStation("722860")
    filenames = station.get_isd_filenames(2017, with_host=True)
    assert filenames == [
        "ftp://ftp.ncdc.noaa.gov/pub/data/noaa/2017/722860-23119-2017.gz"
    ]
def test_get_gsod_filenames_bad_usaf_id():
    """Unknown USAF ID raises for GSOD lookups too."""
    with pytest.raises(UnrecognizedUSAFIDError) as excinfo:
        get_gsod_filenames("000000", 2007)
    assert excinfo.value.value == "000000"
def test_get_gsod_filenames_single_year(snapshot):
    """GSOD filenames for one year match the stored snapshot."""
    filenames = get_gsod_filenames("722860", 2007)
    snapshot.assert_match(filenames, "filenames")
def test_get_gsod_filenames_multiple_year(snapshot):
    """Omitting the year yields GSOD filenames for all available years."""
    filenames = get_gsod_filenames("722860")
    snapshot.assert_match(filenames, "filenames")
def test_get_gsod_filenames_future_year():
    """A future year still produces a well-formed GSOD path."""
    filenames = get_gsod_filenames("722860", 2050)
    assert filenames == ["/pub/data/gsod/2050/722860-23119-2050.op.gz"]
def test_get_gsod_filenames_with_host():
    """with_host=True prefixes the NOAA FTP host for GSOD paths."""
    filenames = get_gsod_filenames("722860", 2017, with_host=True)
    assert filenames == [
        "ftp://ftp.ncdc.noaa.gov/pub/data/gsod/2017/722860-23119-2017.op.gz"
    ]
def test_isd_station_get_gsod_filenames(snapshot):
    """ISDStation method mirrors the module-level GSOD multi-year lookup."""
    station = ISDStation("722860")
    filenames = station.get_gsod_filenames()
    snapshot.assert_match(filenames, "filenames")
def test_isd_station_get_gsod_filenames_with_year(snapshot):
    """ISDStation method mirrors the module-level GSOD single-year lookup."""
    station = ISDStation("722860")
    filenames = station.get_gsod_filenames(2007)
    snapshot.assert_match(filenames, "filenames")
def test_isd_station_get_gsod_filenames_with_host():
    """ISDStation GSOD method honors with_host=True."""
    station = ISDStation("722860")
    filenames = station.get_gsod_filenames(2017, with_host=True)
    assert filenames == [
        "ftp://ftp.ncdc.noaa.gov/pub/data/gsod/2017/722860-23119-2017.op.gz"
    ]
def test_get_isd_file_metadata():
    """File metadata lists one record per year 2006-2018; unknown IDs raise."""
    expected = [
        {"usaf_id": "722874", "wban_id": "93134", "year": str(year)}
        for year in range(2006, 2019)
    ]
    assert get_isd_file_metadata("722874") == expected
    with pytest.raises(UnrecognizedUSAFIDError) as excinfo:
        get_isd_file_metadata("000000")
    assert excinfo.value.value == "000000"
def test_isd_station_get_isd_file_metadata():
    """ISDStation method returns the same per-year records as the module function."""
    station = ISDStation("722874")
    expected = [
        {"usaf_id": "722874", "wban_id": "93134", "year": str(year)}
        for year in range(2006, 2019)
    ]
    assert station.get_isd_file_metadata() == expected
# fetch raw
def test_fetch_isd_raw_temp_data(monkeypatch_noaa_ftp):
    """Raw ISD fetch from the mocked FTP returns the expected series."""
    data = fetch_isd_raw_temp_data("722874", 2007)
    assert round(data.sum()) == 185945
    assert data.shape == (11094,)
def test_fetch_gsod_raw_temp_data(monkeypatch_noaa_ftp):
    """Raw GSOD fetch from the mocked FTP returns one value per day."""
    data = fetch_gsod_raw_temp_data("722874", 2007)
    assert data.sum() == 6509.5
    assert data.shape == (365,)
# station fetch raw
def test_isd_station_fetch_isd_raw_temp_data(monkeypatch_noaa_ftp):
    """ISDStation raw ISD fetch matches the module-level result."""
    station = ISDStation("722874")
    data = station.fetch_isd_raw_temp_data(2007)
    assert round(data.sum()) == 185945
    assert data.shape == (11094,)
def test_isd_station_fetch_gsod_raw_temp_data(monkeypatch_noaa_ftp):
    """ISDStation raw GSOD fetch matches the module-level result."""
    station = ISDStation("722874")
    data = station.fetch_gsod_raw_temp_data(2007)
    assert data.sum() == 6509.5
    assert data.shape == (365,)
# fetch raw invalid station
def test_fetch_isd_raw_temp_data_invalid_station():
    """Unknown station ID raises before any network access."""
    with pytest.raises(UnrecognizedUSAFIDError):
        fetch_isd_raw_temp_data("INVALID", 2007)
def test_fetch_gsod_raw_temp_data_invalid_station():
    """Unknown station ID raises for GSOD as well."""
    with pytest.raises(UnrecognizedUSAFIDError):
        fetch_gsod_raw_temp_data("INVALID", 2007)
# fetch raw invalid year
def test_fetch_isd_raw_temp_data_invalid_year(monkeypatch_noaa_ftp):
    """A year with no ISD data raises ISDDataNotAvailableError."""
    with pytest.raises(ISDDataNotAvailableError):
        fetch_isd_raw_temp_data("722874", 1800)
def test_fetch_gsod_raw_temp_data_invalid_year(monkeypatch_noaa_ftp):
    """A year with no GSOD data raises GSODDataNotAvailableError."""
    with pytest.raises(GSODDataNotAvailableError):
        fetch_gsod_raw_temp_data("722874", 1800)
# fetch file full of nans
def test_isd_station_fetch_isd_raw_temp_data_all_nan(monkeypatch_noaa_ftp):
    """A file of all-NaN readings still parses; the NaN-sum rounds to 0."""
    station = ISDStation("994035")
    data = station.fetch_isd_raw_temp_data(2013)
    assert round(data.sum()) == 0
    assert data.shape == (8611,)
# fetch
def test_fetch_isd_hourly_temp_data(monkeypatch_noaa_ftp):
    """Hourly ISD fetch resamples raw data onto a full 8760-hour year."""
    data = fetch_isd_hourly_temp_data("722874", 2007)
    assert data.sum() == 156160.0355
    assert data.shape == (8760,)
def test_fetch_isd_daily_temp_data(monkeypatch_noaa_ftp):
    """Daily ISD fetch aggregates to one value per day."""
    data = fetch_isd_daily_temp_data("722874", 2007)
    assert data.sum() == 6510.002260821784
    assert data.shape == (365,)
def test_fetch_gsod_daily_temp_data(monkeypatch_noaa_ftp):
    """Daily GSOD fetch returns one value per day."""
    data = fetch_gsod_daily_temp_data("722874", 2007)
    assert data.sum() == 6509.5
    assert data.shape == (365,)
def test_fetch_tmy3_hourly_temp_data(monkeypatch_tmy3_request):
    """TMY3 normal-year fetch returns a full 8760-hour series."""
    data = fetch_tmy3_hourly_temp_data("722880")
    assert data.sum() == 156194.3
    assert data.shape == (8760,)
def test_fetch_cz2010_hourly_temp_data(monkeypatch_cz2010_request):
    """CZ2010 normal-year fetch returns a full 8760-hour series."""
    data = fetch_cz2010_hourly_temp_data("722880")
    assert data.sum() == 153430.90000000002
    assert data.shape == (8760,)
# station fetch
def test_isd_station_fetch_isd_hourly_temp_data(monkeypatch_noaa_ftp):
    """ISDStation hourly ISD fetch matches the module-level result."""
    station = ISDStation("722874")
    data = station.fetch_isd_hourly_temp_data(2007)
    assert data.sum() == 156160.0355
    assert data.shape == (8760,)
def test_isd_station_fetch_isd_daily_temp_data(monkeypatch_noaa_ftp):
    """ISDStation daily ISD fetch matches the module-level result."""
    station = ISDStation("722874")
    data = station.fetch_isd_daily_temp_data(2007)
    assert data.sum() == 6510.002260821784
    assert data.shape == (365,)
def test_isd_station_fetch_gsod_daily_temp_data(monkeypatch_noaa_ftp):
    """ISDStation daily GSOD fetch matches the module-level result."""
    station = ISDStation("722874")
    data = station.fetch_gsod_daily_temp_data(2007)
    assert data.sum() == 6509.5
    assert data.shape == (365,)
def test_tmy3_station_hourly_temp_data(monkeypatch_tmy3_request):
    """ISDStation TMY3 fetch matches the module-level result."""
    station = ISDStation("722880")
    data = station.fetch_tmy3_hourly_temp_data()
    assert data.sum() == 156194.3
    assert data.shape == (8760,)
def test_cz2010_station_hourly_temp_data(monkeypatch_cz2010_request):
    """ISDStation CZ2010 fetch matches the module-level result."""
    station = ISDStation("722880")
    data = station.fetch_cz2010_hourly_temp_data()
    assert data.sum() == 153430.90000000002
    assert data.shape == (8760,)
# fetch invalid station
def test_fetch_isd_hourly_temp_data_invalid():
    """Unknown station ID raises for hourly ISD fetches."""
    with pytest.raises(UnrecognizedUSAFIDError):
        fetch_isd_hourly_temp_data("INVALID", 2007)
def test_fetch_isd_daily_temp_data_invalid():
    """Unknown station ID raises for daily ISD fetches."""
    with pytest.raises(UnrecognizedUSAFIDError):
        fetch_isd_daily_temp_data("INVALID", 2007)
def test_fetch_gsod_daily_temp_data_invalid():
    """Unknown station ID raises for daily GSOD fetches."""
    with pytest.raises(UnrecognizedUSAFIDError):
        fetch_gsod_daily_temp_data("INVALID", 2007)
def test_fetch_tmy3_hourly_temp_data_invalid():
    """Unknown station ID raises TMY3DataNotAvailableError."""
    with pytest.raises(TMY3DataNotAvailableError):
        fetch_tmy3_hourly_temp_data("INVALID")
def test_fetch_cz2010_hourly_temp_data_invalid():
    """Unknown station ID raises CZ2010DataNotAvailableError."""
    with pytest.raises(CZ2010DataNotAvailableError):
        fetch_cz2010_hourly_temp_data("INVALID")
def test_fetch_tmy3_hourly_temp_data_not_in_tmy3_list(monkeypatch_noaa_ftp):
    """A valid ISD station without TMY3 coverage raises on the TMY3 fetch."""
    data = fetch_isd_hourly_temp_data("722874", 2007)
    assert data.sum() == 156160.0355
    assert data.shape == (8760,)
    with pytest.raises(TMY3DataNotAvailableError):
        fetch_tmy3_hourly_temp_data("722874")
def test_fetch_cz2010_hourly_temp_data_not_in_cz2010_list(monkeypatch_cz2010_request):
    """A station outside the CZ2010 list raises on the CZ2010 fetch."""
    data = fetch_cz2010_hourly_temp_data("722880")
    assert data.sum() == 153430.90000000002
    assert data.shape == (8760,)
    with pytest.raises(CZ2010DataNotAvailableError):
        fetch_cz2010_hourly_temp_data("725340")
# get cache key
def test_get_isd_hourly_temp_data_cache_key():
    """Hourly ISD cache keys are '<source>-<freq>-<usaf_id>-<year>'."""
    assert (
        get_isd_hourly_temp_data_cache_key("722874", 2007) == "isd-hourly-722874-2007"
    )
def test_get_isd_daily_temp_data_cache_key():
    """Daily ISD cache keys follow the same pattern with 'daily'."""
    assert get_isd_daily_temp_data_cache_key("722874", 2007) == "isd-daily-722874-2007"
def test_get_gsod_daily_temp_data_cache_key():
    """GSOD daily cache keys use the 'gsod' source prefix."""
    assert (
        get_gsod_daily_temp_data_cache_key("722874", 2007) == "gsod-daily-722874-2007"
    )
def test_get_tmy3_hourly_temp_data_cache_key():
    """TMY3 cache keys omit the year (normal-year data)."""
    assert get_tmy3_hourly_temp_data_cache_key("722880") == "tmy3-hourly-722880"
def test_get_cz2010_hourly_temp_data_cache_key():
    """CZ2010 cache keys omit the year (normal-year data)."""
    assert get_cz2010_hourly_temp_data_cache_key("722880") == "cz2010-hourly-722880"
# station get cache key
def test_isd_station_get_isd_hourly_temp_data_cache_key():
    """ISDStation method yields the same hourly ISD cache key."""
    station = ISDStation("722874")
    assert station.get_isd_hourly_temp_data_cache_key(2007) == "isd-hourly-722874-2007"
def test_isd_station_get_isd_daily_temp_data_cache_key():
    """ISDStation method yields the same daily ISD cache key."""
    station = ISDStation("722874")
    assert station.get_isd_daily_temp_data_cache_key(2007) == "isd-daily-722874-2007"
def test_isd_station_get_gsod_daily_temp_data_cache_key():
    """ISDStation method yields the same GSOD cache key."""
    station = ISDStation("722874")
    assert station.get_gsod_daily_temp_data_cache_key(2007) == "gsod-daily-722874-2007"
def test_tmy3_station_get_isd_hourly_temp_data_cache_key():
    """ISDStation method yields the same TMY3 cache key."""
    station = ISDStation("722880")
    assert station.get_tmy3_hourly_temp_data_cache_key() == "tmy3-hourly-722880"
def test_cz2010_station_get_isd_hourly_temp_data_cache_key():
    """ISDStation method yields the same CZ2010 cache key."""
    station = ISDStation("722880")
    assert station.get_cz2010_hourly_temp_data_cache_key() == "cz2010-hourly-722880"
# cache expired empty
def test_cached_isd_hourly_temp_data_is_expired_empty(monkeypatch_key_value_store):
    """An empty cache counts as expired for hourly ISD data."""
    assert cached_isd_hourly_temp_data_is_expired("722874", 2007) is True
def test_cached_isd_daily_temp_data_is_expired_empty(monkeypatch_key_value_store):
    """An empty cache counts as expired for daily ISD data."""
    assert cached_isd_daily_temp_data_is_expired("722874", 2007) is True
def test_cached_gsod_daily_temp_data_is_expired_empty(monkeypatch_key_value_store):
    """An empty cache counts as expired for GSOD data."""
    assert cached_gsod_daily_temp_data_is_expired("722874", 2007) is True
# station cache expired empty
def test_isd_station_cached_isd_hourly_temp_data_is_expired_empty(
    monkeypatch_key_value_store
):
    """ISDStation method mirrors the empty-cache expiry for hourly ISD data."""
    station = ISDStation("722874")
    assert station.cached_isd_hourly_temp_data_is_expired(2007) is True
def test_isd_station_cached_isd_daily_temp_data_is_expired_empty(
    monkeypatch_key_value_store
):
    """ISDStation method mirrors the empty-cache expiry for daily ISD data."""
    station = ISDStation("722874")
    assert station.cached_isd_daily_temp_data_is_expired(2007) is True
def test_isd_station_cached_gsod_daily_temp_data_is_expired_empty(
    monkeypatch_key_value_store
):
    """ISDStation method mirrors the empty-cache expiry for GSOD data."""
    station = ISDStation("722874")
    assert station.cached_gsod_daily_temp_data_is_expired(2007) is True
# cache expired false
def test_cached_isd_hourly_temp_data_is_expired_false(
    monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
    """Freshly cached hourly ISD data is not expired."""
    load_isd_hourly_temp_data_cached_proxy("722874", 2007)
    assert cached_isd_hourly_temp_data_is_expired("722874", 2007) is False
def test_cached_isd_daily_temp_data_is_expired_false(
    monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
    """Freshly cached daily ISD data is not expired."""
    load_isd_daily_temp_data_cached_proxy("722874", 2007)
    assert cached_isd_daily_temp_data_is_expired("722874", 2007) is False
def test_cached_gsod_daily_temp_data_is_expired_false(
    monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
    """Freshly cached GSOD data is not expired."""
    load_gsod_daily_temp_data_cached_proxy("722874", 2007)
    assert cached_gsod_daily_temp_data_is_expired("722874", 2007) is False
# cache expired true
def test_cached_isd_hourly_temp_data_is_expired_true(
    monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
    """Back-dating the cache row's updated timestamp makes the entry expired."""
    load_isd_hourly_temp_data_cached_proxy("722874", 2007)
    # manually expire key value item
    key = get_isd_hourly_temp_data_cache_key("722874", 2007)
    store = monkeypatch_key_value_store
    store.items.update().where(store.items.c.key == key).values(
        updated=pytz.UTC.localize(datetime(2007, 3, 3))
    ).execute()
    assert cached_isd_hourly_temp_data_is_expired("722874", 2007) is True
def test_cached_isd_daily_temp_data_is_expired_true(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_daily_temp_data_cached_proxy("722874", 2007)
# | |
import numpy as np
>>> from vectorbt.signals.nb import generate_stop_ex_nb
>>> entries = np.asarray([False, True, False, False, False])[:, None]
>>> ts = np.asarray([1, 2, 3, 2, 1])[:, None]
>>> generate_stop_ex_nb(entries, ts, -0.1, True, 1, True, True)
array([[False],
[False],
[False],
[ True],
[False]])
>>> generate_stop_ex_nb(entries, ts, 0.1, False, 1, True, True)
array([[False],
[False],
[ True],
[False],
[False]])
```
"""
temp_idx_arr = np.empty((entries.shape[0],), dtype=np.int_)
return generate_ex_nb(
entries,
wait,
until_next,
skip_until_exit,
pick_first,
stop_choice_nb,
ts,
stop,
trailing,
wait,
pick_first,
temp_idx_arr,
flex_2d
)
@njit
def generate_stop_enex_nb(entries: tp.Array2d,
                          ts: tp.Array,
                          stop: tp.MaybeArray[float],
                          trailing: tp.MaybeArray[bool],
                          entry_wait: int,
                          exit_wait: int,
                          pick_first: bool,
                          flex_2d: bool) -> tp.Tuple[tp.Array2d, tp.Array2d]:
    """Generate one after another using `generate_enex_nb` and `stop_choice_nb`.
    Returns two arrays: new entries and exits.
    !!! note
        Has the same logic as calling `generate_stop_ex_nb` with `skip_until_exit=True`, but
        removes all entries that come before the next exit."""
    # Scratch index buffer reused by stop_choice_nb; sized to the number of rows.
    temp_idx_arr = np.empty((entries.shape[0],), dtype=np.int_)
    # Alternate entry/exit generation: entries are picked by first_choice_nb
    # (first True in `entries`), exits by stop_choice_nb when the (trailing)
    # stop level on `ts` is hit.
    return generate_enex_nb(
        entries.shape,
        entry_wait,
        exit_wait,
        True,
        pick_first,
        first_choice_nb, (entries,),
        stop_choice_nb, (ts, stop, trailing, exit_wait, pick_first, temp_idx_arr, flex_2d)
    )
@njit(cache=True)
def ohlc_stop_choice_nb(from_i: int,
                        to_i: int,
                        col: int,
                        open: tp.ArrayLike,
                        high: tp.ArrayLike,
                        low: tp.ArrayLike,
                        close: tp.ArrayLike,
                        stop_price_out: tp.Array2d,
                        stop_type_out: tp.Array2d,
                        sl_stop: tp.MaybeArray[float],
                        sl_trail: tp.MaybeArray[bool],
                        tp_stop: tp.MaybeArray[float],
                        reverse: tp.MaybeArray[bool],
                        is_open_safe: bool,
                        wait: int,
                        pick_first: bool,
                        temp_idx_arr: tp.Array1d,
                        flex_2d: bool) -> tp.Array1d:
    """`choice_func_nb` that returns the indices of the stop price being hit within OHLC.
    Compared to `stop_choice_nb`, takes into account the whole bar, can check for both
    (trailing) stop loss and take profit simultaneously, and tracks hit price and stop type.
    !!! note
        We don't have intra-candle data. If there was a huge price fluctuation in both directions,
        we can't determine whether SL was triggered before TP and vice versa. So some assumptions
        need to be made: 1) trailing stop can only be based on previous close/high, and
        2) we pessimistically assume that SL comes before TP.
    Args:
        col (int): Current column.
        from_i (int): Index to start generation from (inclusive).
        to_i (int): Index to run generation to (exclusive).
        open (array of float): Entry price such as open or previous close.
        high (array of float): High price.
        low (array of float): Low price.
        close (array of float): Close price.
        stop_price_out (array of float): Array where hit price of each exit will be stored.
        stop_type_out (array of int): Array where stop type of each exit will be stored.
            0 for stop loss, 1 for take profit.
        sl_stop (float or array_like): Percentage value for stop loss.
            Can be per frame, column, row, or element-wise. Set to `np.nan` to disable.
        sl_trail (bool or array_like): Whether `sl_stop` is trailing.
            Can be per frame, column, row, or element-wise. Set to False to disable.
        tp_stop (float or array_like): Percentage value for take profit.
            Can be per frame, column, row, or element-wise. Set to `np.nan` to disable.
        reverse (bool or array_like): Whether to do the opposite, i.e.: prices are followed downwards.
        is_open_safe (bool): Whether entry price comes right at or before open.
            If True and wait is 0, can use high/low at entry bar. Otherwise uses only close.
        wait (int): Number of ticks to wait before placing exits.
            Setting False or 0 may result in entry and exit signal at one bar.
            !!! note
                If `wait` is greater than 0, even with `is_open_safe` set to True,
                trailing stop won't update at bars that come before `from_i`.
        pick_first (bool): Whether to stop as soon as the first exit signal is found.
        temp_idx_arr (array of int): Empty integer array used to temporarily store indices.
        flex_2d (bool): See `vectorbt.base.reshape_fns.flex_select_auto_nb`.
    """
    # Resolve all stop parameters once, at the bar the entry occurred on
    # (from_i - wait); they stay fixed for the lifetime of this position.
    init_i = from_i - wait
    init_open = flex_select_auto_nb(open, init_i, col, flex_2d)
    init_sl_stop = flex_select_auto_nb(np.asarray(sl_stop), init_i, col, flex_2d)
    if init_sl_stop < 0:
        raise ValueError("Stop value must be 0 or greater")
    init_sl_trail = flex_select_auto_nb(np.asarray(sl_trail), init_i, col, flex_2d)
    init_tp_stop = flex_select_auto_nb(np.asarray(tp_stop), init_i, col, flex_2d)
    if init_tp_stop < 0:
        raise ValueError("Stop value must be 0 or greater")
    init_reverse = flex_select_auto_nb(np.asarray(reverse), init_i, col, flex_2d)
    # Running extrema since entry; drive the trailing stop.
    max_p = min_p = init_open
    j = 0  # number of exit indices collected into temp_idx_arr
    for i in range(from_i, to_i):
        # Resolve current bar
        _open = flex_select_auto_nb(open, i, col, flex_2d)
        _high = flex_select_auto_nb(high, i, col, flex_2d)
        _low = flex_select_auto_nb(low, i, col, flex_2d)
        _close = flex_select_auto_nb(close, i, col, flex_2d)
        # Fill missing OHLC fields from the prices that are present.
        if np.isnan(_open):
            _open = _close
        if np.isnan(_low):
            _low = min(_open, _close)
        if np.isnan(_high):
            _high = max(_open, _close)
        # Calculate stop price
        if not np.isnan(init_sl_stop):
            if init_sl_trail:
                # Trailing stop follows the best price seen so far.
                if init_reverse:
                    curr_sl_stop_price = min_p * (1 + init_sl_stop)
                else:
                    curr_sl_stop_price = max_p * (1 - init_sl_stop)
            else:
                if init_reverse:
                    curr_sl_stop_price = init_open * (1 + init_sl_stop)
                else:
                    curr_sl_stop_price = init_open * (1 - init_sl_stop)
        if not np.isnan(init_tp_stop):
            if init_reverse:
                curr_tp_stop_price = init_open * (1 - init_tp_stop)
            else:
                curr_tp_stop_price = init_open * (1 + init_tp_stop)
        # Check if stop price is within bar
        if i > init_i or is_open_safe:
            # is_open_safe means open is either open or any other price before it
            # so it's safe to use high/low at entry bar
            curr_high = _high
            curr_low = _low
        else:
            # Otherwise, we can only use close price at entry bar
            curr_high = curr_low = _close
        exit_signal = False
        # SL is checked first: we pessimistically assume it fires before TP
        # when both fall within the same bar (see note above).
        if not np.isnan(init_sl_stop):
            if (not init_reverse and curr_low <= curr_sl_stop_price) or \
                    (init_reverse and curr_high >= curr_sl_stop_price):
                exit_signal = True
                stop_price_out[i, col] = curr_sl_stop_price
                if init_sl_trail:
                    stop_type_out[i, col] = StopType.TrailStop
                else:
                    stop_type_out[i, col] = StopType.StopLoss
        if not exit_signal and not np.isnan(init_tp_stop):
            if (not init_reverse and curr_high >= curr_tp_stop_price) or \
                    (init_reverse and curr_low <= curr_tp_stop_price):
                exit_signal = True
                stop_price_out[i, col] = curr_tp_stop_price
                stop_type_out[i, col] = StopType.TakeProfit
        if exit_signal:
            temp_idx_arr[j] = i
            j += 1
            if pick_first:
                return temp_idx_arr[:1]
        # Keep track of highest high if trailing
        if init_sl_trail:
            if curr_low < min_p:
                min_p = curr_low
            if curr_high > max_p:
                max_p = curr_high
    return temp_idx_arr[:j]
@njit
def generate_ohlc_stop_ex_nb(entries: tp.Array2d,
                             open: tp.ArrayLike,
                             high: tp.ArrayLike,
                             low: tp.ArrayLike,
                             close: tp.ArrayLike,
                             stop_price_out: tp.Array2d,
                             stop_type_out: tp.Array2d,
                             sl_stop: tp.MaybeArray[float],
                             sl_trail: tp.MaybeArray[bool],
                             tp_stop: tp.MaybeArray[float],
                             reverse: tp.MaybeArray[bool],
                             is_open_safe: bool,
                             wait: int,
                             until_next: bool,
                             skip_until_exit: bool,
                             pick_first: bool,
                             flex_2d: bool) -> tp.Array2d:
    """Generate stop exits from OHLC data using `generate_ex_nb` with `ohlc_stop_choice_nb`.

    `stop_price_out` and `stop_type_out` are filled in place with the hit price and the
    `StopType` of each exit; see `ohlc_stop_choice_nb` for the semantics of every argument.

    !!! note
        With `is_open_safe=True` and `wait=0`, an exit may be placed within the same bar
        as its entry, because the entry bar's high/low can then be safely consulted.
        With `is_open_safe=False`, only the close of the entry bar is used, so an exit
        that would have triggered intrabar is deferred to a later bar."""
    # Scratch buffer reused by the choice function to collect exit indices.
    idx_scratch = np.empty((entries.shape[0],), dtype=np.int_)
    return generate_ex_nb(
        entries,
        wait,
        until_next,
        skip_until_exit,
        pick_first,
        ohlc_stop_choice_nb,
        open, high, low, close,
        stop_price_out, stop_type_out,
        sl_stop, sl_trail, tp_stop, reverse,
        is_open_safe, wait, pick_first,
        idx_scratch, flex_2d
    )
@njit
def generate_ohlc_stop_enex_nb(entries: tp.Array2d,
open: tp.ArrayLike,
high: tp.ArrayLike,
low: tp.ArrayLike,
close: tp.ArrayLike,
stop_price_out: tp.Array2d,
stop_type_out: tp.Array2d,
sl_stop: tp.MaybeArray[float],
sl_trail: tp.MaybeArray[bool],
tp_stop: tp.MaybeArray[float],
reverse: tp.MaybeArray[bool],
is_open_safe: bool,
entry_wait: int,
exit_wait: int,
pick_first: bool,
flex_2d: bool) -> tp.Tuple[tp.Array2d, tp.Array2d]:
"""Generate one after another using `generate_enex_nb` and `ohlc_stop_choice_nb`.
Returns two arrays: new entries and exits.
!!! note
Has the same logic as calling `generate_ohlc_stop_ex_nb` with `skip_until_exit=True`, but
removes all entries that come before the next exit."""
temp_idx_arr = np.empty((entries.shape[0],), dtype=np.int_)
return generate_enex_nb(
entries.shape,
entry_wait,
exit_wait,
True,
pick_first,
first_choice_nb, (entries,),
ohlc_stop_choice_nb, (
open,
high,
low,
close,
| |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from test_data import generate_test
from time import time
from sklearn.svm import OneClassSVM
from admm_graph_OP_tweak import GOP
from Simple_GOP import SGOP
from outlier_pursuit import outlier_pursuit
from ae import get_ae_losses
from vae import get_vae_losses
from sklearn.ensemble import IsolationForest
from gru import get_GRU_os, get_LSTM_os
from sklearn import metrics
from sklearn.cluster import DBSCAN
from sklearn.mixture import GaussianMixture
from var import get_VAR_OS
import os
import stopit
import datetime
plt.rcParams.update({'font.size': 18})
class TimeoutException(Exception):
    """Raised when an algorithm exceeds its allotted time budget."""

    def __init__(self, time):
        # Keep the exact original message format.
        super().__init__('timeout after {}s'.format(time))
def ese(pred, target):
    """Element-wise squared error between predictions and targets: (pred - target) ** 2."""
    return (pred - target) ** 2
def OLS_err(X_train, y_train, X, y):
    """Fit ordinary least squares on the train split, then return the
    element-wise squared error over the whole dataset (X, y)."""
    model = linear_model.LinearRegression()
    model.fit(X_train, y_train)
    predictions = model.predict(X)
    return ese(predictions, y)
def ridge_err(X_train, y_train, X, y):
    """Fit ridge regression on the train split, then return the
    element-wise squared error over the whole dataset (X, y)."""
    model = linear_model.Ridge()
    model.fit(X_train, y_train)
    predictions = model.predict(X)
    return ese(predictions, y)
def lasso_err(X_train, y_train, X, y):
    """Fit lasso regression on the train split, then return the
    element-wise squared error over the whole dataset (X, y)."""
    model = linear_model.Lasso()
    model.fit(X_train, y_train)
    predictions = model.predict(X)
    return ese(predictions, y)
def get_reg_os(X):
    """Leave-one-feature-out OLS outlier scores.

    For each column i, regresses column i on the remaining columns (fit on a
    random train split) and accumulates the element-wise squared prediction
    error per sample over the full dataset.

    Returns an array of length n: the accumulated error divided by n.
    """
    n, p = X.shape
    err_sum = np.zeros(n)
    for i in range(p):
        # Removed dead code: `inds = np.arange(p); inds = inds` had no effect.
        predictors = np.delete(X, i, axis=1)
        target = X[:, i]
        X_train, _X_test, y_train, _y_test = train_test_split(predictors, target)
        err_sum += OLS_err(X_train, y_train, predictors, target)
    # NOTE(review): divides by n (sample count), not p (number of fits);
    # scores are monotone either way — confirm intent before changing.
    return err_sum / n
def get_ridge_os(X):
    """Leave-one-feature-out ridge-regression outlier scores.

    For each column i, regresses column i on the remaining columns (fit on a
    random train split) and accumulates the element-wise squared prediction
    error per sample over the full dataset.

    Returns an array of length n: the accumulated error divided by n.
    """
    n, p = X.shape
    err_sum = np.zeros(n)
    for i in range(p):
        # Removed dead code: `inds = np.arange(p); inds = inds` had no effect.
        predictors = np.delete(X, i, axis=1)
        target = X[:, i]
        X_train, _X_test, y_train, _y_test = train_test_split(predictors, target)
        err_sum += ridge_err(X_train, y_train, predictors, target)
    # NOTE(review): divides by n (sample count), not p — confirm intent.
    return err_sum / n
def get_LASSO_os(X):
    """Leave-one-feature-out lasso-regression outlier scores.

    For each column i, regresses column i on the remaining columns (fit on a
    random train split) and accumulates the element-wise squared prediction
    error per sample over the full dataset.

    Returns an array of length n: the accumulated error divided by n.
    """
    n, p = X.shape
    err_sum = np.zeros(n)
    for i in range(p):
        # Removed dead code: `inds = np.arange(p); inds = inds` had no effect.
        predictors = np.delete(X, i, axis=1)
        target = X[:, i]
        X_train, _X_test, y_train, _y_test = train_test_split(predictors, target)
        err_sum += lasso_err(X_train, y_train, predictors, target)
    # NOTE(review): divides by n (sample count), not p — confirm intent.
    return err_sum / n
# The testing algorithms
#regression
def test_VAR(X):
    """Outlier scores from the VAR model (higher = more outlier)."""
    # Renamed local: the original bound the result to `os`, shadowing the os module.
    scores = get_VAR_OS(X)
    return scores
def test_OLS(X):
    """Outlier scores via leave-one-column-out OLS regression.

    Takes data X (samples as rows). Returns one score per sample;
    higher score = more outlier (summed element-wise errors).
    """
    return get_reg_os(X)
def test_Ridge(X):
    """Outlier scores via leave-one-column-out ridge regression.

    Takes data X (samples as rows). Returns one score per sample;
    higher score = more outlier (summed element-wise errors).
    """
    return get_ridge_os(X)
def test_LASSO(X):
    """Outlier scores via leave-one-column-out lasso regression.

    Takes data X (samples as rows). Returns one score per sample;
    higher score = more outlier (summed element-wise errors).
    """
    return get_LASSO_os(X)
# testing algorithms
# density
def test_OCSVM(X):
    """One-class SVM outlier scores for data X (higher = more outlier)."""
    model = OneClassSVM(gamma='scale')
    model.fit(X)
    # decision_function: larger = more normal, so negate it.
    return -1 * model.decision_function(X)
def test_GMM(X):
    """Gaussian-mixture outlier scores for data X (higher = more outlier).

    Fits a 3-component mixture and returns the negated per-sample
    log-likelihood, so low-likelihood samples score highest.
    """
    n_components = 3
    model = GaussianMixture(n_components=n_components)
    model.fit(X)
    # score_samples returns log probabilities; negate so outliers score high.
    return -1 * model.score_samples(X)
def test_IF(X):
    """Isolation-forest outlier scores for data X (higher = more outlier)."""
    forest = IsolationForest()
    forest.fit(X)
    # decision_function: small values mean few splits to isolate = outlier,
    # so negate to make larger = more outlier.
    return -1 * forest.decision_function(X)
def test_DBSCAN(X):
    """
    Outlier scores from DBSCAN clustering (higher = more outlier).

    Takes data X (samples as rows). sklearn's DBSCAN labels noise points -1;
    this converts labels into scores by giving noise points the largest score
    (the number of discovered classes) and every clustered point its own
    (non-negative) cluster label as its score.

    NOTE(review): the counting loop below assumes DBSCAN labels are the
    contiguous integers -1, 0, 1, ... — true for sklearn's implementation,
    otherwise the while loop would not terminate. Confirm if swapping in a
    different clusterer.
    """
    n,p = X.shape
    eps = 0.3 #normalised data
    # min_samples heuristic: n/20, clamped to [3, 100].
    if int(n//20) < 3:
        minnum = 3
    elif int(n//20) > 100:
        minnum = 100
    else:
        minnum = int(n//20)
    clf = DBSCAN(eps=eps, min_samples=minnum)
    classes = clf.fit_predict(X)
    # Count the size of each label, starting from the noise label -1,
    # until every sample is accounted for.
    i = -1
    n_found = 0
    cl_sizes = {}
    while n_found <n:
        n_found_inds = len(np.where(classes == i)[0])
        n_found += n_found_inds
        cl_sizes[i] = n_found_inds
        i+=1
    # Labels sorted by descending cluster size; only its length is used.
    cl_lst = [i[0] for i in sorted(cl_sizes.items(), key=lambda k:k[1], reverse=True)]
    n_classes = len(cl_lst)
    # Noise (-1) gets the top score n_classes; members keep their label as score.
    os = [n_classes if x<0 else x for x in classes]
    return np.array(os)
# deep learning algorithms
def test_VAE(X):
    """Variational-autoencoder outlier scores for data X (higher = more outlier).

    Returns the per-sample VAE reconstruction losses, which should be
    largest for outliers.
    """
    return get_vae_losses(X)
def test_AE(X):
    """Autoencoder outlier scores for data X (higher = more outlier).

    Returns the per-sample reconstruction losses, which should be
    largest for outliers.
    """
    return get_ae_losses(X)
def test_GRU(X):
    """GRU prediction-error outlier scores for data X (higher = more outlier)."""
    return get_GRU_os(X)
def test_LSTM(X):
    """LSTM prediction-error outlier scores for data X (higher = more outlier).

    Flattens the raw errors to a 1-D array, one score per sample.
    """
    raw_errs = get_LSTM_os(X)
    return np.array(raw_errs).reshape(-1)
# Matrix methods
def test_OP(X):
    """Outlier-pursuit scores for data X (higher = more outlier).

    Decomposes X^T into low-rank plus column-sparse parts and scores each
    sample by the column sum of the sparse component.
    """
    lamb = 0.5
    L_hat, C_hat, n_iter = outlier_pursuit(X.T, lamb)
    return np.sum(C_hat, axis=0)
def test_GOP(X):
    """Graph outlier-pursuit scores for data X (higher = more outlier).

    Scores each sample by the column sum of the sparse component of X^T.
    """
    lamb, gamma = 0.5, 0.1
    S_hat = GOP(X.T, lamb, gamma)
    return np.sum(S_hat, axis=0)
def test_SGOP(X):
    """Simple graph outlier-pursuit scores for data X (higher = more outlier).

    Scores each sample by the column sum of the sparse component of X^T.
    """
    lamb, gamma = 0.5, 0.1
    S_hat = SGOP(X.T, lamb, gamma)
    return np.sum(S_hat, axis=0)
# end of testing algorithms
def test_algo(X, outs, algo, metric):
"""
takes in algorithm 'algo', data 'X', with outlier indices 'outs'
returns fp rate, as given by separation_metric
algo must have input only X
"""
outlier_scores = algo(X)
fps = metric[1](outlier_scores, outs)
aucs = metric[0](outlier_scores, outs)
return fps, aucs
def contour_fp_algo(n, p, r, ta, n_steps, n_runs, gamma, algo, metric):
    """Compute a 2-D grid of mean false-positive rates for a contour plot.

    Varies p_frac (fraction of parameters changed in the outliers) and
    p_quant (amount each parameter is varied by) over [0, 1] x [0, 1];
    when both are 0 there are no outliers in terms of data.

    Args:
        n, p, r, ta, gamma: passed through to generate_test.
        n_steps: grid resolution per axis.
        n_runs: repetitions averaged per grid cell.
        algo: callable taking only X, returning outlier scores.
        metric: pair (auc_fn, fp_fn) as used by test_algo.

    Returns:
        (pf, pq, fpz): the two axis vectors and the (n_steps, n_steps)
        array of mean false-positive rates.
    """
    pf = np.linspace(0, 1, n_steps)
    pq = np.linspace(0, 1, n_steps)
    fps = []
    for p_frac in pf:
        fp_row = []
        for p_quant in pq:
            runs = []
            for _ in range(n_runs):
                # Retry until generate_test yields a non-singular problem.
                # Bug fixes vs. original: the retry flag was inverted (looped
                # forever on success, exited with `fp` unset on failure), and
                # the except clause referenced `numpy` although the module is
                # imported as `np` (NameError).
                while True:
                    try:
                        X, outs = generate_test(n, p, r, p_frac, p_quant, gamma, ta)
                        fp, auc_val = test_algo(X, outs, algo, metric)
                        break
                    except np.linalg.LinAlgError as err:
                        if 'Singular matrix' in str(err):
                            print('redoing due to singular matrix err')
                        else:
                            raise
                runs.append(fp)
            fp_row.append(np.mean(runs))
        fps.append(fp_row)
    fpz = np.array(fps)
    return pf, pq, fpz
def auc(est_out_scores, outs):
"""
measures how good the separation is between outliers and inliers
uses auc
uses the estimated outlier score from each algorithm.
"""
n = len(est_out_scores)
actual_os = [1 if i in | |
#!/bin/python3
# -*- coding: utf-8 -*-
#==============================
# Author: <NAME>
# Last modified: 2018-06-29 14:48
# Filename: downloader.py
# Description:
#
#=============================#
# change from keras.utils
import gzip
import hashlib
import os
import re
import shutil
import tarfile
import zipfile

import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
from .generic_utils import Progbar
try:
# from urllib.request import urlretrieve
from six.moves.urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve # py2
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
DEFAULT_DIR = '/tmp/datasets'
def get_dataset(urls=None, base_url=None, filenames=None, directory=None, md5_hashs=None, extract=False):
    """Download a dataset from explicit urls, or from base_url + filenames.

    Arguments
    ------------
    urls: The urls to be downloaded (a string or a list of strings).
    base_url: The base url of each filename.
    filenames: Used with base_url; the files to be downloaded.
    directory: The directory path to store the dataset. Created if missing.
        Defaults to '/tmp/datasets'.
    md5_hashs: Optional list of md5 hashes, one per url, used for verification.
    extract: If True, try to extract each downloaded archive.

    Returns the list of local file paths.
    """
    if directory is None:
        directory = DEFAULT_DIR
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Bug fix: the original tested `(base_url and filenames) is not None`,
    # which relies on `and` returning an operand and misbehaves for
    # falsy-but-not-None values such as base_url=''.
    if urls is None and base_url is not None and filenames is not None:
        urls = [base_url + filename for filename in filenames]
    if isinstance(urls, str):
        urls = [urls]
    if urls is None:
        raise ValueError("Either urls or both base_url and filenames must be given")
    if md5_hashs is None:
        md5_hashs = [None] * len(urls)
    fpaths = []
    for url, md5_hash in zip(urls, md5_hashs):
        fpaths.append(download(url, directory, md5_hash, extract=extract))
    return fpaths
def download(url,
             directory,
             md5_hash=None,
             file_hash=None,
             hash_algorithm='auto',
             extract=False,
             archive_format='auto'):
    """Download `url` into `directory`, verifying an optional hash.

    Re-downloads an existing file only when the provided hash does not match.
    If `extract` is True, tries to unpack the file as an archive afterwards.

    Returns the local file path.
    """
    filename = os.path.basename(url)
    fpath = os.path.join(directory, filename)
    # Bug fix: md5_hash was previously accepted but never used, so hashes
    # passed through get_dataset were silently ignored. Map it onto
    # file_hash the same way get_file does.
    if md5_hash is not None and file_hash is None:
        file_hash = md5_hash
        hash_algorithm = 'md5'
    # Renamed from `download` to avoid shadowing this function's own name.
    need_download = False
    if os.path.exists(fpath):
        # File found; verify integrity if a hash was provided.
        if file_hash is not None:
            if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
                print('A local file was found, but it seems to be '
                      'incomplete or outdated because the ' + hash_algorithm +
                      ' file hash does not match the original value of ' +
                      file_hash + ' so we will re-download the data.')
                need_download = True
    else:
        need_download = True
    if need_download:
        class ProgressTracker(object):
            # Maintain progbar for the lifetime of download.
            # This design was chosen for Python 2.7 compatibility.
            progbar = None

        def dl_progress(count, block_size, total_size):
            if ProgressTracker.progbar is None:
                # Bug fix: was `total_size is -1` — identity comparison on an
                # int is implementation-defined; urlretrieve reports unknown
                # sizes as -1.
                if total_size == -1:
                    total_size = None
                ProgressTracker.progbar = Progbar(total_size)
            else:
                ProgressTracker.progbar.update(count * block_size)

        # Message fix: fpath is the destination file, not a directory.
        print('Downloading data from {} to {}'.format(url, fpath))
        error_msg = 'URL fetch failure on {}: {} -- {}'
        try:
            try:
                urlretrieve(url, fpath, dl_progress)
            except URLError as e:
                raise Exception(error_msg.format(url, e.errno, e.reason))
            except HTTPError as e:
                raise Exception(error_msg.format(url, e.code, e.msg))
        except (Exception, KeyboardInterrupt):
            # Remove partial downloads before propagating.
            if os.path.exists(fpath):
                os.remove(fpath)
            raise
        ProgressTracker.progbar = None
    if extract:
        _extract_archive(fpath, directory, archive_format)
    return fpath
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
# Example
```python
>>> from keras.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
# Arguments
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
The file hash
"""
if (algorithm is 'sha256') or (algorithm is 'auto' and len(hash) is 64):
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
    """Validates a file against a sha256 or md5 hash.

    # Arguments
        fpath: path to the file being validated
        file_hash: The expected hash string of the file.
            The sha256 and md5 hash algorithms are both supported.
        algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
            The default 'auto' detects the hash algorithm from the hash
            length (sha256 hex digests are 64 characters).
        chunk_size: Bytes to read at a time, important for large files.
    # Returns
        Whether the file is valid
    """
    # Bug fix: the original compared strings with `is`, which checks object
    # identity and only works by accident of CPython string interning.
    if algorithm == 'sha256' or (algorithm == 'auto' and len(file_hash) == 64):
        hasher = 'sha256'
    else:
        hasher = 'md5'
    return str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash)
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
# Arguments
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
# Returns
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format is 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type is 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type is 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
# check weather extracted or not
extracted = True
for fname in archive.getnames():
if not os.path.exists(os.path.join(path, fname)):
extracted = False
if not extracted:
try:
archive.extractall(path)
print('extracted to', path)
except (tarfile.TarError, RuntimeError,
KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
def get_file(origin,
fname=None,
# untar=False,
md5_hash=None,
file_hash=None,
cache_dir=None,
cache_subdir=None,
hash_algorithm='auto',
extract=False,
archive_format='auto'):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `/tmp/datasets`
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
# Arguments
origin: Original URL of the file.
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
By default fname will remain it's original name.
md5_hash: Deprecated in favor of 'file_hash'.
md5 hash of the file for verification
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_dir: Location to store cached files, when None it
defaults to /tmp/datasets
cache_subdir: Subdirectory under the cache_dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are 'md5', 'sha256', and 'auto'.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
# Returns
Path to the downloaded file
"""
if fname is None:
fname = os.path.basename(origin)
if fname == '':
raise Exception("Please specify fname")
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
if cache_dir is None:
cache_dir = '/tmp/datasets'
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
if not os.access(cache_dir, os.W_OK):
raise Exception("Can't write to {}".format(cache_dir))
if cache_subdir is not None:
datadir = os.path.join(cache_dir, cache_subdir)
else:
datadir = cache_dir
if not os.path.exists(datadir):
os.makedirs(datadir)
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the ' + hash_algorithm +
' file hash does not match the original value of ' +
file_hash + ' so we will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size is -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar | |
len(args) > 0:
register_handle(self, args[0])
DownCast = staticmethod(_ShapeUpgrade.Handle_ShapeUpgrade_Tool_DownCast)
__swig_destroy__ = _ShapeUpgrade.delete_Handle_ShapeUpgrade_Tool
# SWIG plumbing: bind the generated C-extension functions as methods of the
# Handle_ShapeUpgrade_Tool proxy class and register the type with SWIG.
Handle_ShapeUpgrade_Tool.Nullify = new_instancemethod(_ShapeUpgrade.Handle_ShapeUpgrade_Tool_Nullify,None,Handle_ShapeUpgrade_Tool)
Handle_ShapeUpgrade_Tool.IsNull = new_instancemethod(_ShapeUpgrade.Handle_ShapeUpgrade_Tool_IsNull,None,Handle_ShapeUpgrade_Tool)
Handle_ShapeUpgrade_Tool.GetObject = new_instancemethod(_ShapeUpgrade.Handle_ShapeUpgrade_Tool_GetObject,None,Handle_ShapeUpgrade_Tool)
Handle_ShapeUpgrade_Tool_swigregister = _ShapeUpgrade.Handle_ShapeUpgrade_Tool_swigregister
Handle_ShapeUpgrade_Tool_swigregister(Handle_ShapeUpgrade_Tool)
def Handle_ShapeUpgrade_Tool_DownCast(*args):
  # Free-function alias; immediately rebound below to the raw C callable.
  return _ShapeUpgrade.Handle_ShapeUpgrade_Tool_DownCast(*args)
Handle_ShapeUpgrade_Tool_DownCast = _ShapeUpgrade.Handle_ShapeUpgrade_Tool_DownCast
class ShapeUpgrade_UnifySameDomain(OCC.MMgt.MMgt_TShared):
    # SWIG-generated wrapper: merges same-domain faces/edges of a shape.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        Constructor; may be called empty or with a shape and options.

        :param aShape:
        :type aShape: TopoDS_Shape &
        :param UnifyEdges: default value is Standard_True
        :type UnifyEdges: bool
        :param UnifyFaces: default value is Standard_True
        :type UnifyFaces: bool
        :param ConcatBSplines: default value is Standard_False
        :type ConcatBSplines: bool
        :rtype: None
        """
        _ShapeUpgrade.ShapeUpgrade_UnifySameDomain_swiginit(self,_ShapeUpgrade.new_ShapeUpgrade_UnifySameDomain(*args))
    def Initialize(self, *args):
        """
        :param aShape:
        :type aShape: TopoDS_Shape &
        :param UnifyEdges: default value is Standard_True
        :type UnifyEdges: bool
        :param UnifyFaces: default value is Standard_True
        :type UnifyFaces: bool
        :param ConcatBSplines: default value is Standard_False
        :type ConcatBSplines: bool
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_UnifySameDomain_Initialize(self, *args)
    def Build(self, *args):
        """
        * Builds the resulting shape
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_UnifySameDomain_Build(self, *args)
    def Shape(self, *args):
        """
        * Gives the resulting shape
        :rtype: TopoDS_Shape
        """
        return _ShapeUpgrade.ShapeUpgrade_UnifySameDomain_Shape(self, *args)
    def Generated(self, *args):
        """
        :param aShape:
        :type aShape: TopoDS_Shape &
        :rtype: TopoDS_Shape
        """
        return _ShapeUpgrade.ShapeUpgrade_UnifySameDomain_Generated(self, *args)
    def UnifyFaces(self, *args):
        """
        * this method makes if possible a common face from each group of faces lying on coincident surfaces
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_UnifySameDomain_UnifyFaces(self, *args)
    def UnifyEdges(self, *args):
        """
        * this method makes if possible a common edge from each group of edges connecting common couple of faces
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_UnifySameDomain_UnifyEdges(self, *args)
    def UnifyFacesAndEdges(self, *args):
        """
        * this method unifies same domain faces and edges
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_UnifySameDomain_UnifyFacesAndEdges(self, *args)
    def GetHandle(self):
        # Lazily create and cache the OCC handle; ownership moves to the
        # handle (thisown = False). NOTE(review): bare except is generated
        # code — it only guards the missing-attribute first call.
        try:
            return self.thisHandle
        except:
            self.thisHandle = Handle_ShapeUpgrade_UnifySameDomain(self)
            self.thisown = False
        return self.thisHandle
    __swig_destroy__ = _ShapeUpgrade.delete_ShapeUpgrade_UnifySameDomain
# SWIG plumbing: bind the generated C-extension functions as methods of
# ShapeUpgrade_UnifySameDomain and register the type with SWIG.
ShapeUpgrade_UnifySameDomain.Initialize = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_UnifySameDomain_Initialize,None,ShapeUpgrade_UnifySameDomain)
ShapeUpgrade_UnifySameDomain.Build = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_UnifySameDomain_Build,None,ShapeUpgrade_UnifySameDomain)
ShapeUpgrade_UnifySameDomain.Shape = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_UnifySameDomain_Shape,None,ShapeUpgrade_UnifySameDomain)
ShapeUpgrade_UnifySameDomain.Generated = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_UnifySameDomain_Generated,None,ShapeUpgrade_UnifySameDomain)
ShapeUpgrade_UnifySameDomain.UnifyFaces = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_UnifySameDomain_UnifyFaces,None,ShapeUpgrade_UnifySameDomain)
ShapeUpgrade_UnifySameDomain.UnifyEdges = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_UnifySameDomain_UnifyEdges,None,ShapeUpgrade_UnifySameDomain)
ShapeUpgrade_UnifySameDomain.UnifyFacesAndEdges = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_UnifySameDomain_UnifyFacesAndEdges,None,ShapeUpgrade_UnifySameDomain)
ShapeUpgrade_UnifySameDomain_swigregister = _ShapeUpgrade.ShapeUpgrade_UnifySameDomain_swigregister
ShapeUpgrade_UnifySameDomain_swigregister(ShapeUpgrade_UnifySameDomain)
class Handle_ShapeUpgrade_UnifySameDomain(OCC.MMgt.Handle_MMgt_TShared):
    # SWIG-generated OCC handle (smart pointer) for ShapeUpgrade_UnifySameDomain.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        # Construct the underlying handle, then register it on the wrapped
        # object so object and handle stay associated.
        _ShapeUpgrade.Handle_ShapeUpgrade_UnifySameDomain_swiginit(self,_ShapeUpgrade.new_Handle_ShapeUpgrade_UnifySameDomain(*args))
        # register the handle in the base object
        if len(args) > 0:
            register_handle(self, args[0])
    DownCast = staticmethod(_ShapeUpgrade.Handle_ShapeUpgrade_UnifySameDomain_DownCast)
    __swig_destroy__ = _ShapeUpgrade.delete_Handle_ShapeUpgrade_UnifySameDomain
# SWIG boilerplate: bind handle operations and register the handle class.
Handle_ShapeUpgrade_UnifySameDomain.Nullify = new_instancemethod(_ShapeUpgrade.Handle_ShapeUpgrade_UnifySameDomain_Nullify,None,Handle_ShapeUpgrade_UnifySameDomain)
Handle_ShapeUpgrade_UnifySameDomain.IsNull = new_instancemethod(_ShapeUpgrade.Handle_ShapeUpgrade_UnifySameDomain_IsNull,None,Handle_ShapeUpgrade_UnifySameDomain)
Handle_ShapeUpgrade_UnifySameDomain.GetObject = new_instancemethod(_ShapeUpgrade.Handle_ShapeUpgrade_UnifySameDomain_GetObject,None,Handle_ShapeUpgrade_UnifySameDomain)
Handle_ShapeUpgrade_UnifySameDomain_swigregister = _ShapeUpgrade.Handle_ShapeUpgrade_UnifySameDomain_swigregister
Handle_ShapeUpgrade_UnifySameDomain_swigregister(Handle_ShapeUpgrade_UnifySameDomain)
def Handle_ShapeUpgrade_UnifySameDomain_DownCast(*args):
    """Down-cast a base handle to Handle_ShapeUpgrade_UnifySameDomain."""
    return _ShapeUpgrade.Handle_ShapeUpgrade_UnifySameDomain_DownCast(*args)
# NOTE: generated-code artifact — the assignment below immediately rebinds
# the name to the raw SWIG function, shadowing the wrapper defined above.
Handle_ShapeUpgrade_UnifySameDomain_DownCast = _ShapeUpgrade.Handle_ShapeUpgrade_UnifySameDomain_DownCast
class ShapeUpgrade_ConvertSurfaceToBezierBasis(ShapeUpgrade_SplitSurface):
    """SWIG wrapper: converts/splits a surface into a grid of surfaces in
    Bezier basis; the Set*/Get*Mode flags select which surface kinds are
    converted."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        * Empty constructor.
        :rtype: None
        """
        _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_swiginit(self,_ShapeUpgrade.new_ShapeUpgrade_ConvertSurfaceToBezierBasis(*args))
    def Segments(self, *args):
        """
        * Returns the grid of bezier based surfaces correspondent to original surface.
        :rtype: Handle_ShapeExtend_CompositeSurface
        """
        return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_Segments(self, *args)
    def SetPlaneMode(self, *args):
        """
        * Sets mode for conversion Geom_Plane to Bezier
        :param mode:
        :type mode: bool
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_SetPlaneMode(self, *args)
    def GetPlaneMode(self, *args):
        """
        * Returns the Geom_Plane conversion mode.
        :rtype: bool
        """
        return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_GetPlaneMode(self, *args)
    def SetRevolutionMode(self, *args):
        """
        * Sets mode for conversion Geom_SurfaceOfRevolution to Bezier
        :param mode:
        :type mode: bool
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_SetRevolutionMode(self, *args)
    def GetRevolutionMode(self, *args):
        """
        * Returns the Geom_SurfaceOfRevolution conversion mode.
        :rtype: bool
        """
        return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_GetRevolutionMode(self, *args)
    def SetExtrusionMode(self, *args):
        """
        * Sets mode for conversion Geom_SurfaceOfLinearExtrusion to Bezier
        :param mode:
        :type mode: bool
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_SetExtrusionMode(self, *args)
    def GetExtrusionMode(self, *args):
        """
        * Returns the Geom_SurfaceOfLinearExtrusion conversion mode.
        :rtype: bool
        """
        return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_GetExtrusionMode(self, *args)
    def SetBSplineMode(self, *args):
        """
        * Sets mode for conversion Geom_BSplineSurface to Bezier
        :param mode:
        :type mode: bool
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_SetBSplineMode(self, *args)
    def GetBSplineMode(self, *args):
        """
        * Returns the Geom_BSplineSurface conversion mode.
        :rtype: bool
        """
        return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_GetBSplineMode(self, *args)
    def GetHandle(self):
        """Return the cached OCC handle, creating it on first access.
        :rtype: Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis
        """
        try:
            return self.thisHandle
        except AttributeError:  # bug fix: was a bare ``except:`` (E722)
            self.thisHandle = Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis(self)
            self.thisown = False
            return self.thisHandle
    __swig_destroy__ = _ShapeUpgrade.delete_ShapeUpgrade_ConvertSurfaceToBezierBasis
# SWIG boilerplate: rebind generated free functions as instance methods of
# ShapeUpgrade_ConvertSurfaceToBezierBasis and register the proxy class.
ShapeUpgrade_ConvertSurfaceToBezierBasis.Segments = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_Segments,None,ShapeUpgrade_ConvertSurfaceToBezierBasis)
ShapeUpgrade_ConvertSurfaceToBezierBasis.SetPlaneMode = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_SetPlaneMode,None,ShapeUpgrade_ConvertSurfaceToBezierBasis)
ShapeUpgrade_ConvertSurfaceToBezierBasis.GetPlaneMode = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_GetPlaneMode,None,ShapeUpgrade_ConvertSurfaceToBezierBasis)
ShapeUpgrade_ConvertSurfaceToBezierBasis.SetRevolutionMode = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_SetRevolutionMode,None,ShapeUpgrade_ConvertSurfaceToBezierBasis)
ShapeUpgrade_ConvertSurfaceToBezierBasis.GetRevolutionMode = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_GetRevolutionMode,None,ShapeUpgrade_ConvertSurfaceToBezierBasis)
ShapeUpgrade_ConvertSurfaceToBezierBasis.SetExtrusionMode = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_SetExtrusionMode,None,ShapeUpgrade_ConvertSurfaceToBezierBasis)
ShapeUpgrade_ConvertSurfaceToBezierBasis.GetExtrusionMode = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_GetExtrusionMode,None,ShapeUpgrade_ConvertSurfaceToBezierBasis)
ShapeUpgrade_ConvertSurfaceToBezierBasis.SetBSplineMode = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_SetBSplineMode,None,ShapeUpgrade_ConvertSurfaceToBezierBasis)
ShapeUpgrade_ConvertSurfaceToBezierBasis.GetBSplineMode = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_GetBSplineMode,None,ShapeUpgrade_ConvertSurfaceToBezierBasis)
ShapeUpgrade_ConvertSurfaceToBezierBasis_swigregister = _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_swigregister
ShapeUpgrade_ConvertSurfaceToBezierBasis_swigregister(ShapeUpgrade_ConvertSurfaceToBezierBasis)
class Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis(Handle_ShapeUpgrade_SplitSurface):
    """OCC handle (reference-counted smart pointer) to a
    ShapeUpgrade_ConvertSurfaceToBezierBasis object."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """Create the handle; if built from an object, register it there."""
        _ShapeUpgrade.Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis_swiginit(self,_ShapeUpgrade.new_Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis(*args))
        # register the handle in the base object
        if len(args) > 0:
            register_handle(self, args[0])
    DownCast = staticmethod(_ShapeUpgrade.Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis_DownCast)
    __swig_destroy__ = _ShapeUpgrade.delete_Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis
# SWIG boilerplate: bind handle operations and register the handle class.
Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis.Nullify = new_instancemethod(_ShapeUpgrade.Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis_Nullify,None,Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis)
Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis.IsNull = new_instancemethod(_ShapeUpgrade.Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis_IsNull,None,Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis)
Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis.GetObject = new_instancemethod(_ShapeUpgrade.Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis_GetObject,None,Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis)
Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis_swigregister = _ShapeUpgrade.Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis_swigregister
Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis_swigregister(Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis)
def Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis_DownCast(*args):
    """Down-cast a base handle to Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis."""
    return _ShapeUpgrade.Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis_DownCast(*args)
# NOTE: generated-code artifact — the assignment below immediately rebinds
# the name to the raw SWIG function, shadowing the wrapper defined above.
Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis_DownCast = _ShapeUpgrade.Handle_ShapeUpgrade_ConvertSurfaceToBezierBasis_DownCast
class ShapeUpgrade_EdgeDivide(ShapeUpgrade_Tool):
    """SWIG wrapper: divides an edge by splitting its 3D curve and pcurve
    with the configured SplitCurve tools."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        * Empty constructor
        :rtype: None
        """
        _ShapeUpgrade.ShapeUpgrade_EdgeDivide_swiginit(self,_ShapeUpgrade.new_ShapeUpgrade_EdgeDivide(*args))
    def Clear(self, *args):
        """
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_EdgeDivide_Clear(self, *args)
    def SetFace(self, *args):
        """
        * Sets supporting surface by face
        :param F:
        :type F: TopoDS_Face &
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_EdgeDivide_SetFace(self, *args)
    def Compute(self, *args):
        """
        :param E:
        :type E: TopoDS_Edge &
        :rtype: bool
        """
        return _ShapeUpgrade.ShapeUpgrade_EdgeDivide_Compute(self, *args)
    def HasCurve2d(self, *args):
        """
        :rtype: bool
        """
        return _ShapeUpgrade.ShapeUpgrade_EdgeDivide_HasCurve2d(self, *args)
    def HasCurve3d(self, *args):
        """
        :rtype: bool
        """
        return _ShapeUpgrade.ShapeUpgrade_EdgeDivide_HasCurve3d(self, *args)
    def Knots2d(self, *args):
        """
        :rtype: Handle_TColStd_HSequenceOfReal
        """
        return _ShapeUpgrade.ShapeUpgrade_EdgeDivide_Knots2d(self, *args)
    def Knots3d(self, *args):
        """
        :rtype: Handle_TColStd_HSequenceOfReal
        """
        return _ShapeUpgrade.ShapeUpgrade_EdgeDivide_Knots3d(self, *args)
    def SetSplitCurve2dTool(self, *args):
        """
        * Sets the tool for splitting pcurves.
        :param splitCurve2dTool:
        :type splitCurve2dTool: Handle_ShapeUpgrade_SplitCurve2d &
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_EdgeDivide_SetSplitCurve2dTool(self, *args)
    def SetSplitCurve3dTool(self, *args):
        """
        * Sets the tool for splitting 3D curves.
        :param splitCurve3dTool:
        :type splitCurve3dTool: Handle_ShapeUpgrade_SplitCurve3d &
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_EdgeDivide_SetSplitCurve3dTool(self, *args)
    def GetSplitCurve2dTool(self, *args):
        """
        * Returns the tool for splitting pcurves.
        :rtype: Handle_ShapeUpgrade_SplitCurve2d
        """
        return _ShapeUpgrade.ShapeUpgrade_EdgeDivide_GetSplitCurve2dTool(self, *args)
    def GetSplitCurve3dTool(self, *args):
        """
        * Returns the tool for splitting 3D curves.
        :rtype: Handle_ShapeUpgrade_SplitCurve3d
        """
        return _ShapeUpgrade.ShapeUpgrade_EdgeDivide_GetSplitCurve3dTool(self, *args)
    def GetHandle(self):
        """Return the cached OCC handle, creating it on first access.
        :rtype: Handle_ShapeUpgrade_EdgeDivide
        """
        try:
            return self.thisHandle
        except AttributeError:  # bug fix: was a bare ``except:`` (E722)
            self.thisHandle = Handle_ShapeUpgrade_EdgeDivide(self)
            self.thisown = False
            return self.thisHandle
    __swig_destroy__ = _ShapeUpgrade.delete_ShapeUpgrade_EdgeDivide
# SWIG boilerplate: rebind generated free functions as instance methods of
# ShapeUpgrade_EdgeDivide and register the proxy class.
ShapeUpgrade_EdgeDivide.Clear = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_EdgeDivide_Clear,None,ShapeUpgrade_EdgeDivide)
ShapeUpgrade_EdgeDivide.SetFace = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_EdgeDivide_SetFace,None,ShapeUpgrade_EdgeDivide)
ShapeUpgrade_EdgeDivide.Compute = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_EdgeDivide_Compute,None,ShapeUpgrade_EdgeDivide)
ShapeUpgrade_EdgeDivide.HasCurve2d = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_EdgeDivide_HasCurve2d,None,ShapeUpgrade_EdgeDivide)
ShapeUpgrade_EdgeDivide.HasCurve3d = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_EdgeDivide_HasCurve3d,None,ShapeUpgrade_EdgeDivide)
ShapeUpgrade_EdgeDivide.Knots2d = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_EdgeDivide_Knots2d,None,ShapeUpgrade_EdgeDivide)
ShapeUpgrade_EdgeDivide.Knots3d = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_EdgeDivide_Knots3d,None,ShapeUpgrade_EdgeDivide)
ShapeUpgrade_EdgeDivide.SetSplitCurve2dTool = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_EdgeDivide_SetSplitCurve2dTool,None,ShapeUpgrade_EdgeDivide)
ShapeUpgrade_EdgeDivide.SetSplitCurve3dTool = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_EdgeDivide_SetSplitCurve3dTool,None,ShapeUpgrade_EdgeDivide)
ShapeUpgrade_EdgeDivide.GetSplitCurve2dTool = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_EdgeDivide_GetSplitCurve2dTool,None,ShapeUpgrade_EdgeDivide)
ShapeUpgrade_EdgeDivide.GetSplitCurve3dTool = new_instancemethod(_ShapeUpgrade.ShapeUpgrade_EdgeDivide_GetSplitCurve3dTool,None,ShapeUpgrade_EdgeDivide)
ShapeUpgrade_EdgeDivide_swigregister = _ShapeUpgrade.ShapeUpgrade_EdgeDivide_swigregister
ShapeUpgrade_EdgeDivide_swigregister(ShapeUpgrade_EdgeDivide)
class Handle_ShapeUpgrade_EdgeDivide(Handle_ShapeUpgrade_Tool):
    """OCC handle (reference-counted smart pointer) to a
    ShapeUpgrade_EdgeDivide object."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """Create the handle; if built from an object, register it there."""
        _ShapeUpgrade.Handle_ShapeUpgrade_EdgeDivide_swiginit(self,_ShapeUpgrade.new_Handle_ShapeUpgrade_EdgeDivide(*args))
        # register the handle in the base object
        if len(args) > 0:
            register_handle(self, args[0])
    DownCast = staticmethod(_ShapeUpgrade.Handle_ShapeUpgrade_EdgeDivide_DownCast)
    __swig_destroy__ = _ShapeUpgrade.delete_Handle_ShapeUpgrade_EdgeDivide
# SWIG boilerplate: bind handle operations and register the handle class.
Handle_ShapeUpgrade_EdgeDivide.Nullify = new_instancemethod(_ShapeUpgrade.Handle_ShapeUpgrade_EdgeDivide_Nullify,None,Handle_ShapeUpgrade_EdgeDivide)
Handle_ShapeUpgrade_EdgeDivide.IsNull = new_instancemethod(_ShapeUpgrade.Handle_ShapeUpgrade_EdgeDivide_IsNull,None,Handle_ShapeUpgrade_EdgeDivide)
Handle_ShapeUpgrade_EdgeDivide.GetObject = new_instancemethod(_ShapeUpgrade.Handle_ShapeUpgrade_EdgeDivide_GetObject,None,Handle_ShapeUpgrade_EdgeDivide)
Handle_ShapeUpgrade_EdgeDivide_swigregister = _ShapeUpgrade.Handle_ShapeUpgrade_EdgeDivide_swigregister
Handle_ShapeUpgrade_EdgeDivide_swigregister(Handle_ShapeUpgrade_EdgeDivide)
def Handle_ShapeUpgrade_EdgeDivide_DownCast(*args):
    """Down-cast a base handle to Handle_ShapeUpgrade_EdgeDivide."""
    return _ShapeUpgrade.Handle_ShapeUpgrade_EdgeDivide_DownCast(*args)
# NOTE: generated-code artifact — the assignment below immediately rebinds
# the name to the raw SWIG function, shadowing the wrapper defined above.
Handle_ShapeUpgrade_EdgeDivide_DownCast = _ShapeUpgrade.Handle_ShapeUpgrade_EdgeDivide_DownCast
class ShapeUpgrade_FaceDivide(ShapeUpgrade_Tool):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
    def __init__(self, *args):
        """
        * Creates empty constructor.
        :rtype: None
        * Initialize by a Face.
        :param F:
        :type F: TopoDS_Face &
        :rtype: None
        """
        # Overloaded SWIG constructor: no-arg form or a TopoDS_Face.
        _ShapeUpgrade.ShapeUpgrade_FaceDivide_swiginit(self,_ShapeUpgrade.new_ShapeUpgrade_FaceDivide(*args))
    def Init(self, *args):
        """
        * Initialize by a Face.
        :param F:
        :type F: TopoDS_Face &
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_FaceDivide_Init(self, *args)
    def SetSurfaceSegmentMode(self, *args):
        """
        * Sets the mode for trimming (segmenting) the surface by wire UV bounds.
        :param Segment:
        :type Segment: bool
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_FaceDivide_SetSurfaceSegmentMode(self, *args)
    def Perform(self, *args):
        """
        * Performs splitting and computes the resulting shell. The context is used to keep track of former splittings in order to keep sharings; it is updated according to modifications made.
        :rtype: bool
        """
        return _ShapeUpgrade.ShapeUpgrade_FaceDivide_Perform(self, *args)
    def SplitSurface(self, *args):
        """
        * Performs splitting of the surface and computes the shell from the source face.
        :rtype: bool
        """
        return _ShapeUpgrade.ShapeUpgrade_FaceDivide_SplitSurface(self, *args)
    def SplitCurves(self, *args):
        """
        * Performs splitting of the curves of all the edges in the shape and divides these edges.
        :rtype: bool
        """
        return _ShapeUpgrade.ShapeUpgrade_FaceDivide_SplitCurves(self, *args)
    def Result(self, *args):
        """
        * Gives the resulting Shell, or Face, or a Null shape if not done.
        :rtype: TopoDS_Shape
        """
        return _ShapeUpgrade.ShapeUpgrade_FaceDivide_Result(self, *args)
    def Status(self, *args):
        """
        * Queries the status of the last call to Perform. OK: no splitting was done (or no call to Perform); DONE1: some edges were split; DONE2: surface was split; DONE3: surface was modified without splitting; FAIL1: some failures encountered while splitting wires; FAIL2: face cannot be split.
        :param status:
        :type status: ShapeExtend_Status
        :rtype: bool
        """
        return _ShapeUpgrade.ShapeUpgrade_FaceDivide_Status(self, *args)
    def SetSplitSurfaceTool(self, *args):
        """
        * Sets the tool for splitting surfaces.
        :param splitSurfaceTool:
        :type splitSurfaceTool: Handle_ShapeUpgrade_SplitSurface &
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_FaceDivide_SetSplitSurfaceTool(self, *args)
    def SetWireDivideTool(self, *args):
        """
        * Sets the tool for dividing edges on the Face.
        :param wireDivideTool:
        :type wireDivideTool: Handle_ShapeUpgrade_WireDivide &
        :rtype: None
        """
        return _ShapeUpgrade.ShapeUpgrade_FaceDivide_SetWireDivideTool(self, *args)
    def GetWireDivideTool(self, *args):
        """
        * Returns the tool for dividing edges on the Face. This tool must already be initialized.
        :rtype: Handle_ShapeUpgrade_WireDivide
        """
        return _ShapeUpgrade.ShapeUpgrade_FaceDivide_GetWireDivideTool(self, *args)
def GetHandle(self):
try:
return self.thisHandle
except:
self.thisHandle = Handle_ShapeUpgrade_FaceDivide(self)
self.thisown = False
return self.thisHandle
__swig_destroy__ | |
"""Module to implement training operations for Neural Networks."""
import os
import math
from abc import ABC, abstractmethod
import tensorflow as tf
from tensorflow.python.client import device_lib
import nasgym.utl.configreader as cr
from nasgym import nas_logger
from nasgym import CONFIG_INI
from nasgym.net_ops.net_builder import sequence_to_net
from nasgym.net_ops.net_utils import compute_network_density
from nasgym.net_ops.net_utils import compute_network_flops
class NasEnvTrainerBase(ABC):
    """Abstract base class for network trainers used by the NAS environment.

    Concrete subclasses must implement :meth:`build_model_fn`,
    :meth:`train` and :meth:`evaluate`.
    """

    def __init__(self, encoded_network, input_shape, n_classes,
                 batch_size=256, log_path="./trainer",
                 variable_scope="custom", profile_path="./profiler"):
        """General purpose constructor.

        :param encoded_network: encoded architecture handed to the builder.
        :param input_shape: shape of one input observation.
        :param n_classes: number of target classes.
        :param batch_size: mini-batch size for training/evaluation.
        :param log_path: directory for model checkpoints and logs.
        :param variable_scope: variable scope name used for the network.
        :param profile_path: directory for profiler output.
        """
        # Encapsulation
        self.encoded_network = encoded_network
        self.input_shape = input_shape
        # Keep the historical (misspelled) attribute name for backward
        # compatibility with existing subclasses, and expose a correctly
        # spelled alias.
        self.n_clases = n_classes
        self.n_classes = n_classes
        self.batch_size = batch_size
        self.log_path = log_path
        self.variable_scope = variable_scope
        # Bug fix: profile_path was accepted but never stored.
        self.profile_path = profile_path
        self.tf_partial_network = None
        self.classifier = None
        # Init of superclass
        super().__init__()

    @abstractmethod
    def build_model_fn(self):
        """Build and return the model_fn that adds the training graph."""

    @abstractmethod
    def train(self, train_data, train_labels, train_input_fn, n_epochs):
        """Abstract method to implement training."""
        raise NotImplementedError("Method must be implemented by subclass")

    @abstractmethod
    def evaluate(self, eval_data, eval_labels, eval_input_fn):
        """Abstract method to implement evaluation."""
        raise NotImplementedError("Method must be implemented by subclass")
class OomReportingHook(tf.train.SessionRunHook):
    """Session hook asking TF to report tensor allocations upon OOM."""

    def before_run(self, run_context):
        """Attach OOM-reporting RunOptions to every session.run call."""
        oom_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
        # No extra fetches are requested; only the run options matter.
        return tf.train.SessionRunArgs(fetches=[], options=oom_options)
class DefaultNASTrainer(NasEnvTrainerBase):
    """Train and evaluate an encoded network with a tf.estimator.Estimator
    (TF 1.x); builds the FC head, softmax prediction, Adam training and
    accuracy evaluation around the network produced by sequence_to_net."""
    def __init__(self, encoded_network, input_shape, n_classes, batch_size=256,
                 log_path="./trainer", variable_scope="custom",
                 profile_path="./profiler", op_decay_steps=12, op_beta1=0.9,
                 op_beta2=0.999, op_epsilon=10e-08, fcl_units=1024,
                 dropout_rate=0.4, n_obs_train=None):
        """Specific constructor with optimizer, FC-head and dataset-size
        options.

        NOTE(review): the default op_epsilon=10e-08 equals 1e-07, not the
        usual Adam epsilon of 1e-08 — confirm this is intended.
        NOTE(review): n_obs_train=None makes the step computation below
        raise a TypeError; callers are expected to always provide it.
        """
        super(DefaultNASTrainer, self).__init__(
            encoded_network=encoded_network,
            input_shape=input_shape,
            n_classes=n_classes, # TODO: We don't use it.
            batch_size=batch_size,
            log_path=log_path,
            variable_scope=variable_scope,
            profile_path=profile_path
        )
        self.op_decay_steps = op_decay_steps
        self.op_beta1 = op_beta1
        self.op_beta2 = op_beta2
        self.op_epsilon = op_epsilon
        self.fcl_units = fcl_units
        self.dropout_rate = dropout_rate
        self.n_obs_train = n_obs_train
        # Total number of steps
        self._n_steps = \
            self.op_decay_steps * math.ceil(self.n_obs_train/self.batch_size)
        self._n_steps = math.ceil(self._n_steps)
        # Steps per epoch
        self._steps_per_epoch = math.floor(self._n_steps/self.op_decay_steps)
        self._set_estimator()
        # An empty list storting all accuracies found during evaluation
        self.eval_accuracies = []
    def _set_estimator(self):
        """Build self.classifier (idempotent: only if not yet created),
        reading distribution/device-placement/memory-growth flags from
        config.ini with False as fallback."""
        nas_logger.debug(
            "Configuring the estimator that will be used for training and \
            evaluation"
        )
        if self.classifier is None:
            # Read the configuration for the distributed strategy (config.ini)
            try:
                aux = CONFIG_INI[cr.SEC_TRAINER_TENSORFLOW]
                self._distributed_enabled = aux[cr.PROP_ENABLE_DISTRIBUTED]
            except KeyError:
                self._distributed_enabled = False
            # Read the configuration for the log device placement (config.ini)
            try:
                aux = CONFIG_INI[cr.SEC_TRAINER_TENSORFLOW]
                self._devplacement_enabled = \
                    aux[cr.PROP_ENABLE_DEVICEPLACEMENT]
            except KeyError:
                self._devplacement_enabled = False
            # Read the configuration for the memory growth (config.ini)
            try:
                aux = CONFIG_INI[cr.SEC_TRAINER_TENSORFLOW]
                allow_memory_growth = aux[cr.PROP_ALLOW_MEMORYGROWTH]
            except KeyError:
                allow_memory_growth = False
            # Actually evaluation if distributed strategy is enabled
            if self._distributed_enabled:
                nas_logger.info("Distributed strategy has been enabled")
                # Obtain the available GPUs
                local_device = device_lib.list_local_devices()
                gpu_devices = \
                    [x.name for x in local_device if x.device_type == 'GPU']
                self.distributed_nreplicas = len(gpu_devices)
                distributed_strategy = tf.contrib.distribute.MirroredStrategy(
                    num_gpus=self.distributed_nreplicas
                )
                nas_logger.info(
                    "Number of replicas: %d", self.distributed_nreplicas
                )
            else:
                distributed_strategy = None
                self.distributed_nreplicas = 1
            # Evaluating if log device placement
            if self._devplacement_enabled:
                nas_logger.debug(
                    "Distributed strategy has been indicated. Obtaining the \
                    number of replicas available."
                )
                sess_config = tf.ConfigProto(log_device_placement=True)
            else:
                sess_config = tf.ConfigProto()
            # pylint: disable=no-member
            if allow_memory_growth:
                nas_logger.debug(
                    "Dynamic memory growth for TensorFlow is enabled"
                )
                sess_config.gpu_options.allow_growth = True
            # pylint: disable=no-member
            run_config = tf.estimator.RunConfig(
                session_config=sess_config,
                # save_checkpoints_steps=5,
                # save_checkpoints_secs=None,
                train_distribute=distributed_strategy,
                eval_distribute=distributed_strategy
            )
            # pylint: disable=no-member
            self.classifier = tf.estimator.Estimator(
                config=run_config,
                model_fn=self.build_model_fn(),
                model_dir="{root_dir}/{model_dir}".format(
                    root_dir=self.log_path,
                    model_dir="model"
                )
            )
    def build_model_fn(self):
        """Implement training of network with custom approach.

        Returns a model_fn(features, labels, mode) closure suitable for
        tf.estimator.Estimator.
        """
        # Define the model_fn we want to return
        def model_fn(features, labels, mode):
            with tf.variable_scope(self.variable_scope):
                # 1. Define the input placeholder
                if len(self.input_shape) == 2:
                    nas_logger.debug("Reshaping input during model building.")
                    net_input = tf.reshape(
                        tensor=features["x"],
                        shape=[-1] + list(self.input_shape) + [1],
                        name="L0_RESHAPE"
                    )
                else:
                    net_input = features["x"]
                # 2. Simply call the network
                self.tf_partial_network = sequence_to_net(
                    sequence=self.encoded_network,
                    input_tensor=net_input
                )
                # 3. Build the Fully-Connected layers after block.
                with tf.name_scope("L_FC"):
                    # Flatten and connect to the Dense Layer
                    ll_flat = tf.layers.flatten(
                        inputs=self.tf_partial_network,
                        name="Flatten"
                    )
                    dense_layer = tf.layers.dense(
                        inputs=ll_flat,
                        units=self.fcl_units,
                        activation=tf.nn.relu,
                        name="DENSE"
                    )
                    dropout_layer = tf.layers.dropout(
                        inputs=dense_layer,
                        rate=self.dropout_rate,
                        # pylint: disable=no-member
                        training=mode == tf.estimator.ModeKeys.TRAIN,
                        name="DROPOUT"
                    )
                # 4. Build the Prediction Layer based on a Softmax
                with tf.name_scope("L_PRED"):
                    # Logits layer
                    logits_layer = tf.layers.dense(
                        inputs=dropout_layer,
                        units=self.n_clases,
                        name="PL_Logits"
                    )
                    predictions = {
                        "classes": tf.argmax(
                            input=logits_layer,
                            axis=1,
                            name="PL_Classes"
                        ),
                        "probabilities": tf.nn.softmax(
                            logits=logits_layer,
                            name="PL_Softmax"
                        )
                    }
                    # If we are asked for prediction only, we return the
                    # prediction and stop adding nodes to the graph.
                    # pylint: disable=no-member
                    if mode == tf.estimator.ModeKeys.PREDICT:
                        return tf.estimator.EstimatorSpec(
                            mode=mode,
                            predictions=predictions
                        )
                # 4. Build the training nodes
                with tf.name_scope("L_TRAIN"):
                    # Loss
                    loss_layer = tf.losses.sparse_softmax_cross_entropy(
                        labels=labels,
                        logits=logits_layer
                    )
                    # Training Op
                    # pylint: disable=no-member
                    if mode == tf.estimator.ModeKeys.TRAIN:
                        # The optimizer via Gradient Descent (we can change it)
                        global_step = tf.train.get_global_step()
                        # learning_rate = tf.train.exponential_decay(
                        #     learning_rate=0.0001,
                        #     global_step=global_step,
                        #     decay_steps=self.op_decay_steps,
                        #     decay_rate=0.02
                        # )
                        # The paper's version of the learning rate
                        # NOTE(review): global_step is a TF Tensor at graph
                        # construction time, so `global_step in range(...)`
                        # below does NOT evaluate the step value at run time;
                        # the intended step-wise 0.2x reduction is most
                        # likely never applied. Consider
                        # tf.train.piecewise_constant instead — confirm.
                        n_reductions = math.floor(self.op_decay_steps/5)
                        learning_rate = 0.001
                        ul = self.op_decay_steps*self._steps_per_epoch
                        for i in range(1, n_reductions + 1):
                            ll = self._steps_per_epoch*5*(i-1) + 1
                            if global_step in range(ll, ul+1) and i > 0:
                                learning_rate *= 0.2
                        optimizer = tf.train.AdamOptimizer(
                            learning_rate=learning_rate,
                            beta1=self.op_beta1,
                            beta2=self.op_beta2,
                            epsilon=self.op_epsilon,
                            name="OPT"
                        )
                        # We say that we want to optimize the loss layer using
                        # the optimizer.
                        train_op = optimizer.minimize(
                            loss=loss_layer,
                            global_step=global_step,
                            name="OPT_MIN"
                        )
                        # And return
                        # pylint: disable=no-member
                        return tf.estimator.EstimatorSpec(
                            mode=mode,
                            loss=loss_layer,
                            train_op=train_op
                        )
                # 5. Build the evaluation nodes.
                with tf.name_scope("L_EVAL"):
                    # Evaluation metric is accuracy
                    eval_metric_ops = {
                        "accuracy": tf.metrics.accuracy(
                            labels=labels,
                            predictions=predictions["classes"],
                            name="ACC"
                        )
                    }
                    # self.eval_accuracies.append(eval_metric_ops['accuracy'])
                    # pylint: disable=no-member
                    spec = tf.estimator.EstimatorSpec(
                        mode=mode,
                        loss=loss_layer,
                        eval_metric_ops=eval_metric_ops
                    )
                    # self.eval_accuracies.append(spec['accuracy'])
                    return spec
            # End of tf.variable_scope()
        # Return the model_fn function
        return model_fn
    def train(self, train_data, train_labels, train_input_fn="default",
              n_epochs=12):
        """Train the self-network with the the given training configuration.

        train_input_fn may be the string "default" (numpy_input_fn over
        train_data/train_labels) or a ready-made input_fn callable.
        """
        if isinstance(train_input_fn, str):
            if train_input_fn == "default":
                # pylint: disable=no-member
                train_input_fn = tf.estimator.inputs.numpy_input_fn(
                    x={"x": train_data},
                    y=train_labels,
                    batch_size=self.batch_size,
                    num_epochs=None,
                    shuffle=True
                )
            else:
                raise ValueError(
                    "train_input_fn has been specified as string, but no \
                    valid value has been provided. Options are: 'default'"
                )
        steps = n_epochs * math.ceil(self.n_obs_train/self.batch_size)
        nas_logger.debug(
            "Running tensorflow training for %d epochs (%d steps)",
            n_epochs,
            steps
        )
        train_res = self.classifier.train(
            input_fn=train_input_fn,
            steps=steps,
        )
        nas_logger.debug("TensorFlow training finished")
        return train_res
    def evaluate(self, eval_data, eval_labels, eval_input_fn="default"):
        """Evaluate a given dataset, with the internal network.

        NOTE(review): unlike train(), an unknown string value for
        eval_input_fn is passed through unvalidated and will fail inside
        the estimator — confirm whether a ValueError was intended here too.
        """
        # Validations:
        # If it is of type str, make sure is a valid
        if isinstance(eval_input_fn, str):
            if eval_input_fn == "default":
                # pylint: disable=no-member
                eval_input_fn = tf.estimator.inputs.numpy_input_fn(
                    x={"x": eval_data},
                    y=eval_labels,
                    num_epochs=1,
                    # batch_size=self.batch_size,
                    shuffle=False
                )
        nas_logger.debug("Running tensorflow evaluation")
        eval_res = self.classifier.evaluate(input_fn=eval_input_fn)
        nas_logger.debug("TensorFlow evaluation finished")
        return eval_res
class EarlyStopNASTrainer(DefaultNASTrainer):
"""Implement Training with Eearly Stop Strategy."""
    def __init__(self, encoded_network, input_shape, n_classes, batch_size=256,
                 log_path="./trainer", mu=0.5, rho=0.5, variable_scope="cnn",
                 profile_path="./profiler", op_decay_steps=12, op_beta1=0.9,
                 op_beta2=0.999, op_epsilon=10e-08, fcl_units=1024,
                 dropout_rate=0.4, n_obs_train=None):
        """Specific constructor with option for FLOPS and Density.

        mu and rho are the weights used for the refined accuracy of the
        BlockQNN implementation (per the comment below); density and flops
        are measured lazily while the model graph is built.
        """
        super(EarlyStopNASTrainer, self).__init__(
            encoded_network=encoded_network,
            input_shape=input_shape,
            n_classes=n_classes,
            batch_size=batch_size,
            log_path=log_path,
            variable_scope=variable_scope,
            profile_path=profile_path,
            op_decay_steps=op_decay_steps,
            op_beta1=op_beta1,
            op_beta2=op_beta2,
            op_epsilon=op_epsilon,
            fcl_units=fcl_units,
            dropout_rate=dropout_rate,
            n_obs_train=n_obs_train
        )
        # Custom variables for the refined accuracy in BlockQNN implementation
        # pylint: disable=invalid-name
        self.mu = mu
        self.rho = rho
        # Updated during training call
        self.density = None
        self.flops = None
        # Build the estimator
        # NOTE(review): the superclass __init__ above already called
        # _set_estimator(); this second call is a no-op because
        # self.classifier is no longer None at this point.
        self._set_estimator()
def build_model_fn(self):
"""Implement training of network with custom approach."""
# Define the model_fn we want to return
def model_fn(features, labels, mode):
with tf.variable_scope(self.variable_scope):
# 1. Define the input placeholder
if len(self.input_shape) == 2: # Reshape if necessary
new_shape = [-1] + list(self.input_shape) + [1]
net_input = tf.reshape(
tensor=features["x"],
shape=new_shape,
name="L0_RESHAPE"
)
else:
net_input = features["x"]
# 2. Simply call the network
self.tf_partial_network = sequence_to_net(
sequence=self.encoded_network,
input_tensor=net_input
)
# 3. Call here the functions for flops & density to avoid more
# elements. The check is done because for some reason, the
# number of FLOPS changes during training.
if self.flops is None:
self.flops = compute_network_flops(
graph=tf.get_default_graph(),
collection_name=self.variable_scope,
logdir=self.log_path
)
if self.density is None:
self.density = compute_network_density(
graph=tf.get_default_graph(),
collection_name=self.variable_scope
)
# 4. Build the fully-connected layer after the block
with tf.name_scope("L_FC"):
# Flatten and connect to the Dense Layer
ll_flat = tf.layers.flatten(
inputs=self.tf_partial_network,
name="Flatten"
)
dense_layer = tf.layers.dense(
inputs=ll_flat,
units=self.fcl_units,
activation=tf.nn.relu,
name="DENSE"
)
dropout_layer = tf.layers.dropout(
inputs=dense_layer,
rate=self.dropout_rate,
# pylint: disable=no-member
training=mode == tf.estimator.ModeKeys.TRAIN,
name="DROPOUT"
)
# 5. Build the prediction layer, based on a softmax
with tf.name_scope("L_PRED"):
# Logits layer
logits_layer = tf.layers.dense(
inputs=dropout_layer,
units=self.n_clases,
name="PL_Logits"
)
predictions = {
"classes": tf.argmax(
input=logits_layer,
axis=1,
name="PL_Classes"
),
"probabilities": tf.nn.softmax(
logits=logits_layer,
name="PL_Softmax"
)
}
# If we are asked for prediction only, we return the
# prediction and stop adding nodes to | |
sqrt(a_ + x_ ** S(2) * WC("b", S(1)))
/ (
sqrt(c_ + x_ ** S(2) * WC("d", S(1)))
* sqrt(e_ + x_ ** S(2) * WC("f", S(1)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons155,
)
rule1025 = ReplacementRule(pattern1025, replacement1025)
pattern1026 = Pattern(
Integral(
sqrt(c_ + x_ ** S(2) * WC("d", S(1)))
/ (
(a_ + x_ ** S(2) * WC("b", S(1))) ** (S(3) / 2)
* sqrt(e_ + x_ ** S(2) * WC("f", S(1)))
),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons155,
)
# -- Auto-generated Rubi (rule-based integration) definitions, rules 1026-1045. --
# Each Pattern couples an Integral template (wildcards a_, b_, ..., x_) with a
# list of constraints (cons*); each ReplacementRule binds that pattern to its
# replacement function defined elsewhere in this module.
rule1026 = ReplacementRule(pattern1026, replacement1026)

# sqrt(a + b*x^2)*sqrt(c + d*x^2)/sqrt(e + f*x^2), two alternative constraint sets
pattern1027 = Pattern(Integral(sqrt(a_ + x_**S(2)*WC('b', S(1)))*sqrt(c_ + x_**S(2)*WC('d', S(1)))/sqrt(e_ + x_**S(2)*WC('f', S(1))), x_), cons2, cons3, cons8, cons29, cons50, cons127, cons645)
rule1027 = ReplacementRule(pattern1027, replacement1027)
pattern1028 = Pattern(Integral(sqrt(a_ + x_**S(2)*WC('b', S(1)))*sqrt(c_ + x_**S(2)*WC('d', S(1)))/sqrt(e_ + x_**S(2)*WC('f', S(1))), x_), cons2, cons3, cons8, cons29, cons50, cons127, cons646)
rule1028 = ReplacementRule(pattern1028, replacement1028)

# sqrt(a + b*x^2)*sqrt(c + d*x^2)/(e + f*x^2)^(3/2)
pattern1029 = Pattern(Integral(sqrt(a_ + x_**S(2)*WC('b', S(1)))*sqrt(c_ + x_**S(2)*WC('d', S(1)))/(e_ + x_**S(2)*WC('f', S(1)))**(S(3)/2), x_), cons2, cons3, cons8, cons29, cons50, cons127, cons155)
rule1029 = ReplacementRule(pattern1029, replacement1029)

# General (a + b*x^n)^p*(c + d*x^n)^q*(e + f*x^n)^r products
pattern1030 = Pattern(Integral((a_ + x_**n_*WC('b', S(1)))**p_*(c_ + x_**n_*WC('d', S(1)))**q_*(e_ + x_**n_*WC('f', S(1)))**r_, x_), cons2, cons3, cons8, cons29, cons50, cons127, cons5, cons52, cons54, cons150, CustomConstraint(With1030))
rule1030 = ReplacementRule(pattern1030, replacement1030)
pattern1031 = Pattern(Integral((a_ + x_**n_*WC('b', S(1)))**p_*(c_ + x_**n_*WC('d', S(1)))**q_*(e_ + x_**n_*WC('f', S(1)))**r_, x_), cons2, cons3, cons8, cons29, cons50, cons127, cons5, cons52, cons54, cons198)
rule1031 = ReplacementRule(pattern1031, replacement1031)
pattern1032 = Pattern(Integral((a_ + x_**n_*WC('b', S(1)))**WC('p', S(1))*(c_ + x_**n_*WC('d', S(1)))**WC('q', S(1))*(e_ + x_**n_*WC('f', S(1)))**WC('r', S(1)), x_), cons2, cons3, cons8, cons29, cons50, cons127, cons4, cons5, cons52, cons54, cons647)
rule1032 = ReplacementRule(pattern1032, replacement1032)

# Same shape with general linear arguments u_, v_, w_ in place of x
pattern1033 = Pattern(Integral((u_**n_*WC('b', S(1)) + WC('a', S(0)))**WC('p', S(1))*(v_**n_*WC('d', S(1)) + WC('c', S(0)))**WC('q', S(1))*(w_**n_*WC('f', S(1)) + WC('e', S(0)))**WC('r', S(1)), x_), cons2, cons3, cons8, cons29, cons50, cons127, cons5, cons4, cons52, cons54, cons648, cons649, cons70, cons71)
rule1033 = ReplacementRule(pattern1033, replacement1033)

# Mixed exponent (mn) variants
pattern1034 = Pattern(Integral((c_ + x_**WC('mn', S(1))*WC('d', S(1)))**WC('q', S(1))*(e_ + x_**WC('n', S(1))*WC('f', S(1)))**WC('r', S(1))*(x_**WC('n', S(1))*WC('b', S(1)) + WC('a', S(0)))**WC('p', S(1)), x_), cons2, cons3, cons8, cons29, cons50, cons127, cons4, cons5, cons54, cons587, cons588)
rule1034 = ReplacementRule(pattern1034, replacement1034)
pattern1035 = Pattern(Integral((c_ + x_**WC('mn', S(1))*WC('d', S(1)))**WC('q', S(1))*(e_ + x_**WC('n', S(1))*WC('f', S(1)))**WC('r', S(1))*(x_**WC('n', S(1))*WC('b', S(1)) + WC('a', S(0)))**WC('p', S(1)), x_), cons2, cons3, cons8, cons29, cons50, cons127, cons4, cons52, cons587, cons40, cons650)
rule1035 = ReplacementRule(pattern1035, replacement1035)
pattern1036 = Pattern(Integral((c_ + x_**WC('mn', S(1))*WC('d', S(1)))**q_*(e_ + x_**WC('n', S(1))*WC('f', S(1)))**WC('r', S(1))*(x_**WC('n', S(1))*WC('b', S(1)) + WC('a', S(0)))**WC('p', S(1)), x_), cons2, cons3, cons8, cons29, cons50, cons127, cons4, cons5, cons52, cons54, cons587, cons388)
rule1036 = ReplacementRule(pattern1036, replacement1036)

# Factored quartic variants: (e1 + f1*x^n2)*(e2 + f2*x^n2) both raised to r
pattern1037 = Pattern(Integral((a_ + x_**n_*WC('b', S(1)))**WC('p', S(1))*(c_ + x_**n_*WC('d', S(1)))**WC('q', S(1))*(e1_ + x_**WC('n2', S(1))*WC('f1', S(1)))**WC('r', S(1))*(e2_ + x_**WC('n2', S(1))*WC('f2', S(1)))**WC('r', S(1)), x_), cons2, cons3, cons8, cons29, cons654, cons655, cons656, cons657, cons4, cons5, cons52, cons54, cons651, cons652, cons653)
rule1037 = ReplacementRule(pattern1037, replacement1037)
pattern1038 = Pattern(Integral((a_ + x_**n_*WC('b', S(1)))**WC('p', S(1))*(c_ + x_**n_*WC('d', S(1)))**WC('q', S(1))*(e1_ + x_**WC('n2', S(1))*WC('f1', S(1)))**WC('r', S(1))*(e2_ + x_**WC('n2', S(1))*WC('f2', S(1)))**WC('r', S(1)), x_), cons2, cons3, cons8, cons29, cons654, cons655, cons656, cons657, cons4, cons5, cons52, cons54, cons651, cons652)
rule1038 = ReplacementRule(pattern1038, replacement1038)

# Variants with an additional monomial factor (g*x)^m or x^m
pattern1039 = Pattern(Integral((x_*WC('g', S(1)))**WC('m', S(1))*(x_**n_*WC('b', S(1)))**p_*(c_ + x_**n_*WC('d', S(1)))**WC('q', S(1))*(e_ + x_**n_*WC('f', S(1)))**WC('r', S(1)), x_), cons3, cons8, cons29, cons50, cons127, cons210, cons19, cons4, cons5, cons52, cons54, cons658, cons502)
rule1039 = ReplacementRule(pattern1039, replacement1039)
pattern1040 = Pattern(Integral((x_*WC('g', S(1)))**WC('m', S(1))*(x_**WC('n', S(1))*WC('b', S(1)))**p_*(c_ + x_**n_*WC('d', S(1)))**WC('q', S(1))*(e_ + x_**n_*WC('f', S(1)))**WC('r', S(1)), x_), cons3, cons8, cons29, cons50, cons127, cons210, cons19, cons4, cons5, cons52, cons54, cons658, cons503)
rule1040 = ReplacementRule(pattern1040, replacement1040)
pattern1041 = Pattern(Integral((g_*x_)**m_*(x_**WC('n', S(1))*WC('b', S(1)))**p_*(c_ + x_**n_*WC('d', S(1)))**WC('q', S(1))*(e_ + x_**n_*WC('f', S(1)))**WC('r', S(1)), x_), cons3, cons8, cons29, cons50, cons127, cons210, cons19, cons4, cons5, cons52, cons54, cons21)
rule1041 = ReplacementRule(pattern1041, replacement1041)
pattern1042 = Pattern(Integral((x_*WC('g', S(1)))**WC('m', S(1))*(a_ + x_**n_*WC('b', S(1)))**WC('p', S(1))*(c_ + x_**n_*WC('d', S(1)))**WC('q', S(1))*(e_ + x_**n_*WC('f', S(1)))**WC('r', S(1)), x_), cons2, cons3, cons8, cons29, cons50, cons127, cons210, cons19, cons4, cons659)
rule1042 = ReplacementRule(pattern1042, replacement1042)
pattern1043 = Pattern(Integral(x_**WC('m', S(1))*(a_ + x_**n_*WC('b', S(1)))**WC('p', S(1))*(c_ + x_**n_*WC('d', S(1)))**WC('q', S(1))*(e_ + x_**n_*WC('f', S(1)))**WC('r', S(1)), x_), cons2, cons3, cons8, cons29, cons50, cons127, cons19, cons4, cons5, cons52, cons54, cons55)
rule1043 = ReplacementRule(pattern1043, replacement1043)
pattern1044 = Pattern(Integral(x_**WC('m', S(1))*(a_ + x_**n_*WC('b', S(1)))**WC('p', S(1))*(c_ + x_**n_*WC('d', S(1)))**WC('q', S(1))*(e_ + x_**n_*WC('f', S(1)))**WC('r', S(1)), x_), cons2, cons3, cons8, cons29, cons50, cons127, cons19, cons4, cons660, cons504)
rule1044 = ReplacementRule(pattern1044, replacement1044)
pattern1045 = Pattern(Integral(x_**WC('m', S(1))*(a_ + x_**n_*WC('b', S(1)))**WC('p', S(1))*(c_ + x_**n_*WC('d', S(1)))**WC('q', S(1))*(e_ + x_**n_*WC('f', S(1)))**WC('r', S(1)), x_), cons2, cons3, cons8, cons29, cons50, cons127, cons19, cons4, cons5, cons52, cons54, cons502)
rule1045 = ReplacementRule(pattern1045, replacement1045)
pattern1046 = Pattern(
Integral(
(g_ * x_) ** WC("m", S(1))
* (a_ + x_ ** n_ * WC("b", S(1))) ** WC("p", S(1))
* (c_ + x_ ** n_ * WC("d", S(1))) ** WC("q", S(1))
* (e_ + x_ ** n_ * WC("f", S(1))) ** WC("r", S(1)),
x_,
),
cons2,
cons3,
cons8,
| |
of the taps; one of 'corner', 'flange', 'D', or 'D/2',
[-]
Returns
-------
C : float
Coefficient of discharge of the orifice, [-]
Notes
-----
The following limits apply to the orifice plate standard [1]_:
The measured pressure difference for the orifice plate should be under
250 kPa.
There are roughness limits as well; the roughness should be under 6
micrometers, although there are many more conditions to that given in [1]_.
For orifice plates with D and D/2 or corner pressure taps:
* Orifice bore diameter muse be larger than 12.5 mm (0.5 inches)
* Pipe diameter between 50 mm and 1 m (2 to 40 inches)
* Beta between 0.1 and 0.75 inclusive
* Reynolds number larger than 5000 (for :math:`0.10 \le \beta \le 0.56`)
or for :math:`\beta \ge 0.56, Re_D \ge 16000\beta^2`
For orifice plates with flange pressure taps:
* Orifice bore diameter muse be larger than 12.5 mm (0.5 inches)
* Pipe diameter between 50 mm and 1 m (2 to 40 inches)
* Beta between 0.1 and 0.75 inclusive
* Reynolds number larger than 5000 and also larger than
:math:`170000\beta^2 D`.
This is also presented in Crane's TP410 (2009)publication, whereas the
1999 and 1982 editions showed only a graph for discharge coefficients.
Examples
--------
>>> C_Reader_Harris_Gallagher(D=0.07391, Do=0.0222, rho=1.165, mu=1.85E-5,
... m=0.12, taps='flange')
0.5990326277163659
References
----------
.. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
.. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 2: Orifice Plates.
.. [3] <NAME>., "The Equation for the Expansibility Factor for
Orifice Plates," Proceedings of FLOMEKO 1998, Lund, Sweden, 1998:
209-214.
.. [4] <NAME>. Orifice Plates and Venturi Tubes. Springer,
2015.
'''
A_pipe = pi/4.*D*D
v = m/(A_pipe*rho)
Re_D = rho*v*D/mu
beta = Do/D
if taps == 'corner':
L1, L2_prime = 0.0, 0.0
elif taps == 'D' or taps == 'D/2':
L1 = 1.0
L2_prime = 0.47
elif taps == 'flange':
L1 = L2_prime = 0.0254/D
else:
raise Exception('Unsupported tap location')
beta2 = beta*beta
beta4 = beta2*beta2
beta8 = beta4*beta4
A = (19000.0*beta/Re_D)**0.8
M2_prime = 2*L2_prime/(1.0 - beta)
delta_C_upstream = ((0.043 + 0.080*exp(-1E1*L1) - 0.123*exp(-7.0*L1))
*(1.0 - 0.11*A)*beta4/(1.0 - beta4))
# The max part is not in the ISO standard
delta_C_downstream = (-0.031*(M2_prime - 0.8*M2_prime**1.1)*beta**1.3
*(1.0 + 8*max(log10(3700./Re_D), 0.0)))
# C_inf is discharge coefficient with corner taps for infinite Re
# Cs, slope term, provides increase in discharge coefficient for lower
# Reynolds numbers.
# max term is not in the ISO standard
C_inf_C_s = (0.5961 + 0.0261*beta2 - 0.216*beta8
+ 0.000521*(1E6*beta/Re_D)**0.7
+ (0.0188 + 0.0063*A)*beta**3.5*(
max((1E6/Re_D)**0.3, 22.7 - 4700.0*(Re_D/1E6))))
C = (C_inf_C_s + delta_C_upstream + delta_C_downstream)
if D < 0.07112:
# Limit is 2.8 inches, .1 inches smaller than the internal diameter of
# a sched. 80 pipe.
# Suggested to be required not becausue of any effect of small
# diameters themselves, but because of edge radius differences.
# max term is given in [4]_ Reader-Harris, Michael book
delta_C_diameter = 0.011*(0.75 - beta)*max((2.8 - D/0.0254), 0.0)
C += delta_C_diameter
return C
def Reader_Harris_Gallagher_discharge(D, Do, P1, P2, rho, mu, k, taps='corner'):
    r'''Calculates the mass flow rate of fluid through an orifice based on the
    geometry of the plate, measured pressures of the orifice, and the density,
    viscosity, and isentropic exponent of the fluid. This solves an equation
    iteratively to obtain the correct flow rate.

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    k : float
        Isentropic exponent of fluid, [-]
    taps : str
        The orientation of the taps; one of 'corner', 'flange', 'D', or 'D/2',
        [-]

    Returns
    -------
    m : float
        Mass flow rate of fluid through the orifice, [kg/s]

    Notes
    -----
    The mass flow rate appears on both sides of the orifice equation (the
    discharge coefficient is Reynolds-number dependent), so the solution is
    found by root finding on the residual ``m - m_calc(m)``.

    Examples
    --------
    >>> Reader_Harris_Gallagher_discharge(D=0.07366, Do=0.05, P1=200000.0,
    ... P2=183000.0, rho=999.1, mu=0.0011, k=1.33, taps='D')
    7.702338035732167

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 2: Orifice Plates.
    '''
    # PERF FIX: the expansibility factor depends only on geometry, pressures
    # and k -- not on the iterate `m` -- so compute it once instead of on
    # every Newton iteration as before.
    epsilon = orifice_expansibility(D=D, Do=Do, P1=P1, P2=P2, k=k)

    def to_solve(m):
        # Discharge coefficient is flow-rate (Reynolds number) dependent
        C = C_Reader_Harris_Gallagher(D=D, Do=Do,
                                      rho=rho, mu=mu, m=m, taps=taps)
        m_calc = orifice_discharge(D=D, Do=Do, P1=P1, P2=P2, rho=rho,
                                   C=C, expansibility=epsilon)
        return m - m_calc
    # 2.81 kg/s: fixed initial guess in the typical working range
    return newton(to_solve, 2.81)
def discharge_coefficient_to_K(D, Do, C):
    r'''Converts a discharge coefficient to a standard loss coefficient,
    for use in computation of the actual pressure drop of an orifice or other
    device.

    .. math::
        K = \left[\frac{\sqrt{1-\beta^4(1-C^2)}}{C\beta^2} - 1\right]^2

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    C : float
        Coefficient of discharge of the orifice, [-]

    Returns
    -------
    K : float
        Loss coefficient with respect to the velocity and density of the fluid
        just upstream of the orifice, [-]

    Notes
    -----
    If expansibility is used in the orifice calculation, the result will not
    match with the specified pressure drop formula in [1]_; it can almost
    be matched by dividing the calculated mass flow by the expansibility factor
    and using that mass flow with the loss coefficient.

    Examples
    --------
    >>> discharge_coefficient_to_K(D=0.07366, Do=0.05, C=0.61512)
    5.2314291729754

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 2: Orifice Plates.
    '''
    # beta is the diameter ratio; only its second and fourth powers are used
    beta2 = (Do/D)*(Do/D)
    beta4 = beta2*beta2
    root = (1.0 - beta4*(1.0 - C*C))**0.5
    term = root/(C*beta2) - 1.0
    return term*term
def K_to_discharge_coefficient(D, Do, K):
    r'''Converts a standard loss coefficient to a discharge coefficient.

    .. math::
        C = \sqrt{\frac{1}{2 \sqrt{K} \beta^{4} + K \beta^{4}}
        - \frac{\beta^{4}}{2 \sqrt{K} \beta^{4} + K \beta^{4}} }

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    K : float
        Loss coefficient with respect to the velocity and density of the fluid
        just upstream of the orifice, [-]

    Returns
    -------
    C : float
        Coefficient of discharge of the orifice, [-]

    Notes
    -----
    If expansibility is used in the orifice calculation, the result will not
    match with the specified pressure drop formula in [1]_; it can almost
    be matched by dividing the calculated mass flow by the expansibility factor
    and using that mass flow with the loss coefficient.

    This expression was derived with SymPy, and checked numerically. There were
    three other, incorrect roots.

    Examples
    --------
    >>> K_to_discharge_coefficient(D=0.07366, Do=0.05, K=5.2314291729754)
    0.6151200000000001

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 2: Orifice Plates.
    '''
    beta = Do/D
    beta4 = (beta*beta)*(beta*beta)
    # shared denominator of both terms: (2*sqrt(K) + K)*beta^4
    denom = 2.0*K**0.5*beta4 + K*beta4
    return ((1.0 - beta4)/denom)**0.5
def dP_orifice(D, Do, P1, P2, C):
r'''Calculates the non-recoverable pressure drop of an orifice plate | |
<filename>udkm1Dsim/structures/structure.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__all__ = ['Structure']
__docformat__ = 'restructuredtext'
from .layers import AmorphousLayer, UnitCell
from .. import u, Q_
from ..helpers import make_hash_md5, finderb
import itertools
import numpy as np
class Structure:
"""Structure
Structure representation which holds various sub_structures.
Each sub_structure can be either a layer of :math:`N` UnitCell or
AmorphousLayer instances or a structure by itself.
It is possible to recursively build up 1D structures.
Args:
name (str): name of the sample.
Attributes:
name (str): name of sample.
sub_structures (list[AmorphousLayer, UnitCell, Structure]): list of
structures in sample.
substrate (Structure): structure of the substrate.
num_sub_systems (int): number of subsystems for heat and phonons
(electronic, lattice, spins, ...).
"""
def __init__(self, name):
self.name = name
self.num_sub_systems = 1
self.sub_structures = []
self.substrate = []
self.roughness = 0*u.nm
    def __str__(self, tabs=0):
        """Return a multi-line, tab-indented summary of the structure.

        Args:
            tabs (int): number of tab characters prepended to every line;
                used when printing nested sub-structures recursively.

        Returns:
            str: description of name, thickness, roughness, all
            sub-structures, and the substrate (if one is set).
        """
        tab_str = tabs*'\t'
        class_str = tab_str + 'Structure properties:\n\n'
        class_str += tab_str + 'Name : {:s}\n'.format(self.name)
        class_str += tab_str + 'Thickness : {:0.2f}\n'.format(self.get_thickness().to('nm'))
        class_str += tab_str + 'Roughness : {:0.2f}\n'.format(self.roughness)
        class_str += tab_str + '----\n'
        # traverse all substructures
        for sub_structure in self.sub_structures:
            # each entry is a pair [handle, repetitions]
            if isinstance(sub_structure[0], (AmorphousLayer, UnitCell)):
                # the substructure is an unitCell
                class_str += tab_str + '{:d} times {:s}: {:0.2f}\n'.format(
                    sub_structure[1],
                    sub_structure[0].name,
                    sub_structure[1]*sub_structure[0].thickness.to('nm'))
            else:
                # the substructure is a structure instance by itself
                # call the display() method recursively
                class_str += tab_str + 'sub-structure {:d} times:\n'.format(
                    sub_structure[1])
                class_str += sub_structure[0].__str__(tabs+1)
        class_str += tab_str + '----\n'
        # check for a substrate
        if isinstance(self.substrate, Structure):
            # only the first sub_structure of the substrate is reported here
            class_str += tab_str + 'Substrate:\n'
            class_str += tab_str + '----\n'
            class_str += tab_str + '{:d} times {:s}: {:0.2f}\n'.format(
                self.substrate.sub_structures[0][1],
                self.substrate.sub_structures[0][0].name,
                self.substrate.sub_structures[0][1]
                * self.substrate.sub_structures[0][0].thickness.to('nm'))
        else:
            class_str += tab_str + 'no substrate\n'
        return class_str
def visualize(self, unit='nm', fig_size=[20, 1], cmap='Set1', linewidth=0.1, show=True):
"""visualize
Simple visualization of the structure.
Args:
unit (str): SI unit of the distance of the Structure. Defaults to
'nm'.
fig_size (list[float]): figure size of the visualization plot.
Defaults to [20, 1].
cmap (str): Matplotlib colormap for colors of layers.
linewidth (float): line width of the patches.
show (boolean): show visualization plot at the end.
"""
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib import cm
_, d_end, _ = self.get_distances_of_layers(True) # distance vector of all layers
layer_interfaces = np.append(0, d_end.to(unit).magnitude) # Append zero at the start
thickness = np.max(layer_interfaces)
layer_ids = self.get_unique_layers()[0]
N = len(layer_ids) # number of unique layers
colortable = {}
for i in range(N):
colortable[layer_ids[i]] = cm.get_cmap(cmap)(i)
plt.figure(figsize=fig_size)
ax = plt.axes()
for i, name in enumerate(self.get_layer_vectors()[1]):
col = colortable.get(name, 'k')
rect = patches.Rectangle((layer_interfaces[i], 0), np.diff(layer_interfaces)[i], 1,
linewidth=linewidth, facecolor=col, edgecolor='k')
ax.add_patch(rect)
plt.xlim(0, thickness)
plt.ylim(0, 1)
plt.xlabel('Distance [{:s}]'.format(unit))
plt.yticks([], [])
# add labels for legend
for layer_id, col in colortable.items():
plt.plot(0, 0, color=col, label=layer_id)
leg = plt.legend(bbox_to_anchor=(0., 1.08, 1, .102), frameon=False, ncol=8)
for line in leg.get_lines():
line.set_linewidth(8.0)
if show:
plt.show()
def get_hash(self, **kwargs):
"""get_hash
Create an unique hash from all layer IDs in the correct order in the
structure as well as the corresponding material properties which are
given by the `kwargs`.
Args:
**kwargs (list[str]): types of requested properties..
Returns:
hash (str): unique hash.
"""
param = []
layers = self.get_unique_layers()
for layer in layers[1]:
param.append(layer.get_property_dict(**kwargs))
_, IDs, _ = self.get_layer_vectors()
param.append(IDs)
return make_hash_md5(param)
def add_sub_structure(self, sub_structure, N=1):
"""add_sub_structure
Add a sub_structure of :math:`N` layers or sub-structures to the
structure.
Args:
sub_structure (AmorphousLayer, UnitCell, Structure):
amorphous layer, unit cell, or structure to add as sub-structure.
N (int): number or repetitions.
"""
# check of the sub_structure is an instance of the unitCell or
# structure class
if not isinstance(sub_structure, (AmorphousLayer, UnitCell, Structure)):
raise ValueError('Class '
+ type(sub_structure).__name__
+ ' is no possible sub structure. '
+ 'Only AmorphousLayer, UnitCell, and '
+ 'Structure classes are allowed!')
# if a structure is added as a sub_structure, the sub_structure
# can not have a substrate
if isinstance(sub_structure, Structure):
if sub_structure.substrate:
raise ValueError('No substrate in sub_structure allowed!')
# check the number of subsystems of the sub_structure
if ((self.num_sub_systems > 1)
and not (sub_structure.num_sub_systems == self.num_sub_systems)):
raise ValueError('The number of subsystems in each sub_structure'
'must be the same!')
else:
self.num_sub_systems = sub_structure.num_sub_systems
# add a sub_structure of N repetitions to the structure with
self.sub_structures.append([sub_structure, N])
def add_substrate(self, sub_structure):
"""add_substrate
Add a structure as static substrate to the structure.
Args:
sub_structure (Structure): substrate structure.
"""
if not isinstance(sub_structure, Structure):
raise ValueError('Class '
+ type(sub_structure).__name__
+ ' is no possible substrate. '
+ 'Only structure class is allowed!')
self.substrate = sub_structure
def get_number_of_sub_structures(self):
"""get_number_of_sub_structures
This methods does not return the number of all layers in the
structure, see :meth:`.get_number_of_layers`.
Returns:
N (int): number of all sub structures.
"""
N = 0
for i in range(len(self.sub_structures)):
if isinstance(self.sub_structures[i][0], (AmorphousLayer, UnitCell)):
N = N + 1
else:
N = N + self.sub_structures[i][0].get_number_of_sub_structures()
return N
def get_number_of_layers(self):
"""get_number_of_layers
Determines the number of all layers in the structure.
Returns:
L (int): number of all layers in the structure.
"""
L = 0
# traverse the substructures
for i in range(len(self.sub_structures)):
if isinstance(self.sub_structures[i][0], AmorphousLayer) or \
isinstance(self.sub_structures[i][0], UnitCell):
L = L + self.sub_structures[i][1]
else:
# its a structure, so call the method recursively
L = L + self.sub_structures[i][0].get_number_of_layers() \
* self.sub_structures[i][1]
return L
def get_number_of_unique_layers(self):
"""get_number_of_unique_layers
Determines the number of unique layers in the structure.
Returns:
N (int): number of unique layers in the structure.
"""
N = len(self.get_unique_layers()[0])
return N
def get_thickness(self, units=True):
"""get_thickness
Determines the thickness of the structure.
Args:
units (boolean, optional): whether units should be returned or not.
Defaults to True.
Returns:
thickness (float, Quantity): the thickness from surface to bottom
of the structure.
"""
_, d_end, _ = self.get_distances_of_layers(units)
return d_end[-1]
def get_unique_layers(self):
"""get_unique_layers
The uniqueness is determined by the handle of each layer instance.
Returns:
(tuple):
- *layer_ids (list[str])* - ids of all unique layers instances in
the structure.
- *layer_handles (list[AmorphousLayer, UnitCell, Structure])* -
handles of all unique layers instances in the structure.
"""
layer_ids = []
layer_handles = []
# traverse the sub_structures
for i in range(len(self.sub_structures)):
if isinstance(self.sub_structures[i][0], (AmorphousLayer)) or \
isinstance(self.sub_structures[i][0], (UnitCell)):
# its a AmorphousLayer or UnitCell
layer_id = self.sub_structures[i][0].id
if not layer_ids:
# the list is empty at the beginning so add
# the first layer
layer_ids = layer_ids + [layer_id]
layer_handles = layer_handles + [self.sub_structures[i][0]]
else:
# the list is not empty so check if the id is
# already in the layers id list
if layer_id not in layer_ids:
# if id not in list, so add it
layer_ids = layer_ids + [layer_id]
layer_handles = layer_handles + [self.sub_structures[i][0]]
else:
# its a sub_structure
if not layer_ids:
# the list is empty at the beginning so call
# the method recursively and add the result to the
# layers list
layer_ids = self.sub_structures[i][0].get_unique_layers()[0]
layer_handles = self.sub_structures[i][0].get_unique_layers()[1]
else:
# the list is not empty so check if the ids
# from the recursive call are already in the layers id
# list.
| |
(self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Thrift-generated registration: record the result struct and its wire
# specification (field id, wire type, field name, nested type info, default).
all_structs.append(GetConsumerByEmailPassword_result)
GetConsumerByEmailPassword_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [SpotifakeManagement.ttypes.Consumer, None], None, ), # 0
    (1, TType.STRUCT, 'sErrorUserE', [SpotifakeManagement.ttypes.SErrorUserException, None], None, ), # 1
    (2, TType.STRUCT, 'sErrorNotFoundE', [SpotifakeManagement.ttypes.SErrorNotFoundException, None], None, ), # 2
    (3, TType.STRUCT, 'sErrorInvalidRequestE', [SpotifakeManagement.ttypes.SErrorInvalidRequestException, None], None, ), # 3
)
class AddConsumer_args(object):
    """Thrift-generated argument struct for the ``AddConsumer`` RPC.

    Generated serialization code - do not hand-edit.

    Attributes:
     - newConsumer: Consumer struct to add (Thrift field id 1).
    """
    def __init__(self, newConsumer=None,):
        self.newConsumer = newConsumer
    def read(self, iprot):
        # Fast path: C-accelerated decoder when available and a spec exists.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: walk the wire fields manually until STOP.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.newConsumer = SpotifakeManagement.ttypes.Consumer()
                    self.newConsumer.read(iprot)
                else:
                    # type mismatch -> skip payload to stay in sync
                    iprot.skip(ftype)
            else:
                # unknown field id -> skip for forward compatibility
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('AddConsumer_args')
        if self.newConsumer is not None:
            oprot.writeFieldBegin('newConsumer', TType.STRUCT, 1)
            self.newConsumer.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required-field checks are generated for this struct.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# register the struct and its wire specification
all_structs.append(AddConsumer_args)
AddConsumer_args.thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'newConsumer', [SpotifakeManagement.ttypes.Consumer, None], None, ), # 1
)
class AddConsumer_result(object):
    """Thrift-generated result struct for the ``AddConsumer`` RPC.

    Generated serialization code - do not hand-edit.

    Attributes:
     - success: i16 return value (Thrift field id 0).
     - sErrorUserE: user-error exception, if thrown (Thrift field id 1).
    """
    def __init__(self, success=None, sErrorUserE=None,):
        self.success = success
        self.sErrorUserE = sErrorUserE
    def read(self, iprot):
        # Fast path: C-accelerated decoder when available and a spec exists.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: walk the wire fields manually until STOP.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I16:
                    self.success = iprot.readI16()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorUserE = SpotifakeManagement.ttypes.SErrorUserException()
                    self.sErrorUserE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # unknown field id -> skip for forward compatibility
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('AddConsumer_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.I16, 0)
            oprot.writeI16(self.success)
            oprot.writeFieldEnd()
        if self.sErrorUserE is not None:
            oprot.writeFieldBegin('sErrorUserE', TType.STRUCT, 1)
            self.sErrorUserE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required-field checks are generated for this struct.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# register the struct and its wire specification
all_structs.append(AddConsumer_result)
AddConsumer_result.thrift_spec = (
    (0, TType.I16, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'sErrorUserE', [SpotifakeManagement.ttypes.SErrorUserException, None], None, ), # 1
)
class DeleteConsumer_args(object):
    """Thrift-generated argument struct for the ``DeleteConsumer`` RPC.

    Generated serialization code - do not hand-edit.

    Attributes:
     - email: address identifying the consumer to delete (Thrift field id 1).
    """
    def __init__(self, email=None,):
        self.email = email
    def read(self, iprot):
        # Fast path: C-accelerated decoder when available and a spec exists.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: walk the wire fields manually until STOP.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # Python 2 returns bytes and needs an explicit decode
                    self.email = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # unknown field id -> skip for forward compatibility
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path: C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('DeleteConsumer_args')
        if self.email is not None:
            oprot.writeFieldBegin('email', TType.STRING, 1)
            # Python 2 requires an explicit utf-8 encode before writing
            oprot.writeString(self.email.encode('utf-8') if sys.version_info[0] == 2 else self.email)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required-field checks are generated for this struct.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# register the struct and its wire specification
all_structs.append(DeleteConsumer_args)
DeleteConsumer_args.thrift_spec = (
    None, # 0
    (1, TType.STRING, 'email', 'UTF8', None, ), # 1
)
class DeleteConsumer_result(object):
"""
Attributes:
- success
- sErrorNotFoundE
- sErrorSystemE
- sErrorInvalidRequestE
"""
def __init__(self, success=None, sErrorNotFoundE=None, sErrorSystemE=None, sErrorInvalidRequestE=None,):
self.success = success
self.sErrorNotFoundE = sErrorNotFoundE
self.sErrorSystemE = sErrorSystemE
self.sErrorInvalidRequestE = sErrorInvalidRequestE
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I16:
self.success = iprot.readI16()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.sErrorNotFoundE = SpotifakeManagement.ttypes.SErrorNotFoundException()
self.sErrorNotFoundE.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
self.sErrorSystemE.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.sErrorInvalidRequestE = SpotifakeManagement.ttypes.SErrorInvalidRequestException()
self.sErrorInvalidRequestE.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('DeleteConsumer_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I16, 0)
oprot.writeI16(self.success)
oprot.writeFieldEnd()
if self.sErrorNotFoundE is not None:
oprot.writeFieldBegin('sErrorNotFoundE', TType.STRUCT, 1)
self.sErrorNotFoundE.write(oprot)
oprot.writeFieldEnd()
if self.sErrorSystemE is not None:
oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 2)
self.sErrorSystemE.write(oprot)
oprot.writeFieldEnd()
if self.sErrorInvalidRequestE is not None:
oprot.writeFieldBegin('sErrorInvalidRequestE', TType.STRUCT, 3)
self.sErrorInvalidRequestE.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        # Defined explicitly for Python 2 compatibility, where __ne__ is not
        # derived automatically from __eq__.
        return not (self == other)
# Register the struct for the fast-binary codec and declare its wire layout.
# Each spec entry is (field id, wire type, field name, type args, default).
all_structs.append(DeleteConsumer_result)
DeleteConsumer_result.thrift_spec = (
    (0, TType.I16, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'sErrorNotFoundE', [SpotifakeManagement.ttypes.SErrorNotFoundException, None], None, ), # 1
    (2, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ), # 2
    (3, TType.STRUCT, 'sErrorInvalidRequestE', [SpotifakeManagement.ttypes.SErrorInvalidRequestException, None], None, ), # 3
)
class UpdateConsumerPassword_args(object):
    """
    Attributes:
     - email
     - newPassword
    """
    def __init__(self, email=None, newPassword=None,):
        self.email = email
        self.newPassword = newPassword
    def read(self, iprot):
        """Populate this struct from the input protocol *iprot*."""
        # Prefer the accelerated C decoder when the transport supports it.
        use_fast = (iprot._fast_decode is not None
                    and isinstance(iprot.trans, TTransport.CReadableTransport)
                    and self.thrift_spec is not None)
        if use_fast:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if ftype == TType.STRING and fid in (1, 2):
                # Both declared fields are UTF-8 strings.
                text = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                if fid == 1:
                    self.email = text
                else:
                    self.newPassword = text
            else:
                # Unknown field id or unexpected wire type: skip the value.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this struct to the output protocol *oprot*."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('UpdateConsumerPassword_args')
        for field_name, field_id, value in (('email', 1, self.email),
                                            ('newPassword', 2, self.newPassword)):
            if value is not None:
                oprot.writeFieldBegin(field_name, TType.STRING, field_id)
                oprot.writeString(value.encode('utf-8') if sys.version_info[0] == 2 else value)
                oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        """Validation hook: this struct declares no field constraints."""
        return
    def __repr__(self):
        parts = ('%s=%r' % (key, value) for key, value in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Register the struct for the fast-binary codec and declare its wire layout.
# Each spec entry is (field id, wire type, field name, type args, default).
all_structs.append(UpdateConsumerPassword_args)
UpdateConsumerPassword_args.thrift_spec = (
    None, # 0
    (1, TType.STRING, 'email', 'UTF8', None, ), # 1
    (2, TType.STRING, 'newPassword', 'UTF8', None, ), # 2
)
class UpdateConsumerPassword_result(object):
    """
    Attributes:
     - success
     - sErrorUserE
     - sErrorNotFoundE
     - sErrorSystemE
     - sErrorInvalidRequestE
    """
    def __init__(self, success=None, sErrorUserE=None, sErrorNotFoundE=None, sErrorSystemE=None, sErrorInvalidRequestE=None,):
        # success holds the boolean RPC return value; each sError*E slot holds
        # at most one declared exception raised by the server. All stay None
        # until read() fills them from the wire.
        self.success = success
        self.sErrorUserE = sErrorUserE
        self.sErrorNotFoundE = sErrorNotFoundE
        self.sErrorSystemE = sErrorSystemE
        self.sErrorInvalidRequestE = sErrorInvalidRequestE
    def read(self, iprot):
        """Populate this result struct from the input protocol *iprot*."""
        # Fast path: delegate to the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                # Field 0 is the regular return value (bool).
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorUserE = SpotifakeManagement.ttypes.SErrorUserException()
                    self.sErrorUserE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.sErrorNotFoundE = SpotifakeManagement.ttypes.SErrorNotFoundException()
                    self.sErrorNotFoundE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRUCT:
                    self.sErrorInvalidRequestE = SpotifakeManagement.ttypes.SErrorInvalidRequestException()
                    self.sErrorInvalidRequestE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this result struct to the output protocol *oprot*."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('UpdateConsumerPassword_result')
        # Only fields that are set (not None) are emitted.
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if self.sErrorUserE is not None:
            oprot.writeFieldBegin('sErrorUserE', TType.STRUCT, 1)
            self.sErrorUserE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorNotFoundE is not None:
            oprot.writeFieldBegin('sErrorNotFoundE', TType.STRUCT, 2)
            self.sErrorNotFoundE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 3)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorInvalidRequestE is not None:
            oprot.writeFieldBegin('sErrorInvalidRequestE', TType.STRUCT, 4)
            self.sErrorInvalidRequestE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No field constraints declared for this struct.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        # Explicit for Python 2, where __ne__ is not derived from __eq__.
        return not (self == other)
all_structs.append(UpdateConsumerPassword_result)
UpdateConsumerPassword_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'sErrorUserE', | |
<filename>bin/scientificLaws.py
from functools import reduce
try:
import binutil # required to import from dreamcoder modules
except ModuleNotFoundError:
import bin.binutil # alt import if called as module
from bin.rational import RandomParameterization
from dreamcoder.domains.arithmetic.arithmeticPrimitives import (
f0,
f1,
fpi,
real_power,
real_subtraction,
real_addition,
real_division,
real_multiplication,
)
from dreamcoder.domains.list.listPrimitives import bootstrapTarget
from dreamcoder.dreamcoder import explorationCompression, commandlineArguments
from dreamcoder.grammar import Grammar
from dreamcoder.program import Program
from dreamcoder.recognition import RecurrentFeatureExtractor, DummyFeatureExtractor
from dreamcoder.task import DifferentiableTask, squaredErrorLoss
from dreamcoder.type import baseType, tlist, arrow
from dreamcoder.utilities import eprint, numberOfCPUs
# Base types of the scientific-law DSL. makeTrainingData samples "real"
# uniformly from (-S, S), "positive" from (0, S), and "vector" as a list of
# D reals.
tvector = baseType("vector")
treal = baseType("real")
tpositive = baseType("positive")
def makeTrainingData(
request,
law,
# Number of examples
N=10,
# Vector dimensionality
D=2,
# Maximum absolute value of a random number
S=20.0,
):
from random import random, randint
def sampleArgument(a, listLength):
if a.name == "real":
return random() * S * 2 - S
elif a.name == "positive":
return random() * S
elif a.name == "vector":
return [random() * S * 2 - S for _ in range(D)]
elif a.name == "list":
return [
sampleArgument(a.arguments[0], listLength) for _ in range(listLength)
]
else:
assert False, "unknown argument tp %s" % a
arguments = request.functionArguments()
e = []
for _ in range(N):
# Length of any requested lists
l = randint(1, 4)
xs = tuple(sampleArgument(a, l) for a in arguments)
y = law(*xs)
e.append((xs, y))
return e
def makeTask(
    name,
    request,
    law,
    # Number of examples
    N=20,
    # Vector dimensionality
    D=3,
    # Maximum absolute value of a random number
    S=20.0,
):
    """Build a DifferentiableTask named *name* for the ground-truth *law*.

    Prints the task name and its sampled training data, then wraps the data
    in a DifferentiableTask whose request type has been relaxed: "positive"
    becomes a plain real and "vector" becomes a list of reals.
    """
    print(name)
    examples = makeTrainingData(request, law, N=N, D=D, S=S)
    print(examples)
    print()
    def relax(tp):
        # Map the domain-specific types onto the generic real/list types.
        if tp.name == "real":
            return treal
        elif tp.name == "positive":
            return treal
        elif tp.name == "vector":
            return tlist(treal)
        elif tp.name == "list":
            return tlist(relax(tp.arguments[0]))
        elif tp.isArrow():
            return arrow(relax(tp.arguments[0]), relax(tp.arguments[1]))
        else:
            assert False, "could not make type generic: %s" % tp
    return DifferentiableTask(
        name,
        relax(request),
        examples,
        BIC=10.0,
        likelihoodThreshold=-0.001,
        restarts=2,
        steps=25,
        maxParameters=1,
        loss=squaredErrorLoss,
    )
def norm(v):
    """Return the Euclidean (L2) norm of vector *v*."""
    squared = sum(component * component for component in v)
    return squared ** 0.5
def unit(v):
    """Return the unit-length vector pointing along *v*."""
    inverse_length = 1.0 / norm(v)
    return scaleVector(inverse_length, v)
def scaleVector(a, v):
    """Return vector *v* with every component multiplied by scalar *a*."""
    return [a * component for component in v]
def innerProduct(a, b):
    """Dot product of *a* and *b* (zipped, so truncated to the shorter)."""
    total = 0
    for left, right in zip(a, b):
        total += left * right
    return total
def crossProduct(a, b):
    """Return the 3-D cross product a x b as a list."""
    (ax, ay, az) = tuple(a)
    (bx, by, bz) = tuple(b)
    return [
        ay * bz - az * by,
        az * bx - ax * bz,
        ax * by - ay * bx,
    ]
def vectorAddition(u, v):
    """Componentwise sum of vectors *u* and *v*."""
    return [ui + vi for ui, vi in zip(u, v)]
def vectorSubtraction(u, v):
    """Componentwise difference u - v."""
    return [ui - vi for ui, vi in zip(u, v)]
class LearnedFeatureExtractor(RecurrentFeatureExtractor):
    """Recurrent token-sequence feature extractor for the physics tasks.

    Examples are flattened into sequences over the fixed vocabulary
    {"STARTLIST", "ENDLIST", "REAL"}: every numeric leaf becomes "REAL" and
    every list is bracketed by STARTLIST/ENDLIST.
    """
    def tokenize(self, examples):
        """Convert each (inputs, output) example into lists of tokens."""
        def t(z):
            if isinstance(z, list):
                return ["STARTLIST"] + [y for x in z for y in t(x)] + ["ENDLIST"]
            assert isinstance(z, (float, int))
            return ["REAL"]
        return [(tuple(map(t, xs)), t(y)) for xs, y in examples]
    def __init__(self, tasks, examples, testingTasks=[], cuda=False):
        # Lexicon: every token occurring in any training or testing task.
        # (testingTasks default is mutable but only read here, never mutated.)
        lexicon = {
            c
            for t in tasks + testingTasks
            for xs, y in self.tokenize(t.examples)
            for c in reduce(lambda u, v: u + v, list(xs) + [y])
        }
        super(LearnedFeatureExtractor, self).__init__(
            lexicon=list(lexicon), cuda=cuda, H=64, tasks=tasks, bidirectional=True
        )
    def featuresOfProgram(self, p, tp):
        # Bug fix: the original referenced the undefined name `program`
        # (NameError at call time); the parameter is `p`.
        p = p.visit(RandomParameterization.single)
        return super(LearnedFeatureExtractor, self).featuresOfProgram(p, tp)
if __name__ == "__main__":
pi = 3.14 # I think this is close enough to pi
# Data taken from:
# https://secure-media.collegeboard.org/digitalServices/pdf/ap/ap-physics-1-equations-table.pdf
# https://secure-media.collegeboard.org/digitalServices/pdf/ap/physics-c-tables-and-equations-list.pdf
# http://mcat.prep101.com/wp-content/uploads/ES_MCATPhysics.pdf
# some linear algebra taken from "parallel distributed processing"
tasks = [
# parallel distributed processing
makeTask(
"vector addition (2)", arrow(tvector, tvector, tvector), vectorAddition
),
makeTask(
"vector addition (many)",
arrow(tlist(tvector), tvector),
lambda vs: reduce(vectorAddition, vs),
),
makeTask(
"vector norm", arrow(tvector, treal), lambda v: innerProduct(v, v) ** 0.5
),
# mcat
makeTask(
"freefall velocity = (2gh)**.5",
arrow(tpositive, treal),
lambda h: (2 * 9.8 * h) ** 0.5,
),
makeTask(
"v^2 = v_0^2 + 2a(x-x0)",
arrow(treal, treal, treal, treal, treal),
lambda v0, a, x, x0: v0**2 + 2 * a * (x - x0),
),
makeTask(
"v = (vx**2 + vy**2)**0.5",
arrow(treal, treal, treal),
lambda vx, vy: (vx**2 + vy**2) ** 0.5,
),
makeTask(
"a_r = v**2/R", arrow(treal, tpositive, treal), lambda v, r: v * v / r
),
makeTask(
"e = mc^2", arrow(tpositive, tpositive, treal), lambda m, c: m * c * c
),
makeTask(
"COM (general scalar)",
arrow(tvector, tvector, treal),
lambda ms, xs: sum(m * x for m, x in zip(ms, xs)) / sum(ms),
),
makeTask(
"COM (2 vectors)",
arrow(tvector, tvector, tpositive, tpositive, tvector),
lambda x1, x2, m1, m2: scaleVector(
1.0 / (m1 + m2),
vectorAddition(scaleVector(m1, x1), scaleVector(m2, x2)),
),
),
makeTask(
"density = mass/volume", arrow(treal, treal, treal), lambda m, v: m / v
),
makeTask(
"pressure = force/area", arrow(treal, treal, treal), lambda m, v: m / v
),
makeTask("P = I^2R", arrow(treal, treal, treal), lambda i, r: i * i * r),
makeTask("P = V^2/R", arrow(treal, treal, treal), lambda v, r: v * v / r),
makeTask("V_{rms} = V/sqrt2", arrow(treal, treal), lambda v: v / (2.0**0.5)),
makeTask(
"U = 1/2CV^2", arrow(treal, treal, treal), lambda c, v: 0.5 * c * v * v
),
makeTask("U = 1/2QV", arrow(treal, treal, treal), lambda c, v: 0.5 * c * v),
makeTask(
"U = 1/2Q^2/C", arrow(treal, tpositive, treal), lambda q, c: 0.5 * q * q / c
),
makeTask("P = 1/f", arrow(tpositive, tpositive), lambda f: 1.0 / f),
makeTask("c = 1/2*r", arrow(treal, treal), lambda r: r / 2.0),
# AP physics
makeTask(
"Fnet = sum(F)",
arrow(tlist(tvector), tvector),
lambda vs: reduce(vectorAddition, vs),
),
makeTask(
"a = sum(F)/m",
arrow(tpositive, tlist(tvector), tvector),
lambda m, vs: scaleVector(1.0 / m, reduce(vectorAddition, vs)),
),
makeTask(
"work = F.d",
arrow(tvector, tvector, treal),
lambda f, d: innerProduct(f, d),
S=20.0,
),
makeTask(
"P = F.v",
arrow(tvector, tvector, treal),
lambda f, d: innerProduct(f, d),
S=20.0,
),
makeTask(
"F = qvxB (3d)",
arrow(treal, tvector, tvector, tvector),
lambda q, v, b: scaleVector(q, crossProduct(v, b)),
),
makeTask(
"F = qvxB (2d)",
arrow(treal, treal, treal, treal, treal, treal),
lambda q, a1, a2, b1, b2: q * (a1 * b2 - a2 * b1),
),
makeTask("tau = rxF (3d)", arrow(tvector, tvector, tvector), crossProduct),
makeTask(
"tau = rxF (2d)",
arrow(treal, treal, treal, treal, treal),
lambda a1, a2, b1, b2: a1 * b2 - a2 * b1,
),
makeTask(
"v(t)", arrow(treal, treal, treal, treal), lambda v0, a, t: v0 + a * t
),
makeTask(
"x(t)",
arrow(treal, treal, treal, treal, treal),
lambda x0, v0, a, t: x0 + v0 * t + 0.5 * a * t * t,
),
makeTask(
"p=mv",
arrow(tpositive, tvector, tvector),
lambda m, v: [m * _v for _v in v],
),
makeTask(
"dp=Fdt", arrow(treal, tvector, tvector), lambda m, v: [m * _v for _v in v]
),
makeTask(
"K=1/2mv^2",
arrow(tpositive, tvector, tpositive),
lambda m, v: 0.5 * m * norm(v) ** 2,
),
makeTask(
"K=1/2Iw^2",
arrow(tpositive, tpositive, tpositive),
lambda m, v: 0.5 * m * v**2,
),
makeTask(
"E=pJ", arrow(treal, tvector, tvector), lambda p, j: [p * _j for _j in j]
),
makeTask(
"Fs=kx", arrow(treal, tvector, tvector), lambda k, x: [k * _x for _x in x]
),
makeTask("P=dE/dt", arrow(treal, treal, treal), lambda de, dt: de / dt),
makeTask(
"theta(t)",
arrow(treal, treal, treal, treal, treal),
lambda x0, v0, a, t: x0 + v0 * t + 0.5 * a * t * t,
),
makeTask(
"omega(t)", arrow(treal, treal, treal, treal), lambda v0, a, t: v0 + a * t
),
makeTask("T=2pi/w", arrow(tpositive, tpositive), lambda w: 2 * pi / w),
makeTask(
"Ts=2pi(m/k)^1/2",
arrow(tpositive, tpositive, tpositive),
lambda m, k: 2 * pi * (m / k) ** 0.5,
),
makeTask(
"Tp=2pi(l/g)^1/2",
arrow(tpositive, tpositive, tpositive),
lambda m, k: 2 * pi * (m / k) ** 0.5,
),
# makeTask("Newtonian gravitation (2 vectors)",
# arrow(tpositive, tpositive, tvector, tvector, tvector),
# lambda m1, m2, r1, r2: scaleVector(m1 * m2 / (norm(vectorSubtraction(r1, r2)) ** 2),
# unit(vectorSubtraction(r1, r2)))),
makeTask(
"Coulomb's law (2 vectors)",
arrow(tpositive, tpositive, tvector, tvector, tvector),
lambda m1, m2, r1, r2: scaleVector(
m1 * m2 / (norm(vectorSubtraction(r1, r2)) ** 2),
unit(vectorSubtraction(r1, r2)),
),
),
makeTask(
"Newtonian gravitation (vector)",
arrow(tpositive, tpositive, tvector, tvector),
lambda m1, m2, r: scaleVector(m1 * m2 / (norm(r) ** 2), unit(r)),
),
makeTask(
"Coulomb's law (vector)",
| |
makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.releases_listByDistributionGroup(distribution_group_name, owner_name, app_name, async=True)
>>> result = thread.get()
:param async bool
:param string distribution_group_name: The name of the distribution group. (required)
:param string owner_name: The name of the owner (required)
:param string app_name: The name of the application (required)
:return: ErrorDetails
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.releases_listByDistributionGroup_with_http_info(distribution_group_name, owner_name, app_name, **kwargs) # noqa: E501
else:
(data) = self.releases_listByDistributionGroup_with_http_info(distribution_group_name, owner_name, app_name, **kwargs) # noqa: E501
return data
    def releases_listByDistributionGroup_with_http_info(self, distribution_group_name, owner_name, app_name, **kwargs): # noqa: E501
        """releases_listByDistributionGroup # noqa: E501
        Return basic information about distributed releases in a given distribution group. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.releases_listByDistributionGroup_with_http_info(distribution_group_name, owner_name, app_name, async=True)
        >>> result = thread.get()
        :param async bool
        :param string distribution_group_name: The name of the distribution group. (required)
        :param string owner_name: The name of the owner (required)
        :param string app_name: The name of the application (required)
        :return: ErrorDetails
        If the method is called asynchronously,
        returns the request thread.
        """
        # Accepted kwargs: path parameters plus the client-control flags below.
        all_params = ['distribution_group_name', 'owner_name', 'app_name'] # noqa: E501
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject unknown keyword arguments early with a clear TypeError.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method releases_listByDistributionGroup" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'distribution_group_name' is set
        if ('distribution_group_name' not in params or
                params['distribution_group_name'] is None):
            raise ValueError("Missing the required parameter `distribution_group_name` when calling `releases_listByDistributionGroup`") # noqa: E501
        # verify the required parameter 'owner_name' is set
        if ('owner_name' not in params or
                params['owner_name'] is None):
            raise ValueError("Missing the required parameter `owner_name` when calling `releases_listByDistributionGroup`") # noqa: E501
        # verify the required parameter 'app_name' is set
        if ('app_name' not in params or
                params['app_name'] is None):
            raise ValueError("Missing the required parameter `app_name` when calling `releases_listByDistributionGroup`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'distribution_group_name' in params:
            path_params['distribution_group_name'] = params['distribution_group_name'] # noqa: E501
        if 'owner_name' in params:
            path_params['owner_name'] = params['owner_name'] # noqa: E501
        if 'app_name' in params:
            path_params['app_name'] = params['app_name'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json', 'multipart/form-data', 'application/json-patch+json']) # noqa: E501
        # Authentication setting
        auth_settings = ['APIToken'] # noqa: E501
        return self.api_client.call_api(
            '/v0.1/apps/{owner_name}/{app_name}/distribution_groups/{distribution_group_name}/releases', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ErrorDetails', # noqa: E501
            auth_settings=auth_settings,
            # NOTE(review): `async` became a reserved keyword in Python 3.7, so
            # this generated client can only run on Python 2.7/<=3.6 — confirm
            # the deployment runtime before upgrading.
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def devices_listCsvFormat(self, distribution_group_name, owner_name, app_name, **kwargs): # noqa: E501
"""devices_listCsvFormat # noqa: E501
Returns all devices associated with the given distribution group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.devices_listCsvFormat(distribution_group_name, owner_name, app_name, async=True)
>>> result = thread.get()
:param async bool
:param string distribution_group_name: The name of the distribution group. (required)
:param string owner_name: The name of the owner (required)
:param string app_name: The name of the application (required)
:param boolean unprovisioned_only: when true, filters out provisioned devices(optional)
:param array udids: multiple UDIDs which should be part of the resulting CSV.(optional)
:return: ErrorDetails
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.devices_listCsvFormat_with_http_info(distribution_group_name, owner_name, app_name, **kwargs) # noqa: E501
else:
(data) = self.devices_listCsvFormat_with_http_info(distribution_group_name, owner_name, app_name, **kwargs) # noqa: E501
return data
    def devices_listCsvFormat_with_http_info(self, distribution_group_name, owner_name, app_name, **kwargs): # noqa: E501
        """devices_listCsvFormat # noqa: E501
        Returns all devices associated with the given distribution group. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.devices_listCsvFormat_with_http_info(distribution_group_name, owner_name, app_name, async=True)
        >>> result = thread.get()
        :param async bool
        :param string distribution_group_name: The name of the distribution group. (required)
        :param string owner_name: The name of the owner (required)
        :param string app_name: The name of the application (required)
        :param boolean unprovisioned_only: when true, filters out provisioned devices(optional)
        :param array udids: multiple UDIDs which should be part of the resulting CSV.(optional)
        :return: ErrorDetails
        If the method is called asynchronously,
        returns the request thread.
        """
        # Accepted kwargs: path params, optional filters, and client-control flags.
        all_params = ['distribution_group_name', 'owner_name', 'app_name', 'unprovisioned_only', 'udids'] # noqa: E501
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject unknown keyword arguments early with a clear TypeError.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method devices_listCsvFormat" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'distribution_group_name' is set
        if ('distribution_group_name' not in params or
                params['distribution_group_name'] is None):
            raise ValueError("Missing the required parameter `distribution_group_name` when calling `devices_listCsvFormat`") # noqa: E501
        # verify the required parameter 'owner_name' is set
        if ('owner_name' not in params or
                params['owner_name'] is None):
            raise ValueError("Missing the required parameter `owner_name` when calling `devices_listCsvFormat`") # noqa: E501
        # verify the required parameter 'app_name' is set
        if ('app_name' not in params or
                params['app_name'] is None):
            raise ValueError("Missing the required parameter `app_name` when calling `devices_listCsvFormat`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'distribution_group_name' in params:
            path_params['distribution_group_name'] = params['distribution_group_name'] # noqa: E501
        if 'owner_name' in params:
            path_params['owner_name'] = params['owner_name'] # noqa: E501
        if 'app_name' in params:
            path_params['app_name'] = params['app_name'] # noqa: E501
        # Optional filters travel as query-string parameters.
        query_params = []
        if 'unprovisioned_only' in params:
            query_params.append(('unprovisioned_only', params['unprovisioned_only'])) # noqa: E501
        if 'udids' in params:
            query_params.append(('udids', params['udids'])) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['text/csv', 'text/csv', 'text/csv']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json', 'multipart/form-data', 'application/json-patch+json']) # noqa: E501
        # Authentication setting
        auth_settings = ['APIToken'] # noqa: E501
        return self.api_client.call_api(
            '/v0.1/apps/{owner_name}/{app_name}/distribution_groups/{distribution_group_name}/devices/download_devices_list', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ErrorDetails', # noqa: E501
            auth_settings=auth_settings,
            # NOTE(review): `async` became a reserved keyword in Python 3.7, so
            # this generated client can only run on Python 2.7/<=3.6 — confirm
            # the deployment runtime before upgrading.
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def devices_list(self, distribution_group_name, owner_name, app_name, **kwargs): # noqa: E501
"""devices_list # noqa: E501
Returns all devices associated with the given distribution group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.devices_list(distribution_group_name, owner_name, app_name, async=True)
>>> result = thread.get()
:param async bool
:param string distribution_group_name: The name of the distribution group. (required)
:param string owner_name: The name of the owner (required)
:param string app_name: The name of the application (required)
:param number release_id: when provided, gets the provisioning state of the devices owned by users of this distribution group when compared to the provided release.(optional)
:return: ErrorDetails
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.devices_list_with_http_info(distribution_group_name, owner_name, app_name, **kwargs) # noqa: E501
else:
(data) = self.devices_list_with_http_info(distribution_group_name, owner_name, app_name, **kwargs) # noqa: E501
return data
def devices_list_with_http_info(self, distribution_group_name, owner_name, app_name, **kwargs): # noqa: E501
"""devices_list # noqa: E501
Returns all devices associated with the given distribution group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.devices_list_with_http_info(distribution_group_name, owner_name, app_name, async=True)
>>> result = thread.get()
:param async bool
:param string distribution_group_name: The name of the distribution group. (required)
:param string owner_name: The name of the owner (required)
:param string app_name: The name of the application (required)
:param number release_id: when provided, gets the provisioning state of the devices owned by users of this distribution group when compared to the provided release.(optional)
:return: ErrorDetails
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['distribution_group_name', 'owner_name', 'app_name', 'release_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method devices_list" % key
)
params[key] = val
del | |
names
by stringing together the state names in the equivalence
classes. Else we keep the name of the representative of
eqch equivalence class.
"""
if (len(D["Q"]) == 1): # Already minimal
return D
else:
# Build a dict of all state combinations of DFA.
# Function state_combos also imparts a -1 for each state pair,
# initializing the separation distance at -1.
ht = dict(state_combos(list(D["Q"])))
# Mark final and non-final states to be 0-distinguishable.
# This is achieved by putting a 0 against those state pairs.
sepFinNonFin(D, ht)
# Main fixpoint computation: Assigning distinguishability dist.
#==============================================================
ht = fixptDist(D, ht)
# Pick out equivalent state-pairs, i.e. those that cannot be
# distinguished. These are still with a "-1" in ht.
ht_1 = [ stpair for (stpair, dist) in ht.items() if dist == -1 ]
# Now form equivalence classes
# what's returned is
# [(rep_1, [all_eql_states_1]), (rep_2, [all_eql_states_2]),...]
# which includes all equivalence classes of size 2 or more.
rep_eqc = bash_eql_classes(ht_1)
# Now we have to deal with singleton equivalence classes.
# These sit unmerged, OUTSIDE OF ALL (x,y) in ht_1
# i.e. all the entries in ht_1 are PARTNERED STATE PAIRS.
# If we now take D["Q"] and subtract from it all those x and y
# which are present in some pair in ht_1, we obtain completely
# non-mergable states. These are states in their own eql. classes.
# 1. Find all partnered states from ht_1
Partnered_states = list({x for (x,y) in ht_1} |
{y for (x,y) in ht_1})
# 2. Now who is left un-partnered?
List_of_self_only_eqlt_states = listminus(D["Q"], Partnered_states)
# 3. For these singletons, i.e. "self-only equivalent states",
# they are self-representative. Form pairs that indicate this fact.
rep_eqc_1 = [(x, [x]) for x in List_of_self_only_eqlt_states]
# 4. OK now, we can combine the set of pairs where each pair is
# (representative, [the list of equivalent states])
# So finally we get the list of equivalence classes with
# representatives which is of this form:
# [(a0,[a0, a1, a2, a3, a4]), (b0,[b0, b1]), (c0,[c0]), ...]
final_rep_eqc = rep_eqc + rep_eqc_1
# We are now ready to build a DFA out of final_rep_eqc.
# =====================================================
# 1. First, form the set of minimized states, which are
# state representatives.
minQ = {x for (x,y) in final_rep_eqc}
# 2. The Alpbahet remains the same.
minSigma = D["Sigma"]
# 3. The starting state is the representative of D["q0"]
minq0 = q0_of(D["q0"], final_rep_eqc)
# 4. The final states are the representatives of the original
# final states. This is computed by helper F_of.
minF = F_of(D["F"], final_rep_eqc)
# 5. The transition relation of the minimized DFA is obtained
# by the helper Delta_of
minDelta = Delta_of(D["Delta"], final_rep_eqc)
# 6. We now need to rename the states if the user wants verbose
# names (default is succinct). Verbose names are the name of
# states in each equivalence class strung together sep by "_".
if state_name_mode == 'verbose':
# First build a state-renaming hash-table involving
# mk_state_eqc_name
state_rename_ht = { x : mk_state_eqc_name(y)
for (x,y) in final_rep_eqc }
minQ = { state_rename_ht[x] for x in minQ }
minq0 = state_rename_ht[minq0]
minF = { state_rename_ht[f] for f in minF }
minDelta = { (state_rename_ht[x], y) : state_rename_ht[z]
for ((x,y),z) in minDelta.items() }
#
# Return the finished (minimized) DFA!
return mk_dfa(minQ, minSigma, minDelta, minq0, minF)
# In[12]:
def pairFR(L):
    """In : L (list of states)
       Out: list pairing L[0] with each state in L[1:], each pair carrying
            an initial distinguishability distance of -1.
       Helper for generating state_combos.
    """
    first = L[0]
    return [((first, other), -1) for other in L[1:]]
# In[13]:
def state_combos(L):
    """In : L (list of states, assumed length >= 2)
       Out: all unordered state pairs (L[i], L[j]) with i < j, each tagged
            with an initial distinguishability distance of -1.
       Helper for min_dfa: builds the 'ht' that fixptDist operates on.
    """
    if len(L) <= 2:
        return [((L[0], L[1]), -1)]
    head, rest = L[0], L[1:]
    # Pair the head with every remaining state, then recurse on the rest.
    with_head = [((head, other), -1) for other in rest]
    return with_head + state_combos(rest)
# In[14]:
def sepFinNonFin(D, ht):
    """In : D (consistent DFA)
       ht (hash table of distinguishability distances)
       Out: None; mutates ht, setting every (final, non-final) state pair's
            distance to 0, i.e. marking it 0-distinguishable.
       Helper for min_dfa.
    """
    final = D["F"]
    nonfinal = D["Q"] - D["F"]
    # A pair straddling the final/non-final divide is 0-distinguishable.
    for (x, y) in ht:
        if (x in final and y in nonfinal) or (y in final and x in nonfinal):
            ht[(x, y)] = 0
# In[15]:
def bash_eql_classes(eql_reln):
    """In : eql_reln (equivalence relation : list of pairs of states).
    Out: List of equivalence classes with representatives, i.e. a
         structure of the form
         [(a0, [a0, a1, a2]), (b0, [b0, b1]), (c0, [c0]), ...]
         where each first element is a representative for the states
         listed alongside it; one entry per equivalence class.
    Helper for min_dfa.
    Given an equivalence relation such as [(a,b),(a,c),(d,e),...]:
    1. grow the equivalence classes,
    2. elect a representative for each class,
    3. return the classes-with-representatives structure.
    The actual coalescing is delegated to bash_1.
    """
    # Seed the recursion with no equivalence classes discovered yet.
    no_classes_yet = []
    return bash_1(eql_reln, no_classes_yet)
# In[16]:
def listminus(L1, L2):
    """In : L1 : list or set
         L2 : list or set
    Out: List of the items of L1 that do not occur in L2
         (order and duplicates of L1 preserved).
    Helper for min_dfa and bash_1. Implements subtraction (L1 - L2).

    NOTE: membership tests against L2 stay linear on purpose -- the
    elements may themselves be sets (unhashable), so L2 cannot safely
    be converted to a set for O(1) lookups.
    """
    return list(filter(lambda item: item not in L2, L1))
# In[17]:
def bash_1(eql_reln, L_eq_classes):
"""In : eql_reln (equivalence relation : list of pairs of eqlt states)
L_eq_classes (list of eql classes which are SETS of states
for now.)
Out: return list of equivalence classes with representatives.
Helper for bash_eql_classes.
1) eql_reln is the current equivalence relation
(list of pairs)
2) L_eq_classes is a list of sets that are the eqlt
classes coalesced thus far.
3) We remove one pair at a time from the eql_reln and find
existing equivalence classes to expand, thus modifying
L_eq_classes each time.
Once the equivalence relation is emptied, we call mk_rep_eqc
thus making a list of equivalence classes with representatives
of the form
[(a0,[a0, a1, a2, a3, a4]), (b0,[b0, b1]), (c0,[c0]), ...].
"""
if eql_reln == []:
# When we have fully processed the given equivalence
# relation, return a list of equivalence classes with
# representatives of the form
# [(a0,[a0, a1, a2, a3, a4]), (b0,[b0, b1]), (c0,[c0]), ...]
return mk_rep_eqc(L_eq_classes)
else:
# pick the next pair from the eql_reln being coalesced
eq0 = eql_reln[0]
a = eq0[0]
b = eq0[1]
# We know that a is a state that is equivalent to b, since
# they exist as a pair in eql_reln[0].
# Now we must see if 'a' already lives in a COALESCED
# equivalence class
# Set Sa is a typical equivalence class in L_eq_classes
# See if 'a' is in Sa.
SaL = [Sa for Sa in L_eq_classes if a in Sa]
# There must be zero or one such set as Sa.
# Thus, |SaL| = 0 or 1
# Similarly, see which (if any) eql | |
the chosen solution TODO: anglesToHkl need no
# longer check the pseudo_angles as they will be generated with the
# same function and it will prove nothing
pseudo_angles = self.get_virtual_angles(position, False)
try:
for constraint in [
self.constraints._reference,
self.constraints._detector,
]:
for constraint_name, constraint_value in constraint.items():
if constraint_name == "a_eq_b":
assert radians_equivalent(
pseudo_angles["alpha"], pseudo_angles["beta"]
)
elif constraint_name == "bin_eq_bout":
assert radians_equivalent(
pseudo_angles["betain"], pseudo_angles["betaout"]
)
elif constraint_name not in pseudo_angles:
continue
else:
assert radians_equivalent(
constraint_value, pseudo_angles[constraint_name]
)
position_pseudo_angles_pairs.append((position, pseudo_angles))
except AssertionError:
continue
return position_pseudo_angles_pairs
    def _calc_N(self, Q: np.ndarray, n: np.ndarray) -> np.ndarray:
        """Return the orthonormal frame N as described by Equation 31.

        Q and n are treated as 3x1 column vectors (indexed ``v[i, 0]``).
        The returned 3x3 matrix has columns Q, (Q x n) x Q and Q x n,
        each normalised.  When n is (nearly) parallel to Q it is replaced
        by an alternative vector per Eq. (78) so the cross products do
        not vanish.

        NOTE(review): whether the caller's ``n`` array is mutated depends
        on whether ``normalised`` returns a copy or a view -- confirm.
        """
        Q = normalised(Q)
        n = normalised(n)
        if is_small(angle_between_vectors(Q, n)):
            # Replace the reference vector with an alternative vector from Eq.(78)
            def __key_func(v):
                return v[1]  # Workaround for mypy issue #9590
            # Pick the component of Q with the smallest magnitude; the
            # remaining two components define the replacement direction.
            idx_min, _ = min(
                enumerate([abs(Q[0, 0]), abs(Q[1, 0]), abs(Q[2, 0])]),
                key=__key_func,
            )
            idx_1, idx_2 = [idx for idx in range(3) if idx != idx_min]
            qval = sqrt(Q[idx_1, 0] * Q[idx_1, 0] + Q[idx_2, 0] * Q[idx_2, 0])
            n[idx_min, 0] = qval
            n[idx_1, 0] = -Q[idx_min, 0] * Q[idx_1, 0] / qval
            n[idx_2, 0] = -Q[idx_min, 0] * Q[idx_2, 0] / qval
            if is_small(norm(n)):
                # Degenerate fallback: build a vector orthogonal to Q
                # directly in the (idx_1, idx_2) plane.
                n[idx_min, 0] = 0
                n[idx_1, 0] = Q[idx_2, 0] / qval
                n[idx_2, 0] = -Q[idx_1, 0] / qval
        Qxn = cross3(Q, n)
        QxnxQ = cross3(Qxn, Q)
        QxnxQ = normalised(QxnxQ)
        Qxn = normalised(Qxn)
        return np.array(
            [
                [Q[0, 0], QxnxQ[0, 0], Qxn[0, 0]],
                [Q[1, 0], QxnxQ[1, 0], Qxn[1, 0]],
                [Q[2, 0], QxnxQ[2, 0], Qxn[2, 0]],
            ]
        )
    def _calc_angle_between_naz_and_qaz(
        self, theta: float, alpha: float, tau: float
    ) -> float:
        """Return the angle between the naz and qaz azimuths (radians).

        Implements Equation 30:
        cos(angle) = (cos(tau) - sin(alpha)*sin(theta)) / (cos(alpha)*cos(theta)),
        with the quotient clamped into range by ``bound`` before acos.

        Raises:
            ValueError: if the denominator vanishes because cos(alpha)
                or cos(theta) is too small (angle undefined).
        """
        # Equation 30:
        top = cos(tau) - sin(alpha) * sin(theta)
        bottom = cos(alpha) * cos(theta)
        if is_small(bottom):
            if is_small(cos(alpha)):
                raise ValueError("cos(alpha) is too small")
            if is_small(cos(theta)):
                raise ValueError("cos(theta) is too small")
            if is_small(sin(tau)):
                # sin(tau) ~ 0: presumably the reference vector is
                # (anti)parallel to Q, so the azimuths coincide -- confirm.
                return 0.0
        return acos(bound(top / bottom))
    def _calc_psi(
        self,
        alpha: float,
        theta: float,
        tau: float,
        qaz: Optional[float] = None,
        naz: Optional[float] = None,
    ) -> Iterator[float]:
        """Calculate psi from Eq. (18), (25) and (28).

        Yields one or two candidate azimuth (psi) values, or NaN when the
        geometry makes psi undefined or numerically unreliable.  When both
        qaz and naz are supplied, a single psi is resolved via atan2;
        otherwise both acos branches are yielded.
        """
        sin_tau = sin(tau)
        cos_theta = cos(theta)
        if is_small(sin_tau):
            # The reference vector is parallel to the scattering vector
            yield float("nan")
        elif is_small(cos_theta):
            # Reflection is unreachable as theta angle is too close to 90 deg
            yield float("nan")
        elif is_small(sin(theta)):
            # Reflection is unreachable as |Q| is too small
            yield float("nan")
        else:
            cos_psi = (cos(tau) * sin(theta) - sin(alpha)) / cos_theta  # (28)
            if qaz is None or naz is None:
                try:
                    acos_psi = acos(bound(cos_psi / sin_tau))
                    if is_small(acos_psi):
                        # The two acos branches collapse onto psi = 0.
                        yield 0.0
                    else:
                        # Two mirror-image solutions.
                        for psi in [acos_psi, -acos_psi]:
                            yield psi
                except AssertionError:
                    # bound() presumably asserts |x| <= 1 + SMALL; out of
                    # range means no real psi exists for these angles.
                    print("WARNING: Diffcalc could not calculate an azimuth (psi)")
                    yield float("nan")
            else:
                sin_psi = cos(alpha) * sin(qaz - naz)
                sgn = sign(sin_tau)
                # Consistency check: (sin_psi, cos_psi) should lie on a
                # circle of radius |sin_tau|; a large residual signals a
                # loss of numerical accuracy.
                eps = sin_psi ** 2 + cos_psi ** 2
                sigma_ = eps / sin_tau ** 2 - 1
                if not is_small(sigma_):
                    print(
                        "WARNING: Diffcalc could not calculate a unique azimuth "
                        "(psi) because of loss of accuracy in numerical calculation"
                    )
                    yield float("nan")
                else:
                    psi = atan2(sgn * sin_psi, sgn * cos_psi)
                    yield psi
    def _calc_remaining_reference_angles(
        self, name: str, value: float, theta: float, tau: float
    ) -> Tuple[float, float]:
        """Return alpha and beta given one of a_eq_b, alpha, beta or psi.

        Args:
            name: which reference constraint is fixed ("psi", "a_eq_b",
                "bin_eq_bout", "alpha"/"betain" or "beta"/"betaout").
            value: the constrained angle's value in radians (unused for
                the a_eq_b / bin_eq_bout cases).
            theta: scattering angle theta.
            tau: angle tau used by Eqs. (24), (26) and (27).

        Raises:
            DiffcalcException: when the constrained value makes the
                reflection unreachable (|sin| exceeds 1 + SMALL).
        """
        UNREACHABLE_MSG = (
            "The current combination of constraints with %s = %.4f\n"
            "prohibits a solution for the specified reflection."
        )
        if name == "psi":
            psi = value
            # Equation 26 for alpha
            sin_alpha = cos(tau) * sin(theta) - cos(theta) * sin(tau) * cos(psi)
            if abs(sin_alpha) > 1 + SMALL:
                raise DiffcalcException(UNREACHABLE_MSG % (name, degrees(value)))
            alpha = asin(bound(sin_alpha))
            # Equation 27 for beta
            sin_beta = cos(tau) * sin(theta) + cos(theta) * sin(tau) * cos(psi)
            if abs(sin_beta) > 1 + SMALL:
                raise DiffcalcException(UNREACHABLE_MSG % (name, degrees(value)))
            beta = asin(bound(sin_beta))
        elif name == "a_eq_b" or name == "bin_eq_bout":
            # Symmetric case: alpha and beta coincide.
            alpha = beta = asin(cos(tau) * sin(theta)) # (24)
        elif name == "alpha" or name == "betain":
            alpha = value # (24)
            # Solve Eq. (24) for the remaining angle.
            sin_beta = 2 * sin(theta) * cos(tau) - sin(alpha)
            if abs(sin_beta) > 1 + SMALL:
                raise DiffcalcException(UNREACHABLE_MSG % (name, degrees(value)))
            beta = asin(sin_beta)
        elif name == "beta" or name == "betaout":
            beta = value
            sin_alpha = 2 * sin(theta) * cos(tau) - sin(beta) # (24)
            if abs(sin_alpha) > 1 + SMALL:
                raise DiffcalcException(UNREACHABLE_MSG % (name, degrees(value)))
            alpha = asin(sin_alpha)
        return alpha, beta
    def _calc_det_angles_given_det_or_naz_constraint(
        self,
        det_constraint: Dict[str, Optional[float]],
        naz_constraint: Dict[str, Optional[float]],
        theta: float,
        tau: float,
        alpha: float,
    ) -> Iterator[Tuple[float, float, float, float]]:
        """Yield candidate (qaz, naz, delta, nu) detector-angle tuples.

        Exactly one of det_constraint / naz_constraint is expected to be
        non-empty (asserted below).  For a fixed detector angle the
        remaining angles come from _calc_remaining_detector_angles and
        naz is offset from qaz by the Eq. (30) angle (both signs); for a
        fixed naz the roles are reversed.  Yields nothing if the Eq. (30)
        angle cannot be computed.
        """
        assert det_constraint or naz_constraint
        try:
            naz_qaz_angle = self._calc_angle_between_naz_and_qaz(theta, alpha, tau)
        except AssertionError:
            # Presumably raised by bound() inside the angle calculation
            # when no real angle exists -- no solutions to yield.
            return
        if det_constraint:
            # One of the detector angles is given (Section 5.1)
            det_constraint_name, det_constraint_value = next(
                iter(det_constraint.items())
            )
            for delta, nu, qaz in self._calc_remaining_detector_angles(
                det_constraint_name, det_constraint_value, theta
            ):
                if is_small(naz_qaz_angle):
                    # naz and qaz coincide: a single solution.
                    naz_angles = [
                        qaz,
                    ]
                else:
                    naz_angles = [qaz - naz_qaz_angle, qaz + naz_qaz_angle]
                for naz in naz_angles:
                    yield qaz, naz, delta, nu
        elif naz_constraint: # The 'detector' angle naz is given:
            naz_name, naz = next(iter(naz_constraint.items()))
            assert naz_name == "naz"
            if is_small(naz_qaz_angle):
                qaz_angles = [
                    naz,
                ]
            else:
                qaz_angles = [naz - naz_qaz_angle, naz + naz_qaz_angle]
            for qaz in qaz_angles:
                for delta, nu, _ in self._calc_remaining_detector_angles(
                    "qaz", qaz, theta
                ):
                    yield qaz, naz, delta, nu
def _calc_remaining_detector_angles(
self, constraint_name: str, constraint_value: float, theta: float
) -> Iterator[Tuple[float, float, float]]:
"""Return delta, nu and qaz given one detector angle."""
# (section 5.1)
# Find qaz using various derivations of 17 and 18
sin_2theta = sin(2 * theta)
cos_2theta = cos(2 * theta)
if is_small(sin_2theta):
raise DiffcalcException(
"No meaningful scattering vector (Q) can be found when "
f"theta is so small {degrees(theta):.4f}."
)
if constraint_name == "delta":
delta = constraint_value
try:
asin_qaz = asin(bound(sin(delta) / sin_2theta)) # (17 & 18)
except AssertionError:
return
cos_delta = cos(delta)
if is_small(cos_delta):
# raise DiffcalcException(
# 'The %s and %s circles are redundant when delta is constrained to %.0f degrees.'
# 'Please change delta constraint or use 4-circle mode.' % ("nu", 'mu', delta * TODEG))
print(
(
"DEGENERATE: with delta=90, %s is degenerate: choosing "
"%s = 0 (allowed because %s is unconstrained)"
)
% ("nu", "nu", "nu")
)
acos_nu = 1.0
else:
try:
acos_nu = acos(bound(cos_2theta / cos_delta))
except AssertionError:
return
if is_small(cos(asin_qaz)):
qaz_angles = [
sign(asin_qaz) * pi / 2.0,
]
else:
qaz_angles = [asin_qaz, pi - asin_qaz]
if is_small(acos_nu):
nu_angles = [
0.0,
]
else:
nu_angles = [acos_nu, -acos_nu]
for qaz, nu in product(qaz_angles, nu_angles):
sgn_ref = sign(sin_2theta) * sign(cos(qaz))
sgn_ratio = sign(sin(nu)) * sign(cos_delta)
if sgn_ref == sgn_ratio:
yield delta, nu, qaz
elif constraint_name == "nu":
nu = constraint_value
cos_nu = cos(nu)
if is_small(cos_nu):
raise DiffcalcException(
"The %s circle constraint to %.0f degrees is redundant."
"Please change this constraint or use 4-circle mode."
% ("nu", degrees(nu))
)
cos_delta = cos_2theta / cos(nu)
cos_qaz = cos_delta * sin(nu) / sin_2theta
try:
acos_delta = acos(bound(cos_delta))
acos_qaz = acos(bound(cos_qaz))
except AssertionError:
return
if is_small(acos_qaz):
qaz_angles = [
0.0,
]
else:
qaz_angles = [acos_qaz, -acos_qaz]
if is_small(acos_delta):
delta_angles = [
0.0,
]
else:
delta_angles = [acos_delta, -acos_delta]
for qaz, delta in product(qaz_angles, delta_angles):
sgn_ref = sign(sin(delta))
sgn_ratio = sign(sin(qaz)) * sign(sin_2theta)
if sgn_ref == sgn_ratio:
yield delta, nu, qaz
elif constraint_name == "qaz":
qaz = constraint_value
asin_delta = asin(sin(qaz) * sin_2theta)
if is_small(cos(asin_delta)):
delta_angles = [
sign(asin_delta) * pi / 2.0,
]
else:
delta_angles = [asin_delta, pi - asin_delta]
for delta in delta_angles:
cos_delta = cos(delta)
if is_small(cos_delta):
print(
(
"DEGENERATE: with delta=90, %s is degenerate: choosing "
"%s = 0 (allowed because %s is unconstrained)"
)
% ("nu", "nu", "nu")
)
# raise DiffcalcException(
# 'The %s circle is redundant when delta is at %.0f degrees.'
# 'Please change detector constraint or use 4-circle mode.' % ("nu", delta * TODEG))
nu = 0.0
else:
sgn_delta = sign(cos_delta)
nu = atan2(
sgn_delta * sin_2theta * cos(qaz), sgn_delta * cos_2theta
)
yield delta, nu, qaz
else:
raise DiffcalcException(
constraint_name + " is not an explicit detector angle "
"(naz | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Project(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
The ARN of the CodeBuild project.
"""
artifacts: pulumi.Output[dict]
"""
Information about the project's build output artifacts. Artifact blocks are documented below.
* `artifactIdentifier` (`str`) - The artifact identifier. Must be the same specified inside AWS CodeBuild buildspec.
* `encryptionDisabled` (`bool`) - If set to true, output artifacts will not be encrypted. If `type` is set to `NO_ARTIFACTS` then this value will be ignored. Defaults to `false`.
* `location` (`str`) - The location of the source code from git or s3.
* `name` (`str`) - The name of the project. If `type` is set to `S3`, this is the name of the output artifact object
* `namespaceType` (`str`) - The namespace to use in storing build artifacts. If `type` is set to `S3`, then valid values for this parameter are: `BUILD_ID` or `NONE`.
* `overrideArtifactName` (`bool`) - If set to true, a name specified in the build spec file overrides the artifact name.
* `packaging` (`str`) - The type of build output artifact to create. If `type` is set to `S3`, valid values for this parameter are: `NONE` or `ZIP`
* `path` (`str`) - If `type` is set to `S3`, this is the path to the output artifact
* `type` (`str`) - The type of repository that contains the source code to be built. Valid values for this parameter are: `CODECOMMIT`, `CODEPIPELINE`, `GITHUB`, `GITHUB_ENTERPRISE`, `BITBUCKET` or `S3`.
"""
badge_enabled: pulumi.Output[bool]
"""
Generates a publicly-accessible URL for the projects build badge. Available as `badge_url` attribute when enabled.
"""
badge_url: pulumi.Output[str]
"""
The URL of the build badge when `badge_enabled` is enabled.
"""
build_timeout: pulumi.Output[float]
"""
How long in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait until timing out any related build that does not get marked as completed. The default is 60 minutes.
"""
cache: pulumi.Output[dict]
"""
Information about the cache storage for the project. Cache blocks are documented below.
* `location` (`str`) - The location of the source code from git or s3.
* `modes` (`list`) - Specifies settings that AWS CodeBuild uses to store and reuse build dependencies. Valid values: `LOCAL_SOURCE_CACHE`, `LOCAL_DOCKER_LAYER_CACHE`, and `LOCAL_CUSTOM_CACHE`
* `type` (`str`) - The type of repository that contains the source code to be built. Valid values for this parameter are: `CODECOMMIT`, `CODEPIPELINE`, `GITHUB`, `GITHUB_ENTERPRISE`, `BITBUCKET` or `S3`.
"""
description: pulumi.Output[str]
"""
A short description of the project.
"""
encryption_key: pulumi.Output[str]
"""
The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build project's build output artifacts.
"""
environment: pulumi.Output[dict]
"""
Information about the project's build environment. Environment blocks are documented below.
* `certificate` (`str`) - The ARN of the S3 bucket, path prefix and object key that contains the PEM-encoded certificate.
* `computeType` (`str`) - Information about the compute resources the build project will use. Available values for this parameter are: `BUILD_GENERAL1_SMALL`, `BUILD_GENERAL1_MEDIUM`, `BUILD_GENERAL1_LARGE` or `BUILD_GENERAL1_2XLARGE`. `BUILD_GENERAL1_SMALL` is only valid if `type` is set to `LINUX_CONTAINER`. When `type` is set to `LINUX_GPU_CONTAINER`, `compute_type` need to be `BUILD_GENERAL1_LARGE`.
* `environmentVariables` (`list`) - A set of environment variables to make available to builds for this build project.
* `name` (`str`) - The name of the project. If `type` is set to `S3`, this is the name of the output artifact object
* `type` (`str`) - The type of repository that contains the source code to be built. Valid values for this parameter are: `CODECOMMIT`, `CODEPIPELINE`, `GITHUB`, `GITHUB_ENTERPRISE`, `BITBUCKET` or `S3`.
* `value` (`str`) - The environment variable's value.
* `image` (`str`) - The Docker image to use for this build project. Valid values include [Docker images provided by CodeBuild](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-available.html) (e.g `aws/codebuild/standard:2.0`), [Docker Hub images](https://hub.docker.com/) (e.g. `nginx:latest`), and full Docker repository URIs such as those for ECR (e.g. `137112412989.dkr.ecr.us-west-2.amazonaws.com/amazonlinux:latest`).
* `imagePullCredentialsType` (`str`) - The type of credentials AWS CodeBuild uses to pull images in your build. Available values for this parameter are `CODEBUILD` or `SERVICE_ROLE`. When you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use an AWS CodeBuild curated image, you must use CODEBUILD credentials. Default to `CODEBUILD`
* `privilegedMode` (`bool`) - If set to true, enables running the Docker daemon inside a Docker container. Defaults to `false`.
* `registryCredential` (`dict`) - Information about credentials for access to a private Docker registry. Registry Credential config blocks are documented below.
* `credential` (`str`) - The Amazon Resource Name (ARN) or name of credentials created using AWS Secrets Manager.
* `credentialProvider` (`str`) - The service that created the credentials to access a private Docker registry. The valid value, SECRETS_MANAGER, is for AWS Secrets Manager.
* `type` (`str`) - The type of repository that contains the source code to be built. Valid values for this parameter are: `CODECOMMIT`, `CODEPIPELINE`, `GITHUB`, `GITHUB_ENTERPRISE`, `BITBUCKET` or `S3`.
"""
logs_config: pulumi.Output[dict]
"""
Configuration for the builds to store log data to CloudWatch or S3.
* `cloudwatchLogs` (`dict`) - Configuration for the builds to store logs to CloudWatch
* `groupName` (`str`) - The group name of the logs in CloudWatch Logs.
* `status` (`str`) - Current status of logs in S3 for a build project. Valid values: `ENABLED`, `DISABLED`. Defaults to `DISABLED`.
* `streamName` (`str`) - The stream name of the logs in CloudWatch Logs.
* `s3Logs` (`dict`) - Configuration for the builds to store logs to S3.
* `encryptionDisabled` (`bool`) - If set to true, output artifacts will not be encrypted. If `type` is set to `NO_ARTIFACTS` then this value will be ignored. Defaults to `false`.
* `location` (`str`) - The location of the source code from git or s3.
* `status` (`str`) - Current status of logs in S3 for a build project. Valid values: `ENABLED`, `DISABLED`. Defaults to `DISABLED`.
"""
name: pulumi.Output[str]
"""
The name of the project. If `type` is set to `S3`, this is the name of the output artifact object
"""
queued_timeout: pulumi.Output[float]
"""
How long in minutes, from 5 to 480 (8 hours), a build is allowed to be queued before it times out. The default is 8 hours.
"""
secondary_artifacts: pulumi.Output[list]
"""
A set of secondary artifacts to be used inside the build. Secondary artifacts blocks are documented below.
* `artifactIdentifier` (`str`) - The artifact identifier. Must be the same specified inside AWS CodeBuild buildspec.
* `encryptionDisabled` (`bool`) - If set to true, output artifacts will not be encrypted. If `type` is set to `NO_ARTIFACTS` then this value will be ignored. Defaults to `false`.
* `location` (`str`) - The location of the source code from git or s3.
* `name` (`str`) - The name of the project. If `type` is set to `S3`, this is the name of the output artifact object
* `namespaceType` (`str`) - The namespace to use in storing build artifacts. If `type` is set to `S3`, then valid values for this parameter are: `BUILD_ID` or `NONE`.
* `overrideArtifactName` (`bool`) - If set to true, a name specified in the build spec file overrides the artifact name.
* `packaging` (`str`) - The type of build output artifact to create. If `type` is set to `S3`, valid values for this parameter are: `NONE` or `ZIP`
* `path` (`str`) - If `type` is set to `S3`, this is the path to the output artifact
* `type` (`str`) - The type of repository that contains the source code to be built. Valid values for this parameter are: `CODECOMMIT`, `CODEPIPELINE`, `GITHUB`, `GITHUB_ENTERPRISE`, `BITBUCKET` or `S3`.
"""
secondary_sources: pulumi.Output[list]
"""
| |
class TypedRead:
    """Describes a typed streaming read port: the element type, the port
    name, and buffer/burst sizing, plus the naming scheme used for its
    request/response channels in generated code."""

    def __init__( self, ty, nm, buf_size_in_cl = "128", max_burst_count = "1<<24", buf_size_in_burst_reqs = None):
        self.ty = ty
        self.nm = nm
        self.buf_size_in_cl = buf_size_in_cl
        self.max_burst_count = max_burst_count
        # Burst-request buffer depth defaults to the cacheline buffer depth.
        self.buf_size_in_burst_reqs = (
            buf_size_in_cl if buf_size_in_burst_reqs is None else buf_size_in_burst_reqs
        )

    def loadUnitType(self):
        """C++ LoadUnitParams<> instantiation for this port."""
        return f"LoadUnitParams< {self.ty}, {self.buf_size_in_cl}, {self.max_burst_count}, {self.buf_size_in_burst_reqs}>"

    def reqTy(self):
        return f"MemTypedReadReqType<{self.ty}>"

    def respTy(self):
        return f"MemTypedReadRespType<{self.ty}>"

    def reqNm(self):
        return f"{self.nm}Req"

    def respNm(self):
        return f"{self.nm}Resp"

    def reqNmT(self):
        # Testbench-side names: requests flow in, responses flow out.
        return self.reqNm() + "In"

    def respNmT(self):
        return self.respNm() + "Out"

    def reqNmK(self):
        # Kernel-side names: mirror image of the testbench side.
        return self.reqNm() + "Out"

    def respNmK(self):
        return self.respNm() + "In"
class SingleRead:
    """Describes a single-element (tagged) read port and the naming
    scheme for its request/response channels in generated code."""

    def __init__( self, ty, tag_ty, nm, buf_size = "128"):
        self.ty = ty
        self.tag_ty = tag_ty
        self.nm = nm
        self.buf_size = buf_size

    def loadUnitType(self):
        """C++ LoadUnitSingleReqParams<> instantiation for this port."""
        return f"LoadUnitSingleReqParams< {self.ty}, {self.tag_ty}, {self.buf_size}>"

    def reqTy(self):
        return f"MemSingleReadReqType<{self.ty},{self.tag_ty}>"

    def respTy(self):
        return f"MemSingleReadRespType<{self.ty},{self.tag_ty}>"

    def reqNm(self):
        return f"{self.nm}Req"

    def respNm(self):
        return f"{self.nm}Resp"

    def reqNmT(self):
        # Testbench-side names: requests flow in, responses flow out.
        return self.reqNm() + "In"

    def respNmT(self):
        return self.respNm() + "Out"

    def reqNmK(self):
        # Kernel-side names: mirror image of the testbench side.
        return self.reqNm() + "Out"

    def respNmK(self):
        return self.respNm() + "In"
class TypedWrite:
    """Describes a typed streaming write port (separate request and data
    channels) and the naming scheme used in generated code."""

    def __init__( self, ty, nm):
        self.ty = ty
        self.nm = nm

    def storeUnitType(self):
        """C++ StoreUnitParams<> instantiation for this port."""
        return f"StoreUnitParams< {self.ty}>"

    def reqTy(self):
        return f"MemTypedWriteReqType<{self.ty}>"

    def dataTy(self):
        return f"MemTypedWriteDataType<{self.ty}>"

    def reqNm(self):
        return f"{self.nm}Req"

    def dataNm(self):
        return f"{self.nm}Data"

    def reqNmT(self):
        # Testbench-side names: both request and data flow in.
        return self.reqNm() + "In"

    def dataNmT(self):
        return self.dataNm() + "In"

    def reqNmK(self):
        # Kernel-side names: both flow out.
        return self.reqNm() + "Out"

    def dataNmK(self):
        return self.dataNm() + "Out"
class ArrayField:
    """A fixed-size array member: `count` elements of element field `ty`.

    Flattened field indices run across the elements; for user-typed
    elements an index decomposes into (element, sub-field) via integer
    div/mod on the element's own field count.  The generated strings are
    C++ statement bodies; `sum` is the flattened index offset of this
    member within the enclosing struct.
    """
    def __str__( self): return "ArrayField<" + str(self.ty) + "," + str(self.count) + ">"
    def __init__( self, ty, count):
        self.ty = ty
        self.count = count
    @property
    def declaration( self):
        # C array declaration, e.g. "int data[8];"
        return "%s %s[%d];" % (self.ty.ty, self.nm, self.count)
    def fieldWidth( self, sum):
        # %% emits a literal '%' (C modulus) in the generated code.
        if type(self.ty) is UserTypeField:
            ln = self.ty.user_ty.numberOfFields
            return " return %s::fieldWidth( (index-%d) %% %d);" % (self.ty.user_ty.ty, sum, ln)
        else:
            return " return %d;" % (self.ty.bitwidth,)
    def putField( self, sum):
        if type(self.ty) is UserTypeField:
            ln = self.ty.user_ty.numberOfFields
            return " %s[(index-%d)/%d].putField( (index-%d) %% %d, d);" % (self.nm, sum, ln, sum, ln)
        else:
            # self.ty.nm is the same string as self.nm (see nm property).
            return " %s[index-%d] = d;" % (self.ty.nm,sum)
    def getField( self, sum):
        if type(self.ty) is UserTypeField:
            ln = self.ty.user_ty.numberOfFields
            return " return %s[(index-%d)/%d].getField( (index-%d) %% %d);" % (self.nm, sum, ln, sum, ln)
        else:
            return " return %s[index-%d];" % (self.ty.nm,sum)
    @property
    def nm( self):
        # The array member takes its name from its element field.
        return self.ty.nm
    @property
    def numberOfFields( self):
        return self.count * self.ty.numberOfFields
    @property
    def bitwidth( self):
        return self.count * self.ty.bitwidth
class BasicField:
    """A scalar member of a generated struct.

    Holds the C type string (`ty`), the member name (`nm`), a tag used
    as the printable form (`tag`), and the field's width in bits.  The
    *Field methods return C++ statement bodies for the generated
    accessor switch; `sum` is the member's flattened index offset.
    """

    def __init__( self, ty, nm, tag, bitwidth):
        self.ty = ty
        self.nm = nm
        self.tag = tag
        self.bitwidth = bitwidth

    def __str__(self):
        return self.tag

    @property
    def declaration(self):
        # C declaration, e.g. "int x;"
        return f"{self.ty} {self.nm};"

    def fieldWidth(self, sum):
        return " return %d;" % self.bitwidth

    def putField(self, sum):
        return f" {self.nm} = d;"

    def getField(self, sum):
        return f" return {self.nm};"

    @property
    def numberOfFields(self):
        # A scalar contributes exactly one flattened field.
        return 1
class BitReducedField(BasicField):
    """A BasicField narrowed to `bitwidth` bits, declared as a C bit-field."""

    def __init__( self, ty, bitwidth):
        # The reduced width must fit within the wrapped field's width.
        assert ty.bitwidth >= bitwidth
        super().__init__( ty, ty.nm, "", bitwidth)

    def __str__(self):
        return f"BitReducedField<{self.ty},{self.bitwidth}>"

    @property
    def declaration(self):
        # C bit-field declaration, e.g. "int x : 4;"
        return f"{self.ty.ty} {self.nm} : {self.bitwidth};"
class UnsignedLongLongField(BasicField):
    """64-bit unsigned scalar field ('unsigned long long')."""
    def __init__( self, nm):
        super().__init__( "unsigned long long", nm, "UnsignedLongLongField", 64)
class SignedLongLongField(BasicField):
    """64-bit signed scalar field ('long long')."""
    def __init__( self, nm):
        super().__init__( "long long", nm, "SignedLongLongField", 64)
class UnsignedIntField(BasicField):
    """32-bit unsigned scalar field ('unsigned int')."""
    def __init__( self, nm):
        super().__init__( "unsigned int", nm, "UnsignedIntField", 32)
class SignedIntField(BasicField):
    """32-bit signed scalar field ('int')."""
    def __init__( self, nm):
        super().__init__( "int", nm, "SignedIntField", 32)
class UnsignedShortField(BasicField):
    """16-bit unsigned scalar field ('unsigned short')."""
    def __init__( self, nm):
        super().__init__( "unsigned short", nm, "UnsignedShortField", 16)
class SignedShortField(BasicField):
    """16-bit signed scalar field ('short')."""
    def __init__( self, nm):
        super().__init__( "short", nm, "SignedShortField", 16)
class UnsignedCharField(BasicField):
    """8-bit unsigned scalar field ('unsigned char')."""
    def __init__( self, nm):
        super().__init__( "unsigned char", nm, "UnsignedCharField", 8)
class SignedCharField(BasicField):
    """8-bit signed scalar field ('char')."""
    def __init__( self, nm):
        super().__init__( "char", nm, "SignedCharField", 8)
class UserTypeField:
    """A struct-typed member field; delegates width, field count and
    accessor generation to its underlying UserType (`user_ty`)."""

    def __init__( self, nm, user_ty):
        self.nm = nm
        self.user_ty = user_ty

    @property
    def declaration(self):
        return f"{self.ty} {self.nm};"

    def fieldWidth(self, sum):
        # Delegate to the nested type's static fieldWidth.
        return f" return {self.ty}::fieldWidth( index-{sum});"

    def putField(self, sum):
        return f" {self.nm}.putField( index-{sum}, d);"

    def getField(self, sum):
        return f" return {self.nm}.getField( index-{sum});"

    @property
    def ty(self):
        return self.user_ty.ty

    @property
    def numberOfFields(self):
        return self.user_ty.numberOfFields

    @property
    def bitwidth(self):
        return self.user_ty.bitwidth
class UserType:
    """A generated struct type: a type name plus an ordered field list.

    Construction annotates each field with its flattened field offset
    within the struct (cumulative numberOfFields of the preceding
    members)."""

    def __init__( self, ty, fields):
        self.ty = ty
        self.fields = fields
        offset = 0
        for field in fields:
            field.offset = offset
            offset += field.numberOfFields

    @property
    def numberOfFields(self):
        """Total count of flattened scalar fields across all members."""
        return sum(field.numberOfFields for field in self.fields)

    @property
    def bitwidth(self):
        """Total width in bits across all members."""
        return sum(field.bitwidth for field in self.fields)
from collections import OrderedDict
class StorageFIFO:
    """Record describing a storage FIFO: element type `ty`, depth
    `capacity`, and instance name `nm`."""
    def __init__( self, ty, capacity, nm):
        self.ty = ty
        self.capacity = capacity
        self.nm = nm
class Port:
    """Base class for a channel endpoint owned by a cthread.

    Equality requires the exact same concrete type AND the same channel
    name, so e.g. RdReqPort("a") never equals RdRespPort("a").
    """

    def __init__( self, channel):
        self.channel = channel

    def __eq__( self, other):
        return type(self) == type(other) and self.channel == other.channel

    def __hash__(self):
        # Defining __eq__ alone makes instances unhashable; provide a
        # hash consistent with __eq__ (same type + channel) so ports can
        # live in sets/dicts as well as lists.
        return hash((type(self), self.channel))

    def __repr__(self):
        # Matches the "Subclass(channel)" form used by the subclasses.
        return f"{type(self).__name__}({self.channel})"
class RdReqPort(Port):
    """Read-request output port for a channel (drives <channel>ReqOut)."""
    # The redundant __init__ that only forwarded to Port.__init__ has
    # been removed; the inherited constructor is identical.

    @property
    def reset( self):
        """Statement that resets the put side of the request channel."""
        return self.channel + "ReqOut.reset_put()"

    def type( self, dut):
        """C++ request type for this read channel, looked up in *dut*."""
        return dut.find_rd( self.channel).reqTy()

    def __repr__( self):
        return "RdReqPort(" + self.channel + ")"
class RdRespPort(Port):
    """Read-response input port for a channel (reads <channel>RespIn)."""
    # The redundant __init__ that only forwarded to Port.__init__ has
    # been removed; the inherited constructor is identical.

    @property
    def reset( self):
        """Statement that resets the get side of the response channel."""
        return self.channel + "RespIn.reset_get()"

    def type( self, dut):
        """C++ response type for this read channel, looked up in *dut*."""
        return dut.find_rd( self.channel).respTy()

    def __repr__( self):
        return "RdRespPort(" + self.channel + ")"
class WrReqPort(Port):
    """Write-request output port for a channel (drives <channel>ReqOut)."""
    # The redundant __init__ that only forwarded to Port.__init__ has
    # been removed; the inherited constructor is identical.

    @property
    def reset( self):
        """Statement that resets the put side of the request channel."""
        return self.channel + "ReqOut.reset_put()"

    def type( self, dut):
        """C++ request type for this write channel, looked up in *dut*."""
        return dut.find_wr( self.channel).reqTy()

    def __repr__( self):
        return "WrReqPort(" + self.channel + ")"
class WrDataPort(Port):
    """Write-data output port for a channel (drives <channel>DataOut)."""
    # The redundant __init__ that only forwarded to Port.__init__ has
    # been removed; the inherited constructor is identical.

    @property
    def reset( self):
        """Statement that resets the put side of the data channel."""
        return self.channel + "DataOut.reset_put()"

    def type( self, dut):
        """C++ data type for this write channel, looked up in *dut*."""
        return dut.find_wr( self.channel).dataTy()

    def __repr__( self):
        return "WrDataPort(" + self.channel + ")"
class EnqueuePort(Port):
    """Producer end of an internal FIFO (put side of <channel>)."""
    # The redundant __init__ that only forwarded to Port.__init__ has
    # been removed; the inherited constructor is identical.

    @property
    def reset( self):
        """Statement that resets the put side of the FIFO."""
        return self.channel + ".reset_put()"

    def type( self, dut):
        """Element type of the FIFO, looked up in *dut*."""
        return dut.find_fifo( self.channel).ty

    def __repr__( self):
        return "EnqueuePort(" + self.channel + ")"
class DequeuePort(Port):
    """Consumer end of an internal FIFO (get side of <channel>)."""
    # The redundant __init__ that only forwarded to Port.__init__ has
    # been removed; the inherited constructor is identical.

    @property
    def reset( self):
        """Statement that resets the get side of the FIFO."""
        return self.channel + ".reset_get()"

    def type( self, dut):
        """Element type of the FIFO, looked up in *dut*."""
        return dut.find_fifo( self.channel).ty

    def __repr__( self):
        return "DequeuePort(" + self.channel + ")"
class CThread:
    """A clocked thread: its name, the ports it touches, and whether it
    writes the module-level done flag."""

    def __init__( self, nm, ports=None, writes_to_done=False):
        self.nm = nm
        # A caller-supplied list is deliberately aliased (not copied),
        # matching the original contract; a fresh list is created only
        # when none is given, avoiding the mutable-default pitfall.
        self.ports = [] if ports is None else ports
        self.writes_to_done = writes_to_done

    def add_port( self, p):
        """Append one port; returns self so calls can be chained."""
        self.ports.append( p)
        return self

    def add_ports( self, ps):
        """Append several ports; returns self so calls can be chained."""
        self.ports.extend( ps)
        return self
class Module:
    """A hardware module: named cthreads, storage FIFOs and child modules.

    cthreads and modules are kept in OrderedDicts keyed by their `.nm`
    so insertion (declaration) order is preserved.
    """

    def __init__( self, nm):
        self.nm = nm
        self.cthreads = OrderedDict()   # name -> CThread
        self.storage_fifos = []
        self.modules = OrderedDict()    # name -> child Module

    @property
    def writes_to_done( self):
        """True if any cthread of this module writes the done flag."""
        return any(c.writes_to_done for c in self.cthreads.values())

    def add_module( self, v):
        self.modules[v.nm] = v

    def add_modules( self, vs):
        # BUG FIX: this previously recursed into add_modules(v) for each
        # element (raising TypeError on a non-iterable Module and never
        # registering anything); each child must be added via add_module.
        for v in vs:
            self.add_module( v)

    def get_module( self, nm):
        return self.modules[nm]

    def add_cthread( self, v):
        self.cthreads[v.nm] = v

    def add_cthreads( self, vs):
        for v in vs:
            self.add_cthread( v)

    def get_cthread( self, nm):
        return self.cthreads[nm]

    def add_storage_fifo( self, v):
        self.storage_fifos.append( v)

    def add_storage_fifos( self, vs):
        self.storage_fifos.extend( vs)

    def portOf( self, p_in):
        """True if any cthread of this module owns a port equal to p_in."""
        return any(p == p_in for c in self.cthreads.values() for p in c.ports)
class DUT:
def __init__( self, nm):
self.have_run_semantic = False
self.module = Module( nm)
self.inps = []
self.outs = []
self.usertypes = OrderedDict()
self.extra_config_fields = []
@property
def nm( self):
return self.module.nm
@property
def modules( self):
return self.module.modules
@property
def cthreads( self):
return self.module.cthreads
@property
def cthreads_generator( self):
for c in self.module.cthreads.values():
yield c
for m in self.module.modules.values():
for c in m.cthreads.values():
yield c
@property
def storage_fifos( self):
self.semantic()
for f in self.module.storage_fifos:
th0 = self.put_tbl[f.nm]
th1 = self.get_tbl[f.nm]
p0 = self.find_parent(th0)
p1 = self.find_parent(th1)
if p0.nm == p1.nm:
yield f
@property
def tlm_fifos( self):
self.semantic()
for f in self.module.storage_fifos:
th0 = self.put_tbl[f.nm]
th1 = self.get_tbl[f.nm]
p0 = self.find_parent(th0)
p1 = self.find_parent(th1)
if p0.nm != p1.nm:
yield f
def isHier( self, p_in):
if self.module.portOf( p_in):
return ""
return "hier_"
def add_module( self, v):
self.module.add_module( v)
return self
def add_modules( self, vs):
for v in vs:
self.add_module( v)
return self
def get_module( self, | |
<gh_stars>1-10
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
# YOLOv1-style model hyper-parameters.
image_Width = 448  # network input width in pixels
image_Height = 448  # network input height in pixels
channel = 3  # input channels (RGB)
label_size = 20 # pascal VOC 2012 Dataset: number of object classes
grid = 7  # the image is divided into a grid x grid array of cells
batchsize = 1
Learning_Rate = 0.00001
box_per_cell = 2 # each grid cell predicts 2 boxes
boundary1 = grid * grid * label_size # 7 * 7 * 20 -- presumably end of the class-score block in the flattened prediction; confirm
boundary2 = boundary1 + grid * grid * box_per_cell # 7 * 7 * 20 + 7 * 7 *2 -- presumably end of the confidence block; confirm
# NOTE(review): the purpose of `w` is not evident from this chunk -- confirm.
w = 32
def sigmoid(x):
    """Element-wise logistic sigmoid; thin wrapper over tf.math.sigmoid."""
    return tf.math.sigmoid(x)
def batch_norm(input, n_out, training, scope='bn'):
    """Batch normalisation with an exponential moving average of the
    batch statistics (TF1 graph style).

    input   : 4-D tensor; moments are taken over axes [0, 1, 2] (NHWC).
    n_out   : number of channels; length of the learned beta/gamma vectors.
    training: boolean tf tensor -- True uses (and updates) the batch
              statistics, False uses the EMA averages.
    scope   : variable scope name.
    """
    with tf.compat.v1.variable_scope(scope):
        # beta/gamma are the learned per-channel shift and scale.
        beta = tf.Variable(tf.constant(0.0, shape=[n_out]), name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=[n_out]), name='gamma', trainable=True)
        batch_mean, batch_var = tf.nn.moments(input, [0, 1, 2], name='moments')
        # NOTE(review): decay=0.5 is an unusually fast-moving average
        # (typical values are ~0.99) -- confirm this is intentional.
        ema = tf.train.ExponentialMovingAverage(decay=0.5)
        def mean_var_with_update():
            # Force the EMA update to run, then return the batch stats.
            ema_apply_op = ema.apply([batch_mean,batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)
        mean, var = tf.cond(training, true_fn=mean_var_with_update, false_fn=lambda :(ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(input, mean, var, beta, gamma, 1e-3)
        return normed
############
def block_conv(input, ksize, ch_input, ch_output, stride, istraining, name):
    """Conv2D -> batch norm -> leaky ReLU; logs the resulting shape.

    Args:
        input: NHWC feature tensor.
        ksize: square kernel size.
        ch_input / ch_output: input and output channel counts.
        stride: spatial stride.
        istraining: boolean tf tensor forwarded to batch_norm.
        name: prefix for the weight variable and activation op.
    """
    kernel_shape = [ksize, ksize, int(ch_input), ch_output]
    weight = tf.Variable(tf.random.truncated_normal(kernel_shape, stddev=0.1),
                         name=name + '_weight')
    feat = tf.nn.conv2d(input, weight, [1, stride, stride, 1], padding='SAME')
    feat = batch_norm(feat, n_out=int(feat.shape[-1]), training=istraining)
    feat = tf.nn.leaky_relu(feat, name=name + '_leaky-RELU')
    _, height, width, chans = feat.shape
    print(name + " output ->", "[" + str(height) + ", " + str(width) + ", " + str(chans) + "]")
    return feat
def block_residual(input, ch_output1, ch_output2, stride, istraining, name):
    """Two-conv stack: 1x1 (ch_output1) then 3x3 (ch_output2).

    Each conv is conv2d -> batch_norm -> leaky_relu and logs its output shape.
    Despite the name there is no skip connection in this block.

    Args:
        input: NHWC feature tensor.
        ch_output1: channels of the 1x1 convolution.
        ch_output2: channels of the 3x3 convolution.
        stride: spatial stride.
        istraining: boolean tf tensor for batch norm.
        name: prefix for variable/op names.

    Returns:
        The output tensor of the second convolution.
    """
    ksize1 = [1, 1, int(input.shape[-1]), ch_output1]
    ksize2 = [3, 3, ch_output1, ch_output2]
    strides = [1, stride, stride, 1]
    kernel1 = tf.Variable(tf.random.truncated_normal(ksize1, stddev=0.1), name=name + '_weight_1')
    conv1 = tf.nn.conv2d(input, kernel1, strides, padding='SAME')
    bn1 = batch_norm(conv1, n_out=ksize1[-1], training=istraining)
    af1 = tf.nn.leaky_relu(bn1, name=name + '_leaky-RELU')
    b, h, w, c = af1.shape
    print(name + " output ->", "[" + str(h) + ", " + str(w) + ", " + str(c) + "]")
    kernel2 = tf.Variable(tf.random.truncated_normal(ksize2, stddev=0.1), name=name + '_weight_2')
    # NOTE(review): `strides` is applied to BOTH convolutions, so stride=2
    # down-samples twice; typical residual blocks stride only once -- confirm.
    conv2 = tf.nn.conv2d(af1, kernel2, strides, padding='SAME')
    bn2 = batch_norm(conv2, n_out=ksize2[-1], training=istraining)
    af2 = tf.nn.leaky_relu(bn2, name=name + '_leaky-RELU')
    b, h, w, c = af2.shape
    print(name + " output ->", "[" + str(h) + ", " + str(w) + ", " + str(c) + "]")
    return af2
def block_upsample(input, name, method="deconv"):
    """Double the spatial resolution of `input`.

    Args:
        input: NHWC feature tensor.
        name: variable-scope name.
        method: "resize" for nearest-neighbour resize, "deconv" for a 2x2
            stride-2 transposed convolution (TensorRT friendly).

    Returns:
        The up-sampled tensor.
    """
    assert method in ["resize", "deconv"]
    # FIX: tf.variable_scope / tf.image.resize_nearest_neighbor /
    # tf.layers.conv2d_transpose are TF1-only names; use the tf.compat.v1
    # aliases for consistency with batch_norm() above (required under TF2).
    with tf.compat.v1.variable_scope(name):
        if method == "resize":  # case in resize
            input_shape = tf.shape(input)
            output = tf.compat.v1.image.resize_nearest_neighbor(
                input, (input_shape[1] * 2, input_shape[2] * 2))
            return output
        if method == "deconv":  # case in deconvolution(transpose)
            # replace resize_nearest_neighbor with conv2d_transpose To support TensorRT optimization
            numm_filter = input.shape.as_list()[-1]
            output = tf.compat.v1.layers.conv2d_transpose(
                input, numm_filter, kernel_size=2, padding='same',
                strides=(2, 2), kernel_initializer=tf.random_normal_initializer())
            return output
def block_maxpool(input, name):
    """2x2 stride-2 max pooling over NHWC input; logs the resulting shape."""
    pooled = tf.nn.max_pool2d(input, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                              padding='SAME', name=name)
    _, height, width, chans = pooled.shape
    print(name + " output ->", "[" + str(height) + ", " + str(width) + ", " + str(chans) + "]")
    return pooled
def block_bottleneck(input, ch_output, istraining, residual=None, name='bottleneck'):
    """ResNet-style bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4) + skip.

    Args:
        input: NHWC feature tensor.
        ch_output: bottleneck width; the block outputs ch_output * 4 channels.
        istraining: boolean tf tensor for batch norm.
        residual: optional projection tensor used as the skip branch instead
            of `input`.
        name: name prefix shared by all three convolutions.

    Returns:
        relu(conv_stack + skip).
    """
    exp = 4  # channel expansion factor of the final 1x1 convolution
    res = input
    ch_input = int(input.shape[-1])
    # NOTE(review): all three convs pass the same `name`, so the weight
    # variables share a name and rely on auto-uniquifying -- confirm intended.
    out = block_conv(input=input, ksize=1, ch_input=ch_input, ch_output=ch_output, stride=1, istraining=istraining,
        name=name)
    out = block_conv(input=out, ksize=3, ch_input=ch_output, ch_output=ch_output, stride=1, istraining=istraining,
        name=name)
    out = block_conv(input=out, ksize=1, ch_input=ch_output, ch_output=ch_output * exp, stride=1, istraining=istraining,
        name=name)
    if residual is not None:
        res = residual
    # NOTE(review): when residual is None, the addition requires
    # ch_input == ch_output * 4 to be shape-compatible -- confirm callers.
    out += res
    out = tf.nn.relu(out)
    return out
def block_basic(input, ch_output, istraining, residual=None, name=None):
    """Basic HRNet block: two 3x3 convs plus an identity (or given) skip.

    Args:
        input: NHWC feature tensor.
        ch_output: channel count of both convolutions.
        istraining: boolean tf tensor for batch norm.
        residual: optional tensor to use as the skip branch instead of input.
        name: name prefix forwarded to block_conv.
    """
    skip = input if residual is None else residual
    out = block_conv(input=input, ksize=3, ch_input=int(input.shape[-1]),
                     ch_output=ch_output, stride=1, istraining=istraining, name=name)
    out = block_conv(input=out, ksize=3, ch_input=ch_output,
                     ch_output=ch_output, stride=1, istraining=istraining, name=name)
    out += skip
    return tf.nn.relu(out)
def block_stage(input, stage, output_branches, w, istraining, name=None):
    """One HRNet-style stage: per-branch basic blocks, then cross-branch fusion.

    Args:
        input: feature tensor feeding every branch.
        stage: number of parallel resolution branches.
        output_branches: number of fused output lists to build.
        w: base channel width; branch i uses w * 2**i channels.
        istraining: boolean tf tensor for batch norm.
        name: name prefix for created ops.

    Returns:
        The fusion list built for the last output branch (matching the
        original `return fuse_layers[-1]`).
    """
    branches = list()
    for i in range(0, stage):
        stage_w = w * (2 ** i)
        out = input
        for _ in range(4):  # four chained basic blocks per branch
            out = block_basic(out, stage_w, istraining=istraining, residual=None, name=name)
        branches.append(out)
    fuse_layers = list()
    input22 = branches[0]
    for i in range(0, output_branches):
        # BUG FIX: the original re-bound `fuse_layers = list()` here and then
        # did `fuse_layers[-1].append(...)`, which raises IndexError on the
        # empty list. Append one sub-list per output branch instead.
        fuse_layers.append(list())
        for j in range(0, stage):
            if i == j:
                pass
            elif i < j:
                temp1 = block_conv(input=input22, ksize=3, ch_input=input22.shape[-1],
                                   ch_output=w * (2 ** i), stride=1,
                                   istraining=istraining, name=name)
                # BUG FIX: the original unpacked `b, w, h, c = temp1.shape`,
                # silently shadowing the `w` width parameter for every later
                # iteration.
                scale_factor = 2 ** (j - i)
                # NOTE(review): UpSampling2D's `size` is an integer multiplier;
                # the original passed spatial-dim * factor, which up-samples by
                # height*factor. The bare factor matches standard HRNet fusion
                # -- confirm intended behavior.
                temp2 = tf.keras.layers.UpSampling2D(
                    size=(scale_factor, scale_factor),
                    data_format="channels_last", interpolation='nearest')(temp1)
                fuse_layers[-1].append(temp2)
            elif i > j:
                ops = list()
                # BUG FIX: when i == j + 1 the original referenced `temp2`
                # before assignment; thread the running tensor explicitly.
                cur = input22
                for _ in range(i - j - 1):
                    cur = block_conv(input=cur, ksize=3, ch_input=cur.shape[-1],
                                     ch_output=w * (2 ** j), stride=2,
                                     istraining=istraining, name=name)
                    ops.append(cur)
                cur = block_conv(input=cur, ksize=3, ch_input=cur.shape[-1],
                                 ch_output=w * (2 ** i), stride=2,
                                 istraining=istraining, name=name)
                ops.append(cur)
                fuse_layers[-1].append(ops)
    return fuse_layers[-1]
class network:
    """
    Builds Darknet-53 model.
    """
    # NOTE(review): despite the docstring, hrlayers() builds an HRNet-style
    # backbone (bottleneck stem + multi-resolution stages) -- confirm intent.

    def __init__(self, imgs, training, weights=None, sess=None):
        """
        Args:
            imgs: raw input tensor; reshaped to NHWC in reshapelayers().
            training: boolean tf tensor forwarded to every batch norm.
            weights: optional checkpoint prefix to restore.
            sess: session used to restore `weights`.
        """
        self.w = w
        self.label_size = label_size
        self.imgs = imgs
        self.training = training
        self.grid = grid
        self.reshapelayers()
        self.hrlayers()
        # self.gap_layers()
        # self.fc_layers()
        # self.probs = tf.nn.softmax(self.fc)
        if weights is not None and sess is not None:
            self.load_weights(weights, sess)

    def reshapelayers(self):
        """Reshape the input into an NHWC batch; this is the graph input node."""
        print("reshape layers")
        ################################################################################################################
        # initialization input node
        self.imgs = tf.reshape(self.imgs, shape=[-1, image_Height, image_Width, 3], name='input_node')
        ################################################################################################################

    def hrlayers(self):
        """Stem convs, bottleneck layer 1, HR stages 2-4; sets self.output."""
        print("hr layers")
        out = block_conv(self.imgs, ksize=3, ch_input=self.imgs.shape[-1], ch_output=64, stride=2,
            istraining=self.training, name='conv1')
        out = block_conv(out, ksize=3, ch_input=out.shape[-1], ch_output=64, stride=2,
            istraining=self.training, name='conv2')
        # out = block_conv(out, ksize=3, ch_input=out.shape[-1], ch_output=64, stride=2,
        #                  istraining=self.training, name='conv3')
        downsample = block_conv(out, ksize=3, ch_input=out.shape[-1], ch_output=256, stride=1,
            istraining=self.training, name='downsample')
        # Layer 1
        # BUG FIX: `downsample` was previously passed as the third positional
        # argument, i.e. as `istraining`, not as the residual projection.
        out = block_bottleneck(out, 64, istraining=self.training, residual=downsample)
        out = block_bottleneck(out, 64, istraining=self.training)
        out = block_bottleneck(out, 64, istraining=self.training)
        out = block_bottleneck(out, 64, istraining=self.training)
        ################################################################################################################
        # Fusion Layer 1
        ################################################################################################################
        out = block_conv(out, ksize=3, ch_input=out.shape[-1], ch_output=self.w, stride=1,
            istraining=self.training, name='transition1-1')
        out = block_conv(out, ksize=3, ch_input=out.shape[-1], ch_output=self.w * (2 ** 1), stride=2,
            istraining=self.training, name='transition1-2')
        ################################################################################################################
        # stage 2
        ################################################################################################################
        out = block_stage(out, stage=2, output_branches=2, w=self.w, istraining=self.training, name='stage2')
        ################################################################################################################
        # Fusion Layer 2
        ################################################################################################################
        out = block_conv(out, ksize=3, ch_input=out.shape[-1], ch_output=self.w * (2 ** 2), stride=2,
            istraining=self.training, name='transition2-1')
        ################################################################################################################
        # stage 3
        ################################################################################################################
        out = block_stage(out, stage=3, output_branches=3, w=self.w, istraining=self.training, name='stage3')
        out = block_stage(out, stage=3, output_branches=3, w=self.w, istraining=self.training, name='stage3')
        out = block_stage(out, stage=3, output_branches=3, w=self.w, istraining=self.training, name='stage3')
        out = block_stage(out, stage=3, output_branches=3, w=self.w, istraining=self.training, name='stage3')
        ################################################################################################################
        # Fusion Layer 3
        ################################################################################################################
        out = block_conv(out, ksize=3, ch_input=out.shape[-1], ch_output=self.w * (2 ** 3), stride=2,
            istraining=self.training, name='transition3-1')
        ################################################################################################################
        # stage 4
        ################################################################################################################
        out = block_stage(out, stage=4, output_branches=4, w=self.w, istraining=self.training, name='stage4')
        out = block_stage(out, stage=4, output_branches=4, w=self.w, istraining=self.training, name='stage4')
        out = block_stage(out, stage=4, output_branches=1, w=self.w, istraining=self.training, name='stage4')
        ################################################################################################################
        # Final Layer
        ################################################################################################################
        self.output = block_conv(out, ksize=1, ch_input=out.shape[-1], ch_output=self.label_size, stride=1,
            istraining=self.training, name='final')

    def load_weights(self, weight_file, sess):
        """Restore variables (and the graph meta) from `weight_file` into `sess`."""
        print(f"Weight Loading Start! -> {weight_file}")
        saver = tf.compat.v1.train.Saver()  # Network model Save
        # Import the meta-graph for its side effect; the returned saver and
        # the restore() return value were previously bound but never used.
        tf.compat.v1.train.import_meta_graph(weight_file + ".meta")
        saver.restore(sess, weight_file)
        print(f"Weight Loading is successful")
def calc_iou(boxes1, boxes2, scope='iou'):
    """calculate ious
    Args:
      boxes1: 5-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] ====> (x_center, y_center, w, h)
      boxes2: 5-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] ===> (x_center, y_center, w, h)
    Return:
      iou: 4-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
    """
    # FIX: tf.variable_scope is TF1-only; use the tf.compat.v1 alias for
    # consistency with batch_norm() above (required under TF2).
    with tf.compat.v1.variable_scope(scope):
        # transform (x_center, y_center, w, h) to (x1, y1, x2, y2)
        boxes1_t = tf.stack([boxes1[..., 0] - boxes1[..., 2] / 2.0,
                             boxes1[..., 1] - boxes1[..., 3] / 2.0,
                             boxes1[..., 0] + boxes1[..., 2] / 2.0,
                             boxes1[..., 1] + boxes1[..., 3] / 2.0],
                            axis=-1)
        boxes2_t = tf.stack([boxes2[..., 0] - boxes2[..., 2] / 2.0,
                             boxes2[..., 1] - boxes2[..., 3] / 2.0,
                             boxes2[..., 0] + boxes2[..., 2] / 2.0,
                             boxes2[..., 1] + boxes2[..., 3] / 2.0],
                            axis=-1)
        # calculate the left up point & right down point
        lu = tf.maximum(boxes1_t[..., :2], boxes2_t[..., :2])
        rd = tf.minimum(boxes1_t[..., 2:], boxes2_t[..., 2:])
        # intersection
        intersection = tf.maximum(0.0, rd - lu)
        inter_square = intersection[..., 0] * intersection[..., 1]
        # calculate the boxs1 square and boxs2 square
        square1 = boxes1[..., 2] * boxes1[..., 3]
        square2 = boxes2[..., 2] * boxes2[..., 3]
        # Guard against division by zero for degenerate boxes.
        union_square = tf.maximum(square1 + square2 - inter_square, 1e-10)
        return tf.clip_by_value(inter_square / union_square, 0.0, 1.0)
def loss_layer(predicts, labels, scope='loss_layer'):
object_scale = 1.0
noobject_scale = 1.0
class_scale = 2.0
coord_scale = 5.0
with tf.variable_scope(scope):
# print(boundary1)
# print(predicts[:, :boundary1])
predict_classes = tf.reshape(predicts[:, :boundary1], [batchsize, grid, grid, label_size])
predict_scales = tf.reshape(predicts[:, boundary1:boundary2], [batchsize, grid, grid, box_per_cell])
predict_boxes = tf.reshape(predicts[:, boundary2:], [batchsize, grid, grid, box_per_cell, 4])
response = tf.reshape(labels[..., 0], [batchsize, grid, grid, 1]) # response = confidence score
boxes | |
"""This module provides configuration values used by the application."""
import logging
import os
from collections.abc import Mapping, Sequence
from logging import config as lc
from typing import Any, Optional, Union, final
import jinja2
import yaml
from pydantic import AnyHttpUrl, BaseModel, BaseSettings, EmailStr, HttpUrl, validator
from document.domain import model
@final
class Settings(BaseSettings):
"""
BaseSettings subclasses like this one allow values of constants to
be overridden by environment variables like those defined in env
files, e.g., ../../.env
"""
REPO_URL_DICT_KEY: str = "../download-scripture?repo_url"
RESOURCE_TYPES_JSONPATH: str = "$[*].contents[*].code"
RESOURCE_TYPES_FOR_LANG_JSONPATH: str = "$[?code='{}'].contents[*].code"
RESOURCE_CODES_JSONPATH: str = "$[*].contents[*].subcontents[*].code"
RESOURCE_CODES_FOR_LANG_JSONPATH: str = (
"$[?code='{}'].contents[*].subcontents[*].code"
)
LANGUAGE_FMT_STR: str = "<h1>Language: {}</h1>"
RESOURCE_TYPE_NAME_FMT_STR: str = "<h2>{}</h2>"
RESOURCE_TYPE_NAME_WITH_REF_FMT_STR: str = "<h3>{} {}:{}</h3>"
TN_RESOURCE_TYPE_NAME_WITH_ID_AND_REF_FMT_STR: str = (
'<h3 id="{}-{}-tn-ch-{}-v-{}">{} {}:{}</h3>'
)
HTML_ROW_BEGIN: str = model.HtmlContent("<div class='row'>")
HTML_ROW_END: str = model.HtmlContent("</div>")
HTML_COLUMN_BEGIN: str = model.HtmlContent("<div class='column'>")
HTML_COLUMN_END: str = model.HtmlContent("</div>")
BOOK_FMT_STR: str = "<h2>Book: {}</h2>"
BOOK_AS_GROUPER_FMT_STR: str = "<h1>Book: {}</h1>"
VERSE_FMT_STR: str = "<h3>Verse {}:{}</h3>"
TRANSLATION_NOTE_FMT_STR: str = "<h3>Translation note {}:{}</h3>"
CHAPTER_HEADER_FMT_STR: str = '<h2 class="c-num" id="{}-{}-ch-{}">Chapter {}</h2>'
TRANSLATION_QUESTION_FMT_STR: str = "<h3>Translation question {}:{}</h3>"
TRANSLATION_ACADEMY_FMT_STR: str = "<h3>Translation academy {}:{}</h3>"
UNORDERED_LIST_BEGIN_STR: model.HtmlContent = model.HtmlContent("<ul>")
UNORDERED_LIST_END_STR: model.HtmlContent = model.HtmlContent("</ul>")
TRANSLATION_WORD_LIST_ITEM_FMT_STR: model.HtmlContent = model.HtmlContent(
'<li><a href="#{}-{}">{}</a></li>'
)
TRANSLATION_WORDS_FMT_STR: str = "<h3>Translation words {}:{}</h3>"
TRANSLATION_WORDS_SECTION_STR: str = "<h2>Translation words</h2>"
TRANSLATION_WORD_VERSE_SECTION_HEADER_STR: model.HtmlContent = model.HtmlContent(
"<h4>Uses:</h4>"
)
TRANSLATION_WORD_VERSE_REF_ITEM_FMT_STR: str = (
'<li><a href="#{}-{}-ch-{}-v-{}">{} {}:{}</a></li>'
)
FOOTNOTES_HEADING: model.HtmlContent = model.HtmlContent("<h3>Footnotes</h3>")
OPENING_H3_FMT_STR: str = "<h3>{}"
OPENING_H3_WITH_ID_FMT_STR: str = '<h3 id="{}-{}">{}'
TRANSLATION_WORD_ANCHOR_LINK_FMT_STR: str = "[{}](#{}-{})"
TRANSLATION_WORD_PREFIX_ANCHOR_LINK_FMT_STR: str = "({}: [{}](#{}-{}))"
TRANSLATION_NOTE_ANCHOR_LINK_FMT_STR: str = "[{}](#{}-{}-tn-ch-{}-v-{})"
# FIXME Tighten up the '.' usage in the following regex
VERSE_ANCHOR_ID_FMT_STR: str = 'id="(.+?)-ch-(.+?)-v-(.+?)"'
VERSE_ANCHOR_ID_SUBSTITUTION_FMT_STR: str = r"id='{}-\1-ch-\2-v-\3'"
LOGGING_CONFIG_FILE_PATH: str = "backend/document/logging_config.yaml"
DOCKER_CONTAINER_PDF_OUTPUT_DIR: str = "/pdf_output"
USFM_RESOURCE_TYPES: Sequence[str] = [
"cuv",
"f10",
"nav",
"reg",
"udb",
"udb-wa",
"ulb",
"ulb-wa",
"usfm",
]
TN_RESOURCE_TYPES: Sequence[str] = ["tn", "tn-wa"]
TQ_RESOURCE_TYPES: Sequence[str] = ["tq", "tq-wa"]
TW_RESOURCE_TYPES: Sequence[str] = ["tw", "tw-wa"]
def logger(self, name: str) -> logging.Logger:
    """
    Return a Logger for scope named by name, e.g., module, that can be
    used for logging. Loads and applies the YAML logging config first.
    """
    with open(self.LOGGING_CONFIG_FILE_PATH, "r") as fin:
        lc.dictConfig(yaml.safe_load(fin.read()))
    return logging.getLogger(name)
def api_test_url(self) -> str:
    """Non-secure local URL for running the Fastapi server for testing."""
    return f"http://localhost:{self.API_LOCAL_PORT}"
# Get API prefix. Useful to have a prefix for versioning of the API.
# TODO Consider using API_ROOT in router prefix
API_ROOT: str
API_LOCAL_PORT: int
API_REMOTE_PORT: int
# FIXME HTTPS shouldn't be hardcoded. fastapi will have a sane way
# to deal with this that I've yet to research.
def api_url(self) -> str:
    """Return the full base URL of the Fastapi server (https scheme hardcoded)."""
    host = os.environ.get("API_HOST", "localhost")
    is_local = host == "localhost"
    port = self.API_LOCAL_PORT if is_local else self.API_REMOTE_PORT
    return f"https://{host}:{port}{self.API_ROOT}"
# Location where resource assets will be downloaded.
RESOURCE_ASSETS_DIR: str = "/working/temp"
# Indicate whether running in Docker container.
IN_CONTAINER: bool = False
def working_dir(self) -> str:
    """
    The directory where the resources will be placed once
    acquired. Outside a container the leading '/' is stripped so the
    path is relative to the working directory.
    """
    if self.IN_CONTAINER:
        return self.RESOURCE_ASSETS_DIR
    return self.RESOURCE_ASSETS_DIR[1:]
# Location where generated PDFs will be written to.
DOCUMENT_OUTPUT_DIR: str = "/working/output"
def output_dir(self) -> str:
    """The directory where the generated documents are placed.

    Outside a container the leading '/' is stripped so the path is
    relative to the working directory.
    """
    return (self.DOCUMENT_OUTPUT_DIR if self.IN_CONTAINER
            else self.DOCUMENT_OUTPUT_DIR[1:])
# For options see https://wkhtmltopdf.org/usage/wkhtmltopdf.txt
WKHTMLTOPDF_OPTIONS: Mapping[str, Optional[str]] = {
"page-size": "Letter",
# 'margin-top': '0.75in',
# 'margin-right': '0.75in',
# 'margin-bottom': '0.75in',
# 'margin-left': '0.75in',
"encoding": "UTF-8",
"load-error-handling": "ignore",
"outline": None, # Produce an outline
"outline-depth": "3", # Only go depth of 3 on the outline
"enable-internal-links": None, # enable internal links
"header-left": "[section]",
"header-right": "[subsection]",
"header-line": None, # Produce a line under the header
"footer-center": "[page]",
"footer-line": None, # Produce a line above the footer
}
# Return the message to show to user on successful generation of
# PDF.
SUCCESS_MESSAGE: str = "Success! Please retrieve your generated document using a GET REST request to /pdf/{document_request_key} where document_request_key is the finished_document_request_key in this payload."
# Return the message to show to user on failure generating PDF.
FAILURE_MESSAGE: str = "The document request could not be fulfilled either because the resources requested are not available either currently or at all or because the system does not yet support the resources requested."
# The location where the JSON data file that we use to lookup
# location of resources is located.
TRANSLATIONS_JSON_LOCATION: HttpUrl
# The jsonpath location in TRANSLATIONS_JSON_LOCATION file where
# individual USFM files (per bible book) may be found.
INDIVIDUAL_USFM_URL_JSONPATH: str = "$[?code='{}'].contents[?code='{}'].subcontents[?code='{}'].links[?format='usfm'].url"
# The jsonpath location in TRANSLATIONS_JSON_LOCATION file where
# resource URL, e.g., tn, tq, tw, ta, obs, ulb, udb, etc., may normally
# be found.
RESOURCE_URL_LEVEL1_JSONPATH: str = (
"$[?code='{}'].contents[?code='{}'].links[?format='zip'].url"
)
# The json path to the language's name.
RESOURCE_LANG_NAME_JSONPATH: str = "$[?code='{}'].name"
# The json path to the resource type's name.
RESOURCE_TYPE_NAME_JSONPATH: str = "$[?code='{}'].contents[?code='{}'].name"
# The jsonpath location in TRANSLATIONS_JSON_LOCATION file where
# resource URL, e.g., tn, tq, tw, ta, obs, ulb, udb, etc., may
# additionally/alternatively be found.
RESOURCE_URL_LEVEL2_JSONPATH: str = (
"$[?code='{}'].contents[*].subcontents[?code='{}'].links[?format='zip'].url"
)
# The jsonpath location in TRANSLATIONS_JSON_LOCATION file where
# resource git repo may be found.
RESOURCE_DOWNLOAD_FORMAT_JSONPATH: str = "$[?code='{}'].contents[?code='{}'].subcontents[?code='{}'].links[?format='Download'].url"
# BACKEND_CORS_ORIGINS is a JSON-formatted list of origins
# e.g: '["http://localhost", "http://localhost:4200",
# "http://localhost:8000"]'
BACKEND_CORS_ORIGINS: list[AnyHttpUrl] = []
@validator("BACKEND_CORS_ORIGINS", pre=True)
def assemble_cors_origins(cls, v: str | list[str]) -> list[str] | str:
if isinstance(v, str) and not v.startswith("["):
return [i.strip() for i in v.split(",")]
elif isinstance(v, (list, str)):
return v
raise ValueError(v)
# Return the file names, excluding suffix, of files that do not
# contain content but which may be in the same directory or
# subdirectories of a resource's acquired files.
MARKDOWN_DOC_FILE_NAMES: list[str] = ["readme", "license"]
ENGLISH_GIT_REPO_MAP: Mapping[str, str] = {
"ulb-wa": "https://content.bibletranslationtools.org/WycliffeAssociates/en_ulb",
"udb-wa": "https://content.bibletranslationtools.org/WycliffeAssociates/en_udb",
"tn-wa": "https://content.bibletranslationtools.org/WycliffeAssociates/en_tn",
"tw-wa": "https://content.bibletranslationtools.org/WycliffeAssociates/en_tw",
"tq-wa": "https://content.bibletranslationtools.org/WycliffeAssociates/en_tq",
}
ENGLISH_RESOURCE_TYPE_MAP: Mapping[str, str] = {
"ulb-wa": "Unlocked Literal Bible (ULB)",
"udb-wa": "Unlocked Dynamic Bible (UDB)",
"tn-wa": "ULB Translation Helps",
"tq-wa": "ULB Translation Questions",
"tw-wa": "ULB Translation Words",
}
TEMPLATE_PATHS_MAP: Mapping[str, str] = {
"book_intro": "backend/templates/tn/book_intro_template.md",
"header_enclosing": "backend/templates/html/header_enclosing.html",
"footer_enclosing": "backend/templates/html/footer_enclosing.html",
"cover": "backend/templates/html/cover.html",
"email-html": "backend/templates/html/email.html",
"email": "backend/templates/text/email.txt",
}
# Return boolean indicating if caching of generated documents should be
# cached.
ASSET_CACHING_ENABLED: bool = True
# Caching window of time in which asset
# files on disk are considered fresh rather than re-acquiring (in
# the case of resource asset files) or re-generating them (in the
# case of the final PDF). In hours.
ASSET_CACHING_PERIOD: int
# Get the path to the logo image that will be used on the PDF cover,
# i.e., first, page.
LOGO_IMAGE_PATH: str = "icon-tn.png"
# It doesn't yet make sense to offer the (high level)
# assembly strategy _and_ the assembly sub-strategy to the end user
# as a document request parameter so we'll just choose an arbitrary
# sub-strategy here. This means that we can write code for multiple
# sub-strategies and choose one to put in play at a time here.
DEFAULT_ASSEMBLY_SUBSTRATEGY: model.AssemblySubstrategyEnum = (
model.AssemblySubstrategyEnum.VERSE
)
# Return a list of the Markdown section titles that our
# Python-Markdown remove_section_processor extension should remove.
MARKDOWN_SECTIONS_TO_REMOVE: list[str] = [
"Examples from the Bible stories",
"Links",
]
# Return the from email to use for sending email with generated PDF
# attachment to document request recipient. Look for the value to
# use in FROM_EMAIL environment variable, use default if it doesn't
# exist.
FROM_EMAIL_ADDRESS: EmailStr
# The to-email address to use for sending email with generated
# PDF attachment to document request recipient during testing - in
# production the to-email address is supplied by the user.
TO_EMAIL_ADDRESS: EmailStr
EMAIL_SEND_SUBJECT: str
# Return boolean representing if the system should execute the
# action of sending an email when appropriate to do so.
SEND_EMAIL: bool
@validator("SEND_EMAIL")
def send_email(cls, v: bool) -> bool:
return bool(v)
SMTP_PASSWORD: str
SMTP_HOST: str
SMTP_PORT: int
# Example fake user agent value required by domain host to allow serving
# files. Other values could possibly work. This value definitely
# works.
USER_AGENT: str | |
import logging
import os
import json
import time
import unittest
import sys
import requests
import msal
from tests.http_client import MinimalHttpClient
from msal.oauth2cli import AuthCodeReceiver
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG if "-v" in sys.argv else logging.INFO)
def _get_app_and_auth_code(
        client_id,
        client_secret=None,
        authority="https://login.microsoftonline.com/common",
        port=44331,
        scopes=None,  # FIX: was a mutable default list; None sentinel instead
        **kwargs):
    """Build an MSAL app and interactively obtain an authorization code.

    Args:
        client_id: the application (client) id.
        client_secret: if given, build a ConfidentialClientApplication,
            otherwise a PublicClientApplication.
        authority: AAD authority URL.
        port: localhost port for the redirect listener.
        scopes: scopes to request; defaults to Microsoft Graph ".default".
        **kwargs: forwarded to get_authorization_request_url().

    Returns:
        (app, auth_code, redirect_uri) tuple.
    """
    from msal.oauth2cli.authcode import obtain_auth_code
    if scopes is None:
        scopes = ["https://graph.microsoft.com/.default"]  # Microsoft Graph
    if client_secret:
        app = msal.ConfidentialClientApplication(
            client_id,
            client_credential=client_secret,
            authority=authority, http_client=MinimalHttpClient())
    else:
        app = msal.PublicClientApplication(
            client_id, authority=authority, http_client=MinimalHttpClient())
    redirect_uri = "http://localhost:%d" % port
    ac = obtain_auth_code(port, auth_uri=app.get_authorization_request_url(
        scopes, redirect_uri=redirect_uri, **kwargs))
    assert ac is not None
    return (app, ac, redirect_uri)
@unittest.skipIf(os.getenv("TRAVIS_TAG"), "Skip e2e tests during tagged release")
class E2eTestCase(unittest.TestCase):
def assertLoosely(self, response, assertion=None,
skippable_errors=("invalid_grant", "interaction_required")):
if response.get("error") in skippable_errors:
logger.debug("Response = %s", response)
# Some of these errors are configuration issues, not library issues
raise unittest.SkipTest(response.get("error_description"))
else:
if assertion is None:
assertion = lambda: self.assertIn(
"access_token", response,
"{error}: {error_description}".format(
# Do explicit response.get(...) rather than **response
error=response.get("error"),
error_description=response.get("error_description")))
assertion()
def assertCacheWorksForUser(
self, result_from_wire, scope, username=None, data=None):
# You can filter by predefined username, or let end user to choose one
accounts = self.app.get_accounts(username=username)
self.assertNotEqual(0, len(accounts))
account = accounts[0]
if ("scope" not in result_from_wire # This is the usual case
or # Authority server could reject some scopes
set(scope) <= set(result_from_wire["scope"].split(" "))
):
# Going to test acquire_token_silent(...) to locate an AT from cache
result_from_cache = self.app.acquire_token_silent(
scope, account=account, data=data or {})
self.assertIsNotNone(result_from_cache)
self.assertIsNone(
result_from_cache.get("refresh_token"), "A cache hit returns no RT")
self.assertEqual(
result_from_wire['access_token'], result_from_cache['access_token'],
"We should get a cached AT")
# Going to test acquire_token_silent(...) to obtain an AT by a RT from cache
self.app.token_cache._cache["AccessToken"] = {} # A hacky way to clear ATs
result_from_cache = self.app.acquire_token_silent(
scope, account=account, data=data or {})
self.assertIsNotNone(result_from_cache,
"We should get a result from acquire_token_silent(...) call")
self.assertIsNotNone(
# We used to assert it this way:
# result_from_wire['access_token'] != result_from_cache['access_token']
# but ROPC in B2C tends to return the same AT we obtained seconds ago.
# Now looking back, "refresh_token grant would return a brand new AT"
# was just an empirical observation but never a committment in specs,
# so we adjust our way to assert here.
(result_from_cache or {}).get("access_token"),
"We should get an AT from acquire_token_silent(...) call")
def assertCacheWorksForApp(self, result_from_wire, scope):
# Going to test acquire_token_silent(...) to locate an AT from cache
result_from_cache = self.app.acquire_token_silent(scope, account=None)
self.assertIsNotNone(result_from_cache)
self.assertEqual(
result_from_wire['access_token'], result_from_cache['access_token'],
"We should get a cached AT")
def _test_username_password(self,
authority=None, client_id=None, username=None, password=<PASSWORD>, scope=None,
client_secret=None, # Since MSAL 1.11, confidential client has ROPC too
**ignored):
assert authority and client_id and username and password and scope
self.app = msal.ClientApplication(
client_id, authority=authority, http_client=MinimalHttpClient(),
client_credential=client_secret)
result = self.app.acquire_token_by_username_password(
username, password, scopes=scope)
self.assertLoosely(result)
# self.assertEqual(None, result.get("error"), str(result))
self.assertCacheWorksForUser(
result, scope,
username=username if ".b2clogin.com" not in authority else None,
)
def _test_device_flow(
self, client_id=None, authority=None, scope=None, **ignored):
assert client_id and authority and scope
self.app = msal.PublicClientApplication(
client_id, authority=authority, http_client=MinimalHttpClient())
flow = self.app.initiate_device_flow(scopes=scope)
assert "user_code" in flow, "DF does not seem to be provisioned: %s".format(
json.dumps(flow, indent=4))
logger.info(flow["message"])
duration = 60
logger.info("We will wait up to %d seconds for you to sign in" % duration)
flow["expires_at"] = min( # Shorten the time for quick test
flow["expires_at"], time.time() + duration)
result = self.app.acquire_token_by_device_flow(flow)
self.assertLoosely( # It will skip this test if there is no user interaction
result,
assertion=lambda: self.assertIn('access_token', result),
skippable_errors=self.app.client.DEVICE_FLOW_RETRIABLE_ERRORS)
if "access_token" not in result:
self.skip("End user did not complete Device Flow in time")
self.assertCacheWorksForUser(result, scope, username=None)
result["access_token"] = result["refresh_token"] = "************"
logger.info(
"%s obtained tokens: %s", self.id(), json.dumps(result, indent=4))
def _test_acquire_token_interactive(
self, client_id=None, authority=None, scope=None, port=None,
username_uri="", # But you would want to provide one
data=None, # Needed by ssh-cert feature
**ignored):
assert client_id and authority and scope
self.app = msal.PublicClientApplication(
client_id, authority=authority, http_client=MinimalHttpClient())
result = self.app.acquire_token_interactive(
scope,
timeout=120,
port=port,
welcome_template= # This is an undocumented feature for testing
"""<html><body><h1>{id}</h1><ol>
<li>Get a username from the upn shown at <a href="{username_uri}">here</a></li>
<li>Get its password from https://aka.ms/GetLabUserSecret?Secret=msidlabXYZ
(replace the lab name with the labName from the link above).</li>
<li><a href="$auth_uri">Sign In</a> or <a href="$abort_uri">Abort</a></li>
</ol></body></html>""".format(id=self.id(), username_uri=username_uri),
data=data or {},
)
logger.debug(
"%s: cache = %s, id_token_claims = %s",
self.id(),
json.dumps(self.app.token_cache._cache, indent=4),
json.dumps(result.get("id_token_claims"), indent=4),
)
self.assertIn(
"access_token", result,
"{error}: {error_description}".format(
# Note: No interpolation here, cause error won't always present
error=result.get("error"),
error_description=result.get("error_description")))
self.assertCacheWorksForUser(result, scope, username=None, data=data or {})
return result # For further testing
class SshCertTestCase(E2eTestCase):
    """End-to-end tests for obtaining SSH certificates (token_type "ssh-cert")."""
    # NOTE(review): the "<KEY>" placeholders below are redacted JWK moduli;
    # real values are required for these flows to actually succeed.
    _JWK1 = """{"kty":"RSA", "n":"<KEY>", "e":"AQAB"}"""
    _JWK2 = """{"kty":"RSA", "n":"<KEY>", "e":"AQAB"}"""
    # Request payloads binding the ssh-cert to one of two different keys.
    DATA1 = {"token_type": "ssh-cert", "key_id": "key1", "req_cnf": _JWK1}
    DATA2 = {"token_type": "ssh-cert", "key_id": "key2", "req_cnf": _JWK2}
    _SCOPE_USER = ["https://pas.windows.net/CheckMyAccess/Linux/user_impersonation"]
    _SCOPE_SP = ["https://pas.windows.net/CheckMyAccess/Linux/.default"]
    SCOPE = _SCOPE_SP  # Historically there was a separation, at 2021 it is unified

    def test_ssh_cert_for_service_principal(self):
        """A confidential client can obtain an ssh-cert token."""
        # Any SP can obtain an ssh-cert. Here we use the lab app.
        # NOTE(review): get_lab_app() is defined elsewhere in this module.
        result = get_lab_app().acquire_token_for_client(self.SCOPE, data=self.DATA1)
        self.assertIsNotNone(result.get("access_token"), "Encountered {}: {}".format(
            result.get("error"), result.get("error_description")))
        self.assertEqual("ssh-cert", result["token_type"])

    @unittest.skipIf(os.getenv("TRAVIS"), "Browser automation is not yet implemented")
    def test_ssh_cert_for_user(self):
        """Interactive flow returns an ssh-cert; cache refresh can rebind the key."""
        result = self._test_acquire_token_interactive(
            client_id="04b07795-8ddb-461a-bbee-02f9e1bf7b46",  # Azure CLI is one
            # of the only 2 clients that are PreAuthz to use ssh cert feature
            authority="https://login.microsoftonline.com/common",
            scope=self.SCOPE,
            data=self.DATA1,
            username_uri="https://msidlab.com/api/user?usertype=cloud",
            )  # It already tests reading AT from cache, and using RT to refresh
        # acquire_token_silent() would work because we pass in the same key
        self.assertIsNotNone(result.get("access_token"), "Encountered {}: {}".format(
            result.get("error"), result.get("error_description")))
        self.assertEqual("ssh-cert", result["token_type"])
        logger.debug("%s.cache = %s",
            self.id(), json.dumps(self.app.token_cache._cache, indent=4))
        # refresh_token grant can fetch an ssh-cert bound to a different key
        account = self.app.get_accounts()[0]
        refreshed_ssh_cert = self.app.acquire_token_silent(
            self.SCOPE, account=account, data=self.DATA2)
        self.assertIsNotNone(refreshed_ssh_cert)
        self.assertEqual(refreshed_ssh_cert["token_type"], "ssh-cert")
        self.assertNotEqual(result["access_token"], refreshed_ssh_cert['access_token'])
# Folder containing this test module; used to resolve optional local files.
THIS_FOLDER = os.path.dirname(__file__)
# Optional JSON config file that enables the file-based test cases below.
CONFIG = os.path.join(THIS_FOLDER, "config.json")
@unittest.skipUnless(os.path.exists(CONFIG), "Optional %s not found" % CONFIG)
class FileBasedTestCase(E2eTestCase):
    # This covers scenarios that are not currently available for test automation.
    # So they mean to be run on maintainer's machine for semi-automated tests.
    @classmethod
    def setUpClass(cls):
        # Load the maintainer-provided JSON config that drives these tests.
        with open(CONFIG) as f:
            cls.config = json.load(f)

    def skipUnlessWithConfig(self, fields):
        """Skip the current test unless every field in ``fields`` exists in the config."""
        for field in fields:
            if field not in self.config:
                self.skipTest('Skipping due to lack of configuration "%s"' % field)

    def test_username_password(self):
        self.skipUnlessWithConfig(["client_id", "username", "password", "scope"])
        self._test_username_password(**self.config)

    def _get_app_and_auth_code(self, scopes=None, **kwargs):
        # Thin wrapper feeding config values into the module-level helper.
        return _get_app_and_auth_code(
            self.config["client_id"],
            client_secret=self.config.get("client_secret"),
            authority=self.config.get("authority"),
            port=self.config.get("listen_port", 44331),
            scopes=scopes or self.config["scope"],
            **kwargs)

    def _test_auth_code(self, auth_kwargs, token_kwargs):
        """Shared auth-code-flow body; kwargs are forwarded to the two legs of the flow."""
        self.skipUnlessWithConfig(["client_id", "scope"])
        (self.app, ac, redirect_uri) = self._get_app_and_auth_code(**auth_kwargs)
        result = self.app.acquire_token_by_authorization_code(
            ac, self.config["scope"], redirect_uri=redirect_uri, **token_kwargs)
        logger.debug("%s.cache = %s",
            self.id(), json.dumps(self.app.token_cache._cache, indent=4))
        self.assertIn(
            "access_token", result,
            "{error}: {error_description}".format(
                # Note: No interpolation here, cause error won't always present
                error=result.get("error"),
                error_description=result.get("error_description")))
        self.assertCacheWorksForUser(result, self.config["scope"], username=None)

    def test_auth_code(self):
        self._test_auth_code({}, {})

    def test_auth_code_with_matching_nonce(self):
        self._test_auth_code({"nonce": "foo"}, {"nonce": "foo"})

    def test_auth_code_with_mismatching_nonce(self):
        self.skipUnlessWithConfig(["client_id", "scope"])
        (self.app, ac, redirect_uri) = self._get_app_and_auth_code(nonce="foo")
        with self.assertRaises(ValueError):
            self.app.acquire_token_by_authorization_code(
                ac, self.config["scope"], redirect_uri=redirect_uri, nonce="bar")

    def test_client_secret(self):
        self.skipUnlessWithConfig(["client_id", "client_secret"])
        self.app = msal.ConfidentialClientApplication(
            self.config["client_id"],
            client_credential=self.config.get("client_secret"),
            authority=self.config.get("authority"),
            http_client=MinimalHttpClient())
        scope = self.config.get("scope", [])
        result = self.app.acquire_token_for_client(scope)
        self.assertIn('access_token', result)
        self.assertCacheWorksForApp(result, scope)

    def test_client_certificate(self):
        self.skipUnlessWithConfig(["client_id", "client_certificate"])
        client_cert = self.config["client_certificate"]
        assert "private_key_path" in client_cert and "thumbprint" in client_cert
        with open(os.path.join(THIS_FOLDER, client_cert['private_key_path'])) as f:
            private_key = f.read()  # Should be in PEM format
        self.app = msal.ConfidentialClientApplication(
            self.config['client_id'],
            {"private_key": private_key, "thumbprint": client_cert["thumbprint"]},
            http_client=MinimalHttpClient())
        scope = self.config.get("scope", [])
        result = self.app.acquire_token_for_client(scope)
        self.assertIn('access_token', result)
        self.assertCacheWorksForApp(result, scope)

    def test_subject_name_issuer_authentication(self):
        self.skipUnlessWithConfig(["client_id", "client_certificate"])
        client_cert = self.config["client_certificate"]
        assert "private_key_path" in client_cert and "thumbprint" in client_cert
        if "public_certificate" not in client_cert:
            self.skipTest("Skipping SNI test due to lack of public_certificate")
        with open(os.path.join(THIS_FOLDER, client_cert['private_key_path'])) as f:
            private_key = f.read()  # Should be in PEM format
        with open(os.path.join(THIS_FOLDER, client_cert['public_certificate'])) as f:
            public_certificate = f.read()
        self.app = msal.ConfidentialClientApplication(
            self.config['client_id'], authority=self.config["authority"],
            client_credential={
                "private_key": private_key,
                # BUGFIX: the thumbprint lives inside the client_certificate
                # section (asserted above and used the same way in
                # test_client_certificate); reading self.config["thumbprint"]
                # would KeyError on a standard config.
                "thumbprint": client_cert["thumbprint"],
                "public_certificate": public_certificate,
            },
            http_client=MinimalHttpClient())
        scope = self.config.get("scope", [])
        result = self.app.acquire_token_for_client(scope)
        self.assertIn('access_token', result)
        self.assertCacheWorksForApp(result, scope)
@unittest.skipUnless(os.path.exists(CONFIG), "Optional %s not found" % CONFIG)
class DeviceFlowTestCase(E2eTestCase):  # A leaf class so it will be run only once
    """Runs the device-code flow once, driven by the optional local config file."""

    @classmethod
    def setUpClass(cls):
        # Pull all test parameters from the maintainer-provided JSON config.
        with open(CONFIG) as config_file:
            cls.config = json.load(config_file)

    def test_device_flow(self):
        self._test_device_flow(**self.config)
def get_lab_app(
env_client_id="LAB_APP_CLIENT_ID",
env_client_secret="LAB_APP_CLIENT_SECRET",
):
"""Returns the lab app as an MSAL confidential client.
Get it from environment variables if defined, otherwise fall back to use MSI.
"""
logger.info(
"Reading ENV variables %s and %s for lab app defined at "
"https://docs.msidlab.com/accounts/confidentialclient.html",
env_client_id, env_client_secret)
if os.getenv(env_client_id) and os.getenv(env_client_secret):
# A shortcut mainly for running tests on developer's local development machine
# or it could be setup on Travis CI
# https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings
# Data came from here
# https://docs.msidlab.com/accounts/confidentialclient.html
client_id = os.getenv(env_client_id)
client_secret = os.getenv(env_client_secret)
else:
logger.info("ENV variables %s and/or %s are not defined. Fall back to MSI.",
| |
to cumulative buffers
self.cur_node[bs_idx, num_partial] = t[-1]
self.cur_cap[bs_idx, num_partial] = (
1.0 - self.demands[bs_idx].gather(dim=-1, index=t).sum()
)
self.cur_time_to_depot[bs_idx, num_partial] = self.time_to_depot[bs_idx, t[-1]]
# recalculate current time of vehicle
tm = self._recompute_cost(t, bs_idx, service_tm)
if recompute_cost:
costs.append(tm)
self.cur_time[bs_idx, num_partial] = tm
self.next_index_in_tour[bs_idx, num_partial] = len(t)
self.active_vehicles[bs_idx, t_idx] = True
num_partial += 1
except IndexError:
raise RuntimeError(f"Number of tours of provided solution "
f"is larger than max_num_vehicles!")
t_idx += 1
# singleton tour
else:
pass # nothing to do in this case
# check if number of partial tours <= max_concurrent_vehicles
assert num_partial <= self.max_concurrent_vehicles
must_assign = self.max_concurrent_vehicles-num_partial
if must_assign > 0:
# start a new tour for each non existing partial tour
for i in range(num_partial, num_partial+must_assign):
nxt_active = self._get_next_active_vehicle()[bs_idx]
self.active_vehicles[bs_idx, nxt_active] = 1
self.active_to_plan_idx[bs_idx, i] = nxt_active
# adapt visitation status
nz = self.tour_plan[bs_idx].nonzero(as_tuple=True)
self._visited[bs_idx, self.tour_plan[bs_idx, nz[0], nz[1]].long()] = 1
if recompute_cost:
total_costs.append(sum(costs))
# inc per sample
bs_idx += sample_fact
if recompute_cost:
self._total = torch.tensor(total_costs, dtype=self.fp_precision, device=self.device)
# re-expand if the number of samples changed during selection procedure
# POMO sampling will always do the expansion during the destruction procedure,
# but standard sampling needs it here explicitly
if n_samples != self.num_samples:
self._expand_sample_dimension(sample_fact)
#
self.active_to_plan_idx = self.active_vehicles.nonzero(as_tuple=True)[1].view(self.bs, -1)
# re-create graph
self.to_graph()
self._has_instance = True
self._is_reset = True
self._step = (self._visited.sum(-1) + self._finished.sum(-1)).max().cpu().item()
return self._get_observation()
def _recompute_cost(self, tour: Union[List, torch.Tensor], bs_idx: int, service_time: float):
# recalculate current time of vehicle
tm = 0
prev = 0
for nxt in tour:
# select from distance matrix
tm += self._dist_mat[bs_idx, prev, nxt]
# add waiting time and service time
tm += ((self.tw[bs_idx, nxt][0] - tm).clamp_(min=0) + service_time)
prev = nxt
return tm.cpu().item()
    def destruct(self, **kwargs):
        """Tensor-based native destruction operator circumventing
        expensive conversion to lists during solution export/import.

        Abstract in this class; concrete environments must override it.

        Raises:
            NotImplementedError: always, in this base implementation.
        """
        raise NotImplementedError
@staticmethod
def _cumsum0(t: torch.Tensor) -> torch.Tensor:
"""calculate cumsum of t starting at 0."""
return torch.cat((
torch.zeros(1, dtype=t.dtype, device=t.device),
torch.cumsum(t, dim=-1)[:-1]
), dim=0)
@property
def depot_node(self) -> torch.Tensor:
"""idx of depot node is always 0."""
if self._zero.device != self.device:
self._zero = self._zero.to(device=self.device)
return self._zero[:, None].expand(-1, self.bs).view(-1)
@property
def idx_inc(self) -> torch.Tensor:
"""Returns the index increase necessary to
transform to BS x N running index."""
assert self.depot_idx is not None and len(self.depot_idx) == self.bs
return self.depot_idx
@property
def visited(self) -> torch.BoolTensor:
"""Returns mask for all nodes without depot (BS, N-1),
indicating if the respective node was already visited."""
return self._visited[:, 1:]
@property
def k_used(self) -> torch.Tensor:
"""Returns the number of vehicles used for each instance."""
_active = self.active_vehicles.clone()
_active[self.active_vehicles] = (self.cur_node != self.depot_node[:, None]).view(-1)
return (self._finished | _active).sum(-1)
@property
def total_cost(self) -> torch.Tensor:
"""return the current total cost of the solution."""
return self._total.clone()
    @property
    def num_samples(self):
        # Current number of samples per instance (kept in sync by the
        # sample-expansion procedure during reset/selection).
        return self._num_samples
    def to_graph(self) -> None:
        """Create static nbh graph and dynamic tour graph components.

        The k-NN neighborhood ('nbh') graph over node coordinates is built
        once per episode; the tour graph (edges along the current routes) is
        rebuilt either early in the episode or every
        ``tour_graph_update_step`` steps, and left empty otherwise.
        Results are stored on ``self`` (nbh_edges/nbh_weights,
        tour_edges/tour_weights) rather than returned.
        """
        if self.depot_idx is None:
            # starting node indices of each batch instance are exactly depot
            self.depot_idx = self._cumsum0(
                torch.from_numpy(np.full(self.bs, self.graph_size))
                .to(dtype=torch.long, device=self.device)
            )
        if self._tour_batch_idx is None:
            # maps each (instance, vehicle) slot to its batch index
            self._tour_batch_idx = torch.arange(self.bs, device=self.device)[:, None].expand(-1, self.max_vehicle_number)
        # nbh graph is static and only needs to be created at start of episode
        if self.nbh_edges is None or self.nbh_weights is None:
            nbh_edges, nbh_weights = [], []
            for i, c in enumerate(self.coords):
                e = self.nbh_sampler(c)
                nbh_edges.append(e + self.idx_inc[i])  # increase node indices by running idx
                # calculate weights: normalized distance of each sampled edge
                idx_coords = c[e]
                nbh_weights.append(
                    dimacs_challenge_dist_fn(idx_coords[0], idx_coords[1])/self.org_service_horizon[i]
                )
            self.nbh_edges = torch.cat(nbh_edges, dim=-1)
            self.nbh_weights = torch.cat(nbh_weights, dim=-1)
            self.k_nbh_size = self.nbh_sampler.k
        if self.tour_edges is None or self.tour_weights is None:
            # initialize - no tours exist
            # create just dummy edges from depot to depot
            self.tour_edges = torch.cat((self.depot_idx[None, :], self.depot_idx[None, :]), dim=0)
            self.tour_weights = torch.zeros(self.bs, dtype=self.fp_precision, device=self.device)
        elif (self._step <= self.max_concurrent_vehicles+1) or (self._step % self.tour_graph_update_step == 0):
            # infer edges from current routes
            # select all routes which are either finished or active (partial solutions)
            selection_mask = self._finished | self.active_vehicles
            # increase to running idx and get corresponding node indices
            #tours = (self.tour_plan + self.idx_inc[:, None, None])[selection_mask]
            tours = (
                self.tour_plan[selection_mask] +
                self.idx_inc[:, None, None].expand(-1, selection_mask.size(-1), 1)[selection_mask]
            )
            if self.debug_lvl > 1:
                # every selected route is expected to end back at its depot idx
                assert (tours[:, -1] == self.depot_idx.repeat_interleave(selection_mask.sum(-1), dim=-1)).all()
            sbl = tours.size(-1)
            tours = tours.view(-1, sbl)  # (BS, max_concurrent, seq_buffer_len) -> (-1, seq_buffer_len)
            # create edges as node idx pairs
            # automatically adds an edge from the last node back to the depot
            tours = torch.cat((
                torch.roll(tours, shifts=1, dims=-1)[:, None, :],  # cyclic shift by 1
                tours[:, None, :]
            ), axis=1).permute(1, 0, 2).reshape(2, -1)
            tour_batch_idx = self._tour_batch_idx[selection_mask]
            # remove dummies (depot self loops)
            selection_mask = (tours[0, :] != tours[1, :])
            self.tour_edges = tours[:, selection_mask]
            # get weights
            # TODO: better way than with tour_batch_idx which is only used here?!
            tour_batch_idx = (
                tour_batch_idx[:, None].expand(-1, sbl).reshape(-1)
            )[selection_mask]
            if self.inference:
                # select from (precomputed) distance matrix
                idx = self.tour_edges - self.idx_inc[tour_batch_idx]
                self.tour_weights = self._dist_mat[tour_batch_idx][
                    torch.arange(tour_batch_idx.size(0), device=self.device), idx[0,], idx[1,]
                ]
            else:
                # compute on the fly
                idx_coords = self.coords.view(-1, 2)[self.tour_edges]
                self.tour_weights = (
                    dimacs_challenge_dist_fn(idx_coords[0], idx_coords[1]) /
                    self.org_service_horizon[tour_batch_idx]
                )
        else:
            # no update to tour graph
            self.tour_edges = torch.empty(0)
            self.tour_weights = torch.empty(0)
    def get_node_nbh(self, node_idx: torch.Tensor) -> torch.LongTensor:
        """Return the neighborhood of the specified nodes.

        For customer nodes the static k-NN neighborhood is looked up; for
        depot entries (idx 0) a dynamic neighborhood of the up-to-k_nbh_size
        closest *unvisited* nodes is assembled (padded with further nodes if
        fewer remain, which are masked later in the selection procedure).
        Returned indices are local (the running-index offset is subtracted).
        """
        assert node_idx.size(0) == self.bs
        depot_mask = (node_idx == 0)
        if depot_mask.any():
            # first N elements in self.nbh_edges[0] are depot nbh
            depot_nbh = self.nbh_edges.view(2, self.bs, -1)[:, :, :self.graph_size]
            if self.ordered_idx is None:
                # order the nodes in the depot nbh by their distance to depot
                idx_coords = self.coords.view(-1, 2)[depot_nbh.reshape(2, -1)]
                # here euclidean distance is sufficient
                self.ordered_idx = torch.norm(idx_coords[0]-idx_coords[1], p=2, dim=-1)\
                    .view(self.bs, -1)\
                    .argsort(dim=-1, descending=False)
            # first check visitation status
            vis_mask = ~self._visited.gather(dim=-1, index=self.ordered_idx)
            # get mask of the first 'nbh_size' closest unvisited nodes
            _msk = vis_mask.cumsum(dim=-1) <= self.k_nbh_size
            mask = torch.zeros_like(vis_mask)
            mask[_msk] = vis_mask[_msk]
            # if there are less than 'nbh_size' unvisited nodes, correct mask
            # since we always need at least 'nbh_size' nodes for batching,
            # they will just be masked in the selection procedure
            missing_to_nbh_size = -mask.sum(-1) + self.k_nbh_size
            missing = missing_to_nbh_size > 0
            if missing.any():
                # create mask of the first 'missing_to_nbh_size' positions to set to true
                zmsk = ~mask[missing]
                zmsk = (
                    zmsk.cumsum(-1) == missing_to_nbh_size[missing, None]
                ).fliplr().cumsum(-1).fliplr().to(torch.bool)
                _msk = torch.zeros_like(mask)
                _msk[missing] = zmsk
                mask[_msk] = 1
            # select corresponding node indices
            select_idx = self.ordered_idx[mask].view(self.bs, -1)
            depot_nbh = depot_nbh[0].gather(dim=-1, index=select_idx)
            if depot_mask.all():
                # all queried nodes are depots: broadcast the depot nbh
                return (
                    depot_nbh[:, None, :].expand(self.bs, self.max_concurrent_vehicles, -1) -
                    self.idx_inc[:, None, None]
                )
        # get other node nbh
        nbh = self.nbh_edges.view(2, self.bs, -1)[:, :, self.graph_size:]
        nbh = (
            nbh[0].view(self.bs, self.graph_size-1, self.k_nbh_size)
            # here we just clamp to enable the gather operation on dummy depot node indices,
            # they are then replaced below
            .gather(dim=1, index=(torch.clamp(node_idx-1, min=0))[:, :, None].expand(self.bs, -1, self.k_nbh_size))
        )
        if depot_mask.any():
            # replace depot nbh
            nbh[depot_mask] = depot_nbh.repeat_interleave(depot_mask.sum(-1), dim=0)
        return nbh - self.idx_inc[:, None, None]
def compute_distance_matrix(self, coords: torch.Tensor, normalize: bool = True) -> torch.Tensor:
"""Calculate (BS, N, N) distance (transit) matrix."""
if normalize:
return self._compute_normed_distance_matrix(coords, self.org_service_horizon)
else:
return dimacs_challenge_dist_fn(coords[:, :, None, :], coords[:, None, :, :])
    @staticmethod
    @torch.jit.script
    def _compute_normed_distance_matrix(coords: torch.Tensor, denom: torch.Tensor) -> torch.Tensor:
        # TorchScript-compiled pairwise distance matrix, normalized per batch
        # instance by 'denom' (the original service horizon).
        # coords broadcasting: (BS, N, 1, 2) vs (BS, 1, N, 2) -> (BS, N, N).
        return (
            dimacs_challenge_dist_fn(coords[:, :, None, :], coords[:, None, :, :]) /
            denom[:, None, None]
        )
def _randint(self, bs: int, n: int, high: int, low: int = 0, replace: bool = False):
"""Draws n random integers between low (inc) and high (exc) for batch of size bs."""
if self._one.device != self.device:
self._one = self._one.to(device=self.device)
return torch.multinomial(
self._one[:, None, None].expand(-1, bs, high).view(bs, high),
n, replacement=replace) + low
def _get_nbh_and_mask(self) -> Tuple[torch.LongTensor, torch.BoolTensor]:
"""Returns the NBH of each node at which an active vehicle currently is positioned.
Moreover, creates a feasibility mask over this NBH by
- checking if node was already visited
- checking if remaining capacity of vehicle is sufficient
- checking if the node can still be served by the current vehicle within the respective TW
Returns:
nbh: (BS, max_concurrent, NBH),
mask: (BS, max_concurrent, NBH)
"""
# get node neighborhood of current nodes
nbh = self.get_node_nbh(self.cur_node)
# start creating mask (True where infeasible)
# self-loops are masked automatically, since they just have been set as visited in | |
# This code is an alternative implementation of the paper by
# <NAME>, <NAME>, and <NAME>. "Age Progression/Regression by Conditional Adversarial Autoencoder."
# IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017.
#
# Date: Mar. 24th, 2017
#
# Please cite above paper if you use this code
#
from __future__ import division
import os
import time
from glob import glob
import tensorflow as tf
import numpy as np
from scipy.io import savemat
from ops import *
import model as M
class FaceAging(object):
    def __init__(self,
                 session,  # TensorFlow session
                 size_image=128,  # size the input images
                 size_kernel=5,  # size of the kernels in convolution and deconvolution
                 size_batch=100,  # mini-batch size for training and testing, must be square of an integer
                 num_input_channels=3,  # number of channels of input images
                 num_encoder_channels=64,  # number of channels of the first conv layer of encoder
                 num_z_channels=50,  # number of channels of the layer z (noise or code)
                 num_categories=10,  # number of categories (age segments) in the training dataset
                 num_gen_channels=1024,  # number of channels of the first deconv layer of generator
                 enable_tile_label=True,  # enable to tile the label
                 tile_ratio=1.0,  # ratio of the length between tiled label and z
                 is_training=True,  # flag for training or testing mode
                 save_dir='./save',  # path to save checkpoints, samples, and summary
                 dataset_name='UTKFace'  # name of the dataset in the folder ./data
                 ):
        """Build the full conditional-adversarial-autoencoder graph
        (encoder, generator, image discriminator and their losses) in the
        given TF1 session. Placeholders and loss tensors are stored on self;
        no variables are initialized here (see ``train``)."""
        self.session = session
        # pixel values are scaled into [-1, 1]
        self.image_value_range = (-1, 1)
        self.size_image = size_image
        self.size_kernel = size_kernel
        self.size_batch = size_batch
        self.num_input_channels = num_input_channels
        self.num_encoder_channels = num_encoder_channels
        self.num_z_channels = num_z_channels
        self.num_categories = num_categories
        self.num_gen_channels = num_gen_channels
        self.enable_tile_label = enable_tile_label
        self.tile_ratio = tile_ratio
        self.is_training = is_training
        self.save_dir = save_dir
        self.dataset_name = dataset_name
        # ************************************* input to graph ********************************************************
        self.input_image = tf.placeholder(
            tf.float32,
            [self.size_batch, self.size_image, self.size_image, self.num_input_channels],
            name='input_images'
        )
        # one-hot age-segment labels
        self.age = tf.placeholder(
            tf.float32,
            [self.size_batch, self.num_categories],
            name='age_labels'
        )
        # one-hot gender labels
        self.gender = tf.placeholder(
            tf.float32,
            [self.size_batch, 2],
            name='gender_labels'
        )
        # ************************************* build the graph *******************************************************
        print('\n\tBuilding graph ...')
        # encoder: input image --> z
        self.z = self.encoder(
            image=self.input_image
        )
        # generator: z + label --> generated image
        self.G = self.generator(
            z=self.z,
            y=self.age,
            gender=self.gender,
            enable_tile_label=self.enable_tile_label,
            tile_ratio=self.tile_ratio
        )
        # discriminator on G
        self.D_G, self.D_G_logits = self.discriminator_img(
            image=self.G,
            y=self.age,
            gender=self.gender,
            is_training=self.is_training
        )
        # discriminator on input image (shares weights with the one on G)
        self.D_input, self.D_input_logits = self.discriminator_img(
            image=self.input_image,
            y=self.age,
            gender=self.gender,
            is_training=self.is_training,
            reuse_variables=True
        )
        # ************************************* loss functions *******************************************************
        # loss function of encoder + generator
        #self.EG_loss = tf.nn.l2_loss(self.input_image - self.G) / self.size_batch # L2 loss
        self.EG_loss = tf.reduce_mean(tf.abs(self.input_image - self.G))  # L1 loss
        # loss function of discriminator on image:
        # real images should be classified 1, generated images 0
        self.D_img_loss_input = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_input_logits, labels=tf.ones_like(self.D_input_logits))
        )
        self.D_img_loss_G = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_G_logits, labels=tf.zeros_like(self.D_G_logits))
        )
        # adversarial loss for the generator: fool D into predicting 1 on G
        self.G_img_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_G_logits, labels=tf.ones_like(self.D_G_logits))
        )
        # *********************************** trainable variables ****************************************************
        trainable_variables = tf.trainable_variables()
        # variables of encoder (selected by variable-scope name prefix)
        self.E_variables = [var for var in trainable_variables if 'E_' in var.name]
        print(self.E_variables)
        # variables of generator
        self.G_variables = [var for var in trainable_variables if 'G_' in var.name]
        # variables of discriminator on image
        self.D_img_variables = [var for var in trainable_variables if 'D_img_' in var.name]
        # for saving the graph and variables
        self.saver = tf.train.Saver(max_to_keep=2)
def train(self,
num_epochs=200, # number of epochs
learning_rate=0.0002, # learning rate of optimizer
beta1=0.5, # parameter for Adam optimizer
decay_rate=1.0, # learning rate decay (0, 1], 1 means no decay
enable_shuffle=True, # enable shuffle of the dataset
use_trained_model=True, # use the saved checkpoint to initialize the network
use_init_model=True, # use the init model to initialize the network
weigts=(0.0001, 0, 0) # the weights of adversarial loss and TV loss
):
# *************************** load file names of images ******************************************************
file_names = glob(os.path.join('./data', self.dataset_name, '*.jpg'))
size_data = len(file_names)
np.random.seed(seed=2019)
if enable_shuffle:
np.random.shuffle(file_names)
# *********************************** optimizer **************************************************************
# over all, there are three loss functions, weights may differ from the paper because of different datasets
self.loss_EG = self.EG_loss + weigts[0] * self.G_img_loss # slightly increase the params
self.loss_Di = self.D_img_loss_input + self.D_img_loss_G
# set learning rate decay
self.EG_global_step = tf.Variable(0, trainable=False, name='global_step')
EG_learning_rate = tf.train.exponential_decay(
learning_rate=learning_rate,
global_step=self.EG_global_step,
decay_steps=size_data / self.size_batch * 2,
decay_rate=decay_rate,
staircase=True
)
# optimizer for encoder + generator
with tf.variable_scope('opt', reuse=tf.AUTO_REUSE):
self.EG_optimizer = tf.train.AdamOptimizer(
learning_rate=EG_learning_rate,
beta1=beta1
).minimize(
loss=self.loss_EG,
global_step=self.EG_global_step,
var_list=self.E_variables + self.G_variables
)
# optimizer for discriminator on image
self.D_img_optimizer = tf.train.AdamOptimizer(
learning_rate=EG_learning_rate,
beta1=beta1
).minimize(
loss=self.loss_Di,
var_list=self.D_img_variables
)
# ************* get some random samples as testing data to visualize the learning process *********************
sample_files = file_names[0:self.size_batch]
file_names[0:self.size_batch] = []
sample = [load_image(
image_path=sample_file,
image_size=self.size_image,
image_value_range=self.image_value_range,
is_gray=(self.num_input_channels == 1),
) for sample_file in sample_files]
if self.num_input_channels == 1:
sample_images = np.array(sample).astype(np.float32)[:, :, :, None]
else:
sample_images = np.array(sample).astype(np.float32)
sample_label_age = np.ones(
shape=(len(sample_files), self.num_categories),
dtype=np.float32
) * self.image_value_range[0]
sample_label_gender = np.ones(
shape=(len(sample_files), 2),
dtype=np.float32
) * self.image_value_range[0]
for i, label in enumerate(sample_files):
label = int(str(sample_files[i]).split('\\')[-1].split('_')[0])
if 0 <= label <= 5:
label = 0
elif 6 <= label <= 10:
label = 1
elif 11 <= label <= 15:
label = 2
elif 16 <= label <= 20:
label = 3
elif 21 <= label <= 30:
label = 4
elif 31 <= label <= 40:
label = 5
elif 41 <= label <= 50:
label = 6
elif 51 <= label <= 60:
label = 7
elif 61 <= label <= 70:
label = 8
else:
label = 9
sample_label_age[i, label] = self.image_value_range[-1]
gender = int(str(sample_files[i]).split('\\')[-1].split('_')[1])
sample_label_gender[i, gender] = self.image_value_range[-1]
# ******************************************* training *******************************************************
# initialize the graph
tf.global_variables_initializer().run()
# load check point
if use_trained_model:
if self.load_checkpoint():
print("\tSUCCESS ^_^")
else:
print("\tFAILED >_<!")
# load init model
if use_init_model:
if not os.path.exists('init_model/model-init.data-00000-of-00001'):
from init_model.zip_opt import join
try:
join('init_model/model_parts', 'init_model/model-init.data-00000-of-00001')
except:
raise Exception('Error joining files')
self.load_checkpoint(model_path='init_model')
# epoch iteration
num_batches = len(file_names) // self.size_batch
for epoch in range(num_epochs):
if enable_shuffle:
np.random.shuffle(file_names)
for ind_batch in range(num_batches):
start_time = time.time()
# read batch images and labels
batch_files = file_names[ind_batch*self.size_batch:(ind_batch+1)*self.size_batch]
batch = [load_image(
image_path=batch_file,
image_size=self.size_image,
image_value_range=self.image_value_range,
is_gray=(self.num_input_channels == 1),
) for batch_file in batch_files]
if self.num_input_channels == 1:
batch_images = np.array(batch).astype(np.float32)[:, :, :, None]
else:
batch_images = np.array(batch).astype(np.float32)
batch_label_age = np.ones(
shape=(len(batch_files), self.num_categories),
dtype=np.float
) * self.image_value_range[0]
batch_label_gender = np.ones(
shape=(len(batch_files), 2),
dtype=np.float
) * self.image_value_range[0]
for i, label in enumerate(batch_files):
label = int(str(batch_files[i]).split('\\')[-1].split('_')[0])
if 0 <= label <= 5:
label = 0
elif 6 <= label <= 10:
label = 1
elif 11 <= label <= 15:
label = 2
elif 16 <= label <= 20:
label = 3
elif 21 <= label <= 30:
label = 4
elif 31 <= label <= 40:
label = 5
elif 41 <= label <= 50:
label = 6
elif 51 <= label <= 60:
label = 7
elif 61 <= label <= 70:
label = 8
else:
label = 9
batch_label_age[i, label] = self.image_value_range[-1]
gender = int(str(batch_files[i]).split('\\')[-1].split('_')[1])
batch_label_gender[i, gender] = self.image_value_range[-1]
# update
_, _, EG_err, Gi_err, DiG_err, Di_err= self.session.run(
fetches = [
self.EG_optimizer,
self.D_img_optimizer,
self.EG_loss,
self.G_img_loss,
self.D_img_loss_G,
self.D_img_loss_input,
],
feed_dict={
self.input_image: batch_images,
self.age: batch_label_age,
self.gender: batch_label_gender,
}
)
print("\nEpoch: [%3d/%3d] Batch: [%3d/%3d]\n\tEG_err=%.4f" %
(epoch+1, num_epochs, ind_batch+1, num_batches, EG_err))
print("\tGi=%.4f\tDi=%.4f\tDiG=%.4f" % (Gi_err, Di_err, DiG_err))
# estimate left run time
elapse = time.time() - start_time
time_left = ((num_epochs - epoch - 1) * num_batches + (num_batches - ind_batch - 1)) * elapse
print("\tTime left: %02d:%02d:%02d" %
(int(time_left / 3600), int(time_left % 3600 / 60), time_left % 60))
# save sample images for each epoch
name = '{:02d}.png'.format(epoch+1)
self.sample(sample_images, sample_label_age, sample_label_gender, name)
self.test(sample_images, sample_label_gender, name)
# save checkpoint for each 5 epoch
if np.mod(epoch, 5) == 4:
self.save_checkpoint()
# save the trained model
self.save_checkpoint()
def encoder(self, image, reuse_variables=False):
with tf.variable_scope('E_',reuse=reuse_variables):
mod = M.Model(image)
mod.convLayer(5,64,stride=2,activation=M.PARAM_RELU)
mod.convLayer(5,128,stride=2,activation=M.PARAM_RELU)
mod.convLayer(5,256,stride=2,activation=M.PARAM_RELU)
mod.convLayer(5,512,stride=2,activation=M.PARAM_RELU)
mod.convLayer(5,1024,stride=2,activation=M.PARAM_RELU)
mod.flatten()
mod.fcLayer(self.num_z_channels,activation=M.PARAM_TANH)
return mod.get_current_layer()
def generator(self, z, y, gender, reuse_variables=False, enable_tile_label=True, tile_ratio=1.0):
if reuse_variables:
tf.get_variable_scope().reuse_variables()
num_layers = int(np.log2(self.size_image)) - int(self.size_kernel / 2)
if enable_tile_label:
duplicate = int(self.num_z_channels * tile_ratio / self.num_categories)
else:
duplicate = 1
z = concat_label(z, y, duplicate=duplicate)
if enable_tile_label:
duplicate = int(self.num_z_channels * tile_ratio / 2)
else:
duplicate = 1
z = concat_label(z, gender, duplicate=duplicate)
size_mini_map = int(self.size_image / 2 ** num_layers)
with tf.variable_scope('G_'):
mod = M.Model(z)
mod.fcLayer(self.num_gen_channels * size_mini_map * size_mini_map, activation=M.PARAM_RELU)
mod.reshape([-1, size_mini_map, size_mini_map, self.num_gen_channels])
mod.deconvLayer(5,512,stride=2,activation=M.PARAM_RELU) #8
mod.deconvLayer(5,256,stride=2,activation=M.PARAM_RELU) #16
mod.deconvLayer(5,128,stride=2,activation=M.PARAM_RELU) #32
mod.deconvLayer(5,64,stride=2,activation=M.PARAM_RELU) #64
mod.deconvLayer(5,32,stride=2,activation=M.PARAM_RELU) #128
feat | |
import numpy as np
from pyuvdata import UVData
import matplotlib
from matplotlib import gridspec
import copy
def apply_random_flags(uvd, flag_frac, seed=None, inplace=False,
                       zero_flagged_data=False):
    """
    Randomly flag a set of frequency channels. Flags are applied
    on top of any existing flags, and are applied to all
    baselines, times, and polarizations.

    Parameters
    ----------
    uvd : UVData object
        Input UVData object to be flagged.

    flag_frac : float
        Fraction of channels to flag, in the range [0, 1). This is the
        fraction of channels to apply flags to; the actual fraction of
        flagged channels may be greater than this, depending on if there
        were already flagged channels in the input UVData object.

    seed : int, optional
        Random seed to use. Default: None.

    inplace : bool, optional
        Whether to apply the flags to the input UVData object
        in-place, or return a copy that includes the new flags.
        Default: False.

    zero_flagged_data : bool, optional
        Whether to set the flagged channels in the data_array to
        zero. This is useful for identifying functions that are
        ignoring the mask. All flagged data will be zeroed, not
        just the new flags added by this function.

    Returns
    -------
    uvd : UVData object
        Returns UVData object with flags applied.
    """
    assert flag_frac < 1. and flag_frac >= 0., \
        "flag_frac must be in the range 0, 1"

    # Randomly select which channels to flag
    # (the previously computed unique-baseline array was unused dead code)
    chans = np.arange(uvd.freq_array.size)
    nflagged = int(flag_frac * float(chans.size))
    if seed is not None:
        np.random.seed(seed)
    flagged = np.random.choice(chans, size=nflagged, replace=False)

    # Whether to apply mask in-place, or return a copy
    new_uvd = uvd if inplace else copy.deepcopy(uvd)

    # Apply flags to every blt and pol of the chosen channels
    new_uvd.flag_array[:, :, flagged, :] = True
    if zero_flagged_data:
        new_uvd.data_array[new_uvd.flag_array] = 0.
    return new_uvd
def flag_channels(uvd, spw_ranges, inplace=False):
    """
    Flags a given range of channels entirely for a UVData object.

    Parameters
    ----------
    uvd : UVData
        UVData object to be flagged.

    spw_ranges : list
        list of tuples of the form (min_channel, max_channel) defining which
        channels to flag. `min_channel` is inclusive, `max_channel` exclusive.

    inplace : bool, optional
        If True, then the input UVData object's flag array is modified,
        and if False, a new UVData object identical to the input but with
        updated flags is created and returned (default is False).

    Returns
    -------
    uvd_new : UVData
        Flagged UVData object.

    Raises
    ------
    TypeError
        If `uvd` is not a UVData object, or an element of `spw_ranges`
        is not a tuple.
    """
    # Check inputs
    if not isinstance(uvd, UVData):
        raise TypeError("uvd must be a UVData object")

    # Whether to modify the input in-place, or work on a copy
    uvd_new = uvd if inplace else copy.deepcopy(uvd)

    # Loop over all spw ranges to be flagged
    for spw in spw_ranges:
        if not isinstance(spw, tuple):
            raise TypeError("spw_ranges must be a list of tuples")
        # The original code looped over every polarization and every
        # baseline, flagging each (blt, pol) slice separately. Since every
        # baseline-time row belongs to some baseline, that is equivalent to
        # a single vectorized assignment over all rows and the first Npols
        # polarization indices.
        uvd_new.flag_array[:, 0, spw[0]:spw[1], :uvd.Npols] = True
    return uvd_new
def construct_factorizable_mask(uvd_list, spw_ranges, first='col',
greedy_threshold=0.3, n_threshold=1,
retain_flags=True, unflag=False, greedy=True,
inplace=False):
"""
Generates a factorizable mask using a 'greedy' flagging algorithm, run on a
list of UVData objects. In this context, factorizable means that the flag
array can be written as F(freq, time) = f(freq) * g(time), i.e. entire rows
or columns are flagged.
First, flags are added to the mask based on the minimum number of samples
available for each data point. Next, depending on the `first` argument,
either full columns or full rows that have flag fractions exceeding the
`greedy_threshold` are flagged. Finally, any rows or columns with remaining
flags are fully flagged. (Unflagging the entire array is also an option.)
Parameters
----------
uvd_list : list
list of UVData objects to operate on
spw_ranges : list
list of tuples of the form (min_channel, max_channel) defining which
spectral window (channel range) to flag. `min_channel` is inclusive,
but `max_channel` is exclusive.
first : str, optional
Either 'col' or 'row', defines which axis is flagged first based on
the `greedy_threshold`. Default: 'col'.
greedy_threshold : float, optional
The flag fraction beyond which a given row or column is flagged in the
first stage of greedy flagging. Default: 0.3.
n_threshold : float, optional
The minimum number of samples needed for a pixel to remain unflagged.
Default: 1.
retain_flags : bool, optional
If True, then data points that were originally flagged in the input
data remain flagged, even if they meet the `n_threshold`. Default: True.
unflag : bool, optional
If True, the entire mask is unflagged. No other operations (e.g. greedy
flagging) will be performed. Default: False.
greedy : bool, optional
If True, greedy flagging takes place. If False, only `n_threshold`
flagging is performed (so the resulting mask will not necessarily be
factorizable). Default: True.
inplace : bool, optional
Whether to return a new copy of the input UVData objects, or modify
them in-place. Default: False (return copies).
Returns
-------
uvdlist_new : list
if inplace=False, a new list of UVData objects with updated flags
"""
# Check validity of input args
if first not in ['col', 'row']:
raise ValueError("'first' must be either 'row' or 'col'.")
if not isinstance(uvd_list, list):
raise TypeError("uvd_list must be a list of UVData objects")
# Check validity of thresholds
allowed_types = (float, np.float, int, np.integer)
if not isinstance(greedy_threshold, allowed_types) \
or not isinstance(n_threshold, allowed_types):
raise TypeError("greedy_threshold and n_threshold must be float or int")
if greedy_threshold >= 1. or greedy_threshold <= 0.:
raise ValueError("greedy_threshold must be in interval [0, 1]")
# List of output objects
uvdlist_new = []
# Loop over datasets
for uvd in uvd_list:
if not isinstance(uvd, UVData):
raise TypeError("uvd_list must be a list of UVData objects")
if not inplace:
uvd_new = copy.deepcopy(uvd)
# Loop over defined spectral windows
for spw in spw_ranges:
if not isinstance(spw, tuple):
raise TypeError("spw_ranges must be a list of tuples")
# Unflag everything and return
if unflag:
if inplace:
uvd.flag_array[:, :, spw[0]:spw[1], :] = False
continue
else:
uvd_new.flag_array[:, :, spw[0]:spw[1], :] = False
uvdlist_new.append(uvd_new)
continue
# Greedy flagging algorithm
# Loop over polarizations
for n in range(uvd.Npols):
# iterate over unique baselines
ubl = np.unique(uvd.baseline_array)
for bl in ubl:
# Get baseline-times indices
bl_inds = np.where(np.in1d(uvd.baseline_array, bl))[0]
# create a new array of flags with only those indices
flags = uvd.flag_array[bl_inds, 0, :, n].copy()
nsamples = uvd.nsample_array[bl_inds, 0, :, n].copy()
Ntimes = int(flags.shape[0])
Nfreqs = int(flags.shape[1])
narrower_flags_window = flags[:, spw[0]:spw[1]]
narrower_nsamples_window = nsamples[:, spw[0]:spw[1]]
flags_output = np.zeros(narrower_flags_window.shape)
# If retaining flags, an extra condition is added to the
# threshold filter
if retain_flags:
flags_output[(narrower_nsamples_window >= n_threshold)
& (narrower_flags_window == False)] = False
flags_output[(narrower_nsamples_window < n_threshold)
| (narrower_flags_window == True)] = True
else:
flags_output[(narrower_nsamples_window >= n_threshold)] \
= False
flags_output[(narrower_nsamples_window < n_threshold)] \
= True
# Perform greedy flagging
if greedy:
if first == 'col':
# Flag all columns that exceed the greedy_threshold
col_indices = np.where(np.sum(flags_output, axis=0)
/ Ntimes > greedy_threshold)
flags_output[:, col_indices] = True
# Flag all remaining rows
remaining_rows = np.where(
np.sum(flags_output, axis=1) \
> len(list(col_indices[0])) )
flags_output[remaining_rows, :] = True
else:
# Flag all rows that exceed the greedy_threshold
row_indices = np.where(
np.sum(flags_output, axis=1)
/ (spw[1]-spw[0]) \
> greedy_threshold )
flags_output[row_indices, :] = True
# Flag all remaining columns
remaining_cols = np.where(
np.sum(flags_output, axis=0) \
> len(list(row_indices[0])) )
flags_output[:, remaining_cols] = True
# Update the UVData object's flag_array if inplace
if inplace:
dset.flag_array[bl_inds,0,spw[0]:spw[1],n] \
= flags_output
else:
uvd_new.flag_array[bl_inds,0,spw[0]:spw[1],n] \
= flags_output
if not inplace:
uvdlist_new.append(uvd_new)
# Return an updated list of UVData objects if not inplace
if | |
(is_singlefile and self.ext is MULTIFILE_EXT) or (not is_singlefile and self.ext is not MULTIFILE_EXT):
raise ValueError('Inconsistent object definition : is_singlefile and self.ext should be consistent')
def __str__(self) -> str:
    # Human-readable representation: delegate to the pretty location renderer
    # (location plus mode/extension suffix).
    return self.get_pretty_location()
def get_pretty_file_mode(self):
    """
    Return a short string describing how this object is persisted:
    'singlefile' when it maps to a single file, 'multifile' otherwise.

    :return: 'singlefile' or 'multifile'
    """
    if self.is_singlefile:
        return 'singlefile'
    return 'multifile'
def get_pretty_file_ext(self):
    """
    Return a short string describing the mode and extension of this file,
    e.g. 'singlefile, .txt' or 'multifile'.

    :return: the description string
    """
    if self.is_singlefile:
        return 'singlefile, ' + self.ext
    return 'multifile'
def get_pretty_location(self, blank_parent_part: bool = False, append_file_ext: bool = True,
                        compact_file_ext: bool = False):
    """
    Return a string representing the location of this file, optionally
    decorated with its mode and extension.

    :param blank_parent_part: if True, the parent part of the path is replaced
        with blanks and a '|--' tree marker
    :param append_file_ext: if True, mode/extension information is appended
    :param compact_file_ext: if True, the bare extension is used as the suffix
        (singlefile objects only) instead of the verbose ' (mode, ext)' form
    :return: the formatted location string
    """
    suffix = ''
    if append_file_ext:
        if compact_file_ext:
            if self.is_singlefile:
                suffix = self.ext
        else:
            suffix = ' (' + self.get_pretty_file_ext() + ')'

    if not blank_parent_part:
        return self.location + suffix

    # TODO sep should be replaced with the appropriate separator in flat mode
    idx = self.location.rfind(sep)
    blanks = ' ' * (idx - 1 - len(sep))
    return blanks + '|--' + self.location[(idx + 1):] + suffix
def get_pretty_child_location(self, child_name, blank_parent_part: bool = False):
    """
    Return a string representation of the location of a named child of
    this object.

    :param child_name: the name of the child item
    :param blank_parent_part: if True, the parent path is replaced with
        blanks and a '|--' tree marker
    :return: the formatted child location string
    """
    if not blank_parent_part:
        # TODO sep should be replaced with the appropriate separator in flat mode
        return self.location + sep + child_name
    padding = ' ' * (len(self.location) - 3)
    return padding + '|--' + child_name
@abstractmethod
def get_singlefile_path(self):
    """
    Implementing classes should return the path of this file, in case of a
    singlefile object. If multifile, they should raise an exception instead.

    :return: the file path of this singlefile object
    """
    pass
@abstractmethod
def get_singlefile_encoding(self):
    """
    Implementing classes should return the file encoding, in case of a
    singlefile object. If multifile, they should raise an exception instead.

    :return: the encoding used to read this singlefile object
    """
    pass
@abstractmethod
def get_multifile_children(self) -> Dict[str, Any]:  # actually, not Any but PersistedObject
    """
    Implementing classes should return a dictionary of PersistedObjects, one
    entry for each named child of this object. If singlefile, they should
    raise an exception instead.

    :return: a dictionary {child_name: PersistedObject}
    """
    pass
class FolderAndFilesStructureError(Exception):
    """
    Raised whenever the folder and files structure does not match with the one expected
    """
    def __init__(self, contents):
        super(FolderAndFilesStructureError, self).__init__(contents)

    @staticmethod
    def create_for_multifile_tuple(obj_on_fs: 'PersistedObject', expected_size: int, found_size: int):
        """
        Build an error describing a multifile tuple whose number of child
        files does not match the expected tuple length.

        :param obj_on_fs: the persisted object that was being read
        :param expected_size: the expected tuple length
        :param found_size: the number of child files actually found
        :return: a FolderAndFilesStructureError with a descriptive message
        """
        # Use str.format here: the original concatenated the int sizes
        # directly onto str literals, which raised a TypeError at the very
        # moment the error object was being created.
        return FolderAndFilesStructureError(
            'Error trying to find a tuple of length {} at location {}. Nb of child files found is not '
            'correct, found {} files'.format(expected_size, obj_on_fs, found_size))
class FileMappingConfiguration(AbstractFileMappingConfiguration):
    """
    Abstract class for all file mapping configurations. In addition to be an AbstractFileMappingConfiguration (meaning
    that it can find objects at locations), it is able to create instances of PersistedObject, recursively.
    """

    class RecursivePersistedObject(PersistedObject):
        """
        Represents an object on the filesystem. It may be multifile or singlefile. When this object is created it
        recursively scans all of its children if any, and builds the corresponding PersistedObjects. All of this is
        logged on the provided logger if any.
        """

        def __init__(self, location: str, file_mapping_conf: AbstractFileMappingConfiguration = None,
                     logger: Logger = None, log_only_last: bool = False):
            """
            Creates a PersistedObject representing an object on the filesystem at location 'location'. It may be
            multifile or singlefile. When this object is created it recursively scans all of its children if any, and
            builds the corresponding PersistedObjects. All of this is logged on the provided logger if any.

            :param location: the filesystem location (prefix) of the object to represent
            :param file_mapping_conf: the FileMappingConfiguration used to resolve the object and its children
            :param logger: an optional logger used to trace the recursive scan
            :param log_only_last: if True, only the last path element is logged (tree-style display)
            """
            # -- file mapping
            check_var(file_mapping_conf, var_types=FileMappingConfiguration, var_name='file_mapping_conf')
            self.file_mapping_conf = file_mapping_conf

            # -- logger
            check_var(logger, var_types=Logger, var_name='logger', enforce_not_none=False)
            self.logger = logger

            try:
                # -- check single file or multifile thanks to the filemapping
                is_singlefile, ext, self._contents_or_path = self.file_mapping_conf.get_unique_object_contents(location)

                # -- store all information in the container(parent class)
                super(FileMappingConfiguration.RecursivePersistedObject, self).__init__(location, is_singlefile, ext)

                # -- log this for easy debug
                if logger is not None:
                    logger.debug('(C) ' + self.get_pretty_location(
                        blank_parent_part=(log_only_last and not GLOBAL_CONFIG.full_paths_in_logs)))

                # -- create and attach all the self.children if multifile
                # (children are built recursively, in sorted name order)
                if not self.is_singlefile:
                    self.children = {name: FileMappingConfiguration.RecursivePersistedObject(loc,
                                     file_mapping_conf=self.file_mapping_conf, logger=self.logger, log_only_last=True)
                                     for name, loc in sorted(self._contents_or_path.items())}

            except (ObjectNotFoundOnFileSystemError, ObjectPresentMultipleTimesOnFileSystemError,
                    IllegalContentNameError) as e:
                # -- log the object that was being built, just for consistency of log messages
                if logger is not None:
                    logger.debug(location)
                raise e.with_traceback(e.__traceback__)

        def get_singlefile_path(self):
            """
            Implementation of the parent method: the path of this singlefile object.

            :return: the file path stored when the object was resolved
            """
            if self.is_singlefile:
                return self._contents_or_path
            else:
                raise NotImplementedError(
                    'get_file_path_no_ext does not make any sense on a multifile object. Use object.location'
                    ' to get the file prefix')

        def get_singlefile_encoding(self):
            """
            Implementation of the parent method: the encoding used to open this singlefile object.

            :return: the encoding registered on the file mapping configuration
            """
            if self.is_singlefile:
                return self.file_mapping_conf.encoding
            else:
                raise NotImplementedError('get_file_encoding does not make any sense on a multifile object. Check this '
                                          'object\'s children to know their encoding')

        def get_multifile_children(self) -> Dict[str, PersistedObject]:
            """
            Implementation of the parent method: the named children of this multifile object.

            :return: a dictionary {child_name: PersistedObject}
            """
            if self.is_singlefile:
                raise NotImplementedError(
                    'get_multifile_children does not mean anything on a singlefile object : a single file'
                    'object by definition has no children - check your code')
            else:
                return self.children

    def __init__(self, encoding:str = None):
        """
        Constructor, with the encoding registered to open the files.

        :param encoding: the encoding used to open the files. Default is 'utf-8'.
        """
        super(FileMappingConfiguration, self).__init__(encoding)

    def create_persisted_object(self, location: str, logger: Logger) -> PersistedObject:
        """
        Creates a PersistedObject representing the object at location 'location', and recursively creates all of its
        children

        :param location: the filesystem location of the root object to scan
        :param logger: the logger used to trace the file checks
        :return: the built (recursive) PersistedObject
        """
        #print('Checking all files under ' + location)
        logger.debug('Checking all files under [{loc}]'.format(loc=location))
        obj = FileMappingConfiguration.RecursivePersistedObject(location=location, file_mapping_conf=self,
                                                                logger=logger)
        #print('File checks done')
        logger.debug('File checks done')
        return obj
class WrappedFileMappingConfiguration(FileMappingConfiguration):
"""
A file mapping where multifile objects are represented by folders
"""
def __init__(self, encoding:str = None):
    """
    Constructor, with the encoding registered to open the files.

    :param encoding: the encoding used to open the files. Default is 'utf-8'.
    """
    super(WrappedFileMappingConfiguration, self).__init__(encoding=encoding)
def find_multifile_object_children(self, parent_location, no_errors: bool = False) -> Dict[str, str]:
    """
    Implementation of the parent abstract method.

    In this mode, root_path should be a valid folder, and each item is a subfolder (multifile) or a file
    (singlefile):

        location/
        |-singlefile_sub_item1.<ext>
        |-singlefile_sub_item2.<ext>
        |-multifile_sub_item3/
        |- ...

    :param parent_location: the absolute file prefix of the parent item. it may be a folder (non-flat mode)
        or a folder + a file name prefix (flat mode)
    :param no_errors: a boolean used in internal recursive calls in order to catch errors. Should not be changed by
        users.
    :return: a dictionary of {item_name : item_prefix}
    """
    # (1) Assert that folder_path is a folder
    if not isdir(parent_location):
        if no_errors:
            return dict()
        else:
            raise ValueError('Cannot find a multifileobject at location \'' + parent_location + '\' : location is '
                             'not a valid folder')
    else:
        # (2) List folders (multifile objects or collections)
        all_subfolders = [dir_ for dir_ in listdir(parent_location) if isdir(join(parent_location, dir_))]
        items = {item_name: join(parent_location, item_name) for item_name in all_subfolders}

        # (3) List singlefiles *without* their extension
        # (rindex: only the final extension is stripped, so 'a.b.txt' -> 'a.b')
        items.update({
            item_name: join(parent_location, item_name)
            for item_name in [file_name[0:file_name.rindex(EXT_SEPARATOR)]
                              for file_name in listdir(parent_location)
                              if isfile(join(parent_location, file_name))
                              and EXT_SEPARATOR in file_name]
        })
        # (4) return all
        return items
def is_multifile_object_without_children(self, location: str) -> bool:
    """
    Tell whether the given location holds a multifile object with no children.
    For this implementation, that means an existing folder in which no child
    item can be found.

    :param location: the location to check
    :return: True if location is an empty multifile object, False otherwise
    """
    if not isdir(location):
        return False
    children = self.find_multifile_object_children(location)
    return len(children) == 0
def get_multifile_object_child_location(self, parent_item_prefix: str, child_name: str) -> str:
    """
    Implementation of the parent abstract method.
    In this mode the attribute is a file inside the parent object folder.

    :param parent_item_prefix: the absolute file prefix of the parent item.
    :param child_name: the name of the child attribute
    :return: the file prefix for this attribute
    """
    check_var(parent_item_prefix, var_types=str, var_name='parent_item_prefix')
    check_var(child_name, var_types=str, var_name='item_name')

    # assert that folder_path is a folder
    if not isdir(parent_item_prefix):
        raise ValueError(
            'Cannot get attribute item in non-flat mode, parent item path is not a folder : ' + parent_item_prefix)
    return join(parent_item_prefix, child_name)
def find_simpleobject_file_occurrences(self, location) -> Dict[str, str]:
"""
Implementation of the parent abstract method.
:param location:
:return: a dictionary of {ext : file_path}
"""
parent_dir = dirname(location)
if parent_dir is '':
parent_dir = '.'
base_prefix = basename(location)
possible_object_files = {object_file[len(base_prefix):]: join(parent_dir, | |
:py:class:`ndarray <numpy.ndarray>` of shape `(Q, Z)`
The columnized version of `X` (assumed to include padding)
X_shape : 4-tuple containing `(n_ex, in_rows, in_cols, in_ch)`
The original dimensions of `X` (not including padding)
W_shape: 4-tuple containing `(kernel_rows, kernel_cols, in_ch, out_ch)`
The dimensions of the weights in the present convolutional layer
pad : 4-tuple of `(left, right, up, down)`
Number of zero-padding rows/cols to add to `X`
stride : int
The stride of each convolution kernel
dilation : int
Number of pixels inserted between kernel elements. Default is 0.
Returns
-------
img : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)`
The reshaped `X_col` input matrix
"""
if not (isinstance(pad, tuple) and len(pad) == 4):
raise TypeError("pad must be a 4-tuple, but got: {}".format(pad))
s, d = stride, dilation
pr1, pr2, pc1, pc2 = pad
fr, fc, n_in, n_out = W_shape
n_ex, in_rows, in_cols, n_in = X_shape
X_pad = np.zeros((n_ex, n_in, in_rows + pr1 + pr2, in_cols + pc1 + pc2))
k, i, j = _im2col_indices((n_ex, n_in, in_rows, in_cols), fr, fc, pad, s, d)
X_col_reshaped = X_col.reshape(n_in * fr * fc, -1, n_ex)
X_col_reshaped = X_col_reshaped.transpose(2, 0, 1)
np.add.at(X_pad, (slice(None), k, i, j), X_col_reshaped)
pr2 = None if pr2 == 0 else -pr2
pc2 = None if pc2 == 0 else -pc2
return X_pad[:, :, pr1:pr2, pc1:pc2]
#######################################################################
# Convolution #
#######################################################################
def conv2D(X, W, stride, pad, dilation=0):
    """
    A faster (but more memory intensive) implementation of the 2D "convolution"
    (technically, cross-correlation) of input `X` with a collection of kernels in
    `W`.

    Notes
    -----
    Relies on the :func:`im2col` function to perform the convolution as a single
    matrix multiplication.

    For a helpful diagram, see Pete Warden's 2015 blogpost [1].

    References
    ----------
    .. [1] Warden (2015). "Why GEMM is at the heart of deep learning,"
       https://petewarden.com/2015/04/20/why-gemm-is-at-the-heart-of-deep-learning/

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)`
        Input volume (unpadded).
    W: :py:class:`ndarray <numpy.ndarray>` of shape `(kernel_rows, kernel_cols, in_ch, out_ch)`
        A volume of convolution weights/kernels for a given layer.
    stride : int
        The stride of each convolution kernel.
    pad : tuple, int, or 'same'
        The padding amount. If 'same', add padding to ensure that the output of
        a 2D convolution with a kernel of `kernel_shape` and stride `stride`
        produces an output volume of the same dimensions as the input. If
        2-tuple, specifies the number of padding rows and colums to add *on both
        sides* of the rows/columns in `X`. If 4-tuple, specifies the number of
        rows/columns to add to the top, bottom, left, and right of the input
        volume.
    dilation : int
        Number of pixels inserted between kernel elements. Default is 0.

    Returns
    -------
    Z : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, out_rows, out_cols, out_ch)`
        The convolution of `X` with `W`.
    """
    s, d = stride, dilation
    # resolve the concrete (top, bottom, left, right) padding amounts
    _, p = pad2D(X, pad, W.shape[:2], s, dilation=dilation)

    pr1, pr2, pc1, pc2 = p
    fr, fc, in_ch, out_ch = W.shape
    n_ex, in_rows, in_cols, in_ch = X.shape

    # update effective filter shape based on dilation factor
    _fr, _fc = fr * (d + 1) - d, fc * (d + 1) - d

    # compute the dimensions of the convolution output
    out_rows = int((in_rows + pr1 + pr2 - _fr) / s + 1)
    out_cols = int((in_cols + pc1 + pc2 - _fc) / s + 1)

    # convert X and W into the appropriate 2D matrices and take their product
    X_col, _ = im2col(X, W.shape, p, s, d)
    W_col = W.transpose(3, 2, 0, 1).reshape(out_ch, -1)

    # reshape the GEMM result back to (n_ex, out_rows, out_cols, out_ch)
    Z = (W_col @ X_col).reshape(out_ch, out_rows, out_cols, n_ex).transpose(3, 1, 2, 0)

    return Z
def conv1D(X, W, stride, pad, dilation=0):
    """
    A faster (but more memory intensive) implementation of a 1D "convolution"
    (technically, cross-correlation) of input `X` with a collection of kernels
    in `W`, implemented by lifting the inputs to 2D and delegating to
    :func:`conv2D`.

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, l_in, in_ch)`
        Input volume (unpadded)
    W: :py:class:`ndarray <numpy.ndarray>` of shape `(kernel_width, in_ch, out_ch)`
        A volume of convolution weights/kernels for a given layer
    stride : int
        The stride of each convolution kernel
    pad : tuple, int, or 'same'
        The padding amount. If 'same', add padding to ensure that the output of
        a 1D convolution with a kernel of `kernel_shape` and stride `stride`
        produces an output volume of the same dimensions as the input. If
        2-tuple, specifies the number of padding colums to add *on both sides*
        of the columns in X.
    dilation : int
        Number of pixels inserted between kernel elements. Default is 0.

    Returns
    -------
    Z : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, l_out, out_ch)`
        The convolution of X with W.
    """
    # resolve the concrete (left, right) padding amounts for the 1D signal
    _, p = pad1D(X, pad, W.shape[0], stride, dilation=dilation)

    # lift the signal and kernels to 2D by inserting a dummy row axis ...
    X_as_2d = np.expand_dims(X, axis=1)
    W_as_2d = np.expand_dims(W, axis=0)
    pad_2d = (0, 0, p[0], p[1])

    # ... run the 2D convolution, then drop the dummy row axis again
    out_2d = conv2D(X_as_2d, W_as_2d, stride, pad_2d, dilation)
    return np.squeeze(out_2d, axis=1)
def deconv2D_naive(X, W, stride, pad, dilation=0):
    """
    Perform a "deconvolution" (more accurately, a transposed convolution) of an
    input volume `X` with a weight kernel `W`, incorporating stride, pad, and
    dilation.

    Notes
    -----
    Rather than using the transpose of the convolution matrix, this approach
    uses a direct convolution with zero padding, which, while conceptually
    straightforward, is computationally inefficient.

    For further explanation, see [1].

    References
    ----------
    .. [1] Dumoulin & Visin (2016). "A guide to convolution arithmetic for deep
       learning." https://arxiv.org/pdf/1603.07285v1.pdf

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, in_rows, in_cols, in_ch)`
        Input volume (not padded)
    W: :py:class:`ndarray <numpy.ndarray>` of shape `(kernel_rows, kernel_cols, in_ch, out_ch)`
        A volume of convolution weights/kernels for a given layer
    stride : int
        The stride of each convolution kernel
    pad : tuple, int, or 'same'
        The padding amount. If 'same', add padding to ensure that the output of
        a 2D convolution with a kernel of `kernel_shape` and stride `stride`
        produces an output volume of the same dimensions as the input. If
        2-tuple, specifies the number of padding rows and colums to add *on both
        sides* of the rows/columns in `X`. If 4-tuple, specifies the number of
        rows/columns to add to the top, bottom, left, and right of the input
        volume.
    dilation : int
        Number of pixels inserted between kernel elements. Default is 0.

    Returns
    -------
    Y : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, out_rows, out_cols, n_out)`
        The deconvolution of (padded) input volume `X` with `W` using stride `s` and
        dilation `d`.
    """
    # a strided transposed convolution equals a stride-1 transposed
    # convolution on an input dilated with (stride - 1) zeros
    if stride > 1:
        X = dilate(X, stride - 1)
        stride = 1

    # pad the input
    X_pad, p = pad2D(X, pad, W.shape[:2], stride=stride, dilation=dilation)

    n_ex, in_rows, in_cols, n_in = X_pad.shape
    fr, fc, n_in, n_out = W.shape
    s, d = stride, dilation
    pr1, pr2, pc1, pc2 = p

    # update effective filter shape based on dilation factor
    _fr, _fc = fr * (d + 1) - d, fc * (d + 1) - d

    # compute deconvolution output dims
    out_rows = s * (in_rows - 1) - pr1 - pr2 + _fr
    out_cols = s * (in_cols - 1) - pc1 - pc2 + _fc
    out_dim = (out_rows, out_cols)

    # add additional padding to achieve the target output dim
    _p = calc_pad_dims_2D(X_pad.shape, out_dim, W.shape[:2], s, d)
    X_pad, pad = pad2D(X_pad, _p, W.shape[:2], stride=s, dilation=dilation)

    # perform the forward convolution using the flipped weight matrix (note
    # we set pad to 0, since we've already added padding)
    Z = conv2D(X_pad, np.rot90(W, 2), s, 0, d)

    # strip the original padding off the result (None keeps the full extent
    # when the corresponding pad amount is zero)
    pr2 = None if pr2 == 0 else -pr2
    pc2 = None if pc2 == 0 else -pc2
    return Z[:, pr1:pr2, pc1:pc2, :]
def conv2D_naive(X, W, stride, pad, dilation=0):
"""
A slow but more straightforward implementation of a 2D "convolution"
(technically, cross-correlation) of input `X` with a collection of kernels | |
<reponame>Borda/pyBPDL
"""
tools for registering images to reconstructed image using Atlas
SEE:
* http://insightsoftwareconsortium.github.io/SimpleITK-Notebooks/
* https://bic-berkeley.github.io/psych-214-fall-2016/dipy_registration.html
Copyright (C) 2017-2020 <NAME> <<EMAIL>>
"""
import logging
import time
# import multiprocessing as mproc
from functools import partial
import numpy as np
# from scipy.ndimage import filters
from dipy.align import VerbosityLevels
from dipy.align.imwarp import DiffeomorphicMap, SymmetricDiffeomorphicRegistration
from dipy.align.metrics import SSDMetric
from imsegm.utilities.experiments import get_nb_workers, WrapExecuteSequence
from scipy import interpolate, ndimage
# Number of parallel workers; presumably a 0.8 fraction of available CPUs —
# see imsegm.utilities.experiments.get_nb_workers (TODO confirm semantics)
NB_WORKERS = get_nb_workers(0.8)

# Names of keyword arguments forwarded to the symmetric diffeomorphic
# registration constructor (used to filter user-supplied parameter dicts)
LIST_SDR_PARAMS = (
    'metric',
    'level_iters',
    'step_length',
    'ss_sigma_factor',
    'opt_tol',
    'inv_iter',
    'inv_tol',
    'callback',
)

# Default parameters for the Demons symmetric diffeomorphic registration
DIPY_DEAMONS_PARAMS = dict(
    step_length=0.1,
    level_iters=[30, 50],
    inv_iter=20,
    ss_sigma_factor=0.1,
    opt_tol=1.e-2,
)
def register_demons_sym_diffeom(
    img_sense, img_ref, smooth_sigma=1., params=DIPY_DEAMONS_PARAMS, inverse=False, verbose=False
):
    """ Register the image and reconstruction from atlas
    on the end we smooth the final deformation by a gaussian filter

    :param ndarray img_sense: sensed (moving) image
    :param ndarray img_ref: reference (static) image
    :param float smooth_sigma: sigma of the Gaussian used to smooth the
        resulting deformation fields
    :param dict params: registration parameters; only the keys listed in
        LIST_SDR_PARAMS are forwarded to the registration object.
        NOTE(review): this default is a shared module-level dict — safe only
        as long as neither callers nor this function mutate it.
    :param bool inverse: whether to also build the inverse (moving-to-ref) mapping
    :param bool verbose: whether show debug time measurements
    :return tuple(ndarray,ndarray):

    >>> np.random.seed(0)
    >>> img_ref = np.zeros((10, 10), dtype=int)
    >>> img_ref[2:6, 1:7] = 1
    >>> img_ref[5:9, 4:10] = 1
    >>> img_ref
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> from skimage.morphology import erosion, dilation
    >>> img_ref_fuz = np.zeros((10, 10), dtype=float)
    >>> img_ref_fuz[dilation(img_ref, np.ones((3, 3))) == 1] = 0.1
    >>> img_ref_fuz[img_ref == 1] = 0.5
    >>> img_ref_fuz[erosion(img_ref, np.ones((3, 3))) == 1] = 1.0
    >>> img_ref_fuz
    array([[ 0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ],
           [ 0.1,  0.1,  0.1,  0.1,  0.1,  0.1,  0.1,  0.1,  0. ,  0. ],
           [ 0.1,  0.5,  0.5,  0.5,  0.5,  0.5,  0.5,  0.1,  0. ,  0. ],
           [ 0.1,  0.5,  1. ,  1. ,  1. ,  1. ,  0.5,  0.1,  0. ,  0. ],
           [ 0.1,  0.5,  1. ,  1. ,  1. ,  1. ,  0.5,  0.1,  0.1,  0.1],
           [ 0.1,  0.5,  0.5,  0.5,  0.5,  1. ,  0.5,  0.5,  0.5,  0.5],
           [ 0.1,  0.1,  0.1,  0.1,  0.5,  1. ,  1. ,  1. ,  1. ,  1. ],
           [ 0. ,  0. ,  0. ,  0.1,  0.5,  1. ,  1. ,  1. ,  1. ,  1. ],
           [ 0. ,  0. ,  0. ,  0.1,  0.5,  0.5,  0.5,  0.5,  0.5,  0.5],
           [ 0. ,  0. ,  0. ,  0.1,  0.1,  0.1,  0.1,  0.1,  0.1,  0.1]])
    >>> d_deform = register_demons_sym_diffeom(img_ref_fuz, img_ref,
    ...     smooth_sigma=1.5, inverse=True, verbose=True)
    >>> img_warp = warp2d_transform_image(img_ref, d_deform, method='nearest',
    ...     inverse=True)
    >>> np.round(img_warp.astype(float), 1)
    array([[ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  1.,  1.,  1.,  0.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  1.,  1.,  1.,  0.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  1.,  1.,  1.,  0.,  0.,  0.],
           [ 0.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.],
           [ 0.,  0.,  0.,  0.,  1.,  1.,  1.,  1.,  1.,  1.],
           [ 0.,  0.,  0.,  0.,  1.,  1.,  1.,  1.,  1.,  1.],
           [ 0.,  0.,  0.,  0.,  1.,  1.,  1.,  1.,  1.,  1.],
           [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]])
    >>> img_sense = np.zeros(img_ref.shape, dtype=int)
    >>> img_sense[4:9, 3:10] = 1
    >>> img_sense
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> d_deform = register_demons_sym_diffeom(img_sense, img_ref, smooth_sigma=0.)
    >>> img_warp = warp2d_transform_image(img_sense, d_deform)
    >>> np.round(img_warp.astype(float), 1)    # doctest: +SKIP
    array([[ 0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ],
           [ 0. ,  0.3,  0.5,  0.3,  0.1,  0. ,  0. ,  0. ,  0. ,  0. ],
           [ 0. ,  1. ,  1. ,  1. ,  1. ,  0.8,  0.4,  0.1,  0. ,  0. ],
           [ 0. ,  1. ,  1. ,  1. ,  1. ,  1. ,  1. ,  1. ,  0.5,  0. ],
           [ 0. ,  1. ,  1. ,  1. ,  1. ,  1. ,  1. ,  1. ,  1. ,  1. ],
           [ 0. ,  0.2,  1. ,  1. ,  1. ,  1. ,  1. ,  1. ,  1. ,  1. ],
           [ 0. ,  0. ,  0.6,  0.9,  1. ,  1. ,  1. ,  1. ,  1. ,  1. ],
           [ 0. ,  0. ,  0.2,  0.4,  0.5,  0.8,  1. ,  1. ,  1. ,  1. ],
           [ 0. ,  0. ,  0. ,  0.2,  0.2,  0.3,  0.4,  0.6,  0.7,  1. ],
           [ 0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ]])
    >>> np.round(img_warp - img_sense, 1)    # doctest: +SKIP
    """
    # nothing to register against an all-zero image: return empty mappings
    if img_ref.max() == 0 or img_sense.max() == 0:
        logging.debug(
            'skip image registration (demons): max values for RECONST=%d and SENSE=%d', img_ref.max(), img_sense.max()
        )
        return {'mapping': None, 'mapping-inv': None, 'package': 'dipy'}

    # keep only the parameters understood by the registration constructor
    sdr_params = {k: params[k] for k in params if k in LIST_SDR_PARAMS}
    # NOTE(review): SmoothSymmetricDiffeomorphicRegistration is defined
    # elsewhere in this module — presumably a smoothing wrapper around
    # dipy's SymmetricDiffeomorphicRegistration; confirm before relying on it
    sdr = SmoothSymmetricDiffeomorphicRegistration(
        metric=SSDMetric(img_ref.ndim), smooth_sigma=smooth_sigma, **sdr_params
    )

    sdr.verbosity = VerbosityLevels.NONE

    t = time.time()
    mapping = sdr.optimize(img_ref.astype(float), img_sense.astype(float))
    if verbose:
        logging.debug('demons took: %d s', time.time() - t)

    # smooth the forward/backward deformation fields of the final mapping
    mapping.forward = smooth_deform_field(mapping.forward, sigma=smooth_sigma)
    mapping.backward = smooth_deform_field(mapping.backward, sigma=smooth_sigma)
    # img_warped = mapping.transform(img_moving, 'linear')

    # mapping_inv = sdr.moving_to_ref
    if inverse:
        # build the inverse mapping from the optimizer's moving-to-ref fields
        mapping_inv = DiffeomorphicMap(
            img_ref.ndim, img_ref.shape, None, img_ref.shape, None, img_ref.shape, None, None
        )
        mapping_inv.forward = smooth_deform_field(sdr.moving_to_ref.forward, sigma=smooth_sigma)
        mapping_inv.backward = smooth_deform_field(sdr.moving_to_ref.backward, sigma=smooth_sigma)
    else:
        mapping_inv = None

    if verbose:
        logging.debug('smoothing and warping took: %d s', time.time() - t)

    dict_deform = {'mapping': mapping, 'mapping-inv': mapping_inv, 'package': 'dipy'}
    return dict_deform
def smooth_deform_field(field, sigma):
    """Smooth each component of a deformation field with a Gaussian filter.

    :param field: deformation field array; the last axis indexes the
        vector components of the deformation
    :param sigma: Gaussian standard deviation; a non-positive value disables
        smoothing and a plain copy of the field is returned
    :return: smoothed field with the same shape and dtype as the input

    >>> np.random.seed(0)
    >>> field = np.random.random((10, 5, 1))
    >>> np.std(field)    # doctest: +ELLIPSIS
    0.27...
    >>> field_smooth = smooth_deform_field(field, 0.5)
    >>> np.std(field_smooth)    # doctest: +ELLIPSIS
    0.17...
    """
    if sigma <= 0:
        # no smoothing requested -> just return a fresh copy
        return np.array(field)

    smoothed = np.empty(field.shape, dtype=field.dtype)
    # TODO: use different smoothing which would be fast also for large regul.
    for comp in range(field.shape[-1]):
        smoothed[..., comp] = ndimage.gaussian_filter(
            field[..., comp], sigma=sigma, order=0, mode='constant')
    return smoothed
def warp2d_transform_image(img, dict_deform, method='linear', inverse=False):
    """Warp an image with a previously computed deformation dictionary.

    :param ndarray img: image to warp
    :param dict dict_deform: {'mapping': ..., 'mapping-inv': ..., 'package': str}
    :param str method: interpolation method passed to the mapping
    :param bool inverse: use the inverse mapping instead of the forward one
    :return ndarray: warped image; a plain copy of ``img`` when no usable
        transformation is available
    """
    if dict_deform['package'] != 'dipy':
        logging.error('missing warp interpreter')
        return img.copy()
    mapping_key = 'mapping-inv' if inverse else 'mapping'
    mapping = dict_deform[mapping_key]
    if mapping is None:
        logging.debug('missing (%s) transformation', mapping_key)
        return img.copy()
    if inverse:
        return mapping.transform_inverse(img, method)
    return mapping.transform(img, method)
def warp2d_apply_deform_field(img, deform, method='linear'):
""" warping reconstructed image using atlas and weight
to the expected image image domain
:param ndarray img:
:param ndarray deform:
:return ndarray:
>>> img1 = np.zeros((8, 12), dtype=int)
>>> img1[2:6, 3:9] = 1
>>> deform = np.ones(img1.shape + (2,))
>>> deform[:, :, 1] *= -2
>>> warp2d_apply_deform_field(img1, deform)
array([[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[ 0., 0., 0., | |
= node._moments.get_converter(moments_class)
except AttributeError:
from .constant import Constant
return Constant(
moments_class.from_values(node, **kwargs),
node
)
else:
node = converter(node)
converter = node._moments.get_instance_converter(**kwargs)
if converter is not None:
from .converters import NodeConverter
return NodeConverter(converter, node)
return node
def _compute_plates_to_parent(self, index, plates):
    """Map this node's plates to the plate shape seen by parent ``index``.

    Identity by default.
    """
    # Sub-classes may want to overwrite this if they manipulate plates
    return plates
def _compute_plates_from_parent(self, index, plates):
    """Map parent ``index``'s plates to this node's plate shape.

    Identity by default.
    """
    # Sub-classes may want to overwrite this if they manipulate plates
    return plates
def _compute_plates_multiplier_from_parent(self, index, plates_multiplier):
    """Map parent ``index``'s plate multiplier to this node (identity)."""
    # TODO/FIXME: How to handle this properly?
    return plates_multiplier
def _plates_to_parent(self, index):
    """Return this node's plates expressed w.r.t. parent ``index``."""
    return self._compute_plates_to_parent(index, self.plates)
def _plates_from_parent(self, index):
    """Return the plates induced on this node by parent ``index``'s plates."""
    return self._compute_plates_from_parent(index,
                                            self.parents[index].plates)
def _plates_multiplier_from_parent(self, index):
    """Return the plate multiplier induced by parent ``index``."""
    return self._compute_plates_multiplier_from_parent(
        index,
        self.parents[index].plates_multiplier
    )
@property
def plates_multiplier(self):
    """ Plate multiplier is applied to messages to parents.

    It scales messages to account for plates that exist implicitly
    (e.g. via repeated use) rather than as explicit array axes.
    """
    return self.__plates_multiplier
@plates_multiplier.setter
def plates_multiplier(self, value):
    """Set the plate multiplier (consistency with plates is not validated)."""
    # TODO/FIXME: Check that multiplier is consistent with plates
    self.__plates_multiplier = value
    return
def get_shape(self, ind):
    """Return the full array shape (plates + variable dims) of moment ``ind``."""
    plate_axes = self.plates
    variable_axes = self.dims[ind]
    return plate_axes + variable_axes
def _add_child(self, child, index):
    """
    Add a child node.

    Parameters
    ----------
    child : node
    index : int
        The parent index of this node for the child node.
        The child node recognizes its parents by their index
        number.
    """
    self.children.add((child, index))
def _remove_child(self, child, index):
    """
    Remove a child node.

    The ``(child, index)`` pair must have been registered with
    ``_add_child``; a ``KeyError`` is raised otherwise.
    """
    self.children.remove((child, index))
def get_mask(self):
    """Return the current plate mask of this node."""
    return self.mask
## def _get_message_mask(self):
## return self.mask
def _set_mask(self, mask):
    """Store the plate mask computed by ``_update_mask``."""
    # Sub-classes may overwrite this method if they have some other masks to
    # be combined (for instance, observation mask)
    self.mask = mask
def _update_mask(self):
    """Recompute this node's mask from its children and propagate upwards.

    The mask is the logical OR of all child masks (each mapped through the
    child's plate transformation), so a plate is active if any child
    observes it.  After updating itself, the node tells all parents to
    update their masks as well.
    """
    # Combine masks from children
    mask = np.array(False)
    for (child, index) in self.children:
        mask = np.logical_or(mask, child._mask_to_parent(index))
    # Set the mask of this node
    self._set_mask(mask)
    if not misc.is_shape_subset(np.shape(self.mask), self.plates):
        raise ValueError("The mask of the node %s has updated "
                         "incorrectly. The plates in the mask %s are not a "
                         "subset of the plates of the node %s."
                         % (self.name,
                            np.shape(self.mask),
                            self.plates))
    # Tell parents to update their masks
    for parent in self.parents:
        parent._update_mask()
def _compute_weights_to_parent(self, index, weights):
    """Compute the plate weights for messages sent to parent[index].

    The weights tell which plates in the messages are active (non-zero
    weight).  This method is used for obtaining the mask which is used to
    set plates in the messages to the parent to zero.

    Sub-classes may want to overwrite this method if they do something to
    plates so that the weights/mask are somehow altered.
    """
    return weights
def _mask_to_parent(self, index):
    """
    Get the mask with respect to parent[index].

    The mask tells which plate connections are active. The mask is "summed"
    (logical or) and reshaped into the plate shape of the parent. Thus, it
    can't be used for masking messages, because some plates have been summed
    already. This method is used for propagating the mask to parents.
    """
    # Non-zero weights mark active plate connections
    mask = self._compute_weights_to_parent(index, self.mask) != 0
    # Check the shape of the mask
    plates_to_parent = self._plates_to_parent(index)
    if not misc.is_shape_subset(np.shape(mask), plates_to_parent):
        raise ValueError("In node %s, the mask being sent to "
                         "parent[%d] (%s) has invalid shape: The shape of "
                         "the mask %s is not a sub-shape of the plates of "
                         "the node with respect to the parent %s. It could "
                         "be that this node (%s) is manipulating plates "
                         "but has not overwritten the method "
                         "_compute_weights_to_parent."
                         % (self.name,
                            index,
                            self.parents[index].name,
                            np.shape(mask),
                            plates_to_parent,
                            self.__class__.__name__))
    # "Sum" (i.e., logical or) over the plates that have unit length in
    # the parent node.
    parent_plates = self.parents[index].plates
    s = misc.axes_to_collapse(np.shape(mask), parent_plates)
    mask = np.any(mask, axis=s, keepdims=True)
    mask = misc.squeeze_to_dim(mask, len(parent_plates))
    return mask
def _message_to_child(self):
    """Return this node's moments, i.e. the message passed to children.

    As a debugging aid, sanity-check that each moment's variable axes
    equal ``self.dims`` and its plate axes are a subset of
    ``self.plates``; a mismatch indicates a bug such as wrongly inferred
    plates.
    """
    u = self.get_moments()
    # Debug: Check that the message has appropriate shape
    for (ui, dim) in zip(u, self.dims):
        ndim = len(dim)
        if ndim > 0:
            if np.shape(ui)[-ndim:] != dim:
                raise RuntimeError(
                    "A bug found by _message_to_child for %s: "
                    "The variable axes of the moments %s are not equal to "
                    "the axes %s defined by the node %s. A possible reason "
                    "is that the plates of the node are inferred "
                    "incorrectly from the parents, and the method "
                    "_plates_from_parents should be implemented."
                    % (self.__class__.__name__,
                       np.shape(ui)[-ndim:],
                       dim,
                       self.name))
            if not misc.is_shape_subset(np.shape(ui)[:-ndim],
                                        self.plates):
                raise RuntimeError(
                    "A bug found by _message_to_child for %s: "
                    "The plate axes of the moments %s are not a subset of "
                    "the plate axes %s defined by the node %s."
                    % (self.__class__.__name__,
                       np.shape(ui)[:-ndim],
                       self.plates,
                       self.name))
        else:
            if not misc.is_shape_subset(np.shape(ui), self.plates):
                raise RuntimeError(
                    "A bug found by _message_to_child for %s: "
                    "The plate axes of the moments %s are not a subset of "
                    "the plate axes %s defined by the node %s."
                    % (self.__class__.__name__,
                       np.shape(ui),
                       self.plates,
                       self.name))
    return u
def _message_to_parent(self, index, u_parent=None):
    """Compute the message sent to ``self.parents[index]``.

    The raw message from ``_get_message_and_mask_to_parent`` is masked,
    scaled by the plate multiplier and summed over plates so that each
    element matches the plate shape of the parent.

    :param int index: which parent the message is for
    :param u_parent: optional parent moments, forwarded to
        ``_get_message_and_mask_to_parent``
    :return: list of message arrays (``None`` entries for empty
        messages), or a callable log-pdf function (black-box variational
        inference) which is passed through unmodified
    :raises ValueError: for an invalid parent index or incompatible
        plate multipliers
    """
    if index >= len(self.parents):
        raise ValueError("Parent index larger than the number of parents")

    # Compute the message and mask
    (m, mask) = self._get_message_and_mask_to_parent(index, u_parent=u_parent)
    mask = misc.squeeze(mask)

    # The parent we're sending the message to
    parent = self.parents[index]

    # Plates with respect to the parent
    plates_self = self._plates_to_parent(index)

    # Plate multiplier of the parent
    multiplier_parent = self._plates_multiplier_from_parent(index)

    # A callable message is a log-pdf function (black-box variational
    # inference); pass it through as is.  (An unreachable draft that
    # wrapped the function with plate corrections used to follow this
    # return and has been removed.)
    if callable(m):
        return m

    # Compact the message to a proper shape
    for i in range(len(m)):
        # Empty messages are given as None. We can ignore those.
        if m[i] is not None:
            try:
                r = self.broadcasting_multiplier(self.plates_multiplier,
                                                 multiplier_parent)
            except Exception:
                raise ValueError("The plate multipliers are incompatible. "
                                 "This node (%s) has %s and parent[%d] "
                                 "(%s) has %s"
                                 % (self.name,
                                    self.plates_multiplier,
                                    index,
                                    parent.name,
                                    multiplier_parent))
            ndim = len(parent.dims[i])
            # Source and target shapes
            if ndim > 0:
                dims = misc.broadcasted_shape(np.shape(m[i])[-ndim:],
                                              parent.dims[i])
                from_shape = plates_self + dims
            else:
                from_shape = plates_self
            to_shape = parent.get_shape(i)
            # Add variable axes to the mask
            mask_i = misc.add_trailing_axes(mask, ndim)
            # Apply mask and sum plate axes as necessary (and apply plate
            # multiplier)
            m[i] = r * misc.sum_multiply_to_plates(np.where(mask_i, m[i], 0),
                                                   to_plates=to_shape,
                                                   from_plates=from_shape,
                                                   ndim=0)
    return m
def _message_from_children(self, u_self=None):
msg = [np.zeros(shape) for shape in self.dims]
#msg = [np.array(0.0) for i in range(len(self.dims))]
isfunction = None
for (child,index) in self.children:
m = child._message_to_parent(index, u_parent=u_self)
if callable(m):
if isfunction is False:
raise NotImplementedError()
elif isfunction is None:
msg = m
else:
def join(m1, m2):
return (m1[0] + m2[0], m1[1] + m2[1])
msg = lambda x: join(m(x), msg(x))
isfunction = True
else:
if isfunction is True:
raise NotImplementedError()
else:
isfunction = False
for i in range(len(self.dims)):
if m[i] is not None:
# Check broadcasting shapes
sh = misc.broadcasted_shape(self.get_shape(i), np.shape(m[i]))
try:
# Try exploiting broadcasting rules
msg[i] += m[i]
except ValueError:
msg[i] = msg[i] + m[i]
return msg
def _message_from_parents(self, exclude=None):
return [list(parent._message_to_child())
if ind != exclude else
None
for (ind,parent) in enumerate(self.parents)]
def get_moments(self):
    """Return the list of moment arrays of this node (abstract)."""
    raise NotImplementedError()
def delete(self):
    """
    Delete this node and the children.

    Children are iterated over a snapshot (``list``) because each
    ``child.delete()`` calls back into this node's ``_remove_child``,
    mutating ``self.children`` during the iteration — which raises
    ``RuntimeError`` for a set.
    """
    for (ind, parent) in enumerate(self.parents):
        parent._remove_child(self, ind)
    for (child, _) in list(self.children):
        child.delete()
@staticmethod
def broadcasting_multiplier(plates, *args):
    """Return the correction factor for plates lost to broadcasting.

    Delegates to ``misc.broadcasting_multiplier``; see the commented
    explanation below this method for the motivation.
    """
    return misc.broadcasting_multiplier(plates, *args)
## """
## Compute the plate multiplier for given shapes.
## The first shape is compared to all other shapes (using NumPy
## broadcasting rules). All the elements which are non-unit in the first
## shape but 1 in all other shapes are multiplied together.
## This method is used, for instance, for computing a correction factor for
## messages to parents: If this node has non-unit plates that are unit
## plates in the parent, those plates are summed. However, if the message
## has unit axis for that plate, it should be first broadcasted to the
## plates of this node and then summed to the plates of the parent. In
| |
)
# Create a log entry for the proposal
self.log_user_action(ProposalUserAction.ACTION_SEND_REFERRAL_TO.format(referral.id, self.lodgement_number, '{}({})'.format(user.get_full_name(), user.email)), request)
# Create a log entry for the organisation
if self.applicant:
self.applicant.log_user_action(ProposalUserAction.ACTION_SEND_REFERRAL_TO.format(referral.id, self.lodgement_number, '{}({})'.format(user.get_full_name(), user.email)), request)
# send email
send_referral_email_notification(referral,request)
else:
raise exceptions.ProposalReferralCannotBeSent()
except:
raise
def assign_officer(self,request,officer):
    """Assign ``officer`` to this proposal, as approver or assessor.

    When the processing status is 'with_approver' the officer becomes
    the assigned approver, otherwise the assigned assessor.  Both the
    proposal and (if set) the applicant organisation receive an action
    log entry.  No-op if the officer is already assigned.

    :raises ProposalNotAuthorized: if ``request.user`` cannot assess
    :raises ValidationError: if ``officer`` cannot assess this proposal
    """
    with transaction.atomic():
        try:
            if not self.can_assess(request.user):
                raise exceptions.ProposalNotAuthorized()
            if not self.can_assess(officer):
                raise ValidationError('The selected person is not authorised to be assigned to this proposal')
            if self.processing_status == 'with_approver':
                if officer != self.assigned_approver:
                    self.assigned_approver = officer
                    self.save()
                    # Create a log entry for the proposal
                    self.log_user_action(ProposalUserAction.ACTION_ASSIGN_TO_APPROVER.format(self.lodgement_number, '{}({})'.format(officer.get_full_name(),officer.email)), request)
                    # Create a log entry for the organisation
                    if self.applicant:
                        self.applicant.log_user_action(ProposalUserAction.ACTION_ASSIGN_TO_APPROVER.format(self.lodgement_number, '{}({})'.format(officer.get_full_name(), officer.email)), request)
            else:
                if officer != self.assigned_officer:
                    self.assigned_officer = officer
                    self.save()
                    # Create a log entry for the proposal
                    self.log_user_action(ProposalUserAction.ACTION_ASSIGN_TO_ASSESSOR.format(self.lodgement_number, '{}({})'.format(officer.get_full_name(), officer.email)), request)
                    # Create a log entry for the organisation
                    if self.applicant:
                        self.applicant.log_user_action(ProposalUserAction.ACTION_ASSIGN_TO_ASSESSOR.format(self.lodgement_number, '{}({})'.format(officer.get_full_name(), officer.email)), request)
        except:
            raise
def assing_approval_level_document(self, request):
    """Attach or remove the approval-level document of this proposal.

    NOTE(review): the method name has a typo ("assing" for "assign") but
    is part of the public interface — renaming would break callers.

    ``request.data['approval_level_document']`` is either an uploaded
    file or the string 'null' (meaning: clear the document).  The save
    is versioned so the change appears in the reversion history.

    :return: self (for serializer chaining)
    """
    with transaction.atomic():
        try:
            approval_level_document = request.data['approval_level_document']
            if approval_level_document != 'null':
                try:
                    document = self.documents.get(input_name=str(approval_level_document))
                except ProposalDocument.DoesNotExist:
                    document = self.documents.get_or_create(input_name=str(approval_level_document), name=str(approval_level_document))[0]
                document.name = str(approval_level_document)
                # commenting out below two lines - we want to retain all past attachments - reversion can use them
                #if document._file and os.path.isfile(document._file.path):
                #    os.remove(document._file.path)
                document._file = approval_level_document
                document.save()
                d=ProposalDocument.objects.get(id=document.id)
                self.approval_level_document = d
                comment = 'Approval Level Document Added: {}'.format(document.name)
            else:
                self.approval_level_document = None
                comment = 'Approval Level Document Deleted: {}'.format(request.data['approval_level_document_name'])
            #self.save()
            self.save(version_comment=comment) # to allow revision to be added to reversion history
            self.log_user_action(ProposalUserAction.ACTION_APPROVAL_LEVEL_DOCUMENT.format(self.lodgement_number), request)
            # Create a log entry for the organisation
            if self.applicant:
                self.applicant.log_user_action(ProposalUserAction.ACTION_APPROVAL_LEVEL_DOCUMENT.format(self.lodgement_number), request)
            return self
        except:
            raise
def save_approval_level_comment(self, request):
    """Persist the approval-level comment from the request and log the action.

    Reads ``request.data['approval_level_comment']``, saves it on the
    proposal and writes action-log entries for the proposal and (if set)
    the applicant organisation.

    The previous ``try: ... except: raise`` wrapper was a no-op (it
    re-raised every exception unchanged) and has been removed.

    :return: self (for serializer chaining)
    """
    with transaction.atomic():
        approval_level_comment = request.data['approval_level_comment']
        self.approval_level_comment = approval_level_comment
        self.save()
        self.log_user_action(ProposalUserAction.ACTION_APPROVAL_LEVEL_COMMENT.format(self.lodgement_number), request)
        # Create a log entry for the organisation
        if self.applicant:
            self.applicant.log_user_action(ProposalUserAction.ACTION_APPROVAL_LEVEL_COMMENT.format(self.lodgement_number), request)
        return self
def unassign(self,request):
    """Clear the assigned approver or assessor, depending on status.

    When the processing status is 'with_approver' the assigned approver
    is cleared, otherwise the assigned assessor.  Both the proposal and
    (if set) the applicant organisation receive an action log entry.

    :raises ProposalNotAuthorized: if ``request.user`` cannot assess
    """
    with transaction.atomic():
        try:
            if not self.can_assess(request.user):
                raise exceptions.ProposalNotAuthorized()
            if self.processing_status == 'with_approver':
                if self.assigned_approver:
                    self.assigned_approver = None
                    self.save()
                    # Create a log entry for the proposal
                    self.log_user_action(ProposalUserAction.ACTION_UNASSIGN_APPROVER.format(self.lodgement_number), request)
                    # Create a log entry for the organisation
                    if self.applicant:
                        self.applicant.log_user_action(ProposalUserAction.ACTION_UNASSIGN_APPROVER.format(self.lodgement_number), request)
            else:
                if self.assigned_officer:
                    self.assigned_officer = None
                    self.save()
                    # Create a log entry for the proposal
                    self.log_user_action(ProposalUserAction.ACTION_UNASSIGN_ASSESSOR.format(self.lodgement_number), request)
                    # Create a log entry for the organisation
                    if self.applicant:
                        self.applicant.log_user_action(ProposalUserAction.ACTION_UNASSIGN_ASSESSOR.format(self.lodgement_number), request)
        except:
            raise
def move_to_status(self,request,status, approver_comment):
    """Move the proposal into one of the assessment workflow statuses.

    Only 'with_assessor', 'with_assessor_requirements' and
    'with_approver' are accepted.  When moving away from 'with_approver'
    with a non-empty ``approver_comment``, the comment is saved and a
    send-back notification e-mail is sent.

    :raises ProposalNotAuthorized: if ``request.user`` cannot assess
    :raises ValidationError: for an unknown status or when the proposal
        is with referral / still editable by the customer
    """
    if not self.can_assess(request.user):
        raise exceptions.ProposalNotAuthorized()
    if status in ['with_assessor','with_assessor_requirements','with_approver']:
        if self.processing_status == 'with_referral' or self.can_user_edit:
            raise ValidationError('You cannot change the current status at this time')
        if self.processing_status != status:
            if self.processing_status =='with_approver':
                if approver_comment:
                    self.approver_comment = approver_comment
                    self.save()
                    send_proposal_approver_sendback_email_notification(request, self)
            self.processing_status = status
            self.save()
            # Create a log entry for the proposal
            if self.processing_status == self.PROCESSING_STATUS_WITH_ASSESSOR:
                self.log_user_action(ProposalUserAction.ACTION_BACK_TO_PROCESSING.format(self.lodgement_number), request)
            elif self.processing_status == self.PROCESSING_STATUS_WITH_ASSESSOR_REQUIREMENTS:
                self.log_user_action(ProposalUserAction.ACTION_ENTER_REQUIREMENTS.format(self.lodgement_number), request)
    else:
        raise ValidationError('The provided status cannot be found.')
def reissue_approval(self,request,status):
    """Put an approved proposal back into processing so its licence can be reissued.

    Three paths: Site Transfer applications (marks both originating and
    target approvals as reissued), other apiary applications, and regular
    proposals.  The last two require the user to be in the relevant
    approver group and the approval to be reissuable.

    :raises ValidationError: if the proposal is not approved or the
        approval cannot be reissued by this user
    """
    with transaction.atomic():
        if not self.processing_status=='approved' :
            raise ValidationError('You cannot change the current status at this time')
        elif self.application_type.name == 'Site Transfer' and self.__approver_group() in request.user.apiaryapprovergroup_set.all():
            # track changes to apiary sites and proposal requirements in save() methods instead
            self.processing_status = status
            #self.self_clone = copy.deepcopy(self)
            #self.self_clone.id = None
            #self.self_clone.save()
            self.save()
            #self.proposal_apiary.self_clone = copy.deepcopy(self.proposal_apiary)
            #self.proposal_apiary.self_clone.id = None
            #self.proposal_apiary.self_clone.save()
            self.proposal_apiary.reissue_originating_approval = False
            self.proposal_apiary.reissue_target_approval = False
            self.proposal_apiary.save()
            self.proposal_apiary.originating_approval.reissued = True
            self.proposal_apiary.originating_approval.save()
            self.proposal_apiary.target_approval.reissued = True
            self.proposal_apiary.target_approval.save()
        elif self.approval and self.approval.can_reissue:
            # Apiary logic in first condition
            if self.apiary_group_application_type and self.__approver_group() in request.user.apiaryapprovergroup_set.all():
                self.processing_status = status
                self.save()
                self.approval.reissued=True
                self.approval.save()
                # Create a log entry for the proposal
                self.log_user_action(ProposalUserAction.ACTION_REISSUE_APPROVAL.format(self.lodgement_number), request)
            elif self.__approver_group() in request.user.proposalapprovergroup_set.all():
                self.processing_status = status
                self.save()
                self.approval.reissued=True
                self.approval.save()
                # Create a log entry for the proposal
                self.log_user_action(ProposalUserAction.ACTION_REISSUE_APPROVAL.format(self.lodgement_number), request)
            else:
                raise ValidationError('Cannot reissue Approval')
        else:
            raise ValidationError('Cannot reissue Approval')
def proposed_decline(self,request,details):
    """Record an assessor's proposal to decline and hand over to the approver.

    Stores/updates the decline reason and cc-email, flags the proposal as
    proposed-to-decline, moves it to 'with_approver', logs the action and
    notifies the approver by e-mail.

    :param dict details: expects 'reason' and optionally 'cc_email'
    :raises ProposalNotAuthorized: if ``request.user`` cannot assess
    :raises ValidationError: if the proposal is not with the assessor
    """
    with transaction.atomic():
        try:
            if not self.can_assess(request.user):
                raise exceptions.ProposalNotAuthorized()
            if self.processing_status != 'with_assessor':
                raise ValidationError('You cannot propose to decline if it is not with assessor')
            reason = details.get('reason')
            ProposalDeclinedDetails.objects.update_or_create(
                proposal = self,
                defaults={'officer': request.user, 'reason': reason, 'cc_email': details.get('cc_email',None)}
            )
            self.proposed_decline_status = True
            approver_comment = ''
            self.move_to_status(request,'with_approver', approver_comment)
            # Log proposal action
            self.log_user_action(ProposalUserAction.ACTION_PROPOSED_DECLINE.format(self.lodgement_number), request)
            # Log entry for organisation
            if self.applicant:
                self.applicant.log_user_action(ProposalUserAction.ACTION_PROPOSED_DECLINE.format(self.lodgement_number), request)
            send_approver_decline_email_notification(reason, request, self)
        except:
            raise
def final_decline(self,request,details):
    """Finalise the decline of a proposal (approver step).

    Stores/updates the decline details, sets processing and customer
    status to 'declined', updates apiary site statuses when applicable,
    logs the action and notifies the applicant by e-mail.

    :param dict details: expects 'reason' and optionally 'cc_email'
    :raises ProposalNotAuthorized: if ``request.user`` cannot assess
    :raises ValidationError: if the proposal is not with the approver
    """
    with transaction.atomic():
        try:
            if not self.can_assess(request.user):
                raise exceptions.ProposalNotAuthorized()
            if self.processing_status != 'with_approver':
                raise ValidationError('You cannot decline if it is not with approver')
            proposal_decline, success = ProposalDeclinedDetails.objects.update_or_create(
                proposal = self,
                defaults={'officer':request.user,'reason':details.get('reason'),'cc_email':details.get('cc_email',None)}
            )
            self.proposed_decline_status = True
            self.processing_status = 'declined'
            self.customer_status = 'declined'
            self.save()
            if self.proposal_apiary:
                # Update apiary site status
                self.proposal_apiary.final_decline()
            # Log proposal action
            self.log_user_action(ProposalUserAction.ACTION_DECLINE.format(self.lodgement_number), request)
            # Log entry for organisation
            if self.applicant:
                self.applicant.log_user_action(ProposalUserAction.ACTION_DECLINE.format(self.lodgement_number), request)
            send_proposal_decline_email_notification(self,request, proposal_decline)
        except:
            raise
def preview_approval(self,request,details):
    """Generate a preview licence PDF without persisting anything.

    For apiary site transfers the preview is generated from the
    originating or target approval; otherwise a temporary approval is
    created, its document rendered, and the whole transaction rolled
    back so no preview data remains in the database.

    :param dict details: expects 'due_date' and 'start_date' as 'dd/mm/YYYY'
        strings (only used on the non-site-transfer path)
    :return: BytesIO buffer with the rendered licence document
    :raises ProposalNotAuthorized: if ``request.user`` cannot assess
    :raises ValidationError: wrong status or missing applicant address
    """
    with transaction.atomic():
        try:
            if self.processing_status != 'with_approver':
                raise ValidationError('Licence preview only available when processing status is with_approver. Current status {}'.format(self.processing_status))
            if not self.can_assess(request.user):
                raise exceptions.ProposalNotAuthorized()
            #if not self.applicant.organisation.postal_address:
            if not self.relevant_applicant_address:
                raise ValidationError('The applicant needs to have set their postal address before approving this proposal.')
            lodgement_number = self.previous_application.approval.lodgement_number if self.proposal_type in ['renewal', 'amendment'] else '' # renewals/amendments keep same licence number
            # Apiary Site Transfer logic
            form_data_str = request.POST.get('formData')
            form_data = json.loads(form_data_str)
            #if isinstance(form_data, list):
            originating_approval_id = form_data.get('originating_approval_id')
            target_approval_id = form_data.get('target_approval_id')
            licence_buffer = None
            if originating_approval_id:
                preview_approval = Approval.objects.get(id=originating_approval_id)
                licence_buffer = preview_approval.generate_apiary_site_transfer_doc(
                    request.user,
                    site_transfer_proposal=self,
                    preview=True
                )
            elif target_approval_id:
                preview_approval = Approval.objects.get(id=target_approval_id)
                licence_buffer = preview_approval.generate_apiary_site_transfer_doc(
                    request.user,
                    site_transfer_proposal=self,
                    preview=True
                )
            # All other logic
            else:
                preview_approval = PreviewTempApproval.objects.create(
                    current_proposal = self,
                    issue_date = timezone.now(),
                    expiry_date = datetime.datetime.strptime(details.get('due_date'), '%d/%m/%Y').date(),
                    start_date = datetime.datetime.strptime(details.get('start_date'), '%d/%m/%Y').date(),
                    #submitter = self.submitter,
                    #org_applicant = self.applicant if isinstance(self.applicant, Organisation) else None,
                    #proxy_applicant = self.applicant if isinstance(self.applicant, EmailUser) else None,
                    applicant = self.applicant,
                    proxy_applicant = self.proxy_applicant,
                    lodgement_number = lodgement_number,
                    apiary_approval = self.apiary_group_application_type,
                )
                # Generate the preview document - get the value of the BytesIO buffer
                licence_buffer = preview_approval.generate_doc(request.user, preview=True)
                # clean temp preview licence object
                transaction.set_rollback(True)
            return licence_buffer
        except:
            raise
def proposed_approval(self,request,details):
    """Record an assessor's proposal to approve and hand over to the approver.

    Builds ``proposed_issuance_approval`` (start/expiry dates, details,
    cc e-mail) with apiary-specific rules, moves the proposal to
    'with_approver', records the apiary site selections (and any site
    coordinates moved by the assessor/approver), logs the action and
    notifies the approver by e-mail.

    :param dict details: expects optional datetime 'start_date' /
        'expiry_date' plus 'details' and 'cc_email'
    :raises ProposalNotAuthorized: if ``request.user`` cannot assess
    :raises ValidationError: if not in 'with_assessor_requirements'
    """
    with transaction.atomic():
        #import ipdb; ipdb.set_trace()
        try:
            if not self.can_assess(request.user):
                raise exceptions.ProposalNotAuthorized()
            if self.processing_status != 'with_assessor_requirements':
                raise ValidationError('You cannot propose for approval if it is not with assessor for requirements')
            # Do not accept new start and expiry dates for Apiary group applications with a licence, unless the licence has been reissued
            start_date = details.get('start_date').strftime('%d/%m/%Y') if details.get('start_date') else None
            expiry_date = details.get('expiry_date').strftime('%d/%m/%Y') if details.get('expiry_date') else None
            #if self.apiary_group_application_type:
            if self.application_type.name == 'Apiary':
                if self.approval and (self.approval.reissued or self.proposal_type == 'renewal'):
                    self.proposed_issuance_approval = {
                        'start_date' : start_date,
                        'expiry_date' : expiry_date,
                        'details' : details.get('details'),
                        'cc_email' : details.get('cc_email'),
                    }
                elif self.proposed_issuance_approval:
                    # Keep previously proposed dates when present
                    self.proposed_issuance_approval = {
                        'start_date' : self.proposed_issuance_approval.get('start_date') if self.proposed_issuance_approval.get('start_date') else details.get('start_date').strftime('%d/%m/%Y'),
                        'expiry_date' : self.proposed_issuance_approval.get('expiry_date') if self.proposed_issuance_approval.get('expiry_date') else details.get('expiry_date').strftime('%d/%m/%Y'),
                        'details' : details.get('details'),
                        'cc_email' : details.get('cc_email'),
                    }
                else:
                    self.proposed_issuance_approval = {
                        'start_date' : start_date,
                        'expiry_date' : expiry_date,
                        'details' : details.get('details'),
                        'cc_email' : details.get('cc_email'),
                    }
            # non-apiary Proposals & Apiary Site Transfers
            else:
                self.proposed_issuance_approval = {
                    'start_date' : start_date,
                    'expiry_date' : expiry_date,
                    'details' : details.get('details'),
                    'cc_email' : details.get('cc_email'),
                }
            self.proposed_decline_status = False
            approver_comment = ''
            self.move_to_status(request,'with_approver', approver_comment)
            self.assigned_officer = None
            apiary_sites = request.data.get('apiary_sites', None)
            apiary_sites_list = []
            if apiary_sites:
                # When new apiary proposal
                if self.application_type.name == ApplicationType.APIARY:
                    for apiary_site in apiary_sites:
                        my_site = ApiarySite.objects.get(id=apiary_site['id'])
                        # my_site.workflow_selected_status = apiary_site['checked']
                        self.proposal_apiary.set_workflow_selected_status(my_site, apiary_site.get('checked'))
                        if apiary_site.get('checked'):
                            apiary_sites_list.append(apiary_site.get('id'))
                        # my_site.save()
                        if apiary_site.get('checked') and 'coordinates_moved' in apiary_site:
                            relation = self.proposal_apiary.get_relation(my_site)
                            prev_coordinates = relation.wkb_geometry_processed.get_coords()
                            # Update coordinate (Assessor and Approver can move the proposed site location)
                            geom_str = GEOSGeometry('POINT(' + str(apiary_site['coordinates_moved']['lng']) + ' ' + str(apiary_site['coordinates_moved']['lat']) + ')', srid=4326)
                            # from disturbance.components.proposals.serializers_apiary import ApiarySiteSavePointPendingSerializer
                            # serializer = ApiarySiteSavePointPendingSerializer(my_site, data={'wkb_geometry_pending': geom_str}, context={'validate_distance': True})
                            from disturbance.components.proposals.serializers_apiary import ApiarySiteOnProposalProcessedGeometrySaveSerializer
                            serializer = ApiarySiteOnProposalProcessedGeometrySaveSerializer(relation, data={'wkb_geometry_processed': geom_str})
                            serializer.is_valid(raise_exception=True)
                            serializer.save()
                            # Log it
                            self.log_user_action(ProposalUserAction.APIARY_SITE_MOVED.format(apiary_site['id'], prev_coordinates, (apiary_site['coordinates_moved']['lng'], apiary_site['coordinates_moved']['lat'])), request)
                # Site transfer
                elif self.application_type.name == ApplicationType.SITE_TRANSFER:
                    for apiary_site in apiary_sites:
                        transfer_site = SiteTransferApiarySite.objects.get(
                            proposal_apiary=self.proposal_apiary,
                            apiary_site_on_approval__apiary_site__id=apiary_site.get('id')
                        )
                        transfer_site.internal_selected = apiary_site.get('checked') if transfer_site.customer_selected else False
                        if apiary_site.get('checked'):
                            apiary_sites_list.append(apiary_site.get('id'))
                        transfer_site.save()
            self.save()
            # Log proposal action
            if self.apiary_group_application_type:
                if self.application_type and self.application_type.name == ApplicationType.SITE_TRANSFER:
                    target_approval_lodgement_number = (self.proposal_apiary.target_approval.lodgement_number if
                            self.proposal_apiary.target_approval else '')
                    self.log_user_action(ProposalUserAction.ACTION_PROPOSED_APIARY_APPROVAL_SITE_TRANSFER.format(
                        self.lodgement_number,
                        self.proposal_apiary.originating_approval.lodgement_number,
                        target_approval_lodgement_number,
                        str(apiary_sites_list).lstrip('[').rstrip(']')
                    ), request)
                else:
                    self.log_user_action(ProposalUserAction.ACTION_PROPOSED_APIARY_APPROVAL.format(
                        self.lodgement_number,
                        self.proposed_issuance_approval.get('start_date'),
                        self.proposed_issuance_approval.get('expiry_date'),
                        str(apiary_sites_list).lstrip('[').rstrip(']')
                    ), request)
            else:
                self.log_user_action(ProposalUserAction.ACTION_PROPOSED_APPROVAL.format(self.lodgement_number), request)
            # Log entry for organisation
            if self.applicant:
                if self.apiary_group_application_type:
                    self.applicant.log_user_action(ProposalUserAction.ACTION_PROPOSED_APIARY_APPROVAL.format(
                        self.lodgement_number,
                        self.proposed_issuance_approval.get('start_date'),
                        self.proposed_issuance_approval.get('expiry_date'),
                        str(apiary_sites_list).lstrip('[').rstrip(']')
                        #', '.join(apiary_sites_list)
                    ),request)
                else:
                    self.applicant.log_user_action(ProposalUserAction.ACTION_PROPOSED_APPROVAL.format(self.lodgement_number), request)
            send_approver_approve_email_notification(request, self)
        except:
            raise
def final_approval_temp_use(self, request):
with transaction.atomic():
try:
if not self.can_assess(request.user):
raise exceptions.ProposalNotAuthorized()
if self.processing_status != | |
# scratch.py — scalemixture_spline scratch experiments (repo: likun-stat/scalemixture_spline)
import os
os.chdir("/Users/LikunZhang/Desktop/PyCode/")
import scalemixture_py.integrate as utils
import scalemixture_py.priors as priors
import scalemixture_py.generic_samplers as sampler
import numpy as np
import cProfile
import matplotlib.pyplot as plt
from scipy.stats import uniform
from scipy.stats import norm
# ------------ 1. Simulation settings -------------
range = 1 # Matern range
nu = 3/2 # Matern smoothness
n_s = 100 # Number of sites
n_t = 64 # Number of time points
tau_sqd = 10 # Nugget SD
delta = 0.55 # For R
prob_below=0.9
prob_above=0.999
# -------------- 2. Generate covariance matrix -----------------
# Calculate distance between rows of 'Y', and return as distance matrix
np.random.seed(seed=1234)
from scipy.spatial import distance
Stations = np.c_[uniform.rvs(0,5,n_s),uniform.rvs(0,5,n_s)]
# plt.scatter(Stations[:,0],Stations[:,1])
S = distance.squareform(distance.pdist(Stations))
Cor = utils.corr_fn(S, np.array([range,nu]))
eig_Cor = np.linalg.eigh(Cor) #For symmetric matrices
V = eig_Cor[1]
d = eig_Cor[0]
R = utils.rPareto(n_t,1,1)
X = np.empty((n_s,n_t))
X[:] = np.nan
X_s = np.empty((n_s,n_t))
X_s[:] = np.nan
Z = np.empty((n_s,n_t))
Z[:] = np.nan
for idx, r in enumerate(R):
Z_t = utils.eig2inv_times_vector(V, np.sqrt(d), norm.rvs(size=n_s))
Z_to_W_s = 1/(1-norm.cdf(Z_t))
tmp = (r**(delta/(1-delta)))*Z_to_W_s
X_s[: ,idx] = tmp
X[:,idx] = tmp + np.sqrt(tau_sqd)*norm.rvs(size=n_s)
Z[:,idx] = Z_t
# ------------ 3. Marginal transformation -----------------
Lon_lat = Stations
Design_mat = np.c_[np.repeat(1,n_s), Lon_lat[:,1]]
n_covariates = Design_mat.shape[1]
beta_loc0 = np.array([0.2,-1])
loc0 = Design_mat @beta_loc0
beta_loc1 = np.array([0.1, -0.1])
loc1 = Design_mat @beta_loc1
Time = np.arange(n_t)
Loc = np.tile(loc0, n_t) + np.tile(loc1, n_t)*np.repeat(Time,n_s)
Loc = Loc.reshape((n_s,n_t),order='F')
beta_scale = np.array([0.1,1])
scale = Design_mat @beta_scale
Scale = np.tile(scale, n_t)
Scale = Scale.reshape((n_s,n_t),order='F')
beta_shape = np.array([-0.02,0.2])
shape = Design_mat @beta_shape
Shape = np.tile(shape, n_t)
Shape = Shape.reshape((n_s,n_t),order='F')
Y = utils.scalemix_me_2_gev(X, delta, tau_sqd, Loc, Scale, Shape)
unifs = utils.pgev(Y, Loc, Scale, Shape)
cen = unifs < prob_below
cen_above = unifs > prob_above
thresh_X = utils.qmixture_me_interp(prob_below, delta = delta, tau_sqd = tau_sqd)
thresh_X_above = utils.qmixture_me_interp(prob_above, delta = delta, tau_sqd = tau_sqd)
# ------------ 4. Save initial values -----------------
initial_values = {'delta':delta,
                  'tau_sqd':tau_sqd,
                  'prob_below':prob_below,
                  'prob_above':prob_above,
                  'Dist':S,
                  # NOTE(review): 'range' is presumably a spatial-range
                  # parameter defined earlier in the script that shadows the
                  # builtin -- confirm before reusing this name.
                  'theta_c':np.array([range,nu]),
                  'X':X,
                  'Z':Z,
                  'R':R,
                  'Design_mat':Design_mat,
                  'beta_loc0':beta_loc0,
                  'beta_loc1':beta_loc1,
                  'Time':Time,
                  'beta_scale':beta_scale,
                  'beta_shape':beta_shape,
                  }

n_updates = 1001
# Initial Metropolis step sizes; the 2.4^2 factor follows the standard
# optimal-scaling heuristic, divided by dimension for vector parameters.
sigma_m = {'delta':2.4**2,
           'tau_sqd':2.4**2,
           'theta_c':2.4**2/2,
           'Z_onetime':np.repeat(np.sqrt(tau_sqd),n_s),
           'R_1t':2.4**2,
           'beta_loc0':2.4**2/n_covariates,
           'beta_loc1':2.4**2/n_covariates,
           'beta_scale':2.4**2/n_covariates,
           'beta_shape':2.4**2/n_covariates,
           }
# Initial proposal covariance matrices for the block updates.
prop_sigma = {'theta_c':np.eye(2),
              'beta_loc0':np.eye(n_covariates),
              'beta_loc1':np.eye(n_covariates),
              'beta_scale':np.eye(n_covariates)
              ,
              'beta_shape':np.eye(n_covariates)
              }

from pickle import dump
# Persist the simulated data and sampler settings for the MCMC test driver.
with open('./test_scalemix.pkl', 'wb') as f:
    dump(Y, f)
    dump(cen, f)
    dump(cen_above,f)
    dump(initial_values, f)
    dump(sigma_m, f)
    dump(prop_sigma, f)
## ---------------------------------------------------------
## ----------------------- For delta -----------------------
## ---------------------------------------------------------
def test(delta):
    # Full-conditional log-likelihood for delta at a trial value.
    return utils.delta_update_mixture_me_likelihood(Y, delta, R, Z, cen, cen_above, prob_below, prob_above,
                                                    Loc, Scale, Shape, tau_sqd)

# Grid evaluation of the likelihood around the true value delta = 0.55.
Delta = np.arange(0.54,0.56,step=0.001)
Lik = np.zeros(len(Delta))
for idx, delt in enumerate(Delta):
    Lik[idx] = test(delt)
plt.plot(Delta, Lik, color='gray', linestyle='solid')
plt.axvline(0.55, color='r', linestyle='--');

# cProfile.run('Res = sampler.static_metr(R, 0.55, utils.delta_update_mixture_me_likelihood, priors.interval_unif, np.array([0.1,0.7]),1000, np.nan, 0.005, True, Y, X_s, cen, prob_below, Loc, Scale, Shape, tau_sqd)')
random_generator = np.random.RandomState()
# Fixed-step Metropolis for delta (uniform prior on [0.1, 0.7]).
Res = sampler.static_metr(Y, 0.55, utils.delta_update_mixture_me_likelihood, priors.interval_unif,
                          np.array([0.1,0.7]),1000,
                          random_generator,
                          np.nan, 5.3690987e-03, True,
                          R, Z, cen, cen_above, prob_below, prob_above, Loc, Scale, Shape, tau_sqd)
plt.plot(np.arange(1000),Res['trace'][0,:],linestyle='solid')

# Adaptive Metropolis for delta; the trace should hover around 0.55.
Res = sampler.adaptive_metr(Y, 0.55, utils.delta_update_mixture_me_likelihood, priors.interval_unif,
                            np.array([0.1,0.7]),5000,
                            random_generator,
                            np.nan, False, False,
                            .234, 10, .8, 10,
                            R, Z, cen, cen_above, prob_below, prob_above, Loc, Scale, Shape, tau_sqd)
plt.plot(np.arange(5000),Res['trace'][0,:],linestyle='solid')
plt.hlines(0.55, 0, 5000, colors='r', linestyles='--');
## -------------------------------------------------------
## ----------------------- For tau -----------------------
## -------------------------------------------------------
def test(tau_sqd):
    # Full-conditional log-likelihood for the nugget variance tau^2.
    return utils.tau_update_mixture_me_likelihood(Y, tau_sqd, X_s, cen, cen_above, prob_below, prob_above, Loc, Scale, Shape, delta)

# Grid evaluation around the true tau_sqd.
Tau = np.arange(8.5,11.5,step=0.1)
Lik = np.zeros(len(Tau))
for idx, t in enumerate(Tau):
    Lik[idx] = test(t)
plt.plot(Tau, Lik, linestyle='solid')
plt.axvline(tau_sqd, color='r', linestyle='--');

cProfile.run('Res = sampler.static_metr(R, 4, utils.tau_update_mixture_me_likelihood, priors.invGamma_prior, np.array([0.1,0.1]),1000, np.nan, 1, True, Y, X_s, cen, prob_below, Loc, Scale, Shape, delta)')
# Fixed-step Metropolis for tau_sqd (inverse-gamma prior).
Res = sampler.static_metr(Y, 4, utils.tau_update_mixture_me_likelihood, priors.invGamma_prior,
                          np.array([0.1,0.1]),1000,
                          random_generator,
                          np.nan, 2.03324631, True,
                          X_s, cen, cen_above, prob_below, prob_above, Loc, Scale, Shape, delta)
plt.plot(np.arange(1000),Res['trace'][0,:],linestyle='solid')

# Adaptive Metropolis for tau_sqd.
Res = sampler.adaptive_metr(Y, 4, utils.tau_update_mixture_me_likelihood, priors.invGamma_prior,
                            np.array([0.1,0.1]),5000,
                            random_generator,
                            np.nan, False, False,
                            .234, 10, .8, 10,
                            X_s, cen, cen_above, prob_below, prob_above, Loc, Scale, Shape, delta)
plt.plot(np.arange(5000),Res['trace'][0,:],linestyle='solid')
plt.hlines(tau_sqd, 0, 5000, colors='r', linestyles='--');
## --------------------------------------------------------------
## ----------------------- For GEV params -----------------------
## --------------------------------------------------------------
# (1) loc0: 0.2,-1
def test(x):
    # Likelihood profile in the loc0 intercept, slope held at -1.
    return utils.loc0_gev_update_mixture_me_likelihood(Design_mat, np.array([x,-1]), Y, X_s, cen, cen_above, prob_below, prob_above,
                                                       delta, tau_sqd, loc1, Scale, Shape, Time, thresh_X, thresh_X_above)

Coef = np.arange(0.18,0.3,step=0.003)
Lik = np.zeros(len(Coef))
for idx, coef in enumerate(Coef):
    Lik[idx] = test(coef)
plt.plot(Coef, Lik, linestyle='solid')
plt.axvline(0.2, color='r', linestyle='--');

def test(x):
    # Likelihood profile in the loc0 slope, intercept held at 0.2.
    return utils.loc0_gev_update_mixture_me_likelihood(Design_mat, np.array([0.2,x]), Y, X_s, cen, cen_above, prob_below, prob_above,
                                                       delta, tau_sqd, loc1, Scale, Shape, Time, thresh_X, thresh_X_above)

Coef = np.arange(-1.2,-0.8,step=0.03)
Lik = np.zeros(len(Coef))
for idx, coef in enumerate(Coef):
    Lik[idx] = test(coef)
plt.plot(Coef, Lik, linestyle='solid')
plt.axvline(-1, color='r', linestyle='--');

# Adaptive run to learn a proposal covariance for the loc0 pair ...
Res = sampler.adaptive_metr(Design_mat, np.array([0.2,-1]), utils.loc0_gev_update_mixture_me_likelihood,
                            priors.unif_prior, 20, 5000,
                            random_generator,
                            np.nan, True,
                            False, .234, 10, .8, 10,
                            Y, X_s, cen, cen_above, prob_below, prob_above,
                            delta, tau_sqd, loc1, Scale, Shape, Time, thresh_X, thresh_X_above)
prop_Sigma=np.cov(Res['trace'])
# ... then a static run using that learned covariance.
Res = sampler.static_metr(Design_mat, np.array([0.2,-1]), utils.loc0_gev_update_mixture_me_likelihood,
                          priors.unif_prior, 20, 5000,
                          random_generator,
                          prop_Sigma, 1, True,
                          Y, X_s, cen, cen_above, prob_below, prob_above,
                          delta, tau_sqd, loc1, Scale, Shape, Time, thresh_X, thresh_X_above)
plt.plot(np.arange(5000),Res['trace'][0,:], color='gray',linestyle='solid')
plt.hlines(0.2, 0, 5000, colors='r', linestyles='--');
plt.plot(np.arange(5000),Res['trace'][1,:], color='gray',linestyle='solid')
plt.hlines(-1, 0, 5000, colors='r', linestyles='--');
plt.plot(*Res['trace'])
plt.show()

def tmpf(x,y):
    # 2-D likelihood surface over (intercept, slope) for contour plotting.
    return utils.loc0_gev_update_mixture_me_likelihood(Design_mat, np.array([x,y]), Y, X_s, cen, cen_above, prob_below, prob_above,
                                                       delta, tau_sqd, loc1, Scale, Shape, Time, thresh_X, thresh_X_above)

try_size = 50
x = np.linspace(0.18, 0.22, try_size)
y = np.linspace(-1.1, -0.92, try_size)
# NOTE(review): this rebinds Z (the latent field saved above) to a plotting
# grid; later sections appear not to use the latent Z, but confirm.
Z = np.empty((try_size,try_size))
for idy,yi in enumerate(y):
    for idx,xi in enumerate(x):
        Z[idy,idx] = tmpf(xi,yi)
plt.contourf(x, y, Z, 20, cmap='RdGy')
plt.colorbar();

## Not seem to be better
Res = sampler.adaptive_metr_ratio(Design_mat, np.array([0.2,-1]), utils.loc0_gev_update_mixture_me_likelihood,
                                  priors.unif_prior, 20, 5000,
                                  random_generator,
                                  prop_Sigma, -0.2262189, 0.2827393557113686, True,
                                  False, .234, 10, .8, 10,
                                  Y, X_s, cen, cen_above, prob_below, prob_above,
                                  delta, tau_sqd, loc1, Scale, Shape, Time, thresh_X, thresh_X_above)
# (2) loc1: 0.1, -0.1
def test(x):
    # Likelihood profile in the loc1 intercept, slope held at -0.1.
    return utils.loc1_gev_update_mixture_me_likelihood(Design_mat, np.array([x,-0.1]), Y, X_s, cen, cen_above, prob_below, prob_above,
                                                       delta, tau_sqd, loc0, Scale, Shape, Time, thresh_X, thresh_X_above)

Coef = np.arange(0.095,0.12,step=0.0005)
Lik = np.zeros(len(Coef))
for idx, coef in enumerate(Coef):
    Lik[idx] = test(coef)
plt.plot(Coef, Lik, linestyle='solid')

def test(x):
    # Likelihood profile in the loc1 slope, intercept held at 0.1.
    return utils.loc1_gev_update_mixture_me_likelihood(Design_mat, np.array([0.1,x]), Y, X_s, cen, cen_above, prob_below, prob_above,
                                                       delta, tau_sqd, loc0, Scale, Shape, Time, thresh_X, thresh_X_above)

Coef = np.arange(-0.11,-0.08,step=0.001)
Lik = np.zeros(len(Coef))
for idx, coef in enumerate(Coef):
    Lik[idx] = test(coef)
plt.plot(Coef, Lik, linestyle='solid')

# Adaptive Metropolis for the loc1 coefficient pair; truth is (0.1, -0.1).
Res = sampler.adaptive_metr(Design_mat, np.array([0.1,-0.1]), utils.loc1_gev_update_mixture_me_likelihood,
                            priors.unif_prior, 20, 5000,
                            random_generator,
                            np.nan, True,
                            False, .234, 10, .8, 10,
                            Y, X_s, cen, cen_above, prob_below, prob_above,
                            delta, tau_sqd, loc0, Scale, Shape, Time, thresh_X, thresh_X_above)
plt.plot(np.arange(5000),Res['trace'][0,:], color='gray',linestyle='solid')
plt.hlines(0.1, 0, 5000, colors='r', linestyles='--');
plt.plot(np.arange(5000),Res['trace'][1,:], color='gray',linestyle='solid')
plt.hlines(-0.1, 0, 5000, colors='r', linestyles='--');
plt.plot(*Res['trace'])
plt.show()

def tmpf(x,y):
    # 2-D likelihood surface for contour plotting.
    return utils.loc1_gev_update_mixture_me_likelihood(Design_mat, np.array([x,y]), Y, X_s, cen, cen_above, prob_below, prob_above,
                                                       delta, tau_sqd, loc0, Scale, Shape, Time, thresh_X, thresh_X_above)

try_size = 50
x = np.linspace(0.098, 0.103, try_size)
y = np.linspace(-0.103, -0.096, try_size)
Z = np.empty((try_size,try_size))
for idy,yi in enumerate(y):
    for idx,xi in enumerate(x):
        Z[idy,idx] = tmpf(xi,yi)
plt.contourf(x, y, Z, 20, cmap='RdGy')
plt.colorbar();
# (3) scale: 0.1,1
def test(x):
    # Likelihood profile in the scale intercept, slope held at 1.
    return utils.scale_gev_update_mixture_me_likelihood(Design_mat, np.array([x,1]), Y, X_s, cen, cen_above, prob_below, prob_above,
                                                        delta, tau_sqd, Loc, Shape, Time, thresh_X, thresh_X_above)

Coef = np.arange(0.095,0.12,step=0.0005)
Lik = np.zeros(len(Coef))
for idx, coef in enumerate(Coef):
    Lik[idx] = test(coef)
plt.plot(Coef, Lik, linestyle='solid')

def test(x):
    # Likelihood profile in the scale slope, intercept held at 0.1.
    return utils.scale_gev_update_mixture_me_likelihood(Design_mat, np.array([0.1,x]), Y, X_s, cen, cen_above, prob_below, prob_above,
                                                        delta, tau_sqd, Loc, Shape, Time, thresh_X, thresh_X_above)

Coef = np.arange(0.975,1.2,step=0.001)
Lik = np.zeros(len(Coef))
for idx, coef in enumerate(Coef):
    Lik[idx] = test(coef)
plt.plot(Coef, Lik, linestyle='solid')

# Adaptive Metropolis for the scale coefficient pair; truth is (0.1, 1).
Res = sampler.adaptive_metr(Design_mat, np.array([0.1,1]), utils.scale_gev_update_mixture_me_likelihood,
                            priors.unif_prior, 20, 5000,
                            random_generator,
                            np.nan, True,
                            False, .234, 10, .8, 10,
                            Y, X_s, cen, cen_above, prob_below, prob_above,
                            delta, tau_sqd, Loc, Shape, Time, thresh_X, thresh_X_above)
plt.plot(np.arange(5000),Res['trace'][0,:], color='gray',linestyle='solid')
plt.hlines(0.1, 0, 5000, colors='r', linestyles='--');
plt.plot(np.arange(5000),Res['trace'][1,:], color='gray',linestyle='solid')
plt.hlines(1, 0, 5000, colors='r', linestyles='--');
plt.plot(*Res['trace'])
plt.show()

def tmpf(x,y):
    # 2-D likelihood surface for contour plotting.
    return utils.scale_gev_update_mixture_me_likelihood(Design_mat, np.array([x,y]), Y, X_s, cen, cen_above, prob_below, prob_above,
                                                        delta, tau_sqd, Loc, Shape, Time, thresh_X, thresh_X_above)

try_size = 50
x = np.linspace(0.097, 0.104, try_size)
y = np.linspace(0.996, 1.005, try_size)
Z = np.empty((try_size,try_size))
for idy,yi in enumerate(y):
    for idx,xi in enumerate(x):
        Z[idy,idx] = tmpf(xi,yi)
plt.contourf(x, y, Z, 20, cmap='RdGy')
plt.colorbar();
# (4) shape: -0.02,0.2
def test(x):
    # Likelihood profile in the shape intercept, slope held at 0.2.
    return utils.shape_gev_update_mixture_me_likelihood(Design_mat, np.array([x,0.2]), Y, X_s, cen, cen_above, prob_below, prob_above,
                                                        delta, tau_sqd, Loc, Scale, Time, thresh_X, thresh_X_above)

Coef = np.arange(-0.03,0.,step=0.0005)
Lik = np.zeros(len(Coef))
for idx, coef in enumerate(Coef):
    Lik[idx] = test(coef)
plt.plot(Coef, Lik, linestyle='solid')

def test(x):
    # Likelihood profile in the shape slope, intercept held at -0.02.
    return utils.shape_gev_update_mixture_me_likelihood(Design_mat, np.array([-0.02,x]), Y, X_s, cen, cen_above, prob_below, prob_above,
                                                        delta, tau_sqd, Loc, Scale, Time, thresh_X, thresh_X_above)

Coef = np.arange(0.18,0.3,step=0.001)
Lik = np.zeros(len(Coef))
for idx, coef in enumerate(Coef):
    Lik[idx] = test(coef)
plt.plot(Coef, Lik, linestyle='solid')

# Adaptive Metropolis for the shape coefficient pair; truth is (-0.02, 0.2).
Res = sampler.adaptive_metr(Design_mat, np.array([-0.02,0.2]), utils.shape_gev_update_mixture_me_likelihood,
                            priors.unif_prior, 20, 5000,
                            random_generator,
                            np.nan, True,
                            False, .234, 10, .8, 10,
                            Y, X_s, cen, cen_above, prob_below, prob_above,
                            delta, tau_sqd, Loc, Scale, Time, thresh_X, thresh_X_above)
plt.plot(np.arange(5000),Res['trace'][0,:], color='gray',linestyle='solid')
plt.hlines(-0.02, 0, 5000, colors='r', linestyles='--');
plt.plot(np.arange(5000),Res['trace'][1,:], color='gray',linestyle='solid')
plt.hlines(0.2, 0, 5000, colors='r', linestyles='--');
plt.plot(*Res['trace'])
plt.show()

def tmpf(x,y):
    # 2-D likelihood surface for contour plotting.
    return utils.shape_gev_update_mixture_me_likelihood(Design_mat, np.array([x,y]), Y, X_s, cen, cen_above, prob_below, prob_above,
                                                        delta, tau_sqd, Loc, Scale, Time, thresh_X, thresh_X_above)

try_size = 50
x = np.linspace(-0.0215, -0.0185, try_size)
y = np.linspace(0.1985, 0.2020, try_size)
Z = np.empty((try_size,try_size))
for idy,yi in enumerate(y):
    for idx,xi in enumerate(x):
        Z[idy,idx] = tmpf(xi,yi)
plt.contourf(x, y, Z, 20, cmap='RdGy')
plt.colorbar();
# (5) GEV altogether: 0.2,-1, 0.1,-0.1, 0.1,1, -0.02,0.2
def tmpf(x,y):
    # Joint GEV likelihood over all eight coefficients, varying only the
    # loc0 slope (x) and the scale slope (y); the rest are held at truth.
    return utils.gev_update_mixture_me_likelihood(Design_mat, np.array([0.2,x, 0.1,-0.1, 0.1,y, -0.02,0.2]),
                                                  Y, X_s, cen, cen_above, prob_below, prob_above,
                                                  delta, tau_sqd, Time, thresh_X, thresh_X_above)

try_size = 50
x = np.linspace(-1.6, -0.4, try_size)
y = np.linspace(0.4, 1.6, try_size)
Z = np.empty((try_size,try_size))
for idy,yi in enumerate(y):
    for idx,xi in enumerate(x):
        Z[idy,idx] = tmpf(xi,yi)
plt.contourf(x, y, Z, 20, cmap='RdGy')
plt.colorbar();

# NOTE(review): this sampler call re-runs the *shape* update, not a joint
# GEV update -- it looks copy/pasted from section (4); confirm intent.
Res = sampler.adaptive_metr(Design_mat, np.array([-0.02,0.2]), utils.shape_gev_update_mixture_me_likelihood,
                            priors.unif_prior, 20, 5000,
                            random_generator,
                            np.nan, True,
                            False, .234, 10, .8, 10,
                            Y, X_s, cen, cen_above, prob_below, prob_above,
                            delta, tau_sqd, Loc, Scale, Time, thresh_X, thresh_X_above)
plt.plot(np.arange(5000),Res['trace'][0,:], color='gray',linestyle='solid')
plt.hlines(-0.02, 0, 5000, colors='r', linestyles='--');
plt.plot(np.arange(5000),Res['trace'][1,:], color='gray',linestyle='solid')
plt.hlines(0.2, 0, 5000, colors='r', linestyles='--');
plt.plot(*Res['trace'])
plt.show()
def tmpf(x,y):
return utils.shape_gev_update_mixture_me_likelihood(Design_mat, np.array([x,y]), Y, X_s, cen, | |
assumed
:return: list of follower objects
"""
followers = []
followers_batch = self.get_user_followers(username=username)
while len(followers_batch) > 0:
followers += followers_batch
followers_batch = self.get_user_followers(username=username)
return followers
def pin(self, board_id, image_url, description='', link='', title='', section_id=None):
    """
    Performs a pin operation. If you want to upload a local image use 'upload_pin'.

    :param board_id: id of the target board (current user should have rights to pin to it)
    :param image_url: web url of an image (not a local one)
    :param description: pin description (can be blank)
    :param link: link to include (can be blank)
    :param title: title (can be blank)
    :param section_id: board section; should be previously defined and is optional
    :return: python dict describing the pinterest response
    """
    pin_options = {
        "board_id": board_id,
        "image_url": image_url,
        "description": description,
        # Pinterest requires a link; fall back to the image itself.
        "link": link if link else image_url,
        "scrape_metric": {"source": "www_url_scrape"},
        "method": "scraped",
        "title": title,
        "section": section_id,
    }
    source_url = '/pin/find/?url={}'.format(self.req_builder.url_encode(image_url))
    payload = self.req_builder.buildPost(options=pin_options, source_url=source_url)
    return self.post(url=PIN_RESOURCE_CREATE, data=payload)
def upload_pin(self, board_id, image_file, description='', link='', title='', section_id=None):
    """
    Similar to 'pin', except the image for the pin is a local file: the file
    is first uploaded to Pinterest and the resulting url is pinned.

    :param board_id: id of the target board
    :param image_file: path to a local image file
    :param description: pin description (can be blank)
    :param link: link to include (can be blank)
    :param title: title (can be blank)
    :param section_id: optional board section id
    :return: python dict describing the pinterest response
    """
    image_url = self._upload_image(image_file=image_file).json()['image_url']
    return self.pin(board_id=board_id, description=description, image_url=image_url, link=link, title=title,
                    section_id=section_id)
def repin(self, board_id, pin_id, section_id=None):
    """
    Repin/Save action.

    :param board_id: board id; the current user should have rights to pin to this board
    :param pin_id: pin id to repin
    :param section_id: board section; should be previously defined and is optional
    :return: python dict describing the pinterest response
    """
    source_url = '/pin/{}/'.format(pin_id)
    repin_options = {
        "board_id": board_id,
        "pin_id": pin_id,
        "section": section_id,
        "is_buyable_pin": False,
    }
    payload = self.req_builder.buildPost(options=repin_options, source_url=source_url)
    return self.post(url=REPIN_RESOURCE_CREATE, data=payload)
def _upload_image(self, image_file):
    """
    Upload a local image file to Pinterest.

    :param image_file: path to a local image file
    :return: the upload response; its .json() contains 'image_url'
    """
    file_name = os.path.basename(image_file)
    mime_type = mimetypes.guess_type(image_file)[0]
    # Use a context manager so the file handle is closed once the request
    # completes (the previous version leaked the open file object).
    with open(image_file, 'rb') as img:
        form_data = MultipartEncoder(fields={
            'img': (file_name, img, mime_type)
        })
        headers = {
            'Content-Length': '%s' % form_data.len,
            'Content-Type': form_data.content_type,
            'X-UPLOAD-SOURCE': 'pinner_uploader'
        }
        return self.post(url=UPLOAD_IMAGE, data=form_data, headers=headers)
def delete_pin(self, pin_id):
    """
    Deletes a pin the user owns.

    :param pin_id: pin id to delete
    :return: python dict describing the pinterest response
    """
    payload = self.req_builder.buildPost(
        options={"id": pin_id},
        source_url='/{}/'.format(self.username),
    )
    return self.post(url=DELETE_PIN_RESOURCE, data=payload)
def comment(self, pin_id, text):
    """
    Put a comment on a pin.

    :param pin_id: pin id to comment on
    :param text: text of the comment
    :return: python dict describing the pinterest response
    """
    # The comment endpoint needs the aggregated pin id, which is only
    # available from the full pin payload.
    aggregated_id = self.load_pin(pin_id=pin_id)['aggregated_pin_data']['id']
    payload = self.req_builder.buildPost(
        options={
            "objectId": aggregated_id,
            "pinId": pin_id,
            "tags": "[]",
            "text": text,
        },
        source_url=pin_id,
    )
    return self.post(url=CREATE_COMMENT_RESOURCE, data=payload)
def load_pin(self, pin_id):
    """
    Loads full information about a pin by scraping its page markup.

    :param pin_id: pin id to load
    :return: python dict describing the pin
    :raises Exception: if the pin data cannot be found in the page
    """
    resp = self.get(url=LOAD_PIN_URL_FORMAT.format(pin_id))
    soup = BeautifulSoup(resp.text, 'html.parser')
    # The pin payload lives in the <script id="initial-state"> JSON blob.
    for s in soup.findAll('script'):
        if s.attrs.get('id') == 'initial-state':
            pin_json_data = json.loads(s.contents[0])['resources']['data']['PinResource']
            # PinResource is keyed by an opaque request key; take the only entry.
            return pin_json_data[list(pin_json_data.keys())[0]]['data']
    raise Exception("Pin data not found. Probably pinterest changed their API")
def get_comments(self, pin_id, page_size=50):
    """
    Get comments on a pin.
    The response is batched, meaning this method should be called until an
    empty list is returned.

    :param pin_id: target pin id
    :param page_size: batch size
    :return: list of comment objects
    """
    pin_data = self.load_pin(pin_id=pin_id)
    cursor = self.bookmark_manager.get_bookmark(primary='pin_comments', secondary=pin_id)
    if cursor == '-end-':
        return []
    options = {
        "isPrefetch": False,
        "objectId": pin_data['aggregated_pin_data']['id'],
        "page_size": page_size,
        "redux_normalize_feed": True,
        "bookmarks": [cursor],
    }
    url = self.req_builder.buildGet(url=GET_PIN_COMMENTS_RESOURCE, options=options,
                                    source_url='/pin/{}/'.format(pin_id))
    body = self.get(url=url).json()['resource_response']
    # Persist the paging cursor ('-end-' when the feed is exhausted).
    self.bookmark_manager.add_bookmark(primary='pin_comments', secondary=pin_id,
                                       bookmark=body.get('bookmark', '-end-'))
    return body['data']
def get_comments_all(self, pin_id):
    """
    Obtains all comments of a pin.
    NOTE: if the pin has too many comments this might cause memory issues.
    In such cases use 'get_comments', which is batched.

    :param pin_id:
    :return: list of comment objects
    """
    all_comments = []
    while True:
        batch = self.get_comments(pin_id=pin_id)
        if not batch:
            return all_comments
        all_comments += batch
def delete_comment(self, pin_id, comment_id):
    """
    Deletes a comment.

    :param pin_id: pin id the comment lives under
    :param comment_id: comment id
    :return:
    """
    payload = self.req_builder.buildPost(
        options={"commentId": comment_id},
        source_url="/pin/{}/".format(pin_id),
    )
    return self.post(url=DELETE_COMMENT, data=payload)
def invite(self, board_id, user_id):
    """
    Invite a user to one of the current user's boards.

    :param board_id: board to invite to
    :param user_id: user to invite
    :return: python dict describing the pinterest response
    """
    invite_options = {"board_id": board_id, "invited_user_ids": [user_id]}
    payload = self.req_builder.buildPost(options=invite_options)
    return self.post(url=BOARD_INVITE_RESOURCE, data=payload)
def get_board_invites(self, board_id, page_size=100):
    """
    Returns a list of users invited to the specified board.
    This method is batched and needs to be called until an empty list is returned.

    :param board_id: id of target board
    :param page_size: batch size
    :return: list of board invite objects
    """
    query = {
        "isPrefetch": False,
        "board_id": board_id,
        "sort": "viewer_first",
        "field_set_key": "boardEdit",
        # Include every invite state, not only accepted ones.
        "status_filters": "new,accepted,contact_request_not_approved,pending_approval",
        "include_inactive": True,
        "page_size": page_size,
    }
    invites_url = self.req_builder.buildGet(url=BOARD_INVITES_RESOURCE, options=query)
    return self.get(url=invites_url).json()['resource_response']['data']
def get_board_invites_all(self, board_id):
    """
    Obtains all invites of a board.
    NOTE: if the board has too many invites this might cause memory issues.
    In such cases use 'get_board_invites', which is batched.

    :param board_id:
    :return: list of board invite objects
    """
    all_invites = []
    while True:
        batch = self.get_board_invites(board_id=board_id)
        if not batch:
            return all_invites
        all_invites += batch
def delete_invite(self, board_id, invited_user_id, also_block=False):
    """
    Deletes an invite for a board.

    :param board_id: board id
    :param invited_user_id: invited user id
    :param also_block: you can also block the user (default False)
    :return: python dict describing the pinterest response
    """
    payload = self.req_builder.buildPost(options={
        "ban": also_block,
        "board_id": board_id,
        "field_set_key": "boardEdit",
        "invited_user_id": invited_user_id,
    })
    return self.post(url=BOARD_DELETE_INVITE_RESOURCE, data=payload)
def visual_search(self, pin_data, x=None, y=None, w=None, h=None, padding=10):
    """
    Gives access to the pinterest visual search api.
    This method is batched, meaning it needs to be called until an empty list is returned.

    :param pin_data: pin data dict (must contain 'images', 'image_signature' and 'id')
    :param x: x position of the cropped part of the image used for searching
    :param y: y position of the cropped part of the image used for searching
    :param w: width of the cropped part of the image used for searching
    :param h: height of the cropped part of the image used for searching
    :param padding: default padding for the cropped image
    :return: list of visual search result objects
    """
    orig = pin_data['images']['orig']
    width = orig['width']
    height = orig['height']
    image_signature = pin_data['image_signature']
    pin_id = pin_data['id']
    # Default crop: the whole image minus `padding` pixels on each side.
    x = padding if x is None else x
    y = padding if y is None else y
    w = width - padding * 2 if w is None else w
    h = height - padding * 2 if h is None else h
    # The source_url (including the crop) also serves as the paging-cursor key.
    source_url = '/pin/{}/visual-search/?x={}&y={}&w={}&h={}'.format(pin_id, x, y, w, h)
    next_bookmark = self.bookmark_manager.get_bookmark(primary='visual_search', secondary=source_url)
    if next_bookmark == '-end-':
        return []
    options = {
        "isPrefetch": False,
        "pin_id": pin_id,
        "image_signature": image_signature,
        # The API expects the crop as fractions of the original image size.
        "crop": {
            "x": x / width,
            "y": y / height,
            "w": w / width,
            "h": h / height
        },
        "bookmarks": [next_bookmark],
        "no_fetch_context_on_resource": False
    }
    url = self.req_builder.buildGet(url=VISUAL_LIVE_SEARCH_RESOURCE, options=options, source_url=source_url)
    resp = self.get(url=url).json()
    # Persist the cursor so the next call continues where this one ended.
    bookmark = resp['resource']['options']['bookmarks'][0]
    self.bookmark_manager.add_bookmark(primary='visual_search', secondary=source_url, bookmark=bookmark)
    return resp['resource_response']['data']['results']
def search(self, scope, query, page_size=250):
    """
    Gives access to the pinterest search api.
    This method is batched, meaning it needs to be called until an empty list is returned.
    NOTE: there is a max number of results set by Pinterest -> 1000

    :param scope: can be pins, buyable_pins, my_pins, videos, boards
    :param query: search phrase
    :param page_size: batch size
    :return: list of search results
    """
    next_bookmark = self.bookmark_manager.get_bookmark(primary='search', secondary=query)
    if next_bookmark == '-end-':
        return []
    # Build the 'term_meta[]=<term>%7Ctyped' query-string fragments Pinterest
    # expects, one per whitespace-separated search term.
    terms = query.split(' ')
    escaped_query = "%20".join(terms)
    term_meta_arr = []
    for t in terms:
        term_meta_arr.append('term_meta[]=' + t)
    term_arg = "%7Ctyped&".join(term_meta_arr)
    source_url = '/search/{}/?q={}&rs=typed&{}%7Ctyped'.format(scope, escaped_query, term_arg)
    options = {
        "isPrefetch": False,
        "auto_correction_disabled": False,
        "query": query,
        "redux_normalize_feed": True,
        "rs": "typed",
        "scope": scope,
        "page_size": page_size,
        "bookmarks": [next_bookmark]
    }
    url = self.req_builder.buildGet(url=BASE_SEARCH_RESOURCE, options=options, source_url=source_url)
    resp = self.get(url=url).json()
    # Persist the paging cursor so the next call continues from here.
    bookmark = resp['resource']['options']['bookmarks'][0]
    self.bookmark_manager.add_bookmark(primary='search', secondary=query, bookmark=bookmark)
    return resp['resource_response']['data']['results']
def board_recommendations(self, board_id='', page_size=50):
"""
This gives the list of pins you see when you open a board and click on 'More Ideas'
This method is batched and needs to be called until empty list is returned in order to obtain all
of the results.
:param board_id: target board id
:param page_size: | |
<reponame>ashishpatel26/interpret<gh_stars>0
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
# TODO: Add unit tests for internal EBM interfacing
from sys import platform
import ctypes as ct
from numpy.ctypeslib import ndpointer
import numpy as np
import os
import struct
import logging
from contextlib import closing
log = logging.getLogger(__name__)
class Native:
"""Layer/Class responsible for native function calls."""
_native = None
def _initialize(self, is_debug, log_level):
    """Load the native EBM shared library, harden its ctypes signatures and
    wire up native logging. Must run before any other native call."""
    self.is_debug = is_debug
    self.log_level = log_level
    # Library must be loaded before signatures/logging can be configured.
    self.lib = ct.cdll.LoadLibrary(Native._get_ebm_lib_path(debug=is_debug))
    self._harden_function_signatures()
    self._set_logging(level=log_level)
@staticmethod
def get_native_singleton(is_debug=False, log_level=None):
    """Return the process-wide Native instance, loading the library on first use."""
    log.debug("Check if EBM lib is loaded")
    if Native._native is not None:
        log.debug("EBM lib already loaded")
        return Native._native
    log.info("EBM lib loading.")
    native = Native()
    native._initialize(is_debug=is_debug, log_level=log_level)
    Native._native = native
    return Native._native
# Mirror of the native "enum FeatureType : int64_t".
# Ordinal = 0
FeatureTypeOrdinal = 0
# Nominal = 1
FeatureTypeNominal = 1

class EbmNativeFeature(ct.Structure):
    """ctypes mirror of the native EbmNativeFeature struct; field order and
    types must match the C definition exactly."""
    _fields_ = [
        # FeatureEbmType featureType;
        ("featureType", ct.c_int64),
        # BoolEbmType hasMissing;
        ("hasMissing", ct.c_int64),
        # int64_t countBins;
        ("countBins", ct.c_int64),
    ]

class EbmNativeFeatureGroup(ct.Structure):
    """ctypes mirror of the native EbmNativeFeatureGroup struct."""
    _fields_ = [
        # int64_t countFeaturesInGroup;
        ("countFeaturesInGroup", ct.c_int64)
    ]

# Trace levels accepted by the native SetTraceLevel call.
# const int32_t TraceLevelOff = 0;
TraceLevelOff = 0
# const int32_t TraceLevelError = 1;
TraceLevelError = 1
# const int32_t TraceLevelWarning = 2;
TraceLevelWarning = 2
# const int32_t TraceLevelInfo = 3;
TraceLevelInfo = 3
# const int32_t TraceLevelVerbose = 4;
TraceLevelVerbose = 4

# Signature of the native logging callback:
# void (*)(int32_t traceLevel, const char * message)
_LogFuncType = ct.CFUNCTYPE(None, ct.c_int32, ct.c_char_p)
def __init__(self):
    # Construction is intentionally inert; the library is loaded lazily via
    # _initialize() (normally through get_native_singleton()).
    pass
def _harden_function_signatures(self):
    """ Adds types to function signatures.

    Declares argtypes/restype for every native entry point so that ctypes
    validates argument types and converts return values instead of
    defaulting everything to C int. The inline comments give the native C
    declaration each Python type corresponds to; they must stay in sync
    with the native headers.
    """
    self.lib.SetLogMessageFunction.argtypes = [
        # void (* fn)(int32 traceLevel, const char * message) logMessageFunction
        self._LogFuncType
    ]
    self.lib.SetLogMessageFunction.restype = None

    self.lib.SetTraceLevel.argtypes = [
        # int32 traceLevel
        ct.c_int32
    ]
    self.lib.SetTraceLevel.restype = None

    self.lib.GenerateRandomNumber.argtypes = [
        # int32_t randomSeed
        ct.c_int32,
        # int64_t stageRandomizationMix
        # NOTE(review): the comment above says int64_t but the declared
        # ctype is c_int32 -- confirm against the native header.
        ct.c_int32,
    ]
    self.lib.GenerateRandomNumber.restype = ct.c_int32

    self.lib.SampleWithoutReplacement.argtypes = [
        # int32_t randomSeed
        ct.c_int32,
        # int64_t countTrainingSamples
        ct.c_int64,
        # int64_t countSamples
        ct.c_int64,
        # int64_t * trainingCountsOut
        ndpointer(dtype=ct.c_int64, ndim=1, flags="C_CONTIGUOUS"),
    ]
    self.lib.SampleWithoutReplacement.restype = None

    self.lib.GenerateQuantileBinCuts.argtypes = [
        # int32_t randomSeed
        ct.c_int32,
        # int64_t countSamples
        ct.c_int64,
        # double * featureValues
        ndpointer(dtype=ct.c_double, ndim=1, flags="C_CONTIGUOUS"),
        # int64_t countSamplesPerBinMin
        ct.c_int64,
        # int64_t isHumanized
        ct.c_int64,
        # int64_t * countBinCutsInOut
        ct.POINTER(ct.c_int64),
        # double * binCutsLowerBoundInclusiveOut
        ndpointer(dtype=ct.c_double, ndim=1, flags="C_CONTIGUOUS"),
        # int64_t * countMissingValuesOut
        ct.POINTER(ct.c_int64),
        # double * minNonInfinityValueOut
        ct.POINTER(ct.c_double),
        # int64_t * countNegativeInfinityOut
        ct.POINTER(ct.c_int64),
        # double * maxNonInfinityValueOut
        ct.POINTER(ct.c_double),
        # int64_t * countPositiveInfinityOut
        ct.POINTER(ct.c_int64),
    ]
    self.lib.GenerateQuantileBinCuts.restype = ct.c_int64

    self.lib.GenerateUniformBinCuts.argtypes = [
        # int64_t countSamples
        ct.c_int64,
        # double * featureValues
        ndpointer(dtype=ct.c_double, ndim=1, flags="C_CONTIGUOUS"),
        # int64_t * countBinCutsInOut
        ct.POINTER(ct.c_int64),
        # double * binCutsLowerBoundInclusiveOut
        ndpointer(dtype=ct.c_double, ndim=1, flags="C_CONTIGUOUS"),
        # int64_t * countMissingValuesOut
        ct.POINTER(ct.c_int64),
        # double * minNonInfinityValueOut
        ct.POINTER(ct.c_double),
        # int64_t * countNegativeInfinityOut
        ct.POINTER(ct.c_int64),
        # double * maxNonInfinityValueOut
        ct.POINTER(ct.c_double),
        # int64_t * countPositiveInfinityOut
        ct.POINTER(ct.c_int64),
    ]
    self.lib.GenerateUniformBinCuts.restype = None

    self.lib.GenerateWinsorizedBinCuts.argtypes = [
        # int64_t countSamples
        ct.c_int64,
        # double * featureValues
        ndpointer(dtype=ct.c_double, ndim=1, flags="C_CONTIGUOUS"),
        # int64_t * countBinCutsInOut
        ct.POINTER(ct.c_int64),
        # double * binCutsLowerBoundInclusiveOut
        ndpointer(dtype=ct.c_double, ndim=1, flags="C_CONTIGUOUS"),
        # int64_t * countMissingValuesOut
        ct.POINTER(ct.c_int64),
        # double * minNonInfinityValueOut
        ct.POINTER(ct.c_double),
        # int64_t * countNegativeInfinityOut
        ct.POINTER(ct.c_int64),
        # double * maxNonInfinityValueOut
        ct.POINTER(ct.c_double),
        # int64_t * countPositiveInfinityOut
        ct.POINTER(ct.c_int64),
    ]
    self.lib.GenerateWinsorizedBinCuts.restype = ct.c_int64

    self.lib.SuggestGraphBounds.argtypes = [
        # int64_t countBinCuts
        ct.c_int64,
        # double lowestBinCut
        ct.c_double,
        # double highestBinCut
        ct.c_double,
        # double minValue
        ct.c_double,
        # double maxValue
        ct.c_double,
        # double * lowGraphBoundOut
        ct.POINTER(ct.c_double),
        # double * highGraphBoundOut
        ct.POINTER(ct.c_double),
    ]
    self.lib.SuggestGraphBounds.restype = None

    self.lib.Discretize.argtypes = [
        # int64_t countSamples
        ct.c_int64,
        # double * featureValues
        ndpointer(dtype=ct.c_double, ndim=1, flags="C_CONTIGUOUS"),
        # int64_t countBinCuts
        ct.c_int64,
        # double * binCutsLowerBoundInclusive
        ndpointer(dtype=ct.c_double, ndim=1, flags="C_CONTIGUOUS"),
        # int64_t * discretizedOut
        ndpointer(dtype=ct.c_int64, ndim=1, flags="C_CONTIGUOUS"),
    ]
    self.lib.Discretize.restype = ct.c_int64

    self.lib.Softmax.argtypes = [
        # int64_t countTargetClasses
        ct.c_int64,
        # int64_t countSamples
        ct.c_int64,
        # double * logits
        ndpointer(dtype=ct.c_double, ndim=1, flags="C_CONTIGUOUS"),
        # double * probabilitiesOut
        ndpointer(dtype=ct.c_double, ndim=1, flags="C_CONTIGUOUS"),
    ]
    self.lib.Softmax.restype = ct.c_int64

    self.lib.InitializeBoostingClassification.argtypes = [
        # int32_t randomSeed
        ct.c_int32,
        # int64_t countTargetClasses
        ct.c_int64,
        # int64_t countFeatures
        ct.c_int64,
        # EbmNativeFeature * features
        ct.POINTER(self.EbmNativeFeature),
        # int64_t countFeatureGroups
        ct.c_int64,
        # EbmNativeFeatureGroup * featureGroups
        ct.POINTER(self.EbmNativeFeatureGroup),
        # int64_t * featureGroupIndexes
        ndpointer(dtype=ct.c_int64, ndim=1),
        # int64_t countTrainingSamples
        ct.c_int64,
        # int64_t * trainingBinnedData
        ndpointer(dtype=ct.c_int64, ndim=2, flags="C_CONTIGUOUS"),
        # int64_t * trainingTargets
        ndpointer(dtype=ct.c_int64, ndim=1),
        # double * trainingPredictorScores
        # scores can either be 1 or 2 dimensional
        ndpointer(dtype=ct.c_double, flags="C_CONTIGUOUS"),
        # int64_t countValidationSamples
        ct.c_int64,
        # int64_t * validationBinnedData
        ndpointer(dtype=ct.c_int64, ndim=2, flags="C_CONTIGUOUS"),
        # int64_t * validationTargets
        ndpointer(dtype=ct.c_int64, ndim=1),
        # double * validationPredictorScores
        # scores can either be 1 or 2 dimensional
        ndpointer(dtype=ct.c_double, flags="C_CONTIGUOUS"),
        # int64_t countInnerBags
        ct.c_int64,
        # double * optionalTempParams
        ct.POINTER(ct.c_double),
    ]
    self.lib.InitializeBoostingClassification.restype = ct.c_void_p

    self.lib.InitializeBoostingRegression.argtypes = [
        # int32_t randomSeed
        ct.c_int32,
        # int64_t countFeatures
        ct.c_int64,
        # EbmNativeFeature * features
        ct.POINTER(self.EbmNativeFeature),
        # int64_t countFeatureGroups
        ct.c_int64,
        # EbmNativeFeatureGroup * featureGroups
        ct.POINTER(self.EbmNativeFeatureGroup),
        # int64_t * featureGroupIndexes
        ndpointer(dtype=ct.c_int64, ndim=1),
        # int64_t countTrainingSamples
        ct.c_int64,
        # int64_t * trainingBinnedData
        ndpointer(dtype=ct.c_int64, ndim=2, flags="C_CONTIGUOUS"),
        # double * trainingTargets
        ndpointer(dtype=ct.c_double, ndim=1),
        # double * trainingPredictorScores
        ndpointer(dtype=ct.c_double, ndim=1),
        # int64_t countValidationSamples
        ct.c_int64,
        # int64_t * validationBinnedData
        ndpointer(dtype=ct.c_int64, ndim=2, flags="C_CONTIGUOUS"),
        # double * validationTargets
        ndpointer(dtype=ct.c_double, ndim=1),
        # double * validationPredictorScores
        ndpointer(dtype=ct.c_double, ndim=1),
        # int64_t countInnerBags
        ct.c_int64,
        # double * optionalTempParams
        ct.POINTER(ct.c_double),
    ]
    self.lib.InitializeBoostingRegression.restype = ct.c_void_p

    self.lib.GenerateModelFeatureGroupUpdate.argtypes = [
        # void * ebmBoosting
        ct.c_void_p,
        # int64_t indexFeatureGroup
        ct.c_int64,
        # double learningRate
        ct.c_double,
        # int64_t countTreeSplitsMax
        ct.c_int64,
        # int64_t countSamplesRequiredForChildSplitMin
        ct.c_int64,
        # double * trainingWeights
        # ndpointer(dtype=ct.c_double, ndim=1),
        ct.c_void_p,
        # double * validationWeights
        # ndpointer(dtype=ct.c_double, ndim=1),
        ct.c_void_p,
        # double * gainOut
        ct.POINTER(ct.c_double),
    ]
    self.lib.GenerateModelFeatureGroupUpdate.restype = ct.POINTER(ct.c_double)

    self.lib.ApplyModelFeatureGroupUpdate.argtypes = [
        # void * ebmBoosting
        ct.c_void_p,
        # int64_t indexFeatureGroup
        ct.c_int64,
        # double * modelFeatureGroupUpdateTensor
        ndpointer(dtype=ct.c_double, flags="C_CONTIGUOUS"),
        # double * validationMetricOut
        ct.POINTER(ct.c_double),
    ]
    self.lib.ApplyModelFeatureGroupUpdate.restype = ct.c_int64

    self.lib.GetBestModelFeatureGroup.argtypes = [
        # void * ebmBoosting
        ct.c_void_p,
        # int64_t indexFeatureGroup
        ct.c_int64,
    ]
    self.lib.GetBestModelFeatureGroup.restype = ct.POINTER(ct.c_double)

    self.lib.GetCurrentModelFeatureGroup.argtypes = [
        # void * ebmBoosting
        ct.c_void_p,
        # int64_t indexFeatureGroup
        ct.c_int64,
    ]
    self.lib.GetCurrentModelFeatureGroup.restype = ct.POINTER(ct.c_double)

    self.lib.FreeBoosting.argtypes = [
        # void * ebmBoosting
        ct.c_void_p
    ]
    self.lib.FreeBoosting.restype = None

    self.lib.InitializeInteractionClassification.argtypes = [
        # int64_t countTargetClasses
        ct.c_int64,
        # int64_t countFeatures
        ct.c_int64,
        # EbmNativeFeature * features
        ct.POINTER(self.EbmNativeFeature),
        # int64_t countSamples
        ct.c_int64,
        # int64_t * binnedData
        ndpointer(dtype=ct.c_int64, ndim=2, flags="C_CONTIGUOUS"),
        # int64_t * targets
        ndpointer(dtype=ct.c_int64, ndim=1),
        # double * predictorScores
        # scores can either be 1 or 2 dimensional
        ndpointer(dtype=ct.c_double, flags="C_CONTIGUOUS"),
        # double * optionalTempParams
        ct.POINTER(ct.c_double),
    ]
    self.lib.InitializeInteractionClassification.restype = ct.c_void_p

    self.lib.InitializeInteractionRegression.argtypes = [
        # int64_t countFeatures
        ct.c_int64,
        # EbmNativeFeature * features
        ct.POINTER(self.EbmNativeFeature),
        # int64_t countSamples
        ct.c_int64,
        # int64_t * binnedData
        ndpointer(dtype=ct.c_int64, ndim=2, flags="C_CONTIGUOUS"),
        # double * targets
        ndpointer(dtype=ct.c_double, ndim=1),
        # double * predictorScores
        ndpointer(dtype=ct.c_double, ndim=1),
        # double * optionalTempParams
        ct.POINTER(ct.c_double),
    ]
    self.lib.InitializeInteractionRegression.restype = ct.c_void_p

    self.lib.CalculateInteractionScore.argtypes = [
        # void * ebmInteraction
        ct.c_void_p,
        # int64_t countFeaturesInGroup
        ct.c_int64,
        # int64_t * featureIndexes
        ndpointer(dtype=ct.c_int64, ndim=1),
        # int64_t countSamplesRequiredForChildSplitMin
        ct.c_int64,
        # double * interactionScoreOut
        ct.POINTER(ct.c_double),
    ]
    self.lib.CalculateInteractionScore.restype = ct.c_int64

    self.lib.FreeInteraction.argtypes = [
        # void * ebmInteraction
        ct.c_void_p
    ]
    self.lib.FreeInteraction.restype = None
def _set_logging(self, level=None):
# NOTE: Not part of code coverage. It runs in tests, but isn't registered for some reason.
def native_log(trace_level, message): # pragma: no cover
try:
message = message.decode("ascii")
if trace_level == self.TraceLevelError:
log.error(message)
elif trace_level == self.TraceLevelWarning:
log.warning(message)
elif trace_level == self.TraceLevelInfo:
log.info(message)
elif trace_level == self.TraceLevelVerbose:
log.debug(message)
except: # pragma: no cover
# we're being called from C, so we can't raise exceptions
pass
if level is None:
root = logging.getLogger("interpret")
level = root.getEffectiveLevel()
level_dict = {
logging.DEBUG: self.TraceLevelVerbose,
logging.INFO: self.TraceLevelInfo,
logging.WARNING: self.TraceLevelWarning,
logging.ERROR: self.TraceLevelError,
logging.CRITICAL: self.TraceLevelError,
logging.NOTSET: self.TraceLevelOff,
"DEBUG": self.TraceLevelVerbose,
"INFO": self.TraceLevelInfo,
"WARNING": self.TraceLevelWarning,
"ERROR": self.TraceLevelError,
"CRITICAL": self.TraceLevelError,
"NOTSET": self.TraceLevelOff,
}
# it's critical that we put typed_log_func into self,
# otherwise it will be garbage collected
self._typed_log_func = self._LogFuncType(native_log)
self.lib.SetLogMessageFunction(self._typed_log_func)
self.lib.SetTraceLevel(level_dict[level])
@staticmethod
def _get_ebm_lib_path(debug=False):
""" Returns filepath of core EBM library.
Returns:
A string representing filepath.
"""
bitsize = struct.calcsize("P") * 8
is_64_bit = bitsize == 64
script_path = os.path.dirname(os.path.abspath(__file__))
package_path = os.path.join(script_path, "..", "..")
debug_str = "_debug" if debug else ""
log.info("Loading native on {0} | debug = {1}".format(platform, debug))
if platform == "linux" or platform == "linux2" and is_64_bit:
return os.path.join(
package_path, "lib", "lib_ebm_native_linux_x64{0}.so".format(debug_str)
)
elif platform == "win32" and is_64_bit:
return os.path.join(
package_path, "lib", "lib_ebm_native_win_x64{0}.dll".format(debug_str)
)
elif platform == "darwin" and is_64_bit:
return os.path.join(
package_path, "lib", "lib_ebm_native_mac_x64{0}.dylib".format(debug_str)
)
else: # pragma: no cover
msg = "Platform {0} at {1} bit not supported for EBM".format(
platform, bitsize
)
log.error(msg)
raise Exception(msg)
@staticmethod
def make_ndarray(c_pointer, shape, dtype, writable=False, copy_data=True):
""" Returns an ndarray based from a C array.
# ---------------------------------------------------------------------------
def __init__(self, starred=None, all=None, category=None, drive_id=None, fields=None,
image_thumbnail_process=None, image_url_process=None, limit=None, marker=None, order_by=None, order_direction=None,
parent_file_id=None, status=None, type=None, url_expire_sec=None, video_thumbnail_process=None):
# starred
self.starred = starred # type: bool
# all
self.all = all # type: bool
# category
self.category = category # type: str
# drive_id
self.drive_id = drive_id # type: str
# fields
self.fields = fields # type: str
# image_thumbnail_process
self.image_thumbnail_process = image_thumbnail_process # type: str
# image_url_process
self.image_url_process = image_url_process # type: str
# limit
self.limit = limit # type: int
# marker
self.marker = marker # type: str
# order_by
self.order_by = order_by # type: str
# order_direction
self.order_direction = order_direction # type: str
# ParentFileID
self.parent_file_id = parent_file_id # type: str
# status
self.status = status # type: str
# type
self.type = type # type: str
# url_expire_sec
self.url_expire_sec = url_expire_sec # type: int
# video_thumbnail_process
# type:string
self.video_thumbnail_process = video_thumbnail_process # type: str
def validate(self):
if self.drive_id is not None:
self.validate_pattern(self.drive_id, 'drive_id', '[0-9]+')
if self.limit is not None:
self.validate_pattern(self.limit, 'limit', '[0-9]{1,3}')
self.validate_maximum(self.limit, 'limit', 100)
self.validate_minimum(self.limit, 'limit', 0)
self.validate_required(self.parent_file_id, 'parent_file_id')
if self.parent_file_id is not None:
self.validate_max_length(self.parent_file_id, 'parent_file_id', 50)
self.validate_pattern(self.parent_file_id, 'parent_file_id', '[a-z0-9.-_]{1,50}')
if self.url_expire_sec is not None:
self.validate_maximum(self.url_expire_sec, 'url_expire_sec', 14400)
self.validate_minimum(self.url_expire_sec, 'url_expire_sec', 10)
def to_map(self):
result = {}
if self.starred is not None:
result['Starred'] = self.starred
if self.all is not None:
result['all'] = self.all
if self.category is not None:
result['category'] = self.category
if self.drive_id is not None:
result['drive_id'] = self.drive_id
if self.fields is not None:
result['fields'] = self.fields
if self.image_thumbnail_process is not None:
result['image_thumbnail_process'] = self.image_thumbnail_process
if self.image_url_process is not None:
result['image_url_process'] = self.image_url_process
if self.limit is not None:
result['limit'] = self.limit
if self.marker is not None:
result['marker'] = self.marker
if self.order_by is not None:
result['order_by'] = self.order_by
if self.order_direction is not None:
result['order_direction'] = self.order_direction
if self.parent_file_id is not None:
result['parent_file_id'] = self.parent_file_id
if self.status is not None:
result['status'] = self.status
if self.type is not None:
result['type'] = self.type
if self.url_expire_sec is not None:
result['url_expire_sec'] = self.url_expire_sec
if self.video_thumbnail_process is not None:
result['video_thumbnail_process'] = self.video_thumbnail_process
return result
def from_map(self, map={}):
if map.get('Starred') is not None:
self.starred = map.get('Starred')
if map.get('all') is not None:
self.all = map.get('all')
if map.get('category') is not None:
self.category = map.get('category')
if map.get('drive_id') is not None:
self.drive_id = map.get('drive_id')
if map.get('fields') is not None:
self.fields = map.get('fields')
if map.get('image_thumbnail_process') is not None:
self.image_thumbnail_process = map.get('image_thumbnail_process')
if map.get('image_url_process') is not None:
self.image_url_process = map.get('image_url_process')
if map.get('limit') is not None:
self.limit = map.get('limit')
if map.get('marker') is not None:
self.marker = map.get('marker')
if map.get('order_by') is not None:
self.order_by = map.get('order_by')
if map.get('order_direction') is not None:
self.order_direction = map.get('order_direction')
if map.get('parent_file_id') is not None:
self.parent_file_id = map.get('parent_file_id')
if map.get('status') is not None:
self.status = map.get('status')
if map.get('type') is not None:
self.type = map.get('type')
if map.get('url_expire_sec') is not None:
self.url_expire_sec = map.get('url_expire_sec')
if map.get('video_thumbnail_process') is not None:
self.video_thumbnail_process = map.get('video_thumbnail_process')
return self
class ListMyDriveRequest(TeaModel):
    """
    List my drive request
    """
    def __init__(self, headers=None, limit=None, marker=None):
        # optional request headers
        self.headers = headers  # type: Dict[str, str]
        # page size limit, must be in [1, 100]
        self.limit = limit  # type: int
        # paging marker returned by the previous call
        self.marker = marker  # type: str

    def validate(self):
        """limit, when present, must lie in [1, 100]."""
        if self.limit is None:
            return
        self.validate_maximum(self.limit, 'limit', 100)
        self.validate_minimum(self.limit, 'limit', 1)

    def to_map(self):
        """Serialize the non-None fields into a plain dict."""
        result = {}
        for key, value in (('headers', self.headers),
                           ('limit', self.limit),
                           ('marker', self.marker)):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, map={}):
        """Populate fields from ``map``, skipping absent keys; returns self."""
        for key in ('headers', 'limit', 'marker'):
            value = map.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class ListShareLinkRequest(TeaModel):
    """
    list_share_link request
    """
    def __init__(self, creator=None, limit=None, marker=None):
        # creator user id
        self.creator = creator  # type: str
        # page size limit, must be in [1, 100]
        self.limit = limit  # type: int
        # paging marker returned by the previous call
        self.marker = marker  # type: str

    def validate(self):
        """limit, when present, must lie in [1, 100]."""
        if self.limit is None:
            return
        self.validate_maximum(self.limit, 'limit', 100)
        self.validate_minimum(self.limit, 'limit', 1)

    def to_map(self):
        """Serialize the non-None fields into a plain dict."""
        result = {}
        for key, value in (('creator', self.creator),
                           ('limit', self.limit),
                           ('marker', self.marker)):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, map={}):
        """Populate fields from ``map``, skipping absent keys; returns self."""
        for key in ('creator', 'limit', 'marker'):
            value = map.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class ListShareRequest(TeaModel):
    """
    list share request
    """
    def __init__(self, headers=None, creator=None, drive_id=None, limit=None, marker=None, owner=None,
                 share_file_path=None):
        # optional request headers
        self.headers = headers  # type: Dict[str, str]
        # creator user id
        self.creator = creator  # type: str
        # numeric drive identifier
        self.drive_id = drive_id  # type: str
        # page size limit, must be in [1, 100]
        self.limit = limit  # type: int
        # paging marker returned by the previous call
        self.marker = marker  # type: str
        # share owner
        self.owner = owner  # type: str
        # path of the shared file
        self.share_file_path = share_file_path  # type: str

    def validate(self):
        """drive_id must be numeric; limit, when present, in [1, 100]."""
        if self.drive_id is not None:
            self.validate_pattern(self.drive_id, 'drive_id', '[0-9]+')
        if self.limit is not None:
            self.validate_maximum(self.limit, 'limit', 100)
            self.validate_minimum(self.limit, 'limit', 1)

    def to_map(self):
        """Serialize the non-None fields into a plain dict."""
        result = {}
        for key, value in (('headers', self.headers),
                           ('creator', self.creator),
                           ('drive_id', self.drive_id),
                           ('limit', self.limit),
                           ('marker', self.marker),
                           ('owner', self.owner),
                           ('share_file_path', self.share_file_path)):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, map={}):
        """Populate fields from ``map``, skipping absent keys; returns self."""
        for key in ('headers', 'creator', 'drive_id', 'limit', 'marker',
                    'owner', 'share_file_path'):
            value = map.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class ListStoreFileRequest(TeaModel):
    """
    list store file
    """
    def __init__(self, headers=None, limit=None, marker=None, parent_file_path=None, store_id=None, type=None):
        # optional request headers
        self.headers = headers  # type: Dict[str, str]
        # page size limit, must be in [1, 1000]
        self.limit = limit  # type: int
        # paging marker returned by the previous call
        self.marker = marker  # type: str
        # path of the parent file
        self.parent_file_path = parent_file_path  # type: str
        # store identifier
        self.store_id = store_id  # type: str
        # file type filter
        self.type = type  # type: str

    def validate(self):
        """limit, when present, must lie in [1, 1000]."""
        if self.limit is None:
            return
        self.validate_maximum(self.limit, 'limit', 1000)
        self.validate_minimum(self.limit, 'limit', 1)

    def to_map(self):
        """Serialize the non-None fields into a plain dict."""
        result = {}
        for key, value in (('headers', self.headers),
                           ('limit', self.limit),
                           ('marker', self.marker),
                           ('parent_file_path', self.parent_file_path),
                           ('store_id', self.store_id),
                           ('type', self.type)):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, map={}):
        """Populate fields from ``map``, skipping absent keys; returns self."""
        for key in ('headers', 'limit', 'marker', 'parent_file_path',
                    'store_id', 'type'):
            value = map.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class ListStoreRequest(TeaModel):
    """
    list storage file
    """
    def __init__(self, domain_id=None):
        # target domain identifier
        self.domain_id = domain_id  # type: str

    def validate(self):
        """No constraints to enforce."""
        pass

    def to_map(self):
        """Serialize the non-None fields into a plain dict."""
        result = {}
        if self.domain_id is not None:
            result['domain_id'] = self.domain_id
        return result

    def from_map(self, map={}):
        """Populate fields from ``map``, skipping absent keys; returns self."""
        value = map.get('domain_id')
        if value is not None:
            self.domain_id = value
        return self
class ListUploadedPartRequest(TeaModel):
"""
列举uploadID对应的已上传分片
"""
def __init__(self, drive_id=None, file_id=None, limit=None, part_number_marker=None, upload_id=None):
# drive_id
self.drive_id = drive_id # type: str
# file_id
self.file_id = file_id # type: str
# limit
self.limit = limit # type: int
# part_number_marker
self.part_number_marker = part_number_marker # type: int
# upload_id
self.upload_id = upload_id # type: str
def validate(self):
self.validate_required(self.drive_id, 'drive_id')
if self.drive_id is not None:
self.validate_pattern(self.drive_id, 'drive_id', '[0-9]+')
self.validate_required(self.file_id, 'file_id')
if self.file_id is not None:
self.validate_max_length(self.file_id, 'file_id', 50)
self.validate_pattern(self.file_id, 'file_id', '[a-z0-9.-_]{1,50}')
if self.limit is not None:
self.validate_pattern(self.limit, 'limit', '[0-9]+')
self.validate_maximum(self.limit, 'limit', 1000)
self.validate_minimum(self.limit, 'limit', 1)
if self.part_number_marker is not None:
self.validate_pattern(self.part_number_marker, 'part_number_marker', '[0-9]+')
self.validate_minimum(self.part_number_marker, 'part_number_marker', 1)
def to_map(self):
result = {}
if self.drive_id is not None:
result['drive_id'] = self.drive_id
if self.file_id is not None:
result['file_id'] = self.file_id
if self.limit is not None:
result['limit'] = self.limit
if self.part_number_marker is not None:
result['part_number_marker'] = self.part_number_marker
if self.upload_id is not None:
result['upload_id'] = self.upload_id
return result
def from_map(self, map={}):
if map.get('drive_id') is not None:
self.drive_id | |
# Source repository: BrunoGomesCoelho/Eat-Move-Learn
from kaggle_environments.envs.hungry_geese.hungry_geese import Observation, Configuration, Action, adjacent_positions, \
row_col, translate, min_distance
from kaggle_environments import make
import gym
from gym import spaces
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from enum import Enum, auto
import numpy as np
import os
import random as rand
class CellState(Enum):
    """Occupancy codes for one board cell, as seen by ObservationProcessor.

    Values spelled out explicitly (the original used auto(), which assigns
    the same consecutive integers starting after EMPTY = 0).
    """
    EMPTY = 0
    FOOD = 1
    HEAD = 2
    BODY = 3
    TAIL = 4
    MY_HEAD = 5
    MY_BODY = 6
    MY_TAIL = 7
    ANY_GOOSE = 8
class ObservationProcessor:
    """Convert raw hungry-geese env observations into a numeric grid.

    _preprocess_env caches per-step board facts on the instance (own goose,
    opponents, food, danger zones, dead ends); process_env_obs renders them
    as a (1, rows, columns) float grid, optionally rolled so our head sits
    in the center cell.
    """

    def __init__(self, rows, columns, hunger_rate, min_food, debug=False, center_head=True):
        self.debug = debug
        self.rows, self.columns = rows, columns
        self.hunger_rate = hunger_rate
        self.min_food = min_food
        self.previous_action = -1
        self.last_action = -1
        self.last_min_distance_to_food = self.rows * self.columns  # initial max value to mark no food seen so far
        self.center_head = center_head

    # ***** BEGIN: utility functions ******
    def opposite(self, action):
        """Return the Action pointing opposite to `action`."""
        if action == Action.NORTH:
            return Action.SOUTH
        if action == Action.SOUTH:
            return Action.NORTH
        if action == Action.EAST:
            return Action.WEST
        if action == Action.WEST:
            return Action.EAST
        raise TypeError(str(action) + " is not a valid Action.")

    def _adjacent_positions(self, position):
        return adjacent_positions(position, self.columns, self.rows)

    def _min_distance_to_food(self, position, food=None):
        # Idiom fix: `food != None` -> `food is not None` (same behavior).
        food = food if food is not None else self.food
        return min_distance(position, food, self.columns)

    def _row_col(self, position):
        return row_col(position, self.columns)

    def _translate(self, position, direction):
        return translate(position, direction, self.columns, self.rows)

    def _preprocess_env(self, obs):
        """Cache current-step board facts (geese, food, danger, dead ends) on self."""
        observation = Observation(obs)
        self.my_index = observation.index
        if len(observation.geese[self.my_index]) > 0:
            self.my_head = observation.geese[self.my_index][0]
            self.my_tail = observation.geese[self.my_index][-1]
            self.my_body = [pos for pos in observation.geese[self.my_index][1:-1]]
        else:
            # Our goose is gone; sentinel -1 for head/tail.
            self.my_head = -1
            self.my_tail = -1
            self.my_body = []
        self.geese = [g for i, g in enumerate(observation.geese) if i != self.my_index and len(g) > 0]
        self.geese_cells = [pos for g in self.geese for pos in g if len(g) > 0]
        self.occupied = [p for p in self.geese_cells]
        self.occupied.extend([p for p in observation.geese[self.my_index]])
        self.heads = [g[0] for i, g in enumerate(observation.geese) if i != self.my_index and len(g) > 0]
        self.bodies = [pos for i, g in enumerate(observation.geese) for pos in g[1:-1] if
                       i != self.my_index and len(g) > 2]
        self.tails = [g[-1] for i, g in enumerate(observation.geese) if i != self.my_index and len(g) > 1]
        self.food = [f for f in observation.food]
        self.adjacent_to_heads = [pos for head in self.heads for pos in self._adjacent_positions(head)]
        self.adjacent_to_bodies = [pos for body in self.bodies for pos in self._adjacent_positions(body)]
        self.adjacent_to_tails = [pos for tail in self.tails for pos in self._adjacent_positions(tail)]
        self.adjacent_to_geese = self.adjacent_to_heads + self.adjacent_to_bodies
        self.danger_zone = self.adjacent_to_geese
        # Cell occupation
        self.cell_states = [CellState.EMPTY.value for _ in range(self.rows * self.columns)]
        for g in self.geese:
            for pos in g:
                self.cell_states[pos] = CellState.ANY_GOOSE.value
        for pos in self.heads:
            self.cell_states[pos] = CellState.ANY_GOOSE.value
        for pos in self.my_body:
            self.cell_states[pos] = CellState.ANY_GOOSE.value
        # NOTE(review): when our goose is dead my_tail == -1, so this marks the
        # board's last cell; presumably only reached while alive -- confirm.
        self.cell_states[self.my_tail] = CellState.ANY_GOOSE.value
        # detect dead-ends: empty cells with >= 3 blocked neighbours
        # (our own head does not count as blocking).
        self.dead_ends = []
        for pos_i, _ in enumerate(self.cell_states):
            if self.cell_states[pos_i] != CellState.EMPTY.value:
                continue
            adjacent = self._adjacent_positions(pos_i)
            adjacent_states = [self.cell_states[adj_pos] for adj_pos in adjacent if adj_pos != self.my_head]
            num_blocked = sum(adjacent_states)
            if num_blocked >= (CellState.ANY_GOOSE.value * 3):
                self.dead_ends.append(pos_i)
        # check for extended dead-ends: marking a dead-end blocked can create
        # new dead-ends, so repeat until a fixed point is reached.
        new_dead_ends = [pos for pos in self.dead_ends]
        while new_dead_ends != []:
            for pos in new_dead_ends:
                self.cell_states[pos] = CellState.ANY_GOOSE.value
                self.dead_ends.append(pos)
            new_dead_ends = []
            for pos_i, _ in enumerate(self.cell_states):
                if self.cell_states[pos_i] != CellState.EMPTY.value:
                    continue
                adjacent = self._adjacent_positions(pos_i)
                adjacent_states = [self.cell_states[adj_pos] for adj_pos in adjacent if adj_pos != self.my_head]
                num_blocked = sum(adjacent_states)
                if num_blocked >= (CellState.ANY_GOOSE.value * 3):
                    new_dead_ends.append(pos_i)

    def safe_position(self, future_position):
        """True if free, not adjacent to an opponent head, and not a dead end."""
        return (future_position not in self.occupied) and (future_position not in self.adjacent_to_heads) and (
                future_position not in self.dead_ends)

    def valid_position(self, future_position):
        """True if free and not a dead end."""
        return (future_position not in self.occupied) and (future_position not in self.dead_ends)

    def free_position(self, future_position):
        """True if not currently occupied by any goose."""
        return (future_position not in self.occupied)
    # ***** END: utility functions ******

    def process_env_obs(self, obs):
        """Render the observation as a (1, rows, columns) float grid."""
        self._preprocess_env(obs)
        # Cell encodings fed to the network.
        EMPTY = .4
        HEAD = -1
        BODY = MY_BODY = -.8
        TAIL = MY_TAIL = -.5
        MY_HEAD = 0
        FOOD = 1
        RISK = -.5
        # Example: {'remainingOverageTime': 12, 'step': 0, 'geese': [[62], [50]], 'food': [7, 71], 'index': 0}
        # observation = [[CellState.EMPTY.value for _ in range(self.columns)] for _ in range(self.rows)]
        observation = [[EMPTY for _ in range(self.columns)] for _ in range(self.rows)]
        # Other agents
        for pos in self.heads:
            r, c = self._row_col(pos)
            observation[r][c] = HEAD  # CellState.HEAD.value
        for pos in self.bodies:
            r, c = self._row_col(pos)
            observation[r][c] = BODY  # CellState.BODY.value
        for pos in self.tails:
            r, c = self._row_col(pos)
            observation[r][c] = TAIL  # CellState.TAIL.value
        # Me
        r, c = self._row_col(self.my_head)
        observation[r][c] = MY_HEAD  # -1 #CellState.MY_HEAD.value
        if self.my_head != self.my_tail:
            r, c = self._row_col(self.my_tail)
            observation[r][c] = MY_TAIL  # CellState.MY_TAIL.value
        for pos in self.my_body:
            r, c = self._row_col(pos)
            observation[r][c] = MY_BODY  # CellState.MY_BODY.value
        # Food
        for pos in self.food:
            r, c = self._row_col(pos)
            observation[r][c] = FOOD  # CellState.FOOD.value
        if (self.previous_action != -1):
            aux_previous_pos = self._translate(self.my_head, self.opposite(self.previous_action))
            r, c = self._row_col(aux_previous_pos)
            if observation[r][c] > 0:
                observation[r][c] = MY_BODY * .5  # Marked to avoid opposite moves
        # Add risk mark (only overwrites cells still positive: EMPTY/FOOD)
        for pos in self.adjacent_to_heads:
            r, c = self._row_col(pos)
            if observation[r][c] > 0:
                observation[r][c] = RISK
        # Add risk mark
        for pos in self.dead_ends:
            r, c = self._row_col(pos)
            if observation[r][c] > 0:
                observation[r][c] = RISK / 2
        if self.center_head:
            # NOTE: assumes odd number of rows and columns
            head_row, head_col = self._row_col(self.my_head)
            v_center = (self.columns // 2)  # col 5 on 0-10 (11 columns)
            v_roll = v_center - head_col
            h_center = (self.rows // 2)  # row 3 on 0-7 (7 rows)
            h_roll = h_center - head_row
            observation = np.roll(observation, v_roll, axis=1)
            observation = np.roll(observation, h_roll, axis=0)
        return np.array([observation])
class MyNN(nn.Module):
    """Feed-forward Q-network mirroring the trained DQN's q_net.

    The saved state_dict uses names such as 'layer0.weight', 'layer0.bias'
    produced by the original Sequential q_net; it is loaded with strict=False
    by the caller.
    """

    # Layer widths: flattened 7x11 observation (77) -> hidden stack -> 4 actions.
    NET_ARCH = [77] + [100, 100, 300, 100, 100, 100, 100, 100] + [4]

    def __init__(self):
        super(MyNN, self).__init__()
        # BUG FIX: the layers were kept in a plain Python list, so nn.Module
        # never registered them as submodules -- parameters() was empty,
        # .to(device) skipped them, and load_state_dict found no tensors to
        # fill. nn.ModuleList registers each Linear properly.
        self.even_layers = nn.ModuleList(
            nn.Linear(n_in, n_out)
            for n_in, n_out in zip(self.NET_ARCH[:-1], self.NET_ARCH[1:])
        )

    def forward(self, x):
        # No feature extractor: flatten everything but the batch dimension
        # (check policy arch on DQN creation).
        x = nn.Flatten()(x)
        for layer in self.even_layers[:-1]:
            x = F.relu(layer(x))
        return self.even_layers[-1](x)
def my_dqn(observation, configuration):
global model, obs_prep, last_action, last_observation, previous_observation
# tgz_agent_path = '/kaggle_simulations/agent/'
# normal_agent_path = '/kaggle/working'
tgz_agent_path = './'
normal_agent_path = './'
model_name = os.path.join('Data',"ppo_v_1oppponents4thTry")
num_previous_observations = 0
epsilon = 0
init = False
debug = False
try:
model
except NameError:
init = True
else:
if model == None:
init = True
initializing
if init:
# initializations
defaults = [configuration.rows,
configuration.columns,
configuration.hunger_rate,
configuration.min_food]
model = MyNN()
last_action = -1
last_observation = []
previous_observation = []
file_name = os.path.join(normal_agent_path, f'{model_name}.pt')
if not os.path.exists(file_name):
file_name = os.path.join(tgz_agent_path, f'{model_name}.pt')
model.load_state_dict(th.load(file_name), strict=False)
obs_prep = ObservationProcessor(configuration.rows, configuration.columns, configuration.hunger_rate,
configuration.min_food)
# maintaint list of last observations
if num_previous_observations > 0 and len(last_observation) > 0:
# Not initial step, t=0
previous_observation.append(last_observation)
# Keep list constrained to max length
if len(previous_observation) > num_previous_observations:
del previous_observation[0]
# Convert to grid encoded with CellState values
aux_observation = [obs_prep.process_env_obs(observation)]
last_observation = aux_observation
if num_previous_observations > 0 and len(previous_observation) == 0:
# Initial step, t=0
previous_observation = [last_observation for _ in range(num_previous_observations)]
if num_previous_observations > 0:
aux_observation = np.concatenate((*previous_observation, last_observation), axis=0)
else:
aux_observation = last_observation
# predict with aux_observation.shape = (last_observations x rows x cols)
tensor_obs = th.Tensor([aux_observation])
n_out = model(tensor_obs) # Example: tensor([[0.2742, 0.2653, 0.2301, 0.2303]], grad_fn=<SoftmaxBackward>)
# choose probabilistic next move based on prediction outputs
# with epsilon probability of fully random, always avoid opposite of last move
actions = [action.value for action in Action]
weights = list(n_out[0].detach().numpy())
if last_action != -1:
# Avoid dying by stupidity xD
remove_index = actions.index(obs_prep.opposite(Action(last_action)).value)
del actions[remove_index]
del weights[remove_index]
random = False
min_value = abs(min(weights))
weights = [min_value + w + 1e-5 for w in weights] # Total of weights must be greater than zero
# Reduce weight to penalize bad moves (collisions, etc...)
# ---------------------------------------------------------------------------
line in sys.stdin])
# Then iterate over it:
for line in full_stdin.splitlines():
print(line)
# Long Hand:
import sys
L = []
for line in sys.stdin:
L += [line]
full_stdin = "".join(L)
def option_2__raw_input():
    """If We're given line count:"""
    # NOTE: demo/cheat-sheet code -- blocks on stdin if actually executed.
    # Longhand:
    L = []
    for _ in range(int(input())):
        L += [input()]
    full_stdin = "\n".join(L)
    # Shorthand:
    full_stdin = "\n".join([input() for _ in range(int(input()))])
    """If not:"""
    # Echo every line until EOF terminates input().
    try:
        while True:
            print(input())
    except EOFError:
        pass
# Method to (probably) determine if your code runs on HackerRank
# (HackerRank's sandbox does not set the USER environment variable).
import os
if "USER" not in os.environ:
    print("Running on HackerRank")
    raise SystemExit # so no else needed for non-Hacker Rank code below. Allows you to easily copy & paste code between IDE/HackerRank.
# Repo with good solutions + explanations:
# https://github.com/clfm/HackerRank
def IterTools____Cartesian_Permutation_combination(): # TAG_Iterators
    """Cheat-sheet notes on itertools: islice, chain, product, permutations,
    combinations (with/without replacement) and groupby."""
    # 807293e9da914ce69ea32f34e5ffd1f7 (cheatsheet link)
    def itertools_isslice():
        # Iterate over a section of an array in a memory friendly way.
        from itertools import islice
        L = [1,2,3,4]
        for i in islice(L, 1, 3): # itertools.isslice(Iter, [start], stop [, step])
            print(i) # 2, 3
    def itertools_chain():
        # Iterate over connected iterables in a memory friendly way chain(*args)
        from itertools import chain
        L1 = [1,2,3]
        L2 = [4,5,6]
        L3 = [7,8,9]
        for i in chain(L1, L2, L3):
            print(i) # 1 ... 9
    from itertools import combinations, permutations
    def cartesian_product():
        from itertools import product
        list(product([1,2], ["a", "b"])) # [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')] # https://www.hackerrank.com/challenges/itertools-product/problem
    def premutations():
        from itertools import permutations
        # permutations(<ITER>, select_count)
        list(permutations("HACK", 2)) # [('H', 'A'), ('H', 'C'), .... https://www.hackerrank.com/challenges/itertools-permutations/problem
    def combinations():
        from itertools import combinations
        list(combinations("ABC", 2))
        # [('A', 'B'), ('A', 'C'), ('B', 'C')]
    def combinations_vs_permutations():
        # Permutaiton order matters. (mnemonic: 1) pad lock is permutation lock. Lottery is permutation.
        # Combination order doesn't matter.
        from itertools import combinations, permutations
        list(combinations("ABC", 2))
        # [('A', 'B'), ('A', 'C'), ('B', 'C')]
        list(permutations("ABC", 2)) # n!
        # [('A', 'B'), ('B', 'A'), ... ('A', 'C'),('B', 'C'), ('C', 'A'), ('C', 'B')]
        # https://betterexplained.com/articles/easy-permutations-and-combinations/
    def combination_with_replacement():
        # Combinations with Replacement: https://www.hackerrank.com/challenges/itertools-combinations-with-replacement/problem
        from itertools import combinations_with_replacement
        list(combinations_with_replacement([1, 2], 2))
        #[(1, 1), (1, 2), (2, 2)]
        list(combinations([1, 2], 2))
        #[(1, 2)]
    from itertools import groupby
    # GroupBy # https://www.hackerrank.com/challenges/compress-the-string/problem
    # groupby yields (key, group-iterator) pairs for runs of equal items.
    for i in groupby("99995599"):
        print((int(i[0]), len(list(i[1]))))
    #(9, 4) 9 occured 4 times...
    #(5, 2)
    #(9, 2)
    # Useful iterators to reduce in loops:
    # https://docs.python.org/2/library/itertools.html#itertools.imap
def Misc__things_learned():
    """Assorted Python gotchas: identity vs equality, default-arg evaluation,
    late-binding closures, tuple assignment, benchmarking, recursion limits."""
    def none_comparison_via_is():
        obj = None
        if obj == None:
            print("Incorrect")
        if obj is None: #is not
            print("Correct")
        # is -> Object Identity
        # == Equality between two objects #Height of two friends.
        # 'is' is a bit faster since '==' does a dictionary lookup.
        # Ref: http://jaredgrubb.blogspot.com/2009/04/python-is-none-vs-none.html
        # E.g Problem: https://www.hackerrank.com/challenges/find-the-merge-point-of-two-joined-linked-lists/problem?h_l=interview&playlist_slugs%5B%5D%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D%5B%5D=linked-lists
    def gotcha_default_args_evaluated_only_once():
        # https://docs.python-guide.org/writing/gotchas/
        # ex:
        # The `cached` dict is created once at def time and shared by every call.
        def caching_square_func(i, cached=dict()):
            if i in cached:
                print(i, "already cached.")
                return cached[i]
            else:
                print(i, "will be cached")
                isqr = i * i
                cached[i] = isqr
                return isqr
        # for i in [2,3,2,3]:
        #     caching_square_func(i)
    def gotcha_late_binding_closure():
        i = 5
        f = lambda x: x + i # i is 'read/evaluated' at function call time rather than define time.
        i = 10
        f(1)
        #11
        # Hack/solution. Bind arg at def time:
        j = 5
        g = lambda x, j=j: x + j # << j=j
        j = 10
        g(1)
        # 6
        # More advanced: https://docs.python-guide.org/writing/gotchas/
    def evaluated_first_assigned_after():
        # The whole right-hand side is evaluated before any name is rebound.
        a = 1
        b = 2
        c = 3
        a, b, c = b, c, 4
        a, b, c
        #(2, 3, 4)
    def benchmarking_and_timing_and_memory_profiling():
        #### Manual:
        from time import time
        t0 = time()
        # < do something.
        print(('%10s %5.3f seconds' % ("test", time() - t0))) # elapsed
        #### Via function
        import timeit
        def func():
            max([1, 2, 3]) # code to benchmark
        timeit.timeit(func, number=10000) # test it 10,000 times. Default number is 1000000
        # from memory_profiler import profile # add '@profile' to method and run.
        # cd ~/git/study/python2 && mprof run problemPy2.py && mprof plot
    def recursion_limited_to_991():
        # Python's recursion is limited to under 1000 items. Thus use data structures instead.
        def goDeep(x):
            print(x)
            goDeep(x + 1)
        goDeep(1)
        #989
        #990
        #991
        #..
        # RuntimeError: maximum recursion depth exceeded
class COLLECTIONS():
def Counter(self):
# Count items in an iterable.
# Sorted by count & order encountered.
from collections import Counter #note capital C
c = Counter(['a', 'a', 'a', 'b', 'a', 'b', 'c']) # -> Counter({'a': 4, 'b': 2, 'c': 1}) #key, value
list(c.items()) # [('a', 4), ('c', 1), ('b', 2)]
list(c) # get a list of unique keys. ['a,'b','c']
list(c.keys()) # if X in c.keys()
list(c.values()) # less useful.
c.update([9,8]) # add keys & count them to counter.
c.most_common()[:1] # get X number of most common keys. O(n log k) [:1] = most common. [-1:] least common.
# complexity ref: https://stackoverflow.com/questions/29240807/python-collections-counter-most-common-complexity/29240949
c.clear()
c[1] -= 1 # dercement index 1 by 1.
c[1] # print value of 1.
del c[1]
# prob (easy): https://www.hackerrank.com/challenges/collections-counter/problem
# prob (medium): https://www.hackerrank.com/challenges/most-commons/problem
# ref: https://docs.python.org/2/library/collections.html#collections.Counter
def defaultdic(self):
# TAG_optional
# Like dictionary, except provides default value if item not in list.
from collections import defaultdict
# d = defaultdict(CALLABLE) Ex: lambda:-1, int, list, set, # int -> 0
# E.g 1:
seen = defaultdict(lambda: 0)
seen['yellow'] += 1
# E.g 2:
d = defaultdict(list)
d['key'].append(1)
d['key'].append(2)
d['leo'].append(42)
list(d.items())
#[('key', [1, 2]), ('leo', [42])]
# Problem: https://www.hackerrank.com/challenges/defaultdict-tutorial/problem
# Ref: https://docs.python.org/2/library/collections.html#collections.Counter
def namedtuple(self):
# Useful for storing named values.
from collections import namedtuple
Point = namedtuple('Point', ['x', 'y'])
p = Point(1, y=3) # pass args via args or kwargs
print(p.x, p.y) # 1 # 3
Person = namedtuple("Person", 'name,age') # comma separated.
leo = Person("leo", 31)
# Person(name='leo', age=31)
Car = namedtuple("Car", "model age mph") # space separated.
# ..
# Problem: https://www.hackerrank.com/challenges/py-collections-namedtuple/problem
def OrderedDic(self):
    # OrderedDict: a dict that remembers key insertion order.
    from collections import OrderedDict
    odict = OrderedDict()
    odict[2] = 'a'
    odict[1] = 'b'
    odict[3] = 'c'
    # -> OrderedDict([(2, 'a'), (1, 'b'), (3, 'c')])
    list(odict)                       # keys in insertion order.
    odict.move_to_end(1, last=False)  # last=False moves the key to the front.
    next(iter(odict.items()))         # first (key, value) pair.
    odict.popitem(last=False)         # pop first item; last=True pops the last.
def OrderedCounter_custom(self):
    # OrderedCounter: Counter + OrderedDict via cooperative inheritance.
    # Properties:
    # - provides a count for each key,
    # - remembers the order in which keys were first added,
    # - application tip: consider pre-sorting the input.
    from collections import Counter, OrderedDict
    class OrderedCounter(Counter, OrderedDict):
        pass
    oc = OrderedCounter(sorted("zzaabbbiiiiiiiiiiff"))
    # OrderedCounter({'i': 10, 'b': 3, 'a': 2, 'f': 2, 'z': 2})
    oc.most_common()[:3]
    # [('i', 10), ('b', 3), ('a', 2)]
    # Prob: https://www.hackerrank.com/challenges/most-commons/problem
    # ref: https://www.hackerrank.com/challenges/most-commons/forum/comments/220882
    # ref2: https://codefisher.org/catch/blog/2015/06/16/how-create-ordered-counter-class-python/
def dequeue(self):
    # deque ("double-ended queue"): O(1) append/pop at either end.
    from collections import deque
    dq = deque([1, 2, 3])
    dq.append(4)      # add at the right end
    dq.appendleft(0)  # add at the left end
    dq.pop()          # remove from the right end
    dq.popleft()      # remove from the left end
    len(dq)           # supports len(), iteration, indexing.
    # Prob (easy): https://www.hackerrank.com/challenges/py-collections-deque/problem
    # Prob (medium): https://www.hackerrank.com/challenges/piling-up/problem
############## RAW
class Algorithms_Sort:
    """Study notes: reference implementations of sorting algorithms."""

    def BubbleSort(self):
        # First attempt: keep sweeping the whole list until one full pass
        # completes with no swap (the list is then sorted).
        # 3 4 2 1
        # 3 2 4 1
        # 2 3 4 1
        # 2 3 1 4
        # 2 1 3 4
        # 1 2 3 4
        def leo_bubble_sort(seq):
            dirty = True
            while dirty:
                dirty = False
                for idx in range(len(seq) - 1):
                    if seq[idx] > seq[idx + 1]:
                        seq[idx], seq[idx + 1] = seq[idx + 1], seq[idx]
                        dirty = True
            return seq
        # Better approach: each sweep "bubbles" the largest remaining item
        # to the end, so pass i can skip the last i elements entirely.
        # 3 4 2 1
        # 3 2 4 1  # pass 1: last item settled.
        # 3 2 1 4
        # 3 1 2 4  # pass 2: last two settled.
        # 1 3 2 4  # pass 3: last three settled.
        # 1 2 3 4
        def bubble_sort(seq):
            for done in range(len(seq) - 1):
                for j in range(len(seq) - 1 - done):
                    if seq[j] > seq[j + 1]:
                        seq[j], seq[j + 1] = seq[j + 1], seq[j]
            return seq
        # Ref tutorial: https://www.youtube.com/watch?v=YHm_4bVOe1s
        # Ref code:
import argparse
import sys
import os
import fabric
import random
import json
import io
import yaml
import shutil
import subprocess
from github import Github
import validators
from invoke import UnexpectedExit
from configparser import ConfigParser
# Per-language install metadata: which installer binary to invoke, which
# manifest file lists the project dependencies, and the default packages.
LOC_INSTALLER = {
    'python3.6': {
        'installer': 'pip',
        'filename': 'requirements.txt',
        'packages': 'ipython pip'
    },
    'nodejs': {
        'installer': 'npm',
        'filename': 'package.json',
        # NOTE(review): same value as the python entry — confirm these are
        # really the intended default npm packages.
        'packages': 'ipython pip'
    }
}
# from sqlalchemy.orm import sessionmaker
# from sqlalchemy import create_engine
# export LOC_CONTAINERS=/Users/$(whoami)/Documents/lmokto/containers
# LOC_CONTAINERS = os.environ.get('LOC_CONTAINERS')
# export LOC_REPOSITORIES=/Users/$(whoami)/Documents/lmokto/repositories
# LOC_REPOSITORIES = os.environ.get('LOC_REPOSITORIES')
# export LOC_PROFILES=/Users/$(whoami)/Documents/lmokto/profiles
# LOC_PROFILES = os.environ.get('LOC_PROFILES')
# export LOC_ENVIRONMENTS=/Users/$(whoami)/Documents/lmokto/environments
# LOC_CONTAINERS = os.environ.get('LOC_ENVIRONMENTS')
# export LOC_CONDA=/Users/$(whoami)/opt/anaconda3/bin/conda
# LOC_CONDA = os.environ.get('LOC_CONDA')
# export LOC_SETTINGS=<PATH>/settings.ini
# LOC_SETTINGS = os.environ.get('LOC_SETTINGS')
# https://ipython.readthedocs.io/en/stable/interactive/magics.html
# https://jupyter-client.readthedocs.io/en/stable/wrapperkernels.html
# https://virtualenvwrapper.readthedocs.io/en/latest/install.html#basic-installation
# https://github.com/yunabe/tslab
class AttrDict(dict):
    """Dict whose keys are also readable/writable as attributes."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Point the attribute table at the mapping itself so that
        # obj.key and obj['key'] always stay in sync.
        self.__dict__ = self
class Box(object):
    """In-memory representation of a sandbox ("box") and its metadata."""

    # Attributes every sandbox record must carry.
    attrs = [
        'name', 'profile', 'environment', 'repository',
        'location', 'version', 'language'
    ]
    # NOTE(review): evaluated once at class-creation time, so every Box in a
    # process shares this default id until assignment() overrides it —
    # confirm this is intended.
    _id = random.randint(1000, 9999)

    def __init__(self, instance=None):
        """Build a Box, optionally hydrating it from an exported record.

        Args:
            instance: optional dict with '_id' and 'sandbox' keys (as
                produced by retrieve()); when falsy, every attribute in
                ``attrs`` is initialised to the empty string.
        """
        super().__init__()
        # Bug fix: the original default was a mutable ``instance={}``,
        # which is shared across all calls; None is the safe sentinel.
        if instance:
            self.assignment(instance)
        else:
            # Plain loop instead of a side-effect comprehension.
            for k in self.attrs:
                setattr(self, k, '')

    def retrieve(self):
        """Return the serialisable ``{'sandbox': ..., '_id': ...}`` view."""
        return {
            'sandbox': self.__dict__,
            '_id': self._id
        }

    def update(self, response):
        """Copy environment/profile/repository/location from *response*."""
        self.environment = response['environment']
        self.profile = response['profile']
        self.repository = response['repository']
        self.location = response['location']
        self.version = "1.0.0"

    def assignment(self, instance):
        """Populate attributes from an exported record.

        Raises:
            ValueError: when *instance* lacks a truthy '_id' or 'sandbox'.
        """
        sandbox = instance.get('sandbox', None)
        self._id = instance.get('_id', 0)
        if sandbox and self._id:
            for k in self.attrs:
                # Every expected attribute must be present in the record.
                assert k in sandbox.keys()
                setattr(self, k, sandbox.get(k))
            return True
        raise ValueError('instance without id and sandbox')
class Sandboxes(Box):
    """Placeholder subclass of Box; adds no behaviour of its own yet."""
    pass
class ManagerContext(object):
    """Registry of sandbox boxes plus access to on-disk settings/exports."""

    def __init__(self, env):
        """Load INI settings from *env* and start with an empty registry."""
        super().__init__()
        self.settings = self.get_settings(env)
        self.boxes = []

    def registry(self, box):
        """Register *box* (a retrieve() dict) unless its _id is already known."""
        ids = [_b['_id'] for _b in self.boxes if _b]
        if box['_id'] not in ids:
            self.boxes.append(box)
            return {'status': 'successful'}
        return {'status': 'failed'}

    def retrieve_sandboxes(self):
        """Load every exported sandbox JSON found in the boxes directory."""
        retrieves = {
            'containers': []
        }
        loc_boxes = os.path.join(
            self.settings.contexts.location,
            self.settings.containers.boxes
        )
        for box in os.listdir(loc_boxes):
            name = box.replace('.json', '')
            retrieves['containers'].append(self.import_sandbox(name))
        return retrieves

    def import_sandbox(self, name):
        """Read ``<name>.json`` from the boxes dir; failure dict if absent."""
        try:
            loc_boxes = os.path.join(
                self.settings.contexts.location,
                self.settings.containers.boxes
            )
            box_name = name + '.json'
            if box_name in os.listdir(loc_boxes):
                loc_sandbox = os.path.join(loc_boxes, box_name)
                # Fix: close the file handle (original leaked an open()).
                with open(loc_sandbox, 'r') as handle:
                    return json.load(handle)
            return {'status': 'failed'}
        except Exception as Error:
            raise Error

    def export_sandbox(self, box):
        """Write the registered box with the same _id to ``<name>.json``."""
        try:
            box = self.get_sandbox('_id', box.get('_id'))
            loc_file = os.path.join(
                os.path.join(
                    self.settings.contexts.location,
                    self.settings.containers.boxes
                ),
                # Bug fix: the template had no placeholder, so every box
                # was written to the same literal file name.
                '{filename}.json'.format(
                    filename=box['sandbox']['name']
                )
            )
            with io.open(loc_file, 'w', encoding='utf8') as out:
                str_ = json.dumps(
                    box, indent=4, sort_keys=True,
                    separators=(',', ': '), ensure_ascii=False
                )
                out.write(str(str_))
            return {'status': 'successful'}
        except Exception as Error:
            return {'status': Error}

    def get_sandbox(self, attr, value):
        """Find a registered box by '_id' or a sandbox attribute; {} if none."""
        for box in self.boxes:
            sandbox = box['sandbox']
            if attr == '_id' and box['_id'] == value:
                return box
            elif sandbox[attr] == value:
                return box
        return {}

    def get_settings(self, env):
        """Parse *env* as INI; raise ValueError when nothing could be read.

        ``_sections`` is private ConfigParser API, but combined with
        AttrDict it yields attribute-style access (settings.a.b).
        """
        try:
            settings = ConfigParser(dict_type=AttrDict)
            settings.read(env)
            if settings._sections:
                return settings._sections
            raise TypeError
        except TypeError:
            raise ValueError('settings was not found')
def get_language(lang):
    """Split a spec like 'python3.6' into its name and version parts.

    Alphabetic characters form the name; digits and dots form the
    version; everything else is dropped.
    """
    name = ''
    version = ''
    for ch in lang:
        if ch.isalpha():
            name += ch
        elif ch.isnumeric() or ch == '.':
            version += ch
    return {
        'name': name,
        'version': version
    }
def generate_yml(name, language, packages=None, install=None):
    """Write a conda environment YAML for *name* and return its path.

    Args:
        name: environment name; output path is <LOC_ENVIRONMENTS>/<name>.yml.
        language: spec such as 'python3.6' (split by get_language()).
        packages: optional extra packages appended to the dependency list.
        install: dict with 'installer', 'dirname' and 'filename' keys
            describing the pip/npm requirements file to reference.

    Layout reference:
    https://github.com/conda/conda/blob/54e4a91d0da4d659a67e3097040764d3a2f6aa16/tests/conda_env/support/advanced-pip/environment.yml
    """
    # Bug fix: the original used mutable defaults ([] / {}), shared
    # across calls; falsy callers get fresh empties instead.
    packages = packages or []
    install = install or {}
    try:
        _lang = get_language(language)
        language = '{0}={1}'.format(_lang['name'], _lang['version'])
        installer = install.get('installer')
        requirements = os.path.join(
            install.get('dirname'),
            install.get('filename')
        )
        if os.path.isfile(requirements):
            # Bug fix: the template had no placeholder, so the literal
            # text was emitted instead of the requirements path.
            file_install = '-r file:{filename}'.format(filename=requirements)
        else:
            file_install = 'ipython'
        dependencies = [language, installer, {installer: [file_install]}]
        if packages:
            # NOTE(review): appends the packages list as a single nested
            # element (original behaviour) — confirm conda accepts this.
            dependencies.append(packages)
        filename = os.path.join(
            LOC_ENVIRONMENTS,
            '{env}.yml'.format(env=name)
        )
        if not os.path.isfile(filename):
            with open(filename, 'w') as outfile:
                yaml.dump({
                    'name': name,
                    'dependencies': dependencies
                }, outfile, default_flow_style=False)
        return filename
    except Exception as Error:
        raise Error
def conda_sync(repository, args):
    """Create a conda env from a cloned repository's requirements file.

    Builds an environment.yml via generate_yml() and feeds it to
    ``conda env create``; returns the normalised response dict.
    """
    try:
        environment = repository['output']['repository']
        installer = {
            'dirname': repository['output']['export'],
            'filename': LOC_INSTALLER[args.language]['filename'],
            'installer': LOC_INSTALLER[args.language]['installer']
        }
        filename = generate_yml(
            environment, args.language, args.packages, installer
        )
        command = '{conda} env create -f {environment}'.format(
            conda=LOC_CONDA,       # absolute path to the conda binary
            environment=filename
        )
        output = terminal.local(command)
        return generate_response(output, {
            'environment': args.environment,
            'export': LOC_ENVIRONMENTS,
            'filename': filename
        })
    except UnexpectedExit as Error:
        raise Error
def conda_remove(name):
    """Delete the conda environment *name* (all packages, no prompt)."""
    try:
        command = '{conda} remove --name {environment} --all --yes'.format(
            conda=LOC_CONDA,  # absolute path to the conda binary
            environment=name
        )
        output = terminal.local(command)
        return generate_response(output, {'environment': name})
    except UnexpectedExit as Error:
        raise Error
def conda_build(name, language, packages):
    """Create a fresh conda env with the given language and packages."""
    try:
        lang = get_language(language)
        command = (
            '{conda} create -yn {env} {language}={version} {packages}'
            ' --no-default-packages'
        ).format(
            conda=LOC_CONDA,          # absolute path to the conda binary
            env=name,                 # environment name
            language=lang['name'],    # e.g. 'python'
            version=lang['version'],  # e.g. '3.6'
            packages=packages         # packages installed by default
        )
        output = terminal.local(command)
        return generate_response(output, {
            'environment': name,
            'language': lang,
            'packages': packages
        })
    except UnexpectedExit as Error:
        raise Error
def conda_export(name):
    """Export environment *name* to <contexts>/<LOC_ENVIRONMENTS>/<name>.yml.

    The ``prefix:`` line is stripped because it embeds a machine-local
    path; returns the normalised response dict.
    """
    try:
        loc_export = os.path.join(
            manager.settings.contexts.location,
            LOC_ENVIRONMENTS
        )
        filename = '{loc}/{env}.yml'.format(
            env=name,        # name of environment
            loc=loc_export   # destination directory for the export
        )
        output = terminal.local(
            # Bug fix: the redirect target had no placeholder, so the
            # export went to a file literally named after the template.
            '{conda} env export -n {env} | grep -v "^prefix: " > {filename}'.format(
                conda=LOC_CONDA,    # path absolute the conda
                env=name,
                filename=filename   # export filename
            )
        )
        response = generate_response(output, {
            'environment': name,
            'export': loc_export,
            'filename': filename
        })
        return response
    except UnexpectedExit as Error:
        raise Error
def profile_build(name):
    """Create an ipython profile for env *name* under the profiles dir."""
    try:
        loc_export = os.path.join(
            manager.settings.contexts.location,
            manager.settings.profiles.location
        )
        iprofile = '[{name}]'.format(name=name)
        command = (
            '{conda} run -n {env} ipython profile create {profile}'
            ' --ipython-dir {export}'
        ).format(
            conda=LOC_CONDA,    # absolute path to the conda binary
            env=name,           # environment to run inside
            profile=iprofile,   # profile name (bracket-wrapped)
            export=loc_export   # directory where the profile is saved
        )
        output = terminal.local(command)
        return generate_response(output, {
            'env': name,
            'profile': iprofile,
            'export': loc_export
        })
    except UnexpectedExit as Error:
        raise Error
def str2bool(v):
    """Coerce an argparse string (or bool) to bool; raise on anything else."""
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def start_repository(folder, start_git):
    """Create the repository folder and optionally ``git init`` it."""
    try:
        loc_folder = os.path.join(
            os.path.join(
                manager.settings.contexts.location,
                manager.settings.repositories.location
            ),
            folder
        )
        mkdir_resp = mkdir_folder(loc_folder)
        if start_git:
            output = terminal.local('cd {folder} && git init'.format(
                folder=loc_folder
            ))
        else:
            # No git requested: synthesise a successful no-op result.
            output = {'command': '', 'ok': True}
        return generate_response(output, {
            'mkdir': mkdir_resp,
            'export': loc_folder
        })
    except UnexpectedExit as Error:
        raise Error
def git_create(name):
    """Create GitHub repository *name* and clone it locally.

    Returns the clone response (remote URL attached), or False when
    anything in the GitHub flow fails.
    """
    try:
        git = Github(manager.settings.repositories.token)
        repo = git.get_user().create_repo(name)
        response = git_clone(repo.clone_url)
        response['output']['url'] = repo.clone_url
        return response
    except Exception as Error:
        # Best-effort: callers treat a falsy result as "not created".
        return False
def git_clone(repository):
    """Clone *repository* (a git URL) under LOC_REPOSITORIES.

    Returns a response dict, or the string 'Existing repository' when
    the target folder is already present.
    """
    try:
        location = repository
        repository = repository.split('/')[-1].replace('.git', '')
        folder_path = os.path.join(
            LOC_REPOSITORIES,
            repository
        )
        # Guard clause instead of the original nested if.
        if os.path.isdir(folder_path):
            return 'Existing repository'
        output = terminal.local(
            'git clone {url} {location}'.format(
                url=location,
                location=folder_path
            )
        )
        return generate_response(output, {
            'url': repository,
            'repository': repository,
            'export': folder_path
        })
    except UnexpectedExit as Error:
        raise Error
def mkdir_folder(name):
    """Create directory *name*; return 'Existing folder' if already there."""
    try:
        # Guard clause instead of the original nested if.
        if os.path.isdir(name):
            return 'Existing folder'
        output = terminal.local('mkdir {folder}'.format(folder=name))
        return generate_response(output, {
            'export': name
        })
    except UnexpectedExit as Error:
        raise Error
def generate_response(response, output):
    """Normalise a command result into {'status', 'command', 'output'}.

    Accepts either a plain dict (with 'ok'/'command' keys) or any object
    exposing ``.ok``/``.command`` attributes (e.g. an invoke Result).
    """
    status, command = None, None
    if isinstance(response, dict):
        status = response.get('ok', None)
        # Bug fix: the original discarded this lookup's result, so
        # 'command' was always None for dict responses.
        command = response.get('command', None)
    elif isinstance(response, object):
        status = getattr(response, 'ok', None)
        command = getattr(response, 'command', None)
    return {
        'status': 'successful' if status else 'failed',
        'command': command,
        'output': output
    }
def clean_response(response):
    """Flatten repository/environment/profile responses into relative paths."""
    location = response['repository']['output']['export']
    repository = response['repository']['output']['url']
    environment = os.path.join(
        response['environment']['output']['export'],
        response['environment']['output']['environment']
    )
    profile = os.path.join(
        response['profile']['output']['export'],
        'profile_{0}'.format(response['profile']['output']['profile'])
    )
    cleaned = {
        'location': location,
        'environment': environment + '.yml',
        'profile': profile,
        'repository': repository
    }
    # Strip the absolute contexts prefix so the stored paths are relative.
    loc_contexts = manager.settings.contexts.location
    return {k: v.replace(loc_contexts + '/', '') for k, v in cleaned.items()}
def languages(astring):
    """argparse type-checker: accept only python/nodejs language specs.

    Returns the original string unchanged when valid.

    Raises:
        argparse.ArgumentTypeError: for any other language name.
    """
    _language = get_language(astring)
    if _language['name'] not in ['python', 'nodejs']:
        # Bug fix: reworded the broken error message ('cannot soported').
        raise argparse.ArgumentTypeError('Language not supported')
    return astring
class Args:
    """Empty attribute container (fields are assigned by callers)."""
    pass
def create_box(args, session=None):
    """End-to-end sandbox creation: repo, conda env, ipython profile, export."""
    try:
        sandbox = Box()
        sandbox.name = args.name
        sandbox.language = args.language
        # 1. Create the initial folder and repository (git init <repo name>).
        starter = git_create(sandbox.name)
        # 2. Build the conda environment and export its yml to environments/conda.
        build_env = conda_build(sandbox.name, sandbox.language, args.packages)
        export_env = conda_export(sandbox.name)
        # 3. Create the ipython profile in its folder.
        build_profile = profile_build(sandbox.name)
        # 4. Assemble the <box>.json payload from the collected responses.
        response = clean_response({
            'repository': starter, 'environment': export_env, 'profile': build_profile
        })
        sandbox.update(response)
        # 5. Register the box and export its metadata to containers/boxes.
        box = sandbox.retrieve()
        response = manager.registry(box)
        manager.export_sandbox(box)
    except Exception as Error:
        raise Error
def remove_box(args, session=None):
try:
# search sandbox by name
name = args.name
box = manager.import_sandbox(name)
# build a box
sandbox = Box()
# | |
handling.
rpc = self._transport._wrapped_methods[
self._transport.update_certificate_map_entry
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("certificate_map_entry.name", request.certificate_map_entry.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
certificate_manager.CertificateMapEntry,
metadata_type=certificate_manager.OperationMetadata,
)
# Done; return the response.
return response
def delete_certificate_map_entry(
    self,
    request: Union[
        certificate_manager.DeleteCertificateMapEntryRequest, dict
    ] = None,
    *,
    name: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
    r"""Deletes a single CertificateMapEntry.

    Args:
        request (Union[google.cloud.certificate_manager_v1.types.DeleteCertificateMapEntryRequest, dict]):
            The request for the ``DeleteCertificateMapEntry`` method;
            mutually exclusive with the flattened ``name`` argument.
        name (str):
            Required. Name of the certificate map entry to delete, in the
            format
            ``projects/*/locations/*/certificateMaps/*/certificateMapEntries/*``.
        retry (google.api_core.retry.Retry): Which errors, if any, should
            be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings sent along with the
            request as metadata.

    Returns:
        google.api_core.operation.Operation:
            A long-running operation whose result type is
            :class:`google.protobuf.empty_pb2.Empty`.

    Raises:
        ValueError: If both ``request`` and ``name`` are provided.
    """
    # Flattened arguments may not be combined with a full request object.
    flattened = any([name])
    if request is not None and flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (or None) into a proto request; skip the copy when the
    # caller already handed us the right type.
    if not isinstance(
        request, certificate_manager.DeleteCertificateMapEntryRequest
    ):
        request = certificate_manager.DeleteCertificateMapEntryRequest(request)
    # Apply the flattened field, if given.
    if name is not None:
        request.name = name
    # The pre-wrapped method carries retry/timeout defaults and friendly
    # error handling.
    rpc = self._transport._wrapped_methods[
        self._transport.delete_certificate_map_entry
    ]
    # Routing header so the backend can dispatch on the resource name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )
    # Issue the call, then wrap the result in an operation future.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return operation.from_gapic(
        response,
        self._transport.operations_client,
        empty_pb2.Empty,
        metadata_type=certificate_manager.OperationMetadata,
    )
def list_dns_authorizations(
    self,
    request: Union[certificate_manager.ListDnsAuthorizationsRequest, dict] = None,
    *,
    parent: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDnsAuthorizationsPager:
    r"""Lists DnsAuthorizations in a given project and location.

    Args:
        request (Union[google.cloud.certificate_manager_v1.types.ListDnsAuthorizationsRequest, dict]):
            The request for the ``ListDnsAuthorizations`` method; mutually
            exclusive with the flattened ``parent`` argument.
        parent (str):
            Required. The project and location to list from, in the
            format ``projects/*/locations/*``.
        retry (google.api_core.retry.Retry): Which errors, if any, should
            be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings sent along with the
            request as metadata.

    Returns:
        google.cloud.certificate_manager_v1.services.certificate_manager.pagers.ListDnsAuthorizationsPager:
            Iterable over results; additional pages are resolved
            automatically during iteration.

    Raises:
        ValueError: If both ``request`` and ``parent`` are provided.
    """
    # Flattened arguments may not be combined with a full request object.
    flattened = any([parent])
    if request is not None and flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (or None) into a proto request; skip the copy when the
    # caller already handed us the right type.
    if not isinstance(request, certificate_manager.ListDnsAuthorizationsRequest):
        request = certificate_manager.ListDnsAuthorizationsRequest(request)
    # Apply the flattened field, if given.
    if parent is not None:
        request.parent = parent
    # The pre-wrapped method carries retry/timeout defaults and friendly
    # error handling.
    rpc = self._transport._wrapped_methods[self._transport.list_dns_authorizations]
    # Routing header so the backend can dispatch on the parent resource.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
    )
    # Issue the call, then wrap the response in a pager that fetches
    # subsequent pages on iteration.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return pagers.ListDnsAuthorizationsPager(
        method=rpc,
        request=request,
        response=response,
        metadata=metadata,
    )
def get_dns_authorization(
    self,
    request: Union[certificate_manager.GetDnsAuthorizationRequest, dict] = None,
    *,
    name: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> certificate_manager.DnsAuthorization:
    r"""Gets details of a single DnsAuthorization.

    Args:
        request (Union[google.cloud.certificate_manager_v1.types.GetDnsAuthorizationRequest, dict]):
            The request for the ``GetDnsAuthorization`` method; mutually
            exclusive with the flattened ``name`` argument.
        name (str):
            Required. Name of the dns authorization to describe, in the
            format ``projects/*/locations/*/dnsAuthorizations/*``.
        retry (google.api_core.retry.Retry): Which errors, if any, should
            be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings sent along with the
            request as metadata.

    Returns:
        google.cloud.certificate_manager_v1.types.DnsAuthorization:
            A DnsAuthorization resource describing a way to perform
            domain authorization for certificate issuance.

    Raises:
        ValueError: If both ``request`` and ``name`` are provided.
    """
    # Flattened arguments may not be combined with a full request object.
    flattened = any([name])
    if request is not None and flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (or None) into a proto request; skip the copy when the
    # caller already handed us the right type.
    if not isinstance(request, certificate_manager.GetDnsAuthorizationRequest):
        request = certificate_manager.GetDnsAuthorizationRequest(request)
    # Apply the flattened field, if given.
    if name is not None:
        request.name = name
    # The pre-wrapped method carries retry/timeout defaults and friendly
    # error handling.
    rpc = self._transport._wrapped_methods[self._transport.get_dns_authorization]
    # Routing header so the backend can dispatch on the resource name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )
    # Issue the call and return the resource directly (unary response).
    return rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def create_dns_authorization(
self,
request: Union[certificate_manager.CreateDnsAuthorizationRequest, dict] = None,
*,
parent: str = None,
dns_authorization: certificate_manager.DnsAuthorization = None,
dns_authorization_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, | |
raise ValueError(
"Found kernel containing only non-finite values.\
Please increase kernel size")
median_corrected_arr = np.median(
kernel_cropped_arr[np.isfinite(kernel_cropped_arr)])
# Replacing bad data with finite median
projection[x_idx, y_idx] = median_corrected_arr
callback(arr.shape[0], 'Nonfinite median filter', ' prjs')
return arr
def sobel_filter(arr, axis=0, ncore=None):
    """
    Apply Sobel filter to 3D array along specified axis.

    Parameters
    ----------
    arr : ndarray
        Input array.
    axis : int, optional
        Axis along which sobel filtering is performed.
    ncore : int, optional
        Number of cores that will be assigned to jobs.

    Returns
    -------
    ndarray
        3D array of same shape as input.
    """
    arr = dtype.as_float32(arr)
    out = np.empty_like(arr)
    if ncore is None:
        ncore = mproc.mp.cpu_count()
    with cf.ThreadPoolExecutor(ncore) as e:
        slc = [slice(None)] * arr.ndim
        for i in range(arr.shape[axis]):
            slc[axis] = i
            # Bug fix: index with a tuple — indexing an ndarray with a
            # plain list of slices is an error on modern NumPy (it was
            # only deprecated behaviour before). This matches the
            # tuple(slc) usage in remove_outlier() below.
            e.submit(filters.sobel, arr[tuple(slc)], output=out[tuple(slc)])
    return out
def remove_nan(arr, val=0., ncore=None):
    """
    Replace NaN values in array with a given value.

    Parameters
    ----------
    arr : ndarray
        Input array.
    val : float, optional
        Values to be replaced with NaN values in array.
    ncore : int, optional
        Number of cores that will be assigned to jobs.

    Returns
    -------
    ndarray
        Corrected array.
    """
    # Work in float32; the in-place numexpr update below writes into arr.
    arr = dtype.as_float32(arr)
    val = np.float32(val)
    # numexpr evaluates elementwise with `ncore` threads; `arr != arr`
    # is true only for NaN, and out=arr performs the update in place.
    with mproc.set_numexpr_threads(ncore):
        ne.evaluate('where(arr!=arr, val, arr)', out=arr)
    return arr
def remove_neg(arr, val=0., ncore=None):
    """
    Replace negative values in array with a given value.

    Parameters
    ----------
    arr : ndarray
        Input array.
    val : float, optional
        Values to be replaced with negative values in array.
    ncore : int, optional
        Number of cores that will be assigned to jobs.

    Returns
    -------
    ndarray
        Corrected array.
    """
    # Work in float32; the in-place numexpr update below writes into arr.
    arr = dtype.as_float32(arr)
    val = np.float32(val)
    # numexpr evaluates elementwise with `ncore` threads; negatives are
    # replaced by `val`, and out=arr performs the update in place.
    with mproc.set_numexpr_threads(ncore):
        ne.evaluate('where(arr<0, val, arr)', out=arr)
    return arr
def remove_outlier(arr, dif, size=3, axis=0, ncore=None, out=None):
    """
    Remove high intensity bright spots from a N-dimensional array by chunking
    along the specified dimension, and performing (N-1)-dimensional median
    filtering along the other dimensions.

    Parameters
    ----------
    arr : ndarray
        Input array.
    dif : float
        Expected difference value between outlier value and
        the median value of the array.
    size : int
        Size of the median filter.
    axis : int, optional
        Axis along which to chunk.
    ncore : int, optional
        Number of cores that will be assigned to jobs.
    out : ndarray, optional
        Output array for result. If same as arr, process
        will be done in-place.

    Returns
    -------
    ndarray
        Corrected array.
    """
    # Median-filtered copy of the input; the kernel spans every axis
    # except the chunking one. NOTE: the locals `arr`, `tmp` and `dif`
    # are read by name in the numexpr expression below.
    tmp = np.empty_like(arr)
    ncore, chnk_slices = mproc.get_ncore_slices(arr.shape[axis], ncore=ncore)
    kernel = [size] * arr.ndim
    kernel[axis] = 1
    with cf.ThreadPoolExecutor(ncore) as executor:
        chunk = [slice(None)] * arr.ndim
        for idx in range(ncore):
            chunk[axis] = chnk_slices[idx]
            executor.submit(filters.median_filter,
                            arr[tuple(chunk)],
                            size=kernel,
                            output=tmp[tuple(chunk)])
    arr = dtype.as_float32(arr)
    tmp = dtype.as_float32(tmp)
    dif = np.float32(dif)
    # Keep values unless they exceed the local median by at least `dif`.
    with mproc.set_numexpr_threads(ncore):
        out = ne.evaluate('where(arr-tmp>=dif,tmp,arr)', out=out)
    return out
def remove_outlier1d(arr, dif, size=3, axis=0, ncore=None, out=None):
    """
    Remove high intensity bright spots from an array, using a one-dimensional
    median filter along the specified axis.

    Parameters
    ----------
    arr : ndarray
        Input array.
    dif : float
        Expected difference value between outlier value and
        the median value of the array.
    size : int
        Size of the median filter.
    axis : int, optional
        Axis along which median filtering is performed.
    ncore : int, optional
        Number of cores that will be assigned to jobs.
    out : ndarray, optional
        Output array for result. If same as arr, process
        will be done in-place.

    Returns
    -------
    ndarray
        Corrected array.
    """
    arr = dtype.as_float32(arr)
    dif = np.float32(dif)
    tmp = np.empty_like(arr)
    # Chunk the work along the largest axis other than the filtered one,
    # so the thread pool gets evenly sized pieces.
    other_axes = [i for i in range(arr.ndim) if i != axis]
    largest = np.argmax([arr.shape[i] for i in other_axes])
    lar_axis = other_axes[largest]
    ncore, chnk_slices = mproc.get_ncore_slices(arr.shape[lar_axis],
                                                ncore=ncore)
    filt_size = [1] * arr.ndim
    filt_size[axis] = size
    with cf.ThreadPoolExecutor(ncore) as e:
        slc = [slice(None)] * arr.ndim
        for i in range(ncore):
            slc[lar_axis] = chnk_slices[i]
            # Bug fix: index with tuple(slc) — a plain list of slices is
            # rejected by modern NumPy (matches remove_outlier above).
            e.submit(filters.median_filter,
                     arr[tuple(slc)],
                     size=filt_size,
                     output=tmp[tuple(slc)],
                     mode='mirror')
    # Keep values unless they exceed the local median by at least `dif`.
    with mproc.set_numexpr_threads(ncore):
        out = ne.evaluate('where(arr-tmp>=dif,tmp,arr)', out=out)
    return out
def remove_outlier_cuda(arr, dif, size=3, axis=0):
    """
    Remove high intensity bright spots from a 3D array along axis 0
    dimension using GPU.

    Parameters
    ----------
    arr : ndarray
        Input array.
    dif : float
        Expected difference value between outlier value and
        the median value of the array.
    size : int
        Size of the median filter.
    axis : int, optional
        Axis along which outlier removal is performed.

    Returns
    -------
    ndarray
        Corrected array.

    Example
    -------
    >>> import tomocuda
    >>> tomocuda.remove_outlier_cuda(arr, dif, 5)

    For more information regarding install and using tomocuda, check
    https://github.com/kyuepublic/tomocuda for more information
    """
    arr = dtype.as_float32(arr)
    dif = np.float32(dif)
    try:
        import tomocuda
        # Kernel sizes supported by the CUDA median filter implementation.
        winAllow = range(2, 16)
        if (axis != 0):
            # Bring the requested axis to the front; swapped back below.
            arr = np.swapaxes(arr, 0, axis)
        if size in winAllow:
            prjsize = arr.shape[0]
            # Asymmetric padding handles even kernel sizes.
            loffset = int(size / 2)
            roffset = int((size - 1) / 2)
            imsizex = arr.shape[2]
            imsizey = arr.shape[1]
            filter = tomocuda.mFilter(imsizex, imsizey, prjsize, size)
            out = np.zeros(shape=(prjsize, imsizey, imsizex), dtype=np.float32)
            for step in range(prjsize):
                # Pad each projection symmetrically, run the GPU outlier
                # filter, and collect the result slice by slice.
                im_noisecu = arr[step].astype(np.float32)
                im_noisecu = np.lib.pad(im_noisecu, ((loffset, roffset),
                                                     (loffset, roffset)),
                                        'symmetric')
                im_noisecu = im_noisecu.flatten()
                filter.setCuImage(im_noisecu)
                filter.run2DRemoveOutliner(size, dif)
                results = filter.retreive()
                results = results.reshape(imsizey, imsizex)
                out[step] = results
            if (axis != 0):
                out = np.swapaxes(out, 0, axis)
        else:
            # Unsupported kernel size: fall back to the CPU implementation.
            # NOTE(review): when axis != 0, arr was already axis-swapped
            # above and this fallback result is not swapped back — confirm
            # whether that is intended.
            warnings.warn("Window size not support, using cpu outlier removal")
            out = remove_outlier(arr, dif, size)
    except ImportError:
        # tomocuda not installed: fall back to the CPU implementation.
        warnings.warn("The tomocuda is not support, using cpu outlier removal")
        out = remove_outlier(arr, dif, size)
    return out
def remove_ring(rec,
                center_x=None,
                center_y=None,
                thresh=300.0,
                thresh_max=300.0,
                thresh_min=-100.0,
                theta_min=30,
                rwidth=30,
                int_mode='WRAP',
                ncore=None,
                nchunk=None,
                out=None):
    """
    Remove ring artifacts from images in the reconstructed domain.

    Parameters
    ----------
    rec : ndarray
        Array of reconstruction data.
    center_x : float, optional
        Abscissa location of center of rotation. Defaults to the
        image center.
    center_y : float, optional
        Ordinate location of center of rotation. Defaults to the
        image center.
    thresh : float, optional
        Maximum value of an offset due to a ring artifact.
    thresh_max : float, optional
        Max value for portion of image to filter.
    thresh_min : float, optional
        Min value for portion of image to filter.
    theta_min : int, optional
        Features larger than twice this angle (degrees) will be
        considered a ring artifact. Must be less than 180 degrees.
    rwidth : int, optional
        Maximum width of the rings to be filtered in pixels.
    int_mode : str, optional
        'WRAP' for wrapping at 0 and 360 degrees, 'REFLECT' for
        reflective boundaries at 0 and 180 degrees.
    ncore : int, optional
        Number of cores that will be assigned to jobs.
    nchunk : int, optional
        Chunk size for each core.
    out : ndarray, optional
        Output array for result. If same as rec, the correction is
        done in-place.

    Returns
    -------
    ndarray
        Corrected reconstruction data.
    """
    rec = dtype.as_float32(rec)
    out = rec.copy() if out is None else dtype.as_float32(out)

    dz, dy, dx = rec.shape
    center_x = (dx - 1.0) / 2.0 if center_x is None else center_x
    center_y = (dy - 1.0) / 2.0 if center_y is None else center_y

    # Translate the interpolation-mode name into the integer flag the
    # C extension expects.
    mode_flags = {'wrap': 0, 'reflect': 1}
    try:
        int_mode = mode_flags[int_mode.lower()]
    except KeyError:
        raise ValueError("int_mode should be WRAP or REFLECT")

    if not 0 <= theta_min < 180:
        raise ValueError("theta_min should be in the range [0 - 180)")

    args = (center_x, center_y, dx, dy, dz, thresh_max, thresh_min, thresh,
            theta_min, rwidth, int_mode)

    axis_size = rec.shape[0]
    ncore, nchunk = mproc.get_ncore_nchunk(axis_size, ncore, nchunk)

    # Fan the slab chunks out over a thread pool; the C routine writes
    # into each out[chunk] slice in place.
    with cf.ThreadPoolExecutor(ncore) as executor:
        for offset in range(0, axis_size, nchunk):
            chunk = np.s_[offset:offset + nchunk]
            executor.submit(extern.c_remove_ring, out[chunk], *args)
    return out
def circ_mask(arr, axis, ratio=1, val=0., ncore=None):
    """
    Apply a circular mask to a 3D array, in place on a float32 copy.

    Parameters
    ----------
    arr : ndarray
        Arbitrary 3D array.
    axis : int
        Axis along which the mask is applied.
    ratio : int, optional
        Ratio of the mask's diameter in pixels to the smallest edge
        size along the given axis.
    val : int, optional
        Value assigned to the masked (outside-circle) region.
    ncore : int, optional
        Number of numexpr threads to use.

    Returns
    -------
    ndarray
        Masked array.
    """
    val = np.float32(val)
    arr = dtype.as_float32(arr)

    # Work on a view with the masking axis first; each slice along that
    # axis shares the same 2D circular mask.
    _arr = arr.swapaxes(0, axis)
    dims = _arr.shape
    mask = _get_mask(dims[1], dims[2], ratio)

    with mproc.set_numexpr_threads(ncore):
        ne.evaluate('where(mask, _arr, val)', out=_arr)
    return _arr.swapaxes(0, axis)
def _get_mask(dx, dy, ratio):
"""
Calculate 2D boolean circular mask.
Parameters
----------
dx, dy : int
Dimensions of the 2D mask.
ratio : int
Ratio of the circle's diameter in pixels to
the smallest mask dimension.
Returns
-------
ndarray
2D boolean array.
"""
rad1 = dx / 2.
rad2 = dy | |
model which just
shifts the Lorentzian peak with a simple cosine by 2 MHz.
'''
if MC is None:
MC = self.instr_MC.get_instr()
s1 = swf.None_Sweep(name='Heterodyne Frequency',
parameter_name='Frequency',
unit='Hz')
s2 = swf.None_Sweep(name='Flux', parameter_name=fluxChan,
unit='A')
fluxcurrent = self.instr_FluxCtrl.get_instr()
power = 20*np.log10(self.ro_pulse_amp_CW())
mocked_values = []
try:
device = self.instr_device.get_instr()
qubit_names = device.qubits()
# Create linecuts:
for dac_value in dac_values:
fluxcurrent[fluxChan](dac_value)
h = 10**(power/20)*10e-3
new_values = h
for name in qubit_names:
if name != 'fakequbit':
qubit = device.find_instrument(name)
f0, response = qubit.calculate_mock_resonator_response(
power, freqs)
new_values += response
mocked_values = np.concatenate([mocked_values, new_values])
except AttributeError:
logging.warning('No device found! Using this mock only for for '
'resonator frequencies')
for dac_value in dac_values:
fluxcurrent[fluxChan](dac_value)
h = 10**(power/20)*10e-3
new_values = h + self.calculate_mock_resonator_response(power,
freqs)
mocked_values = np.concatenate([mocked_values, new_values])
mocked_values += np.random.normal(0,
self.noise()/np.sqrt(self.ro_acq_averages()),
np.size(mocked_values))
mocked_values = np.abs(mocked_values)
d = det.Mock_Detector(value_names=['Magnitude'], value_units=['V'],
detector_control='soft',
mock_values=mocked_values)
MC.set_sweep_function(s1)
MC.set_sweep_function_2D(s2)
MC.set_sweep_points(freqs)
MC.set_sweep_points_2D(dac_values)
MC.set_detector_function(d)
MC.run('Resonator_dac_scan'+self.msmt_suffix+label, mode='2D')
if analyze:
ma.TwoD_Analysis(label='Resonator_dac_scan', close_fig=close_fig,
normalize=False)
import pycqed.analysis_v2.dac_scan_analysis as dsa
dsa.Susceptibility_to_Flux_Bias(label='Resonator_dac_scan')
def measure_rabi(self, MC=None, amps=None,
analyze=True, close_fig=True, real_imag=True,
prepare_for_timedomain=True, all_modules=False):
"""
Measurement is the same with and without vsm; therefore there is only
one measurement method rather than two. In
calibrate_mw_pulse_amp_coarse, the required parameter is updated.
"""
if MC is None:
MC = self.instr_MC.get_instr()
if amps is None:
amps = np.linspace(0.1, 1, 31)
s = swf.None_Sweep(name='Channel Amplitude',
parameter_name='mw channel amp',
unit='a.u.')
f_rabi = 1/self.mock_mw_amp180()
low_lvl = self.measurement_signal(excited=False)
high_lvl = self.measurement_signal(excited=True)
freq_qubit = self.calculate_mock_qubit_frequency()
detuning = np.abs(self.freq_qubit() - freq_qubit)/1e6
highlow = (high_lvl-low_lvl)*np.exp(-detuning)
high_lvl = low_lvl + highlow
signal_amp = (high_lvl - low_lvl)/2
offset = (high_lvl + low_lvl)/2
mocked_values = offset + signal_amp*np.cos(np.pi*f_rabi*amps)
mocked_values = self.values_to_IQ(mocked_values) # This adds noise too
d = det.Mock_Detector(value_names=['raw w0', 'raw w1'],
value_units=['V', 'V'], detector_control='soft',
mock_values=mocked_values)
MC.set_sweep_function(s)
MC.set_sweep_points(amps)
MC.set_detector_function(d)
MC.run('mock_rabi'+self.msmt_suffix)
a = ma.Rabi_Analysis(label='rabi_', close_fig=close_fig)
return a.rabi_amplitudes['piPulse']
    def measure_ramsey(self, times=None, MC=None,
                       artificial_detuning: float = None,
                       freq_qubit: float = None, label: str = '',
                       prepare_for_timedomain=True, analyze=True,
                       close_fig=True, update=True, detector=False,
                       double_fit=False):
        """
        Mock Ramsey (T2*) measurement.

        Synthesizes a damped cosine oscillating at the error between the
        assumed and mock qubit frequencies plus the artificial detuning,
        appends four calibration points, adds readout noise, and runs
        the standard Ramsey analysis. Returns a dict with T2* and the
        fitted frequency (two of each when ``double_fit`` is True);
        returns None when ``analyze`` is False.
        """
        if MC is None:
            MC = self.instr_MC.get_instr()
        if times is None:
            # Step size rounded down to an integer multiple of the
            # hardware cycle time; sweep covers ~4 * T2*.
            stepsize = (self.T2_star()*4/61)//(abs(self.cfg_cycle_time())) \
                * abs(self.cfg_cycle_time())
            times = np.arange(0, self.T2_star()*4, stepsize)
        if artificial_detuning is None:
            # Default: three oscillations over the full sweep window.
            artificial_detuning = 3/times[-1]

        # Calibration points: four extra points past the sweep end
        # (two ground, two excited).
        dt = times[1] - times[0]
        times = np.concatenate([times,
                                (times[-1]+1*dt,
                                 times[-1]+2*dt,
                                 times[-1]+3*dt,
                                 times[-1]+4*dt)])

        # if prepare_for_timedomain:
        #     self.prepare_for_timedomain()

        if freq_qubit is None:
            freq_qubit = self.freq_qubit()
        # Set the LO so the drive lands at the assumed qubit frequency
        # plus the artificial detuning (modulation supplied separately).
        self.instr_LO_mw.get_instr().set('frequency', freq_qubit -
                                         self.mw_freq_mod.get() +
                                         artificial_detuning)

        s = swf.None_Sweep(name='T2_star', parameter_name='Time',
                           unit='s')

        low_lvl = self.measurement_signal(excited=False)
        high_lvl = self.measurement_signal(excited=True)

        mock_freq_qubit = self.calculate_mock_qubit_frequency()
        # Contrast reduction when driving off-resonance (detuning in MHz).
        detuning = np.abs(self.freq_qubit() - freq_qubit)/1e6
        highlow = (high_lvl-low_lvl)*np.exp(-detuning)
        high_lvl = low_lvl + highlow

        signal_amp = (high_lvl - low_lvl)/2
        offset = (high_lvl + low_lvl)/2

        phase = 0
        oscillation_offset = 0
        exponential_offset = offset
        # Observed Ramsey frequency: frequency-assumption error plus the
        # artificial detuning.
        frequency = freq_qubit - mock_freq_qubit + artificial_detuning

        # Mock values without calibration points
        mocked_values = (signal_amp *
                         np.exp(-(times[0:-4] / self.mock_T2_star())) *
                         (np.cos(2*np.pi*frequency*times[0:-4] + phase) +
                          oscillation_offset) + exponential_offset)
        # Calibration points
        mocked_values = np.concatenate([mocked_values,
                                        [low_lvl, low_lvl, high_lvl, high_lvl]])

        # Add noise:
        # NOTE(review): Gaussian noise is added here AND again inside
        # values_to_IQ — possibly double-counted; confirm intent.
        mocked_values += np.random.normal(0,
                                          self.noise()/np.sqrt(self.ro_acq_averages()),
                                          np.size(mocked_values))

        mocked_values = self.values_to_IQ(mocked_values)

        d = det.Mock_Detector(value_names=['raw w1', 'raw w0'],
                              value_units=['V', 'V'],
                              detector_control='soft',
                              mock_values=mocked_values)

        MC.set_sweep_function(s)
        MC.set_sweep_points(times)
        MC.set_detector_function(d)
        MC.run('mock_Ramsey' + self.msmt_suffix)

        if analyze:
            a = ma.Ramsey_Analysis(auto=True, closefig=True,
                                   freq_qubit=freq_qubit,
                                   artificial_detuning=artificial_detuning)
            if update:
                self.T2_star(a.T2_star['T2_star'])

            if double_fit:
                b = ma.DoubleFrequency()
                res = {
                    'T2star1': b.tau1,
                    'T2star2': b.tau2,
                    'frequency1': b.f1,
                    'frequency2': b.f2
                }
                return res
            else:
                res = {
                    'T2star': a.T2_star['T2_star'],
                    'frequency': a.qubit_frequency,
                }
                return res
def measure_echo(self, times=None, MC=None, analyze=True, close_fig=True,
update=True, label: str = ''):
if MC is None:
MC = self.instr_MC.get_instr()
if times is None:
stepsize = (self.T2_echo()*2/61)//(abs(self.cfg_cycle_time())) \
* abs(self.cfg_cycle_time())
times = np.arange(0, self.T2_echo()*4, stepsize*2)
dt = times[1] - times[0]
times = np.concatenate([times,
(times[-1]+1*dt,
times[-1]+2*dt,
times[-1]+3*dt,
times[-1]+4*dt)])
s = swf.None_Sweep(parameter_name='Time', unit='s')
low_lvl = self.measurement_signal(excited=False)
high_lvl = self.measurement_signal(excited=True)
freq_qubit = self.calculate_mock_qubit_frequency()
detuning = np.abs(self.freq_qubit() - freq_qubit)/1e6
highlow = (high_lvl-low_lvl)*np.exp(-detuning)
high_lvl = low_lvl + highlow
signal_amp = (high_lvl - low_lvl)/2
offset = (high_lvl + low_lvl)/2
phase = np.pi
oscillation_offset = 0
exponential_offset = offset
frequency = 4/times[-1] # 4 oscillations
# Mock values without calibration points
mocked_values = (signal_amp *
np.exp(-(times[0:-4] / self.mock_T2_echo())) *
(np.cos(2*np.pi*frequency*times[0:-4] + phase) +
oscillation_offset) + exponential_offset)
mocked_values = self.values_to_IQ(mocked_values)
d = det.Mock_Detector(value_names=['raw w1', 'raw w0'],
value_units=['V', 'V'],
detector_control='soft',
mock_values=mocked_values)
MC.set_sweep_function(s)
MC.set_sweep_points(times)
MC.set_detector_function(d)
MC.run('mock_echo' + self.msmt_suffix)
if analyze:
# N.B. v1.5 analysis
a = ma.Echo_analysis_V15(label='echo', auto=True, close_fig=True)
if update:
self.T2_echo(a.fit_res.params['tau'].value)
return a.fit_res.params['tau'].value
def measure_T1(self, times=None, MC=None, analyze=True, close_fig=True,
update=True, prepare_for_timedomain=True):
'''
Very simple version that just returns a exponential decay based on
mock_T1. Might be improved by making it depend on how close your
pulse amp is.
'''
if MC is None:
MC = self.instr_MC.get_instr()
if times is None:
times = np.linspace(0, self.T1()*4, 31)
# Calibration points
dt = times[1] - times[0]
times = np.concatenate([times,
(times[-1]+1*dt,
times[-1]+2*dt,
times[-1]+3*dt,
times[-1]+4*dt)])
s = swf.None_Sweep(parameter_name='Time', unit='s')
low_lvl = self.measurement_signal(excited=False)
high_lvl = self.measurement_signal(excited=True)
freq_qubit = self.calculate_mock_qubit_frequency()
detuning = np.abs(self.freq_qubit() - freq_qubit)/1e6
highlow = (high_lvl-low_lvl)*np.exp(-detuning)
high_lvl = low_lvl + highlow
amplitude = high_lvl - low_lvl
mocked_values = amplitude*np.exp(-(times[0:-4]/self.mock_T1()))+low_lvl
mocked_values = np.concatenate(
[mocked_values, (low_lvl, low_lvl, high_lvl, high_lvl)])
mocked_values = self.values_to_IQ(mocked_values)
d = det.Mock_Detector(value_names=['raw w0', 'raw w1'],
value_units=['V', 'V'], detector_control='soft',
mock_values=mocked_values)
MC.set_sweep_function(s)
MC.set_sweep_points(times)
MC.set_detector_function(d)
MC.run('mock_T1'+self.msmt_suffix)
if analyze:
a = ma.T1_Analysis(auto=True, close_fig=True)
if update:
self.T1(a.T1)
return a.T1
def measure_ALLXY(self, MC=None, label: str = '', analyze=True,
close_fig=True):
"""
NOT IMPLEMENTED YET
"""
if MC is None:
MC = self.instr_MC.get_instr()
if analyze:
a = ma.ALLXY_Analysis(close_main_fig=close_fig)
return a.deviation_total
def measurement_signal(self, excited=False):
'''
Sets the readout signal level, depending on the readout frequency and
resonator frequency.
The 'excited' parameter indicates whether the qubit is in the excited
state, which results in a 2 chi shift of the resonator
'''
power = 20*np.log10(self.ro_pulse_amp_CW())
f_ro = self.ro_freq()
h = 10**(power/20)*10e-3 # Lorentian baseline [V]
f0, dip = self.calculate_mock_resonator_response(power, f_ro,
excited=excited)
signal = h + dip
if type(signal) is list:
signal = signal[0]
return signal
def values_to_IQ(self, mocked_values, theta=15):
theta = theta * np.pi/180
MockI = 1/np.sqrt(2)*np.real(mocked_values*np.exp(1j*theta))
MockQ = 1/np.sqrt(2)*np.real(mocked_values*np.exp(1j*(theta-np.pi/2)))
IQ_values = []
for I, Q in zip(MockI, MockQ):
I += np.random.normal(0, self.noise() /
np.sqrt(self.ro_acq_averages()), 1)
Q += np.random.normal(0, self.noise() /
np.sqrt(self.ro_acq_averages()), 1)
IQ_values.append([I, Q])
return IQ_values
def calculate_f_qubit_from_power_scan(self, f_bare, f_shifted,
g_coupling=65e6, RWA=False):
'''
Inputs are in Hz
f_bare: the resonator frequency without a coupled qubit
f_shifted: the reso freq shifted due to coupling of a qwubit
g_coupling: the coupling strengs
Output:
f_q: in Hz
'''
w_r = f_bare * 2 * np.pi
w_shift = f_shifted * 2*np.pi
g = 2*np.pi * g_coupling
shift = (w_shift - w_r)/g**2
# f_shift > 0 when f_qubit<f_res
# For the non-RWA result (only dispersive approximation)
if (RWA is False):
w_q = -1/(shift) + np.sqrt(1/(shift**2)+w_r**2)
# For the RWA approximation
else:
w_q = -1/shift + w_r
return w_q/(2.*np.pi)
def calculate_g_coupling_from_frequency_shift(self, f_bare, f_shifted,
f_qubit):
w_r = 2*np.pi * f_bare
w_shift = 2*np.pi * f_shifted
w_q = 2*np.pi*f_qubit
shift = w_shift-w_r
rhs = 1./(w_q-w_r) + 1./(w_q+w_r)
# rhs_RWA = 1./(w_q-w_r)
return np.sqrt(np.abs(shift/rhs))/(2*np.pi)
def calculate_mock_flux(self):
"""
Calculates total flux through SQUID loop by a weighted sum of all
contributions from all FBLs, and subtracting the sweetspot flux.
"""
fluxcurrent = self.instr_FluxCtrl.get_instr()
flux = 0
for FBL in fluxcurrent.channel_map:
current = fluxcurrent[FBL]()
flux += current/self.mock_fl_dc_I_per_phi0()[FBL]
flux -= self.mock_sweetspot_phi_over_phi0()
return flux
def calculate_mock_qubit_frequency(self):
'''
Cleaner way of calculating the qubit frequency, depending on:
- Flux (current)
- Ec, EJ
- Chi01
'''
phi_over_phi0 = self.calculate_mock_flux()
if self.mock_Ej1() > self.mock_Ej2():
alpha = self.mock_Ej1()/self.mock_Ej2()
else:
alpha = self.mock_Ej2()/self.mock_Ej1()
d = (alpha-1)/(alpha+1)
Ej_sum = self.mock_Ej1() + self.mock_Ej2()
Ej_eff = np.abs(Ej_sum*np.cos(np.pi*phi_over_phi0) *
np.sqrt(1 + d**2 * (np.tan(np.pi*phi_over_phi0))**2))
f_qubit = (np.sqrt(8*self.mock_Ec()*Ej_eff) -
self.mock_Ec() + self.mock_chi01())
return f_qubit
def calculate_mock_resonator_response(self, power, freqs, excited=False):
"""
Cleaner way of calculating resonator frequency, depending on power etc.
Makes it easier to use a mock device with multiple resonators
Returns resonant frequency and Lorentzian as a combination of both the
test and qubit resonators
TODO: Make hanger instead of Lorentzian
"""
res_power = 20*np.log10(self.mock_ro_pulse_amp_CW())
pow_shift = self.mock_pow_shift()
h = 10**(power/20)*10e-3 # Lorentzian baseline [V]
Q = self.mock_Q()
Qe = self.mock_Qe()
theta = self.mock_theta()
slope = self.mock_slope()
phi_I = self.mock_phi_I()
phi_0 = self.mock_phi_0()
Q_test = self.mock_test_Q()
Qe_test = self.mock_test_Qe()
theta_test = self.mock_test_theta()
slope_test = self.mock_test_slope()
phi_I_test = self.mock_test_phi_I()
phi_0_test = self.mock_test_phi_0()
| |
<filename>fhirclient/models/appointment.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Appointment) on 2019-01-22.
# 2019, SMART Health IT.
from . import domainresource
class Appointment(domainresource.DomainResource):
"""
A
b
o
o
k
i
n
g
o
f
a
h
e
a
l
t
h
c
a
r
e
e
v
e
n
t
a
m
o
n
g
p
a
t
i
e
n
t
(
s
)
,
p
r
a
c
t
i
t
i
o
n
e
r
(
s
)
,
r
e
l
a
t
e
d
p
e
r
s
o
n
(
s
)
a
n
d
/
o
r
d
e
v
i
c
e
(
s
)
f
o
r
a
s
p
e
c
i
f
i
c
d
a
t
e
/
t
i
m
e
.
T
h
i
s
m
a
y
r
e
s
u
l
t
i
n
o
n
e
o
r
m
o
r
e
E
n
c
o
u
n
t
e
r
(
s
)
.
"""
resource_type = "Appointment"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.appointmentType = None
"""
T
h
e
s
t
y
l
e
o
f
a
p
p
o
i
n
t
m
e
n
t
o
r
p
a
t
i
e
n
t
t
h
a
t
h
a
s
b
e
e
n
b
o
o
k
e
d
i
n
t
h
e
s
l
o
t
(
n
o
t
s
e
r
v
i
c
e
t
y
p
e
)
.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.basedOn = None
"""
T
h
e
s
e
r
v
i
c
e
r
e
q
u
e
s
t
t
h
i
s
a
p
p
o
i
n
t
m
e
n
t
i
s
a
l
l
o
c
a
t
e
d
t
o
a
s
s
e
s
s
.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.cancelationReason = None
"""
T
h
e
c
o
d
e
d
r
e
a
s
o
n
f
o
r
t
h
e
a
p
p
o
i
n
t
m
e
n
t
b
e
i
n
g
c
a
n
c
e
l
l
e
d
.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.comment = None
"""
A
d
d
i
t
i
o
n
a
l
c
o
m
m
e
n
t
s
.
Type `str`. """
self.created = None
"""
T
h
e
d
a
t
e
t
h
a
t
t
h
i
s
a
p
p
o
i
n
t
m
e
n
t
w
a
s
i
n
i
t
i
a
l
l
y
c
r
e
a
t
e
d
.
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
"""
S
h
o
w
n
o
n
a
s
u
b
j
e
c
t
l
i
n
e
i
n
a
m
e
e
t
i
n
g
r
e
q
u
e
s
t
,
o
r
a
p
p
o
i
n
t
m
e
n
t
l
i
s
t
.
Type `str`. """
self.end = None
"""
W
h
e
n
a
p
p
o
i
n
t
m
e
n
t
i
s
t
o
c
o
n
c
l
u
d
e
.
Type `FHIRDate` (represented as `str` in JSON). """
self.identifier = None
"""
E
x
t
e
r
n
a
l
I
d
s
f
o
r
t
h
i
s
i
t
e
m
.
List of `Identifier` items (represented as `dict` in JSON). """
self.minutesDuration = None
"""
C
a
n
b
e
l
e
s
s
t
h
a
n
s
t
a
r
t
/
e
n
d
(
e
.
g
.
e
s
t
i
m
a
t
e
)
.
Type `int`. """
self.participant = None
"""
P
a
r
t
i
c
i
p
a
n
t
s
i
n
v
o
l
v
e
d
i
n
a
p
p
o
i
n
t
m
e
n
t
.
List of `AppointmentParticipant` items (represented as `dict` in JSON). """
self.patientInstruction = None
"""
D
e
t
a
i
l
e
d
i
n
f
o
r
m
a
t
i
o
n
a
n
d
i
n
s
t
r
u
c
t
i
o
n
s
f
o
r
t
h
e
p
a
t
i
e
n
t
.
Type `str`. """
self.priority = None
"""
U
s
e
d
t
o
m
a
k
e
i
n
f
o
r
m
e
d
d
e
c
i
s
i
o
n
s
i
f
n
e
e
d
i
n
g
t
o
r
e
-
p
r
i
o
r
i
t
i
z
e
.
Type `int`. """
self.reasonCode = None
"""
C
o
d
e
d
r
e
a
s
o
n
t
h
i
s
a
p
p
o
i
n
t
m
e
n
t
i
s
s
c
h
e
d
u
l
e
d
.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.reasonReference = None
"""
R
e
a
s
o
n
t
h
e
a
p
p
o
i
n
t
m
e
n
t
i
s
t
o
t
a
k
e
p
l
a
c
e
(
r
e
s
o
u
r
c
e
)
.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.requestedPeriod = None
"""
P
o
t
e
n
t
i
a
l
d
a
t
e
/
t
i
m
e
i
n
t
e
r
v
a
l
(
s
)
r
e
q
u
e
s
t
e
d
t
o
a
l
l
o
c
a
t
e
t
h
e
a
p
p
o
i
n
t
m
e
n
t
w
i
t
h
i
n
.
List of `Period` items (represented as `dict` in JSON). """
self.serviceCategory = None
"""
A
b
r
o
a
d
c
a
t
e
g
o
r
i
z
a
t
i
o
n
o
f
t
h
e
s
e
r
v
i
c
e
t
h
a
t
i
s
t
o
b
e
p
e
r
f
o
r
m
e
d
d
u
r
i
n
g
t
h
i
s
a
p
p
o
i
n
t
m
e
n
t
.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.serviceType = None
"""
T
h
e
s
p
e
c
i
f
i
c
s
e
r
v
i
c
e
t
h
a
t
i
s
t
o
b
e
p
e
r
f
o
r
m
e
d
d
u
r
i
n
g
t
h
i
s
a
p
p
o
i
n
t
m
e
n
t
.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.slot = None
"""
T
h
e
s
l
o
t
s
t
h
a
t
t
h
i
s
a
p
p
o
i
n
t
m
e
n
t
i
s
| |
<filename>MODULES_DATA/Discovery_SecuritySoftwareDiscovery_ListAVByTasklist/avlist.py
# -*- coding: utf-8 -*-
# @File : avList.py
# @Date : 2021/7/26
# @Desc :
avList_zh = {
"360tray.exe": "360安全卫士-实时保护",
"360safe.exe": "360安全卫士-主程序",
"ZhuDongFangYu.exe": "360安全卫士-主动防御",
"360sd.exe": "360杀毒",
"a2guard.exe": "a-squared杀毒",
"ad-watch.exe": "Lavasoft杀毒",
"cleaner8.exe": "The Cleaner杀毒",
"vba32lder.exe": "vb32杀毒",
"MongoosaGUI.exe": "Mongoosa杀毒",
"CorantiControlCenter32.exe": "Coranti2012杀毒",
"F-PROT.exe": "F-Prot AntiVirus",
"CMCTrayIcon.exe": "CMC杀毒",
"K7TSecurity.exe": "K7杀毒",
"UnThreat.exe": "UnThreat杀毒",
"CKSoftShiedAntivirus4.exe": "Shield Antivirus杀毒",
"AVWatchService.exe": "VIRUSfighter杀毒",
"ArcaTasksService.exe": "ArcaVir杀毒",
"iptray.exe": "Immunet杀毒",
"PSafeSysTray.exe": "PSafe杀毒",
"nspupsvc.exe": "nProtect杀毒",
"SpywareTerminatorShield.exe": "SpywareTerminator反间谍软件",
"BKavService.exe": "Bkav杀毒",
"MsMpEng.exe": "Windows Defender",
"SBAMSvc.exe": "VIPRE",
"ccSvcHst.exe": "Norton杀毒",
"f-secure.exe": "冰岛",
"avp.exe": "Kaspersky",
"KvMonXP.exe": "江民杀毒",
"RavMonD.exe": "瑞星杀毒",
"Mcshield.exe": "McAfee",
"Tbmon.exe": "McAfee",
"Frameworkservice.exe": "McAfee",
"egui.exe": "ESET NOD32",
"ekrn.exe": "ESET NOD32",
"eguiProxy.exe": "ESET NOD32",
"kxetray.exe": "金山毒霸",
"knsdtray.exe": "可牛杀毒",
"TMBMSRV.exe": "趋势杀毒",
"avcenter.exe": "Avira(小红伞)",
"avguard.exe": "Avira(小红伞)",
"avgnt.exe": "Avira(小红伞)",
"sched.exe": "Avira(小红伞)",
"ashDisp.exe": "Avast网络安全",
"rtvscan.exe": "诺顿杀毒",
"ccapp.exe": "SymantecNorton",
"NPFMntor.exe": "Norton杀毒软件",
"ccSetMgr.exe": "赛门铁克",
"ccRegVfy.exe": "Norton杀毒软件",
"ksafe.exe": "金山卫士",
"QQPCRTP.exe": "QQ电脑管家",
"avgwdsvc.exe": "AVG杀毒",
"QUHLPSVC.exe": "QUICK HEAL杀毒",
"mssecess.exe": "微软杀毒",
"SavProgress.exe": "Sophos杀毒",
"SophosUI.exe": "Sophos杀毒",
"SophosFS.exe": "Sophos杀毒",
"SophosHealth.exe": "Sophos杀毒",
"SophosSafestore64.exe": "Sophos杀毒",
"SophosCleanM.exe": "Sophos杀毒",
"fsavgui.exe": "F-Secure杀毒",
"vsserv.exe": "比特梵德",
"remupd.exe": "熊猫卫士",
"FortiTray.exe": "飞塔",
"safedog.exe": "安全狗",
"parmor.exe": "木马克星",
"Iparmor.exe.exe": "木马克星",
"beikesan.exe": "贝壳云安全",
"KSWebShield.exe": "金山网盾",
"TrojanHunter.exe": "木马猎手",
"GG.exe": "巨盾网游安全盾",
"adam.exe": "绿鹰安全精灵",
"AST.exe": "超级巡警",
"ananwidget.exe": "墨者安全专家",
"AVK.exe": "AntiVirusKit",
"avg.exe": "AVG Anti-Virus",
"spidernt.exe": "Dr.web",
"avgaurd.exe": "Avira Antivir",
"vsmon.exe": "Zone Alarm",
"cpf.exe": "Comodo",
"outpost.exe": "Outpost Firewall",
"rfwmain.exe": "瑞星防火墙",
"kpfwtray.exe": "金山网镖",
"FYFireWall.exe": "风云防火墙",
"MPMon.exe": "微点主动防御",
"pfw.exe": "天网防火墙",
"BaiduSdSvc.exe": "百度杀毒-服务进程",
"BaiduSdTray.exe": "百度杀毒-托盘进程",
"BaiduSd.exe": "百度杀毒-主程序",
"SafeDogGuardCenter.exe": "安全狗",
"safedogupdatecenter.exe": "安全狗",
"safedogguardcenter.exe": "安全狗",
"SafeDogSiteIIS.exe": "安全狗",
"SafeDogTray.exe": "安全狗",
"SafeDogServerUI.exe": "安全狗",
"D_Safe_Manage.exe": "D盾",
"d_manage.exe": "D盾",
"yunsuo_agent_service.exe": "云锁",
"yunsuo_agent_daemon.exe": "云锁",
"HwsPanel.exe": "护卫神",
"hws_ui.exe": "护卫神",
"hws.exe": "护卫神",
"hwsd.exe": "护卫神",
"HipsTray.exe": "火绒",
"HipsDaemon.exe": "火绒",
"wsctrl.exe": "火绒",
"usysdiag.exe": "火绒",
"SPHINX.exe": "SPHINX防火墙",
"bddownloader.exe": "百度卫士",
"baiduansvx.exe": "百度卫士-主进程",
"AvastUI.exe": "Avast!5主程序",
"emet_agent.exe": "EMET",
"emet_service.exe": "EMET",
"firesvc.exe": "McAfee",
"firetray.exe": "McAfee",
"hipsvc.exe": "McAfee",
"mfevtps.exe": "McAfee",
"mcafeefire.exe": "McAfee",
"scan32.exe": "McAfee",
"shstat.exe": "McAfee",
"vstskmgr.exe": "McAfee",
"engineserver.exe": "McAfee",
"mfeann.exe": "McAfee",
"mcscript.exe": "McAfee",
"updaterui.exe": "McAfee",
"udaterui.exe": "McAfee",
"naprdmgr.exe": "McAfee",
"cleanup.exe": "McAfee",
"cmdagent.exe": "McAfee",
"frminst.exe": "McAfee",
"mcscript_inuse.exe": "McAfee",
"mctray.exe": "McAfee",
"_avp32.exe": "卡巴斯基",
"_avpcc.exe": "卡巴斯基",
"_avpm.exe": "卡巴斯基",
"aAvgApi.exe": "AVG",
"ackwin32.exe": "已知杀软进程,名称暂未收录",
"alertsvc.exe": "Norton AntiVirus",
"alogserv.exe": "McAfee VirusScan",
"anti-trojan.exe": "Anti-Trojan Elite",
"arr.exe": "Application Request Route",
"atguard.exe": "AntiVir",
"atupdater.exe": "已知杀软进程,名称暂未收录",
"atwatch.exe": "Mustek",
"au.exe": "NSIS",
"aupdate.exe": "Symantec",
"auto-protect.nav80try.exe": "已知杀软进程,名称暂未收录",
"autodown.exe": "AntiVirus AutoUpdater",
"avconsol.exe": "McAfee",
"avgcc32.exe": "AVG",
"avgctrl.exe": "AVG",
"avgemc.exe": "AVG",
"avgrsx.exe": "AVG",
"avgserv.exe": "AVG",
"avgserv9.exe": "AVG",
"avgw.exe": "AVG",
"avkpop.exe": "G DATA SOFTWARE AG",
"avkserv.exe": "G DATA SOFTWARE AG",
"avkservice.exe": "G DATA SOFTWARE AG",
"avkwctl9.exe": "G DATA SOFTWARE AG",
"avltmain.exe": "Panda Software Aplication",
"avnt.exe": "H+BEDV Datentechnik GmbH",
"avp32.exe": "Kaspersky Anti-Virus",
"avpcc.exe": " Kaspersky AntiVirus",
"avpdos32.exe": " Kaspersky AntiVirus",
"avpm.exe": " Kaspersky AntiVirus",
"avptc32.exe": " Kaspersky AntiVirus",
"avpupd.exe": " Kaspersky AntiVirus",
"avsynmgr.exe": "McAfee",
"avwin.exe": " H+BEDV",
"bargains.exe": "Exact Advertising SpyWare",
"beagle.exe": "Avast",
"blackd.exe": "BlackICE",
"blackice.exe": "BlackICE",
"blink.exe": "micromedia",
"blss.exe": "CBlaster",
"bootwarn.exe": "Symantec",
"bpc.exe": "Grokster",
"brasil.exe": "Exact Advertising",
"ccevtmgr.exe": "Norton Internet Security",
"cdp.exe": "CyberLink Corp.",
"cfd.exe": "Motive Communications",
"cfgwiz.exe": " Norton AntiVirus",
"claw95.exe": "已知杀软进程,名称暂未收录",
"claw95cf.exe": "已知杀软进程,名称暂未收录",
"clean.exe": "windows流氓软件清理大师",
"cleaner.exe": "windows流氓软件清理大师",
"cleaner3.exe": "windows流氓软件清理大师",
"cleanpc.exe": "windows流氓软件清理大师",
"cpd.exe": "McAfee",
"ctrl.exe": "已知杀软进程,名称暂未收录",
"cv.exe": "已知杀软进程,名称暂未收录",
"defalert.exe": "Symantec",
"defscangui.exe": "Symantec",
"defwatch.exe": "Norton Antivirus",
"doors.exe": "已知杀软进程,名称暂未收录",
"dpf.exe": "已知杀软进程,名称暂未收录",
"dpps2.exe": "PanicWare",
"dssagent.exe": "Broderbund",
"ecengine.exe": "已知杀软进程,名称暂未收录",
"emsw.exe": "Alset Inc",
"ent.exe": "已知杀软进程,名称暂未收录",
"espwatch.exe": "已知杀软进程,名称暂未收录",
"ethereal.exe": "RationalClearCase",
"exe.avxw.exe": "已知杀软进程,名称暂未收录",
"expert.exe": "已知杀软进程,名称暂未收录",
"f-prot95.exe": "已知杀软进程,名称暂未收录",
"fameh32.exe": "F-Secure",
"fast.exe": " FastUsr",
"fch32.exe": "F-Secure",
"fih32.exe": "F-Secure",
"findviru.exe": "F-Secure",
"firewall.exe": "AshampooSoftware",
"fnrb32.exe": "F-Secure",
"fp-win.exe": " F-Prot Antivirus OnDemand",
"fsaa.exe": "F-Secure",
"fsav.exe": "F-Secure",
"fsav32.exe": "F-Secure",
"fsav530stbyb.exe": "F-Secure",
"fsav530wtbyb.exe": "F-Secure",
"fsav95.exe": "F-Secure",
"fsgk32.exe": "F-Secure",
"fsm32.exe": "F-Secure",
"fsma32.exe": "F-Secure",
"fsmb32.exe": "F-Secure",
"gbmenu.exe": "已知杀软进程,名称暂未收录",
"guard.exe": "ewido",
"guarddog.exe": "ewido",
"htlog.exe": "已知杀软进程,名称暂未收录",
"htpatch.exe": "Silicon Integrated Systems Corporation",
"hwpe.exe": "已知杀软进程,名称暂未收录",
"iamapp.exe": "Symantec",
"iamserv.exe": "Symantec",
"iamstats.exe": "Symantec",
"iedriver.exe": " Urlblaze.com",
"iface.exe": "Panda Antivirus Module",
"infus.exe": "Infus Dialer",
"infwin.exe": "Msviewparasite",
"intdel.exe": "Inet Delivery",
"intren.exe": "已知杀软进程,名称暂未收录",
"jammer.exe": "已知杀软进程,名称暂未收录",
"kavpf.exe": "Kapersky",
"kazza.exe": "Kapersky",
"keenvalue.exe": "EUNIVERSE INC",
"launcher.exe": "Intercort Systems",
"ldpro.exe": "已知杀软进程,名称暂未收录",
"ldscan.exe": "Windows Trojans Inspector",
"localnet.exe": "已知杀软进程,名称暂未收录",
"luall.exe": "Symantec",
"luau.exe": "Symantec",
"lucomserver.exe": "Norton",
"mcagent.exe": "McAfee",
"mcmnhdlr.exe": "McAfee",
"mctool.exe": "McAfee",
"mcupdate.exe": "McAfee",
"mcvsrte.exe": "McAfee",
"mcvsshld.exe": "McAfee",
"mfin32.exe": "MyFreeInternetUpdate",
"mfw2en.exe": "MyFreeInternetUpdate",
"mfweng3.02d30.exe": "MyFreeInternetUpdate",
"mgavrtcl.exe": "McAfee",
"mgavrte.exe": "McAfee",
"mghtml.exe": "McAfee",
"mgui.exe": "BullGuard",
"minilog.exe": "Zone Labs Inc",
"mmod.exe": "EzulaInc",
"mostat.exe": "WurldMediaInc",
"mpfagent.exe": "McAfee",
"mpfservice.exe": "McAfee",
"mpftray.exe": "McAfee",
"mscache.exe": "Integrated Search Technologies Spyware",
"mscman.exe": "OdysseusMarketingInc",
"msmgt.exe": "Total Velocity Spyware",
"msvxd.exe": "W32/Datom-A",
"mwatch.exe": "已知杀软进程,名称暂未收录",
"nav.exe": "Reuters Limited",
"navapsvc.exe": "Norton AntiVirus",
"navapw32.exe": "Norton AntiVirus",
"navw32.exe": "Norton Antivirus",
"ndd32.exe": "诺顿磁盘医生",
"neowatchlog.exe": "已知杀软进程,名称暂未收录",
"netutils.exe": "已知杀软进程,名称暂未收录",
"nisserv.exe": "Norton",
"nisum.exe": "Norton",
"nmain.exe": "Norton",
"nod32.exe": "ESET Smart Security",
"norton_internet_secu_3.0_407.exe": "已知杀软进程,名称暂未收录",
"notstart.exe": "已知杀软进程,名称暂未收录",
"nprotect.exe": "Symantec",
"npscheck.exe": "Norton",
"npssvc.exe": "Norton",
"ntrtscan.exe": "趋势反病毒应用程序",
"nui.exe": "已知杀软进程,名称暂未收录",
"otfix.exe": "已知杀软进程,名称暂未收录",
"outpostinstall.exe": "Outpost",
"patch.exe": "趋势科技",
"pavw.exe": "已知杀软进程,名称暂未收录",
"pcscan.exe": "趋势科技",
"pdsetup.exe": "已知杀软进程,名称暂未收录",
"persfw.exe": "Tiny Personal Firewall",
"pgmonitr.exe": "PromulGate SpyWare",
"pingscan.exe": "已知杀软进程,名称暂未收录",
"platin.exe": "已知杀软进程,名称暂未收录",
"pop3trap.exe": "PC-cillin",
"poproxy.exe": "NortonAntiVirus",
"popscan.exe": "已知杀软进程,名称暂未收录",
"powerscan.exe": "Integrated Search Technologies",
"ppinupdt.exe": "已知杀软进程,名称暂未收录",
"pptbc.exe": "已知杀软进程,名称暂未收录",
"ppvstop.exe": "已知杀软进程,名称暂未收录",
"prizesurfer.exe": "Prizesurfer",
"prmt.exe": "OpiStat",
"prmvr.exe": "Adtomi",
"processmonitor.exe": "Sysinternals",
"proport.exe": "已知杀软进程,名称暂未收录",
"protectx.exe": "ProtectX",
"pspf.exe": "已知杀软进程,名称暂未收录",
"purge.exe": "已知杀软进程,名称暂未收录",
"qconsole.exe": "Norton AntiVirus Quarantine Console",
"qserver.exe": "Norton Internet Security",
"rapapp.exe": "BlackICE",
"rb32.exe": "RapidBlaster",
"rcsync.exe": "PrizeSurfer",
"realmon.exe": "Realmon ",
"rescue.exe": "已知杀软进程,名称暂未收录",
"rescue32.exe": "卡巴斯基互联网安全套装",
"rshell.exe": "已知杀软进程,名称暂未收录",
"rtvscn95.exe": "Real-time virus scanner ",
"rulaunch.exe": "McAfee User Interface",
"run32dll.exe": "PAL PC Spy",
"safeweb.exe": "PSafe Tecnologia",
"sbserv.exe": "Norton Antivirus",
"scrscan.exe": "360杀毒",
"sfc.exe": "System file checker",
"sh.exe": "MKS Toolkit for Win3",
"showbehind.exe": "MicroSmarts Enterprise Component ",
"soap.exe": "System Soap Pro",
"sofi.exe": "已知杀软进程,名称暂未收录",
"sperm.exe": "已知杀软进程,名称暂未收录",
"supporter5.exe": "eScorcher反病毒",
"symproxysvc.exe": "Symantec",
"symtray.exe": "Symantec",
"tbscan.exe": "ThunderBYTE",
"tc.exe": "TimeCalende",
"titanin.exe": "TitanHide",
"tvmd.exe": "Total Velocity",
"tvtmd.exe": " Total Velocity",
"vettray.exe": "eTrust",
"vir-help.exe": "已知杀软进程,名称暂未收录",
"vnpc3000.exe": "已知杀软进程,名称暂未收录",
"vpc32.exe": "Symantec",
"vpc42.exe": "Symantec",
"vshwin32.exe": "McAfee",
"vsmain.exe": "McAfee",
"vsstat.exe": "McAfee",
"wfindv32.exe": "已知杀软进程,名称暂未收录",
"zapro.exe": "Zone Alarm",
"zonealarm.exe": "Zone Alarm",
"AVPM.exe": "Kaspersky",
"A2CMD.exe": "Emsisoft Anti-Malware",
"A2SERVICE.exe": "a-squared free",
"A2FREE.exe": "a-squared Free",
"ADVCHK.exe": "Norton AntiVirus",
"AGB.exe": "安天防线",
"AHPROCMONSERVER.exe": "安天防线",
"AIRDEFENSE.exe": "AirDefense",
"ALERTSVC.exe": "Norton AntiVirus",
"AVIRA.exe": "小红伞杀毒",
"AMON.exe": "Tiny Personal Firewall",
"AVZ.exe": "AVZ",
"ANTIVIR.exe": "已知杀软进程,名称暂未收录",
"APVXDWIN.exe": "熊猫卫士",
"ASHMAISV.exe": "Alwil",
"ASHSERV.exe": "Avast Anti-virus",
"ASHSIMPL.exe": "AVAST!VirusCleaner",
"ASHWEBSV.exe": "Avast",
"ASWUPDSV.exe": "Avast",
"ASWSCAN.exe": "Avast",
"AVCIMAN.exe": "熊猫卫士",
"AVCONSOL.exe": "McAfee",
"AVENGINE.exe": "熊猫卫士",
"AVESVC.exe": "Avira AntiVir Security Service",
"AVEVL32.exe": "已知杀软进程,名称暂未收录",
"AVGAM.exe": "AVG",
"AVGCC.exe": "AVG",
"AVGCHSVX.exe": "AVG",
"AVGCSRVX": "AVG",
"AVGNSX.exe": "AVG",
"AVGCC32.exe": "AVG",
"AVGCTRL.exe": "AVG",
"AVGEMC.exe": "AVG",
"AVGFWSRV.exe": "AVG",
"AVGNTMGR.exe": "AVG",
"AVGSERV.exe": "AVG",
"AVGTRAY.exe": "AVG",
"AVGUPSVC.exe": "AVG",
"AVINITNT.exe": "Command AntiVirus for NT Server",
"AVPCC.exe": "Kaspersky",
"AVSERVER.exe": "Kerio MailServer",
"AVSCHED32.exe": "H+BEDV",
"AVSYNMGR.exe": "McAfee",
"AVWUPSRV.exe": "H+BEDV",
"BDSWITCH.exe": "BitDefender Module",
"BLACKD.exe": "BlackICE",
"CCEVTMGR.exe": "Symantec",
"CFP.exe": "COMODO",
"CLAMWIN.exe": "ClamWin Portable",
"CUREIT.exe": "DrWeb CureIT",
"DEFWATCH.exe": "Norton Antivirus",
"DRWADINS.exe": "Dr.Web",
"DRWEB.exe": "Dr.Web",
"DEFENDERDAEMON.exe": "ShadowDefender",
"EWIDOCTRL.exe": "Ewido Security Suite",
"EZANTIVIRUSREGISTRATIONCHECK.exe": "e-Trust Antivirus",
"FIREWALL.exe": "AshampooSoftware",
"FPROTTRAY.exe": "F-PROT Antivirus",
"FPWIN.exe": "Verizon",
"FRESHCLAM.exe": "ClamAV",
"FSAV32.exe": "F-Secure",
"FSBWSYS.exe": "F-secure",
"FSDFWD.exe": "F-Secure",
"FSGK32.exe": "F-Secure",
"FSGK32ST.exe": "F-Secure",
"FSMA32.exe": "F-Secure",
"FSMB32.exe": "F-Secure",
"FSSM32.exe": "F-Secure",
"GUARDGUI.exe": "网游保镖",
"GUARDNT.exe": "IKARUS",
"IAMAPP.exe": "Symantec",
"INOCIT.exe": "eTrust",
"INORPC.exe": "eTrust",
"INORT.exe": "eTrust",
"INOTASK.exe": "eTrust",
"INOUPTNG.exe": "eTrust",
"ISAFE.exe": "eTrust",
"KAV.exe": "Kaspersky",
"KAVMM.exe": "Kaspersky",
"KAVPF.exe": "Kaspersky",
"KAVPFW.exe": "Kaspersky",
"KAVSTART.exe": "Kaspersky",
"KAVSVC.exe": "Kaspersky",
"KAVSVCUI.exe": "Kaspersky",
"KMAILMON.exe": "金山毒霸",
"MCAGENT.exe": "McAfee",
"MCMNHDLR.exe": "McAfee",
"MCREGWIZ.exe": "McAfee",
"MCUPDATE.exe": "McAfee",
"MCVSSHLD.exe": "McAfee",
"MINILOG.exe": "Zone Alarm",
"MYAGTSVC.exe": "McAfee",
"MYAGTTRY.exe": "McAfee",
"NAVAPSVC.exe": "Norton",
"NAVAPW32.exe": "Norton",
"NAVLU32.exe": "Norton",
"NAVW32.exe": "Norton Antivirus",
"NEOWATCHLOG.exe": "NeoWatch",
"NEOWATCHTRAY.exe": "NeoWatch",
"NISSERV.exe": "Norton",
"NISUM.exe": "Norton",
"NMAIN.exe": "Norton",
"NOD32.exe": "ESET NOD32",
"NPFMSG.exe": "Norman个人防火墙",
"NPROTECT.exe": "Symantec",
"NSMDTR.exe": "Norton",
"NTRTSCAN.exe": "趋势科技",
"OFCPFWSVC.exe": "OfficeScanNT",
"ONLINENT.exe": "已知杀软进程,名称暂未收录",
"OP_MON.exe": " OutpostFirewall",
"PAVFIRES.exe": "熊猫卫士",
"PAVFNSVR.exe": "熊猫卫士",
"PAVKRE.exe": "熊猫卫士",
"PAVPROT.exe": "熊猫卫士",
"PAVPROXY.exe": "熊猫卫士",
"PAVPRSRV.exe": "熊猫卫士",
"PAVSRV51.exe": "熊猫卫士",
"PAVSS.exe": "熊猫卫士",
"PCCGUIDE.exe": "PC-cillin",
"PCCIOMON.exe": "PC-cillin",
"PCCNTMON.exe": "PC-cillin",
"PCCPFW.exe": "趋势科技",
"PCCTLCOM.exe": "趋势科技",
"PCTAV.exe": "PC Tools AntiVirus",
"PERSFW.exe": "Tiny Personal Firewall",
"PERVAC.exe": "已知杀软进程,名称暂未收录",
"PESTPATROL.exe": "Ikarus",
"PREVSRV.exe": "熊猫卫士",
"RTVSCN95.exe": "Real-time Virus Scanner",
"SAVADMINSERVICE.exe": "SAV",
"SAVMAIN.exe": "SAV",
"SAVSCAN.exe": "SAV",
"SDHELP.exe": "Spyware Doctor",
"SHSTAT.exe": "McAfee",
"SPBBCSVC.exe": "Symantec",
"SPIDERCPL.exe": "Dr.Web",
"SPIDERML.exe": "Dr.Web",
"SPIDERUI.exe": "Dr.Web",
"SPYBOTSD.exe": "Spybot ",
"SWAGENT.exe": "SonicWALL",
"SWDOCTOR.exe": "SonicWALL",
"SWNETSUP.exe": "Sophos",
"SYMLCSVC.exe": "Symantec",
"SYMPROXYSVC.exe": "Symantec",
"SYMSPORT.exe": "Sysmantec",
"SYMWSC.exe": "Sysmantec",
"SYNMGR.exe": "Sysmantec",
"TMLISTEN.exe": "趋势科技",
"TMNTSRV.exe": "趋势科技",
"TMPROXY.exe": "趋势科技",
"TNBUTIL.exe": "Anti-Virus",
"VBA32ECM.exe": "已知杀软进程,名称暂未收录",
"VBA32IFS.exe": "已知杀软进程,名称暂未收录",
"VBA32PP3.exe": "已知杀软进程,名称暂未收录",
"VCRMON.exe": "VirusChaser",
"VRMONNT.exe": "HAURI",
"VRMONSVC.exe": "HAURI",
"VSHWIN32.exe": "McAfee",
"VSSTAT.exe": "McAfee",
"XCOMMSVR.exe": "BitDefender",
"ZONEALARM.exe": "Zone Alarm",
"360rp.exe": "360杀毒",
"afwServ.exe": " Avast Antivirus ",
"safeboxTray.exe": "360杀毒",
"360safebox.exe": "360杀毒",
"QQPCTray.exe": "QQ电脑管家",
"KSafeTray.exe": "金山毒霸",
"KSafeSvc.exe": "金山毒霸",
"KWatch.exe": "金山毒霸",
"gov_defence_service.exe": "云锁",
"gov_defence_daemon.exe": "云锁",
"smartscreen.exe": "Windows Defender",
"macompatsvc.exe": "McAfee",
"mcamnsvc.exe ": "McAfee",
"masvc.exe": "McAfee",
"mfemms.exe": "McAfee",
"mctary.exe": "McAfee",
"mcshield.exe": "McAfee",
"mfewc.exe": "McAfee",
"mfewch.exe": "McAfee",
"mfefw.exe": "McAfee",
"mfefire.exe": "McAfee",
"mfetp.exe": "McAfee",
"mfecanary.exe": "McAfee",
"mfeconsole.exe": "McAfee",
"mfeesp.exe": "McAfee",
"fcag.exe": "McAfee",
"fcags.exe": "McAfee",
"fcagswd.exe": "McAfee",
"fcagate.exe": "McAfee",
"360EntClient.exe": "天擎EDR Agent",
"edr_sec_plan.exe": "深信服EDR Agent",
"edr_monitor.exe": "深信服EDR | |
from redux_tests.testRig import emptyHand, holding, inRegion, placedAndHolding, placed
from hpn.traceFile import debug
from util.planUtil import ObjGraspB, PoseD, ObjPlaceB
from mm_operators.mmGenGrasp import potentialGraspConfGen, graspConfHypGen
from redux_tests.testRig import typicalErrProbs
from redux_tests.testObjects import makeLegTable, makeIkeaTable
from redux_tests.testObjects import ikZ
from hpn.flatMLSReplanner import Experiment, PlanTest, makeMLS
from mm_operators.mmBeliefState import typeOfObjName
from util.dist import MultivariateGaussianDistribution
import redux_tests.planGlobals as glob
import geometry.hu as hu
from stripstream.pddl.examples.hpn.or_hpn import set_or_state, or_from_hpn_conf, or_manipulation_problem, \
hpn_from_or_conf, get_object_frame, SIDE_APPROACH_TRANS, get_approach_frame, get_hpn_grasp_tform, get_or_grasps, \
or_from_hpn_approach_vector, real_manipulation_problem
from stripstream.utils import SEPARATOR
from stripstream.pddl.examples.openrave.easy_tamp import convert_state
from itertools import combinations
from manipulation.primitives.transforms import set_full_config, xyzt_from_trans
from manipulation.visualizations import execute_viewer
from openravepy import Environment, RaveDestroy
from itertools import islice
import numpy as np
import copy
import math
from redux_tests.pr2_test.testPr2 import tZ, table1Pose, table2Pose, tinyVar, smallVar, medVar, bigVar
# TODO - maybe just make directly
def make_move(q1, q2):
    """Build a 'Move' operator tuple for a base motion from config q1 to q2."""
    params = ([q1, q2], None)
    return ('Move', params)
def make_move_no_base(q1, q2):
    """Build a 'MoveNB' operator tuple: move from q1 to q2 without moving the base."""
    params = ([q1, q2], None)
    return ('MoveNB', params)
#def make_pick(hand, obj, grasp_desc, grasp, mu, grasp_conf, approach_conf, var=TINY_VAR):
def make_pick(hand, obj, grasp_index, mu_grasp, grasp_conf, approach_conf, var=tinyVar):
    """Build a 'Pick' operator tuple.

    grasp_index is only used to update the discrete distribution over grasps;
    the grasp descriptors themselves are not needed by the operator, so None
    is passed in their place.
    """
    pose_dist = PoseD(hu.Pose(*mu_grasp), var)
    grasp_belief = ObjGraspB(obj, None, grasp_index, None, pose_dist)
    return ('Pick', (approach_conf, grasp_conf, hand, grasp_belief))
def make_place(hand, obj, rest_face, mu_pose, grasp_conf, approach_conf, var=tinyVar):
    """Build a 'Place' operator tuple.

    rest_face is only used to update the discrete distribution over resting
    faces; the face frames are not actually used by the operator, so None is
    passed in their place.
    """
    pose_dist = PoseD(hu.Pose(*mu_pose), var)
    place_belief = ObjPlaceB(obj, None, rest_face, pose_dist)
    return ('Place', (approach_conf, grasp_conf, hand, place_belief))
def make_look(obj, look_conf):
    """Build a 'LookAt' operator tuple (implicitly includes moving the head)."""
    args = (look_conf, obj)
    return ('LookAt', args)
#################################################################
def satisfies(belief, partial_state):
    """Return True iff every fluent of partial_state evaluates to True in belief.

    Mirrors s.fluentValue(f1) where s is an fbch.State.
    """
    for fluent in partial_state.fluents:
        if fluent.valueInDetails(belief) is not True:
            return False
    return True
from random import choice
from geometry import ranges
from redux_tests.pr2_test import pr2IkPoses
HORIZONTAL, VERTICAL = pr2IkPoses.ikTrans()
def sample_base_pose_from_hand_pose(robot, hand_pose, attempts=20):
    """Sample a base pose whose hand tip reaches hand_pose.

    hand_pose represents the tip of the hand.  Each attempt draws a random
    horizontal grasp transform, derives the implied base pose, and keeps it
    only if it lies inside the robot's 'pr2Base' joint limits (x, y, theta).
    Returns None if no valid pose is found within `attempts` tries.
    """
    # list(...) so the result is indexable: under Python 3, map() returns a
    # one-shot iterator and base_limits[0] would fail.  Identical in Python 2.
    base_limits = list(map(lambda pair: ranges.realRange(*pair), list(robot.limits(['pr2Base']))))
    for i in range(attempts):
        base_pose = hand_pose.compose(choice(HORIZONTAL).inversePose()).pose()
        if not (base_limits[0].inside(base_pose.x) and base_limits[1].inside(base_pose.y) and base_limits[2].inside(base_pose.theta)):
            continue
        # flatten onto the floor plane (z = 0)
        base_pose = hu.Pose(base_pose.x, base_pose.y, 0.0, base_pose.theta)
        return base_pose
    return None
def distance_fn(robot, q1, q2):
    """Distance between two configurations, delegated to the robot model."""
    dist = robot.distConf(q1, q2)
    return dist
def config_interpolation(robot, start, end, step_distance=glob.rrtStep):
    """Interpolate between two configurations along a straight line in C-space.

    Returns the list of configurations from `start` to `end` (inclusive),
    spaced `step_distance` apart.  Relies on robot.stepAlongLine eventually
    returning a config that compares equal to `end`; if it never does, this
    loop does not terminate.
    """
    configs = []
    config = start
    while True:
        configs.append(config)
        if config == end:
            return configs
        # NOTE(review): moveChains is taken from `start`; assumes start and
        # end share the same chains -- TODO confirm.
        config = robot.stepAlongLine(end, config, step_distance, forward=True, moveChains=start.conf.keys())
# geometry.objects2.World # Seems to only have the known state elements (i.e. not the current positions)
# print world.__dict__.keys()
# ['robot', 'objectShapes', 'regions', 'objects', 'workspace', 'typePointClouds', 'world']
# world.world = world?
# world.objects is a dict of geometry.objects2.MultiChain
# world.objectShapes is a cached dict of shape
# getObjectShapeAtOrigin
def collision(world, conf, obstacles=tuple()):
    """Return True iff the robot placed at `conf` collides with any object in `world`.

    NOTE(review): the `obstacles` parameter is immediately overwritten below,
    so callers cannot currently restrict the obstacle set; this is preserved
    to keep the original behavior.
    """
    obstacles = world.objects.keys()
    if not obstacles:
        return False
    placed_robot = conf.placement()
    for obst in obstacles:
        obst_shape = world.getObjectShapeAtOrigin(obst)
        placed_obst = obst_shape.applyTrans()
        # Bug fix: test against the transformed shape -- placed_obst was
        # previously computed but the origin-centered obst_shape was tested.
        if placed_robot.collides(placed_obst):
            return True
    return False
def world_check_collisions(real_world):
    """Return True iff the robot in `real_world` (a RealWorld from sim.py)
    collides with any of the placed object shapes."""
    placed_robot = real_world.robotConf.placement()
    return any(placed_robot.collides(shape)
               for shape in real_world.objectShapes.values())
# NOTE - could also use sigma points
def with_high_probability(mvg, probs):
    """Check the per-dimension probability requirements against mvg.

    Dimensions are treated independently; one probability per dimension.
    """
    assert mvg.shape[1] == len(probs)
    prob_vector = np.array(probs)
    return mvg.pnm(prob_vector)
from scipy.integrate import nquad
from scipy.stats.mvn import mvnun
def sample_high_probability(mvg, test, prob, samples=100):
    """Monte-Carlo check that `test` holds with probability >= prob under mvg.

    Draws up to `samples` samples with two early exits: success as soon as
    the fraction of passing samples reaches `prob`, failure as soon as even
    passing all remaining samples could not reach it.
    """
    within = 0.
    for i in range(samples):
        within += test(mvg.draw())
        if within/samples >= prob:
            return True
        # NOTE(review): remaining draws after iteration i are samples-i-1,
        # so this failure bound looks off by one (slightly late) -- confirm.
        if (samples - i + within)/samples < prob:
            return False
    return within/samples >= prob
def pose_high_probability(pose_belief):
    """True iff, with probability >= 0.95, a pose sampled from the belief
    stays within 1 cm (xy radius and |z|) and 0.01 rad of the mean."""
    mvg = get_pose_mvg(pose_belief)
    mean = np.array(mvg.mu).reshape(-1)
    def close_to_mean(sample):
        dx, dy, dz, dtheta = np.abs(sample - mean)
        return math.sqrt(dx**2 + dy**2) < 1e-2 and abs(dz) < 1e-2 and dtheta < 1e-2
    return sample_high_probability(mvg, close_to_mean, .95)
def get_pose_mvg(pose_belief):
    """Build a 4D (x, y, z, theta) Gaussian from the belief's mean tuple and
    diagonal variance."""
    return MultivariateGaussianDistribution(pose_belief.poseD.muTuple, np.diag(pose_belief.poseD.var), pose4=False)
def sample_pose(pose_belief, dist):
    """Draw an object origin pose: sample a face pose from `dist` and compose
    with the inverse of the resting-face frame."""
    face_frame = pose_belief.faceFrames[pose_belief.restFace]
    return hu.Pose(*dist.draw()).compose(face_frame.inverse())
# TODO - weight samples?
def sample_robot_obj_collisions(world, conf, pose_belief, prob, samples=100):
    """Monte-Carlo test: does the robot at `conf` collide with the object of
    `pose_belief` with probability >= prob?

    Early exits mirror sample_high_probability: success once the collision
    fraction reaches `prob`, failure once it can no longer be reached.
    """
    placed_robot = conf.placement()
    dist = get_pose_mvg(pose_belief)
    collisions = 0.
    for i in range(samples):
        pose = sample_pose(pose_belief, dist)
        placed_obj = world.getObjectShapeAtOrigin(pose_belief.obj).applyTrans(pose)
        collisions += placed_robot.collides(placed_obj)
        if collisions/samples >= prob:
            return True
        # NOTE(review): remaining draws are samples-i-1, so this failure
        # bound looks off by one -- confirm.
        if (samples - i + collisions)/samples < prob:
            return False
    return collisions/samples >= prob
def sample_obj_obj_collisions(world, pose_belief1, pose_belief2, prob, samples=100):
    """Monte-Carlo test: do the two objects collide with probability >= prob?

    Both poses are re-sampled independently on every draw; early exits as in
    sample_robot_obj_collisions.
    """
    dist1 = get_pose_mvg(pose_belief1)
    dist2 = get_pose_mvg(pose_belief2)
    shape1 = world.getObjectShapeAtOrigin(pose_belief1.obj)
    shape2 = world.getObjectShapeAtOrigin(pose_belief2.obj)
    collisions = 0.
    for i in range(samples):
        placed_obj1 = shape1.applyTrans(sample_pose(pose_belief1, dist1))
        placed_obj2 = shape2.applyTrans(sample_pose(pose_belief2, dist2))
        collisions += placed_obj1.collides(placed_obj2)
        if collisions/samples >= prob:
            return True
        # NOTE(review): same off-by-one concern as the robot-object variant.
        if (samples - i + collisions)/samples < prob:
            return False
    return collisions/samples >= prob
from operator import itemgetter
def check_belief_collisions(belief, prob):
    """True iff any robot-object or object-object collision occurs with
    probability >= prob.

    Probabilities are checked individually per pair, not as a product.
    """
    world = belief.pbs.getWorld()  # belief.beliefContext.world
    conf = belief.pbs.getConf()    # belief.conf
    # Materialize as a list: pose_beliefs is iterated twice below, and under
    # Python 3 map() returns a one-shot iterator (identical in Python 2).
    pose_beliefs = list(map(itemgetter(1), belief.pbs.objectBs.values()))
    if any(sample_robot_obj_collisions(world, conf, pose_belief, prob) for pose_belief in pose_beliefs):
        return True
    if any(sample_obj_obj_collisions(world, pb1, pb2, prob) for pb1, pb2 in combinations(pose_beliefs, 2)):
        return True
    return False
# NOTE - BHPN uses the UKF for updates
#from mm_operators.mmOps import singleTargetUpdate
#################################################################
from manipulation.oracle import ManipulationOracle
from manipulation.primitives.display import is_viewer_active
from stripstream.algorithms.search.fast_downward import get_fast_downward
from stripstream.algorithms.focused.focused_planner import focused_planner
from stripstream.algorithms.incremental.incremental_planner import incremental_planner
from manipulation.primitives.utils import get_env
from manipulation.primitives.transforms import trans_from_pose
from stripstream.pddl.utils import convert_plan
from manipulation.primitives.look import look_at_ik
from manipulation.constants import ACTIVE_LEFT, ACTIVE_RIGHT
from manipulation.primitives.transforms import object_trans_from_manip_trans
from manipulation.bodies.robot import manip_from_pose_grasp
from manipulation.visualizations import visualize_plan, execute_viewer, Plan, set_state
from stripstream.pddl.examples.openrave.easy_tamp import compile_problem, executable_plan
from stripstream.algorithms.plan import get_states
# robot
# ['confCache', 'color', 'headChainName', 'armStowAngles', 'OSa', 'scanner', 'gripMax', 'baseChainName',
# 'chainNames', 'chainDependRev', 'nominalConf', 'bodyChains', 'chainDependencies', 'verticalTrans',
# 'gripperFaceFrame', 'selfCollideChainNames', 'chains', 'armChainNames', 'moveChainNames',
# 'gripperChainNames', 'horizontalTrans', 'name', 'compiledChainsOS', 'selfCollidePairs', 'wristFrameNames',
# 'toolOffsetZ', 'confCacheKeys', 'toolOffsetX']
#def objFrame(self): # planUtil.pyx
# faceFrame = self.faceFrames[self.restFace]
# return self.poseD.mode().compose(faceFrame.inverse())
#def objectGraspFrame(robot, gdesc, obj_frame, hand): # mmUtil -> objectGraspFrame
# centerFrame = gdesc.frame.compose(hu.Pose(0,0,gdesc.dz,0))
# graspFrame = obj_frame.compose(centerFrame)
# wristFrame = graspFrame.compose(robot.gripperFaceFrame[hand].inverse())
# return wristFrame
# TODO - try a manual pick and place policy using their generators. Do I want the prim or function stuff?
# Prim seems to be called using op.evalPrim(s.details)
# But executePrim doesn't seem to be called. This is because fbch is effectively just HPN
# env.executePrim is different than executePrim as a method
# Neither the prim or the fn seem to be called with just the simulator
# NOTE - Leslie and Tomas reason about faces to have x, y, z, theta distributions?
#print belief.objects[obj].attrs.keys() # ['fixed', 'restFace', 'z', 'poseModeProb', 'shape', 'faceFrames', 'type', 'graspDesc']
#print belief.objects[obj].__dict__.keys() # ['name', 'attrs']
#print belief.pbs.objectBs[obj].items() # [(name, (fix, pb)), ...]
#print belief.pbs.__dict__.keys() # ['exceptions', 'shadowWorld', 'pbs', 'targetConfs', 'beliefContext', 'held', 'base', 'objectBs', 'conf', 'shadowProb', 'avoidShadow', 'conditions', 'graspB']
#type_name = typeOfObjName(obj)
#world.getObjType(name)
#shape, _ = glob.constructor[type_name](name=obj)
#print 'Origin', shape.parts()[0].origin() # Centered around half dz
#print shape.parts()[0] # ((-0.0445, -0.027, 0.0), (0.0445, 0.027, 0.1175))
#shape = belief.objects[obj].attrs['shape'] # NOTE - this is different than the previous line
#print 'Origin', shape.parts()[0].origin() # Unit transform
#print shape.parts()[0] # ((-0.0445, -0.027, -0.05875), (0.0445, 0.027, 0.05875))
from redux_tests.pr2_test.pr2InvKin import gripperTip, gripperToolOffset
from redux_tests.pr2_test.pr2Robot import right_gripperToolOffsetX, right_gripperToolOffsetZ, gFaceFrame, gripperFaceFrame
# print right_gripperToolOffsetZ # NOTE - very close to hu.Transform(gripperToolOffset) (only difference is the offset)
# gripperFaceFrame[hand] # pr2HeadFrame, pr2LeftArmFrame, pr2RightArmFrame
# NOTE - the right gripper has the extra sensor on it
# Face = GDesc*Pose
# Grasp = ObjFrame
# Wrist = Grasp*Tool.T
def get_old_obj_trans(conf, or_grasp):
    """Recover the object transform from a configuration and an OpenRAVE grasp.

    Chains: wrist frame (right arm) * tool offset * inverse grasp transform.
    """
    cart_conf = conf.getCart()
    manip_trans = cart_conf.get('pr2RightArmFrame')
    grasp_trans = hu.Transform(np.linalg.inv(or_grasp.grasp_trans)) # NOTE - this works
    tool_trans = hu.Transform(gripperToolOffset) # NOTE - this works
    obj_trans = manip_trans.compose(tool_trans).compose(grasp_trans.inverse())
    return obj_trans
class ORAgent(object):
    def __init__(self, replan=True):
        """Execution agent backed by an OpenRAVE planner.

        :param bool replan: re-plan after each action when True.
        """
        self.plan = None  # cached plan, or None when (re)planning is needed
        self.replan = replan
        # NOTE(review): hand is hard-coded to 'left' regardless of
        # ACTIVE_LEFT/ACTIVE_RIGHT -- confirm intended.
        #self.hand = 'left' if ACTIVE_LEFT else 'right'
        self.hand = 'left'
        print 'Hand', self.hand
    # TODO - move the planning here?
def convert_action(self, belief, action, args):
default_conf = belief.conf
#hpn_robot = default_conf.robot
or_robot = self.oracle.robot
# TODO - would be good to load the grasps
if action == 'pick':
obj, _, grasp, _, pap = args
grasp_conf, approach_conf = pap.grasp_config, pap.vector_config # TODO - vector config makes a big difference here?
hpn_grasp_conf = hpn_from_or_conf(default_conf, or_robot, grasp_conf)
hpn_approach_conf = hpn_from_or_conf(default_conf, or_robot, approach_conf)
mu_grasp = (0, 0, 0, 0) # This is the error with respect to the grasp
grasp_index = grasp.grasp_index
return make_pick(self.hand, obj, grasp_index, mu_grasp, hpn_grasp_conf, hpn_approach_conf)
elif action == 'move':
# TODO - if the robot collides, it won't move
# NOTE - the goal config moving to the end config is in collision?
# NOTE - maybe the moving is all relative commands?
start_conf, end_conf = args
hpn_start_conf = hpn_from_or_conf(default_conf, or_robot, start_conf)
hpn_end_conf = hpn_from_or_conf(default_conf, or_robot, end_conf)
#assert not collision( belief.pbs.getWorld(), hpn_end_conf)
#return make_move(hpn_start_conf, hpn_end_conf) # NOTE - more likely | |
<reponame>mahabul123/diffractem<filename>diffractem/stream_parser.py<gh_stars>1-10
import pandas as pd
from io import StringIO
import numpy as np
import re
from typing import Union, Optional
BEGIN_GEOM = '----- Begin geometry file -----'
END_GEOM = '----- End geometry file -----'
BEGIN_CELL = '----- Begin unit cell -----'
END_CELL = '----- End unit cell -----'
BEGIN_CHUNK = '----- Begin chunk -----'
END_CHUNK = '----- End chunk -----'
BEGIN_CRYSTAL = '--- Begin crystal'
END_CRYSTAL = '--- End crystal'
BEGIN_PEAKS = 'Peaks from peak search'
END_PEAKS = 'End of peak list'
BEGIN_REFLECTIONS = 'Reflections measured after indexing'
END_REFLECTIONS = 'End of reflections'
HEAD = 'CrystFEL stream format {}.{}'.format(2, 3)
GENERATOR = 'Generated by diffractem StreamParser'
PEAK_COLUMNS = ['fs/px', 'ss/px', '(1/d)/nm^-1', 'Intensity', 'Panel']
REFLECTION_COLUMNS = ['h', 'k', 'l', 'I', 'Sigma(I)', 'Peak', 'Background', 'fs/px', 'ss/px', 'Panel']
ID_FIELDS = ['file', 'Event', 'serial']
def make_substream(stream: 'StreamParser', Ncryst: int, seed: Optional[int] = None,
                   filename: Optional[str] = None, query: Optional[str] = None):
    """Write a stream file containing a sub-set of events to a new stream file.

    Args:
        stream (StreamParser): StreamParser object holding the original stream
        Ncryst (int): Number of events to sample
        seed (Optional[int], optional): Seed of the random generator. Defaults to None.
        filename (Optional[str], optional): Output stream filename. Defaults to filename of original stream,
            with '-N_{Ncryst}' appended, where {Ncryst} is the number of sampled crystals.
        query (str, optional): query to pre-select events. A sensible choice might be to
            only pick indexed events by setting query='indexed_by != "none"'. Defaults to None.

    Returns:
        str: name of the stream file that was written.
    """
    fn2 = 'subsets/' + stream.filename.rsplit('.',1)[0] + f'-N_{Ncryst}.stream' \
        if filename is None else filename
    sel = stream.shots if query is None else stream.shots.query(query)
    sel = sel.sample(n=Ncryst, random_state=seed)
    # sort descending so that pop() below yields sections in ascending
    # file-line order
    sel.sort_values(by='first_line', ascending=False, inplace=True)
    first = list(sel.first_line)
    last = list(sel.last_line)
    # extra leading section: everything before the first chunk
    # (header/geometry/cell), which must be copied as well
    first.append(0)
    last.append(stream.shots.first_line.min() - 1)
    copying = False
    section = (first.pop(), last.pop())
    with open(stream.filename,'r') as fh_from, open(fn2,'w') as fh_to:
        for ln, l in enumerate(fh_from):
            if not copying:
                if ln == section[0]:
                    copying = True
            if copying:
                fh_to.write(l)
                if ln == section[1]:
                    copying = False
                    try:
                        section = (first.pop(), last.pop())
                    except IndexError:
                        # all requested sections have been copied
                        break
    print('Wrote subset with', len(sel), 'events to', fn2)
    return fn2
def augment_stream(streamname: str, outfile: str, new_fields: Union[pd.DataFrame, dict], where: str = 'chunk'):
    """Add new fields to chunk headers in the stream file, which can then be used for chopping or filtering.

    Somewhat similar to indexamajig's "include-hdf5-field" option, just *after* the fact.

    Args:
        streamname (str): Name of the input stream file
        outfile (str): Name of the augmented stream file to write
        new_fields (pd.DataFrame): pandas DataFrame with index matching the file and Event of the stream file
            and columns matching the additional fields to be added
        where (str): 'chunk' inserts the fields just before the peak list,
            'crystal' just before the reflection list. Defaults to 'chunk'.
    """
    chunk_init = False   # True while inside a chunk
    found_fn = ''
    found_event = ''
    with open(streamname, 'r') as fh_in, open(outfile, 'w') as fh:
        for ln, l in enumerate(fh_in):
            if not chunk_init and l.startswith(BEGIN_CHUNK):
                chunk_init = True
                file_init = False
                event_init = False
                found_event = ''
            elif chunk_init and l.startswith('Image filename:'):
                found_fn = l.split(': ')[-1].strip()
                file_init = True
            elif chunk_init and l.startswith('Event:'):
                found_event = l.split(': ')[-1].strip()
                event_init = True
            elif chunk_init and event_init and file_init and \
                    l.startswith(BEGIN_REFLECTIONS if where == 'crystal' else BEGIN_PEAKS):
                # insertion point reached: write the new fields for this
                # (file, Event) just before the table starts
                # .items() -- Series.iteritems() was removed in pandas 2.0
                for k, v in new_fields.loc[(found_fn, found_event), :].items():
                    fh.write(f'{k} = {v}\n')
            elif chunk_init and l.startswith(END_CHUNK):
                chunk_init = False
            # every input line is copied through unchanged
            fh.write(l)
def chop_stream(streamname: str, id_list: list, id_field: str = 'hdf5/%/shots/frame',
                id_suffix: str = 'frame', fn_contains: str = None):
    """Chops a stream file into sub-streams containing only shots with a specific value of
    a defined field, which must be in the chunk header. Useful e.g. for chopping into aggregation
    frames, different sample grids, runs with different rotation angles etc.

    If you just want to *select* a sub-set of a stream file instead of chopping it up into many parts,
    consider using the stream_grep script included with CrystFEL, which is way faster and more flexible.

    Args:
        streamname (str): Stream file name
        id_list (list): List of values of the ID variable which you want to have in the final files.
        id_field (str): Field in chunk data to select by. Defaults to 'hdf5/%/shots/frame'.
        id_suffix (str): Suffix applied to the output stream file names. Defaults to 'frame'.
        fn_contains (str): If given, only chunks whose image filename contains this
            substring are included. Defaults to None.
    """
    outfiles = {}
    try:
        for fnum in id_list:
            outfiles[fnum] = open(streamname.rsplit('.', 1)[0] + f'-{id_suffix}{fnum}.stream', 'w')
        chunk_init = False   # True while a chunk is being buffered
        chunk_string = ''    # buffered text of the current chunk
        value = None         # id value of the current chunk (None = no match)
        include_file = True
        with open(streamname, 'r') as fh_in:
            for l in fh_in:
                if not chunk_init and l.startswith(BEGIN_CHUNK):
                    chunk_init = True
                    chunk_string += l
                    value = None
                    include_file = True
                elif chunk_init and (fn_contains is not None) and l.startswith('Image filename'):
                    found_fn = l.split(': ')[-1].strip()
                    include_file = fn_contains in found_fn
                    chunk_string += l
                elif chunk_init and l.startswith(id_field):
                    found_value = parse_str_val(l.rsplit('=', 1)[-1].strip())
                    chunk_string += l
                    value = found_value if found_value in id_list else None
                elif chunk_init and l.startswith(END_CHUNK):
                    chunk_init = False
                    chunk_string += l
                    # flush the buffered chunk to its output file, if selected
                    if (value is not None) and include_file:
                        outfiles[value].write(chunk_string)
                    chunk_string = ''
                elif chunk_init:
                    chunk_string += l
                else:
                    # outside any chunk (header/geometry): copy to all files
                    for fh in outfiles.values():
                        fh.write(l)
    finally:
        # bug fix: the original never closed the output files
        for fh in outfiles.values():
            fh.close()
def parse_str_val(input: str):
    """Parse a string value to int, then float, falling back to the stripped string.

    Args:
        input (str): raw value string, possibly padded with whitespace.

    Returns:
        int | float | str: the parsed value.
    """
    stripped = input.strip()
    try:
        return int(stripped)
    except ValueError:
        pass
    try:
        # bug fix: the original used a bare `except:` here, which would also
        # swallow KeyboardInterrupt/SystemExit and real TypeErrors
        return float(stripped)
    except ValueError:
        return stripped
class StreamParser:
    def __init__(self, filename, parse_now=True, serial_offset=-1, new_folder=None):
        """Parser for CrystFEL stream files.

        :param filename: name of the stream file to parse
        :param parse_now: parse immediately on construction (default True)
        :param serial_offset: offset added to serial numbers found in the
            stream (default -1, i.e. make them zero-based)
        :param new_folder: optional folder substitution forwarded to parse()
        """
        self.merge_shot = False
        self.command = ''              # indexamajig command line found in the stream
        self._cell_string = []         # raw lines of the unit-cell section
        self._geometry_string = []     # raw lines of the geometry section
        self._peaks = pd.DataFrame()
        self._indexed = pd.DataFrame()
        self._shots = pd.DataFrame()
        self._crystals = pd.DataFrame()
        self._parsed_lines = 0
        self._total_lines = 0
        self.filename = filename
        self.serial_offset = serial_offset
        if parse_now:
            self.parse(new_folder)
@property
def geometry(self):
"""
:return: geometry section as dictionary
"""
g = {}
for l in self._geometry_string:
if l.startswith(';'):
continue
if '=' not in l:
continue
k, v = l.split(';')[0].split('=', 1)
g[k.strip()] = parse_str_val(v)
return g
@property
def cell(self):
"""
:return: cell section as dictionary
"""
c = {}
for l in self._cell_string:
if '=' not in l:
continue
k, v = l.split('=', 1)
try:
c[k.strip()] = float(v)
except ValueError:
c[k.strip()] = v.strip()
return c
@property
def options(self):
"""
:return: crystfel call options (ONLY -- ones) as dict
"""
o = {}
for opt in re.findall('--\S+', self.command):
if '=' in opt:
k, v = opt[2:].split('=', 1)
try:
o[k.strip()] = int(v)
except ValueError:
try:
o[k.strip()] = float(v)
except ValueError:
o[k.strip()] = v.strip()
else:
o[opt[2:].strip()] = None
return o
    @property
    def indexed(self):
        """DataFrame of indexed reflections parsed from the stream."""
        return self._indexed
    @property
    def peaks(self):
        """DataFrame of found peaks parsed from the stream."""
        return self._peaks
    @property
    def shots(self):
        """Shot table left-joined with per-crystal data on file/Event/serial."""
        return self._shots.merge(self._crystals, on=ID_FIELDS, how='left')
    @property
    def input_file(self):
        """Input file name extracted from the '-i' option of the stored command line."""
        return self.command.split('-i ')[1].split(' -')[0].strip()
    @property
    def files(self):
        """List of unique data file names referenced by the shot table."""
        return list(self.shots.file.unique())
def parse(self, new_folder):
linedat_peak = StringIO()
linedat_index = StringIO()
shotlist = []
crystallist = []
init_peak = False
init_index = False
init_geom = False
init_cell = False
init_crystal_info = False
init_chunk = False
shotdat = {'Event': None, 'shot_in_subset': None, 'subset': None,
'file': None, 'serial': None}
crystal_info = {}
idstr = None
self._parsed_lines = 0
self._total_lines = 0
skip = False
# lines are queried for their meaning. Lines belonging to tables are appended to StringIO virtual files,
# which are then read into pandas data frames at the very end. The order of Queries is chosen to optimize
# performance, that is, the table lines (most frequent) come first.
with open(self.filename) as fh:
for ln, l in enumerate(fh):
self._parsed_lines += 1
self._total_lines += 1
if skip:
skip = False
continue
# EVENT CHUNKS
# Actual parsing (indexed peaks)
if init_index and END_REFLECTIONS in l:
init_index = False
elif init_index:
linedat_index.write(
' '.join([l.strip(), str(ln), idstr, '\n']))
# Actual parsing (found peaks)
elif init_peak and END_PEAKS in l:
init_peak = False
elif init_peak:
linedat_peak.write(
' '.join([l.strip(), str(ln), idstr, '\n']))
# Required info at chunk head
elif BEGIN_CHUNK in l:
shotdat = {'Event': '_', 'shot_in_subset': -1, 'subset': '_',
'file': '', 'serial': -1, 'first_line': ln, 'last_line': -1}
init_chunk = True
elif END_CHUNK in l:
shotdat['last_line'] = ln
shotlist.append(shotdat)
shotdat = {'Event': None, 'shot_in_subset': None, 'subset': None,
'file': None, 'serial': None, 'first_line': None, 'last_line': None}
init_chunk = False
elif 'Event:' in l:
shotdat['Event'] = l.split(': ')[-1].strip()
dummy_shot = shotdat['Event'].split('//')[-1]
if dummy_shot in | |
<reponame>heprom/pymicro
"""The dct module provide helpers functions to work with experimental diffraction contrast tomography data.
"""
import os
import h5py
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt, cm
from pymicro.xray.experiment import ForwardSimulation
from pymicro.crystal.lattice import HklPlane
from pymicro.xray.xray_utils import lambda_keV_to_nm, radiograph, radiographs
from pymicro.crystal.microstructure import Grain, Orientation
class DctForwardSimulation(ForwardSimulation):
"""Class to represent a Forward Simulation."""
    def __init__(self, verbose=False):
        """Initialize the DCT forward simulation.

        :param bool verbose: activate verbose mode.
        """
        super(DctForwardSimulation, self).__init__('dct', verbose=verbose)
        self.hkl_planes = []  # candidate HklPlane instances for diffraction
        self.check = 1  # grain id to display infos in verbose mode
        self.omegas = None  # rotation angles array, populated by setup()
        self.reflections = []  # per-omega-bin lists of [grain_id, (h, k, l)]
    def set_hkl_planes(self, hkl_planes):
        """Set the list of HklPlane instances used by the forward simulation."""
        self.hkl_planes = hkl_planes
    def set_diffracting_famillies(self, hkl_list):
        """Set the list of diffracting hkl planes using a set of families.

        :param list hkl_list: list of family strings (e.g. '111') to expand.
        """
        symmetry = self.exp.get_sample().get_material().get_symmetry()
        hkl_planes = []
        for hkl in hkl_list:
            # NOTE(review): the original comment claimed include_friedel_pairs
            # is set to False with the pairs handled later, but True is passed
            # here while setup() also adds the +180 deg reflections -- confirm
            # there is no double counting.
            planes = HklPlane.get_family(hkl, include_friedel_pairs=True, crystal_structure=symmetry)
            for plane in planes:  # fix the lattice
                plane.set_lattice(self.exp.get_sample().get_material())
            hkl_planes.extend(planes)
        self.set_hkl_planes(hkl_planes)
    def setup(self, omega_step, grain_ids=None):
        """Setup the forward simulation.

        Bins, for every grain/plane pair fulfilling the Bragg condition, the
        four diffraction events (w1, w2 and their Friedel pairs at +180 deg)
        into ``self.reflections`` indexed by omega bin.

        :param float omega_step: the angular integration step (in degrees) used to compute the diffraction conditions.
        :param list grain_ids: a list of grain ids to restrict the forward simulation (use all grains by default).
        """
        assert self.exp.source.min_energy == self.exp.source.max_energy  # monochromatic case
        lambda_keV = self.exp.source.max_energy
        self.omegas = np.linspace(0.0, 360.0, num=int(360.0 / omega_step), endpoint=False)
        self.reflections = []
        for omega in self.omegas:
            self.reflections.append([])
        if grain_ids:
            # make a list of the grains selected for the forward simulation
            grains = [self.exp.sample.microstructure.get_grain(gid) for gid in grain_ids]
        else:
            grains = self.exp.sample.microstructure.grains
        for g in grains:
            for plane in self.hkl_planes:
                (h, k, i, l) = HklPlane.three_to_four_indices(*plane.miller_indices())
                try:
                    (w1, w2) = g.dct_omega_angles(plane, lambda_keV, verbose=False)
                except ValueError:
                    # plane does not diffract for this grain orientation
                    if self.verbose:
                        print('plane {} does not fulfil the Bragg condition for grain {:d}'.format((h, k, i, l), g.id))
                    continue
                # add angles for Friedel pairs
                w3 = (w1 + 180.) % 360
                w4 = (w2 + 180.) % 360
                if self.verbose and g.id == self.check:
                    print('grain %d, angles for plane %d%d%d: w1=%.3f and w2=%.3f | delta=%.1f' % (g.id, h, k, l, w1, w2, w1-w2))
                    print('(%3d, %3d, %3d, %3d) -- %6.2f & %6.2f' % (h, k, i, l, w1, w2))
                self.reflections[int(w1 / omega_step)].append([g.id, (h, k, l)])
                self.reflections[int(w2 / omega_step)].append([g.id, (h, k, l)])
                self.reflections[int(w3 / omega_step)].append([g.id, (-h, -k, -l)])
                self.reflections[int(w4 / omega_step)].append([g.id, (-h, -k, -l)])
    def load_grain(self, gid=1):
        """Load a grain from its '4_grains/phase_01/grain_%04d.mat' file and
        store it as ``self.grain``.

        :param int gid: the id of the grain to load (1 default).
        """
        print('loading grain from file 4_grains/phase_01/grain_%04d.mat' % gid)
        # NOTE(review): no mode passed to h5py.File; recent h5py versions
        # default to read-only -- confirm for the version in use.
        with h5py.File(os.path.join(self.exp.get_sample().data_dir, '4_grains/phase_01/grain_%04d.mat' % gid)) as gmat:
            g = Grain(gid, Orientation.from_rodrigues(gmat['R_vector'][()]))
            g.om_exp = gmat['om_exp'][0, :]  # experimental omega angles
            g.uv_exp = gmat['uv_exp'][:, :]  # experimental spot positions
            g.center = gmat['center'][:, 0]
            try:
                # file layout that stores HDF5 object references
                ref_included = gmat['proj/included'][0][0]
                g.included = gmat[ref_included][0, :]
                ref_ondet = gmat['proj/ondet'][0][0]
                g.ondet = gmat[ref_ondet][0, :]
                # grab the projection stack
                ref_stack = gmat['proj']['stack'][0][0]
                g.stack_exp = gmat[ref_stack][()].transpose(1, 2, 0)  # now in [ndx, u, v] form
                g.hklsp = gmat['allblobs/hklsp'][:, :]
            except AttributeError:
                # classic file organization
                g.included = gmat['proj/included'][0, :]
                g.ondet = gmat['proj/ondet'][0, :]
                g.stack_exp = gmat['proj/stack'][()].transpose(1, 2, 0)  # now in [ndx, u, v] form
                g.hklsp = gmat['allblobs/hklsp'][:, :]
        self.grain = g
        if self.verbose:
            print('experimental proj stack shape: {}'.format(g.stack_exp.shape))
def grain_projection_image(self, g_uv, g_proj):
"""Produce a 2D image placing all diffraction spots of a given grain at their respective position on the detector.
Spots outside the detector are are skipped while those only partially on the detector are cropped accordingly.
:param g_proj: image stack of the diffraction spots. The first axis is so that g_proj[0] is the first spot \
the second axis is the horizontal coordinate of the detector (u) and the third axis the vertical coordinate \
of the detector (v).
:param g_uv: list or array of the diffraction spot position.
:returns: a 2D composite image of all the diffraction spots.
"""
print(len(g_proj), g_uv.shape[1])
assert len(g_proj) == g_uv.shape[1]
image = np.zeros(self.exp.get_active_detector().get_size_px())
for i in range(len(g_proj)):
spot = g_proj[i]
if self.verbose:
print('i={0}, size of spot: {1}'.format(i, spot.shape))
print('placing diffraction spot at location {0}'.format(g_uv[:, i]))
add_to_image(image, spot, g_uv[:, i], self.verbose)
return image
    def grain_projection_exp(self, gid=1):
        """Produce a composite image with all the experimental diffraction spots of this grain on the detector.

        :param int gid: the number of the selected grain.
        :returns: a 2D composite image of all the diffraction spots.
        """
        if not hasattr(self, 'grain') or self.grain.id != gid:
            # load the corresponding grain
            self.load_grain(gid=gid)
        return self.grain_projection_image(self.grain.uv_exp, self.grain.stack_exp)
def grain_projections(self, omegas, gid=1, data=None, hor_flip=False, ver_flip=False):
"""Compute the projections of a grain at different rotation angles.
The method compute each projection and concatenate them into a single 3D array in the form [n, u, v]
with n the number of angles.
:param list omegas: the list of omega angles to use (in degrees).
:param int gid: the id of the grain to project (1 default).
:param ndarray data: the data array representing the grain.
:param bool hor_flip: a flag to apply a horizontal flip.
:param bool ver_flip: a flag to apply a vertical flip.
:return: a 3D array containing the n projections.
"""
from scipy import ndimage
if data is None:
grain_ids = self.exp.get_sample().get_grain_ids()
print('binarizing grain %d' % gid)
data = np.where(grain_ids[ndimage.find_objects(grain_ids == gid)[0]] == gid, 1, 0)
print('shape of binary grain is {}'.format(data.shape))
stack_sim = radiographs(data, omegas)
stack_sim = stack_sim.transpose(2, 0, 1)[:, ::-1, ::-1]
# here we need to account for the detector flips (detector is always supposed to be perpendicular to the beam)
# by default (u, v) correspond to (-Y, -Z)
if hor_flip:
print('applying horizontal flip to the simulated image stack')
stack_sim = stack_sim[:, ::-1, :]
if ver_flip:
print('applying vertical flip to the simulated image stack')
stack_sim = stack_sim[:, :, ::-1]
return stack_sim
    def grain_projection_simulation(self, gid=1):
        """Function to compute all the grain projection in DCT geometry and create a composite image.

        :param int gid: the id of the grain to project (1 default).
        """
        print('forward simulation of grain %d' % gid)
        detector = self.exp.get_active_detector()
        lambda_keV = self.exp.source.max_energy
        lambda_nm = lambda_keV_to_nm(lambda_keV)
        X = np.array([1., 0., 0.]) / lambda_nm  # incident wave vector
        # NOTE(review): `lattice` is assigned but unused below -- confirm.
        lattice = self.exp.get_sample().get_material()
        if not hasattr(self, 'grain'):
            # load the corresponding grain
            self.load_grain(gid=gid)
        # compute all the omega values
        print('simulating diffraction spot positions on the detector')
        omegas = np.zeros(2 * len(self.hkl_planes))
        g_uv = np.zeros((2, 2 * len(self.hkl_planes)))
        for i, plane in enumerate(self.hkl_planes):
            try:
                w1, w2 = self.grain.dct_omega_angles(plane, lambda_keV, verbose=False)
            except ValueError:
                # plane does not fulfil the Bragg condition
                continue
            omegas[2 * i] = w1
            omegas[2 * i + 1] = w2
            for j in range(2):
                omega = omegas[2 * i + j]
                omegar = omega * np.pi / 180
                # rotation matrix about the vertical (z) axis
                R = np.array([[np.cos(omegar), -np.sin(omegar), 0], [np.sin(omegar), np.cos(omegar), 0], [0, 0, 1]])
                gt = self.grain.orientation_matrix().transpose()
                G = np.dot(R, np.dot(gt, plane.scattering_vector()))
                K = X + G  # diffracted wave vector
                # position of the grain at this rotation angle
                g_pos_rot = np.dot(R, self.grain.center)
                pg = detector.project_along_direction(K, g_pos_rot)
                (up, vp) = detector.lab_to_pixel(pg)[0]
                g_uv[:, 2 * i + j] = up, vp
        # check detector flips
        hor_flip = np.dot(detector.u_dir, [0, -1, 0]) < 0
        ver_flip = np.dot(detector.v_dir, [0, 0, -1]) < 0
        if self.verbose:
            print(detector.u_dir)
            print(detector.v_dir)
            print('detector horizontal flip: %s' % hor_flip)
            print('detector vertical flip: %s' % ver_flip)
        # compute the projections
        stack_sim = self.grain_projections(omegas, gid, hor_flip=hor_flip, ver_flip=ver_flip)
        return self.grain_projection_image(g_uv, stack_sim)
def dct_projection(self, omega, include_direct_beam=True, att=5):
"""Function to compute a full DCT projection at a given omega angle.
:param float omega: rotation angle in degrees.
:param bool include_direct_beam: flag to compute the transmission through the sample.
:param float att: an attenuation factor used to limit the gray levels in the direct beam.
:return: the dct projection as a 2D numpy array
"""
if len(self.reflections) == 0:
print('empty list of reflections, you should run the setup function first')
return None
grain_ids = self.exp.get_sample().get_grain_ids()
detector = self.exp.get_active_detector()
lambda_keV = self.exp.source.max_energy
lattice = self.exp.get_sample().get_material()
index = np.argmax(self.omegas > omega)
dif_grains = self.reflections[index - 1] # grains diffracting between omegas[index - 1] and omegas[index]
# intialize image result
full_proj = np.zeros(detector.get_size_px(), dtype=np.float)
lambda_nm = lambda_keV_to_nm(lambda_keV)
omegar = omega * np.pi / 180
R = | |
list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})
df_cs_100.to_csv(self.path + 'catalogs/' + 'cs_100' + '.csv', index=False)
df_cs_143 = pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})
df_cs_143.to_csv(self.path + 'catalogs/' + 'cs_143' + '.csv', index=False)
df_cs_217 = pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})
df_cs_217.to_csv(self.path + 'catalogs/' + 'cs_217' + '.csv', index=False)
df_cs_353 = pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})
df_cs_353.to_csv(self.path + 'catalogs/' + 'cs_353' + '.csv', index=False)
df_cs_545 = pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})
df_cs_545.to_csv(self.path + 'catalogs/' + 'cs_545' + '.csv', index=False)
df_cs_857 = pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})
df_cs_857.to_csv(self.path + 'catalogs/' + 'cs_857' + '.csv', index=False)
freq = 0
if '100GHz' in bands:
freq += 2
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})))
if '143GHz' in bands:
freq += 4
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})))
if '217GHz' in bands:
freq += 8
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})))
if '353GHz' in bands:
freq += 16
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})))
if '545GHz' in bands:
freq += 32
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})))
if '857GHz' in bands:
freq += 64
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})))
df = pd.concat((df_pgcc, df))
df = self.remove_duplicates_on_radec(df, with_itself=True, tol=2)
df.to_csv(self.path + 'catalogs/' + 'False_SZ_catalog_f%s'%freq + '.csv', index=False)
df = pd.DataFrame(columns=['RA','DEC','GLON','GLAT'])
for L in range(1, len(bands)):
for subset in tqdm(itertools.combinations(bands, L)):
freq = 0
if '100GHz' in subset:
freq += 2
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])})))
if '143GHz' in subset:
freq += 4
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])})))
if '217GHz' in subset:
freq += 8
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])})))
if '353GHz' in subset:
freq += 16
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])})))
if '545GHz' in subset:
freq += 32
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])})))
if '857GHz' in subset:
freq += 64
df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])})))
df = pd.concat((df_pgcc, df))
df = self.remove_duplicates_on_radec(df, with_itself=True, tol=2)
df.to_csv(self.path + 'catalogs/' + 'False_SZ_catalog_f%s'%freq + '.csv', index=False)
cs_100.close()
cs_143.close()
cs_217.close()
cs_353.close()
cs_545.close()
cs_857.close()
def remove_duplicates_on_radec(self, df_main, df_with_dup=None, output_name=None, with_itself=False, tol=5, plot=False):
    """Takes two different dataframes with columns 'RA' & 'DEC' and performs a spatial
    coordinate match with a tol=5 arcmin tolerance. Saves a .csv file containing df_main
    without objects in common from df_with_dup.

    Note: df_main is modified in place (temporary 'ismatched'/'ID' columns are
    added then dropped) and sentinel values (-1, -10, -99) are replaced by NaN.

    Args:
        df_main (pd.DataFrame): main dataframe.
        df_with_dup (pd.DataFrame): dataframe that contains objects from df_main. Defaults to None.
        output_name (str): name that will be used in the saved/plot file name. If None, no file will be saved. Defaults to None.
        with_itself (bool, optional): If True, the spatial coordinates match will be performed with df_main. Defaults to False.
        tol (int, optional): tolerance for spatial coordinates match in arcmin. Defaults to 5.
        plot (bool, optional): If True, will save duplicates distance from each other distribution plots. Defaults to False.

    Returns:
        pd.DataFrame: df_main with duplicates removed.
    """
    if with_itself == True:
        # Match the catalog against itself; nthneighbor=2 skips the trivial
        # self-match and returns each object's nearest *distinct* neighbor.
        scatalog_sub = SkyCoord(ra=df_main['RA'].values, dec=df_main['DEC'].values, unit='deg')
        idx, d2d, _ = match_coordinates_sky(scatalog_sub, scatalog_sub, nthneighbor=2)
        ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
        df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d})
        df_main['ismatched'], df_main['ID'] = ismatched, idx
        # keep only the objects with no neighbor closer than tol
        df_main.query("ismatched == False", inplace=True)
        df_main.drop(columns=['ismatched', 'ID'], inplace=True)
        df_main = df_main.replace([-1, -10, -99], np.nan)
        if output_name is not None:
            df_main.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False)
    elif with_itself == False:
        assert df_with_dup is not None
        # tag df_with_dup rows with a running ID so matches can be joined back
        ID = np.arange(0, len(df_with_dup))
        df_with_dup = df_with_dup[['RA', 'DEC']].copy()
        df_with_dup.insert(loc=0, value=ID, column='ID')
        scatalog_sub = SkyCoord(ra=df_main['RA'].values, dec=df_main['DEC'].values, unit='deg')
        pcatalog_sub = SkyCoord(ra=df_with_dup['RA'].values, dec=df_with_dup['DEC'].values, unit='deg')
        idx, d2d, _ = match_coordinates_sky(scatalog_sub, pcatalog_sub, nthneighbor=1)
        ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
        df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d})
        df_main['ismatched'], df_main['ID'] = ismatched, idx
        df_with_dup.drop(columns=['RA', 'DEC'], inplace=True)
        # outer merge + query('both') behaves as an inner join on 'ID'
        df_wo_dup = pd.merge(df_main, df_with_dup, indicator=True, on='ID', how='outer').query('_merge=="both"').drop('_merge', axis=1)
        df_wo_dup.query("ismatched == False", inplace=True)
        df_wo_dup.drop(columns=['ismatched', 'ID'], inplace=True)
        df_wo_dup = df_wo_dup.replace([-1, -10, -99], np.nan)
        if output_name is not None:
            df_wo_dup.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False)
        df_main = df_wo_dup.copy()
    if plot == True and output_name is not None:
        # histogram of neighbor distances; the *60 presumably converts degrees
        # to arcmin -- NOTE(review): confirm d2d is stored in degrees here
        fig = plt.figure(figsize=(8,8), tight_layout=False)
        ax = fig.add_subplot(111)
        ax.set_facecolor('white')
        ax.grid(True, color='grey', lw=0.5)
        ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
        ax.set_xlabel(r'$\mathrm{angular\;distance\;\left(arcmin\right)}$', fontsize=20)
        ax.set_ylabel(output_name, fontsize=20)
        ax.hist(np.array(df_d2d['d2d'].values)*60, bins = 400)
        ax.axvline(tol, color='k', linestyle='--')
        ax.set_xlim(0, 2*tol)
        plt.savefig(self.output_path + 'figures/' + 'd2d_' + output_name + '.png', bbox_inches='tight', transparent=False)
        plt.show()
        plt.close()
    return df_main
def remove_duplicates_on_lonlat(self, df_main, df_with_dup=None, output_name=None, with_itself=False, tol=2, plot=False):
    """Takes two different dataframes with columns 'GLON' & 'GLAT' and performs a spatial
    coordinate match with a tol=2 arcmin tolerance. Saves a .csv file containing df_main
    without objects in common from df_with_dup.

    Galactic-coordinate counterpart of `remove_duplicates_on_radec`. Note:
    df_main is modified in place (temporary 'ismatched'/'ID' columns are added
    then dropped) and sentinel values (-1, -10, -99) are replaced by NaN.

    Args:
        df_main (pd.DataFrame): main dataframe.
        output_name (str): name that will be used in the saved/plot file name. If None, no file will be saved. Defaults to None.
        df_with_dup (pd.DataFrame): dataframe that contains objects from df_main. Defaults to None.
        with_itself (bool, optional): If True, the spatial coordinates match will be performed with df_main. Defaults to False.
        tol (int, optional): tolerance for spatial coordinates match in arcmin. Defaults to 2.
        plot (bool, optional): If True, will save duplicates distance from each other distribution plots. Defaults to False.

    Returns:
        pd.DataFrame: df_main with duplicates removed.
    """
    if with_itself == True:
        # self-match: nthneighbor=2 skips the trivial self-pairing
        scatalog_sub = SkyCoord(df_main['GLON'].values, df_main['GLAT'].values, unit='deg', frame='galactic')
        idx, d2d, _ = match_coordinates_sky(scatalog_sub, scatalog_sub, nthneighbor=2)
        ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
        df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d})
        df_main['ismatched'], df_main['ID'] = ismatched, idx
        # keep only objects with no neighbor closer than tol
        df_main.query("ismatched == False", inplace=True)
        df_main.drop(columns=['ismatched', 'ID'], inplace=True)
        df_main = df_main.replace([-1, -10, -99], np.nan)
        if output_name is not None:
            df_main.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False)
    elif with_itself == False:
        assert df_with_dup is not None
        # tag df_with_dup rows with a running ID so matches can be joined back
        ID = np.arange(0, len(df_with_dup))
        df_with_dup = df_with_dup[['GLON', 'GLAT']].copy()
        df_with_dup.insert(loc=0, value=ID, column='ID')
        scatalog_sub = SkyCoord(df_main['GLON'].values, df_main['GLAT'].values, unit='deg', frame='galactic')
        pcatalog_sub = SkyCoord(df_with_dup['GLON'].values, df_with_dup['GLAT'].values, unit='deg', frame='galactic')
        idx, d2d, _ = match_coordinates_sky(scatalog_sub, pcatalog_sub, nthneighbor=1)
        ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same
        df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d})
        df_main['ismatched'], df_main['ID'] = ismatched, idx
        df_with_dup.drop(columns=['GLON', 'GLAT'], inplace=True)
        # outer merge + query('both') behaves as an inner join on 'ID'
        df_wo_dup = pd.merge(df_main, df_with_dup, indicator=True, on='ID', how='outer').query('_merge=="both"').drop('_merge', axis=1)
        df_wo_dup.query("ismatched == False", inplace=True)
        df_wo_dup.drop(columns=['ismatched', 'ID'], inplace=True)
        df_wo_dup = df_wo_dup.replace([-1, -10, -99], np.nan)
        if output_name is not None:
            df_wo_dup.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False)
        df_main = df_wo_dup.copy()
    if plot == True and output_name is not None:
        # histogram of neighbor distances; the *60 presumably converts degrees
        # to arcmin -- NOTE(review): confirm d2d is stored in degrees here
        fig = plt.figure(figsize=(8,8), tight_layout=False)
        ax = fig.add_subplot(111)
        ax.set_facecolor('white')
        ax.grid(True, color='grey', lw=0.5)
        ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
        ax.set_xlabel(r'$\mathrm{angular\;distance\;\left(arcmin\right)}$', fontsize=20)
        ax.set_ylabel(output_name, fontsize=20)
        ax.hist(np.array(df_d2d['d2d'].values)*60, bins = 400)
        ax.axvline(tol, color='k', linestyle='--')
        ax.set_xlim(0, 2*tol)
        plt.savefig(self.output_path + 'figures/' + 'd2d_' + output_name + '.png', bbox_inches='tight', transparent=False)
        plt.show()
        plt.close()
    return df_main
def create_circular_mask(self, h, w, center, ang_center, radius):
"""Takes a list of center positions and returns a segmentation mask with circulat masks at the center's
position.
Args:
h (int): patch height.
w (int): patch width.
center (list of tuples): In pixels. List of tupples containing center coordinates to mask.
ang_center (list of tuples): In ICRS. Same as center
radius ([type]): In arcmin. Disk radius for mask
Returns:
np.ndarray: ndarray with shape (h,w) filled with zeros except at centers position where circular masks
with size radius are equal to one.
"""
if radius is None:
size_distribution = fits.open(self.path + 'catalogs/exp_joined_ami_carma_plck_psz1_psz2_act_spt_YT.fits')[1].data['T500']
heights, bins = np.histogram(size_distribution, bins=8, density=False, range=[0,15])
heights = heights/sum(heights)
bins = bins[1:]
radius = np.random.choice(bins, p=heights)/self.pixsize
else:
radius = radius/self.pixsize
Y, X = np.ogrid[:h, :w]
mask = np.zeros((h,w))
count = 0
ra, dec = [], []
for i,c in enumerate(center):
if np.isnan(c[0]):
continue
elif np.isnan(c[1]):
continue
else:
dist_from_center = np.sqrt((X - int(c[0]))**2 + (Y - int(c[1]))**2)
mask += (dist_from_center <= radius).astype(int)
is_all_zero = np.all(((dist_from_center <= radius).astype(int) == 0))
if is_all_zero == False:
count += 1
ra.append(ang_center[i][0])
dec.append(ang_center[i][1])
return np.where(mask > 1, 1, mask), | |
<filename>src/model.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to build and run the tensorflow graph for the sequence-to-sequence model"""
import os
import time
import numpy as np
import tensorflow as tf
from attention_decoder import attention_decoder
from tensorflow.contrib.tensorboard.plugins import projector
from nltk.translate.bleu_score import sentence_bleu
#### <EMAIL> adding rouge library
from rouge import rouge
from rouge_tensor import rouge_l_fscore
import data
from scipy.sparse import lil_matrix
from dqn import DQN
from replay_buffer import ReplayBuffer
from replay_buffer import Transition
from sklearn.preprocessing import OneHotEncoder
FLAGS = tf.app.flags.FLAGS
class SummarizationModel(object):
"""A class to represent a sequence-to-sequence model for text summarization. Supports both baseline mode, pointer-generator mode, and coverage"""
def __init__(self, hps, vocab):
    """Store the model configuration.

    Args:
      hps: hyperparameters namespace (batch sizes, hidden dims, mode, ...).
      vocab: vocabulary object used to size embeddings and map tokens to ids.
    """
    self._hps = hps
    self._vocab = vocab
def reward_function(self, summary, reference):
    """Score a generated summary against its reference.

    Dispatches on the configured reward function: any ROUGE variant goes
    through the `rouge` package, otherwise BLEU-4 is used.

    Args:
      reference: A list of ids representing the ground-truth data
      summary: A list of ids representing the model generated data

    Returns:
      A single value representing the evaluation value for reference and summary
    """
    metric = self._hps.reward_function
    if 'rouge' in metric:
        scores = rouge([summary], [reference])
        return scores[metric]
    return sentence_bleu([reference.split()], summary.split(),
                         weights=(0.25, 0.25, 0.25, 0.25))
def variable_summaries(self, var_name, var):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization).

    Records mean, standard deviation, max, min and a histogram of `var` under
    a name scope derived from `var_name`.

    Args:
      var_name: string used to build the summary name scope.
      var: the tensor to summarize.
    """
    with tf.name_scope('summaries_{}'.format(var_name)):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            # standard deviation computed explicitly from the mean
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
def _add_placeholders_common(self):
    """Add common placeholders to the graph. These are entry points for any input data.

    Creates the scalar coefficients fed at run time (eta, zeta, and -- when
    scheduled sampling is enabled -- sampling_probability and alpha), plus the
    embedding placeholder when a pre-trained embedding matrix is supplied.
    """
    hps = self._hps
    # scalar scheduling coefficients fed each step; presumably annealed by the
    # training loop -- confirm semantics against the trainer
    self._eta = tf.placeholder(tf.float32, None, name='eta')
    self._zeta = tf.placeholder(tf.float32, None, name='zeta')
    if FLAGS.embedding:
        # entry point for a pre-trained embedding matrix (vocab_size, emb_dim)
        self.embedding_place = tf.placeholder(tf.float32, [self._vocab.size(), hps.emb_dim])
    if FLAGS.scheduled_sampling:
        self._sampling_probability = tf.placeholder(tf.float32, None, name='sampling_probability')
        self._alpha = tf.placeholder(tf.float32, None, name='alpha')
def _add_placeholders(self):
    """Add placeholders for full dataset to the graph. These are entry points for any input data.

    Encoder inputs use a variable sequence length (None), decoder inputs are
    fixed at hps.max_dec_steps. Additional placeholders are created for
    pointer-generator mode and for feeding back state in decode mode.
    """
    hps = self._hps
    # encoder part
    self._enc_batch = tf.placeholder(tf.int32, [hps.batch_size, None], name='enc_batch')
    self._enc_lens = tf.placeholder(tf.int32, [hps.batch_size], name='enc_lens')
    self._enc_padding_mask = tf.placeholder(tf.float32, [hps.batch_size, None], name='enc_padding_mask')
    if FLAGS.pointer_gen:
        # encoder ids over the extended (article OOV) vocabulary, and the
        # max number of in-article OOVs in the batch -- pointer-gen extras
        self._enc_batch_extend_vocab = tf.placeholder(tf.int32, [hps.batch_size, None], name='enc_batch_extend_vocab')
        self._max_art_oovs = tf.placeholder(tf.int32, [], name='max_art_oovs')
    # decoder part
    self._dec_batch = tf.placeholder(tf.int32, [hps.batch_size, hps.max_dec_steps], name='dec_batch')
    self._target_batch = tf.placeholder(tf.int32, [hps.batch_size, hps.max_dec_steps], name='target_batch')
    self._dec_padding_mask = tf.placeholder(tf.float32, [hps.batch_size, hps.max_dec_steps], name='dec_padding_mask')
    if hps.mode == "decode":
        # in decode mode the graph runs one step at a time, so the previous
        # step's state must be fed back in through placeholders
        if hps.coverage:
            self.prev_coverage = tf.placeholder(tf.float32, [hps.batch_size, None], name='prev_coverage')
        if hps.intradecoder:
            self.prev_decoder_outputs = tf.placeholder(tf.float32, [None, hps.batch_size, hps.dec_hidden_dim], name='prev_decoder_outputs')
        if hps.use_temporal_attention:
            self.prev_encoder_es = tf.placeholder(tf.float32, [None, hps.batch_size, None], name='prev_encoder_es')
def _make_feed_dict(self, batch, just_enc=False):
    """Build a feed dictionary mapping fields of `batch` to the placeholders.

    Args:
      batch: Batch object
      just_enc: Boolean. If True, only feed the parts needed for the encoder.

    Returns:
      dict mapping placeholders to numpy inputs.
    """
    feed = {
        self._enc_batch: batch.enc_batch,
        self._enc_lens: batch.enc_lens,
        self._enc_padding_mask: batch.enc_padding_mask,
    }
    if FLAGS.pointer_gen:
        feed[self._enc_batch_extend_vocab] = batch.enc_batch_extend_vocab
        feed[self._max_art_oovs] = batch.max_art_oovs
    if not just_enc:
        feed[self._dec_batch] = batch.dec_batch
        feed[self._target_batch] = batch.target_batch
        feed[self._dec_padding_mask] = batch.dec_padding_mask
    return feed
def _add_encoder(self, emb_enc_inputs, seq_len, partial=False):
    """Add a single-layer bidirectional LSTM encoder to the graph.

    Args:
      emb_enc_inputs: A tensor of shape [batch_size, <=max_enc_steps, emb_size].
      seq_len: Lengths of emb_enc_inputs (before padding). A tensor of shape [batch_size].
      partial: currently unused; kept for interface compatibility with the
        commented-out variable-reuse logic below.

    Returns:
      encoder_outputs:
        A tensor of shape [batch_size, <=max_enc_steps, 2*hidden_dim]. It's 2*hidden_dim because it's the concatenation of the forwards and backwards states.
      fw_state, bw_state:
        Each are LSTMStateTuples of shape ([batch_size,hidden_dim],[batch_size,hidden_dim])
    """
    with tf.variable_scope('encoder') as scope:
        #if self._hps.rl_training and partial:
        #  scope.reuse_variables()
        cell_fw = tf.contrib.rnn.LSTMCell(self._hps.enc_hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
        cell_bw = tf.contrib.rnn.LSTMCell(self._hps.enc_hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
        # swap_memory=True trades host/GPU transfers for lower memory on long sequences
        (encoder_outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, emb_enc_inputs,
                                                                            dtype=tf.float32, sequence_length=seq_len,
                                                                            swap_memory=True)
        encoder_outputs = tf.concat(axis=2, values=encoder_outputs) # concatenate the forwards and backwards states
    return encoder_outputs, fw_st, bw_st
def _reduce_states(self, fw_st, bw_st, partial=False):
    """Add to the graph a linear layer to reduce the encoder's final FW and BW state into a single initial state for the decoder. This is needed because the encoder is bidirectional but the decoder is not.

    Args:
      fw_st: LSTMStateTuple with hidden_dim units.
      bw_st: LSTMStateTuple with hidden_dim units.
      partial: currently unused; kept for interface compatibility with the
        commented-out variable-reuse logic below.

    Returns:
      state: LSTMStateTuple with hidden_dim units.
    """
    enc_hidden_dim = self._hps.enc_hidden_dim
    dec_hidden_dim = self._hps.dec_hidden_dim
    with tf.variable_scope('reduce_final_st') as scope:
        #if self._hps.rl_training and partial:
        #  scope.reuse_variables()
        # Define weights and biases to reduce the cell and reduce the state
        w_reduce_c = tf.get_variable('w_reduce_c', [enc_hidden_dim * 2, dec_hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)
        w_reduce_h = tf.get_variable('w_reduce_h', [enc_hidden_dim * 2, dec_hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)
        bias_reduce_c = tf.get_variable('bias_reduce_c', [dec_hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)
        bias_reduce_h = tf.get_variable('bias_reduce_h', [dec_hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)
        # Apply linear layer
        old_c = tf.concat(axis=1, values=[fw_st.c, bw_st.c]) # Concatenation of fw and bw cell
        old_h = tf.concat(axis=1, values=[fw_st.h, bw_st.h]) # Concatenation of fw and bw state
        new_c = tf.nn.relu(tf.matmul(old_c, w_reduce_c) + bias_reduce_c) # Get new cell from old cell
        new_h = tf.nn.relu(tf.matmul(old_h, w_reduce_h) + bias_reduce_h) # Get new state from old state
        return tf.contrib.rnn.LSTMStateTuple(new_c, new_h) # Return new cell and state
def _add_decoder(self, emb_dec_inputs, embedding):
    """Add attention decoder to the graph. In train or eval mode, you call this once to get output on ALL steps. In decode (beam search) mode, you call this once for EACH decoder step.

    Args:
      emb_dec_inputs: inputs to the decoder (word embeddings). A list of tensors shape (batch_size, emb_dim)
      embedding: embedding matrix (vocab_size, emb_dim)

    Returns:
      outputs: List of tensors; the outputs of the decoder
      out_state: The final state of the decoder
      attn_dists: A list of tensors; the attention distributions
      p_gens: A list of tensors shape (batch_size, 1); the generation probabilities
      coverage: A tensor, the current coverage vector
    """
    hps = self._hps
    cell = tf.contrib.rnn.LSTMCell(hps.dec_hidden_dim, state_is_tuple=True, initializer=self.rand_unif_init)
    prev_coverage = self.prev_coverage if (hps.mode=="decode" and hps.coverage) else None # In decode mode, we run attention_decoder one step at a time and so need to pass in the previous step's coverage vector each time
    # empty stacks serve as "no previous state" defaults outside decode mode
    prev_decoder_outputs = self.prev_decoder_outputs if (hps.intradecoder and hps.mode=="decode") else tf.stack([],axis=0)
    prev_encoder_es = self.prev_encoder_es if (hps.use_temporal_attention and hps.mode=="decode") else tf.stack([],axis=0)
    # delegate the heavy lifting to attention_decoder; scheduled sampling and
    # E2E backprop knobs are passed only when the corresponding flags are set
    return attention_decoder(_hps=hps,
                             v_size=self._vocab.size(),
                             _max_art_oovs=self._max_art_oovs,
                             _enc_batch_extend_vocab=self._enc_batch_extend_vocab,
                             emb_dec_inputs=emb_dec_inputs,
                             target_batch=self._target_batch,
                             _dec_in_state=self._dec_in_state,
                             _enc_states=self._enc_states,
                             enc_padding_mask=self._enc_padding_mask,
                             dec_padding_mask=self._dec_padding_mask,
                             cell=cell,
                             embedding=embedding,
                             sampling_probability=self._sampling_probability if FLAGS.scheduled_sampling else 0,
                             alpha=self._alpha if FLAGS.E2EBackProp else 0,
                             unk_id=self._vocab.word2id(data.UNKNOWN_TOKEN),
                             initial_state_attention=(hps.mode=="decode"),
                             pointer_gen=hps.pointer_gen,
                             use_coverage=hps.coverage,
                             prev_coverage=prev_coverage,
                             prev_decoder_outputs=prev_decoder_outputs,
                             prev_encoder_es = prev_encoder_es)
def _add_emb_vis(self, embedding_var):
    """Do setup so that we can view word embedding visualization in Tensorboard, as described here:
    https://www.tensorflow.org/get_started/embedding_viz
    Make the vocab metadata file, then make the projector config file pointing to it.

    Args:
      embedding_var: the embedding tf.Variable to visualize.
    """
    train_dir = os.path.join(FLAGS.log_root, "train")
    vocab_metadata_path = os.path.join(train_dir, "vocab_metadata.tsv")
    self._vocab.write_metadata(vocab_metadata_path) # write metadata file
    summary_writer = tf.summary.FileWriter(train_dir)
    # point the TensorBoard projector at the embedding tensor and its metadata
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = embedding_var.name
    embedding.metadata_path = vocab_metadata_path
    projector.visualize_embeddings(summary_writer, config)
def discount_rewards(self, r):
    """take a list of size max_dec_step * (batch_size, k) and return a list of the same size

    Accumulates gamma-discounted returns from the last step backwards
    (rd_t = r_t + gamma * r_{t+1}), then L2-normalizes over the time axis.
    """
    acc = tf.constant(0, tf.float32)
    reversed_returns = []
    for reward in reversed(r):
        acc = acc * self._hps.gamma + reward
        reversed_returns.append(acc)
    # restore chronological order before stacking: (max_dec_step, batch_size, k)
    stacked = tf.stack(reversed_returns[::-1])
    normalized = tf.nn.l2_normalize(stacked, axis=0)
    return tf.unstack(normalized)  # list of max_dec_step * (batch_size, k)
def intermediate_rewards(self, r):
""" take a list of size max_dec_step * (batch_size, k) and return a list of the same size
uses the intermediate reward as proposed by: R_t = r_t - r_{t-1} """
intermediate_r = []
intermediate_r.append(r[0])
for t in range(1, len(r)):
intermediate_r.append(r[t]-r[t-1])
return intermediate_r # list of max_dec_step * (batch_size, k)
def _add_seq2seq(self):
"""Add the whole sequence-to-sequence model to the graph."""
hps = self._hps
vsize = self._vocab.size() # size of the vocabulary
with tf.variable_scope('seq2seq'):
# Some initializers
self.rand_unif_init = tf.random_uniform_initializer(-hps.rand_unif_init_mag, hps.rand_unif_init_mag, seed=123)
self.trunc_norm_init = tf.truncated_normal_initializer(stddev=hps.trunc_norm_init_std)
# Add embedding matrix (shared by the encoder and decoder inputs)
with tf.variable_scope('embedding'):
if FLAGS.embedding:
embedding = tf.Variable(self.embedding_place)
else:
embedding = tf.get_variable('embedding', [vsize, hps.emb_dim], dtype=tf.float32, initializer=self.trunc_norm_init)
if hps.mode=="train": self._add_emb_vis(embedding) # add to tensorboard
emb_enc_inputs = tf.nn.embedding_lookup(embedding, self._enc_batch) # tensor with shape (batch_size, max_enc_steps, emb_size)
emb_dec_inputs = [tf.nn.embedding_lookup(embedding, x) for x in tf.unstack(self._dec_batch, axis=1)] # list length max_dec_steps containing shape (batch_size, emb_size)
# Add the encoder.
enc_outputs, fw_st, bw_st = self._add_encoder(emb_enc_inputs, self._enc_lens)
self._enc_states = enc_outputs
# Our encoder is bidirectional and our decoder is | |
import operator
import re
import sys
import sympy
import pyparsing
from chempy import Substance
from chempy import balance_stoichiometry
from chemsolve.element import Element
from chemsolve.element import SpecialElement
from chemsolve.compound import Compound
from chemsolve.compound import FormulaCompound
from chemsolve.utils.combustion import determine_main_compound
from chemsolve.utils.validation import assert_chemical_presence
from chemsolve.utils.warnings import ChemsolveDeprecationWarning
from chemsolve.utils.errors import (
InvalidElementError, InvalidCompoundError, InvalidReactionError
)
__all__ = ['Reaction', 'CombustionTrain']
try:
import periodictable as pt
except ModuleNotFoundError:
print("The module periodictable has not been installed.")
except ImportError:
print("The module periodictable could not be found (may have not been installed).")
class Reaction(object):
"""
Stores a balanced/unbalanced chemical reaction.
INPUTS:
Takes in Compound objects, either predefined or defined for the purposes of the reaction.
Reactants/Products are split by "-->".
CALCULATIONS:
Define the quantity of each compound (either moles or grams) directly in the Compound class of each object.
The class will directly use these for its calculations.
If you want to use the calculation methods, you have to set the calculation booleans to True.
Otherwise, it will force you to enter values for the moles/grams of each compound even if you don't have them.
"""
def __init__(self, *args, reactants = (), products = (), lim_calc = False, main_reactant = None, **kwargs):
    """Create a reaction from reactant and product compounds.

    :param args: deprecated positional form (removed in v2.0.0): reactant
        compounds, the separator string "-->", then product compounds.
    :param reactants: tuple of reactant Compound/Element objects.
    :param products: tuple of product Compound/Element objects.
    :param lim_calc: if True, run the limiting reactant calculation.
    :param main_reactant: optional compound stored as the main reactant.
    """
    self.lim_calc = lim_calc
    self.reactants = []  # repr() strings of the reactant compounds
    self.products = []  # repr() strings of the product compounds
    self._original_reaction = ""
    self._reactant_store = []  # the reactant objects themselves
    self._product_store = []  # the product objects themselves
    # NOTE(review): this validation runs before the stores are populated, so
    # it always receives two empty lists -- confirm whether it should instead
    # be called after the reactants/products are parsed below.
    assert_chemical_presence(self._reactant_store, self._product_store)
    # temp/temp2 alias the reactant lists; they are re-pointed at the product
    # lists once the "-->" separator string is encountered in *args.
    temp = self.reactants
    temp2 = self._reactant_store
    if args: # Adding reactants/products through *args is deprecated, to be removed in v2.0.0.
        # Warn of future deprecation.
        ChemsolveDeprecationWarning("Adding compounds to a Reaction using *args is deprecated and will be "
                                    "removed in v2.0.0. Start using the `reactants` and `products` arguments.",
                                    future_version = 'bypass')
        for compound in args:
            if isinstance(compound, str):
                # the "-->" marker: switch from filling reactants to products
                self._original_reaction += str("--> ")
                temp = self.products
                temp2 = self._product_store
            else:
                temp2.append(compound)
                self._original_reaction += str(compound.__str__() + " ")
                # append a "+" separator unless the next item is the arrow
                # NOTE(review): value-based comparison misplaces separators
                # when the same compound appears twice -- confirm.
                if compound != args[-1] and not isinstance(args[args.index(compound) + 1], str):
                    self._original_reaction += str("+ ")
                if not isinstance(compound, (Element, Compound)):
                    raise TypeError("The object " + str(compound) + " is not of type "
                                    "Compound or related subclasses, please redefine it.")
                else:
                    temp.append(compound.__repr__())
    else:
        self._initialize_reaction(reactants, products)
    self._balanced = self._balance()
    self.balanced_reaction = self.balanced_display()
    self.limiting_reactant = self.get_limiting_reactant(self.lim_calc)
    self.coefficient_sum = self.get_coefficient_sum()
    if main_reactant:
        self.main_reactant = main_reactant
def __str__(self):
    """Return the balanced reaction string."""
    return self._balanced
def __contains__(self, item):
    """Return True if `item` (a Compound or its repr string) appears in the
    reaction's reactants or products."""
    if isinstance(item, Compound):
        # compounds are stored by their repr() strings
        item = repr(item)
    if isinstance(item, str):
        return item in self.reactants or item in self.products
    return False
def _initialize_reaction(self, reactants, products):
"""Internal method to initialize the class reaction from inputs."""
# Add reactants to list of reactants and original reaction string.
for reactant in reactants:
self._original_reaction += str(str(reactant) + " ")
if reactant != reactants[-1]:
self._original_reaction += str("+ ")
self._reactant_store.append(reactant)
self.reactants.append(repr(reactant))
# Add the arrow differentiating reactants and products (to the printed reaction).
self._original_reaction += "--> "
# Add products to list of products and original reaction string.
for product in products:
self._original_reaction += str(str(product) + " ")
if product != products[-1]:
self._original_reaction += str("+ ")
self._product_store.append(product)
self.products.append(repr(product))
@property
def original_reaction(self):
# Return the original (non-balanced) reaction.
return self._original_reaction
@classmethod
def fromCombustion(cls, *args, hydrocarbon = True, othercompound = False, sample_mass = 0.0, **kwargs):
"""
An implementation of a class method representing a combustion reaction.
"""
if not hydrocarbon:
if sample_mass == 0.0:
raise AttributeError("You must provide the total mass of the product in order "
"to determine the quantity of oxygen.")
else:
sample_mass = round(float(sample_mass), 4)
if len(kwargs) > 3:
raise ValueError("The Compound.fromCombustion method currently doesn't support more "
"than one additional compound.")
product_store = []
products = []
for compound in args:
try:
compound.mass
except AttributeError as exception:
print("You must provide the masses of the compounds as acquired in the Compound definition.")
raise exception
else:
product_store.append(compound)
products.append(repr(compound))
# Calculate main reactant.
main_reactant = None
if hydrocarbon:
if othercompound:
main_reactant = Compound(determine_main_compound(product_store, sample_mass, hydrocarbon = hydrocarbon,
othercompound = True))
else:
main_reactant = Compound(determine_main_compound(product_store, sample_mass,
hydrocarbon = hydrocarbon))
if not hydrocarbon:
main_reactant = Compound(determine_main_compound(product_store, sample_mass, hydrocarbon = hydrocarbon),
grams = sample_mass)
return cls(main_reactant, Compound("O2"), "-->", *product_store, main_reactant = main_reactant)
@staticmethod
def from_combustion(*args, hydrocarbon = True, othercompound = False, sample_mass = 0.0, **kwargs):
"""A transitional method for v2.0.0, will eventually replace fromCombustion."""
return Reaction.fromCombustion(*args, hydrocarbon = hydrocarbon, othercompound = othercompound,
sample_mass = sample_mass, **kwargs)
@classmethod
def from_string(cls, reaction_string, lim_calc = False, **kwargs):
"""Instantiates a reaction from a string containing the reaction.
Given a reaction string, e.g. 2H2 + O2 = 2H2O, this class method will
split the reaction into its relevant compounds and instantiate the
reaction as necessary.
This is merely a convenience method if it is easier to simply write a
string containing the reaction instead of using the traditional method.
Examples
--------
Instantiate the reaction between hydrogen and iodine.
>>> reaction = Reaction.from_string("H + I = HI")
Parameters
----------
reaction_string: str
The string containing the reaction. Note that the reactant/product
delimiter should be one of: '->', '-->', or '=', and the individual
compound delimeter should be one of: '+', '&'.
lim_calc: bool
The same as the regular instantiation of a reaction class, if True then
the class will calculate the reaction's limiting reactant.
Returns
-------
An instantiated reaction class.
"""
# Create holder lists for reactants and products.
reactants = []
products = []
holder = reactants
# Get each of the compounds/delimiters and parse the reaction.
values = reaction_string.split(" ")
try: # Wrap all of the errors in a try/except block to raise custom errors.
for value in values:
try:
holder.append(Compound(value))
except InvalidCompoundError:
if value not in ['+', '&', '->', '-->', '=']:
# Check for general errors and raise them.
raise ValueError(f"Received an invalid delimiter: '{value}'.")
else: # Otherwise, check for specific cases.
if value in ['+', '&']:
# We have received a compound delimiter, just continue.
continue
else:
# We have received a reactant/product delimiter, so
# switch the list from reactants to products.
holder = products
except ValueError:
raise InvalidReactionError("Received an invalid reaction, see traceback "
"for the specific cause of the issue.")
# Instantiate the class.
return cls(reactants = reactants, products = products,
lim_calc = lim_calc, **kwargs)
@property
def get_reactants(self):
"""Returns the reactants of the reaction."""
return self.reactants
@property
def get_products(self):
"""Returns the products of the reaction."""
return self.products
@property
def balanced(self):
"""Returns the balanced reaction."""
return self._balanced
def _balance(self):
"""Internal method, returns ordered dictionaries containing
the balanced reaction's reactants and products."""
try:
return balance_stoichiometry({f for f in self.reactants}, {f for f in self.products})
except pyparsing.ParseException:
raise InvalidReactionError("Received an invalid reaction, there is a reactant "
"which does not appear on the products side, or a product"
"which does not appear on the reactants side.",
property_type = "bypass")
def balanced_display(self):
"""Returns a displayable version of the balanced reaction."""
tempstr = ""
e1 = list(self._balanced[0].items())
count = 0
for reactant in self.reactants:
count += 1
for item in self._balanced[0]:
if str(item) == str(reactant):
self._balanced[0][item] = int(self._balanced[0][item])
if not self._balanced[0][item] == 1:
tempstr += str(self._balanced[0][item])
tempstr += str(Compound(str(item)).__str__()) + str(" ")
if count < len(e1):
tempstr += str("+ ")
tempstr += str("--> ")
#----
e2 = list(self._balanced[1].items())
count = 0
for product in self.products:
count += 1
for item in self._balanced[1]:
if str(item) == str(product):
self._balanced[1][item] = int(self._balanced[1][item])
if not self._balanced[1][item] == 1:
tempstr += str(self._balanced[1][item])
tempstr += str(Compound(str(item)).__str__()) + str(" ")
if count < len(e2):
tempstr += str("+ ")
return tempstr
def get_coefficient_sum(self):
"""Returns the sum of the coefficients of the reactants and products in the reaction."""
self.coefficient_sum = 0
for item in self._balanced[0]:
self.coefficient_sum += int(self._balanced[0][item])
for item in self._balanced[1]:
self.coefficient_sum += int(self._balanced[1][item])
return self.coefficient_sum
def get_limiting_reactant(self, lim_calc = False):
"""Returns the limiting reactant of the chemical reaction.
Uses the moles/grams values from the Compound objects."""
if not lim_calc: # If we do not want to calculate the limiting reactant.
return False
# Create the initial holder objects.
lim_reac = None
moles = 0
org_moles = 0
# Choose a product to test with.
product = (next(iter((self._balanced[1]).items())))[1]
# Iterate over the different reactants.
for item in | |
# Source: carbo-T/TF — img_proc/multiThread.py
import tensorflow as tf
import numpy as np
import threading
import time
import os
import preprocessing
import mnist_inference
import matplotlib.pyplot as plt
# ********** queue operation ***********
def queue_op():
    """Demonstrate basic queue ops: dequeue a value, add one, enqueue it back.

    TensorFlow offers FIFOQueue and RandomShuffleQueue; here a FIFOQueue
    holding at most two int32 elements is used.
    """
    queue = tf.FIFOQueue(2, "int32")
    init = queue.enqueue_many(([0, 10],))
    value = queue.dequeue()
    incremented = value + 1
    enqueue_back = queue.enqueue([incremented])
    with tf.Session() as sess:
        init.run()
        for _ in range(5):
            # One step = dequeue, add 1, enqueue.
            sess.run([value, enqueue_back])
# tf.train.Coordinator enable thread synchronization
# request_stop, should_stop, join
def MyLoop(coord, worker_id):
    """Worker loop: with probability ~0.1 per tick, ask the coordinator to stop.

    tf.train.Coordinator synchronises threads via request_stop, should_stop
    and join.
    """
    while not coord.should_stop():
        if np.random.rand() >= 0.1:
            time.sleep(0.5)
            print("Working on id: %d" % worker_id)
        else:
            print("Stoping from id: %d" % worker_id)
            coord.request_stop()
    time.sleep(1)
# coord = tf.train.Coordinator()
# threads = [
# threading.Thread(target=MyLoop, args=(coord, i), ) for i in range(5)
# ]
# # start all threads
# for t in threads:
# t.start()
# # wait for all threads to stop
# coord.join(threads)
# ******** tf.QueueRunner **********
def threads_mgmt():
    """Run a QueueRunner: five threads enqueue random scalars, main thread dequeues.

    The runner joins the default tf.GraphKeys.QUEUE_RUNNERS collection, and
    start_queue_runners() launches every runner registered there.
    """
    queue = tf.FIFOQueue(100, 'float')
    enqueue_op = queue.enqueue([tf.random_normal([1])])
    # Five enqueue threads driving the same op.
    runner = tf.train.QueueRunner(queue, [enqueue_op] * 5)
    tf.train.add_queue_runner(runner)
    dequeued = queue.dequeue()
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for _ in range(15):
            print(sess.run(dequeued)[0])
            time.sleep(0.2)
        coord.request_stop()
        coord.join(threads)
def _int64_feature(value):
    """Wrap a Python int in a tf.train.Feature holding an int64 list."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def _bytes_feature(value):
    """Wrap a byte string in a tf.train.Feature holding a bytes list."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
# simulate big data situation
def generate_files():
    """Write sharded toy TFRecord files to simulate a big-data layout.

    Produces ``num_shard`` files named ``data.tfrecords-0000n-of-0000m``
    (n = shard index, m = shard count), each holding ``instances_per_shard``
    records with int64 features 'i' (shard) and 'j' (row).
    """
    # how many files to write
    num_shard = 3
    # how much data in a file
    instances_per_shard = 6
    record_path = "record/"
    # Fixed: the bare `except:` silently swallowed every error; mkdir only
    # fails with FileExistsError when the directory is already present —
    # other failures (e.g. permissions) should propagate.
    try:
        os.mkdir(record_path)
    except FileExistsError:
        print("directory already exist")
    # data 0000n-of-0000m, n means file No., m means how many files the data has been stored as
    for i in range(num_shard):
        filename = (os.path.join(record_path, "data.tfrecords-%.5d-of-%.5d" % (i, num_shard)))
        writer = tf.python_io.TFRecordWriter(filename)
        for j in range(instances_per_shard):
            example = tf.train.Example(features=tf.train.Features(feature={
                'i': _int64_feature(i),
                'j': _int64_feature(j)
            }))
            writer.write(example.SerializeToString())
        writer.close()
def read_files():
    """Read the sharded TFRecords written by generate_files and print them.

    Returns the parsed-feature tensor dict (note: the session that evaluated
    them has already exited by the time they are returned).
    """
    record_path = "record/"
    # Collect the shard file list.
    files = tf.train.match_filenames_once(os.path.join(record_path, "data.tfrecords-*"))
    # num_epochs=1: each file is visited exactly one time (1 epoch = 1 cycle).
    filename_queue = tf.train.string_input_producer(files, num_epochs=1, shuffle=True)
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'i': tf.FixedLenFeature([], tf.int64),
            'j': tf.FixedLenFeature([], tf.int64),
        }
    )
    with tf.Session() as sess:
        # match_filenames_once() must be initialised before use.
        tf.local_variables_initializer().run()
        print(sess.run(files))
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for _ in range(18):
            print(sess.run([features['i'], features['j']]))
        coord.request_stop()
        coord.join(threads)
    return features
def batch_example():
    """Batch the records produced by read_files via tf.train.shuffle_batch."""
    features = read_files()
    print("____ end of read files _____")
    example, label = features['i'], features['j']
    batch_size = 3
    # Queue capacity: larger wastes memory, smaller can block and slow things down.
    capacity = 1000 + 3 * batch_size
    # min_after_dequeue: dequeue blocks until this many elements remain — this
    # is what gives the shuffle its randomness.
    example_batch, label_batch = tf.train.shuffle_batch(
        [example, label], batch_size=batch_size, capacity=capacity,
        min_after_dequeue=6)
    with tf.Session() as sess:
        tf.local_variables_initializer().run()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # Pull six combined batches.
        for _ in range(6):
            curr_exp_b, curr_lab_b = sess.run([example_batch, label_batch])
            print(curr_exp_b, curr_lab_b, "lll")
        coord.request_stop()
        coord.join(threads)
# ************* use inceptionV3 data to generate data for training **************
def write_record(name, image, label):
    """Serialise parallel image/label sequences into TFRecord file *name*.

    Each record stores the raw image bytes plus label, height, width and
    channel count so a reader can rebuild the array shape.
    """
    writer = tf.python_io.TFRecordWriter(name)
    for index in range(len(image)):
        img = image[index]
        # ndarray -> raw byte string.
        image_raw = img.tobytes()
        print(label[index])
        print(img.shape[0])
        print(img.shape[1])
        print(img.shape[2])
        # Build the Example Protocol Buffer for this record.
        feature = {
            'image': _bytes_feature(image_raw),
            'label': _int64_feature(label[index]),
            'height': _int64_feature(img.shape[0]),
            'width': _int64_feature(img.shape[1]),
            'channels': _int64_feature(img.shape[2]),
        }
        example = tf.train.Example(features=tf.train.Features(feature=feature))
        writer.write(example.SerializeToString())
    writer.close()
def generate_record(output_filename="output_flower.tfrecords"):
    """Convert the preprocessed flower .npy splits into TFRecord files.

    NOTE(review): the ``output_filename`` parameter is never used — the two
    output names below are hard-coded; confirm whether it should drive them.
    """
    # Each .npy file holds [images, labels]; allow_pickle is required because
    # the arrays were saved as Python objects.
    input_data = "../inceptionv3/preprocess/validation_flower.npy"
    processed_data = np.load(input_data, allow_pickle=True)
    training_images = processed_data[0]
    training_labels = processed_data[1]
    input_data = "../inceptionv3/preprocess/test_flower.npy"
    processed_data = np.load(input_data, allow_pickle=True)
    validation_images = processed_data[0]
    validation_labels = processed_data[1]
    write_record("output_flower_train.tfrecord", training_images, training_labels)
    write_record("output_flower_validation.tfrecord", validation_images, validation_labels)
    print("training_images: " + str(len(training_labels)))
    print("validation_images: " + str(len(validation_labels)))
def read_record(file_regex="record/output_flower_*.tfrecord"):
    """Build a graph that reads one TFRecord and decodes it to (image, label).

    The raw image bytes are decoded as float32 and reshaped using the stored
    height/width/channels features.
    """
    files = tf.train.match_filenames_once(file_regex)
    filename_queue = tf.train.string_input_producer(files, shuffle=False)
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    feature_spec = {
        'image': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64),
        'height': tf.FixedLenFeature([], tf.int64),
        'width': tf.FixedLenFeature([], tf.int64),
        'channels': tf.FixedLenFeature([], tf.int64)
    }
    features = tf.parse_single_example(serialized_example, features=feature_spec)
    label = tf.cast(features['label'], tf.int32)
    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    channels = tf.cast(features['channels'], tf.int32)
    # Raw bytes -> float32 tensor, then restore the original image shape.
    decoded_img = tf.decode_raw(features['image'], tf.float32)
    decoded_img = tf.reshape(decoded_img, shape=[height, width, channels])
    return decoded_img, label
def tfrecord_parser(record):
    """Dataset-API map function: parse one serialised Example into (image, label).

    Images are stored as raw uint8 bytes here; decoding with the wrong dtype
    causes "Input to reshape is a tensor with ... values" errors (see the
    notes following this function).
    """
    feature_spec = {
        'image': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64),
        'height': tf.FixedLenFeature([], tf.int64),
        'width': tf.FixedLenFeature([], tf.int64),
        'channels': tf.FixedLenFeature([], tf.int64)
    }
    features = tf.parse_single_example(record, features=feature_spec)
    label = tf.cast(features['label'], tf.int32)
    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    channels = tf.cast(features['channels'], tf.int32)
    # Raw bytes -> uint8 tensor, reshaped to the stored dimensions.
    decoded_img = tf.decode_raw(features['image'], tf.uint8)
    decoded_img = tf.reshape(decoded_img, shape=[height, width, channels])
    return decoded_img, label
# ** wrong image dtype may cause " Input to reshape is a tensor with xxx values, but the requested shape has xxx "
# such as uint8 and float32, float32 is usually used for training, whereas uint8 more likely used for image storage
# ** must have channel 3 but has channels 1 problem is caused by image preprocessing
def process_data(doTrain=True):
image_size = 28
num_channels = 1
num_of_labels = 10
min_after_dequeue = 2000
shuffle_buffer = 10000
num_epochs = 50 # same effect as training_rounds
batch_size = 1000
training_rounds = 5000
training_images = 55000 # 362
validation_images = 5000 # 367
test_images = 10000
train_files = tf.train.match_filenames_once("record/mnist_train.tfrecord")
validation_files = tf.train.match_filenames_once("record/mnist_validation.tfrecord")
test_files = tf.train.match_filenames_once("record/mnist_test.tfrecord")
# ********** define neural network structure and forward propagation **********
learning_rate_base = 0.8
learning_rate_decay = 0.99
regularization_rate = 0.0001
moving_average_decay = 0.99
x = tf.placeholder(tf.float32, [None,
image_size,
image_size,
num_channels], name='x-input')
y_ = tf.placeholder(tf.float32, [None], name='y-input')
regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)
y = mnist_inference.inference(x, True, regularizer)
global_step = tf.Variable(0, trainable=False)
# moving average, cross entropy, loss function with regularization and learning rate
with tf.name_scope("moving_average"):
variable_average = tf.train.ExponentialMovingAverage(moving_average_decay, global_step)
variable_average_op = variable_average.apply(tf.trainable_variables())
# calc loss
with tf.name_scope("loss_function"):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.cast(y_, tf.int32))
cross_entropy_mean = tf.reduce_mean(cross_entropy)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
with tf.name_scope("train_step"):
learning_rate = tf.train.exponential_decay(
learning_rate_base,
global_step,
training_images / batch_size,
learning_rate_decay
)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
with tf.control_dependencies([train_step, variable_average_op]):
train_op = tf.no_op(name='train')
# define accuracy
with tf.name_scope("accuracy_calc"):
prediction = tf.argmax(y, 1)
answer = tf.cast(y_, tf.int64)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.cast(y_, tf.int64))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
# test_result = list(range(int(training_rounds / 500)))
# # ********** original tfrecord data operator **********
# decoded_img, label = read_record("record/mnist_train.tfrecord")
# # img preprocessing
# # distorted_img = tf.image.resize_images(decoded_img, [image_size, image_size], method=0)
# distorted_img = preprocessing.process_for_train(decoded_img, image_size, image_size, None, 1)
# distorted_img.set_shape([image_size, image_size, num_channels])
# # print(distorted_img.shape)
#
# # create batch
# total_sample = training_images + validation_images
# capacity = min_after_dequeue + batch_size * 3
# image_batch, label_batch = tf.train.shuffle_batch([distorted_img, label], batch_size=batch_size,
# capacity=capacity, num_threads=64,
# min_after_dequeue=min_after_dequeue)
# ********** tfrecord dataset **********
dataset = tf.data.TFRecordDataset(train_files)
dataset = dataset.map(tfrecord_parser)
dataset = dataset.map(
lambda image, label: (
preprocessing.process_for_train(tf.image.convert_image_dtype(image, dtype=tf.float32), image_size,
image_size, None, 1), label
# tf.image.resize_images(tf.image.convert_image_dtype(image, dtype=tf.float32), [image_size, image_size]), label
))
dataset = dataset.shuffle(shuffle_buffer).batch(batch_size)
dataset = dataset.repeat(num_epochs)
# match_filename_once has similar mechanism as placeholder
iterator = dataset.make_initializable_iterator()
image_batch, label_batch = iterator.get_next()
# ********** validation dataset **********
validation_dataset = tf.data.TFRecordDataset(validation_files)
validation_dataset = validation_dataset.map(tfrecord_parser).map(
lambda image, label: (
tf.image.resize_images(tf.image.convert_image_dtype(image, dtype=tf.float32), [image_size, image_size]),
label
))
validation_dataset = validation_dataset.batch(validation_images)
validation_dataset = validation_dataset.repeat(None)
validation_iterator = validation_dataset.make_initializable_iterator()
validation_image_batch, validation_label_batch = validation_iterator.get_next()
# ********** test dataset **********
test_dataset = tf.data.TFRecordDataset(test_files)
test_dataset = test_dataset.map(tfrecord_parser).map(
lambda image, label: (
tf.image.resize_images(tf.image.convert_image_dtype(image, dtype=tf.float32), [image_size, image_size]),
label
))
test_dataset = test_dataset.batch(test_images)
test_iterator = test_dataset.make_initializable_iterator()
test_image_batch, test_label_batch = test_iterator.get_next()
# logit = inference(image_batch)
# loss = calc_loss(logit, label_batch)
# train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# initialize persistence class
saver = tf.train.Saver()
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
merged = tf.summary.merge_all()
with tf.Session(config=config) as sess:
writer = tf.summary.FileWriter("log", sess.graph)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
# print(sess.run(tf.cast(features['label'], tf.int32)))
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
print("start training........")
# for i in range(training_rounds):
i = 0
step = 0
if doTrain:
sess.run(iterator.initializer)
sess.run(validation_iterator.initializer)
while True:
i += 1
try:
# img = sess.run(distorted_img)
# plt.imshow(img)
# plt.show()
xs, ys = sess.run([image_batch, label_batch])
# print(xs.shape)
# print(ys.shape)
if i % 200 == 0:
# config necessary info when training
run_options = tf.RunOptions(
trace_level=tf.RunOptions.FULL_TRACE
)
# record proto when training
run_metadata = tf.RunMetadata()
summary, _, loss_value, step = sess.run([merged, train_op, loss, global_step],
feed_dict={x: xs, y_: ys},
options=run_options, run_metadata=run_metadata)
writer.add_run_metadata(run_metadata, 'step%03d' % i)
writer.add_summary(summary, i)
vxs, vys = sess.run([validation_image_batch, validation_label_batch])
p, a, accuracy_score = sess.run([prediction, answer, accuracy], feed_dict={x: vxs, y_: vys})
print("prediction: \t%s, \nanswer: \t\t%s" % (p[0:10], a[0:10]))
print("after %d steps, loss: %.3f, accuracy: | |
# gh_stars: 0
""" XVM (c) www.modxvm.com 2013-2017 """
#####################################################################
# MOD INFO
# Metadata consumed by the XFW mod loader.
XFW_MOD_INFO = {
    # mandatory
    'VERSION': '0.9.19.0.1',
    'URL': 'http://www.modxvm.com/',
    'UPDATE_URL': 'http://www.modxvm.com/en/download-xvm/',
    'GAME_VERSIONS': ['0.9.19.0.1'],
    # optional
}
#####################################################################
# imports
import traceback
import sys
from math import degrees, pi
import BigWorld
import game
import gui.shared.tooltips.vehicle as tooltips_vehicle
from gun_rotation_shared import calcPitchLimitsFromDesc
from helpers import i18n
from gui import g_htmlTemplates
from gui.shared import g_eventBus
from gui.shared.formatters import text_styles
from gui.shared.tooltips import formatters
from gui.shared.gui_items import GUI_ITEM_TYPE
from gui.Scaleform.locale.MENU import MENU
from gui.shared.items_parameters import formatters as param_formatter
from gui.shared.items_parameters.formatters import measureUnitsForParameter
from gui.shared.items_parameters.params_helper import getParameters as getParameters_helper
from gui.shared.items_parameters.params_helper import idealCrewComparator as idealCrewComparator_helper
from gui.shared.utils.requesters.ItemsRequester import ItemsRequester
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.Scaleform.locale.TOOLTIPS import TOOLTIPS
from gui.Scaleform.framework.ToolTip import ToolTip
from gui.Scaleform.daapi.view.battle.shared.consumables_panel import ConsumablesPanel
from gui.Scaleform.daapi.view.meta.ModuleInfoMeta import ModuleInfoMeta
from gui.shared.tooltips.module import ModuleBlockTooltipData
from helpers import dependency
from skeletons.gui.shared import IItemsCache
from xfw import *
import xvm_main.python.config as config
from xvm_main.python.consts import *
from xvm_main.python.logger import *
from xvm_main.python.vehinfo import _getRanges
from xvm_main.python.vehinfo_tiers import getTiers
from xvm_main.python.xvm import l10n
#####################################################################
# globals
# Shell -> compatible-vehicles map; presumably filled elsewhere in this module — not touched in this chunk.
shells_vehicles_compatibility = {}
# Per-vehicle cache of fully built tooltip block lists, keyed by vehicle.intCD.
carousel_tooltips_cache = {}
# Memoised, font-patched text-style templates (see text_styles_getStyle).
styles_templates = {}
# BigWorld callback id for the pending delayed tooltip, or None when none is scheduled.
toolTipDelayIntervalId = None
# NOTE(review): not read or written in this chunk — confirm usage elsewhere.
weightTooHeavy = False
p_replacement = None # will be something like <font size... color...>
#####################################################################
# initialization/finalization
def start():
    """Subscribe tooltips_clear_cache to run whenever the XVM config is (re)loaded."""
    g_eventBus.addListener(XVM_EVENT.CONFIG_LOADED, tooltips_clear_cache)
# Schedule start() for the next BigWorld tick (runs once module import completes).
BigWorld.callback(0, start)
@registerEvent(game, 'fini')
def fini():
    """On game shutdown, detach the config-reload listener added in start()."""
    g_eventBus.removeListener(XVM_EVENT.CONFIG_LOADED, tooltips_clear_cache)
#####################################################################
# handlers
# tooltip delay to resolve performance issue
@overrideMethod(ToolTip, 'onCreateComplexTooltip')
def ToolTip_onCreateComplexTooltip(base, self, tooltipId, stateType):
    """Defer complex-tooltip creation through _createTooltip's delay timer
    (performance workaround: the tooltip is built only after the delay)."""
    # log('ToolTip_onCreateComplexTooltip')
    _createTooltip(self, lambda:_onCreateComplexTooltip_callback(base, self, tooltipId, stateType))
# tooltip delay to resolve performance issue
# suppress carousel tooltips
@overrideMethod(ToolTip, 'onCreateTypedTooltip')
def ToolTip_onCreateTypedTooltip(base, self, type, *args):
    """Optionally suppress vehicle-carousel tooltips, then defer creation
    through _createTooltip's delay timer."""
    try:
        suppress = (type == TOOLTIPS_CONSTANTS.CAROUSEL_VEHICLE
                    and config.get('hangar/carousel/suppressCarouselTooltips'))
        if suppress:
            return
    except Exception:
        err(traceback.format_exc())
    _createTooltip(self, lambda: _onCreateTypedTooltip_callback(base, self, type, *args))
# adds delay for tooltip appearance
def _createTooltip(self, func):
try:
global toolTipDelayIntervalId
self.xvm_hide()
tooltipDelay = config.get('tooltips/tooltipsDelay', 0.4)
toolTipDelayIntervalId = BigWorld.callback(tooltipDelay, func)
except Exception as ex:
err(traceback.format_exc())
def _onCreateTypedTooltip_callback(base, self, type, *args):
    """Delay timer fired: clear the pending-callback id, then build the typed tooltip."""
    global toolTipDelayIntervalId
    toolTipDelayIntervalId = None
    base(self, type, *args)
def _onCreateComplexTooltip_callback(base, self, tooltipId, stateType):
    """Delay timer fired: clear the pending-callback id, then build the complex tooltip."""
    global toolTipDelayIntervalId
    toolTipDelayIntervalId = None
    base(self, tooltipId, stateType)
def _ToolTip_xvm_hide(self):
    """Cancel the pending delayed-tooltip callback, if any."""
    global toolTipDelayIntervalId
    if toolTipDelayIntervalId is None:
        return
    BigWorld.cancelCallback(toolTipDelayIntervalId)
    toolTipDelayIntervalId = None

# Attach as a method so the handlers above can call self.xvm_hide().
ToolTip.xvm_hide = _ToolTip_xvm_hide
#############################
# carousel events
@overrideMethod(tooltips_vehicle.VehicleInfoTooltipData, '_packBlocks')
def VehicleInfoTooltipData_packBlocks(base, self, *args, **kwargs):
    """Build the stock tooltip blocks, then drop any whose 'blocksData' is empty."""
    blocks = base(self, *args, **kwargs)
    return [entry for entry in blocks if entry.get('data', {}).get('blocksData')]
@overrideMethod(tooltips_vehicle.SimplifiedStatsBlockConstructor, 'construct')
def SimplifiedStatsBlockConstructor_construct(base, self):
    """Hide the simplified vehicle-parameters section when so configured."""
    return [] if config.get('tooltips/hideSimplifiedVehParams') else base(self)
@overrideMethod(tooltips_vehicle.AdditionalStatsBlockConstructor, 'construct')
def AdditionalStatsBlockConstructor_construct(base, self):
    """Hide the bottom hint text of the tooltip when so configured."""
    return [] if config.get('tooltips/hideBottomText') else base(self)
@overrideMethod(text_styles, "_getStyle")
def text_styles_getStyle(base, style, ctx = None):
    """Patch WG text styles with the XVM-configured tooltip font.

    Templates using the stock 14px $FieldFont get their size/face replaced by
    'tooltips/fontSize' / 'tooltips/fontName'; patched templates are memoised
    in the module-level ``styles_templates``.  Falls back to the original
    implementation on any error, or for dict-style templates when no
    substitution context is supplied.
    """
    if ctx is None:
        ctx = {}
    try:
        if style not in styles_templates:
            # Template source may be a plain string or a dict carrying 'text'.
            template = g_htmlTemplates['html_templates:lobby/textStyle'][style].source
            template_string = template if type(template) is str else template['text']
            if "size='14'" in template_string and "face='$FieldFont'" in template_string:
                # Substitute the configured tooltip font size and face.
                template_string = template_string \
                    .replace("size='14'", "size='%s'" % config.get('tooltips/fontSize', 14)) \
                    .replace("face='$FieldFont'", "face='%s'" % config.get('tooltips/fontName', '$FieldFont'))
            styles_templates[style] = template_string if type(template) is str else {'text': template_string}
        if type(styles_templates[style]) is str:
            return styles_templates[style]
        else:
            if ctx:
                # Dict-style template: apply %-substitution with the context.
                return styles_templates[style]['text'] % ctx
            else:
                return base(style, ctx)
    except Exception as ex:
        err(traceback.format_exc())
        return base(style, ctx)
def tooltip_add_param(self, result, param0, param1):
    """Append one name/value parameter row to *result*, styled and padded like the stock tooltip rows."""
    result.append(formatters.packTextParameterBlockData(name=text_styles.main(param0), value=text_styles.stats(param1), valueWidth=107, padding=formatters.packPadding(left=self.leftPadding, right=self.rightPadding)))
def tooltip_with_units(value, units):
    """Render a value followed by its units in the dimmed 'standard' style."""
    styled_units = text_styles.standard(units)
    return '%s %s' % (value, styled_units)
def getParameterValue(paramName):
    """Localised parameter label plus its measurement units, styled for tooltips."""
    return text_styles.main(i18n.makeString(MENU.tank_params(paramName))) + text_styles.standard(measureUnitsForParameter(paramName))
def formatNumber(value):
    """Round *value* to 0/1/2 decimals depending on magnitude, then apply the
    WG 'nice number' formatting and return it as a string."""
    if value > 99:
        # Integral rounding for large values (round() with no digits).
        rounded = round(value)
    elif value > 9:
        rounded = round(value, 1)
    else:
        rounded = round(value, 2)
    return str(BigWorld.wg_getNiceNumberFormat(rounded))
# replace <h>text1 <p>text2</p></h> with: text1 text_styles.standard(text2)
def replace_p(text):
    """Flatten WG's <h>/<p> markup: strip <h> tags and restyle <p> content
    with the 'standard' (dimmed) font tag."""
    global p_replacement
    if not p_replacement:
        # Opening <font ...> tag of the 'standard' style; computed once and cached.
        p_replacement = text_styles.standard('').split('>', 1)[0] + '>'
    return (text.replace('<p>', p_replacement)
                .replace('</p>', '</font>')
                .replace('<h>', '')
                .replace('</h>', ''))
# overriding tooltips for tanks in hangar, configuration in tooltips.xc
@overrideMethod(tooltips_vehicle.CommonStatsBlockConstructor, 'construct')
def CommonStatsBlockConstructor_construct(base, self):
try:
self.leftPadding = -15
vehicle = self.vehicle
cache_result = carousel_tooltips_cache.get(vehicle.intCD)
if cache_result:
return cache_result
result = []
if not config.get('tooltips/hideSimplifiedVehParams'):
result.append(formatters.packTitleDescBlock(text_styles.middleTitle(i18n.makeString(TOOLTIPS.TANKCARUSEL_MAINPROPERTY)), padding=formatters.packPadding(left=0, right=self.rightPadding, bottom=8)))
params = self.configuration.params
veh_descr = vehicle.descriptor
gun = vehicle.gun.descriptor
turret = vehicle.turret.descriptor
comparator = idealCrewComparator_helper(vehicle)
vehicleCommonParams = getParameters_helper(vehicle)
veh_type_inconfig = vehicle.type.replace('AT-SPG', 'TD')
clipGunInfoShown = False
premium_shells = {}
for shell in vehicle.shells:
premium_shells[shell.intCompactDescr] = shell.isPremium
if params:
values = config.get('tooltips/%s' % veh_type_inconfig)
if values and len(values):
params_list = values # overriding parameters
else:
params_list = self.PARAMS.get(vehicle.type, 'default') # original parameters
paramInfo = None
for paramName in params_list:
if paramName is None:
continue
if paramName == 'rateOfFire':
paramName = 'reloadTime'
elif paramName == 'traverseLimits':
paramName = 'gunYawLimits' if 'gunYawLimits' in vehicleCommonParams else 'turretYawLimits'
elif paramName == 'radioRange':
paramName = 'radioDistance'
elif paramName == 'reloadTimeSecs' and vehicle.gun.isClipGun():
paramName = 'clipFireRate'
elif paramName == 'turretRotationSpeed' and not vehicle.hasTurrets:
paramName = 'gunRotationSpeed'
if paramName in vehicleCommonParams:
paramInfo = comparator.getExtendedData(paramName)
if paramName == 'turretArmor' and not vehicle.hasTurrets:
continue
#maxHealth
elif paramName == 'maxHealth':
tooltip_add_param(self, result, i18n.makeString('#menu:vehicleInfo/params/maxHealth'), formatNumber(veh_descr.maxHealth))
#battle tiers
elif paramName == 'battleTiers':
(minTier, maxTier) = getTiers(vehicle.level, vehicle.type, vehicle.name)
tooltip_add_param(self, result, l10n('Battle tiers'), '%s..%s' % (minTier, maxTier))
#explosionRadius
elif paramName == 'explosionRadius':
explosionRadiusMin = 999
explosionRadiusMax = 0
for shot in gun['shots']:
if 'explosionRadius' in shot['shell']:
if shot['shell']['explosionRadius'] < explosionRadiusMin:
explosionRadiusMin = shot['shell']['explosionRadius']
if shot['shell']['explosionRadius'] > explosionRadiusMax:
explosionRadiusMax = shot['shell']['explosionRadius']
if explosionRadiusMax == 0: # no HE
continue
explosionRadius_str = formatNumber(explosionRadiusMin)
if explosionRadiusMin != explosionRadiusMax:
explosionRadius_str += '/%s' % gold_pad(formatNumber(explosionRadiusMax))
tooltip_add_param(self, result, getParameterValue(paramName), explosionRadius_str)
#shellSpeedSummary
elif paramName == 'shellSpeedSummary':
shellSpeedSummary_arr = []
for shot in gun['shots']:
shellSpeed_str = '%g' % round(shot['speed'] * 1.25)
if premium_shells[shot['shell']['compactDescr']]:
shellSpeed_str = gold_pad(shellSpeed_str)
shellSpeedSummary_arr.append(shellSpeed_str)
shellSpeedSummary_str = '/'.join(shellSpeedSummary_arr)
tooltip_add_param(self, result, tooltip_with_units(l10n('shellSpeed'), l10n('(m/sec)')), shellSpeedSummary_str)
#piercingPowerAvg
elif paramName == 'piercingPowerAvg':
piercingPowerAvg = formatNumber(veh_descr.shot['piercingPower'][0])
tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/avgPiercingPower')), piercingPowerAvg)
#piercingPowerAvgSummary
elif paramName == 'piercingPowerAvgSummary':
piercingPowerAvgSummary_arr = []
for shot in gun['shots']:
piercingPower_str = formatNumber(shot['piercingPower'][0])
if premium_shells[shot['shell']['compactDescr']]:
piercingPower_str = gold_pad(piercingPower_str)
piercingPowerAvgSummary_arr.append(piercingPower_str)
piercingPowerAvgSummary_str = '/'.join(piercingPowerAvgSummary_arr)
tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/avgPiercingPower')), piercingPowerAvgSummary_str)
#damageAvgSummary
elif paramName == 'damageAvgSummary':
damageAvgSummary_arr = []
for shot in gun['shots']:
damageAvg_str = formatNumber(shot['shell']['damage'][0])
if premium_shells[shot['shell']['compactDescr']]:
damageAvg_str = gold_pad(damageAvg_str)
damageAvgSummary_arr.append(damageAvg_str)
damageAvgSummary_str = '/'.join(damageAvgSummary_arr)
tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/avgDamage')), damageAvgSummary_str)
#magazine loading
# elif (paramName == 'reloadTimeSecs' or paramName == 'rateOfFire') and vehicle.gun.isClipGun():
# if clipGunInfoShown:
# continue
# (shellsCount, shellReloadingTime) = gun['clip']
# reloadMagazineTime = gun['reloadTime']
# shellReloadingTime_str = formatNumber(shellReloadingTime)
# reloadMagazineTime_str = formatNumber(reloadMagazineTime)
# tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/shellsCount')), shellsCount)
# tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/shellReloadingTime')), shellReloadingTime_str)
# tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/reloadMagazineTime')), reloadMagazineTime_str)
# clipGunInfoShown = True
#rate of fire
# elif paramName == 'rateOfFire' and not vehicle.gun.isClipGun():
# rateOfFire_str = formatNumber(60 / gun['reloadTime'])
# tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/reloadTime')), rateOfFire_str)
# gun traverse limits
# elif paramName == 'traverseLimits' and gun['turretYawLimits']:
# (traverseMin, traverseMax) = gun['turretYawLimits']
# traverseLimits_str = '%g..+%g' % (round(degrees(traverseMin)), round(degrees(traverseMax)))
# tooltip_add_param(self, result, l10n('traverseLimits'), traverseLimits_str)
# elevation limits (front)
# elif paramName == 'pitchLimits':
# (pitchMax, pitchMin) = calcPitchLimitsFromDesc(0, gun['pitchLimits'])
# pitchLimits_str = '%g..+%g' % (round(degrees(-pitchMin)), round(degrees(-pitchMax)))
# tooltip_add_param(self, result, l10n('pitchLimits'), pitchLimits_str)
# elevation limits (side)
elif paramName == 'pitchLimitsSide':
if gun['turretYawLimits'] and abs(degrees(gun['turretYawLimits'][0])) < 89: continue # can't look aside 90 degrees
(pitchMax, pitchMin) = calcPitchLimitsFromDesc(pi / 2, gun['pitchLimits'])
pitchLimits_str = '%g..+%g' % (round(degrees(-pitchMin)), round(degrees(-pitchMax)))
tooltip_add_param(self, result, l10n('pitchLimitsSide'), pitchLimits_str)
# elevation limits (rear)
elif paramName == 'pitchLimitsRear':
if gun['turretYawLimits']: continue # can't look back
(pitchMax, pitchMin) = calcPitchLimitsFromDesc(pi, gun['pitchLimits'])
pitchLimits_str = '%g..+%g' % (round(degrees(-pitchMin)), round(degrees(-pitchMax)))
tooltip_add_param(self, result, l10n('pitchLimitsRear'), pitchLimits_str)
# shooting range
elif paramName == 'shootingRadius':
viewRange, shellRadius, artiRadius = _getRanges(turret, gun, vehicle.nationName, vehicle.type)
if vehicle.type == 'SPG':
tooltip_add_param(self, result, tooltip_with_units(l10n('shootingRadius'), l10n('(m)')), artiRadius)
elif shellRadius < 707:
tooltip_add_param(self, result, tooltip_with_units(l10n('shootingRadius'), l10n('(m)')), shellRadius)
#reverse max speed
elif paramName == 'speedLimits':
(speedLimitForward, speedLimitReverse) = veh_descr.physics['speedLimits']
speedLimits_str = str(int(speedLimitForward * 3.6)) + '/' + str(int(speedLimitReverse * 3.6))
tooltip_add_param(self, result, getParameterValue(paramName), speedLimits_str)
#turret rotation speed
# elif paramName == 'turretRotationSpeed' or paramName == 'gunRotationSpeed':
# if not vehicle.hasTurrets:
# paramName = 'gunRotationSpeed'
# turretRotationSpeed_str = str(int(degrees(veh_descr.turret['rotationSpeed'])))
# tooltip_add_param(self, result, tooltip_with_units(i18n.makeString('#menu:tank_params/%s' % paramName).rstrip(), i18n.makeString('#menu:tank_params/gps')), turretRotationSpeed_str)
#terrain resistance
elif paramName == 'terrainResistance':
resistances_arr = []
for key in veh_descr.chassis['terrainResistance']:
resistances_arr.append(formatNumber(key))
terrainResistance_str = '/'.join(resistances_arr)
tooltip_add_param(self, result, l10n('terrainResistance'), terrainResistance_str)
#radioRange
# elif paramName == 'radioRange':
# radioRange_str = '%s' % int(vehicle.radio.descriptor['distance'])
# tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/radioDistance')), radioRange_str)
#gravity
elif paramName == 'gravity':
gravity_str = formatNumber(veh_descr.shot['gravity'])
tooltip_add_param(self, result, l10n('gravity'), gravity_str)
#inner name, for example - ussr:R100_SU122A
elif paramName == 'innerName':
tooltip_add_param(self, result, vehicle.name, '')
#custom text
elif paramName.startswith('TEXT:'):
customtext = paramName[5:]
tooltip_add_param(self, result, l10n(customtext), '')
elif paramInfo is not None and paramName in paramInfo.name:
valueStr = str(param_formatter.formatParameter(paramName, paramInfo.value))
tooltip_add_param(self, result, getParameterValue(paramName), valueStr)
if vehicle.isInInventory:
# optional devices icons, must be in the end
if 'optDevicesIcons' in params_list:
optDevicesIcons_arr = []
for key in vehicle.optDevices:
if key:
imgPath = 'img://gui' + key.icon.lstrip('.')
else:
imgPath = 'img://gui/maps/icons/artefact/empty.png'
optDevicesIcons_arr.append('<img src="%s" height="16" width="16">' % imgPath)
optDevicesIcons_str = ' '.join(optDevicesIcons_arr)
tooltip_add_param(self, result, optDevicesIcons_str, '')
# equipment icons, must be in the end
if 'equipmentIcons' in params_list:
equipmentIcons_arr = []
for key | |
}}
will be rendered as:
.. code-block:: text
['a', 'b', 'c']
'''
ret = None
if isinstance(values, Hashable):
ret = set(values)
else:
ret = []
for value in values:
if value not in ret:
ret.append(value)
return ret
@jinja_filter('min')
def lst_min(obj):
    '''
    Return the smallest element of the given iterable.

    .. code-block:: jinja

        {% set my_list = [1,2,3,4] -%}
        {{ my_list | min }}

    will be rendered as:

    .. code-block:: text

        1
    '''
    smallest = min(obj)
    return smallest
@jinja_filter('max')
def lst_max(obj):
    '''
    Return the largest element of the given iterable.

    .. code-block:: jinja

        {% set my_list = [1,2,3,4] -%}
        {{ my_list | max }}

    will be rendered as:

    .. code-block:: text

        4
    '''
    largest = max(obj)
    return largest
@jinja_filter('avg')
def lst_avg(lst):
    '''
    Returns the average value of a list.

    .. code-block:: jinja

        {% set my_list = [1,2,3,4] -%}
        {{ my_list | avg }}

    will be rendered as:

    .. code-block:: yaml

        2.5

    :param lst: a non-hashable sequence of numbers (e.g. a list); hashable
        inputs (e.g. a single number) fall through to ``float(lst)``.
    :returns: the arithmetic mean as a float.
    '''
    if not isinstance(lst, Hashable):
        # Cast the numerator to float BEFORE dividing: under Python 2,
        # ``sum(lst) / len(lst)`` on integers is truncating integer division,
        # so e.g. avg([1, 2]) was 1.0 instead of 1.5. Identical on Python 3.
        return float(sum(lst)) / float(len(lst))
    return float(lst)
@jinja_filter('union')
def union(lst1, lst2):
    '''
    Returns the union of two lists.

    .. code-block:: jinja

        {% set my_list = [1,2,3,4] -%}
        {{ my_list | union([2, 4, 6]) }}

    will be rendered as:

    .. code-block:: text

        [1, 2, 3, 4, 6]
    '''
    both_hashable = isinstance(lst1, Hashable) and isinstance(lst2, Hashable)
    if both_hashable:
        # Hashable inputs (e.g. tuples/strings) use native set union.
        return set(lst1) | set(lst2)
    # Otherwise preserve order and deduplicate via the ``unique`` filter.
    return unique(lst1 + lst2)
@jinja_filter('intersect')
def intersect(lst1, lst2):
    '''
    Returns the intersection of two lists.

    .. code-block:: jinja

        {% set my_list = [1,2,3,4] -%}
        {{ my_list | intersect([2, 4, 6]) }}

    will be rendered as:

    .. code-block:: text

        [2, 4]
    '''
    both_hashable = isinstance(lst1, Hashable) and isinstance(lst2, Hashable)
    if both_hashable:
        # Hashable inputs (e.g. tuples/strings) use native set intersection.
        return set(lst1) & set(lst2)
    # Otherwise keep lst1's order and deduplicate via the ``unique`` filter.
    shared = [item for item in lst1 if item in lst2]
    return unique(shared)
@jinja_filter('difference')
def difference(lst1, lst2):
    '''
    Returns the difference of two lists (the elements of ``lst1`` that are
    not in ``lst2``).

    .. code-block:: jinja

        {% set my_list = [1,2,3,4] -%}
        {{ my_list | difference([2, 4, 6]) }}

    will be rendered as:

    .. code-block:: text

        [1, 3]
    '''
    if isinstance(lst1, Hashable) and isinstance(lst2, Hashable):
        return set(lst1) - set(lst2)
    return unique([ele for ele in lst1 if ele not in lst2])
@jinja_filter('symmetric_difference')
def symmetric_difference(lst1, lst2):
    '''
    Returns the symmetric difference of two lists (the elements that are
    in exactly one of the two lists).

    .. code-block:: jinja

        {% set my_list = [1,2,3,4] -%}
        {{ my_list | symmetric_difference([2, 4, 6]) }}

    will be rendered as:

    .. code-block:: text

        [1, 3, 6]
    '''
    if isinstance(lst1, Hashable) and isinstance(lst2, Hashable):
        return set(lst1) ^ set(lst2)
    return unique([ele for ele in union(lst1, lst2) if ele not in intersect(lst1, lst2)])
@jinja2.contextfunction
def show_full_context(ctx):
    '''
    Return the full Jinja render context, converted to simple types.
    '''
    context_snapshot = {name: val for name, val in ctx.items()}
    return salt.utils.data.simple_types_filter(context_snapshot)
class SerializerExtension(Extension, object):
'''
Yaml and Json manipulation.
**Format filters**
Allows jsonifying or yamlifying any data structure. For example, this dataset:
.. code-block:: python
data = {
'foo': True,
'bar': 42,
'baz': [1, 2, 3],
'qux': 2.0
}
.. code-block:: jinja
yaml = {{ data|yaml }}
json = {{ data|json }}
python = {{ data|python }}
xml = {{ {'root_node': data}|xml }}
will be rendered as::
yaml = {bar: 42, baz: [1, 2, 3], foo: true, qux: 2.0}
json = {"baz": [1, 2, 3], "foo": true, "bar": 42, "qux": 2.0}
python = {'bar': 42, 'baz': [1, 2, 3], 'foo': True, 'qux': 2.0}
xml = """<?xml version="1.0" ?>
<root_node bar="42" foo="True" qux="2.0">
<baz>1</baz>
<baz>2</baz>
<baz>3</baz>
</root_node>"""
The yaml filter takes an optional flow_style parameter to control the
default-flow-style parameter of the YAML dumper.
.. code-block:: jinja
{{ data|yaml(False) }}
will be rendered as:
.. code-block:: yaml
bar: 42
baz:
- 1
- 2
- 3
foo: true
qux: 2.0
**Load filters**
Strings and variables can be deserialized with **load_yaml** and
**load_json** tags and filters. It allows one to manipulate data directly
in templates, easily:
.. code-block:: jinja
{%- set yaml_src = "{foo: it works}"|load_yaml %}
{%- set json_src = "{'bar': 'for real'}"|load_json %}
Dude, {{ yaml_src.foo }} {{ json_src.bar }}!
will be rendered as::
Dude, it works for real!
**Load tags**
Salt implements ``load_yaml`` and ``load_json`` tags. They work like
the `import tag`_, except that the document is also deserialized.
Syntaxes are ``{% load_yaml as [VARIABLE] %}[YOUR DATA]{% endload %}``
and ``{% load_json as [VARIABLE] %}[YOUR DATA]{% endload %}``
For example:
.. code-block:: jinja
{% load_yaml as yaml_src %}
foo: it works
{% endload %}
{% load_json as json_src %}
{
"bar": "for real"
}
{% endload %}
Dude, {{ yaml_src.foo }} {{ json_src.bar }}!
will be rendered as::
Dude, it works for real!
**Import tags**
External files can be imported and made available as a Jinja variable.
.. code-block:: jinja
{% import_yaml "myfile.yml" as myfile %}
{% import_json "defaults.json" as defaults %}
{% import_text "completeworksofshakespeare.txt" as poems %}
**Catalog**
``import_*`` and ``load_*`` tags will automatically expose their
target variable to import. This feature makes it easy to build a
catalog of data to handle.
for example:
.. code-block:: jinja
# doc1.sls
{% load_yaml as var1 %}
foo: it works
{% endload %}
{% load_yaml as var2 %}
bar: for real
{% endload %}
.. code-block:: jinja
# doc2.sls
{% from "doc1.sls" import var1, var2 as local2 %}
{{ var1.foo }} {{ local2.bar }}
** Escape Filters **
.. versionadded:: 2017.7.0
Allows escaping of strings so they can be interpreted literally by another
function.
For example:
.. code-block:: jinja
regex_escape = {{ 'https://example.com?foo=bar%20baz' | regex_escape }}
will be rendered as::
regex_escape = https\\:\\/\\/example\\.com\\?foo\\=bar\\%20baz
** Set Theory Filters **
.. versionadded:: 2017.7.0
Performs set math using Jinja filters.
For example:
.. code-block:: jinja
unique = {{ ['foo', 'foo', 'bar'] | unique }}
will be rendered as::
unique = ['foo', 'bar']
.. _`import tag`: http://jinja.pocoo.org/docs/templates/#import
'''
tags = {'load_yaml', 'load_json', 'import_yaml', 'import_json', 'load_text', 'import_text'}
def __init__(self, environment):
    '''
    Register the serializer filters on the environment and chain our
    finalizer in front of any finalizer the environment already has.
    '''
    super(SerializerExtension, self).__init__(environment)
    self.environment.filters.update({
        'yaml': self.format_yaml,
        'json': self.format_json,
        'xml': self.format_xml,
        'python': self.format_python,
        'load_yaml': self.load_yaml,
        'load_json': self.load_json,
        'load_text': self.load_text,
    })

    if self.environment.finalize is None:
        self.environment.finalize = self.finalizer
    else:
        finalizer = self.environment.finalize

        @wraps(finalizer)
        def wrapper(data):
            # Jinja invokes ``finalize(value)`` with a single positional
            # argument. The previous wrapper was declared ``(self, data)``,
            # so Jinja's one-argument call bound the value to ``self`` and
            # raised TypeError for the missing ``data``; close over ``self``
            # from the enclosing scope instead.
            return finalizer(self.finalizer(data))
        self.environment.finalize = wrapper
def finalizer(self, data):
    '''
    Ensure that printed mappings are YAML friendly.
    '''
    def _convert(node):
        # Mappings are wrapped in PrintableDict, values converted recursively.
        if isinstance(node, (dict, OrderedDict)):
            pairs = [(key, _convert(value))
                     for key, value in six.iteritems(node)]
            return PrintableDict(pairs)
        # Sequences keep their concrete type; contents converted recursively.
        if isinstance(node, (list, tuple, set)):
            return node.__class__(_convert(value) for value in node)
        # Scalars pass through untouched.
        return node

    return _convert(data)
def format_json(self, value, sort_keys=True, indent=None):
    '''
    Serialize ``value`` to a JSON string and mark it safe for Jinja.

    :param value: data structure to serialize
    :param sort_keys: sort mapping keys in the output (default True)
    :param indent: indentation passed through to the JSON dumper
    :returns: a ``Markup``-wrapped JSON string
    '''
    serialized = salt.utils.json.dumps(
        value, sort_keys=sort_keys, indent=indent
    ).strip()
    try:
        markup = Markup(serialized)
    except UnicodeDecodeError:
        # Byte strings may fail implicit decoding on Python 2; decode explicitly.
        markup = Markup(salt.utils.stringutils.to_unicode(serialized))
    return markup
def format_yaml(self, value, flow_style=True):
    '''
    Serialize ``value`` to a YAML string and mark it safe for Jinja.

    :param value: data structure to serialize
    :param flow_style: use YAML flow style (inline braces) when True
    :returns: a ``Markup``-wrapped YAML string
    '''
    yaml_txt = salt.utils.yaml.safe_dump(
        value, default_flow_style=flow_style).strip()
    # safe_dump terminates some documents with an explicit "..." end-of-document
    # marker; strip it so the rendered template stays clean.
    if yaml_txt.endswith(str('\n...')):  # future lint: disable=blacklisted-function
        yaml_txt = yaml_txt[:-4]
    try:
        markup = Markup(yaml_txt)
    except UnicodeDecodeError:
        # Byte strings may fail implicit decoding on Python 2; decode explicitly.
        markup = Markup(salt.utils.stringutils.to_unicode(yaml_txt))
    return markup
def format_xml(self, value):
    """Render a formatted multi-line XML string from a complex Python
    data structure. Supports tag attributes and nested dicts/lists.

    :param value: Complex data structure representing XML contents
    :returns: Formatted XML string rendered with newlines and indentation
    :rtype: str
    """
    def normalize_iter(value):
        # Normalize the input into an iterable of (tag, content) pairs.
        # NOTE(review): assumes a non-empty list whose first element's type
        # is representative of the rest; an empty list raises IndexError
        # here -- confirm callers never pass one.
        if isinstance(value, (list, tuple)):
            if isinstance(value[0], str):
                xmlval = value
            else:
                # A list of non-strings yields no top-level pairs here;
                # list contents are handled by recurse_tree instead.
                xmlval = []
        elif isinstance(value, dict):
            xmlval = list(value.items())
        else:
            raise TemplateRuntimeError(
                'Value is not a dict or list. Cannot render as XML')
        return xmlval

    def recurse_tree(xmliter, element=None):
        # Build ElementTree nodes from (tag, content) pairs; returns the
        # last element created at this level (the root on the first call).
        sub = None
        for tag, attrs in xmliter:
            if isinstance(attrs, list):
                # A list under a tag means repeated sibling elements.
                for attr in attrs:
                    recurse_tree(((tag, attr),), element)
            elif element is not None:
                sub = SubElement(element, tag)
            else:
                sub = Element(tag)
            if isinstance(attrs, (str, int, bool, float)):
                # Scalar content becomes the element's text.
                sub.text = six.text_type(attrs)
                continue
            if isinstance(attrs, dict):
                # Scalar dict values become XML attributes ...
                sub.attrib = {attr: six.text_type(val) for attr, val in attrs.items()
                              if not isinstance(val, (dict, list))}
            # ... while nested dict/list values become child elements.
            for tag, val in [item for item in normalize_iter(attrs) if
                             isinstance(item[1], (dict, list))]:
                recurse_tree(((tag, val),), sub)
        return sub

    # Serialize the tree, then re-parse with minidom purely for pretty-printing.
    return Markup(minidom.parseString(
        tostring(recurse_tree(normalize_iter(value)))
    ).toprettyxml(indent=" "))
def format_python(self, value):
    '''
    Pretty-print ``value`` as a Python literal, marked safe for Jinja.
    '''
    pretty = pprint.pformat(value)
    return Markup(pretty.strip())
def load_yaml(self, value):
    '''
    Deserialize a YAML document into Python data.

    :param value: YAML text, or a ``TemplateModule`` whose rendered text
        is YAML
    :returns: the decoded data structure
    :raises TemplateRuntimeError: when the value cannot be parsed, with
        the offending source context included when available
    '''
    if isinstance(value, TemplateModule):
        # Imported templates render lazily; stringify to get their text.
        value = six.text_type(value)
    try:
        return salt.utils.data.decode(salt.utils.yaml.safe_load(value))
    except salt.utils.yaml.YAMLError as exc:
        msg = 'Encountered error loading yaml: '
        try:
            # Reported line is off by one, add 1 to correct it
            line = exc.problem_mark.line + 1
            buf = exc.problem_mark.buffer
            problem = exc.problem
        except AttributeError:
            # No context information available in the exception, fall back
            # to the stringified version of the exception.
            msg += six.text_type(exc)
        else:
            # Point at the failing line in the original document.
            msg += '{0}\n'.format(problem)
            msg += salt.utils.stringutils.get_context(
                buf,
                line,
                marker=' <======================')
        raise TemplateRuntimeError(msg)
    except AttributeError:
        # safe_load was handed something that is not string-like.
        raise TemplateRuntimeError(
            'Unable to load yaml from {0}'.format(value))
def load_json(self, value):
    '''
    Deserialize a JSON document into Python data.

    :param value: JSON text, or a ``TemplateModule`` whose rendered text
        is JSON
    :returns: the decoded data structure
    :raises TemplateRuntimeError: when the value cannot be parsed
    '''
    # Imported templates render lazily; stringify to get their text.
    text = six.text_type(value) if isinstance(value, TemplateModule) else value
    try:
        return salt.utils.json.loads(text)
    except (ValueError, TypeError, AttributeError):
        raise TemplateRuntimeError(
            'Unable to load json from {0}'.format(text))
def | |
is connected backwards
sectionID = k
_from = v["tonodeid"]
_to = v["fromnodeid"]
phases = list(v["phase"])
try:
api_source = PowerSource(model)
except:
pass
api_source.name = _from + "_src"
try:
api_source.nominal_voltage = (
float(source_equivalent_data["voltage"]) * 10 ** 3
)
except:
pass
try:
api_source.phases = phases
except:
pass
api_source.is_sourcebus = True
try:
api_source.rated_power = 10 ** 3 * float(
source_equivalent_data["mva"]
) # Modified from source cases where substations can be used.
except:
pass
# TODO: connection_type
try:
api_source.phase_angle = source_equivalent_data["operatingangle1"]
except:
pass
# try:
if "positivesequenceresistance" in source_equivalent_data:
api_source.positive_sequence_impedance = complex(
float(source_equivalent_data["positivesequenceresistance"]),
float(source_equivalent_data["positivesequencereactance"]),
)
else:
api_source.positive_sequence_impedance = complex(
float(source_equivalent_data["firstlevelr1"]),
float(source_equivalent_data["firstlevelx1"]),
)
# except:
# pass
if "zerosequenceresistance" in source_equivalent_data:
api_source.zero_sequence_impedance = complex(
float(source_equivalent_data["zerosequenceresistance"]),
float(source_equivalent_data["zerosequencereactance"]),
)
else:
api_source.zero_sequence_impedance = complex(
float(source_equivalent_data["firstlevelr0"]),
float(source_equivalent_data["firstlevelx0"]),
)
try:
api_source.connecting_element = _from
except:
pass
else:
for sid, sdata in sources.items():
source_equivalent_data = None
if "nodeid" in sdata and sdata["nodeid"] in source_equivalents:
source_equivalent_data = source_equivalents[sdata["nodeid"]]
if sid in subs:
# Find the section
for k, v in self.section_phase_mapping.items():
if v["fromnodeid"] == sdata["nodeid"]:
sectionID = k
_from = v["fromnodeid"]
_to = v["tonodeid"]
phases = list(v["phase"])
if v["tonodeid"] == sdata["nodeid"]: #If it's backwards
sectionID = k
_to = v["fromnodeid"]
_from = v["tonodeid"]
phases = list(v["phase"])
try:
api_source = PowerSource(model)
except:
pass
api_source.name = _from + "_src"
try:
if "desiredvoltage" in sdata:
api_source.nominal_voltage = (
float(sdata["desiredvoltage"]) * 10 ** 3
)
else:
api_source.nominal_voltage = (
float(source_equivalent_data["voltage"]) * 10 ** 3
)
except:
pass
try:
api_source.phases = phases
except:
pass
api_source.is_sourcebus = True
try:
api_source.rated_power = 10 ** 3 * float(subs[sid]["mva"])
except:
pass
# TODO: connection_type
try:
api_source.phase_angle = source_equivalent_data[
"operatingangle1"
]
except:
pass
# try:
if "positivesequenceresistance" in source_equivalent_data:
api_source.positive_sequence_impedance = complex(
float(source_equivalent_data["positivesequenceresistance"]),
float(source_equivalent_data["positivesequencereactance"]),
)
else:
api_source.positive_sequence_impedance = complex(
float(source_equivalent_data["firstlevelr1"]),
float(source_equivalent_data["firstlevelx1"]),
)
# except:
# pass
if "zerosequenceresistance" in source_equivalent_data:
api_source.zero_sequence_impedance = complex(
float(source_equivalent_data["zerosequenceresistance"]),
float(source_equivalent_data["zerosequencereactance"]),
)
else:
api_source.zero_sequence_impedance = complex(
float(source_equivalent_data["firstlevelr0"]),
float(source_equivalent_data["firstlevelx0"]),
)
try:
api_source.zero_sequence_impedance = complex(
source_equivalent_data["zerosequenceresistance"],
source_equivalent_data["zerosequencereactance"],
)
except:
pass
try:
api_source.connecting_element = _from
except:
pass
# try:
# api_transformer=PowerTransformer(model)
# except:
# pass
# try:
# api_transformer.is_substation=1
# except:
# pass
# try:
# api_transformer.name=sid
# except:
# pass
# try:
# api_transformer.rated_power=10**3*float(subs[sid]['mva'])
# except:
# pass
# try:
# api_transformer.from_element=_from
# except:
# pass
# try:
# api_transformer.to_element=_to
# except:
# pass
# for w in range(2):
# try:
# api_winding=Winding(model)
# except:
# pass
# try:
# api_winding.connection_type=self.transformer_connection_configuration_mapping(subs[sid]['conn'])
# except:
# pass
# try:
# api_winding.nominal_voltage=10**3*float(subs[sid]['kvll'])
# except:
# pass
# try:
# api_winding.rated_power=10**6*float(subs[sid]['mva'])
# except:
# pass
# for p in phases:
# try:
# api_phase_winding=PhaseWinding(model)
# except:
# pass
# try:
# api_phase_winding.phase=self.phase_mapping(p)
# except:
# pass
# api_winding.phase_windings.append(api_phase_winding)
# api_transformer.windings.append(api_winding)
def parse_nodes(self, model):
    """
    Parse the nodes from CYME to DiTTo.

    Reads the network file twice: once for the plain node records and once
    for the node-connector records, then builds one DiTTo ``Node`` per
    parsed node with its name, position(s) and rated voltage.

    :param model: DiTTo model
    :type model: DiTTo model
    :returns: 1 on completion
    """
    self._nodes = []
    # Open the network file
    self.get_file_content("network")
    # Default mapp (positions if all fields are present in the format)
    mapp = {
        "nodeid": 0,
        "ratedvoltage": 48,
        "coordx": 2,
        "coordy": 3,
        "coordx1": 2,
        "coordy1": 3,
        "coordx2": 4,
        "coordy2": 5,
    }
    nodes = {}
    node_connectors = {}
    kwargs = {
        "additional_attributes_list": [
            "nodeid",
            "coordx1",
            "coordy1",
            "coordx2",
            "coordy2",
            "ratedvoltage",
        ]
    }  # In case there are buses included in the node list with x1, y1, x2, y2 positions
    for line in self.content:
        nodes.update(
            self.parser_helper(
                line,
                ["node"],
                ["nodeid", "coordx", "coordy", "ratedvoltage"],
                mapp,
                **kwargs
            )
        )
    # Re-open the network file for the second pass -- self.content was
    # consumed by the loop above (presumably it is a generator; confirm).
    self.get_file_content("network")
    for line in self.content:
        node_connectors.update(
            self.parser_helper(
                line, ["node_connector"], ["nodeid", "coordx", "coordy"], mapp
            )
        )
    for ID, node in nodes.items():
        # Create a new DiTTo node object
        api_node = Node(model)
        # Set the name
        try:
            api_node.name = ID
        except:
            pass
        # Set the coordinates
        try:
            if "coordx" in node:
                # Simple node: a single (x, y) position.
                position = Position(model)
                position.long = float(node["coordx"])
                position.lat = float(node["coordy"])
                position.elevation = 0
                api_node.positions.append(position)
            elif "coordx1" in node:
                # Bus-style node: endpoints (x1, y1) and (x2, y2), with any
                # intermediate connector points inserted between them.
                api_node.positions = []
                position1 = Position(model)
                position1.long = float(node["coordx1"])
                position1.lat = float(node["coordy1"])
                position1.elevation = 0
                api_node.positions.append(position1)
                if ID in node_connectors:
                    ID_inc = ID
                    # NOTE(review): duplicate connector entries appear to be
                    # stored under keys suffixed with "*" -- confirm against
                    # parser_helper's key-collision handling.
                    while ID_inc in node_connectors:
                        values = node_connectors[ID_inc]
                        position_i = Position(model)
                        position_i.long = float(values["coordx"])
                        position_i.lat = float(values["coordy"])
                        position_i.elevation = 0
                        api_node.positions.append(position_i)
                        ID_inc += "*"
                position2 = Position(model)
                position2.long = float(node["coordx2"])
                position2.lat = float(node["coordy2"])
                position2.elevation = 0
                api_node.positions.append(position2)
        except:
            pass
        # Set the nominal voltage
        try:
            api_node.nominal_voltage = float(node["ratedvoltage"])
        except:
            pass
        # Add the node to the list
        self._nodes.append(api_node)
    return 1
def configure_wire(
    self,
    model,
    conductor_data,
    spacing_data,
    phase,
    is_switch,
    is_fuse,
    is_open,
    is_network_protector,
    is_breaker,
    is_recloser,
    is_sectionalizer,
):
    """Helper function that creates a DiTTo wire object and configures it.

    :param model: DiTTo model the new Wire is attached to
    :param conductor_data: dict of conductor attributes (id, diameter, gmr, amps, ...)
    :param spacing_data: dict of conductor spacing positions
    :param phase: phase of the wire ('A', 'B', 'C', 'N' or 'N2')
    :param is_switch: flag copied onto the wire (likewise the other is_* flags)
    :returns: the configured Wire object
    """
    # Instanciate the wire DiTTo object
    api_wire = Wire(model)
    # Set the phase of the wire
    try:
        api_wire.phase = phase
    except:
        pass
    # Default the nameclass to the conductor id (may be overridden below
    # when an explicit 'nameclass' field is present)
    try:
        api_wire.nameclass = conductor_data["id"]
    except:
        pass
    # Set the flags
    api_wire.is_switch = is_switch
    api_wire.is_open = is_open
    api_wire.is_fuse = is_fuse
    api_wire.is_network_protector = is_network_protector
    api_wire.is_breaker = is_breaker
    api_wire.is_recloser = is_recloser
    api_wire.is_sectionalizer = is_sectionalizer
    # Set the diameter of the wire
    try:
        api_wire.diameter = float(conductor_data["diameter"])
    except:
        pass
    # Set the nameclass
    # BUG FIX: was 'api.wire.nameclass' (undefined name 'api'), which always
    # raised and was silently swallowed by the bare except, so the explicit
    # nameclass was never applied.
    try:
        api_wire.nameclass = conductor_data["nameclass"]
    except:
        pass
    # Set the GMR of the wire
    try:
        api_wire.gmr = float(conductor_data["gmr"])
    except:
        pass
    # Set the ampacity of the wire
    try:
        api_wire.ampacity = float(conductor_data["amps"])
    except:
        pass
    # Set the interrupting current of the wire if it is a network protector,
    # a fuse, a sectionalizer, a breaker, or a recloser
    if (
        is_network_protector
        or is_fuse
        or is_sectionalizer
        or is_breaker
        or is_recloser
    ):
        try:
            api_wire.interrupting_rating = float(
                conductor_data["interruptingrating"]
            )
        except:
            pass
    # Set the emergency ampacity of the wire
    try:
        api_wire.emergency_ampacity = float(conductor_data["withstandrating"])
    except:
        pass
    # Set the X spacing
    x_map = {
        "A": "posofcond1_x",
        "B": "posofcond2_x",
        "C": "posofcond3_x",
        "N": "posofneutralcond_x",
        "N2": "posofneutralcond_n2_x",
    }
    try:
        api_wire.X = spacing_data[x_map[phase]]
    except:
        pass
    # Set the Y spacing
    y_map = {
        "A": "posofcond1_y",
        "B": "posofcond2_y",
        "C": "posofcond3_y",
        "N": "posofneutralcond_y",
        "N2": "posofneutralcond_n2_y",
    }
    try:
        # BUG FIX: was 'spacing[...]' (undefined name), so the NameError was
        # swallowed by the bare except and Y was never set.
        api_wire.Y = spacing_data[y_map[phase]]
    except:
        pass
    return api_wire
def parse_sections(self, model):
"""
This function is responsible for parsing the sections. It is expecting the following structure:
...
[SECTION]
FORMAT_section=sectionid,fromnodeid,tonodeid,phase
FORMAT_Feeder=networkid,headnodeid
Feeder=feeder_1,head_feeder_1
section_1_feeder_1,node_1,node_2,ABC
...
...
Feeder=feeder_2,head_feeder_2
section_1_feeder_2,node_1,node_2,ABC
...
...
**What is done in this function:**
- We need to create a clear and fast mapping between feeders and sectionids
- Same thing, mapping between sectionids and nodes/phases
- Since we will be using these structures a lot in the reader, we need something fast that does not involve looping like crazy
**Data structures:**
1) feeder_section_mapping: dictionary where keys are network_ids and values are lists of section id_s
2) section_feeder_mapping: dictionary where keys are section ids and values are network_ids
(to perform the opposite query as 1) without having to look in every lists of section ids until we find the good one...)
3) section_phase_mapping: dictionary where keys are section ids and values are tuples (node_1, node_2, phase)
.. warning:: This should be called prior to any other parser because the other parsers rely on these 3 data structures.
"""
self.feeder_section_mapping = {}
self.section_feeder_mapping = {}
self.section_phase_mapping = {}
self.network_data = {}
format_section = None
format_feeder = None
_netID = None
job_is_done = False
# Open the network file
self.get_file_content("network")
# Loop over the network file
for line in self.content:
# This will stop reading the file if we have already worked on the sections
if job_is_done:
break
# Find the section section
if "[SECTION]" in line:
job_is_done = True
line = next(self.content)
# Until we meet the next section header, work...
while len(line) > 2 and (
line[0] != "["
or line[0] != " "
or line[0] != "\n"
or line[0] != "\t\n"
):
# First, we grab the format used to define sections
if "format_section" in line.lower():
format_section = list(
map(
lambda x: x.strip(),
map(lambda x: x.lower(), line.split("=")[1].split(",")),
)
)
# Then, we grab the format used to define feeders
elif (
"format_feeder" in line.lower()
or "format_substation" in line.lower()
or "format_generalnetwork" in line.lower()
):
format_feeder = list(
map(
lambda x: x.strip(),
map(lambda x: x.lower(), line.split("=")[1].split(",")),
)
)
# If we have a new feeder declaration
elif len(line) >= 7 and (
line[:7].lower() == "feeder="
or line[:11].lower() | |
settle.." % (self.hardwareDelay+3))
time.sleep(self.hardwareDelay+3)
def toggleLvSupplies(self):
    '''
    Flip the LV (low-voltage) supply enable, verify the hardware took the
    new state, and update the power tab UI accordingly.
    '''
    currentState = self.app_main.pwr_card.lvEnableGet()
    nextState = not currentState
    self.app_main.pwr_card.lvEnableSet(nextState)
    stateNow = self.app_main.pwr_card.lvEnableGet()
    # PEP 8: compare to None with 'is not' (was '!= None'). nextState is
    # always a bool here, so the guard effectively always holds.
    if nextState is not None and stateNow != nextState:
        self.msgPrint("ERROR: failed to switch LV enable to %d" % nextState, bError=True)
    else:
        self.app_main.mainWindow.pwrTab.powerBtnStateUpdate('lv', stateNow)
        self.app_main.mainWindow.pwrTab.powerStatusUpdateDone()
    # Give the hardware time to settle before the next operation.
    time.sleep(self.hardwareDelay)
def toggleHvSupplies(self):
    '''
    Flip the HV (high-voltage) supply enable, verify the hardware took the
    new state, and update the power tab UI accordingly.
    '''
    currentState = self.app_main.pwr_card.hvEnableGet()
    nextState = not currentState
    self.app_main.pwr_card.hvEnableSet(nextState)
    stateNow = self.app_main.pwr_card.hvEnableGet()
    # PEP 8: compare to None with 'is not' (was '!= None'). nextState is
    # always a bool here, so the guard effectively always holds.
    if nextState is not None and stateNow != nextState:
        self.msgPrint("ERROR: failed to switch HV enable to %d" % nextState, bError=True)
    else:
        self.app_main.mainWindow.pwrTab.powerBtnStateUpdate('hv', stateNow)
        self.app_main.mainWindow.pwrTab.powerStatusUpdateDone()
    # Give the hardware time to settle before the next operation.
    time.sleep(self.hardwareDelay)
def obtainPowerSuppliesState(self, powerState):
    '''
    Cache LV/HV/bias readings for every power card and flag mismatches.

    For each power card, reads the ASIC power enable (0 = Off, 1 = On),
    the sensor bias enable (0 = Off, 1 = On) and the sensor bias value
    from ``powerState``, then reports an error if the two cards disagree
    on LV or HV status.
    '''
    card_count = self.app_main.pwr_card.numPowerCards
    for card in range(card_count):
        suffix = str(card)
        self.lvState[card] = powerState['asicPowerEnable' + suffix]
        self.hvState[card] = powerState['sensorBiasEnable' + suffix]
        self.biasState[card] = powerState['sensorBias' + suffix]
    if self.lvState[0] != self.lvState[1]:
        self.msgPrint("LpdAsicTester Error: LV status mismatch between power card", bError=True)
    if self.hvState[0] != self.hvState[1]:
        self.msgPrint("LpdAsicTester Error: HV status mismatch between power card", bError=True)
def checkLeakageCurrent(self):
    '''
    Check leakage current by comparing the (train-averaged) first and last
    images of the capture, pixel by pixel.

    Pixels whose first-minus-last difference is below ``threshold`` are
    counted as unconnected. Emits the result arrays to the ASIC window.

    :returns: the number of unconnected pixels, or -1 on file/analysis error
    '''
    # Open datafile to find maximum train and image number
    self.train = 0
    self.image = 0
    # Check hdf5 filename exists before opening it
    if os.path.isfile(self.file_name):
        if not self.analyseFile():
            self.msgPrint("Error opening captured file: %s" % self.file_name, bError=True)
            return -1
        # Open again looking for the very last image (of the last train)
        self.train = self.maxTrainNumber
        self.image = self.maxImageNumber
        self.analyseFile()
    else:
        self.msgPrint("Analysis Error: File (%s) doesn't exist" % self.file_name, bError=True)
        return -1
    # Note unconnected in an array (to fill the second plot)
    unconnectedPixelsArray = np.zeros(32*128, dtype=np.uint16)
    unconnectedPixelsArray = np.reshape(unconnectedPixelsArray, (32, 128))
    # Note threshold in an array (to fill the third plot)
    thresholdPixelsArray = np.zeros(32*128, dtype=np.uint16)
    thresholdPixelsArray = np.reshape(thresholdPixelsArray, (32, 128))
    numImagesPerTrain = self.maxImageNumber+1    # Numbering begins from 0..
    (rowStart, colStart) = self.asicStartingRowColumn(self.moduleNumber)
    # Open the current data file, reading in the data but skipping the first train.
    # BUG FIX: the file handle was previously never closed; use a context
    # manager (consistent with analyseFile). The slice below materializes a
    # numpy array, so the data remains valid after the file is closed.
    with h5py.File(self.file_name, 'r') as leakageFile:
        leakageData = (leakageFile['/lpd/data/image'][numImagesPerTrain:, rowStart:rowStart+self.numRows, colStart:colStart+self.numCols] & 0xFFF)
    resultArray = np.zeros((numImagesPerTrain, self.numRows, self.numCols))
    # Work out the average of each image across the trains
    for trig in range(numImagesPerTrain):
        resultArray[trig, ::] = np.mean(leakageData[trig::numImagesPerTrain, ::], 0)
    self.firstImageAveraged = resultArray[0, :, :]
    self.lastImageAveraged = resultArray[numImagesPerTrain-1, :, :]
    # The first image generally contains higher values than the last
    difference = 0
    threshold = 300
    numUnconnectedPixels = 0
    for row in range(self.numRows):
        for column in range(self.numCols):
            difference = self.firstImageAveraged[row][column] - self.lastImageAveraged[row][column]
            unconnectedPixelsArray[row][column] = abs(difference)
            # If pixels differ by less than threshold, they are unconnected
            if difference < threshold:
                thresholdPixelsArray[row][column] = 1
                numUnconnectedPixels += 1
    # Signal hdf5 image (data)
    self.app_main.asic_window.dataSignal.emit(self.moduleData, unconnectedPixelsArray, thresholdPixelsArray, self.moduleDescription, self.moduleNumber, threshold, "Leakage Current")
    return numUnconnectedPixels
def checkOutOfRangePixels(self, train, image, miscDescription="", bSuppressPixelInfo=False):
    ''' Check self.file_name's pixels, image 0 for out of range pixels
        out of range being described as greater than 2 standard deviations

        train       -- train index to analyse (stored on self for analyseFile)
        image       -- image index within the train
        miscDescription    -- label forwarded to the faulty-pixel plot
        bSuppressPixelInfo -- if True, skip the per-row textual report

        Returns the number of deviated pixels, or -1 on file/analysis error.
    '''
    self.train = train
    self.image = image
    # Check hdf5 filename exist before opening it
    if os.path.isfile(self.file_name):
        if not self.analyseFile():
            self.msgPrint("Error opening captured file: %s" % self.file_name, bError=True)
            return -1
    else:
        self.msgPrint("Analysis Error: File (%s) doesn't exist" % self.file_name, bError=True)
        return -1
    # Check for bad pixel(s)
    deviatedPixels = self.testPixelsStandardDeviation()
    # Plot results
    self.plotFaultyPixels(deviatedPixels, miscDescription)
    numBadPixels = 0
    # Display bad pixels (unless suppressed)
    if not bSuppressPixelInfo:
        # deviatedPixels is assumed to be row-sorted (row, column) pairs —
        # the report below flushes the accumulated column list each time
        # the row changes, then once more after the loop for the last row.
        lastRow = 0
        badPixelsString = ""
        for pair in deviatedPixels:
            if lastRow != pair[0]:
                # Row changed: print the previous row's collected columns
                # (slice [:-2] trims the trailing ", ")
                self.msgPrint("Row %2d detected pixel(s) at column: %s" % (lastRow, badPixelsString[:-2]))
                lastRow = pair[0]
                badPixelsString = ""
            badPixelsString += str(pair[1]) + ", "
        numBadPixels = deviatedPixels.__len__()
        # If bad pixel(s) detected, display last row and number of bad pixels:
        if numBadPixels > 0:
            self.msgPrint("Row %2d detected pixel(s) at column: %s" % (lastRow, badPixelsString[:-2]))
            # Also report any dead columns alongside the pixel report
            self.checkTheColumns()
    return numBadPixels
def plotFaultyPixels(self, deviatedPixels, miscDescription):
    ''' Render a black/white module image flagging faulty pixel(s). '''
    # Mark each deviated (row, column) coordinate with a 1 on a 32 x 128 map
    faultyPixelsArray = np.zeros((32, 128), dtype=np.uint16)
    for row, column in deviatedPixels:
        faultyPixelsArray[row][column] = 1
    # Emit the image data to the ASIC window; the same mask fills both
    # auxiliary plot slots, and -1 marks "no threshold" for this plot type.
    self.app_main.asic_window.dataSignal.emit(self.moduleData, faultyPixelsArray, faultyPixelsArray, self.moduleDescription, self.moduleNumber, -1, miscDescription)
def analyseFile(self):
    ''' Open file, extracting one image along with meta data and calculate
        standard deviation and average.

        Reads self.train / self.image / self.file_name, fills in
        self.maxImageNumber, self.maxTrainNumber, self.moduleData,
        self.moduleStd and self.moduleAverage, and emits the selected
        image's identifiers to the GUI.

        Returns True on success, False on any error (reported via msgPrint).
    '''
    with h5py.File(self.file_name, 'r') as hdfFile:
        try:
            # Read in the train, image counter and timestamp arrays
            trainNumber = hdfFile['/lpd/data/trainNumber'][...]
            imageNumber = hdfFile['/lpd/data/imageNumber'][...]
            timeStamp = hdfFile['/lpd/data/timeStamp'][...]
            # Get max train and image number from the arrays
            self.maxImageNumber = np.amax(imageNumber)
            self.maxTrainNumber = np.amax(trainNumber)
            # Read in the metadata
            meta = hdfFile['/lpd/metadata']
            # Parse the readout configuration XML blob
            readoutConfig = LpdReadoutConfig(meta['readoutParamFile'][0])
            readoutParams = {}
            for (param, val) in readoutConfig.parameters():
                readoutParams[param] = val
            # Get number of trains from metadata
            # NOTE(review): numTrains/readoutParams are read but never
            # checked here — presumably kept for the metadata access side
            # effect; confirm before removing.
            numTrains = meta.attrs['numTrains']
            # Calculate image offset into array and range check
            imgOffset = (self.train * (self.maxImageNumber + 1)) + self.image
            # BUG FIX: imgOffset is a zero-based index, so an offset equal
            # to imageNumber.size is already out of range; the original '>'
            # comparison let it through and the reads below raised.
            if imgOffset >= imageNumber.size:
                self.msgPrint("Analysis Error: Requested image (%d) exceeds number of images available (%d)" \
                    % (imgOffset, imageNumber.size), bError=True)
                return False
            # Read in the image array
            image = hdfFile['/lpd/data/image']
            imageData = image[imgOffset,:,:] # Only read in the specified image
            # Determine row/col coordinates according to selected ASIC module
            (rowStart, colStart) = self.asicStartingRowColumn(self.moduleNumber)
            self.moduleData = imageData[rowStart:rowStart+self.numRows, colStart:colStart+self.numCols]
            self.moduleStd = np.std(self.moduleData)
            self.moduleAverage = np.mean(self.moduleData)
        except Exception as e:
            self.msgPrint("Analysis Error while processing file: %s" % e, bError=True)
            return False
    # Propagate the selected image's identifiers to the GUI
    self.app_main.asic_window.timeStampSignal.emit(timeStamp[imgOffset])
    self.app_main.asic_window.trainSignal.emit(trainNumber[imgOffset])
    self.app_main.asic_window.imageSignal.emit(imageNumber[imgOffset])
    return True
def checkTheColumns(self):
    ''' Report whether the module data contains dead column(s), or none. '''
    # Look for dead column(s)
    deadColumns = self.detectDeadColumns()
    numDeadColumns = deadColumns.__len__()
    if numDeadColumns > 0:
        self.msgPrint("There are %d dead column(s)" % numDeadColumns)
        # Which ASIC and ADC numbers do they correspond to?
        for column in deadColumns:
            (ASIC, ADC) = self.identifyAdcLocations(column)
            self.msgPrint("Dead column detected in ASIC: %1d ADC: %2d" % (ASIC, ADC))
    else:
        self.msgPrint("There are no dead columns")
def detectDeadColumns(self):
    ''' Check self.moduleData (32 x 128 ASIC) for dead column(s): a column
        is flagged when its average differs from the module average by more
        than 2 standard deviations.
        Returns a (possibly empty) list containing column number(s). '''
    deadColumns = []
    numRows = self.moduleData.shape[0]
    for column in range(self.moduleData.shape[1]):
        # Average the column over however many rows the data actually has;
        # the original summed shape[0] rows but always divided by a
        # hard-coded 32.0, which silently breaks for non-32-row data.
        columnAverage = float(np.sum(self.moduleData[:, column])) / numRows
        # Does the deviation exceed 2 standard deviations?
        if abs(columnAverage - self.moduleAverage) > (2 * self.moduleStd):
            deadColumns.append(column)
    return deadColumns
def identifyAdcLocations(self, column):
    ''' Convert column number into ASIC, ADC location
        Column  ASIC    ADCs
        0-15    0       0-15
        16-31   1       0-15
        32-47   2       0-15
        etc
    '''
    # Sixteen consecutive columns map onto one ASIC; anything outside the
    # 0-127 range yields ASIC -1 (unknown), exactly as the elif ladder
    # this replaces did.
    if 0 <= column < 128:
        asicNum = column // 16
    else:
        asicNum = -1
    # The ADC number is simply the position within the 16-column group
    adcNum = column % 16
    return (asicNum, adcNum)
def checkPixelAgainstStd(self, row, col):
    ''' Is 'pixel' outside +/- 2 standard deviations? '''
    # Absolute distance from the module average replaces the two-branch
    # subtraction of the original implementation.
    pixel = self.moduleData[row][col]
    deviation = abs(pixel - self.moduleAverage)
    # Outside two sigma counts as a different (suspect) pixel
    return bool(deviation > (2 * self.moduleStd))
def asicStartingRowColumn(self, module):
''' Determining upper left corner's row/col coordinates according to selected ASIC module '''
(row, column) = (-1, -1)
if module == 0: (row, column) = (0, 128) # ASIC module #1
| |
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
    """Append this parameter's matrix to vector_list as a flat 1-D copy."""
    flattened = var_dict[self.name].flatten()
    vector_list.append(flattened)
    return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
    """Consume n*n entries from the flat vector, restoring the n x n matrix.

    Returns the vector index just past the consumed entries.
    """
    n = kwargs[self.dim_names[0]]
    num_entries = n ** 2
    var_dict[self.name] = np.reshape(
        vector[vector_index:vector_index + num_entries], (n, n))
    return vector_index + num_entries
def get_properties(self):
    """Build the property objects exposing this parameter and its dims."""
    # Main accessor: documented as an n-by-n matrix named after the parameter
    doc_text = "{0} is a {1} by {1} matrix".format(self.name, self.dim_names[0])
    properties = {
        self.name: property(
            fget=get_value_func(self.name),
            fset=set_value_func(self.name),
            doc=doc_text,
        )
    }
    # Read-only accessors for each dimension name
    for dim_name in self.dim_names:
        properties[dim_name] = property(fget=get_dim_func(dim_name))
    return properties
class SquareMatrixPriorHelper(PriorHelper):
    """Prior helper for a single square matrix parameter A.

    The prior is matrix normal, A ~ MN(mean_A, Q, diag(var_col_A)), where
    the row covariance Q comes from a companion variance parameter (named
    by var_row_name) when one is supplied, and is the identity otherwise.
    """
    def __init__(self, name='A', dim_names=None, var_row_name=None):
        self.name = name
        self._mean_name = 'mean_{0}'.format(name)
        self._var_col_name = 'var_col_{0}'.format(name)
        self._var_row_name = var_row_name
        # Vectorised lower-triangular Cholesky factor of the row precision
        self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
        self.dim_names = ['n'] if dim_names is None else dim_names
        return

    def set_hyperparams(self, prior, **kwargs):
        """Validate and store the mean and column-variance hyperparameters.

        Raises ValueError when either hyperparameter is missing, the mean
        is not square, or the dimensions disagree.
        """
        if self._mean_name in kwargs:
            n, n2 = np.shape(kwargs[self._mean_name])
        else:
            raise ValueError("{} must be provided".format(self._mean_name))
        if self._var_col_name in kwargs:
            n3 = np.size(kwargs[self._var_col_name])
        else:
            raise ValueError("{} must be provided".format(self._var_col_name))
        if n != n2:
            raise ValueError("{} must be square".format(self._mean_name))
        if n != n3:
            raise ValueError("prior dimensions don't match")
        prior._set_check_dim(**{self.dim_names[0]: n})
        prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
        prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
        return

    def _row_precision(self, prior, var_dict):
        """Return the row precision Qinv (jittered L L^T, or the identity).

        Shared by sample_prior and sample_posterior; raises ValueError when
        the row-variance Cholesky vector has not been sampled yet.
        """
        if self._var_row_name is None:
            return np.eye(prior.dim[self.dim_names[0]])
        if self._lt_vec_name not in var_dict:
            raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                "Perhaps {} must be earlier in _prior_helper_list".format(
                    self._var_row_name)
                )
        LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
        # Small jitter keeps the precision numerically positive definite
        return LQinv.dot(LQinv.T) + 1e-9*np.eye(prior.dim[self.dim_names[0]])

    def sample_prior(self, prior, var_dict, **kwargs):
        """Draw A from its matrix-normal prior into var_dict."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        Qinv = self._row_precision(prior, var_dict)
        var_dict[self.name] = scipy.stats.matrix_normal(
                mean=mean_A,
                rowcov=pos_def_mat_inv(Qinv),
                colcov=np.diag(var_col_A),
                ).rvs()
        return

    def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
        """Draw A from its matrix-normal conditional posterior.

        BUG FIX: the original no-row-variance branch evaluated
        np.eye(self.prior[...]), which raised AttributeError (the helper
        has no `prior` attribute); the identity is now built from
        prior.dim, matching sample_prior.
        """
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        Qinv = self._row_precision(prior, var_dict)
        # Conjugate update of the Gaussian sufficient statistics
        S_prevprev = np.diag(var_col_A**-1) + \
                sufficient_stat[self.name]['S_prevprev']
        S_curprev = mean_A * var_col_A**-1 + \
                sufficient_stat[self.name]['S_curprev']
        var_dict[self.name] = scipy.stats.matrix_normal(
                mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
                rowcov=pos_def_mat_inv(Qinv),
                colcov=pos_def_mat_inv(S_prevprev),
                ).rvs()
        return

    def logprior(self, prior, logprior, parameters, **kwargs):
        """Add the matrix-normal log-density of A to logprior and return it."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            LQinv = tril_vector_to_mat(parameters.var_dict[self._lt_vec_name])
        else:
            LQinv = np.eye(prior.dim[self.dim_names[0]])
        logprior += matrix_normal_logpdf(parameters.var_dict[self.name],
                mean=mean_A,
                Lrowprec=LQinv,
                Lcolprec=np.diag(var_col_A**-0.5),
                )
        return logprior

    def grad_logprior(self, prior, grad, parameters, **kwargs):
        """Store d(logprior)/dA in grad[self.name]."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        A = getattr(parameters, self.name)
        if self._var_row_name is not None:
            Qinv = getattr(parameters, '{}inv'.format(self._var_row_name))
        else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        grad[self.name] = -1.0 * np.dot(Qinv, A - mean_A) * var_col_A**-1
        return

    def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
        """Build prior hyperparameters centred at zero (or at A itself)."""
        var = kwargs['var']
        A = getattr(parameters, self.name)
        if kwargs.get('from_mean', False):
            mean_A = A.copy()
        else:
            mean_A = np.zeros_like(A)
        var_col_A = np.ones(A.shape[0])*var
        prior_kwargs[self._mean_name] = mean_A
        prior_kwargs[self._var_col_name] = var_col_A
        return

    def get_default_kwargs(self, default_kwargs, **kwargs):
        """Default hyperparameters: zero mean, constant column variance."""
        n = kwargs[self.dim_names[0]]
        var = kwargs['var']
        default_kwargs[self._mean_name] = np.zeros((n,n))
        default_kwargs[self._var_col_name] = np.ones(n)*var
        return
class SquareMatrixPrecondHelper(PrecondHelper):
    """Preconditioner helper for a single square matrix parameter."""
    def __init__(self, name='A', dim_names=None, var_row_name='Q'):
        self.name = name
        self._var_row_name = var_row_name
        self.dim_names = ['n'] if dim_names is None else dim_names
        return

    def precondition(self, preconditioner,
            precond_grad, grad, parameters, **kwargs):
        """Left-multiply the gradient by the row covariance Q."""
        row_cov = getattr(parameters, self._var_row_name)
        precond_grad[self.name] = row_cov.dot(grad[self.name])
        return

    def precondition_noise(self, preconditioner,
            noise, parameters, **kwargs):
        """Draw preconditioned Gaussian noise by solving against LQinv^T."""
        chol_prec = getattr(parameters, "L{}inv".format(self._var_row_name))
        raw_noise = np.random.normal(loc=0, size=chol_prec.shape)
        noise[self.name] = np.linalg.solve(chol_prec.T, raw_noise)
        return

    def correction_term(self, preconditioner, correction, parameters, **kwargs):
        """The SGLD correction term is identically zero for this parameter."""
        correction[self.name] = np.zeros_like(getattr(parameters, self.name),
                dtype=float)
        return
# Multiple Square
class SquareMatricesParamHelper(ParamHelper):
    """Parameter helper for a stack of num_states square n-by-n matrices."""
    def __init__(self, name='A', dim_names=None):
        self.name = name
        self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
        return

    def set_var(self, param, **kwargs):
        """Validate kwargs[name] as (num_states, n, n) and store it as float."""
        if self.name not in kwargs:
            raise ValueError("{} not provided".format(self.name))
        num_states, n, n2 = np.shape(kwargs[self.name])
        if n != n2:
            raise ValueError("{} must be square matrices".format(self.name))
        param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
        param._set_check_dim(**{self.dim_names[0]: n,
                                self.dim_names[1]: num_states})
        return

    def project_parameters(self, param, **kwargs):
        """Optionally project each matrix to stability / pin to a fixed value."""
        name_kwargs = kwargs.get(self.name, {})
        if name_kwargs.get('thresh', True):
            A = param.var_dict[self.name]
            cutoff = name_kwargs.get('eigenvalue_cutoff', 0.9999)
            for idx in range(len(A)):
                # Shrink eigenvalues so each state's matrix stays stable
                A[idx] = varp_stability_projection(A[idx],
                        eigenvalue_cutoff=cutoff,
                        var_name=self.name,
                        logger=logger)
            param.var_dict[self.name] = A
        if name_kwargs.get('fixed') is not None:
            param.var_dict[self.name] = name_kwargs['fixed'].copy()
        return

    def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
        """Append the flattened stack of matrices."""
        flattened = var_dict[self.name].flatten()
        vector_list.append(flattened)
        return

    def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
        """Restore the (num_states, n, n) stack from the flat vector."""
        n = kwargs[self.dim_names[0]]
        num_states = kwargs[self.dim_names[1]]
        block_size = num_states * n ** 2
        var_dict[self.name] = np.reshape(
            vector[vector_index:vector_index + block_size],
            (num_states, n, n))
        return vector_index + block_size

    def get_properties(self):
        """Build the property objects exposing this parameter and its dims."""
        doc_text = "{0} is a {2} of {1} by {1} matrices".format(
            self.name, self.dim_names[0], self.dim_names[1])
        properties = {
            self.name: property(
                fget=get_value_func(self.name),
                fset=set_value_func(self.name),
                doc=doc_text,
            )
        }
        for dim_name in self.dim_names:
            properties[dim_name] = property(fget=get_dim_func(dim_name))
        return properties
class SquareMatricesPriorHelper(PriorHelper):
    """Prior helper for a stack of num_states square matrices A[k].

    Each A[k] has an independent matrix-normal prior
    MN(mean_A[k], Q[k], diag(var_col_A[k])); the row covariances come from
    a companion variance parameter (var_row_name) when supplied, and are
    identities otherwise.
    """
    def __init__(self, name='A', dim_names=None, var_row_name=None):
        self.name = name
        self._mean_name = 'mean_{0}'.format(name)
        self._var_col_name = 'var_col_{0}'.format(name)
        self._var_row_name = var_row_name
        # Vectorised lower-triangular Cholesky factors of the row precisions
        self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
        self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
        return

    def set_hyperparams(self, prior, **kwargs):
        """Validate and store mean (num_states, n, n) and column variances
        (num_states, n); raises ValueError on missing or mismatched input."""
        if self._mean_name in kwargs:
            num_states, n, n2 = np.shape(kwargs[self._mean_name])
        else:
            raise ValueError("{} must be provided".format(self._mean_name))
        if self._var_col_name in kwargs:
            num_states2, n3 = np.shape(kwargs[self._var_col_name])
        else:
            raise ValueError("{} must be provided".format(self._var_col_name))
        if n != n2:
            raise ValueError("{} must be square".format(self._mean_name))
        if (n != n3) or (num_states != num_states2):
            raise ValueError("prior dimensions don't match")
        prior._set_check_dim(**{self.dim_names[0]: n,
                                self.dim_names[1]: num_states})
        prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
        prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
        return

    def _row_precisions(self, prior, var_dict):
        """Return Qinv for every state (jittered L L^T, or identities).

        Shared by sample_prior and sample_posterior; raises ValueError when
        the row-variance Cholesky vectors have not been sampled yet.
        """
        n = prior.dim[self.dim_names[0]]
        num_states = prior.dim[self.dim_names[1]]
        if self._var_row_name is None:
            return np.array([np.eye(n) for _ in range(num_states)])
        if self._lt_vec_name not in var_dict:
            raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                "Perhaps {} must be earlier in _prior_helper_list".format(
                    self._var_row_name)
                )
        LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
            for LQinv_vec_k in var_dict[self._lt_vec_name]])
        # Small jitter keeps each precision numerically positive definite
        return np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
            for LQinv_k in LQinvs])

    def sample_prior(self, prior, var_dict, **kwargs):
        """Draw every A[k] from its matrix-normal prior into var_dict."""
        num_states = prior.dim[self.dim_names[1]]
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        Qinvs = self._row_precisions(prior, var_dict)
        As = [scipy.stats.matrix_normal(
                mean=mean_A[k],
                rowcov=pos_def_mat_inv(Qinvs[k]),
                colcov=np.diag(var_col_A[k]),
                ).rvs()
            for k in range(num_states)]
        var_dict[self.name] = np.array(As)
        return

    def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
        """Draw every A[k] from its matrix-normal conditional posterior."""
        num_states = prior.dim[self.dim_names[1]]
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        Qinvs = self._row_precisions(prior, var_dict)
        As = [None for k in range(num_states)]
        for k in range(num_states):
            # Conjugate update of the Gaussian sufficient statistics
            S_prevprev = np.diag(var_col_A[k]**-1) + \
                    sufficient_stat[self.name]['S_prevprev'][k]
            S_curprev = mean_A[k] * var_col_A[k]**-1 + \
                    sufficient_stat[self.name]['S_curprev'][k]
            As[k] = scipy.stats.matrix_normal(
                    mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
                    rowcov=pos_def_mat_inv(Qinvs[k]),
                    colcov=pos_def_mat_inv(S_prevprev),
                    ).rvs()
        var_dict[self.name] = np.array(As)
        return

    def logprior(self, prior, logprior, parameters, **kwargs):
        """Add each A[k]'s matrix-normal log-density to logprior."""
        n = prior.dim[self.dim_names[0]]
        num_states = prior.dim[self.dim_names[1]]
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            LQinv_vec = getattr(parameters, self._lt_vec_name)
            LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                for LQinv_vec_k in LQinv_vec])
        else:
            LQinvs = np.array([np.eye(n) for _ in range(num_states)])
        for A_k, mean_A_k, var_col_A_k, LQinv_k in zip(
                parameters.var_dict[self.name], mean_A, var_col_A, LQinvs):
            logprior += matrix_normal_logpdf(A_k,
                    mean=mean_A_k,
                    Lrowprec=LQinv_k,
                    Lcolprec=np.diag(var_col_A_k**-0.5),
                    )
        return logprior

    def grad_logprior(self, prior, grad, parameters, **kwargs):
        """Store d(logprior)/dA[k] for every state in grad[self.name].

        BUG FIX: the identity fallback iterated the integer
        prior.dim[num_states] directly (`for _ in prior.dim[...]`), which
        raised TypeError; it now iterates range(num_states).
        """
        num_states = prior.dim[self.dim_names[1]]
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        A = getattr(parameters, self.name)
        if self._var_row_name is not None:
            Qinvs = getattr(parameters, '{}inv'.format(self._var_row_name))
        else:
            Qinvs = np.array([np.eye(prior.dim[self.dim_names[0]])
                for _ in range(num_states)])
        grad[self.name] = np.array([
            -1.0 * np.dot(Qinvs[k], A[k] - mean_A[k]) * var_col_A[k]**-1
            for k in range(num_states)
            ])
        return

    def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
        """Build prior hyperparameters centred at zero (or at A itself).

        BUG FIX: the per-state column variance previously had length
        A.shape[0] (num_states); it must have length n (A.shape[-1]) to
        satisfy set_hyperparams when n != num_states.
        """
        var = kwargs['var']
        A = getattr(parameters, self.name)
        if kwargs.get('from_mean', False):
            mean_A = A.copy()
        else:
            mean_A = np.zeros_like(A)
        var_col_A = np.array([
            np.ones(A.shape[-1])*var for _ in range(A.shape[0])
            ])
        prior_kwargs[self._mean_name] = mean_A
        prior_kwargs[self._var_col_name] = var_col_A
        return

    def get_default_kwargs(self, default_kwargs, **kwargs):
        """Default hyperparameters: zero means, constant column variance."""
        n = kwargs[self.dim_names[0]]
        num_states = kwargs[self.dim_names[1]]
        var = kwargs['var']
        default_kwargs[self._mean_name] = np.zeros((num_states, n,n))
        default_kwargs[self._var_col_name] = np.ones((num_states,n))*var
        return
class SquareMatricesPrecondHelper(PrecondHelper):
    """Preconditioner helper for a stack of square matrix parameters."""
    def __init__(self, name='A', dim_names=None, var_row_name='Q'):
        self.name = name
        self._var_row_name = var_row_name
        self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
        return

    def precondition(self, preconditioner,
            precond_grad, grad, parameters, **kwargs):
        """Scale each state's gradient by its row covariance Q[k]."""
        row_covs = getattr(parameters, self._var_row_name)
        scaled = [row_covs[k].dot(grad[self.name][k])
                  for k in range(row_covs.shape[0])]
        precond_grad[self.name] = np.array(scaled)
        return

    def precondition_noise(self, preconditioner,
            noise, parameters, **kwargs):
        """Draw preconditioned Gaussian noise state by state."""
        chol_precs = getattr(parameters, "L{}inv".format(self._var_row_name))
        draws = []
        for k in range(chol_precs.shape[0]):
            raw = np.random.normal(loc=0, size=chol_precs[k].shape)
            draws.append(np.linalg.solve(chol_precs[k].T, raw))
        noise[self.name] = np.array(draws)
        return

    def correction_term(self, preconditioner, correction, parameters, **kwargs):
        """The SGLD correction term is identically zero for these parameters."""
        correction[self.name] = np.zeros_like(getattr(parameters, self.name),
                dtype=float)
        return
# Single Rectangular (m by n)
class RectMatrixParamHelper(ParamHelper):
def __init__(self, name='A', dim_names=None):
    # Parameter name and the dimension labels (rows 'm', columns 'n')
    self.name = name
    self.dim_names = ['m','n'] if dim_names is None else dim_names
    return
def set_var(self, param, **kwargs):
    """Store kwargs[name] as an m-by-n float matrix and record its dims."""
    if self.name not in kwargs:
        raise ValueError("{} not provided".format(self.name))
    m, n = np.shape(kwargs[self.name])
    param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
    param._set_check_dim(**{
        self.dim_names[0]: m,
        self.dim_names[1]: n,
    })
    return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('thresh', False):
A = param.var_dict[self.name]
A = varp_stability_projection(A,
eigenvalue_cutoff=name_kwargs.get(
'eigenvalue_cutoff', 0.9999),
var_name=self.name,
logger=logger)
param.var_dict[self.name] = A
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
if name_kwargs.get('fixed_eye', False):
k = min(param.dim[self.dim_names[0]], param.dim[self.dim_names[1]])
A = param.var_dict[self.name]
A[0:k, 0:k] = np.eye(k)
param.var_dict[self.name] | |
# <gh_stars>0  (stray scraper token; commented out so the module parses)
import gzip
import random
import re
import shutil
import statistics
import subprocess
from distutils.spawn import find_executable
"""
Run doctests:
python3 -m doctest gplib.py
"""
#######################################################################
def graphprot_predictions_get_median(predictions_file):
    """
    Given a GraphProt .predictions file, read in site scores and return
    the median value.

    Each line is expected to be tab-separated with the site score in
    column 3.

    >>> test_file = "test-data/test.predictions"
    >>> graphprot_predictions_get_median(test_file)
    0.571673
    """
    # Site scores list.
    sc_list = []
    with open(predictions_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            sc_list.append(float(cols[2]))
    # Defect fixed: the original called f.close() *inside* the with block;
    # the context manager already closes the handle.
    # statistics.median raises StatisticsError on an empty file, which is
    # the desired failure mode for malformed input.
    return statistics.median(sc_list)
#######################################################################
def graphprot_profile_get_tsm(
    profile_file, profile_type="profile", avg_profile_extlr=5
):
    """
    Given a GraphProt .profile file, extract for each site (identified by
    column 1 ID) the top (= highest) score. Then return the median of these
    top scores.

    profile_type can be either "profile" or "avg_profile".
    "avg_profile" means that the position-wise scores will first get smoothed
    out by calculating for each position a new score through taking a
    sequence window -avg_profile_extlr to +avg_profile_extlr of the position
    and calculate the mean score over this window and assign it to the
    position. After that, the maximum score of each site is chosen, and the
    median over all maximum scores is returned.
    "profile" leaves the position-wise scores as they are, directly extracting
    the maximum for each site and then reporting the median.

    >>> test_file = "test-data/test.profile"
    >>> graphprot_profile_get_tsm(test_file)
    3.2
    """
    # Collect the position-wise scores of each site
    # (column 1 = site ID, column 3 = score).
    lists_dic = {}
    with open(profile_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            seq_id = cols[0]
            score = float(cols[2])
            # setdefault replaces the original insert-if-missing dance;
            # the redundant f.close() inside the with block is also gone.
            lists_dic.setdefault(seq_id, []).append(score)
    # For each site, extract maximum and store in new list.
    max_list = []
    for seq_id in lists_dic:
        if profile_type == "profile":
            max_list.append(max(lists_dic[seq_id]))
        elif profile_type == "avg_profile":
            # Convert profile score list to average profile scores list.
            aps_list = list_moving_window_average_values(
                lists_dic[seq_id], win_extlr=avg_profile_extlr
            )
            max_list.append(max(aps_list))
        else:
            assert 0, 'invalid profile_type argument given: "%s"' % (profile_type)
    # Return the median.
    return statistics.median(max_list)
#######################################################################
def list_moving_window_average_values(in_list, win_extlr=5, method=1):
    """
    Take a list of numeric values, and calculate for each position a new value,
    by taking the mean value of the window of positions -win_extlr and
    +win_extlr. If full extension is not possible (at list ends), it just
    takes what it gets.
    Two implementations of the task are given, chose by method=1 or method=2.

    >>> test_list = [2, 3, 5, 8, 4, 3, 7, 1]
    >>> list_moving_window_average_values(test_list, win_extlr=2, method=1)
    [3.3333333333333335, 4.5, 4.4, 4.6, 5.4, 4.6, 3.75, 3.6666666666666665]
    >>> list_moving_window_average_values(test_list, win_extlr=2, method=2)
    [3.3333333333333335, 4.5, 4.4, 4.6, 5.4, 4.6, 3.75, 3.6666666666666665]
    """
    l_list = len(in_list)
    assert l_list, "Given list is empty"
    new_list = [0] * l_list
    if win_extlr == 0:
        # BUG FIX: the original returned l_list (the list *length*) here.
        # A zero-extension window means every value averages only itself,
        # so return the scores unchanged (as a copy).
        return list(in_list)
    if method == 1:
        for i in range(l_list):
            # Clamp the window to the list boundaries
            s = max(i - win_extlr, 0)
            e = min(i + win_extlr + 1, l_list)
            # Extract portion and assign value to new list.
            new_list[i] = statistics.mean(in_list[s:e])
    elif method == 2:
        for i in range(l_list):
            s = max(i - win_extlr, 0)
            e = min(i + win_extlr + 1, l_list)
            # Manual mean over the clamped window
            ln = e - s
            sc_sum = 0
            for j in range(ln):
                sc_sum += in_list[s + j]
            new_list[i] = sc_sum / ln
    else:
        assert 0, "invalid method ID given (%i)" % (method)
    return new_list
#######################################################################
def echo_add_to_file(echo_string, out_file):
    """
    Append a string (plus a trailing newline) to a file.

    The original implementation shelled out to `echo "..." >> file`, which
    is vulnerable to shell injection and breaks on strings containing
    double quotes or shell metacharacters; appending directly from Python
    writes the same bytes for plain strings and is safe for all inputs.
    """
    with open(out_file, "a") as f:
        f.write(echo_string + "\n")
#######################################################################
def is_tool(name):
    """Check whether tool "name" is in PATH.

    Uses shutil.which instead of distutils.spawn.find_executable: distutils
    is deprecated since Python 3.10 and removed in 3.12.
    """
    return shutil.which(name) is not None
#######################################################################
def count_fasta_headers(fasta_file):
    """
    Count number of FASTA headers in fasta_file.

    Counts lines starting with '>' in pure Python instead of shelling out
    to `grep -c ">"`, removing the external-tool dependency and the shell
    quoting of the path. NOTE(review): grep counted lines containing '>'
    anywhere; for well-formed FASTA (headers start with '>') the result is
    identical.

    >>> test_file = "test-data/test.fa"
    >>> count_fasta_headers(test_file)
    2
    >>> test_file = "test-data/empty_file"
    >>> count_fasta_headers(test_file)
    0
    """
    row_count = 0
    with open(fasta_file) as f:
        for line in f:
            if line.startswith(">"):
                row_count += 1
    return row_count
#######################################################################
def make_file_copy(in_file, out_file):
    """
    Make a file copy by copying in_file to out_file.

    shutil.copyfile replaces the original `cat in > out` shell round trip
    (no path quoting issues, shell dependency, or output scraping needed).
    """
    assert in_file != out_file, \
        "cannot copy file onto itself (in_file == out_file == %s)" % (in_file)
    shutil.copyfile(in_file, out_file)
#######################################################################
def split_fasta_into_test_train_files(
    in_fasta, test_out_fa, train_out_fa, test_size=500
):
    """
    Split in_fasta .fa file into two files (e.g. test, train).

    The first test_size sequences (in random order) go to test_out_fa,
    the remainder to train_out_fa.
    """
    # Read in in_fasta.
    seqs_dic = read_fasta_into_dic(in_fasta)
    # Shuffle IDs so the split is random.
    rand_ids_list = random_order_dic_keys_into_list(seqs_dic)
    c_out = 0
    # `with` guarantees both handles are closed even if a write fails
    # (the original used bare open()/close() pairs).
    with open(test_out_fa, "w") as test_out, open(train_out_fa, "w") as train_out:
        for seq_id in rand_ids_list:
            seq = seqs_dic[seq_id]
            if c_out >= test_size:
                train_out.write(">%s\n%s\n" % (seq_id, seq))
            else:
                test_out.write(">%s\n%s\n" % (seq_id, seq))
            c_out += 1
#######################################################################
def check_seqs_dic_format(seqs_dic):
    """
    Check sequence dictionary for lowercase-only sequences or sequences
    wich have lowercase nts in between uppercase nts.
    Return suspicious IDs as list or empty list if not hits.
    IDs with lowercase-only sequences.

    >>> seqs_dic = {"id1" : "acguACGU", "id2" : "acgua", "id3" : "acgUUaUcc"}
    >>> check_seqs_dic_format(seqs_dic)
    ['id2', 'id3']
    >>> seqs_dic = {"id1" : "acgAUaa", "id2" : "ACGUACUA"}
    >>> check_seqs_dic_format(seqs_dic)
    []
    """
    assert seqs_dic, "given seqs_dic empty"
    bad_seq_ids = []
    for seq_id, seq in seqs_dic.items():
        # Entirely lowercase sequence
        if re.search("^[acgtun]+$", seq):
            bad_seq_ids.append(seq_id)
        # Lowercase island between uppercase nucleotides
        if re.search("[ACGTUN][acgtun]+[ACGTUN]", seq):
            bad_seq_ids.append(seq_id)
    return bad_seq_ids
#######################################################################
def read_fasta_into_dic(
    fasta_file,
    seqs_dic=False,
    ids_dic=False,
    read_dna=False,
    short_ensembl=False,
    reject_lc=False,
    convert_to_uc=False,
    skip_n_seqs=True,
):
    """
    Read in FASTA sequences, convert to RNA, store in dictionary
    and return dictionary.

    fasta_file    -- path to FASTA file (plain or .gz)
    seqs_dic      -- optionally pass an existing dict to extend
    ids_dic       -- if given, keep only sequences whose ID is a key
    read_dna      -- store DNA (U->T) instead of RNA (T->U)
    short_ensembl -- truncate ENSEMBL-style headers at the first '.'
    reject_lc     -- assert that sequences contain no lowercase characters
    convert_to_uc -- uppercase the stored sequences
    skip_n_seqs   -- drop sequences containing N nucleotides

    >>> test_fasta = "test-data/test.fa"
    >>> read_fasta_into_dic(test_fasta)
    {'seq1': 'acguACGU', 'seq2': 'ugcaUGCAugcaACGUacgu'}
    >>> test_fasta = "test-data/test2.fa"
    >>> read_fasta_into_dic(test_fasta)
    {}
    >>> test_fasta = "test-data/test.ensembl.fa"
    >>> read_fasta_into_dic(test_fasta, read_dna=True, short_ensembl=True)
    {'ENST00000415118': 'GAAATAGT', 'ENST00000448914': 'ACTGGGGGATACGAAAA'}
    >>> test_fasta = "test-data/test4.fa"
    >>> read_fasta_into_dic(test_fasta)
    {'1': 'gccuAUGUuuua', '2': 'cugaAACUaugu'}
    """
    if not seqs_dic:
        seqs_dic = {}
    seq_id = ""
    seq = ""
    # Go through FASTA file, extract sequences.
    # Transparently support gzipped FASTA.
    if re.search(r".+\.gz$", fasta_file):
        f = gzip.open(fasta_file, "rt")
    else:
        f = open(fasta_file, "r")
    for line in f:
        if re.search(">.+", line):
            m = re.search(">(.+)", line)
            seq_id = m.group(1)
            # If there is a ".", take only first part of header.
            # This assumes ENSEMBL header format ">ENST00000631435.1 cdna ..."
            if short_ensembl:
                if re.search(r".+\..+", seq_id):
                    m = re.search(r"(.+?)\..+", seq_id)
                    seq_id = m.group(1)
            assert seq_id not in seqs_dic, 'non-unique FASTA header "%s" in "%s"' % (
                seq_id,
                fasta_file,
            )
            if ids_dic:
                if seq_id in ids_dic:
                    seqs_dic[seq_id] = ""
            else:
                seqs_dic[seq_id] = ""
        elif re.search("[ACGTUN]+", line, re.I):
            if seq_id in seqs_dic:
                m = re.search("([ACGTUN]+)", line, re.I)
                seq = m.group(1)
                if reject_lc:
                    # BUG FIX: the original message used %i on the string
                    # seq_id, which raised TypeError whenever this fired.
                    assert not re.search(
                        "[a-z]", seq
                    ), 'lc char detected in seq "%s" (reject_lc=True)' % (seq_id)
                if convert_to_uc:
                    seq = seq.upper()
                # If sequences with N nucleotides should be skipped.
                # (Check the raw match so lowercase n is caught even after
                # uppercasing above.)
                if skip_n_seqs:
                    if "n" in m.group(1) or "N" in m.group(1):
                        print(
                            'WARNING: "%s" contains N. Discarding '
                            "sequence ... " % (seq_id)
                        )
                        del seqs_dic[seq_id]
                        continue
                # Convert to RNA (or DNA), concatenate sequence.
                # BUG FIX: the original appended the raw match m.group(1),
                # silently discarding the convert_to_uc uppercasing; the
                # processed `seq` is appended now.
                if read_dna:
                    seqs_dic[seq_id] += seq.replace("U", "T").replace("u", "t")
                else:
                    seqs_dic[seq_id] += seq.replace("T", "U").replace("t", "u")
    f.close()
    return seqs_dic
#######################################################################
def random_order_dic_keys_into_list(in_dic):
    """
    Return the dictionary's keys as a list in random order.
    """
    id_list = list(in_dic)
    random.shuffle(id_list)
    return id_list
#######################################################################
def graphprot_get_param_string(params_file):
    """
    Get parameter string from GraphProt .params file.

    >>> test_params = "test-data/test.params"
    >>> graphprot_get_param_string(test_params)
    '-epochs 20 -lambda 0.01 -R 1 -D 3 -bitsize 14 -onlyseq '
    """
    param_string = ""
    with open(params_file) as f:
        for line in f:
            cols = line.strip().split(" ")
            param = cols[0]
            setting = cols[1]
            # Guard clause replaces the original if/else around the whole body
            if not re.search(".+:", param):
                assert 0, 'pattern matching failed for string "%s"' % (param)
            m = re.search("(.+):", line)
            par = m.group(1)
            # Positive training set entries are not command-line parameters
            if re.search("pos_train.+", line):
                continue
            if par == "model_type":
                # Only the sequence model maps onto a flag
                if setting == "sequence":
                    param_string += "-onlyseq "
            else:
                param_string += "-%s %s " % (par, setting)
    return param_string
#######################################################################
def seqs_dic_count_uc_nts(seqs_dic):
"""
Count number of uppercase nucleotides in sequences stored in sequence
dictionary.
>>> seqs_dic = {'seq1': "acgtACGTacgt", 'seq2': 'acgtACacgt'}
>>> seqs_dic_count_uc_nts(seqs_dic)
6
>>> seqs_dic = {'seq1': "acgtacgt", 'seq2': 'acgtacgt'}
>>> seqs_dic_count_uc_nts(seqs_dic)
0
"""
assert seqs_dic, | |
the visibilities from frequency axis onto delay (time) axis
using an IFFT. This is performed for noiseless sky visibilities, thermal
noise in visibilities, and observed visibilities.
Inputs:
pad [scalar] Non-negative scalar indicating padding fraction
relative to the number of frequency channels. For e.g., a
pad of 1.0 pads the frequency axis with zeros of the same
width as the number of channels. After the delay transform,
the transformed visibilities are downsampled by a factor of
1+pad. If a negative value is specified, delay transform
will be performed with no padding
freq_wts [numpy vector or array] window shaping to be applied before
computing delay transform. It can either be a vector or size
equal to the number of channels (which will be applied to all
time instances for all baselines), or a nchan x n_snapshots
numpy array which will be applied to all baselines, or a
n_baselines x nchan numpy array which will be applied to all
timestamps, or a n_baselines x nchan x n_snapshots numpy
array. Default (None) will not apply windowing and only the
inherent bandpass will be used.
downsample [boolean] If set to True (default), the delay transform
quantities will be downsampled by exactly the same factor
that was used in padding. For instance, if pad is set to
1.0, the downsampling will be by a factor of 2. If set to
False, no downsampling will be done even if the original
quantities were padded
verbose [boolean] If set to True (default), print diagnostic and
progress messages. If set to False, no such messages are
printed.
------------------------------------------------------------------------
"""
if verbose:
print 'Preparing to compute delay transform...\n\tChecking input parameters for compatibility...'
if not isinstance(pad, (int, float)):
raise TypeError('pad fraction must be a scalar value.')
if pad < 0.0:
pad = 0.0
if verbose:
print '\tPad fraction found to be negative. Resetting to 0.0 (no padding will be applied).'
if freq_wts is not None:
if freq_wts.size == self.f.size:
freq_wts = NP.repeat(NP.expand_dims(NP.repeat(freq_wts.reshape(1,-1), self.ia.baselines.shape[0], axis=0), axis=2), self.n_acc, axis=2)
elif freq_wts.size == self.f.size * self.n_acc:
freq_wts = NP.repeat(NP.expand_dims(freq_wts.reshape(self.f.size, -1), axis=0), self.ia.baselines.shape[0], axis=0)
elif freq_wts.size == self.f.size * self.ia.baselines.shape[0]:
freq_wts = NP.repeat(NP.expand_dims(freq_wts.reshape(-1, self.f.size), axis=2), self.n_acc, axis=2)
elif freq_wts.size == self.f.size * self.ia.baselines.shape[0] * self.n_acc:
freq_wts = freq_wts.reshape(self.ia.baselines.shape[0], self.f.size, self.n_acc)
else:
raise ValueError('window shape dimensions incompatible with number of channels and/or number of tiemstamps.')
self.bp_wts = freq_wts
if verbose:
print '\tFrequency window weights assigned.'
if not isinstance(downsample, bool):
raise TypeError('Input downsample must be of boolean type')
if verbose:
print '\tInput parameters have been verified to be compatible.\n\tProceeding to compute delay transform.'
self.lags = DSP.spectral_axis(int(self.f.size*(1+pad)), delx=self.df, use_real=False, shift=True)
if pad == 0.0:
self.vis_lag = DSP.FT1D(self.ia.vis_freq * self.bp * self.bp_wts, ax=1, inverse=True, use_real=False, shift=True) * self.f.size * self.df
self.skyvis_lag = DSP.FT1D(self.ia.skyvis_freq * self.bp * self.bp_wts, ax=1, inverse=True, use_real=False, shift=True) * self.f.size * self.df
self.vis_noise_lag = DSP.FT1D(self.ia.vis_noise_freq * self.bp * self.bp_wts, ax=1, inverse=True, use_real=False, shift=True) * self.f.size * self.df
self.lag_kernel = DSP.FT1D(self.bp * self.bp_wts, ax=1, inverse=True, use_real=False, shift=True) * self.f.size * self.df
if verbose:
print '\tDelay transform computed without padding.'
else:
npad = int(self.f.size * pad)
self.vis_lag = DSP.FT1D(NP.pad(self.ia.vis_freq * self.bp * self.bp_wts, ((0,0),(0,npad),(0,0)), mode='constant'), ax=1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
self.skyvis_lag = DSP.FT1D(NP.pad(self.ia.skyvis_freq * self.bp * self.bp_wts, ((0,0),(0,npad),(0,0)), mode='constant'), ax=1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
self.vis_noise_lag = DSP.FT1D(NP.pad(self.ia.vis_noise_freq * self.bp * self.bp_wts, ((0,0),(0,npad),(0,0)), mode='constant'), ax=1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
self.lag_kernel = DSP.FT1D(NP.pad(self.bp * self.bp_wts, ((0,0),(0,npad),(0,0)), mode='constant'), ax=1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
if verbose:
print '\tDelay transform computed with padding fraction {0:.1f}'.format(pad)
if downsample:
self.vis_lag = DSP.downsampler(self.vis_lag, 1+pad, axis=1)
self.skyvis_lag = DSP.downsampler(self.skyvis_lag, 1+pad, axis=1)
self.vis_noise_lag = DSP.downsampler(self.vis_noise_lag, 1+pad, axis=1)
self.lag_kernel = DSP.downsampler(self.lag_kernel, 1+pad, axis=1)
self.lags = DSP.downsampler(self.lags, 1+pad)
self.lags = self.lags.flatten()
if verbose:
print '\tDelay transform products downsampled by factor of {0:.1f}'.format(1+pad)
print 'delay_transform() completed successfully.'
self.pad = pad
#############################################################################
def clean(self, pad=1.0, freq_wts=None, clean_window_buffer=1.0,
          verbose=True):
    """
    ------------------------------------------------------------------------
    TO BE DEPRECATED!!! USE MEMBER FUNCTION delayClean()

    Transforms the visibilities from frequency axis onto delay (time) axis
    using an IFFT and deconvolves the delay transform quantities along the
    delay axis. This is performed for noiseless sky visibilities, thermal
    noise in visibilities, and observed visibilities.

    Inputs:

    pad      [scalar] Non-negative scalar indicating padding fraction
             relative to the number of frequency channels. For e.g., a
             pad of 1.0 pads the frequency axis with zeros of the same
             width as the number of channels. If a negative value is
             specified, delay transform will be performed with no padding

    freq_wts [numpy vector or array] window shaping to be applied before
             computing delay transform. It can either be a vector or size
             equal to the number of channels (which will be applied to all
             time instances for all baselines), or a nchan x n_snapshots
             numpy array which will be applied to all baselines, or a
             n_baselines x nchan numpy array which will be applied to all
             timestamps, or a n_baselines x nchan x n_snapshots numpy
             array. Default (None) will not apply windowing and only the
             inherent bandpass will be used.

    clean_window_buffer
             [scalar] amount (in units of inverse bandwidth, since it is
             divided by bw below) by which the horizon delay limits are
             extended on both sides when defining the CLEAN window.

    verbose  [boolean] If set to True (default), print diagnostic and
             progress messages. If set to False, no such messages are
             printed.

    Raises TypeError if pad is not a scalar; ValueError if freq_wts has
    dimensions incompatible with the accepted shapes listed above.
    ------------------------------------------------------------------------
    """
    if not isinstance(pad, (int, float)):
        raise TypeError('pad fraction must be a scalar value.')
    if pad < 0.0:
        # Negative padding is silently clamped to "no padding".
        pad = 0.0
        if verbose:
            print '\tPad fraction found to be negative. Resetting to 0.0 (no padding will be applied).'
    if freq_wts is not None:
        # Broadcast the supplied window to shape
        # (n_baselines, nchan, n_snapshots), from whichever of the four
        # accepted input sizes was given.
        if freq_wts.size == self.f.size:
            freq_wts = NP.repeat(NP.expand_dims(NP.repeat(freq_wts.reshape(1,-1), self.ia.baselines.shape[0], axis=0), axis=2), self.n_acc, axis=2)
        elif freq_wts.size == self.f.size * self.n_acc:
            freq_wts = NP.repeat(NP.expand_dims(freq_wts.reshape(self.f.size, -1), axis=0), self.ia.baselines.shape[0], axis=0)
        elif freq_wts.size == self.f.size * self.ia.baselines.shape[0]:
            freq_wts = NP.repeat(NP.expand_dims(freq_wts.reshape(-1, self.f.size), axis=2), self.n_acc, axis=2)
        elif freq_wts.size == self.f.size * self.ia.baselines.shape[0] * self.n_acc:
            freq_wts = freq_wts.reshape(self.ia.baselines.shape[0], self.f.size, self.n_acc)
        else:
            raise ValueError('window shape dimensions incompatible with number of channels and/or number of tiemstamps.')
        self.bp_wts = freq_wts
        if verbose:
            print '\tFrequency window weights assigned.'
    bw = self.df * self.f.size
    pc = self.ia.phase_center
    pc_coords = self.ia.phase_center_coords
    # NOTE(review): pc_dircos is computed here but never used later in this
    # method — possibly leftover from delayClean(); confirm before removal.
    if pc_coords == 'hadec':
        pc_altaz = GEOM.hadec2altaz(pc, self.ia.latitude, units='degrees')
        pc_dircos = GEOM.altaz2dircos(pc_altaz, units='degrees')
    elif pc_coords == 'altaz':
        pc_dircos = GEOM.altaz2dircos(pc, units='degrees')
    npad = int(self.f.size * pad)
    # Delay axis in FFT (unshifted) order, since CLEAN below operates on
    # unshifted spectra; fftshift is applied to the stored results at the end.
    lags = DSP.spectral_axis(self.f.size + npad, delx=self.df, use_real=False, shift=False)
    dlag = lags[1] - lags[0]
    clean_area = NP.zeros(self.f.size + npad, dtype=int)
    # Zero-pad along the frequency axis (axis 1) and inverse-FFT to delay
    # space; the (npad + nchan) * df factor normalizes the delay spectrum.
    # NOTE(review): unlike skyvis_lag/vis_lag, lag_kernel is built from
    # self.bp only, without self.bp_wts — confirm this is intentional.
    skyvis_lag = (npad + self.f.size) * self.df * DSP.FT1D(NP.pad(self.ia.skyvis_freq*self.bp*self.bp_wts, ((0,0),(0,npad),(0,0)), mode='constant'), ax=1, inverse=True, use_real=False, shift=False)
    vis_lag = (npad + self.f.size) * self.df * DSP.FT1D(NP.pad(self.ia.vis_freq*self.bp*self.bp_wts, ((0,0),(0,npad),(0,0)), mode='constant'), ax=1, inverse=True, use_real=False, shift=False)
    lag_kernel = (npad + self.f.size) * self.df * DSP.FT1D(NP.pad(self.bp, ((0,0),(0,npad),(0,0)), mode='constant'), ax=1, inverse=True, use_real=False, shift=False)
    ccomponents_noiseless = NP.zeros_like(skyvis_lag)
    ccres_noiseless = NP.zeros_like(skyvis_lag)
    ccomponents_noisy = NP.zeros_like(vis_lag)
    ccres_noisy = NP.zeros_like(vis_lag)
    # CLEAN each baseline/snapshot independently, for both the noiseless
    # and the noisy visibilities, with the same kernel and CLEAN window.
    for snap_iter in xrange(self.n_acc):
        progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} Baselines '.format(self.ia.baselines.shape[0]), PGB.ETA()], maxval=self.ia.baselines.shape[0]).start()
        for bl_iter in xrange(self.ia.baselines.shape[0]):
            # Flag delays within this baseline's horizon limits (extended
            # by clean_window_buffer/bw on each side) as CLEANable.
            # NOTE(review): clean_area is never reset between iterations,
            # so it accumulates the union of windows over all previously
            # processed baselines/snapshots — confirm this is intended.
            clean_area[NP.logical_and(lags <= self.horizon_delay_limits[snap_iter,bl_iter,1]+clean_window_buffer/bw, lags >= self.horizon_delay_limits[snap_iter,bl_iter,0]-clean_window_buffer/bw)] = 1
            cc_noiseless, info_noiseless = _gentle_clean(skyvis_lag[bl_iter,:,snap_iter], lag_kernel[bl_iter,:,snap_iter], area=clean_area, stop_if_div=False, verbose=False, autoscale=True)
            ccomponents_noiseless[bl_iter,:,snap_iter] = cc_noiseless
            ccres_noiseless[bl_iter,:,snap_iter] = info_noiseless['res']
            cc_noisy, info_noisy = _gentle_clean(vis_lag[bl_iter,:,snap_iter], lag_kernel[bl_iter,:,snap_iter], area=clean_area, stop_if_div=False, verbose=False, autoscale=True)
            ccomponents_noisy[bl_iter,:,snap_iter] = cc_noisy
            ccres_noisy[bl_iter,:,snap_iter] = info_noisy['res']
            progress.update(bl_iter+1)
        progress.finish()
    # Forward-FFT the CLEAN components/residuals back to frequency space;
    # deta duplicates dlag computed above (same spacing).
    deta = lags[1] - lags[0]
    cc_skyvis = NP.fft.fft(ccomponents_noiseless, axis=1) * deta
    cc_skyvis_res = NP.fft.fft(ccres_noiseless, axis=1) * deta
    cc_vis = NP.fft.fft(ccomponents_noisy, axis=1) * deta
    cc_vis_res = NP.fft.fft(ccres_noisy, axis=1) * deta
    # Store delay-space products with the zero-delay bin centered.
    self.skyvis_lag = NP.fft.fftshift(skyvis_lag, axes=1)
    self.vis_lag = NP.fft.fftshift(vis_lag, axes=1)
    self.lag_kernel = NP.fft.fftshift(lag_kernel, axes=1)
    self.cc_skyvis_lag = NP.fft.fftshift(ccomponents_noiseless, axes=1)
    self.cc_skyvis_res_lag = NP.fft.fftshift(ccres_noiseless, axes=1)
    self.cc_vis_lag = NP.fft.fftshift(ccomponents_noisy, axes=1)
    self.cc_vis_res_lag = NP.fft.fftshift(ccres_noisy, axes=1)
    # Net quantities = CLEAN components + residuals.
    self.cc_skyvis_net_lag = self.cc_skyvis_lag + self.cc_skyvis_res_lag
    self.cc_vis_net_lag = self.cc_vis_lag + self.cc_vis_res_lag
    self.lags = NP.fft.fftshift(lags)
    self.cc_skyvis_freq = cc_skyvis
    self.cc_skyvis_res_freq = cc_skyvis_res
    self.cc_vis_freq = cc_vis
    self.cc_vis_res_freq = cc_vis_res
    self.cc_skyvis_net_freq = cc_skyvis + cc_skyvis_res
    self.cc_vis_net_freq = cc_vis + cc_vis_res
    self.clean_window_buffer = clean_window_buffer
#############################################################################
def delayClean(self, pad=1.0, freq_wts=None, clean_window_buffer=1.0,
gain=0.1, |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.