input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
import pickle
import random
import logging
import cv2
import torch
import numpy as np
from tqdm import tqdm, trange
from imgaug.augmenters import Resize
import os
from natsort import natsorted
import re
import imgaug.augmenters as iaa
from imgaug.augmentables.lines import LineString, LineStringsOnImage
from torchvision.transforms import ToTensor
from lib.lane import Lane
from PIL import Image
from torchvision import transforms
from scipy.interpolate import InterpolatedUnivariateSpline
from torchvision import utils
# class Lane:
# def __init__(self, points=None, invalid_value=-2., metadata=None):
# super(Lane, self).__init__()
# self.curr_iter = 0
# self.points = points
# self.invalid_value = invalid_value
# self.function = InterpolatedUnivariateSpline(points[:, 1], points[:, 0], k=min(3, len(points) - 1))
# self.min_y = points[:, 1].min() - 0.01
# self.max_y = points[:, 1].max() + 0.01
#
# self.metadata = metadata or {}
#
# def __repr__(self):
# return '[Lane]\n' + str(self.points) + '\n[/Lane]'
#
# def __call__(self, lane_ys):
# lane_xs = self.function(lane_ys)
#
# lane_xs[(lane_ys < self.min_y) | (lane_ys > self.max_y)] = self.invalid_value
# return lane_xs
#
# def __iter__(self):
# return self
#
# def __next__(self):
# if self.curr_iter < len(self.points):
# self.curr_iter += 1
# return self.points[self.curr_iter - 1]
# self.curr_iter = 0
# raise StopIteration
class Runner:
    def __init__(self, cfg, exp, device, test_dataset, test_first_dir, test_second_dir, exp_name, hyper, hyper_param,
                 video_name, root_path, webcam=False, resume=False, view=None, deterministic=False):
        """Store configuration, build the output/log directory, seed all RNGs
        and (when ``webcam`` is set) open the capture device.

        ``hyper_param`` is unpacked positionally:
            [0] confidence threshold, [1] NMS threshold,
            [2] NMS top-k, [3] dataset type.
        """
        self.cfg = cfg
        self.exp = exp
        self.device = device
        self.resume = resume
        self.view = view
        self.test_dataset = test_dataset
        self.test_first_dir = test_first_dir
        self.test_second_dir = test_second_dir
        self.logger = logging.getLogger(__name__)
        self.dataset_type = hyper_param[3]
        self.conf_threshold = hyper_param[0]
        self.nms_thres = hyper_param[1]
        self.nms_topk = hyper_param[2]
        self.root = root_path
        self.video_name = video_name
        self.hyper = hyper
        print(self.root)
        # Results land in <first_dir><second_dir><dataset>/<exp_name>/<hyper>/
        self.exp_name = "/{}/{}/".format(exp_name, self.hyper)
        self.name = test_first_dir + test_second_dir + test_dataset
        print(self.name)
        self.log_dir = self.name + self.exp_name  # os.path.join(self.name,self.exp_name)
        print(self.log_dir)
        os.makedirs(self.log_dir, exist_ok=True)
        # Fix seeds
        torch.manual_seed(cfg['seed'])
        np.random.seed(cfg['seed'])
        random.seed(cfg['seed'])
        if webcam:
            # Webcam frames are handled at a fixed 640x360 resolution.
            self.img_h = 360
            self.img_w = 640
        if webcam:  # NOTE(review): duplicate guard; could be merged with the one above
            self.vcap = cv2.VideoCapture(0)
            self.vcap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)  # capture width
            self.vcap.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)  # capture height
        if deterministic:
            # Trade cuDNN autotuning for reproducible kernels.
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
        self.to_tensor = ToTensor()
def _transform_annotations(self, idx):
self.logger.info("Transforming annotations to the model's target format...")
self.annotations = np.array(list(map(self.transform_annotation(idx), self._idx))) # datasets
self.logger.info('Done.')
def train(self):
self.exp.train_start_callback(self.cfg)
starting_epoch = 1
model = self.cfg.get_model()
model = model.to(self.device)
optimizer = self.cfg.get_optimizer(model.parameters())
scheduler = self.cfg.get_lr_scheduler(optimizer)
if self.resume:
last_epoch, model, optimizer, scheduler = self.exp.load_last_train_state(model, optimizer, scheduler)
starting_epoch = last_epoch + 1
max_epochs = self.cfg['epochs']
train_loader = self.get_train_dataloader()
loss_parameters = self.cfg.get_loss_parameters()
for epoch in trange(starting_epoch, max_epochs + 1, initial=starting_epoch - 1, total=max_epochs):
self.exp.epoch_start_callback(epoch, max_epochs)
model.eval()
pbar = tqdm(train_loader)
for i, (images, labels, _) in enumerate(pbar):
images = images.to(self.device)
labels = labels.to(self.device)
# Forward pass
outputs = model(images, **self.cfg.get_train_parameters())
loss, loss_dict_i = model.loss(outputs, labels, **loss_parameters)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Scheduler step (iteration based)
scheduler.step()
# Log
postfix_dict = {key: float(value) for key, value in loss_dict_i.items()}
postfix_dict['lr'] = optimizer.param_groups[0]["lr"]
self.exp.iter_end_callback(epoch, max_epochs, i, len(train_loader), loss.item(), postfix_dict)
postfix_dict['loss'] = loss.item()
pbar.set_postfix(ordered_dict=postfix_dict)
self.exp.epoch_end_callback(epoch, max_epochs, model, optimizer, scheduler)
# Validate
if (epoch + 1) % self.cfg['val_every'] == 0:
self.eval(epoch, on_val=True)
self.exp.train_end_callback()
def webcam(self, epoch, on_val=False, save_predictions=False):
# prediction_name="predictions_r34_culane"#
model = self.cfg.get_model()
model_path = self.exp.get_checkpoint_path(epoch)
self.logger.info('Loading model %s', model_path)
model.load_state_dict(self.exp.get_epoch_model(epoch))
model = model.to(self.device)
model.eval()
# ret, frame = self.vcap.read()
test_parameters = self.cfg.get_test_parameters()
predictions = []
self.exp.eval_start_callback(self.cfg)
self.img_h = 360
self.img_w = 640
while True:
self._idx = []
self._idx.append({'lanes': [], 'path': ''})
# self.dataset
self.max_lanes = 3
S = 72
self.n_strips = S - 1
self.n_offsets = S
# self._transform_annotations(self._idx)
self.strip_size = self.img_h / self.n_strips
self.offsets_ys = np.arange(self.img_h, -1, -self.strip_size)
augmentations = []
aug_chance = 0
self.annotations = np.array(list(map(self.transform_annotation, self._idx)))
self.img_h = 360
self.img_w = 640
transformations = iaa.Sequential([Resize({'height': self.img_h, 'width': self.img_w})])
self.transform = iaa.Sequential([iaa.Sometimes(then_list=augmentations, p=aug_chance), transformations])
ret, frame_ = self.vcap.read()
frame_ori, _, _ = self.__getitem__(_idx=self.annotations, frame=frame_)
_frame = frame_ori.cuda()
_frame = torch.unsqueeze(_frame, dim=0)
with torch.no_grad():
idx=0
output = model(_frame, **test_parameters)
prediction = model.decode(output, as_lanes=True)
predictions.extend(prediction)
frame___ = torch.squeeze(_frame, dim=0)
img = (frame___.cpu().permute(1, 2, 0).numpy() * 255).astype(np.uint8)
# img=255-img
img, fp, fn = self.draw_annotation(0, img=img, pred=prediction[0], frame=frame_)
cv2.imshow("Webcam_laneATT", img)
if cv2.waitKey(1) == 27:
vcap.release() # 메모리 해제
cv2.destroyAllWindows() # 모든창 제거, 특정 창만듣을 경우 ("VideoFrame")
break;
def label_to_lanes(self, label):
# print("here")
lanes = []
for l in label:
if l[1] == 0:
continue
xs = l[5:] / self.img_w
ys = self.offsets_ys / self.img_h
start = int(round(l[2] * self.n_strips))
length = int(round(l[4]))
xs = xs[start:start + length][::-1]
ys = ys[start:start + length][::-1]
xs = xs.reshape(-1, 1)
ys = ys.reshape(-1, 1)
points = np.hstack((xs, ys))
# print(Lane(points=points))
lanes.append(Lane(points=points))
return lanes
def draw_annotation(self, idx, label=None, pred=None, img=None, frame=None):
# Get image if not provided
# print(self.annotations)
if True:
_, label, _ = self.__getitem__(_idx=self.annotations, frame=frame)
label = self.label_to_lanes(label)
img = cv2.resize(img, (self.img_w, self.img_h))
img_h, _, _ = img.shape
# Pad image to visualize extrapolated predictions
data = [(None, None, label)]
if pred is not None:
fp, fn, matches, accs = 0, 0, [1] * len(pred), [1] * len(pred)
assert len(matches) == len(pred)
data.append((matches, accs, pred))
for matches, accs, datum in data:
num = 0
pad = 0
temp = []
for i, l in enumerate(datum):
temp.append(l.points)
if len(datum) != 0:
if len(datum) == 2:
if (sum(temp[0][:, 0])/len(temp[0][:, 0])) > (sum(temp[1][:, 0])/len(temp[1][:, 0])) :
color=[(255,0,0),(0,0,255)]
else:
color = [(0, 0, 255), (255, 0, 0)]
if len(datum) == 1:
#print(len(temp[0][0]))
if (sum(temp[0][:, 0])/len(temp[0][:, 0])) > 0.5:
color=[(255,0,0)]
else:
color = [(0, 0, 255)]
for i, l in enumerate(datum):
points = l.points
# print(points)
points[:, 0] *= img.shape[1]
points[:, 1] *= img.shape[0]
points = points.round().astype(int)
points += pad
xs, ys = points[:, 0], points[:, 1]
for curr_p, next_p in zip(points[:-1], points[1:]):
img = cv2.line(img,
tuple(curr_p),
tuple(next_p),
color=color[num],
thickness=3 if matches is None else 3)
num += 1
return img, fp, fn
    def __getitem__(self, _idx=None, frame=None):
        """Preprocess one frame into a model-ready tensor.

        ``_idx``: annotation array whose first element carries the lane
        annotation for this frame (assumes the entry stores the original
        annotation under ``'old_anno'`` — set by transform_annotation,
        not visible here; confirm).  ``frame``: raw image (numpy array).
        Returns ``(float tensor scaled to [0, 1], label array, 0)``.
        """
        item = _idx[0]
        img_org =frame #cv2.imread("/mnt/work/kim/KODAS1/Input/000397.jpg")
        line_strings_org = self.lane_to_linestrings(item['old_anno']['lanes'])
        line_strings_org = LineStringsOnImage(line_strings_org, shape=img_org.shape)
        # Retry the augmentation pipeline up to 30 times: a transform can push
        # all lane points off-image, making transform_annotation raise.
        for i in range(30):
            img, line_strings = self.transform(image=img_org.copy(), line_strings=line_strings_org)
            line_strings.clip_out_of_image_()
            new_anno = {'path': item['path'], 'lanes': self.linestrings_to_lanes(line_strings)}
            try:
                label = self.transform_annotation(new_anno, img_wh=(self.img_w, self.img_h))['label']
                break
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt /
                # SystemExit; consider ``except Exception``.
                if (i + 1) == 30:
                    self.logger.critical('Transform annotation failed 30 times :(')
                    exit()
        # Scale to [0, 1] before converting to a CHW tensor.
        img = img / 255.
        img = self.to_tensor(img.astype(np.float32))
        return (img, label, 0)
def eval(self, epoch, on_val=False, save_predictions=False):
# prediction_name="predictions_r34_culane"#
model = self.cfg.get_model()
model_path = self.exp.get_checkpoint_path(epoch)
self.logger.info('Loading model %s', model_path)
model.load_state_dict(self.exp.get_epoch_model(epoch))
model = model.to(self.device)
model.eval()
if on_val and self.test_dataset == None:
dataloader = self.get_val_dataloader()
elif self.test_dataset != None:
dataloader = self.get_kodas_test_dataloader()
else:
dataloader = self.get_test_dataloader()
test_parameters = self.cfg.get_test_parameters()
predictions = []
self.exp.eval_start_callback(self.cfg)
with torch.no_grad():
for idx, (images, _, _) in enumerate(tqdm(dataloader)):
images = images.to(self.device)
#utils.save_image(images, "a.png")
output = model(images, **test_parameters)
prediction = model.decode(output, as_lanes=True)
predictions.extend(prediction)
if self.view:
img = (images[0].cpu().permute(1, 2, 0).numpy() * 255).astype(np.uint8)
img, fp, fn = dataloader.dataset.draw_annotation(idx, img=img, pred=prediction[0])
if self.view == 'mistakes' and fp == 0 and fn == 0:
continue
__name = self.log_dir + str(idx) + '.jpg'
cv2.imwrite(__name, img)
cv2.waitKey(0)
image_folder = self.log_dir
video_name = self.log_dir + self.video_name + '.avi'
images = [img for img in os.listdir(image_folder) if img.endswith(".jpg")]
images = natsorted(images)
frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape
video = cv2.VideoWriter(video_name, 0, 30, (width, height))
for image in images:
video.write(cv2.imread(os.path.join(image_folder, image)))
cv2.destroyAllWindows()
video.release()
def lane_to_linestrings(self, lanes):
lines = []
for lane in lanes:
lines.append(LineString(lane))
return lines
def linestrings_to_lanes(self, lines):
lanes = []
for line in lines:
lanes.append(line.coords)
return lanes
def transform_annotation(self, anno, img_wh=None):
if img_wh is None:
img_h = 360 # self.dataset.get_img_heigth(anno['path'])
img_w = 640 # self.dataset.get_img_width(anno['path'])
else:
img_w, img_h = img_wh
old_lanes = anno['lanes']
old_lanes = filter(lambda x: len(x) > 1, old_lanes)
# sort lane points by Y (bottom to top of the image)
old_lanes = [sorted(lane, key=lambda x: -x[1]) for lane in old_lanes]
# remove points with same Y (keep first occurrence)
old_lanes = [self.filter_lane(lane) for lane in old_lanes]
# normalize the annotation coordinates
old_lanes = [[[x * self.img_w / float(img_w), y * self.img_h / float(img_h)] for x, y in lane]
for lane in old_lanes]
# create tranformed annotations
lanes = np.ones((self.max_lanes, 2 + 1 + 1 + 1 + self.n_offsets),
dtype=np.float32) * -1e5 # 2 scores, 1 start_y, 1 start_x, 1 length, S+1 coordinates
# lanes are invalid by default
lanes[:, 0] = 1
lanes[:, 1] = 0
for lane_idx, lane in enumerate(old_lanes):
try:
xs_outside_image, xs_inside_image = self.sample_lane(lane, self.offsets_ys)
except AssertionError:
continue
if len(xs_inside_image) == 0:
continue
all_xs = np.hstack((xs_outside_image, xs_inside_image))
lanes[lane_idx, 0] = 0
lanes[lane_idx, 1] = 1
lanes[lane_idx, 2] = | |
= []
for s in cvarstore:
ts.append(TupleVariation(s[0], s[1]))
fo['cvar'] = ttFont.newTable('cvar')
fo['cvar'].variations = ts
def make_coordinate_index(glist, fo):
    """Build a coordinate -> point-number index for the given glyphs.

    glist: glyph names; fo: the font object.  For every on-curve point of
    every simple (non-composite) glyph, maps a key of the form
    "<glyph>@<x>x<y>" to that point's index within the glyph.
    """
    index = {}
    for glyph_name in glist:
        glyph = fo['glyf'][glyph_name]
        if glyph.isComposite():
            continue
        coords = glyph.getCoordinates(fo['glyf'])
        # coords[0] holds the point tuples, coords[2] the per-point flags;
        # bit 0 of a flag marks an on-curve point.
        for point_number, (pt, flag) in enumerate(zip(coords[0], coords[2])):
            if flag & 0x01:
                key = glyph_name + "@" + str(pt).replace('(', '').replace(')', '').replace(',', 'x').replace(' ', '')
                index[key] = point_number
    return index
def rewrite_point_string(original_point_string, coordinateIndex, coord_pattern, glyph_name,
                         xoffset, yoffset, crash=True):
    """ Determines whether original_point_string is a pair of coordinates for
        a point, and if so, looks up the point number in coordinateIndex. If
        not, simply returns the original value. In theory, this should leave
        simple expressions undisturbed, e.g. {125;10} + 4.

        Reads the module global ``quietcount`` (verbosity) and, through
        coordinateFuzzyMatch, ``coordinateFuzz``.  When ``crash`` is False a
        failed lookup raises Exception instead of exiting the process, so
        the caller can fall back to another attribute.
    """
    coord = coord_pattern.search(original_point_string)
    if coord:
        # Extract the coordinates (saving the rest); split them apart so that
        # offsets can be applied to them, then reassemble them as a key for the
        # coordinate list.
        matched_string = coord.group()
        # coord_pattern has a capturing group, so re.split keeps the matched
        # coordinate pair as its own element in the resulting list.
        split_string = coord_pattern.split(original_point_string)
        string_index = split_string.index(matched_string)
        raw_coord = matched_string.replace('{','').replace('}','').replace(' ','')
        coord_list = raw_coord.split(';')
        # Apply the glyph's x/y offsets before building the lookup key.
        x_coord = int(coord_list[0]) + int(xoffset)
        y_coord = int(coord_list[1]) + int(yoffset)
        coord_id = glyph_name + '@' + str(x_coord) + 'x' + str(y_coord)
        # Try to look up the point number from coordinates. If that fails,
        # try to make a fuzzy match. If that fails, fall back on the val
        # attribute (if we've been evaluating the coord attribute). Failing
        # that, we crash.
        try:
            point_number = coordinateIndex[coord_id]
        except KeyError:
            point_number = coordinateFuzzyMatch(coord_id, coordinateIndex)
        # coordinateFuzzyMatch returns None on failure; int(None) raises
        # TypeError, routing us into the error handling below.
        try:
            int(point_number)
        except TypeError:
            if crash:
                print("In glyph " + glyph_name + ", can't resolve coordinates " + matched_string)
                sys.exit(1)
            else:
                if quietcount < 2:
                    print("Can't resolve coordinate " + original_point_string + "; falling back to value")
                raise Exception("Can't resolve coordinate")
        # Reassemble the expression.
        string_counter = 0
        substitute_string = ""
        for string_bit in split_string:
            if string_counter == string_index:
                # Substitute the resolved point number for the coordinate pair.
                substitute_string = substitute_string + str(point_number)
            else:
                substitute_string = substitute_string + string_bit
            string_counter += 1
        return substitute_string
    else:
        return original_point_string
def coordinateFuzzyMatch(coordID, coordinateIndex):
    """Last-resort search for a point near the requested coordinates.

    Scans a (2*coordinateFuzz)-sized box around the coordinates encoded in
    coordID ("<glyph>@<x>x<y>"), probing coordinateIndex for each candidate
    position.  On a hit, prints a warning (unless quieted) and returns the
    point number; otherwise returns None (the caller will likely crash).
    Reads the module globals ``coordinateFuzz`` and ``quietcount``.
    """
    glyph_id, _, coord_part = coordID.partition('@')
    x_str, _, y_str = coord_part.partition('x')
    base_x = int(x_str)
    base_y = int(y_str)
    for cand_x in range(base_x - coordinateFuzz, base_x + coordinateFuzz + 1):
        for cand_y in range(base_y - coordinateFuzz, base_y + coordinateFuzz + 1):
            probe = glyph_id + '@' + str(cand_x) + 'x' + str(cand_y)
            if probe in coordinateIndex:
                ci = coordinateIndex[probe]
                if quietcount < 2:
                    print("Warning: In glyph " + glyph_id + ", point " + str(ci) + " found at coordinates " +
                          str(cand_x) + "," + str(cand_y) + " instead of " + str(base_x) + "," + str(base_y) + ".")
                return ci
    return None
def coordinates_to_points(glist, xgffile, coordinateIndex, ns):
    """ glyph list, xgf program, coordinate index, namespaces.
        surveys all the glyph programs in the file and changes coordinate
        pairs (e.g. {125;-3}) to point numbers. """
    # Matches a brace-wrapped pair like {125;-3}; the capturing group makes
    # re.split() keep the matched pair as an element of the split list.
    coord_pattern = re.compile(r'(\{[0-9\-]{1,4};[0-9\-]{1,4}\})')
    gPathString = "/xgf:xgridfit/xgf:glyph[@ps-name='{gnm}']"
    for gn in glist:
        # Per-glyph offsets default to "0" when the attribute is absent.
        try:
            xoffset = xgffile.xpath(gPathString.format(gnm=gn), namespaces=ns)[0].attrib['xoffset']
        except KeyError:
            xoffset = "0"
        try:
            yoffset = xgffile.xpath(gPathString.format(gnm=gn), namespaces=ns)[0].attrib['yoffset']
        except KeyError:
            yoffset = "0"
        # <point num="..."> attributes must resolve (crash=True default).
        points = xgffile.xpath((gPathString + "/descendant::xgf:point").format(gnm=gn), namespaces=ns)
        for p in points:
            p.attrib['num'] = rewrite_point_string(p.attrib['num'],
                                                   coordinateIndex,
                                                   coord_pattern,
                                                   gn,
                                                   xoffset,
                                                   yoffset)
        # <with-param> values may legitimately not be coordinates at all;
        # presumably that is why any failure here is silently ignored
        # (NOTE(review): bare except — confirm this is intentional).
        wparams = xgffile.xpath((gPathString + "/descendant::xgf:with-param").format(gnm=gn), namespaces=ns)
        for p in wparams:
            try:
                # print(gn + ": converting " + p.attrib['value'])
                p.attrib['value'] = rewrite_point_string(p.attrib['value'],
                                                         coordinateIndex,
                                                         coord_pattern,
                                                         gn,
                                                         xoffset,
                                                         yoffset)
            except:
                pass
        params = xgffile.xpath((gPathString + "/descendant::xgf:param").format(gnm=gn), namespaces=ns)
        for p in params:
            p.attrib['value'] = rewrite_point_string(p.attrib['value'],
                                                     coordinateIndex,
                                                     coord_pattern,
                                                     gn,
                                                     xoffset,
                                                     yoffset)
        # For <constant>: prefer the 'coordinate' attribute (resolved with
        # crash=False so a failure falls back); if it resolves to a different
        # point than 'value', warn and overwrite 'value'.  If there is no
        # 'coordinate' attribute (KeyError) or resolution raised, rewrite
        # the 'value' attribute itself instead.
        constants = xgffile.xpath((gPathString + "/descendant::xgf:constant").format(gnm=gn), namespaces=ns)
        for c in constants:
            orig_value = c.attrib['value']
            try:
                orig_coord = c.attrib['coordinate']
                point_num = rewrite_point_string(orig_coord,
                                                 coordinateIndex,
                                                 coord_pattern,
                                                 gn,
                                                 xoffset,
                                                 yoffset,
                                                 False)
                if str(point_num) != orig_value:
                    if quietcount < 2:
                        print("Warning: In glyph '" + gn + "', changing value " + orig_value + " to " +
                              str(point_num) + " after coordinate " + orig_coord)
                    c.attrib['value'] = str(point_num)
            except:
                c.attrib['value'] = rewrite_point_string(orig_value,
                                                         coordinateIndex,
                                                         coord_pattern,
                                                         gn,
                                                         xoffset,
                                                         yoffset)
def validate(f, syntax, noval):
    """Validate xgridfit tree ``f`` against the RELAX NG schema for its
    ``syntax`` ("compact" or normal); ``noval`` skips validation entirely.

    Raises (via ``schema.assertValid``) when the document is invalid.
    """
    if noval:
        # Bug fix: previously ``if noval and quietcount < 1`` — running with
        # --novalidation together with -q fell through to the else branch
        # and validated anyway.  Skipping must not depend on verbosity.
        if quietcount < 1:
            print("Skipping validation")
        return
    schemadir = "Schemas/"
    schemafile = "xgridfit.rng"
    if syntax == "compact":
        schemafile = "xgridfit-sh.rng"
    schemapath = get_file_path(schemadir + schemafile)
    schema = etree.RelaxNG(etree.parse(schemapath))
    schema.assertValid(f)
def main():
global maxInstructions, quietcount
# First read the command-line arguments. At minimum we need the inputfile.
argparser = argparse.ArgumentParser(prog='xgridfit',
description='Compile XML into TrueType instructions and add them to a font.')
argparser.add_argument('-v', '--version', action='version', version='Xgridfit ' + __version__)
argparser.add_argument('-e', '--expand', action="store_true",
help="Convert file to expanded syntax, save, and exit")
argparser.add_argument('-c', '--compact', action="store_true",
help="Convert file to compact syntax, save, and exit")
argparser.add_argument('-n', '--novalidation', action="store_true",
help="Skip validation of the Xgridfit program")
argparser.add_argument('--nocompilation', action="store_true",
help="Skip compilation of the Xgridfit program")
argparser.add_argument('--nocompact', action="store_true",
help="Do not compact glyph programs (can help with debugging)")
argparser.add_argument('-m', '--merge', action="store_true",
help="Merge Xgridfit with existing instructions")
argparser.add_argument('-r', '--replaceprep', action="store_true",
help="Whether to replace the existing prep table or append the new one (use with --merge)")
argparser.add_argument('--initgraphics', choices=['yes', 'no'],
help="Whether to initialize graphics-tracking variables at the beginning of glyph program")
argparser.add_argument('-a', '--assume_y', choices=['yes', 'no'],
help="Whether compiler should assume that your hints are all vertical")
argparser.add_argument('-q', '--quiet', action="count", default=0,
help="No progress messages (-qq to suppress warnings too)")
argparser.add_argument('-g', '--glyphlist', help="List of glyphs to compile")
argparser.add_argument('-i', '--inputfont', action='store', type=str,
help="The font file to add instructions to")
argparser.add_argument('-o', '--outputfont', action='store', type=str,
help="The font file to write")
argparser.add_argument('-s', '--saveprograms', action="store_true",
help="Save generated instructions to text files")
argparser.add_argument('-f', '--coordinatefuzz', type=int, default=1,
help="Error tolerance for points identified by coordinates (default is 1)")
argparser.add_argument("inputfile", help='Xgridfit (XML) file to process.')
argparser.add_argument("outputfile", nargs='?',
help="Filename for options (e.g. --expand) that produce text output")
args = argparser.parse_args()
inputfile = args.inputfile
outputfile = args.outputfile
inputfont = args.inputfont
outputfont = args.outputfont
skipval = args.novalidation
skipcomp = args.nocompilation
expandonly = args.expand
compactonly = args.compact
mergemode = args.merge
quietcount = args.quiet
initgraphics = args.initgraphics
assume_y = args.assume_y
glyphlist = args.glyphlist
replaceprep = args.replaceprep
saveprograms = args.saveprograms
nocompact = args.nocompact
cfuzz = args.coordinatefuzz
if quietcount < 1:
print("Opening the Xgridfit file ...")
if cfuzz > 1:
coordinateFuzz = cfuzz
xgffile = etree.parse(inputfile)
# We'll need namespaces
ns = {"xgf": "http://xgridfit.sourceforge.net/Xgridfit2",
"xi": "http://www.w3.org/2001/XInclude",
"xsl": "http://www.w3.org/1999/XSL/Transform"}
# Do xinclude if this is a multipart file
if len(xgffile.xpath("/xgf:xgridfit/xi:include", namespaces=ns)):
xgffile.xinclude()
# Next determine whether we are using long tagnames or short. Best way
# is to find out which tag is used for the required <pre-program> (<prep>)
# element. If we don't find it, print an error message and exit. Here's
# where we validate too; and if we're only expanding or compacting a file,
# do that and exit before we go to the trouble of opening the font.
if quietcount < 1:
print("Validating ...")
if len(xgffile.xpath("/xgf:xgridfit/xgf:prep", namespaces=ns)):
# first validate
validate(xgffile, "compact", skipval)
# as we can't use the compact syntax, always expand
if quietcount < 1:
print("Expanding compact to normal syntax ...")
xslfile = get_file_path("XSL/expand.xsl")
etransform = etree.XSLT(etree.parse(xslfile))
xgffile = etransform(xgffile)
if expandonly:
tstr = str(xgffile)
tstr = tstr.replace('xgf:','')
tstr = tstr.replace('xmlns:xgf="http://xgridfit.sourceforge.net/Xgridfit2"','')
if outputfile:
of = open(outputfile, "w")
of.write(tstr)
of.close()
else:
print(tstr)
sys.exit(0)
elif len(xgffile.xpath("/xgf:xgridfit/xgf:pre-program", namespaces=ns)):
validate(xgffile, "normal", skipval)
if compactonly:
xslfile = get_file_path("XSL/compact.xsl")
etransform = etree.XSLT(etree.parse(xslfile))
xgffile = etransform(xgffile)
tstr = str(xgffile)
tstr = tstr.replace('xgf:','')
tstr = tstr.replace('xmlns:xgf="http://xgridfit.sourceforge.net/Xgridfit2"','')
tstr = tstr.replace(' >','>')
if outputfile:
of = open(outputfile, "w")
of.write(tstr)
of.close()
else:
print(tstr)
sys.exit(0)
else:
print("The xgridfit program must contain a pre-program (prep) element,")
print("even if it's empty.")
sys.exit(1)
if skipcomp and quietcount < 1:
print("Skipping compilation")
sys.exit(0)
# Now open the font. If we're in merge-mode, we need to know some things
# about the current state of it; otherwise we just wipe it.
if quietcount < 1:
| |
parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, offvalue, onvalue, variable
"""
Widget.__init__(self, master, "ttk::checkbutton", kw)
    def invoke(self):
        """Toggles between the selected and deselected states and
        invokes the associated command. If the widget is currently
        selected, sets the option variable to the offvalue option
        and deselects the widget; otherwise, sets the option variable
        to the option onvalue.

        Returns the result of the associated command."""
        # Delegates directly to the Tk "invoke" subcommand on this widget.
        return self.tk.call(self._w, "invoke")
class Entry(Widget, Tkinter.Entry):
    """A themed (Ttk) single-line text entry.

    Displays a one-line string that the user can edit."""

    def __init__(self, master=None, widget=None, **kw):
        """Create the entry as a child of *master*.

        Standard options: class, cursor, style, takefocus, xscrollcommand.
        Widget-specific options: exportselection, invalidcommand, justify,
        show, state, textvariable, validate, validatecommand, width.
        Validation modes: none, key, focus, focusin, focusout, all.
        """
        # *widget* lets subclasses (e.g. Combobox) reuse this initializer
        # with their own Tk widget name.
        Widget.__init__(self, master, widget or "ttk::entry", kw)

    def bbox(self, index):
        """Return (x, y, width, height) bounding the character at *index*."""
        return self.tk.call(self._w, "bbox", index)

    def identify(self, x, y):
        """Name of the element under (x, y), or "" if the point lies
        outside the window."""
        return self.tk.call(self._w, "identify", x, y)

    def validate(self):
        """Force revalidation regardless of the validate option.

        Returns True on success, False on failure, and sets or clears
        the invalid state to match."""
        return bool(self.tk.call(self._w, "validate"))
class Combobox(Entry):
    """A themed combobox: a text entry combined with a pop-down list of
    values."""

    def __init__(self, master=None, **kw):
        """Create the combobox as a child of *master*.

        Standard options: class, cursor, style, takefocus.
        Widget-specific options: exportselection, justify, height,
        postcommand, state, textvariable, values, width.
        """
        Entry.__init__(self, master, "ttk::combobox", **kw)

    def current(self, newindex=None):
        """Select the value at position *newindex* in the values list;
        with no argument, return the index of the current value in that
        list, or -1 if the current value is not listed."""
        return self.tk.call(self._w, "current", newindex)

    def set(self, value):
        """Make *value* the combobox's current value."""
        self.tk.call(self._w, "set", value)
class Frame(Widget):
    """A themed container widget used to group other widgets together."""

    def __init__(self, master=None, **kw):
        """Create the frame as a child of *master*.

        Standard options: class, cursor, style, takefocus.
        Widget-specific options: borderwidth, relief, padding, width,
        height.
        """
        Widget.__init__(self, master, "ttk::frame", kw)
class Label(Widget):
    """A themed widget that displays a text label and/or an image."""

    def __init__(self, master=None, **kw):
        """Create the label as a child of *master*.

        Standard options: class, compound, cursor, image, style,
        takefocus, text, textvariable, underline, width.
        Widget-specific options: anchor, background, font, foreground,
        justify, padding, relief, text, wraplength.
        """
        Widget.__init__(self, master, "ttk::label", kw)
class Labelframe(Widget):
    """A themed container that groups widgets together under an optional
    label, which may be a plain string or another widget."""

    def __init__(self, master=None, **kw):
        """Create the labelframe as a child of *master*.

        Standard options: class, cursor, style, takefocus.
        Widget-specific options: labelanchor, text, underline, padding,
        labelwidget, width, height.
        """
        Widget.__init__(self, master, "ttk::labelframe", kw)

LabelFrame = Labelframe # Tkinter name compatibility
class Menubutton(Widget):
    """A themed button that shows a textual label and/or image and posts
    a menu when pressed."""

    def __init__(self, master=None, **kw):
        """Create the menubutton as a child of *master*.

        Standard options: class, compound, cursor, image, state, style,
        takefocus, text, textvariable, underline, width.
        Widget-specific options: direction, menu.
        """
        Widget.__init__(self, master, "ttk::menubutton", kw)
class Notebook(Widget):
    """A themed tabbed container.

    Manages a set of child windows and shows exactly one at a time; each
    child is attached to a tab the user may select."""

    def __init__(self, master=None, **kw):
        """Create the notebook as a child of *master*.

        Standard options: class, cursor, style, takefocus.
        Widget-specific options: height, padding, width.
        Tab options: state, sticky, padding, text, image, compound,
        underline.

        Wherever a method accepts a tab_id, it may be any of:
          * an integer between zero and the number of tabs,
          * the name of a child window,
          * a positional spec of the form "@x,y" naming the tab at that
            point,
          * the string "current" (the currently selected tab), or
          * the string "end" (the tab count; valid only for index()).
        """
        Widget.__init__(self, master, "ttk::notebook", kw)

    def add(self, child, **kw):
        """Attach a new tab for *child*; if *child* is already managed by
        the notebook but hidden, restore it to its previous position."""
        self.tk.call(self._w, "add", child, *(_format_optdict(kw)))

    def forget(self, tab_id):
        """Remove the tab *tab_id*, unmapping and unmanaging its window."""
        self.tk.call(self._w, "forget", tab_id)

    def hide(self, tab_id):
        """Hide the tab *tab_id*.

        The associated window stays managed by the notebook and keeps its
        configuration; add() brings a hidden tab back."""
        self.tk.call(self._w, "hide", tab_id)

    def identify(self, x, y):
        """Name of the tab element at (x, y), or "" if there is none."""
        return self.tk.call(self._w, "identify", x, y)

    def index(self, tab_id):
        """Numeric index of *tab_id*, or the total tab count for "end"."""
        return self.tk.call(self._w, "index", tab_id)

    def insert(self, pos, child, **kw):
        """Insert a pane at *pos* (the string "end", an integer index, or
        the name of a managed child).  A child already managed by the
        notebook is moved to that position instead."""
        self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))

    def select(self, tab_id=None):
        """Select tab *tab_id*, displaying its window and unmapping the
        previously selected one (if different).  With no argument, return
        the widget name of the currently selected pane."""
        return self.tk.call(self._w, "select", tab_id)

    def tab(self, tab_id, option=None, **kw):
        """Query or change the options of tab *tab_id*.

        With no keywords, return a dict of all tab option values; with
        *option*, return that option's value; otherwise apply the given
        keyword options."""
        if option is not None:
            kw[option] = None
        return _val_or_dict(kw, self.tk.call, self._w, "tab", tab_id)

    def tabs(self):
        """Return the windows managed by the notebook (empty tuple if
        none)."""
        return self.tk.call(self._w, "tabs") or ()

    def enable_traversal(self):
        """Extend the enclosing toplevel's bindings with keyboard
        traversal for this notebook:

        Control-Tab / Shift-Control-Tab select the next / previous tab,
        and Alt-K selects the tab whose mnemonic (underlined) character
        is K.

        Several notebooks in one toplevel (including nested ones) may be
        enabled, but traversal only behaves properly when all panes are
        direct children of their notebook."""
        # In practice only the mnemonic bindings change here;
        # Control-Tab and Shift-Control-Tab already work.
        self.tk.call("ttk::notebook::enableTraversal", self._w)
class Panedwindow(Widget, Tkinter.PanedWindow):
"""Ttk Panedwindow widget displays a number of subwindows, stacked
either vertically or horizontally."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Panedwindow with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            orient, width, height

        PANE OPTIONS

            weight
        """
        Widget.__init__(self, master, "ttk::panedwindow", kw)

    # Reuse the classic-Tk PanedWindow.forget so pane removal overrides the
    # Pack.forget geometry-manager method inherited via Widget.
    forget = Tkinter.PanedWindow.forget # overrides Pack.forget
    def insert(self, pos, child, **kw):
        """Inserts a pane at the specified positions.

        pos is either the string end, an integer index, or the name
        of a child. If child is already managed by the paned window,
        moves it to the specified position."""
        self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
def pane(self, pane, option=None, **kw):
"""Query or modify the options of the specified pane.
pane is either an integer index or the name of a managed subwindow.
If kw is not given, returns a dict of the pane option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
| |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 21 12:11:36 2019
@author: keelin
"""
import numpy as np
import opencxr.utils
import skimage.feature
from opencxr.utils.resize_rescale import rescale_to_min_max
from scipy import ndimage
from scipy.ndimage import binary_dilation, find_objects
from skimage import measure
"""
Methods relating to mask and crop functionality
"""
def set_non_mask_constant(img_np, mask_np, dilation_in_pixels=0, constant_val=0):
    """
    Replace every pixel outside a (possibly dilated) mask with a constant,
    e.g. set areas outside the lungs to black.

    :param img_np: the original image
    :param mask_np: the mask, e.g. lung mask, expected as binary 1 or 0 values
    :param dilation_in_pixels: size of the square structuring element used to
                               dilate the mask before applying it (0 = no dilation)
    :param constant_val: the value written into non-mask pixels
    :return: a copy of the image with non-mask areas set to constant_val
             (the input image is left untouched)
    """
    effective_mask = mask_np
    if dilation_in_pixels > 0:
        structure = np.ones((dilation_in_pixels, dilation_in_pixels), dtype=bool)
        effective_mask = binary_dilation(mask_np, structure=structure).astype(
            mask_np.dtype
        )
    # work on a copy so the caller's array is not modified
    result = np.copy(img_np)
    result[effective_mask < 1] = constant_val
    return result
def crop_to_mask(img_np, spacing, mask_np, margin_in_mm):
    """
    Crop an image to the bounding box of a mask plus a margin,
    e.g. crop to the smallest rectangle containing the lungs.

    :param img_np: the original image
    :param spacing: pixel spacing of the original image, per axis
    :param mask_np: the mask to crop around, e.g. lung mask,
                    expected as binary 1,0 values
    :param margin_in_mm: margin to allow around the tightest bounding box
    :return: the cropped image and
             the size changes list for reference or future use see utils __init__.py
    """
    # convert the mm margin to pixels independently per axis
    margin_px_x = int(np.round(margin_in_mm / spacing[0]))
    margin_px_y = int(np.round(margin_in_mm / spacing[1]))
    # tight bounding box of the mask (one slice per axis), clamped to the image
    x_slice, y_slice = find_objects(mask_np)[0]
    min_x = max(x_slice.start - margin_px_x, 0)
    max_x = min(x_slice.stop + margin_px_x, mask_np.shape[0])
    min_y = max(y_slice.start - margin_px_y, 0)
    max_y = min(y_slice.stop + margin_px_y, mask_np.shape[1])
    return crop_with_params(img_np, [min_x, max_x, min_y, max_y])
def crop_img_borders_by_edginess(
    img_np_array, width_edgy_threshold=50, dist_edgy_threshold=100
):
    """
    Method to crop homogeneous border regions based on edge detection:
    1) do an edge detection to pick up edges belonging to the image content
    2) crop to the region which contains edge information
       (exclude small isolated edgy regions)
    :param img_np_array: the input image
    :param width_edgy_threshold: the threshold (in pixels) to define
                                 an edgy region as 'small'
    :param dist_edgy_threshold: the threshold (in pixels) to define an edgy region as
                                'distant' from other edgy regions
    :return: The cropped image
             The size changes list for reference or future use see utils __init__.py
    """
    def find_starts_ends_edgy_regions_axis(edge_img_np_array, axis_to_check):
        """
        inner function to count edge pixels and identify (per row, per axis)
        where the regions with edges start and end
        """
        # Returns a flat list [start0, end0, start1, end1, ...] of indices
        # (inclusive) along axis_to_check where runs with >0 edge pixels lie.
        count_edges = []
        start_end_edgy_regions = []
        for ind in range(0, edge_img_np_array.shape[axis_to_check]):
            # number of edge pixels in the current row (axis 0) / column (axis 1)
            count_edge_pixels = np.sum(
                edge_img_np_array[ind, :]
                if axis_to_check == 0
                else edge_img_np_array[:, ind]
            )
            count_edges.append(count_edge_pixels)
            if ind == 0:
                # start an edgy region if there are immediately edges present
                if count_edge_pixels > 0:
                    start_end_edgy_regions.append(ind)
                continue
            # NOTE(review): uses img_np_array.shape here; it has the same shape
            # as edge_img_np_array so this works, but the edge image would be
            # the more consistent choice.
            if ind == img_np_array.shape[axis_to_check] - 1:
                # if previous one was non-zero we were in an edgy region
                # so add the ending
                if count_edges[ind - 1] > 0:
                    start_end_edgy_regions.append(ind)
                # if the last row is an isolated edgy region
                # add it as a start and end also.
                elif count_edges[ind - 1] == 0 and count_edge_pixels > 0:
                    start_end_edgy_regions.append(ind)
                    start_end_edgy_regions.append(ind)
                continue
            # otherwise we are somewhere in the middle
            if count_edge_pixels > 0 and count_edges[ind - 1] == 0:
                # add start location for edgy region
                start_end_edgy_regions.append(ind)
            elif count_edge_pixels == 0 and count_edges[ind - 1] > 0:
                # add end location for edgy region
                start_end_edgy_regions.append(ind)
        return start_end_edgy_regions
    def remove_small_isolated_edgy_regions(starts_ends_edgy_regions):
        """
        inner function to remove the edgy regions which are small or isolated
        """
        # A region is dropped only when it is BOTH narrower than
        # width_edgy_threshold AND farther than dist_edgy_threshold from its
        # neighbours on both sides (edge regions keep distance 10000).
        # print('starts and ends are ', starts_ends_edgy_regions)
        starts_ends_retain = []
        for start_edgy_index in range(0, len(starts_ends_edgy_regions), 2):
            start_edgy = starts_ends_edgy_regions[start_edgy_index]
            end_edgy = starts_ends_edgy_regions[start_edgy_index + 1]
            # print("found start and end", start_edgy, end_edgy)
            width_edgy = end_edgy - start_edgy + 1
            # print("found width ", width_edgy)
            dist_next_edgy = 10000
            dist_prev_edgy = 10000
            # if a subsequent edgy region exists
            if start_edgy_index + 2 < len(starts_ends_edgy_regions):
                start_next_edgy = starts_ends_edgy_regions[start_edgy_index + 2]
                dist_next_edgy = start_next_edgy - end_edgy
            # if a previous edgy region exists
            if start_edgy_index - 2 >= 0:
                end_prev_edgy = starts_ends_edgy_regions[start_edgy_index - 1]
                dist_prev_edgy = start_edgy - end_prev_edgy
            isolated_left = dist_prev_edgy > dist_edgy_threshold
            isolated_right = dist_next_edgy > dist_edgy_threshold
            is_small_edgy_region = width_edgy < width_edgy_threshold
            is_isolated_edgy_region = isolated_left and isolated_right
            if not (is_small_edgy_region and is_isolated_edgy_region):
                starts_ends_retain.append(start_edgy)
                starts_ends_retain.append(end_edgy)
        # print('starts and ends i will retain are ', starts_ends_retain)
        return starts_ends_retain
    # Now start the main work:
    # Parameters of Canny are based on the input being in range 0-65535
    # so need to force this before we run Canny
    img_for_edge_det = rescale_to_min_max(img_np_array, new_dtype=np.uint16)
    edge_img = skimage.feature.canny(
        image=img_for_edge_det.astype(np.float32),
        sigma=5.0,
        low_threshold=0.0,
        high_threshold=500.0,
    )
    # convert from boolean
    edge_img = edge_img.astype(np.uint8)
    # Now crop according to where the "edgy" region of the image is
    start_end_edges_x = find_starts_ends_edgy_regions_axis(edge_img, 0)
    start_end_edges_x = remove_small_isolated_edgy_regions(start_end_edges_x)
    start_end_edges_y = find_starts_ends_edgy_regions_axis(edge_img, 1)
    start_end_edges_y = remove_small_isolated_edgy_regions(start_end_edges_y)
    # NOTE(review): assumes at least one edgy region survives filtering; a
    # flat image with no detected edges would raise IndexError here -- TODO
    # confirm callers guarantee image content.
    start_x = start_end_edges_x[0]
    end_x = start_end_edges_x[len(start_end_edges_x) - 1]
    start_y = start_end_edges_y[0]
    end_y = start_end_edges_y[len(start_end_edges_y) - 1]
    # NOTE(review): end_x/end_y are inclusive indices while crop_to_mask
    # passes exclusive upper bounds to crop_with_params -- confirm intended.
    # print('Will finally crop from edginess x', start_x, end_x)
    # print('Will finally crop from edginess y', start_y, end_y)
    cropped_img, size_changes = crop_with_params(
        img_np_array, [start_x, end_x, start_y, end_y]
    )
    return cropped_img, size_changes
def crop_img_borders(img_np_array, in_thresh_factor=0.05):
    """
    Crop away homogeneous border rows/columns based on std deviations.

    A border row/column is "homogeneous" when its standard deviation is
    below in_thresh_factor * std(whole image). The crop is recomputed on
    the shrinking window until a full pass makes no further change.

    :param img_np_array: the input image (2D array; x is axis 0)
    :param in_thresh_factor: threshold factor -> 0.05 recommended
    :return: The cropped image
             The size changes list for reference or future use see utils __init__.py
    """
    # Hard threshold derived from the global contrast of the image
    hard_threshold = in_thresh_factor * np.std(img_np_array)
    completed = False
    # Stored bounds are INCLUSIVE indices of the retained window
    xmin_stored = 0
    xmax_stored = img_np_array.shape[0] - 1
    ymin_stored = 0
    ymax_stored = img_np_array.shape[1] - 1
    # Loop until no more changes can be made
    while not completed:
        xmin = xmin_stored
        xmax = xmax_stored
        ymin = ymin_stored
        ymax = ymax_stored
        # Determine xmin: advance while rows are homogeneous
        for x_pix in range(xmin_stored, xmax_stored + 1):
            line = img_np_array[x_pix, ymin_stored : ymax_stored + 1]
            if np.std(line) < hard_threshold:
                xmin = x_pix
            else:
                break
        # Determine xmax: retreat while rows are homogeneous
        for x_pix in range(xmax_stored, -1, -1):
            line = img_np_array[x_pix, ymin_stored : ymax_stored + 1]
            if np.std(line) < hard_threshold:
                xmax = x_pix
            else:
                break
        # Determine ymin: advance while columns are homogeneous
        for y_pix in range(ymin_stored, ymax_stored + 1):
            line = img_np_array[xmin_stored : xmax_stored + 1, y_pix]
            if np.std(line) < hard_threshold:
                ymin = y_pix
            else:
                break
        # Determine ymax: retreat while columns are homogeneous
        for y_pix in range(ymax_stored, -1, -1):
            line = img_np_array[xmin_stored : xmax_stored + 1, y_pix]
            if np.std(line) < hard_threshold:
                ymax = y_pix
            else:
                break
        # In case of any unusual (e.g. fully homogeneous) image causing an
        # inverted box - just keep stored values
        if xmax < xmin:
            xmin = xmin_stored
        if ymax < ymin:
            ymin = ymin_stored
        # Assume that we are done, but if we made any change on this
        # last loop then we will set completed=False and try one more time
        completed = True
        if xmin > xmin_stored:
            xmin_stored = xmin
            completed = False
        if xmax < xmax_stored:
            xmax_stored = xmax
            completed = False
        if ymin > ymin_stored:
            ymin_stored = ymin
            completed = False
        if ymax < ymax_stored:
            ymax_stored = ymax
            completed = False
    # BUGFIX: the y upper bound previously omitted the "+1" the x axis had,
    # silently dropping the last retained column. crop_with_params takes
    # exclusive upper bounds (cf. crop_to_mask), so both axes get +1 here.
    cropped_img, size_changes = crop_with_params(
        img_np_array, [xmin_stored, xmax_stored + 1, ymin_stored, ymax_stored + 1]
    )
    return cropped_img, size_changes
def crop_with_params(img_np, array_minx_maxx_miny_maxy):
"""
    Crops an image according to the 4 params provided in the array
:param img_np: the image to be cropped
:param array_minx_maxx_miny_maxy: | |
}),
.M_REGIONS(M_REGIONS),
.M_BASE_ADDR({ {% for p in range(n-1,-1,-1) %}w_a_r(M{{'%02d'%p}}_BASE_ADDR){% if not loop.last %}, {% endif %}{% endfor %} }),
.M_ADDR_WIDTH({ {% for p in range(n-1,-1,-1) %}w_32_r(M{{'%02d'%p}}_ADDR_WIDTH){% if not loop.last %}, {% endif %}{% endfor %} }),
.M_CONNECT_READ({ {% for p in range(n-1,-1,-1) %}w_s(M{{'%02d'%p}}_CONNECT_READ){% if not loop.last %}, {% endif %}{% endfor %} }),
.M_CONNECT_WRITE({ {% for p in range(n-1,-1,-1) %}w_s(M{{'%02d'%p}}_CONNECT_WRITE){% if not loop.last %}, {% endif %}{% endfor %} }),
.M_ISSUE({ {% for p in range(n-1,-1,-1) %}w_32(M{{'%02d'%p}}_ISSUE){% if not loop.last %}, {% endif %}{% endfor %} }),
.M_SECURE({ {% for p in range(n-1,-1,-1) %}w_1(M{{'%02d'%p}}_SECURE){% if not loop.last %}, {% endif %}{% endfor %} }),
.S_AR_REG_TYPE({ {% for p in range(m-1,-1,-1) %}w_2(S{{'%02d'%p}}_AR_REG_TYPE){% if not loop.last %}, {% endif %}{% endfor %} }),
.S_R_REG_TYPE({ {% for p in range(m-1,-1,-1) %}w_2(S{{'%02d'%p}}_R_REG_TYPE){% if not loop.last %}, {% endif %}{% endfor %} }),
.S_AW_REG_TYPE({ {% for p in range(m-1,-1,-1) %}w_2(S{{'%02d'%p}}_AW_REG_TYPE){% if not loop.last %}, {% endif %}{% endfor %} }),
.S_W_REG_TYPE({ {% for p in range(m-1,-1,-1) %}w_2(S{{'%02d'%p}}_W_REG_TYPE){% if not loop.last %}, {% endif %}{% endfor %} }),
.S_B_REG_TYPE({ {% for p in range(m-1,-1,-1) %}w_2(S{{'%02d'%p}}_B_REG_TYPE){% if not loop.last %}, {% endif %}{% endfor %} }),
.M_AR_REG_TYPE({ {% for p in range(n-1,-1,-1) %}w_2(M{{'%02d'%p}}_AR_REG_TYPE){% if not loop.last %}, {% endif %}{% endfor %} }),
.M_R_REG_TYPE({ {% for p in range(n-1,-1,-1) %}w_2(M{{'%02d'%p}}_R_REG_TYPE){% if not loop.last %}, {% endif %}{% endfor %} }),
.M_AW_REG_TYPE({ {% for p in range(n-1,-1,-1) %}w_2(M{{'%02d'%p}}_AW_REG_TYPE){% if not loop.last %}, {% endif %}{% endfor %} }),
.M_W_REG_TYPE({ {% for p in range(n-1,-1,-1) %}w_2(M{{'%02d'%p}}_W_REG_TYPE){% if not loop.last %}, {% endif %}{% endfor %} }),
.M_B_REG_TYPE({ {% for p in range(n-1,-1,-1) %}w_2(M{{'%02d'%p}}_B_REG_TYPE){% if not loop.last %}, {% endif %}{% endfor %} })
)
axi_crossbar_inst (
.clk(clk),
.rst(rst),
.s_axi_awid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_awid{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_awaddr({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_awaddr{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_awlen({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_awlen{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_awsize({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_awsize{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_awburst({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_awburst{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_awlock({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_awlock{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_awcache({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_awcache{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_awprot({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_awprot{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_awqos({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_awqos{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_awuser({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_awuser{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_awvalid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_awvalid{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_awready({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_awready{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_wdata({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_wdata{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_wstrb({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_wstrb{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_wlast({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_wlast{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_wuser({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_wuser{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_wvalid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_wvalid{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_wready({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_wready{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_bid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_bid{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_bresp({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_bresp{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_buser({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_buser{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_bvalid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_bvalid{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_bready({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_bready{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_arid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_arid{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_araddr({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_araddr{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_arlen({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_arlen{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_arsize({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_arsize{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_arburst({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_arburst{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_arlock({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_arlock{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_arcache({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_arcache{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_arprot({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_arprot{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_arqos({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_arqos{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_aruser({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_aruser{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_arvalid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_arvalid{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_arready({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_arready{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_rid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_rid{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_rdata({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_rdata{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_rresp({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_rresp{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_rlast({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_rlast{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_ruser({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_ruser{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_rvalid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_rvalid{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axi_rready({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axi_rready{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_awid({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_awid{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_awaddr({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_awaddr{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_awlen({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_awlen{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_awsize({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_awsize{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_awburst({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_awburst{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_awlock({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_awlock{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_awcache({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_awcache{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_awprot({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_awprot{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_awqos({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_awqos{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_awregion({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_awregion{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_awuser({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_awuser{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_awvalid({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_awvalid{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_awready({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_awready{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_wdata({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_wdata{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_wstrb({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_wstrb{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_wlast({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_wlast{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axi_wuser({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axi_wuser{% if not loop.last %}, {% endif %}{% endfor %} }),
| |
<reponame>os-climate/sostrades-core
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
mode: python; py-indent-offset: 4; tab-width: 8; coding: utf-8
'''
import logging
from copy import copy
from uuid import uuid4
from hashlib import sha256
from numpy import can_cast
from sos_trades_core.api import get_sos_logger
from sos_trades_core.execution_engine.sos_discipline import SoSDiscipline
from sos_trades_core.tools.tree.serializer import DataSerializer
from sos_trades_core.tools.tree.treeview import TreeView
# Module-level aliases of the SoSDiscipline metadata keys, re-exported here
# so the data-manager code can reference them without the class prefix.
TYPE = SoSDiscipline.TYPE
VALUE = SoSDiscipline.VALUE
RANGE = SoSDiscipline.RANGE
ORIGIN = SoSDiscipline.ORIGIN
DEFAULT = SoSDiscipline.DEFAULT
OPTIONAL = SoSDiscipline.OPTIONAL
COUPLING = SoSDiscipline.COUPLING
EDITABLE = SoSDiscipline.EDITABLE
IO_TYPE = SoSDiscipline.IO_TYPE
UNIT = SoSDiscipline.UNIT
IO_TYPE_IN = SoSDiscipline.IO_TYPE_IN
IO_TYPE_OUT = SoSDiscipline.IO_TYPE_OUT
COMPOSED_OF = SoSDiscipline.COMPOSED_OF
NS_REFERENCE = SoSDiscipline.NS_REFERENCE
POSSIBLE_VALUES = SoSDiscipline.POSSIBLE_VALUES
INTERNAL_VISIBILITY = SoSDiscipline.INTERNAL_VISIBILITY
DISCIPLINES_DEPENDENCIES = SoSDiscipline.DISCIPLINES_DEPENDENCIES
VAR_NAME = SoSDiscipline.VAR_NAME
DATAFRAME_DESCRIPTOR = SoSDiscipline.DATAFRAME_DESCRIPTOR
DATAFRAME_EDITION_LOCKED = SoSDiscipline.DATAFRAME_EDITION_LOCKED
TYPE_METADATA = SoSDiscipline.TYPE_METADATA
class DataManager:
    """
    Specification: DataManager class collects inputs/outputs and disciplines
    """
    # Keys used inside the per-variable / per-discipline entry dicts
    VALUE = 'value'
    DISC_REF = 'reference'
    STATUS = 'status'
def __init__(self, name,
root_dir=None,
rw_object=None,
study_filename=None,
ns_manager=None,
logger=None):
'''
Constructor
'''
self.no_change = True
self.name = name
self.rw_object = rw_object
self.root_dir = root_dir
self.study_filename = study_filename
self.ns_manager = ns_manager
self.data_dict = None
self.data_id_map = None
self.disciplines_dict = None
self.disciplines_id_map = None
self.gemseo_disciplines_id_map = None
self.cache_map = None
self.treeview = None
self.reset()
if logger is None:
self.logger = get_sos_logger('SoS.EE.DataManager')
else:
self.logger = logger
@staticmethod
def get_an_uuid():
''' generate a random UUID to make data_dict keys unique '''
return str(uuid4())
def generate_hashed_uid(self, string_list):
'''
Generate a hashed uid based on string list containing disc infos (full disc name, class name and full data i/o)
'''
h = sha256()
for string in string_list:
h.update(string.encode())
return h.digest()
def load_gemseo_disciplines_cache(self, cache_map):
'''
Store gemseo disciplines cache from cache_map using gemseo_disciplines_id_map
'''
# update cache of all gemseo disciplines with loaded cache_map
for disc_id, disc_cache in cache_map.items():
if disc_id in self.gemseo_disciplines_id_map:
self.gemseo_disciplines_id_map[disc_id].cache = disc_cache
self.cache_map[disc_id] = disc_cache
def reset(self):
self.data_dict = {}
self.data_id_map = {}
self.disciplines_dict = {}
self.disciplines_id_map = {}
self.no_check_default_variables = []
def get_data(self, var_f_name, attr=None):
''' Get attr value of var_f_name or all data_dict value of var_f_name (if attr=None)
'''
if attr is None:
return self.data_dict[self.get_data_id(var_f_name)]
else:
return self.data_dict[self.get_data_id(var_f_name)][attr]
def delete_complex_in_df_and_arrays(self):
for dataa in self.data_dict.values():
if dataa['type'] == 'dataframe' and dataa[self.VALUE] is not None:
for col in dataa[self.VALUE].columns:
dataa[self.VALUE][col] = dataa[self.VALUE][col].values.real
elif dataa['type'] == 'array':
try:
dataa[self.VALUE] = dataa[self.VALUE].real
except:
pass
def check_data_in_dm(self, var_f_name):
'''
Check if the data is in the DM with its full name
:params: var_f_name, full variable name to check
:type : string
:returns: boolean True or False
'''
data_in_dm = False
if var_f_name in self.data_id_map:
if self.get_data_id(var_f_name) in self.data_dict:
data_in_dm = True
return data_in_dm
def set_data(self, var_f_name, attr, val, check_value=True):
''' Set attr value of var_f_name in data_dict
'''
if self.get_data_id(var_f_name) in self.data_dict:
if check_value:
if self.data_dict[self.get_data_id(var_f_name)][attr] != val:
self.data_dict[self.get_data_id(var_f_name)][attr] = val
self.no_change = False
else:
self.data_dict[self.get_data_id(var_f_name)][attr] = val
else:
msg = f"Try to update metadata of variable {var_f_name} that does"
msg += f" not exists as I/O of any discipline"
raise KeyError(msg)
def get_io_data_of_disciplines(self, disciplines):
''' get i/o value and metadata of provided disciplines
'''
data = {}
data[VALUE] = {}
data[TYPE_METADATA] = {}
data["local_data"] = {}
for d in disciplines:
# input values and metadata
var_list = d.get_data_io_dict_keys(io_type=IO_TYPE_IN)
for v in var_list:
fname = d.get_var_full_name(v, d.get_data_in())
data[VALUE][fname] = self.get_data(fname, VALUE)
data[TYPE_METADATA][fname] = self.get_data(
fname, TYPE_METADATA)
# output values and metadata
var_list = d.get_data_io_dict_keys(io_type=IO_TYPE_OUT)
for v in var_list:
fname = d.get_var_full_name(v, d.get_data_out())
data[VALUE][fname] = self.get_data(fname, VALUE)
data[TYPE_METADATA][fname] = self.get_data(
fname, TYPE_METADATA)
# local data update
data["local_data"].update(d.local_data)
return data
def get_value(self, var_f_name):
''' Get value of var_f_name from data_dict
'''
return self.get_data(var_f_name, SoSDiscipline.VALUE)
def get_discipline(self, disc_id):
''' Get discipline with disc_id from disciplines_dict
'''
if disc_id in self.disciplines_dict:
return self.disciplines_dict[disc_id][self.DISC_REF]
else:
return None
def get_disciplines_with_name(self, disc_f_name):
''' Get discipline with disc_id from disciplines_dict
'''
disc_list = []
disc_id_list = self.get_discipline_ids_list(disc_f_name)
for disc_id in disc_id_list:
disc_list.append(self.disciplines_dict[disc_id][self.DISC_REF])
return disc_list
def get_discipline_names_with_starting_name(self, starting_name):
'''
Get all disciplines that starts with starting_name in the datamanager
'''
disc_list = []
for disc_name in self.disciplines_id_map:
if disc_name.startswith(starting_name):
disc_list.append(disc_name)
return disc_list
def get_all_namespaces_from_var_name(self, var_name):
''' Get all namespaces containing var_name in data_dict
'''
namespace_list = []
for key in self.data_id_map.keys():
if key.endswith('.' + var_name):
namespace_list.append(key)
return namespace_list
def get_all_var_name_with_ns_key(self, var_name):
''' Get all namespaces containing var_name in data_dict plus their namespace key as a dict
'''
namespace_list = []
for key in self.data_id_map.keys():
if key.endswith('.' + var_name):
namespace_list.append(key)
if len(namespace_list) > 0:
ns_dict_obj = self.get_data_dict_attr('ns_reference')
return {ns: ns_dict_obj[ns].name for ns in namespace_list}
else:
return {}
    def get_data_id(self, var_f_name):
        ''' Get data id with var_f_name
        '''
        # Raises KeyError when the full name is unknown to the DM.
        return self.data_id_map[var_f_name]
    def get_discipline_ids_list(self, disc_f_name):
        ''' Get discipline id list with disc_f_name
        '''
        # Raises KeyError when the full discipline name is unknown.
        return self.disciplines_id_map[disc_f_name]
def generate_data_id_map(self):
''' Generate data_id_map with data_dict
'''
self.data_id_map = {}
data_dict = copy(self.data_dict)
for var_id in data_dict.keys():
var_f_name = self.get_var_full_name(var_id)
self.data_id_map[var_f_name] = var_id
def generate_disciplines_id_map(self):
''' Generate disciplines_id_map with disciplines_dict
'''
self.disciplines_id_map = {}
for disc_id in self.disciplines_dict.keys():
disc_f_name = self.get_disc_full_name(disc_id)
self.add_disc_id_to_disc_id_map(disc_f_name, disc_id)
def set_values_from_dict(self, values_dict, full_ns_keys=True):
''' Set values in data_dict from dict with namespaced keys
if full_ns_keys (not uuid), try to get its uuid correspondency through get_data_id function
'''
keys_to_map = self.data_id_map.keys() if full_ns_keys else self.data_id_map.values()
for key, value in values_dict.items():
if not key in keys_to_map:
raise ValueError(f'{key} does not exist in data manager')
k = self.get_data_id(key) if full_ns_keys else key
# if self.data_dict[k][SoSDiscipline.VISIBILITY] == INTERNAL_VISIBILITY:
# raise Exception(f'It is not possible to update the variable {k} which has a visibility Internal')
self.data_dict[k][VALUE] = value
def convert_data_dict_with_full_name(self):
''' Return data_dict with namespaced keys
'''
return self.convert_dict_with_maps(self.data_dict, self.data_id_map, keys='full_names')
def get_data_dict_values(self, excepted=[]):
'''
Return a dictionaries with all full named keys in the dm and the value of each key from the dm
'''
return self.get_data_dict_attr(self.VALUE, excepted)
def get_data_dict_attr(self, attr, excepted=[]):
'''
Return a dictionaries with all full named keys in the dm and the value of each key from the dm
'''
data_dict = self.convert_data_dict_with_full_name()
exception_list = []
if 'numerical' in excepted:
exception_list = list(SoSDiscipline.NUM_DESC_IN.keys())
if 'None' in excepted:
data_dict_values = {key: value.get(attr, None)
for key, value in data_dict.items() if key.split('.')[-1] not in exception_list}
else:
data_dict_values = {key: value.get(attr, None)
for key, value in data_dict.items() if key.split('.')[-1] not in exception_list}
return data_dict_values
def get_data_dict_list_attr(self, list_attr, excepted=[]):
"""
Return a dictionary of dictionary with all full named keys in the dm and the value of each key from the dm
output : dict[key][attr] for each attr in list_attr
"""
data_dict_values_dict = {}
data_dict_values_list = [self.get_data_dict_attr(
attr, excepted) for attr in list_attr]
for key in data_dict_values_list[0].keys():
data_dict_values_dict[key] = {}
for index, attr in enumerate(list_attr):
data_dict_values_dict[key][attr] = data_dict_values_list[index][key]
return data_dict_values_dict
def convert_data_dict_with_ids(self, dict_to_convert):
''' Return data_dict with ids keys
'''
return self.convert_dict_with_maps(dict_to_convert,
self.data_id_map, keys='ids')
def convert_disciplines_dict_with_full_name(self):
''' Return disciplines dict with namespaced keys
'''
converted_dict = {}
for key, val in self.disciplines_id_map.items():
if key not in converted_dict:
converted_dict[key] = []
if isinstance(val, list):
for val_element in val:
if val_element in self.disciplines_dict:
converted_dict[key].append(
self.disciplines_dict[val_element])
else:
if val in self.disciplines_dict:
converted_dict[key].append(self.disciplines_dict[val])
return converted_dict
def convert_dict_with_maps(self, dict_to_convert, map_full_names_ids, keys='full_names'):
''' Convert dict keys with ids to full_names or full_names to ids
keys: 'full_names' or 'ids'
'''
converted_dict = {}
if keys == 'full_names':
for key, val in map_full_names_ids.items():
if isinstance(val, list):
for val_element in val:
if val_element in dict_to_convert:
# The last val_element overwrites the others ...
converted_dict[key] = dict_to_convert[val_element]
else:
if val in dict_to_convert:
converted_dict[key] = dict_to_convert[val]
elif keys == 'ids':
for key, val in map_full_names_ids.items():
if key in dict_to_convert:
if isinstance(val, list):
for val_element in val:
converted_dict[val_element] = dict_to_convert[key]
else:
converted_dict[val] = dict_to_convert[key]
return converted_dict
def update_with_discipline_dict(self, disc_id, disc_dict):
    ''' Store and update the discipline data into the DM dictionary
    '''
    # Only a sample of the keys is logged to keep debug output readable.
    # NOTE(review): disc_id is not used in the visible part of this method — confirm intent.
    self.logger.debug(
        f'store and update the discipline data into the DM dictionary {list(disc_dict.keys())[:10]} ...')
def | |
import networkx as nx
import random
import matplotlib.pyplot as plt
import os
import ast
import re
import sys
import time
import joblib
import numpy as np
from copy import deepcopy
import pandas as pd
from Features.Feature_env import Features
memodict = {}
################################################################################
################################################################################
################################################################################
######## #############
######## Index #############
######## #############
################################################################################
################################################################################
################################################################################
'''
AAA I/O Functions
Functions for reading newick sequences
AAB ANALYSIS OF SEQUENCE FOR TREES
Two functions:
- Checking whether a CPS reduces a set of trees
- Sequence_Add_Roots (i.e. CompletePartialSequence from the paper) to extend a partial CPS to a CPS
AAC INPUT SET CLASS with CPS methods
Class containing a set of inputs
Methods for running the CP heuristic and improving the sequence
AAD PHYLOGENETIC TREE CLASS
Class for a phylogenetic tree
Contains methods to cherry-pick a tree and to find reducible pairs
Gives the height of a pair in the tree
AAE CutTree CLASS
Class meant for converting a network into a Newick string
It `cuts' the reticulation arcs to produce a tree that can be converted to Newick
AAF PHYLOGENETIC NETWORK CLASS
Class for a phylogenetic network
Contains methods
to add pairs to the network from a sequence
to compute scores of edges (inheritance and number of embedded trees)
to reduce pairs (not used if input consists of only trees)
'''
################################################################################
################################################################################
################################################################################
######## #############
######## AAA I/O Functions #############
######## #############
################################################################################
################################################################################
################################################################################
########
######## Convert Newick to a networkx Digraph with labels (and branch lengths)
########
# Write length newick: convert ":" to "," and then evaluate as list of lists using ast.literal_eval
# Then, in each list, the node is followed by the length of the incoming arc.
# This only works as long as each branch has length and all internal nodes are labeled.
def newick_to_tree(newick, current_labels=None):
    """Convert a Newick string into a networkx DiGraph tree.

    Branch lengths are detected by the presence of ':' in the string; the
    string is normalized so it can be parsed by ast.literal_eval as a
    nested Python list, which is then converted to edges.  Requires all
    internal nodes to be labeled and, when any branch has a length, all
    branches to have one.

    :param newick: the Newick string
    :param current_labels: optional dict mapping taxon names to node ids,
        extended in place for new taxa.  Defaults to a fresh dict; the
        original used a shared mutable default ``dict()`` that was mutated
        by ``nested_list_to_tree``, leaking labels across calls.
    :return: (tree, leaves, current_labels, distances)
    """
    if current_labels is None:
        current_labels = dict()
    distances = False
    # presence of : indicates the use of lengths in the trees
    if ":" in newick:
        distances = True
        # taxon names may already be enclosed by " or ', otherwise, we add these now
        if "'" not in newick and '"' not in newick:
            newick = re.sub(r"([,\(])([a-zA-Z\d]+)", r"\1'\2", newick)
            newick = re.sub(r"([a-zA-Z\d]):", r"\1':", newick)
        newick = newick.replace(":", ",")
    else:
        # taxon names may already be enclosed by " or ', otherwise, we add these now
        if "'" not in newick and '"' not in newick:
            newick = re.sub(r"([,\(])([a-zA-Z\d]+)", r"\1'\2", newick)
            newick = re.sub(r"([a-zA-Z\d])([,\(\)])", r"\1'\2", newick)
    # turn the string into a python nested list using [ instead of (
    newick = newick.replace("(", "[")
    newick = newick.replace(")", "]")
    nestedtree = ast.literal_eval(newick)
    # parse the nested list into a list of edges with some additional information about the leaves
    # we start with the root 2, so that we can append a root edge (1,2)
    edges, leaves, current_labels, current_node = nested_list_to_tree(
        nestedtree, 2, current_labels, distances=distances)
    # put all this information into a networkx DiGraph with or without distances/lengths
    tree = nx.DiGraph()
    if distances:
        edges.append((1, 2, 0))
        tree.add_weighted_edges_from(edges, weight='length')
    else:
        edges.append((1, 2))
        tree.add_edges_from(edges)
    add_node_attributes(tree, distances=distances, root=2)
    return tree, leaves, current_labels, distances
# Auxiliary function to convert list of lists to tree (graph)
# Works recursively, where we keep track of the nodes we have already used
# Leaves are nodes with negative integer as ID, and already existing taxa are coupled to node IDs by current_labels.
def nested_list_to_tree(nestedList, next_node, current_labels, distances=False):
    """Recursively convert a nested taxon list into tree edges.

    Internal nodes get increasing positive ids starting at ``next_node``;
    leaves are identified by non-positive ids assigned through
    ``current_labels`` (mutated in place: a new taxon gets id
    ``-len(current_labels)``).

    :param nestedList: nested lists of taxa; when ``distances`` is True,
        each subtree is immediately followed by its branch length
    :param next_node: first free internal-node id
    :param current_labels: dict taxon-name -> leaf id, updated in place
    :param distances: True when branch lengths are interleaved
    :return: (edges, leaves, current_labels, next_free_node_id)
    """
    edges = []
    leaves = set()
    top_node = next_node
    current_node = next_node + 1
    if distances:
        # subtree and its branch length are adjacent elements
        items = [(nestedList[i], nestedList[i + 1])
                 for i in range(0, len(nestedList), 2)]
    else:
        # no lengths: each subtree is simply an element of nestedList
        items = [(subtree, None) for subtree in nestedList]
    for subtree, length in items:
        if isinstance(subtree, list):  # internal node: recurse
            edge = (top_node, current_node, length) if distances \
                else (top_node, current_node)
            edges.append(edge)
            extra_edges, extra_leaves, current_labels, current_node = nested_list_to_tree(
                subtree, current_node, current_labels, distances=distances)
        else:  # leaf: look up / assign its id
            key = str(subtree)
            if key not in current_labels:
                current_labels[key] = -len(current_labels)
            edge = (top_node, current_labels[key], length) if distances \
                else (top_node, current_labels[key])
            edges.append(edge)
            extra_edges = []
            extra_leaves = {current_labels[key]}
        edges += extra_edges
        leaves |= extra_leaves
    return edges, leaves, current_labels, current_node
# per node, add the edge based and comb height of the node as an attribute.
def add_node_attributes(tree, distances=True, root=0):
attrs = dict()
for x in tree.nodes:
if distances:
try:
attrs[x] = {"node_length": nx.algorithms.shortest_paths.generic.shortest_path_length(tree, root, x, weight="length"),
"node_comb": nx.algorithms.shortest_paths.generic.shortest_path_length(tree, root, x)}
except nx.exception.NetworkXNoPath:
attrs[x] = {"node_length": None, "node_comb": None}
else:
try:
attrs[x] = {"node_comb": nx.algorithms.shortest_paths.generic.shortest_path_length(tree, root, x)}
except nx.exception.NetworkXNoPath:
attrs[x] = {"node_comb": None}
nx.set_node_attributes(tree, attrs)
################################################################################
################################################################################
################################################################################
######## #############
######## AAB ANALYSIS OF SEQUENCE FOR TREES #############
######## #############
################################################################################
################################################################################
################################################################################
# Checks whether a given cherry-picking sequence `seq' reduces a given tree `tree'
# if not, returns false
# otherwise, returns the indices of the pairs that actually reduce a cherry in the tree
def sequence_reduces_tree(seq, tree):
    """Check whether the cherry-picking sequence ``seq`` reduces ``tree``.

    Works on a deep copy, so ``tree`` is left untouched.  Returns the list
    of indices of the pairs that actually reduced a cherry once the tree
    is fully reduced (a single edge left); returns False if the sequence
    does not fully reduce the tree.
    """
    working_tree = deepcopy(tree)
    reducing_indices = []
    for index, pair in enumerate(seq):
        if not working_tree.reduce_pair(*pair):
            continue
        reducing_indices.append(index)
        if len(working_tree.nw.edges) == 1:
            return reducing_indices
    return False
# Modifies a cherry-picking sequence so that it represents a network with exactly one root.
# A sequence may be such that reconstructing a network from the sequence results in multiple roots
# This function adds some pairs to the sequence so that the network has a single root.
# returns the new sequence, and also modifies the sets of trees reduced by each pair in the sequence,
# so that the new pairs are also represented (they reduce no trees)
def sequence_add_roots(seq, red_trees):
    """Extend a CPS so the reconstructed network has exactly one root.

    Reading ``seq`` backwards, a pair whose second element has not been
    seen yet marks a root.  Pairs chaining those roots together are then
    appended so that only the last pair introduces a new second element.

    :param seq: list of cherry-picked pairs, modified in place
    :param red_trees: per-pair sets of reduced trees, modified in place
        (an empty set is appended for each added pair — they reduce no trees)
    :return: (seq, red_trees)

    NOTE(review): ``roots`` is collected in a set, so the order of the
    appended chaining pairs is not deterministic across runs — confirm
    callers do not rely on a specific order.
    """
    leaves_encountered = set()
    roots = set()
    # The roots can be found by going back through the sequence and finding
    # pairs where the second element has not been encountered yet.
    for pair in reversed(seq):
        if pair[1] not in leaves_encountered:
            roots.add(pair[1])
        leaves_encountered.add(pair[0])
        leaves_encountered.add(pair[1])
    roots = list(roots)
    # Chain consecutive roots together.  (The original kept a dead `i`
    # counter here; it was never read and has been removed.)
    for i in range(len(roots) - 1):
        seq.append((roots[i], roots[i + 1]))
        # none of the trees are reduced by the new pairs.
        red_trees.append(set())
    return seq, red_trees
################################################################################
################################################################################
################################################################################
######## #############
######## AAC INPUT SET CLASS with CPS methods #############
######## #############
################################################################################
################################################################################
################################################################################
# Methods for sets of phylogenetic trees
class Input_Set:
def __init__(self, newick_strings=(), instance=0):
    """Build an input set of phylogenetic trees from Newick strings.

    :param newick_strings: iterable of Newick strings, one per tree.
        An immutable default replaces the original shared mutable ``[]``.
    :param instance: numeric id of this problem instance.
    """
    # The dictionary of trees, keyed by insertion order (0, 1, ...)
    self.trees = dict()
    # mapping taxon label -> node id, shared across all trees
    self.labels = dict()
    # reverse mapping node id -> taxon label (filled at the end)
    self.labels_reversed = dict()
    self.leaves = set()
    self.instance = instance
    # the current best sequence we have found for this set of trees
    self.best_seq = None
    # the list of reduced trees for each of the pairs in the best sequence
    self.best_red_trees = None
    # the best sequence for the algorithm using lengths as input as well
    self.best_seq_with_lengths = None
    # the sets of reduced trees for each pair in this sequence
    self.best_seq_with_lengths_red_trees = None
    # the height of each pair in this sequence
    self.best_seq_with_lengths_heights = None
    # stays True only while every parsed tree carried branch lengths
    self.distances = True
    # computation times
    self.CPS_Compute_Time = 0
    self.CPS_Compute_Reps = 0
    self.DurationPerTrial = []
    self.RetPerTrial = []
    # read the input trees in 'newick_strings'
    for newick in newick_strings:
        tree = PhT()
        self.trees[len(self.trees)] = tree
        self.labels, distances_in_tree = tree.tree_from_newick(
            newick=newick, current_labels=self.labels)
        self.distances = self.distances and distances_in_tree
    # NOTE: from here on self.leaves is a list of labels (it was a set above)
    self.leaves = list(self.labels)
    # make a reverse dictionary for the leaf labels, to look up the label of a given node
    for label, node_id in self.labels.items():
        self.labels_reversed[node_id] = label
# Make a deepcopy of an instance
def __deepcopy__(self, memodict=None):
    """Deep-copy the tree/label data of this instance.

    Only trees, labels, labels_reversed and leaves are copied; the
    best-sequence attributes keep the fresh defaults set by ``__init__``.

    :param memodict: deepcopy memo dict; a fresh one is used when None.
        The original signature used a mutable default ``{}``, which let
        the memo (and thus object identity decisions) leak between
        unrelated direct calls of this method.
    """
    if memodict is None:
        memodict = {}
    copy_inputs = Input_Set()
    copy_inputs.trees = deepcopy(self.trees, memodict)
    copy_inputs.labels = deepcopy(self.labels, memodict)
    copy_inputs.labels_reversed = deepcopy(self.labels_reversed, memodict)
    copy_inputs.leaves = deepcopy(self.leaves, memodict)
    # best_seq / best_red_trees are intentionally not copied.
    return copy_inputs
# Find new cherry-picking sequences for the trees and update the best found
def CPSBound(self, repeats=1, progress=False, time_limit=None,
reduce_trivial=False, pick_lowest_cherry=False, pick_ml=False, model_name=None,
str_features=None):
# Set the specific heuristic that we use, based on the user input and whether the trees have lengths
Heuristic = self.CPHeuristic
# Initialize the recorded best sequences and corresponding data
best = None
red_trees_best = | |
to extension-specific structure
("onSubdevice", ze_bool_t), ## [out] True if the resource is located on a sub-device; false means
## that the resource is on the device of the calling Sysman handle
("subdeviceId", c_ulong), ## [out] If onSubdevice is true, this gives the ID of the sub-device
("haveFan", ze_bool_t), ## [out] True if the power supply has a fan
("ampLimit", c_int32_t) ## [out] The maximum electrical current in milliamperes that can be
## drawn. A value of -1 indicates that this property cannot be
## determined.
]
###############################################################################
## @brief Dynamic state of the power supply
class zes_psu_state_t(Structure):
    """Dynamic state of the power supply (generated ctypes mirror of the C struct)."""
    _fields_ = [
        ("stype", zes_structure_type_t),                                ## [in] type of this structure
        ("pNext", c_void_p),                                            ## [in][optional] pointer to extension-specific structure
        ("voltStatus", zes_psu_voltage_status_t),                       ## [out] The current PSU voltage status
        ("fanFailed", ze_bool_t),                                       ## [out] Indicates if the fan has failed
        ("temperature", c_int32_t),                                     ## [out] Read the current heatsink temperature in degrees Celsius. A
                                                                        ## value of -1 indicates that this property cannot be determined.
        ("current", c_int32_t)                                          ## [out] The amps being drawn in milliamperes. A value of -1 indicates
                                                                        ## that this property cannot be determined.
    ]
###############################################################################
## @brief RAS error type
class zes_ras_error_type_v(IntEnum):
    """RAS error type: whether the hardware corrected the errors."""
    CORRECTABLE = 0                                                 ## Errors were corrected by hardware
    UNCORRECTABLE = 1                                               ## Errors were not corrected
class zes_ras_error_type_t(c_int):
    """c_int wrapper whose str() shows the matching zes_ras_error_type_v member."""
    def __str__(self):
        member = zes_ras_error_type_v(self.value)
        return str(member)
###############################################################################
## @brief RAS error categories
class zes_ras_error_cat_v(IntEnum):
    """RAS error categories used to break down error counters."""
    RESET = 0                                                       ## The number of accelerator engine resets attempted by the driver
    PROGRAMMING_ERRORS = 1                                          ## The number of hardware exceptions generated by the way workloads have
                                                                    ## programmed the hardware
    DRIVER_ERRORS = 2                                               ## The number of low level driver communication errors have occurred
    COMPUTE_ERRORS = 3                                              ## The number of errors that have occurred in the compute accelerator
                                                                    ## hardware
    NON_COMPUTE_ERRORS = 4                                          ## The number of errors that have occurred in the fixed-function
                                                                    ## accelerator hardware
    CACHE_ERRORS = 5                                                ## The number of errors that have occurred in caches (L1/L3/register
                                                                    ## file/shared local memory/sampler)
    DISPLAY_ERRORS = 6                                              ## The number of errors that have occurred in the display
class zes_ras_error_cat_t(c_int):
    """c_int wrapper whose str() shows the matching zes_ras_error_cat_v member."""
    def __str__(self):
        member = zes_ras_error_cat_v(self.value)
        return str(member)
###############################################################################
## @brief The maximum number of categories
ZES_MAX_RAS_ERROR_CATEGORY_COUNT = 7
###############################################################################
## @brief RAS properties
class zes_ras_properties_t(Structure):
    """RAS properties (generated ctypes mirror of the C struct)."""
    _fields_ = [
        ("stype", zes_structure_type_t),                                ## [in] type of this structure
        ("pNext", c_void_p),                                            ## [in,out][optional] pointer to extension-specific structure
        ("type", zes_ras_error_type_t),                                 ## [out] The type of RAS error
        ("onSubdevice", ze_bool_t),                                     ## [out] True if the resource is located on a sub-device; false means
                                                                        ## that the resource is on the device of the calling Sysman handle
        ("subdeviceId", c_ulong)                                        ## [out] If onSubdevice is true, this gives the ID of the sub-device
    ]
###############################################################################
## @brief RAS error details
class zes_ras_state_t(Structure):
    """RAS error details: per-category error counters (generated ctypes struct)."""
    _fields_ = [
        ("stype", zes_structure_type_t),                                ## [in] type of this structure
        ("pNext", c_void_p),                                            ## [in][optional] pointer to extension-specific structure
        ("category", c_ulonglong * ZES_MAX_RAS_ERROR_CATEGORY_COUNT)    ## [in][out] Breakdown of error by category
    ]
###############################################################################
## @brief RAS error configuration - thresholds used for triggering RAS events
## (::ZES_EVENT_TYPE_FLAG_RAS_CORRECTABLE_ERRORS,
## ::ZES_EVENT_TYPE_FLAG_RAS_UNCORRECTABLE_ERRORS)
##
## @details
## - The driver maintains a total counter which is updated every time a
## hardware block covered by the corresponding RAS error set notifies
## that an error has occurred. When this total count goes above the
## totalThreshold specified below, a RAS event is triggered.
## - The driver also maintains a counter for each category of RAS error
## (see ::zes_ras_state_t for a breakdown). Each time a hardware block of
## that category notifies that an error has occurred, that corresponding
## category counter is updated. When it goes above the threshold
## specified in detailedThresholds, a RAS event is triggered.
class zes_ras_config_t(Structure):
    """RAS error configuration: thresholds that trigger RAS events (generated ctypes struct)."""
    _fields_ = [
        ("stype", zes_structure_type_t),                                ## [in] type of this structure
        ("pNext", c_void_p),                                            ## [in][optional] pointer to extension-specific structure
        ("totalThreshold", c_ulonglong),                                ## [in,out] If the total RAS errors exceeds this threshold, the event
                                                                        ## will be triggered. A value of 0ULL disables triggering the event based
                                                                        ## on the total counter.
        ("detailedThresholds", zes_ras_state_t)                         ## [in,out] If the RAS errors for each category exceed the threshold for
                                                                        ## that category, the event will be triggered. A value of 0ULL will
                                                                        ## disable an event being triggered for that category.
    ]
###############################################################################
## @brief Scheduler mode
class zes_sched_mode_v(IntEnum):
    """Scheduler modes for hardware work submission."""
    TIMEOUT = 0                                                     ## Multiple applications or contexts are submitting work to the hardware.
                                                                    ## When higher priority work arrives, the scheduler attempts to pause the
                                                                    ## current executing work within some timeout interval, then submits the
                                                                    ## other work.
    TIMESLICE = 1                                                   ## The scheduler attempts to fairly timeslice hardware execution time
                                                                    ## between multiple contexts submitting work to the hardware
                                                                    ## concurrently.
    EXCLUSIVE = 2                                                   ## Any application or context can run indefinitely on the hardware
                                                                    ## without being preempted or terminated. All pending work for other
                                                                    ## contexts must wait until the running context completes with no further
                                                                    ## submitted work.
    COMPUTE_UNIT_DEBUG = 3                                          ## This is a special mode that must be enabled when debugging an
                                                                    ## application that uses this device e.g. using the Level0 Debug API. It
                                                                    ## has the effect of disabling any timeouts on workload execution time
                                                                    ## and will change workload scheduling to ensure debug accuracy.
class zes_sched_mode_t(c_int):
    """c_int wrapper whose str() shows the matching zes_sched_mode_v member."""
    def __str__(self):
        member = zes_sched_mode_v(self.value)
        return str(member)
###############################################################################
## @brief Properties related to scheduler component
class zes_sched_properties_t(Structure):
    """Properties of a scheduler component (generated ctypes mirror of the C struct)."""
    _fields_ = [
        ("stype", zes_structure_type_t),                                ## [in] type of this structure
        ("pNext", c_void_p),                                            ## [in,out][optional] pointer to extension-specific structure
        ("onSubdevice", ze_bool_t),                                     ## [out] True if this resource is located on a sub-device; false means
                                                                        ## that the resource is on the device of the calling Sysman handle
        ("subdeviceId", c_ulong),                                       ## [out] If onSubdevice is true, this gives the ID of the sub-device
        ("canControl", ze_bool_t),                                      ## [out] Software can change the scheduler component configuration
                                                                        ## assuming the user has permissions.
        ("engines", zes_engine_type_flags_t),                           ## [out] Bitfield of accelerator engine types that are managed by this
                                                                        ## scheduler component. Note that there can be more than one scheduler
                                                                        ## component for the same type of accelerator engine.
        ("supportedModes", c_ulong)                                     ## [out] Bitfield of scheduler modes that can be configured for this
                                                                        ## scheduler component (bitfield of 1<<::zes_sched_mode_t).
    ]
###############################################################################
## @brief Disable forward progress guard timeout.
ZES_SCHED_WATCHDOG_DISABLE = (~(0ULL))
###############################################################################
## @brief Configuration for timeout scheduler mode (::ZES_SCHED_MODE_TIMEOUT)
class zes_sched_timeout_properties_t(Structure):
    """Configuration for timeout scheduler mode, ::ZES_SCHED_MODE_TIMEOUT (generated ctypes struct)."""
    _fields_ = [
        ("stype", zes_structure_type_t),                                ## [in] type of this structure
        ("pNext", c_void_p),                                            ## [in,out][optional] pointer to extension-specific structure
        ("watchdogTimeout", c_ulonglong)                                ## [in,out] The maximum time in microseconds that the scheduler will wait
                                                                        ## for a batch of work submitted to a hardware engine to complete or to
                                                                        ## be preempted so as to run another context.
                                                                        ## If this time is exceeded, the hardware engine is reset and the context terminated.
                                                                        ## If set to ::ZES_SCHED_WATCHDOG_DISABLE, a running workload can run as
                                                                        ## long as it wants without being terminated, but preemption attempts to
                                                                        ## run other contexts are permitted but not enforced.
    ]
###############################################################################
## @brief Configuration for timeslice scheduler mode
## (::ZES_SCHED_MODE_TIMESLICE)
class zes_sched_timeslice_properties_t(Structure):
    """Configuration for timeslice scheduler mode, ::ZES_SCHED_MODE_TIMESLICE (generated ctypes struct)."""
    _fields_ = [
        ("stype", zes_structure_type_t),                                ## [in] type of this structure
        ("pNext", c_void_p),                                            ## [in,out][optional] pointer to extension-specific structure
        ("interval", c_ulonglong),                                      ## [in,out] The average interval in microseconds that a submission for a
                                                                        ## context will run on a hardware engine before being preempted out to
                                                                        ## run a pending submission for another context.
        ("yieldTimeout", c_ulonglong)                                   ## [in,out] The maximum time in microseconds that the scheduler will wait
                                                                        ## to preempt a workload running on an engine before deciding to reset
                                                                        ## the hardware engine and terminating the associated context.
    ]
###############################################################################
## @brief Standby hardware components
class zes_standby_type_v(IntEnum):
    """Standby hardware components that can be controlled."""
    GLOBAL = 0                                                      ## Control the overall standby policy of the device/sub-device
class zes_standby_type_t(c_int):
    """c_int wrapper whose str() shows the matching zes_standby_type_v member."""
    def __str__(self):
        member = zes_standby_type_v(self.value)
        return str(member)
###############################################################################
## @brief Standby hardware component properties
class zes_standby_properties_t(Structure):
    """Standby hardware component properties (generated ctypes mirror of the C struct)."""
    _fields_ = [
        ("stype", zes_structure_type_t),                                ## [in] type of this structure
        ("pNext", c_void_p),                                            ## [in,out][optional] pointer to extension-specific structure
        ("type", zes_standby_type_t),                                   ## [out] Which standby hardware component this controls
        ("onSubdevice", ze_bool_t),                                     ## [out] True if the resource is located on a sub-device; false means
                                                                        ## that the resource is on the device of the calling Sysman handle
        ("subdeviceId", c_ulong)                                        ## [out] If onSubdevice is true, this gives the ID of the sub-device
    ]
###############################################################################
## @brief Standby promotion modes
class | |
# src/view/views/console/worksheet/ResultGrid.py
'''
Created on 17-Feb-2017
@author: vijay
'''
import wx
import wx.dataview as dv
from wx import ListCtrl
from src.view.constants import ID_COPY_COLUMN_HEADER
import string
import wx.grid as gridlib
import logging.config
from src.view.constants import LOG_SETTINGS
from src.view.util.FileOperationsUtil import FileOperations
from _io import StringIO
import io
logging.config.dictConfig(LOG_SETTINGS)
logger = logging.getLogger('extensive')
#############################################################################
class MegaImageRenderer(gridlib.GridCellRenderer):
    """Grid cell renderer that draws an image decoded from BLOB bytes.

    The bitmap is decoded from the raw bytes stored in ``grid.data`` for
    the cell being drawn, then blitted clipped to the cell rectangle.
    """
    def __init__(self, table, blobData=None):
        """
        :param table: the grid table this renderer draws for
        :param blobData: optional raw image bytes.  NOTE(review): Draw()
            reads the blob from ``grid.data`` instead — confirm whether
            this parameter is still needed.
        """
        gridlib.GridCellRenderer.__init__(self)
        self.table = table
        self.blobData = blobData
        self._choices = []
        self.colSize = None
        self.rowSize = None
    def Draw(self, grid, attr, dc, rect, row, col, isSelected):
        """Decode the cell's blob into a bitmap and blit it into the cell."""
        # grid.data rows are 1-based: row 0 of the source data holds headers.
        blobData = grid.data.get(row + 1)[col]
        bmp = wx.Bitmap(2, 2)  # tiny placeholder used when decoding fails
        try:
            img1 = wx.Image(io.BytesIO(blobData))
            bmp = wx.Bitmap(img1)
        except Exception as e:
            # Keep rendering robust: log (was print) and fall back to the
            # placeholder bitmap, consistent with the module-wide logger.
            logger.error(e)
        image = wx.MemoryDC()
        image.SelectObject(bmp)
        # clear the background
        dc.SetBackgroundMode(wx.SOLID)
        if isSelected:
            dc.SetBrush(wx.Brush(wx.BLUE, wx.BRUSHSTYLE_SOLID))
            dc.SetPen(wx.Pen(wx.BLUE, 1, wx.PENSTYLE_SOLID))
        else:
            dc.SetBrush(wx.Brush(wx.WHITE, wx.BRUSHSTYLE_SOLID))
            dc.SetPen(wx.Pen(wx.WHITE, 1, wx.PENSTYLE_SOLID))
        dc.DrawRectangle(rect)
        # copy the image but only to the size of the grid cell
        width, height = bmp.GetWidth(), bmp.GetHeight()
        if width > rect.width - 2:
            width = rect.width - 2
        if height > rect.height - 2:
            height = rect.height - 2
        dc.Blit(rect.x + 1, rect.y + 1, width, height,
                image,
                0, 0, wx.COPY, True)
#############################################################################
#---------------------------------------------------------------------------
class MyCellEditor(gridlib.PyGridCellEditor):
    """
    This is a sample GridCellEditor that shows you how to make your own custom
    grid editors.  All the methods that can be overridden are shown here.  The
    ones that must be overridden are marked with "*Must Override*" in the
    docstring.
    """
    def __init__(self):
        logger.info("MyCellEditor ctor\n")
        gridlib.PyGridCellEditor.__init__(self)
    def Create(self, parent, id, evtHandler):
        """
        Called to create the control, which must derive from wx.Control.
        *Must Override*
        """
        logger.info("MyCellEditor: Create\n")
        self._tc = wx.TextCtrl(parent, id, "")
        self._tc.SetInsertionPoint(0)
        self.SetControl(self._tc)
        if evtHandler:
            self._tc.PushEventHandler(evtHandler)
    def SetSize(self, rect):
        """
        Called to position/size the edit control within the cell rectangle.
        If you don't fill the cell (the rect) then be sure to override
        PaintBackground and do something meaningful there.
        """
        logger.info("MyCellEditor: SetSize %s\n" % rect)
        self._tc.SetDimensions(rect.x, rect.y, rect.width + 2, rect.height + 2,
                               wx.SIZE_ALLOW_MINUS_ONE)
    def Show(self, show, attr):
        """
        Show or hide the edit control.  You can use the attr (if not None)
        to set colours or fonts for the control.
        """
        logger.info("MyCellEditor: Show(self, %s, %s)\n" % (show, attr))
        super(MyCellEditor, self).Show(show, attr)
    def PaintBackground(self, rect, attr):
        """
        Draws the part of the cell not occupied by the edit control.  The
        base class version just fills it with background colour from the
        attribute.  In this class the edit control fills the whole cell so
        don't do anything at all in order to reduce flicker.
        """
        logger.info("MyCellEditor: PaintBackground\n")
    def BeginEdit(self, row, col, grid):
        """
        Fetch the value from the table and prepare the edit control
        to begin editing.  Set the focus to the edit control.
        *Must Override*
        """
        logger.info("MyCellEditor: BeginEdit (%d,%d)\n" % (row, col))
        self.startValue = grid.GetTable().GetValue(row, col)
        self._tc.SetValue(self.startValue)
        self._tc.SetInsertionPointEnd()
        self._tc.SetFocus()
        # For this example, select the text
        self._tc.SetSelection(0, self._tc.GetLastPosition())
    def EndEdit(self, row, col, grid, oldVal):
        """
        End editing the cell.  This function must check if the current
        value of the editing control is valid and different from the
        original value (available as oldval in its string form.)  If
        it has not changed then simply return None, otherwise return
        the value in its string form.
        *Must Override*
        """
        logger.info("MyCellEditor: EndEdit (%s)\n" % oldVal)
        val = self._tc.GetValue()
        if val != oldVal:  # self.startValue:
            return val
        else:
            return None
    def ApplyEdit(self, row, col, grid):
        """
        This function should save the value of the control into the
        grid or grid table.  It is called only after EndEdit() returns
        a non-None value.
        *Must Override*
        """
        logger.info("MyCellEditor: ApplyEdit (%d,%d)\n" % (row, col))
        val = self._tc.GetValue()
        grid.GetTable().SetValue(row, col, val)  # update the table
        self.startValue = ''
        self._tc.SetValue('')
    def Reset(self):
        """
        Reset the value in the control back to its starting value.
        *Must Override*
        """
        logger.info("MyCellEditor: Reset\n")
        self._tc.SetValue(self.startValue)
        self._tc.SetInsertionPointEnd()
    def IsAcceptedKey(self, evt):
        """
        Return True to allow the given key to start editing: the base class
        version only checks that the event has no modifiers.  F2 is special
        and will always start the editor.
        """
        logger.info("MyCellEditor: IsAcceptedKey: %d\n" % (evt.GetKeyCode()))
        # We can ask the base class to do it, or do it ourselves:
        return (not (evt.ControlDown() or evt.AltDown()) and
                evt.GetKeyCode() != wx.WXK_SHIFT)
    def StartingKey(self, evt):
        """
        If the editor is enabled by pressing keys on the grid, this will be
        called to let the editor do something about that first key if desired.
        """
        logger.info("MyCellEditor: StartingKey %d\n" % evt.GetKeyCode())
        key = evt.GetKeyCode()
        ch = None
        if key in [wx.WXK_NUMPAD0, wx.WXK_NUMPAD1, wx.WXK_NUMPAD2, wx.WXK_NUMPAD3,
                   wx.WXK_NUMPAD4, wx.WXK_NUMPAD5, wx.WXK_NUMPAD6, wx.WXK_NUMPAD7,
                   wx.WXK_NUMPAD8, wx.WXK_NUMPAD9
                   ]:
            # (fixed: the original had a duplicated "ch = ch =" assignment)
            ch = chr(ord('0') + key - wx.WXK_NUMPAD0)
        elif 0 <= key < 256 and chr(key) in string.printable:
            ch = chr(key)
        if ch is not None:
            # For this example, replace the text.  Normally we would append it.
            self._tc.SetValue(ch)
            self._tc.SetInsertionPointEnd()
        else:
            evt.Skip()
    def StartingClick(self):
        """
        If the editor is enabled by clicking on the cell, this method will be
        called to allow the editor to simulate the click on the control if
        needed.
        """
        logger.info("MyCellEditor: StartingClick\n")
    def Destroy(self):
        """final cleanup"""
        logger.info("MyCellEditor: Destroy\n")
        super(MyCellEditor, self).Destroy()
    def Clone(self):
        """
        Create a new object which is the copy of this one
        *Must Override*
        """
        logger.info("MyCellEditor: Clone\n")
        return MyCellEditor()
class ResultDataGrid(gridlib.Grid):
def __init__(self, parent, model=None, data=None):
    """Create an empty result grid.

    :param parent: parent window
    :param model: not used in this constructor — TODO confirm whether callers rely on it
    :param data: not used here; the grid content is supplied later via addData()/setData()
    """
    gridlib.Grid.__init__(self, parent, -1, style=wx.BORDER_NONE)
    self.fileOperations = FileOperations()
    # Start with a 0x0 grid; addData() grows/shrinks it to fit the result set.
    self.CreateGrid(0, 0)
    self.RowLabelSize = 32
    # Handlers bound here are defined elsewhere in this class.
    self.Bind(gridlib.EVT_GRID_CELL_RIGHT_CLICK, self.showGridCellPopupMenu)
    self.Bind(gridlib.EVT_GRID_LABEL_RIGHT_CLICK, self.showHeaderPopupMenu)
    self.Bind(wx.EVT_KEY_DOWN, self.OnKey)
    self.Bind(gridlib.EVT_GRID_CELL_CHANGED, self.cellChange)
    # data: dict of row-index -> row values, populated by addData()
    self.data = None
    # sqlText: the SQL statement that produced the current result set
    self.sqlText = ''
    # self.SetCellAlignment(row, col, horiz, vert)
    # Somebody changed the grid so the type registry takes precedence
    # over the default attribute set for editors and renderers, so we
    # have to set null handlers for the type registry before the
    # default editor will get used otherwise...
    # self.RegisterDataType(wxGRID_VALUE_STRING, None, None)
    # self.SetDefaultEditor(MyCellEditor())
    # Or we could just do it like this:
    # self.RegisterDataType(wx.GRID_VALUE_STRING,
    #                       wx.GridCellStringRenderer(),
    #                       MyCellEditor())
    # )
    # but for this example, we'll just set the custom editor on one cell
    # self.SetCellEditor(1, 0, MyCellEditor())
    # self.SetCellValue(1, 0, "Try to edit this box")
    #
    # # and on a column
    # attr = gridlib.GridCellAttr()
    # attr.SetEditor(MyCellEditor())
    # self.SetColAttr(2, attr)
    # self.SetCellValue(1, 2, "or any in this column")
    #
    # self.addData()
def setSqlText(self, sqlText):
    """Remember the SQL statement that produced the current result set."""
    self.sqlText = sqlText
def setData(self, data):
    """Set the backing data (dict of row-index -> row values) without redrawing."""
    self.data = data
def getData(self):
    """Return the backing data set by setData()/addData()."""
    return self.data
def cellChange(self, evt):
    """Log the new value of an edited cell, then let the event propagate."""
    row, col = evt.GetRow(), evt.GetCol()
    value = self.GetTable().GetValue(row, col)
    logger.info(f'cellChange ({row,col}):{value}')
    evt.Skip()
    return
def addData(self, data=None):
self.data = data
# logger.info(self.GetRowSizes())
# logger.info(self.GetColSizes())
self.ClearGrid()
try:
if data and len(data) > 0:
dataTypeRow = data.get(-1)
# logger.info('rows:', self.GetNumberRows())
# logger.info('cols:', self.GetNumberCols())
# self.DeleteRows()
currentRows, currentCols = (self.GetNumberRows(), self.GetNumberCols())
newRows = len(data) - 1
if dataTypeRow:
newRows = newRows - 1
newCols = len(data[0])
# self.AppendRows(numRows=len(data)-1, updateLabels=True)
# if len(data) > 0 :
# self.AppendCols(numCols=len(data[0]), updateLabels=True)
if newRows < currentRows:
# - Delete rows:
self.DeleteRows(0, currentRows - newRows, True)
if newRows > currentRows:
# - append currentRows:
self.AppendRows(newRows - currentRows)
if newCols < currentCols:
# - Delete rows:
self.DeleteCols(pos=0, numCols=currentCols - newCols, updateLabels=True)
if newCols > currentCols:
# - append currentRows:
self.AppendCols(newCols - currentCols)
for dataKey, dataValue in data.items():
# logger.info(dataKey, dataValue)
for idx, colValue in enumerate(dataValue):
# logger.info(idx, dataValue)
if dataKey == 0:
self.SetColLabelValue(idx, str(colValue))
elif dataKey > 0:
row = dataKey - 1
col = idx
try:
# if col==3:
# size=self.GetCellSize(row, col)
# data='3.jpg'
# self.SetCellRenderer(row, col, MegaImageRenderer(self.GetTable(), data))
# elif str(colValue).startswith('-______-'):
if str(colValue).startswith('-______-'):
newStringValue = str(colValue).replace('-______-', '')
self.SetCellFont(row, col, wx.Font(10, wx.FONTFAMILY_SCRIPT, wx.FONTSTYLE_ITALIC, wx.FONTWEIGHT_NORMAL))
self.SetCellTextColour(row, col, wx.LIGHT_GREY)
self.SetCellValue(row, col, newStringValue)
else:
if dataTypeRow and dataTypeRow[col].lower() == 'blob':
# data='3.jpg'
if str(colValue).startswith('-______-'):
newStringValue = str(colValue).replace('-______-', '')
self.SetCellFont(row, col, wx.Font(10, wx.FONTFAMILY_SCRIPT, wx.FONTSTYLE_ITALIC, wx.FONTWEIGHT_NORMAL))
self.SetCellTextColour(row, col, wx.LIGHT_GREY)
self.SetCellValue(row, col, newStringValue)
else:
self.SetCellRenderer(row, col, MegaImageRenderer(self.GetTable(), colValue))
elif dataTypeRow and | |
<reponame>JulyKikuAkita/PythonPrac<gh_stars>1-10
__source__ = 'https://github.com/kamyu104/LeetCode/blob/master/Python/arithmetic-slices-ii-subsequence.py'
# https://leetcode.com/problems/arithmetic-slices-ii-subsequence/
# Time: O(n^2)
# Space: O(n * d)
#
# # Description: Leetcode # 413. Arithmetic Slices
#
# A sequence of numbers is called arithmetic if it consists of at least three elements
# and if the difference between any two consecutive elements is the same.
#
# For example, these are arithmetic sequences:
#
# 1, 3, 5, 7, 9
# 7, 7, 7, 7
# 3, -1, -5, -9
# The following sequence is not arithmetic.
#
# 1, 1, 2, 5, 7
#
# A zero-indexed array A consisting of N numbers is given.
# A subsequence slice of that array is any sequence of integers (P0, P1, ..., Pk)
# such that 0 <= P0 < P1 < ... < Pk < N.
#
# A subsequence slice (P0, P1, ..., Pk) of array A is called arithmetic
# if the sequence A[P0], A[P1], ..., A[Pk-1], A[Pk] is arithmetic. In particular, this means that k >= 2.
#
# The function should return the number of arithmetic subsequence slices in the array A.
#
# The input contains N integers. Every integer is in the range of -2^31 and 2^31-1 and 0 <= N <= 1000.
# The output is guaranteed to be less than 2^31-1.
#
#
# Example:
#
# Input: [2, 4, 6, 8, 10]
#
# Output: 7
#
# Explanation:
# All arithmetic subsequence slices are:
# [2,4,6]
# [4,6,8]
# [6,8,10]
# [2,4,6,8]
# [4,6,8,10]
# [2,4,6,8,10]
# [2,6,10]
# Baidu
# Hide Tags Dynamic Programming
# Hide Similar Problems (M) Arithmetic Slices
#
import unittest
import collections
class Solution(object):
    def numberOfArithmeticSlices(self, A):
        """
        :type A: List[int]
        :rtype: int

        Count arithmetic subsequence slices (length >= 3) of A.

        dp[i][diff] = number of "generalized" slices (length >= 2) ending at
        index i with common difference diff.  Every slice ending at j < i with
        difference A[i]-A[j] extends to a valid slice (length >= 3) when A[i]
        is appended, so dp[j][diff] is added to the result.
        Time O(n^2), space O(n * d).
        """
        result = 0
        # use range instead of the Python-2-only xrange so this also runs on
        # Python 3 (range behaves identically here on both versions)
        dp = [collections.defaultdict(int) for _ in range(len(A))]
        for i in range(1, len(A)):
            for j in range(i):
                diff = A[i] - A[j]
                # the new pair (A[j], A[i]) is one generalized (length-2) slice
                dp[i][diff] += 1
                if diff in dp[j]:
                    # each generalized slice ending at j becomes a valid slice
                    dp[i][diff] += dp[j][diff]
                    result += dp[j][diff]
        return result
class TestMethods(unittest.TestCase):
    """Placeholder unit tests for this module."""

    def test_Local(self):
        # trivial sanity check so the test runner always has something to run
        self.assertTrue(1 == 1)
if __name__ == '__main__':
    # run the unit tests when executed as a script
    unittest.main()
Java = '''
#Thought: https://leetcode.com/problems/arithmetic-slices-ii-subsequence/solution/
https://discuss.leetcode.com/topic/67413/detailed-explanation-for-java-o-n-2-solution
At first glance of the problem description,
I had a strong feeling that the solution to the original problem can be built through its subproblems,
i.e., the total number of arithmetic subsequence slices of the whole input array can be constructed from
those of the subarrays of the input array.
While I was on the right track to the final solution,
it's not so easy to figure out the relations between the original problem and its subproblems.
To begin with, let's be ambitious and reformulate our problem as follows:
let T(i) denote the total number of arithmetic subsequence slices that can be formed within subarray A[0, i],
where A is the input array and 0 <= i < n with n = A.length.
Then our original problem will be T(n - 1), and the base case is T(0) = 0.
To make the above idea work, we need to relate T(i) to all T(j) with 0 <= j < i.
Let's take some specific j as an example. If we want to incorporate element A[i] into the subarray A[0, j],
what information do we need?
As far as I can see, we need to know at least the total number of arithmetic subsequence slices ending
at each index k with difference d where 0 <= k <= j and d = A[i] - A[k],
(i.e., for each such slice, its last element is A[k] and the difference between every two consecutive elements is d),
so that adding A[i] to the end of each such slice will make a new arithmetic subsequence slice.
However, our original formulation of T(i) says nothing about the the total number of arithmetic subsequence
slices ending at some particular index and with some particular difference.
This renders it impossible to relate T(i) to all T(j). As a rule of thumb,
when there is difficulty relating original problem to its subproblems,
it usually indicates something goes wrong with your formulation for the original problem.
From our analyses above, each intermediate solution should at least contain information about
the total number of arithmetic subsequence slices ending at some particular index with some particular difference.
So let's go along this line and reformulate our problem as T(i, d),
which denotes the total number of arithmetic subsequence slices ending at index i with difference d.
The base case and recurrence relation are as follows:
Base case: T(0, d) = 0 (This is true for any d).
Recurrence relation: T(i, d) = summation of (1 + T(j, d)) as long as 0 <= j < i && d == A[i] - A[j].
For the recurrence relation, it's straightforward to understand the T(j, d) part:
for each slice ending at index j with difference d == A[i] - A[j],
adding A[i] to the end of the slice will make a new arithmetic subsequence slice,
therefore the total number of such new slices will be the same as T(j, d).
What you are probably wondering is: where does the 1 come from?
The point here is that to make our recurrence relation work properly,
the meaning of arithmetic subsequence slice has to be extended to include slices with only two elements
(of course we will make sure these "phony" slices won't contribute to our final count).
This is because for each slice, we are adding A[i] to its end to form a new one.
If the original slice is of length two, after adding we will have a valid arithmetic subsequence slice
with three elements. Our T(i, d) will include all these "generalized" slices.
And for each pair of elements (A[j], A[i]), they will form one such "generalized" slice (with only two elements)
and thus contribute to one count of T(i, d).
Before jumping to the solution below, I'd like to point out that there are actually overlapping among our subproblems
(for example, both T(i, d) and T(i + 1, d) require knowledge of T(j, d) with 0 <= j < i).
This necessitates memorization of the intermediate results. Each intermediate result is characterized by two integers:
i and d. The former is bounded (i.e., 0 <= i < n) since they are the indices of the element in the input array
while the latter is not as d is the difference of two elements in the input array and can be any value.
For bounded integers, we can use them to index arrays (or lists) while for unbounded ones, use of HashMap
would be more appropriate. So we end up with an array of the same length as the input and whose element type is HashMap.
Here is the Java program (with a quick explanation given at the end). Both time and space complexity are O(n^2).
Some minor points for improving time and space performance are:
Define the type of the difference as Integer type instead of Long.
This is because there is no valid arithmetic subsequence slice that can have difference out of the Integer value range.
But we do need a long integer to filter out those invalid cases.
Preallocate the HashMap to avoid reallocation to deal with extreme cases.
Refrain from using lambda expressions inside loops.
Quick explanation:
res is the final count of all valid arithmetic subsequence slices; map will store the intermediate results T(i, d),
with i indexed into the array and d as the key of the corresponding HashMap.
For each index i, we find the total number of "generalized" arithmetic subsequence slices ending at it
with all possible differences. This is done by attaching A[i] to all slices of T(j, d) with j less than i.
Within the inner loop, we first use a long variable diff to filter out invalid cases,
then get the counts of all valid slices (with element >= 3) as c2 and add it to the final count.
At last we update the count of all "generalized" slices for T(i, d) by adding the three parts together:
the original value of T(i, d), which is c1 here, the counts from T(j, d), which is c2 and lastly the 1 count of the
"two-element" slice (A[j], A[i]).
# 154ms 85.89%
class Solution {
public int numberOfArithmeticSlices(int[] A) {
int re = 0;
HashMap<Integer, Integer>[] maps = new HashMap[A.length];
for(int i=0; i<A.length; i++) {
maps[i] = new HashMap<>();
int num = A[i];
for(int j=0; j<i; j++) {
if((long)num-A[j]>Integer.MAX_VALUE) continue;
if((long)num-A[j]<Integer.MIN_VALUE) continue;
int diff | |
import json
import logging.config
import sys
from json import JSONEncoder
from stn.utils.uuid import generate_uuid
import networkx as nx
from networkx.readwrite import json_graph
from stn.node import Node
from uuid import UUID
import copy
import math
from stn.task import Timepoint
MAX_FLOAT = sys.float_info.max
class MyEncoder(JSONEncoder):
    """JSON encoder that serializes UUIDs as their hex string and falls back
    to an object's __dict__ for everything else."""

    def default(self, obj):
        if not isinstance(obj, UUID):
            return obj.__dict__
        return obj.hex
class STN(nx.DiGraph):
""" Represents a Simple Temporal Network (STN) as a networkx directed graph
"""
logger = logging.getLogger('stn.stn')
    def __init__(self):
        """Create an STN containing only the zero timepoint (node 0)."""
        super().__init__()
        self.add_zero_timepoint()
        # upper bound on the schedule's makespan (unbounded by default)
        self.max_makespan = MAX_FLOAT
        self.risk_metric = None
    def __str__(self):
        """Human-readable dump of all constraints.

        Each constraint is stored as an (i, j)/(j, i) edge pair encoding the
        interval [-w(j,i), w(i,j)]; only the i < j direction is printed.
        Constraints with node 0 (the zero timepoint) are shown as absolute
        time windows; executed timepoints/constraints get an " Ex" suffix.
        """
        to_print = ""
        for (i, j, data) in self.edges.data():
            if self.has_edge(j, i) and i < j:
                # Constraints with the zero timepoint
                if i == 0:
                    timepoint = self.nodes[j]['data']
                    lower_bound = -self[j][i]['weight']
                    upper_bound = self[i][j]['weight']
                    to_print += "Timepoint {}: [{}, {}]".format(timepoint, lower_bound, upper_bound)
                    if timepoint.is_executed:
                        to_print += " Ex"
                # Constraints between the other timepoints
                else:
                    to_print += "Constraint {} => {}: [{}, {}]".format(i, j, -self[j][i]['weight'], self[i][j]['weight'])
                    if self[i][j]['is_executed']:
                        to_print += " Ex"
                to_print += "\n"
        return to_print
    def __eq__(self, other):
        """Equality: same node count and, for every edge of *self*, the same
        edge weight and endpoint node data in *other*.

        NOTE(review): only *self*'s edges are iterated; with equal node
        counts an *other* that has extra edges could still compare equal --
        confirm whether this asymmetry is intended.
        """
        if other is None:
            return False
        if len(other.nodes()) != len(self.nodes()):
            return False
        for (i, j, data) in self.edges.data():
            if other.has_edge(i, j):
                if other[i][j]['weight'] != self[i][j]['weight']:
                    return False
            else:
                return False
            if other.has_node(i):
                if other.nodes[i]['data'] != self.nodes[i]['data'] :
                    return False
            else:
                return False
        return True
def __ne__(self, other):
return not self.__eq__(other)
    def add_zero_timepoint(self):
        """Insert node 0, the reference timepoint that anchors the schedule."""
        node = Node(generate_uuid(), 'zero_timepoint')
        self.add_node(0, data=node)
def get_earliest_time(self):
edges = [e for e in self.edges]
first_edge = edges[0]
return -self[first_edge[1]][0]['weight']
def get_latest_time(self):
edges = [e for e in self.edges]
last_edge = edges[-1]
return self[0][last_edge[0]]['weight']
    def is_empty(self):
        """True when the STN has no edges (networkx ``is_empty`` semantics)."""
        return nx.is_empty(self)
def add_constraint(self, i, j, wji=0.0, wij=float('inf')):
"""
Adds constraint between nodes i and j
i: starting node
j: ending node
The constraint
i --- [-wji, wij] ---> j
Maps to two edges in a distance graph
i --- wij ---> j
i <--- -wji --- j
-wji is the lower bound (minimum allocated time between i and j)
wij is the upper bound (maximum allocated time between i and j)
The default upper and lower bounds are 0 and infinity
"""
# Minimum allocated time between i and j
min_time = -wji
# Maximum allocated time between i and j
max_time = wij
self.add_edge(j, i, weight=min_time, is_executed=False)
self.add_edge(i, j, weight=max_time, is_executed=False)
def remove_constraint(self, i, j):
""" i : starting node id
j : ending node id
"""
self.remove_edge(i, j)
self.remove_edge(j, i)
def get_constraints(self):
"""
Two edges correspond to a constraint.
returns dict with constraints
{(starting_node, ending_node): self[i][j] }
"""
constraints = dict()
for (i, j) in self.edges():
if i < j:
constraints[(i, j)] = self[i][j]
return constraints
    def add_timepoint(self, id, task, node_type, **kwargs):
        """ A timepoint is represented by a node in the STN
        The node can be of node_type:
        - zero_timepoint: references the schedule to the origin
        - start : time at which the robot starts navigating towards the pickup location
        - pickup : time at which the robot arrives starts the pickup action
        - delivery : time at which the robot finishes the delivery action

        Args:
            id: integer node key (position-derived, see add_task)
            task: task whose task_id is stored in the node data
            node_type: one of the types listed above
            **kwargs: forwarded to Node (e.g. action_id)
        """
        node = Node(task.task_id, node_type, **kwargs)
        self.add_node(id, data=node)
    def add_task(self, task, position=1):
        """ A task is added as 3 timepoints and 5 constraints in the STN
        Timepoints:
        - start
        - pickup time
        - delivery time
        Constraints:
        - earliest and latest start times
        - travel time: time to go from current position to pickup position)
        - earliest and latest pickup times
        - work time: time to perform the task (time to transport an object from the pickup to the delivery location)
        - earliest and latest finish times
        If the task is not the first in the STN, add wait time constraint
        Note: Position 0 is reserved for the zero_timepoint
        Add tasks from position 1 onwards
        """
        self.logger.info("Adding task %s in position %s", task.task_id, position)
        # 3 nodes per task and node 0 is reserved, so the task's first node id
        # is 3*position - 2 (written here as 2*position + (position-2))
        start_node_id = 2 * position + (position-2)
        pickup_node_id = start_node_id + 1
        delivery_node_id = pickup_node_id + 1
        # Remove constraint linking start_node_id and previous node (if any)
        if self.has_edge(start_node_id-1, start_node_id) and start_node_id-1 != 0:
            self.logger.debug("Deleting constraint: %s => %s", start_node_id-1, start_node_id)
            self.remove_constraint(start_node_id-1, start_node_id)
        # Displace by 3 all nodes and constraints after position
        mapping = {}
        for node_id, data in self.nodes(data=True):
            if node_id >= start_node_id:
                mapping[node_id] = node_id + 3
        self.logger.debug("mapping: %s ", mapping)
        nx.relabel_nodes(self, mapping, copy=False)
        # Add new timepoints (re-adding a timepoint constraint updates it)
        self.add_timepoint(start_node_id, task, "start")
        self.add_timepoint_constraint(start_node_id, task.get_timepoint("start"))
        self.add_timepoint(pickup_node_id, task, "pickup", action_id=task.pickup_action_id)
        self.add_timepoint_constraint(pickup_node_id, task.get_timepoint("pickup"))
        self.add_timepoint(delivery_node_id, task, "delivery", action_id=task.delivery_action_id)
        self.add_timepoint_constraint(delivery_node_id, task.get_timepoint("delivery"))
        # Add constraints between new nodes
        new_constraints_between = [start_node_id, pickup_node_id, delivery_node_id]
        # Check if there is a node after the new delivery node
        if self.has_node(delivery_node_id+1):
            new_constraints_between.append(delivery_node_id+1)
        # Check if there is a node before the new start node
        if self.has_node(start_node_id-1):
            new_constraints_between.insert(0, start_node_id-1)
        self.logger.debug("New constraints between nodes: %s", new_constraints_between)
        constraints = [((i), (i + 1)) for i in new_constraints_between[:-1]]
        self.logger.debug("Constraints: %s", constraints)
        self.add_intertimepoints_constraints(constraints, task)
    def add_intertimepoints_constraints(self, constraints, task):
        """ Adds constraints between the timepoints of a task
        Constraints between:
        - start and pickup
        - pickup and delivery
        - delivery and start next task (if any)
        Args:
        constraints (list) : list of tuples that defines the pair of nodes between which a new constraint should be added
        Example:
        constraints = [(1, 2), (2, 3)]
        New constraints will be added between nodes 1 and 2 and between 2 and 3
        task (Task): task represented by the constraints
        """
        for (i, j) in constraints:
            self.logger.debug("Adding constraint: %s ", (i, j))
            if self.nodes[i]['data'].node_type == "start":
                # start -> pickup: fixed to the task's mean travel time
                travel_time = self.get_travel_time(task)
                self.add_constraint(i, j, travel_time, travel_time)
            elif self.nodes[i]['data'].node_type == "pickup":
                # pickup -> delivery: fixed to the task's mean work time
                work_time = self.get_work_time(task)
                self.add_constraint(i, j, work_time, work_time)
            elif self.nodes[i]['data'].node_type == "delivery":
                # wait time between finish of one task and start of the next one. Fixed to [0, inf]
                self.add_constraint(i, j)
@staticmethod
def get_travel_time(task):
""" Returns the mean of the travel time (time for going from current pose to pickup pose)
"""
travel_time = task.get_edge("travel_time")
return travel_time.mean
@staticmethod
def get_work_time(task):
""" Returns the mean of the work time (time to transport an object from the pickup to the delivery location)
"""
work_time = task.get_edge("work_time")
return work_time.mean
@staticmethod
def create_timepoint_constraints(r_earliest_pickup, r_latest_pickup, travel_time, work_time):
start_constraint = Timepoint(name="start",
r_earliest_time=r_earliest_pickup - travel_time.mean,
r_latest_time=r_latest_pickup - travel_time.mean)
pickup_constraint = Timepoint(name="pickup",
r_earliest_time=r_earliest_pickup,
r_latest_time=r_latest_pickup)
delivery_constraint = Timepoint(name="delivery",
r_earliest_time=r_earliest_pickup + work_time.mean,
r_latest_time=r_latest_pickup + work_time.mean)
return [start_constraint, pickup_constraint, delivery_constraint]
    def show_n_nodes_edges(self):
        """ Prints the number of nodes and edges in the stn
        (logged at INFO level, one line each).
        """
        self.logger.info("Nodes: %s ", self.number_of_nodes())
        self.logger.info("Edges: %s ", self.number_of_edges())
    def update_task(self, task):
        """Refresh the timepoint and inter-timepoint constraints of a task
        that is already in the STN (re-adding a timepoint constraint updates
        it in place)."""
        position = self.get_task_position(task.task_id)
        # first node id of the task: 3*position - 2 (3 nodes per task, node 0 reserved)
        start_node_id = 2 * position + (position-2)
        pickup_node_id = start_node_id + 1
        delivery_node_id = pickup_node_id + 1
        # Adding an existing timepoint constraint updates the constraint
        self.add_timepoint_constraint(start_node_id, task.get_timepoint("start"))
        self.add_timepoint_constraint(pickup_node_id, task.get_timepoint("pickup"))
        self.add_timepoint_constraint(delivery_node_id, task.get_timepoint("delivery"))
        # Add constraints between new nodes
        new_constraints_between = [start_node_id, pickup_node_id, delivery_node_id]
        # Check if there is a node after the new delivery node
        if self.has_node(delivery_node_id+1):
            new_constraints_between.append(delivery_node_id+1)
        # Check if there is a node before the new start node
        if self.has_node(start_node_id-1):
            new_constraints_between.insert(0, start_node_id-1)
        constraints = [((i), (i + 1)) for i in new_constraints_between[:-1]]
        self.add_intertimepoints_constraints(constraints, task)
    def remove_task(self, position=1):
        """ Removes the task from the given position

        Deletes the task's three timepoints (start, pickup, delivery) and all
        adjacent edges, shifts later node ids down by 3 and, when the removed
        task had neighbours on both sides, re-adds a [0, inf] wait constraint
        between them.
        """
        self.logger.info("Removing task at position: %s", position)
        # first node id of the task: 3*position - 2 (3 nodes per task, node 0 reserved)
        start_node_id = 2 * position + (position-2)
        pickup_node_id = start_node_id + 1
        delivery_node_id = pickup_node_id + 1
        new_constraints_between = list()
        if self.has_node(start_node_id-1) and self.has_node(delivery_node_id+1):
            # NOTE(review): after the -3 relabel below, this pair addresses the
            # node before the removed task and the node that slid into its slot
            new_constraints_between = [start_node_id-1, start_node_id]
        # Remove node and all adjacent edges
        self.remove_node(start_node_id)
        self.remove_node(pickup_node_id)
        self.remove_node(delivery_node_id)
        # Displace by -3 all nodes and constraints after position
        mapping = {}
        for node_id, data in self.nodes(data=True):
            if node_id >= start_node_id:
                mapping[node_id] = node_id - 3
        self.logger.debug("mapping: %s", mapping)
        nx.relabel_nodes(self, mapping, copy=False)
        if new_constraints_between:
            constraints = [((i), (i + 1)) for i in new_constraints_between[:-1]]
            self.logger.debug("Constraints: %s", constraints)
            for (i, j) in constraints:
                if self.nodes[i]['data'].node_type == "delivery":
                    # wait time between finish of one task and start of the next one
                    self.add_constraint(i, j)
    def remove_node_ids(self, node_ids):
        """Remove the given nodes and shift every remaining node id down by 3.

        NOTE(review): the shift is a fixed -3 (one task's worth of nodes), so
        this implicitly assumes ``node_ids`` holds exactly the three
        timepoints of one task -- confirm callers.
        """
        # Assumes that the node_ids are in consecutive order from node_id 1 onwards
        for node_id in node_ids:
            self.remove_node(node_id)
        # Displace all remaining nodes by 3
        mapping = {}
        for node_id, data in self.nodes(data=True):
            if node_id > 0:
                mapping[node_id] = node_id - 3
        nx.relabel_nodes(self, mapping, copy=False)
def get_tasks(self):
| |
<filename>website/views.py
import functools
import json
import string
from random import choice
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core import serializers
from django.core.cache import cache
from django.http import HttpResponse
from django.shortcuts import redirect, render, get_object_or_404
from django.template.context_processors import csrf
from django.template.defaultfilters import register
from django.utils.datetime_safe import datetime
from django.utils.timezone import now
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from pytz import timezone
from .models import (
DBConf, ExperimentConf, NewResultForm, Project, Result,
PLOTTABLE_FIELDS, METRIC_META
)
# For the html template to access dict object
@register.filter
def get_item(dictionary, key):
    """Template filter: return dictionary[key], or None when absent."""
    return dictionary.get(key)
def signup_view(request):
    """Render the signup page with a CSRF token in the context."""
    context = dict(csrf(request))
    return render(request, 'signup.html', context)
def login_view(request):
    """Render the login page with a CSRF token in the context."""
    context = dict(csrf(request))
    return render(request, 'login.html', context)
def auth_and_login(request, onsuccess='/', onfail='/login/'):
    """Authenticate with the POSTed email/password; on success log the user
    in and redirect to *onsuccess*, otherwise redirect to *onfail*."""
    user = authenticate(
        username=request.POST['email'],
        password=request.POST['password']
    )
    if user is None:
        return redirect(onfail)
    login(request, user)
    return redirect(onsuccess)
def create_user(username, email, password):
    """Create, persist and return a new User with a hashed password."""
    new_user = User(username=username, email=email)
    new_user.set_password(password)  # hash, never store the raw password
    new_user.save()
    return new_user
def user_exists(username):
    """Return True if a User with *username* already exists.

    Uses QuerySet.exists(), which issues a cheap EXISTS query instead of
    counting every matching row as the previous ``count() != 0`` did.
    """
    return User.objects.filter(username=username).exists()
def sign_up_in(request):
    """Create an account from the POSTed email/password (when the email is
    free) and log the new user in; otherwise bounce to the login page.

    NOTE(review): the exists-check and create are not atomic -- two
    concurrent signups with the same email could race; confirm a DB-level
    unique constraint covers this.
    """
    post = request.POST
    if not user_exists(post['email']):
        # the email doubles as the username
        create_user(
            username=post['email'],
            email=post['email'],
            password=post['password']
        )
        return auth_and_login(request)
    return redirect("/login/")
@login_required(login_url='/login/')
def logout_view(request):
    """Log the current user out and return to the login page."""
    logout(request)
    return redirect("/login/")
def upload_code_generator(size=6, chars=string.ascii_uppercase+string.digits):
    """Generate a random upload code of *size* characters drawn from *chars*.

    Upload codes authorize result submission (see new_result), so they are
    generated with the cryptographically secure ``secrets`` module instead of
    the predictable ``random`` module.
    """
    from secrets import choice as secure_choice  # local import keeps this fix self-contained
    return ''.join(secure_choice(chars) for _ in range(size))
@login_required(login_url='/login/')
def home(request):
    """Dashboard: list the projects owned by the logged-in user."""
    context = {"projects": Project.objects.filter(user=request.user)}
    context.update(csrf(request))
    return render(request, 'home.html', context)
@login_required(login_url='/login/')
def project(request):
    """Project detail page: its results plus the filter/metric metadata the
    charts need (404 page if the project is not owned by the requester)."""
    data = request.GET
    proj = Project.objects.get(pk=data['id'])
    if proj.user != request.user:
        return render(request, '404.html')
    results = Result.objects.filter(project=proj)
    # collect the distinct DBMS types and group benchmark configs by type
    databases = set()
    benchmarks = {}
    for res in results:
        databases.add(res.db_conf.db_type)
        btype = res.benchmark_conf.benchmark_type
        benchmarks[btype] = benchmarks.get(btype, set())
        benchmarks[btype].add(res.benchmark_conf)
    # one filter entry per configurable experiment field with its observed values
    filters = [{'values': list(set(getattr(res.benchmark_conf, field['field'])
                for res in results)),
                'print': field['print'],
                'field': field['field']}
               for field in ExperimentConf.FILTER_FIELDS
               ]
    context = {'project': proj,
               'db_types': databases,
               'benchmarks': benchmarks,
               'lastrevisions': [10, 50, 200, 1000],
               'defaultlast': 10,
               'defaultequid': False,
               'defaultbenchmark': 'grid',
               'metrics': PLOTTABLE_FIELDS,
               'metric_meta': METRIC_META,
               'defaultmetrics': ['throughput', 'p99_latency'],
               'filters': filters,
               'results': results}
    context.update(csrf(request))
    return render(request, 'project.html', context)
@login_required(login_url='/login/')
def edit_project(request):
    """Render the project create/edit form.

    A non-empty ``id`` query parameter loads the existing project (404 page
    if not owned by the requester); otherwise the form starts blank.
    """
    context = {}
    try:
        if request.GET['id'] != '':
            proj = Project.objects.get(pk=request.GET['id'])
            if proj.user != request.user:
                return render(request, '404.html')
            context['project'] = proj
    except Project.DoesNotExist:
        # unknown id: fall through and present an empty form
        pass
    return render(request, 'edit_project.html', context)
@login_required(login_url='/login/')
def delete_project(request):
    """Delete every POSTed project id the requester owns, then go home.

    NOTE(review): Project.objects.get raises DoesNotExist for an unknown pk
    and it is not caught here -- confirm the ids are always valid.
    """
    for primary_key in request.POST.getlist('projects', []):
        proj = Project.objects.get(pk=primary_key)
        if proj.user == request.user:
            proj.delete()
    return redirect('/')
@login_required(login_url='/login/')
def update_project(request):
    """Create a new project or update an existing one from POST data.

    Posting the id under ``id_new_code`` (instead of ``id``) also rotates the
    project's upload code; new projects always get a fresh 20-character code.
    Redirects to the project's detail page.
    """
    if 'id_new_code' in request.POST:
        proj_id = request.POST['id_new_code']
    else:
        proj_id = request.POST['id']
    if proj_id == '':
        # empty id means "create": stamp creation time, owner and upload code
        proj = Project()
        proj.creation_time = now()
        proj.user = request.user
        proj.upload_code = upload_code_generator(size=20)
    else:
        proj = Project.objects.get(pk=proj_id)
        if proj.user != request.user:
            return render(request, '404.html')
        if 'id_new_code' in request.POST:
            proj.upload_code = upload_code_generator(size=20)
    proj.name = request.POST['name']
    proj.description = request.POST['description']
    proj.last_update = now()
    proj.save()
    return redirect('/project/?id=' + str(proj.pk))
@csrf_exempt
def new_result(request):
    """Endpoint for uploading a benchmark result.

    CSRF-exempt because clients authenticate with the project's upload code
    rather than a session.  Validates the form, resolves the project by its
    upload code and hands the uploaded files to handle_result_file().
    """
    if request.method == 'POST':
        form = NewResultForm(request.POST, request.FILES)
        if not form.is_valid():
            # echo the bound form so the client can see validation errors
            return HttpResponse(str(form))
        try:
            proj = Project.objects.get(
                upload_code=form.cleaned_data['upload_code']
            )
        except Project.DoesNotExist:
            return HttpResponse("wrong upload_code!")
        upload_hash = form.cleaned_data['upload_hash']
        result_ok = form.cleaned_data['result_ok'].lower() == 'true'
        return handle_result_file(proj, request.FILES, upload_hash, result_ok)
    return HttpResponse("POST please\n")
def handle_result_file(proj, files, upload_hash, result_ok):
    """Persist one uploaded benchmark result.

    Parses the summary JSON, validates the DBMS/benchmark types, then
    get-or-creates the matching DBConf and ExperimentConf rows and saves a
    new Result.

    Args:
        proj: Project resolved from the upload code.
        files: request.FILES; expects 'summary_data' (JSON summary) and
            'benchmark_conf_data' (raw benchmark config file).
        upload_hash: git hash reported by the uploader (stored on the Result).
        result_ok: whether the uploader considered the run successful.
    Returns:
        HttpResponse -- 'Success' or an error message.
    """
    summary_lines = json.loads(files['summary_data'].read().decode('utf-8'))
    db_type = summary_lines['DBMS Type'].strip().upper()
    bench_type = summary_lines['Benchmark Type'].strip().upper()
    if db_type not in DBConf.DB_TYPES:
        return HttpResponse(db_type + " db_type Wrong")
    if bench_type not in ExperimentConf.BENCHMARK_TYPES:
        return HttpResponse(bench_type + " bench_type Wrong")
    # Get-or-create the DBConf row for this DBMS type.
    def get_save_db_conf(db_type):
        db_conf = None
        try:
            db_confs = DBConf.objects.filter(db_type=db_type)
            if len(db_confs) < 1:
                raise DBConf.DoesNotExist
            db_conf = db_confs[0]
        except DBConf.DoesNotExist:
            db_conf = DBConf()
            db_conf.db_type = db_type
            db_conf.save()
        return db_conf
    # Get-or-create the ExperimentConf matching the uploaded config file
    # (closes over summary_lines for the extra metadata fields).
    def get_save_bench_conf(proj, benchmark_conf_data, bench_type):
        b_chunks = [str(x).strip() for x in benchmark_conf_data.chunks()]
        bench_conf_str = ''.join(''.join(b_chunks).split('\n'))
        bench_conf = None
        try:
            bench_confs = ExperimentConf.objects.filter(
                configuration=bench_conf_str
            )
            if len(bench_confs) < 1:
                raise ExperimentConf.DoesNotExist
            bench_conf = bench_confs[0]
        except ExperimentConf.DoesNotExist:
            bench_conf = ExperimentConf()
            bench_conf.project = proj
            bench_conf.configuration = bench_conf_str
            bench_conf.benchmark_type = bench_type
            bench_conf.creation_time = now()
            # copy the remaining summary entries onto the conf as attributes
            for key, val in summary_lines.items():
                unwanted = [
                    'Benchmark Type',
                    'Current Timestamp (milliseconds)',
                    'DBMS Type',
                    'DBMS Version',
                    'Latency Distribution',
                    'Throughput (requests/second)',
                ]
                if key not in unwanted:
                    setattr(bench_conf, key, val)
            bench_conf.name = ''.join([
                bench_type,
                '@',
                bench_conf.creation_time.strftime("%Y-%m-%d,%H"),
                '#',
                str(bench_conf.pk)
            ])
            bench_conf.save()
        return bench_conf
    # Build and save the Result row (closes over upload_hash / result_ok).
    def save_result(proj, db_conf, bench_conf, summary_lines):
        res = Result()
        res.db_conf = db_conf
        res.benchmark_conf = bench_conf
        res.project = proj
        # summary timestamp is in milliseconds; convert to seconds (UTC)
        res.timestamp = datetime.fromtimestamp(
            summary_lines['Current Timestamp (milliseconds)'] // 1000,
            timezone("UTC")
        )
        latency_dict = summary_lines['Latency Distribution']
        res.avg_latency = \
            float(latency_dict['Average Latency (microseconds)'])
        res.min_latency = \
            float(latency_dict['Minimum Latency (microseconds)'])
        res.p25_latency = \
            float(latency_dict['25th Percentile Latency (microseconds)'])
        res.p50_latency = \
            float(latency_dict['Median Latency (microseconds)'])
        res.p75_latency = \
            float(latency_dict['75th Percentile Latency (microseconds)'])
        res.p90_latency = \
            float(latency_dict['90th Percentile Latency (microseconds)'])
        res.p95_latency = \
            float(latency_dict['95th Percentile Latency (microseconds)'])
        res.p99_latency = \
            float(latency_dict['99th Percentile Latency (microseconds)'])
        res.max_latency = \
            float(latency_dict['Maximum Latency (microseconds)'])
        res.throughput = \
            float(summary_lines['Throughput (requests/second)'])
        res.git_hash = upload_hash
        res.result_ok = result_ok
        res.save()
    db_conf = get_save_db_conf(db_type)
    bench_conf = get_save_bench_conf(proj,
                                     files['benchmark_conf_data'],
                                     bench_type)
    save_result(proj, db_conf, bench_conf, summary_lines)
    proj.last_update = now()
    proj.save()
    return HttpResponse('Success')
@login_required(login_url='/login/')
def benchmark_configuration(request):
    """Detail page for one benchmark configuration, listing the DBMS types
    that have results for it (404 page if not owned by the requester)."""
    benchmark_conf = get_object_or_404(ExperimentConf, pk=request.GET['id'])
    if benchmark_conf.project.user != request.user:
        return render(request, '404.html')
    results = Result.objects.filter(benchmark_conf=benchmark_conf)
    # distinct DBMS types that produced results for this configuration
    dbs = list(set(res.db_conf.db_type for res in results))
    context = {'benchmark': benchmark_conf,
               'dbs': dbs,
               'metrics': PLOTTABLE_FIELDS,
               'metric_meta': METRIC_META,
               'default_dbconf': dbs,
               'default_metrics': ['throughput', 'p99_latency']}
    return render(request, 'benchmark_conf.html', context)
# Data Format
# error
# metrics as a list of selected metrics
# results
# data for each selected metric
# meta data for the metric
# Result list for the metric in a folded list
@login_required(login_url='/login/')
def get_benchmark_data(request):
    """Return JSON bar-chart data for a benchmark configuration.

    For every selected metric (``met`` query param, comma-separated) the
    response contains one package with unit/orientation metadata plus one
    data point per selected DB configuration (``db`` query param), with
    results ordered by descending throughput.
    """
    data = request.GET
    benchmark_conf = get_object_or_404(ExperimentConf, pk=data['id'])
    if benchmark_conf.project.user != request.user:
        return render(request, '404.html')

    def _throughput_diff(fst, snd):
        # comparator: sort results by descending throughput
        return int(snd.throughput - fst.throughput)

    results = Result.objects.filter(benchmark_conf=benchmark_conf)
    results = sorted(results, key=functools.cmp_to_key(_throughput_diff))
    data_package = {
        'results': [],
        'error': 'None',
        'metrics': data.get('met', 'throughput,p99_latency').split(',')
    }
    db_confs = data['db'].split(',')
    for met in data_package['metrics']:
        data_package['results']. \
            append({'data': [[]], 'tick': [],
                    'unit': METRIC_META[met]['unit'],
                    'lessisbetter': '(less is better)'
                                    if METRIC_META[met]['lessisbetter']
                                    else '(more is better)',
                    'metric': METRIC_META[met]['print']})
        # one data point per selected DB configuration, best result first
        added = {}
        i = len(db_confs)
        for res in results:
            if res.db_conf.pk in added or str(res.db_conf.pk) not in db_confs:
                continue
            added[res.db_conf.pk] = True
            # BUG FIX: the original read the literal attribute ``res.met``
            # (which does not exist on Result) and only populated the last
            # metric's package; read the metric named by ``met`` via getattr
            # and fill every selected metric.
            value = getattr(res, met) * METRIC_META[met]['scale']
            data_package['results'][-1]['data'][0].append([
                i,
                value,
                res.pk,
                value
            ])
            data_package['results'][-1]['tick'].append(res.db_conf.name)
            i -= 1
        data_package['results'][-1]['data'].reverse()
        data_package['results'][-1]['tick'].reverse()
    return HttpResponse(
        json.dumps(data_package),
        content_type='application/json')
@login_required(login_url='/login/')
def get_benchmark_conf_file(request):
    """Serve the raw benchmark configuration as plain text (404 page if the
    requesting user does not own the project)."""
    conf = get_object_or_404(ExperimentConf, pk=request.GET['id'])
    if conf.project.user != request.user:
        return render(request, '404.html')
    return HttpResponse(
        conf.configuration,
        content_type='text/plain')
@login_required(login_url='/login/')
def edit_benchmark_conf(request):
    """Render the benchmark-configuration edit form; a non-empty ``id``
    query parameter loads the existing configuration (404 page if the
    requester does not own it)."""
    context = {}
    conf_id = request.GET['id']
    if conf_id != '':
        ben_conf = get_object_or_404(ExperimentConf, pk=conf_id)
        if ben_conf.project.user != request.user:
            return render(request, '404.html')
        context['benchmark'] = ben_conf
    return render(request, 'edit_benchmark.html', context)
@login_required(login_url='/login/')
def update_benchmark_conf(request):
    """Persist an edited benchmark name/description, then redirect to it.

    Fixes: uses get_object_or_404 instead of bare ``.get()`` (which
    raised an unhandled DoesNotExist for a bad id) and verifies
    ownership before mutating, consistently with the sibling views.
    """
    ben_conf = get_object_or_404(ExperimentConf, pk=request.POST['id'])
    if ben_conf.project.user != request.user:
        return render(request, '404.html')
    ben_conf.name = request.POST['name']
    ben_conf.description = request.POST['description']
    ben_conf.save()
    return redirect('/benchmark_conf/?id=' + str(ben_conf.pk))
@login_required(login_url='/login/')
def result(request):
    """Render the detail page for a single benchmark result.

    Fixes: adds the ``@login_required`` decorator and the ownership
    check that every sibling view performs; without them any visitor
    could view another user's results.
    """
    target = get_object_or_404(Result, pk=request.GET['id'])
    if target.project.user != request.user:
        return render(request, '404.html')

    data_package = {}
    # Results for the same project/benchmark on the same DBMS type.
    results = Result.objects.filter(project=target.project,
                                    benchmark_conf=target.benchmark_conf)
    results = [r for r in results
               if r.db_conf.db_type == target.db_conf.db_type]
    # Other runs of exactly the same benchmark configuration.
    sames = [r for r in results
             if r.benchmark_conf == target.benchmark_conf and r != target]

    # Per-metric plot metadata; data series are filled in client-side.
    for metric in PLOTTABLE_FIELDS:
        data_package[metric] = {
            'data': {},
            'units': METRIC_META[metric]['unit'],
            'lessisbetter': '(less is better)'
            if METRIC_META[metric]['lessisbetter']
            else '(more is better)',
            'metric': METRIC_META[metric]['print']
        }

    context = {
        'result': target,
        'metrics': PLOTTABLE_FIELDS,
        'metric_meta': METRIC_META,
        'default_metrics': ['throughput', 'p99_latency'],
        'data': json.dumps(data_package),
        'same_runs': sames
    }
    return render(request, 'result.html', context)
# Data Format:
# error
# results
# all result data after the filters for the table
# timelines
# data for each benchmark & metric pair
# meta data for the pair
# data as a map<DBMS name, result list>
@login_required(login_url='/login/')
def get_timeline_data(request):
data_package = {'error': 'None', 'timelines': []}
proj = get_object_or_404(Project, pk=request.GET['proj'])
if proj.user != request.user:
return HttpResponse(
json.dumps(data_package),
content_type='application/json')
revs = int(request.GET['revs'])
# Get all results related to the selected DBMS, sort by time
results = Result.objects.filter(project=request.GET['proj'])
def _valid_db(x):
return x.db_conf.db_type in request.GET['db'].split(',')
def cmptime(x, y):
return int((x.timestamp - y.timestamp).total_seconds())
results = [r for r in results if _valid_db(r)]
results = sorted(results, key=functools.cmp_to_key(cmptime))
# Determine which benchmark is selected
benchmarks = []
if request.GET['ben'] == 'grid':
benchmarks = ExperimentConf.BENCHMARK_TYPES
revs = 10
def _in_benchmarks(x):
return x.benchmark_conf.benchmark_type in benchmarks
results = [r for r in results if _in_benchmarks(r)]
table_results = []
elif request.GET['ben'] == 'show_none':
benchmarks = []
table_results = []
else:
benchmarks = [request.GET['ben']]
benchmark_confs = [x for x in request.GET['spe'].strip().split(',')
if x != '']
def _in_confs(x):
return str(x.benchmark_conf.pk) in benchmark_confs
results = [r for r in results if _in_confs(r)]
for f in [r for r in request.GET.getlist('add[]', []) if r != '']:
_, value = f.split(':')
if value == 'select_all':
continue
results = [r for r in results if r.benchmark_conf.key == value]
table_results = results
if len(benchmarks) == 1:
metrics = request.GET.get('met', 'throughput,p99_latency').split(',')
else:
metrics = ['throughput']
# For the data table
data_package['results'] = [
[
x.pk,
x.timestamp.strftime("%Y-%m-%d %H:%M:%S"),
x.db_conf.db_type,
x.benchmark_conf.name,
x.throughput * METRIC_META['throughput']['scale'],
x.p99_latency * METRIC_META['p99_latency']['scale'],
x.db_conf.pk,
x.benchmark_conf.pk
]
for x in table_results
]
# For plotting charts
for metric in metrics:
for bench in benchmarks:
b_r = [r for r in results
if r.benchmark_conf.benchmark_type == bench]
| |
# <gh_stars>1-10  (scraper artifact; kept as a comment so the file parses)
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2016-2020 German Aerospace Center (DLR) and others.
# SUMOPy module
# Copyright (C) 2012-2017 University of Bologna - DICAM
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file networktools.py
# @author <NAME>
# @date
import subprocess
import os
import sys
import string
import numpy as np
from numpy import random
from collections import OrderedDict
import agilepy.lib_base.classman as cm
import agilepy.lib_base.arrayman as am
import agilepy.lib_base.xmlman as xm
from agilepy.lib_base.processes import Process, CmlMixin
import netconvert
import routing
#from agilepy.lib_base.geometry import get_length_polypoints,get_dist_point_to_segs, get_diff_angle_clockwise
from agilepy.lib_base.geometry import *
class TlsGenerator(netconvert.NetConvertMixin):
def __init__(self, net, logger=None, **kwargs):
    # Initialize the TLS generator process attached to a network object.
    # NOTE: this module is Python 2 (print statements, xrange, iteritems).
    print 'TlsGenerate.__init__'
    self._init_common('tlsgenerator',
                      parent=net,
                      name='Trafficlight system generator',
                      logger=logger,
                      info='Detects, sets and configures major traffic light systems.',
                      )
    attrsman = self.set_attrsman(cm.Attrsman(self))
    # self.init_options_tls()
    # Clustering distances for grouping nodes into one TLS [m]:
    self.dist_tls_max = 60.0            # max distance of a node from the TLS center
    self.dist_tls_internode_max = 30.0  # max distance between neighbouring TLS nodes
    self.length_min_notls = 6.0         # edges shorter than this get no own signal
    # Signal-plan parameters (times in seconds):
    self.ptype = 1  # "static": 1,"actuated": 2,
    self.offset = 0
    self.cycletime = 180
    self.yellowtime = 5
    self.phasetime_min = 6
    self.phasetime_max = 50
    # Cache the mode ids used to classify lanes/edges.
    self.id_mode_bike = net.modes.get_id_mode("bicycle")
    self.id_mode_ped = net.modes.get_id_mode("pedestrian")
    self.id_mode_car = net.modes.get_id_mode("passenger")
    self.id_mode_bus = net.modes.get_id_mode("bus")
    # Modes counted as "cycleped" (non-motorized) traffic.
    self.ids_cycleped = [self.id_mode_bike, self.id_mode_ped]
    #self.id_mode_ped = net.modes.get_id_mode("delivery")
def is_major_road(self, priority, n_lanes, speed_max):
    """Heuristic test whether an edge counts as a major road.

    Major means: (high priority OR at least 3 lanes) AND at least
    2 lanes AND a speed limit of at least 30 km/h (30/3.6 m/s).
    """
    is_important = (priority >= 7) or (n_lanes >= 3)
    return is_important and (n_lanes >= 2) and (speed_max >= 30/3.6)
def get_routecost(self, route):
    """
    Centralized function to determine the importance of a route.

    The cost is the total length in meters of all edges on the route.
    """
    lengths = self.parent.edges.lengths
    return np.sum(lengths[route])
def do(self):
    # Main entry point: detect major roads, chain them into major
    # routes, find junctions where major routes cross, and create a
    # traffic-light system at each such junction. Returns True.
    print 'TlsGenerator.do'
    net = self.parent
    edges = net.edges
    nodes = net.nodes
    lanes = net.lanes
    connections = net.connections
    tlss = net.tlss
    # Start from a clean slate: remove all previously defined TLS.
    tlss.clear_tlss()
    # construct major routes
    ids_edges_major = set()
    self.ids_edges_major = ids_edges_major
    ids_edge = edges.get_ids()
    majorroutes = []
    for id_edge, id_sumo, type_spread, shape, \
            n_lanes, ids_lane, priority, id_fromnode, id_tonode, speed_max\
            in zip(ids_edge,
                   edges.ids_sumo[ids_edge],
                   edges.types_spread[ids_edge],
                   edges.shapes[ids_edge],
                   edges.nums_lanes[ids_edge],
                   edges.ids_lanes[ids_edge],
                   edges.priorities[ids_edge],
                   edges.ids_fromnode[ids_edge],
                   edges.ids_tonode[ids_edge],
                   edges.speeds_max[ids_edge],
                   ):
        print '  is_major_road', id_edge, self.is_major_road(
            priority, n_lanes, speed_max), id_edge not in ids_edges_major
        if self.is_major_road(priority, n_lanes, speed_max):
            # Only seed a new route from an edge not already absorbed
            # into a previously built major route.
            if id_edge not in ids_edges_major:
                #dist = 0
                route_back = self.follow_major_route_backward(id_edge, shape)
                route_forward = self.follow_major_route_foreward(id_edge, shape)
                if len(route_back)+len(route_forward) > 0:
                    # Route = backward part (reversed) + seed + forward part.
                    majorroutes.append(route_back[::-1]+[id_edge]+route_forward)
    self.majorroutes = majorroutes
    print '  majorroutes:'  # ,majorroutes
    # mapping with id_edge as key and index of majorroutes
    edges_major = {}
    for i, route in zip(xrange(len(majorroutes)), majorroutes):
        print '    route', i, len(route), route
        for id_edge in route:
            edges_major[id_edge] = i
    self.edges_major = edges_major
    # identify major nodes: per route, the fromnodes of all edges
    # plus one extra entry for the route end.
    majornodes = []
    self.majorroutecosts = []
    for route in majorroutes:
        # print '    check route',len(route),route
        ids_node = np.zeros(len(route)+1, dtype=np.int32)
        # print '    ids_node',len(ids_node),ids_node
        ids_node[:-1] = edges.ids_fromnode[route]
        # NOTE(review): the last entry repeats the fromnode of the last
        # edge; the route's true end node would be ids_tonode[route[-1]]
        # -- confirm whether this is intentional.
        ids_node[-1] = edges.ids_fromnode[route[-1]]
        majornodes.append(ids_node)
        self.majorroutecosts.append(self.get_routecost(route))
    # identify major junctions: nodes shared by two different major
    # routes. The double loop visits each ordered pair, so both route
    # indexes end up registered at the shared node.
    majorjuntions = {}
    for i_route1, ids_node1 in zip(xrange(len(majorroutes)), majornodes):
        ids_nodeset1 = set(ids_node1)
        for i_route2, ids_node2 in zip(xrange(len(majorroutes)), majornodes):
            if i_route2 != i_route1:
                print '    check routes', i_route1, i_route2, 'ids_node_inter', ids_nodeset1.intersection(ids_node2)
                for id_node_inter in ids_nodeset1.intersection(ids_node2):
                    # go through all nodes that route1 and route2 have in common
                    if majorjuntions.has_key(id_node_inter):
                        # add route index to already existing junction
                        if i_route1 not in majorjuntions[id_node_inter]:
                            majorjuntions[id_node_inter].append(i_route1)
                        if i_route2 not in majorjuntions[id_node_inter]:
                            majorjuntions[id_node_inter].append(i_route2)
                    else:
                        # create new junction and add the two route indexes
                        majorjuntions[id_node_inter] = [i_route1, i_route2]
    self.ids_majornodes = set(majorjuntions.keys())
    # make_majortls marks nodes it consumes so nearby junctions are not
    # equipped twice.
    self.ids_majornodes_used = set()
    # create major TLS
    print '  major junctions:'
    for id_node, inds_route in majorjuntions.iteritems():
        if id_node not in self.ids_majornodes_used:
            # debug
            print '    Next majornode', id_node
            for ind in inds_route:
                print '      majorroute', majorroutes[ind]
            self.make_majortls(id_node, inds_route)
        # print '    junction', id_node
        # for ind_route in majorjuntions[id_node]:
        #    print '      ',majorroutes[ind_route]
    return True
def make_majortls(self, id_node_major, inds_route):
print 79*'-'
print 'make_majortls for', id_node_major, inds_route
edges = self.parent.edges
nodes = self.parent.nodes
lanes = self.parent.lanes
connections = self.parent.connections
tlss = self.parent.tlss
edges_major = self.edges_major
# identify all nodes
nodes_tls = OrderedDict()
# put cross node first
self.init_tlsnode(id_node_major, nodes_tls)
#edges_major_tls = {}
# find tls nodes and put info in nodes_tls
self.find_neighbour_nodes(id_node_major, 0.0, nodes_tls, edges, nodes)
# build connection conflict matrix
# identify external incoming and outgoing edges
# for ind_route in inds_route:
# for id_edge in self.majorroutes[ind_route]:
# edges_major_tls[id_edge] = ind_route
# get all connections from incoming links
#connections_tls = OrderedDict()
ids_con_tls = []
#ids_conlane = []
ids_connodes = []
convecs = []
# edges entering the TLS
# edges are either bikeways, major or other edges
# no pedestrian edges are considered
ids_incoming_tls = [] # other edges
ids_incoming_bike_tls = [] # bikeway edges
ids_incoming_major_tls = [] # major edges
# edges leaving the TLS
# edges are either bikeways, major or other edges
# no pedestrian edges are considered
ids_outgoing_tls = [] # other edges
ids_outgoing_major_tls = [] # major edges
ids_outgoing_bike_tls = [] # bikeway edges
ids_nodes_tls = set(nodes_tls.keys())
ids_cycleped = set(self.ids_cycleped) # set with bike and ped mode
# enrich node attributes: is_cycleped, is_crossing
# detect all connections, lanes and everything else
for id_node, nodeattrs in nodes_tls.iteritems():
print ' check all in id_node', id_node
is_cycleped = True
n_cycleped = 0 # count incomin and outgoing cycleped edges
n_nocycleped_in = 0 # count normal incoming road edges
n_cycleped_in = 0
n_cycleped_out = 0
# make lists with incoming edges
for id_edge in nodes.ids_incoming[id_node]:
print ' check incoming', id_edge
ids_lane = edges.ids_lanes[id_edge]
#is_cycleped_edge = True
#is_ped_edge = True
#is_cycle_edge = True
ids_lanemodes = set(lanes.ids_mode[ids_lane])
is_cycleped_edge = ids_cycleped.issuperset(ids_lanemodes)
is_cycle_edge = (self.id_mode_bike in ids_lanemodes) & ((len(ids_lanemodes) == 1) | is_cycleped_edge)
is_ped_edge = (self.id_mode_ped in ids_lanemodes) & (len(ids_lanemodes) == 1)
for id_lane in ids_lane:
#is_cp = lanes.ids_mode[id_lane] in self.ids_cycleped
#is_cycleped &= is_cp
#is_cycleped_edge &= is_cp
#is_ped_edge &= lanes.ids_mode[id_lane] == self.id_mode_ped
#is_cycle_edge &= lanes.ids_mode[id_lane] == self.id_mode_bike
for id_con in connections.select_ids(connections.ids_fromlane.get_value() == id_lane):
#connections_tls[id_con] = {'vec_in' : lanes.shapes[id_lane][-2:]}
ids_con_tls.append(id_con)
ids_connodes.append(id_node)
if not is_cycleped_edge:
n_nocycleped_in += 1
else:
n_cycleped += 1
n_cycleped_in += 1
nodeattrs['ids_cycleped_incoming'].append(id_edge)
# detect incoming edges
# ,not is_ped_edge,'not from TLS',edges.ids_fromnode[id_edge] not in ids_nodes_tls
print ' is external', edges.ids_fromnode[
id_edge] not in ids_nodes_tls, 'is_cycle_edge', is_cycle_edge, 'is_major', id_edge in edges_major, 'is_noped'
if edges.ids_fromnode[id_edge] not in ids_nodes_tls:
# from node is not part of the TLS
# so it comes from external
if is_cycle_edge:
# print ' apend ids_incoming_bike_tls',id_edge
ids_incoming_bike_tls.append(id_edge)
elif id_edge in edges_major:
# print ' apend ids_incoming_major_tls',id_edge
ids_incoming_major_tls.append(id_edge)
elif not is_ped_edge: # do not consider pedestrian edges
# print ' append ids_incoming_tls',id_edge
ids_incoming_tls.append(id_edge)
# else:
# print ' no incoming id_edge',id_edge
# make lists with outgoing edges
for id_edge in nodes.ids_outgoing[id_node]:
is_cycleped_edge = True
is_ped_edge = True
for id_lane in edges.ids_lanes[id_edge]:
is_cp = lanes.ids_mode[id_lane] in self.ids_cycleped
is_cycleped &= is_cp
is_cycleped_edge &= is_cp
is_ped_edge &= lanes.ids_mode[id_lane] == self.id_mode_ped
# print ' check outgoing',id_edge,id_edge in edges_major
if is_cycleped_edge:
n_cycleped += 1
n_cycleped_out += 1
nodeattrs['ids_cycleped_outgoing'].append(id_edge)
if edges.ids_tonode[id_edge] not in ids_nodes_tls:
if is_cycleped_edge:
ids_outgoing_bike_tls.append(id_edge)
elif id_edge in edges_major:
ids_outgoing_major_tls.append(id_edge)
elif not is_ped_edge: # do not consider pedestrian edges
ids_outgoing_tls.append(id_edge)
# node is pure bike-pedestrian crossing > no signals
nodeattrs['is_cycleped'] = is_cycleped
# node crossing between bike-pedestrian and road
#nodeattrs['is_crossing'] = (n_nocycleped_in == 1)&(n_cycleped>2)
nodeattrs['is_crossing'] = (n_nocycleped_in == 1) & (n_cycleped_in == 1) & (n_cycleped_out == 1)
# debug
if 1:
print ' nodes_tls:'
for id_node, nodeattrs in nodes_tls.iteritems():
print ' id_node', id_node
for key, val in nodeattrs.iteritems():
print ' ', key, val
n_cons = len(ids_con_tls)
ids_con_tls = np.array(ids_con_tls, dtype=np.int32)
ids_connodes = np.array(ids_connodes, dtype=np.int32)
print ' ids_incoming_major_tls', ids_incoming_major_tls
print ' ids_outgoing_major_tls', ids_outgoing_major_tls
print ' ids_incoming_tls', ids_incoming_tls
print ' ids_outgoing_tls', ids_outgoing_tls
if len(ids_incoming_tls)+len(ids_incoming_major_tls) < 2:
print ' Need at least 2 incoming edges. Abandon.'
return False
print ' connectors detected:', n_cons
#ids_conlane = np.array(ids_conlane, dtype = np.int32 )
#convecs = np.array(convecs, dtype = np.float32 )
vertices_fromcon = np.zeros((n_cons, 2, 2), dtype=np.float32)
vertices_tocon = np.zeros((n_cons, 2, 2), dtype=np.float32)
are_enabeled = np.zeros((n_cons), dtype=np.bool)
#are_enabeled = np.zeros((n_cons), dtype = np.bool)
#convecs[:] = lanes.shapes[ids_conlane][-2:]
ids_fromlane_tls = connections.ids_fromlane[ids_con_tls]
ids_tolane_tls = connections.ids_tolane[ids_con_tls]
for i, shape_fromlane, shape_tolane in zip(
xrange(n_cons),
lanes.shapes[ids_fromlane_tls],
lanes.shapes[ids_tolane_tls]
):
#vertices_con[i] = [shape_fromlane[-1][:2], shape_tolane[0][:2]]
vertices_fromcon[i] = [shape_fromlane[-2][:2], shape_fromlane[-1][:2]]
vertices_tocon[i] = [shape_tolane[0][:2], shape_tolane[1][:2]]
# conflicting connectors with
# id_connector as key and | |
# <reponame>Moldovandreii/RepetitionCount <gh_stars>1000+  (scraper artifact; kept as a comment so the file parses)
"""Use pika with the Gevent IOLoop."""
import functools
import logging
import os
import threading
import weakref
try:
import queue
except ImportError: # Python <= v2.7
import Queue as queue
import gevent
import gevent.hub
import gevent.socket
import pika.compat
from pika.adapters.base_connection import BaseConnection
from pika.adapters.utils.io_services_utils import check_callback_arg
from pika.adapters.utils.nbio_interface import (
AbstractIOReference,
AbstractIOServices,
)
from pika.adapters.utils.selector_ioloop_adapter import (
AbstractSelectorIOLoop,
SelectorIOServicesAdapter,
)
LOGGER = logging.getLogger(__name__)
class GeventConnection(BaseConnection):
    """Pika ``BaseConnection`` implementation driven by Gevent's event loop.

    An async selector-based connection which integrates with Gevent.
    """

    def __init__(self,
                 parameters=None,
                 on_open_callback=None,
                 on_open_error_callback=None,
                 on_close_callback=None,
                 custom_ioloop=None,
                 internal_connection_workflow=True):
        """Create a new GeventConnection instance and connect to RabbitMQ on
        Gevent's event-loop.

        :param pika.connection.Parameters|None parameters: The connection
            parameters
        :param callable|None on_open_callback: The method to call when the
            connection is open
        :param callable|None on_open_error_callback: Called if the connection
            can't be established or connection establishment is interrupted by
            `Connection.close()`:
            on_open_error_callback(Connection, exception)
        :param callable|None on_close_callback: Called when a previously fully
            open connection is closed:
            `on_close_callback(Connection, exception)`, where `exception` is
            either an instance of `exceptions.ConnectionClosed` if closed by
            user or broker or exception of another type that describes the
            cause of connection failure
        :param gevent._interfaces.ILoop|nbio_interface.AbstractIOServices|None
            custom_ioloop: Use a custom Gevent ILoop.
        :param bool internal_connection_workflow: True for autonomous connection
            establishment which is default; False for externally-managed
            connection workflow via the `create_connection()` factory
        """
        if pika.compat.ON_WINDOWS:
            raise RuntimeError('GeventConnection is not supported on Windows.')

        # Default to a fresh loop bound to the current hub, then make sure
        # we hand the base class an AbstractIOServices implementation.
        ioloop = custom_ioloop or _GeventSelectorIOLoop(gevent.get_hub())
        if isinstance(ioloop, AbstractIOServices):
            nbio = ioloop
        else:
            nbio = _GeventSelectorIOServicesAdapter(ioloop)

        super(GeventConnection, self).__init__(
            parameters,
            on_open_callback,
            on_open_error_callback,
            on_close_callback,
            nbio,
            internal_connection_workflow=internal_connection_workflow)

    @classmethod
    def create_connection(cls,
                          connection_configs,
                          on_done,
                          custom_ioloop=None,
                          workflow=None):
        """Implement
        :py:classmethod:`pika.adapters.BaseConnection.create_connection()`.
        """
        loop = custom_ioloop or _GeventSelectorIOLoop(gevent.get_hub())
        nbio = _GeventSelectorIOServicesAdapter(loop)

        def connection_factory(params):
            """Connection factory."""
            if params is None:
                raise ValueError('Expected pika.connection.Parameters '
                                 'instance, but got None in params arg.')
            # Externally-managed workflow: the factory only builds instances.
            return cls(parameters=params,
                       custom_ioloop=nbio,
                       internal_connection_workflow=False)

        return cls._start_connection_workflow(
            connection_configs=connection_configs,
            connection_factory=connection_factory,
            nbio=nbio,
            workflow=workflow,
            on_done=on_done)
class _TSafeCallbackQueue(object):
"""Dispatch callbacks from any thread to be executed in the main thread
efficiently with IO events.
"""
def __init__(self):
"""
:param _GeventSelectorIOLoop loop: IO loop to add callbacks to.
"""
# Thread-safe, blocking queue.
self._queue = queue.Queue()
# PIPE to trigger an event when the queue is ready.
self._read_fd, self._write_fd = os.pipe()
# Lock around writes to the PIPE in case some platform/implementation
# requires this.
self._write_lock = threading.RLock()
@property
def fd(self):
"""The file-descriptor to register for READ events in the IO loop."""
return self._read_fd
def add_callback_threadsafe(self, callback):
"""Add an item to the queue from any thread. The configured handler
will be invoked with the item in the main thread.
:param item: Object to add to the queue.
"""
self._queue.put(callback)
with self._write_lock:
# The value written is not important.
os.write(self._write_fd, b'\xFF')
def run_next_callback(self):
"""Invoke the next callback from the queue.
MUST run in the main thread. If no callback was added to the queue,
this will block the IO loop.
Performs a blocking READ on the pipe so must only be called when the
pipe is ready for reading.
"""
try:
callback = self._queue.get_nowait()
except queue.Empty:
# Should never happen.
LOGGER.warning("Callback queue was empty.")
else:
# Read the byte from the pipe so the event doesn't re-fire.
os.read(self._read_fd, 1)
callback()
class _GeventSelectorIOLoop(AbstractSelectorIOLoop):
    """Implementation of `AbstractSelectorIOLoop` using the Gevent event loop.
    Required by implementations of `SelectorIOServicesAdapter`.
    """
    # Gevent's READ and WRITE masks are defined as 1 and 2 respectively. No
    # ERROR mask is defined.
    # See http://www.gevent.org/api/gevent.hub.html#gevent._interfaces.ILoop.io
    READ = 1
    WRITE = 2
    ERROR = 0

    def __init__(self, gevent_hub=None):
        """
        :param gevent._interfaces.ILoop gevent_loop:
        """
        self._hub = gevent_hub or gevent.get_hub()
        # fd -> gevent io watcher, so handlers can be updated/removed later.
        self._io_watchers_by_fd = {}
        # Used to start/stop the loop.
        self._waiter = gevent.hub.Waiter()
        # For adding callbacks from other threads. See `add_callback(..)`.
        self._callback_queue = _TSafeCallbackQueue()

        def run_callback_in_main_thread(fd, events):
            """Swallow the fd and events arguments."""
            del fd
            del events
            self._callback_queue.run_next_callback()

        # Watch the queue's pipe so cross-thread callbacks wake this loop.
        self.add_handler(self._callback_queue.fd, run_callback_in_main_thread,
                         self.READ)

    def close(self):
        """Release the loop's resources."""
        self._hub.loop.destroy()
        self._hub = None

    def start(self):
        """Run the I/O loop. It will loop until requested to exit. See `stop()`.
        """
        LOGGER.debug("Passing control to Gevent's IOLoop")
        self._waiter.get()  # Block until 'stop()' is called.

        LOGGER.debug("Control was passed back from Gevent's IOLoop")
        # Reset the waiter so start() can be called again after stop().
        self._waiter.clear()

    def stop(self):
        """Request exit from the ioloop. The loop is NOT guaranteed to
        stop before this method returns.

        To invoke `stop()` safely from a thread other than this IOLoop's thread,
        call it via `add_callback_threadsafe`; e.g.,
            `ioloop.add_callback(ioloop.stop)`
        """
        # Wakes the greenlet blocked in start().
        self._waiter.switch(None)

    def add_callback(self, callback):
        """Requests a call to the given function as soon as possible in the
        context of this IOLoop's thread.

        NOTE: This is the only thread-safe method in IOLoop. All other
        manipulations of IOLoop must be performed from the IOLoop's thread.

        For example, a thread may request a call to the `stop` method of an
        ioloop that is running in a different thread via
        `ioloop.add_callback_threadsafe(ioloop.stop)`

        :param callable callback: The callback method
        """
        if gevent.get_hub() == self._hub:
            # We're in the main thread; just add the callback.
            LOGGER.debug("Adding callback from main thread")
            self._hub.loop.run_callback(callback)
        else:
            # This isn't the main thread and Gevent's hub/loop don't provide
            # any thread-safety so enqueue the callback for it to be registered
            # in the main thread.
            LOGGER.debug("Adding callback from another thread")
            callback = functools.partial(self._hub.loop.run_callback, callback)
            self._callback_queue.add_callback_threadsafe(callback)

    def call_later(self, delay, callback):
        """Add the callback to the IOLoop timer to be called after delay seconds
        from the time of call on best-effort basis. Returns a handle to the
        timeout.

        :param float delay: The number of seconds to wait to call callback
        :param callable callback: The callback method
        :returns: handle to the created timeout that may be passed to
            `remove_timeout()`
        :rtype: object
        """
        timer = self._hub.loop.timer(delay)
        timer.start(callback)
        return timer

    def remove_timeout(self, timeout_handle):
        """Remove a timeout

        :param timeout_handle: Handle of timeout to remove
        """
        timeout_handle.close()

    def add_handler(self, fd, handler, events):
        """Start watching the given file descriptor for events

        :param int fd: The file descriptor
        :param callable handler: When requested event(s) occur,
            `handler(fd, events)` will be called.
        :param int events: The event mask (READ|WRITE)
        """
        io_watcher = self._hub.loop.io(fd, events)
        self._io_watchers_by_fd[fd] = io_watcher
        io_watcher.start(handler, fd, events)

    def update_handler(self, fd, events):
        """Change the events being watched for.

        :param int fd: The file descriptor
        :param int events: The new event mask (READ|WRITE)
        """
        io_watcher = self._io_watchers_by_fd[fd]
        # Save callback from the original watcher. Then close the old watcher
        # and create a new one using the saved callback and the new events.
        callback = io_watcher.callback
        io_watcher.close()
        del self._io_watchers_by_fd[fd]
        self.add_handler(fd, callback, events)

    def remove_handler(self, fd):
        """Stop watching the given file descriptor for events

        :param int fd: The file descriptor
        """
        io_watcher = self._io_watchers_by_fd[fd]
        io_watcher.close()
        del self._io_watchers_by_fd[fd]
class _GeventSelectorIOServicesAdapter(SelectorIOServicesAdapter):
    """SelectorIOServicesAdapter implementation using Gevent's DNS resolver."""

    def getaddrinfo(self,
                    host,
                    port,
                    on_done,
                    family=0,
                    socktype=0,
                    proto=0,
                    flags=0):
        """Implement :py:meth:`.nbio_interface.AbstractIOServices.getaddrinfo()`.
        """
        # Delegate the actual resolution to a dedicated greenlet.
        resolver = _GeventAddressResolver(native_loop=self._loop,
                                          host=host,
                                          port=port,
                                          family=family,
                                          socktype=socktype,
                                          proto=proto,
                                          flags=flags,
                                          on_done=on_done)
        resolver.start()
        # Wrap in an `AbstractIOReference` so callers can cancel the lookup.
        return _GeventIOLoopIOHandle(resolver)
class _GeventIOLoopIOHandle(AbstractIOReference):
    """Implement `AbstractIOReference`.

    Only used to wrap the _GeventAddressResolver.
    """

    def __init__(self, subject):
        """
        :param subject: subject of the reference containing a `cancel()` method
        """
        # Delegate cancellation straight to the subject's method.
        self._cancel = subject.cancel

    def cancel(self):
        """Cancel pending operation

        :returns: False if was already done or cancelled; True otherwise
        :rtype: bool
        """
        return self._cancel()
class _GeventAddressResolver(object):
"""Performs getaddrinfo asynchronously Gevent's configured resolver in a
separate greenlet and invoking the provided callback with the result.
See: http://www.gevent.org/dns.html
"""
__slots__ = (
'_loop',
'_on_done',
'_greenlet',
# getaddrinfo(..) args:
'_ga_host',
'_ga_port',
'_ga_family',
'_ga_socktype',
'_ga_proto',
'_ga_flags')
def __init__(self, native_loop, host, port, family, socktype, proto, flags,
             on_done):
    """Initialize the `_GeventAddressResolver`.

    :param AbstractSelectorIOLoop native_loop:
    :param host: `see socket.getaddrinfo()`
    :param port: `see socket.getaddrinfo()`
    :param family: `see socket.getaddrinfo()`
    :param socktype: `see socket.getaddrinfo()`
    :param proto: `see socket.getaddrinfo()`
    :param flags: `see socket.getaddrinfo()`
    :param on_done: on_done(records|BaseException) callback for reporting
        result from the given I/O loop. The single arg will be either an
        exception object (check for `BaseException`) in case of failure or
        the result returned by `socket.getaddrinfo()`.
    """
    # Fail fast on a non-callable completion callback.
    check_callback_arg(on_done, 'on_done')

    self._loop = native_loop
    self._on_done = on_done
    # Reference to the greenlet performing `getaddrinfo`; None until
    # `start()` is called.
    self._greenlet = None
    # getaddrinfo(..) args, stored for the deferred call.
    self._ga_host = host
    self._ga_port = port
    self._ga_family = family
    self._ga_socktype = socktype
    self._ga_proto = proto
    self._ga_flags = flags
def start(self):
    """Start an asynchronous getaddrinfo invocation (no-op if already
    started)."""
    if self._greenlet is not None:
        LOGGER.warning("_GeventAddressResolver already started")
    else:
        # Run the blocking resolution in its own raw greenlet.
        self._greenlet = gevent.spawn_raw(self._resolve)
def cancel(self):
"""Cancel the pending resolver."""
changed = False
if self._greenlet is | |
# <gh_stars>0  (scraper artifact; kept as a comment so the file parses)
#!/usr/bin/env python
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import json
import os
import shutil
import anndata
import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve
from scipy.sparse import issparse
from collections import defaultdict
import scvi
from scvi.dataset import AnnDatasetFromAnnData, LoomDataset, \
GeneExpressionDataset, Dataset10X
from scvi.models import Classifier, VAE
from scvi.inference import UnsupervisedTrainer, ClassifierTrainer
import torch
import umap
from .utils import create_average_doublet, create_summed_doublet, \
create_multinomial_doublet, make_gene_expression_dataset, \
knn_smooth_pred_class
'''
solo.py
Simulate doublets, train a VAE, and then a classifier on top.
'''
###############################################################################
# main
###############################################################################
def main():
usage = 'solo'
parser = ArgumentParser(usage, formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument(dest='model_json_file',
help='json file to pass VAE parameters')
parser.add_argument(dest='data_path',
help='path to h5ad, loom or 10x directory containing cell by genes counts')
parser.add_argument('-d', dest='doublet_depth',
default=2., type=float,
help='Depth multiplier for a doublet relative to the \
average of its constituents')
parser.add_argument('-g', dest='gpu',
default=True, action='store_true',
help='Run on GPU')
parser.add_argument('-a', dest='anndata_output',
default=False, action='store_true',
help='output modified anndata object with solo scores \
Only works for anndata')
parser.add_argument('-o', dest='out_dir',
default='solo_out')
parser.add_argument('-r', dest='doublet_ratio',
default=2., type=float,
help='Ratio of doublets to true \
cells')
parser.add_argument('-s', dest='seed',
default=None, help='Path to previous solo output \
directory. Seed VAE models with previously \
trained solo model. Directory structure is assumed to \
be the same as solo output directory structure. \
should at least have a vae.pt a pickled object of \
vae weights and a latent.npy an np.ndarray of the \
latents of your cells.')
parser.add_argument('-k', dest='known_doublets',
help='Experimentally defined doublets tsv file. \
Should be a single column of True/False. True \
indicates the cell is a doublet. No header.',
type=str)
parser.add_argument('-t', dest='doublet_type', help='Please enter \
multinomial, average, or sum',
default='multinomial',
choices=['multinomial', 'average', 'sum'])
parser.add_argument('-e', dest='expected_number_of_doublets',
help='Experimentally expected number of doublets',
type=int, default=None)
parser.add_argument('-p', dest='plot',
default=False, action='store_true',
help='Plot outputs for solo')
parser.add_argument('-l', dest='normal_logging',
default=False, action='store_true',
help='Logging level set to normal (aka not debug)')
parser.add_argument('--random_size', dest='randomize_doublet_size',
default=False,
action='store_true',
help='Sample depth multipliers from Unif(1, \
DoubletDepth) \
to provide a diversity of possible doublet depths.'
)
args = parser.parse_args()
if not args.normal_logging:
scvi._settings.set_verbosity(10)
model_json_file = args.model_json_file
data_path = args.data_path
if args.gpu and not torch.cuda.is_available():
args.gpu = torch.cuda.is_available()
print('Cuda is not available, switching to cpu running!')
if not os.path.isdir(args.out_dir):
os.mkdir(args.out_dir)
##################################################
# data
# read loom/anndata
data_ext = os.path.splitext(data_path)[-1]
if data_ext == '.loom':
scvi_data = LoomDataset(data_path)
elif data_ext == '.h5ad':
adata = anndata.read(data_path)
if issparse(adata.X):
adata.X = adata.X.todense()
scvi_data = AnnDatasetFromAnnData(adata)
elif os.path.isdir(data_path):
scvi_data = Dataset10X(save_path=data_path,
measurement_names_column=1,
dense=True)
cell_umi_depth = scvi_data.X.sum(axis=1)
fifth, ninetyfifth = np.percentile(cell_umi_depth, [5, 95])
min_cell_umi_depth = np.min(cell_umi_depth)
max_cell_umi_depth = np.max(cell_umi_depth)
if fifth * 10 < ninetyfifth:
print("""WARNING YOUR DATA HAS A WIDE RANGE OF CELL DEPTHS.
PLEASE MANUALLY REVIEW YOUR DATA""")
print(f"Min cell depth: {min_cell_umi_depth}, Max cell depth: {max_cell_umi_depth}")
else:
msg = f'{data_path} is not a recognized format.\n'
msg += 'must be one of {h5ad, loom, 10x directory}'
raise TypeError(msg)
num_cells, num_genes = scvi_data.X.shape
if args.known_doublets is not None:
print('Removing known doublets for in silico doublet generation')
print('Make sure known doublets are in the same order as your data')
known_doublets = np.loadtxt(args.known_doublets, dtype=str) == 'True'
assert len(known_doublets) == scvi_data.X.shape[0]
known_doublet_data = make_gene_expression_dataset(
scvi_data.X[known_doublets],
scvi_data.gene_names)
known_doublet_data.labels = np.ones(known_doublet_data.X.shape[0])
singlet_scvi_data = make_gene_expression_dataset(
scvi_data.X[~known_doublets],
scvi_data.gene_names)
singlet_num_cells, _ = singlet_scvi_data.X.shape
else:
known_doublet_data = None
singlet_num_cells = num_cells
known_doublets = np.zeros(num_cells, dtype=bool)
singlet_scvi_data = scvi_data
singlet_scvi_data.labels = np.zeros(singlet_scvi_data.X.shape[0])
scvi_data.labels = known_doublets.astype(int)
##################################################
# parameters
# check for parameters
if not os.path.exists(model_json_file):
raise FileNotFoundError(f'{model_json_file} does not exist.')
# read parameters
with open(model_json_file, 'r') as model_json_open:
params = json.load(model_json_open)
# set VAE params
vae_params = {}
for par in ['n_hidden', 'n_latent', 'n_layers', 'dropout_rate',
'ignore_batch']:
if par in params:
vae_params[par] = params[par]
vae_params['n_batch'] = 0 if params.get(
'ignore_batch', False) else scvi_data.n_batches
# training parameters
batch_size = params.get('batch_size', 128)
valid_pct = params.get('valid_pct', 0.1)
learning_rate = params.get('learning_rate', 1e-3)
stopping_params = {'patience': params.get('patience', 10), 'threshold': 0}
# protect against single example batch
while num_cells % batch_size == 1:
batch_size = int(np.round(1.25*batch_size))
print('Increasing batch_size to %d to avoid single example batch.' % batch_size)
##################################################
# VAE
vae = VAE(n_input=singlet_scvi_data.nb_genes, n_labels=2,
reconstruction_loss='nb',
log_variational=True, **vae_params)
if args.seed:
if args.gpu:
device = torch.device('cuda')
vae.load_state_dict(torch.load(os.path.join(args.seed, 'vae.pt')))
vae.to(device)
else:
map_loc = 'cpu'
vae.load_state_dict(torch.load(os.path.join(args.seed, 'vae.pt'),
map_location=map_loc))
# save latent representation
utrainer = \
UnsupervisedTrainer(vae, singlet_scvi_data,
train_size=(1. - valid_pct),
frequency=2,
metrics_to_monitor=['reconstruction_error'],
use_cuda=args.gpu,
early_stopping_kwargs=stopping_params,
batch_size=batch_size)
full_posterior = utrainer.create_posterior(
utrainer.model,
singlet_scvi_data,
indices=np.arange(len(singlet_scvi_data)))
latent, _, _ = full_posterior.sequential(batch_size).get_latent()
np.save(os.path.join(args.out_dir, 'latent.npy'),
latent.astype('float32'))
else:
stopping_params['early_stopping_metric'] = 'reconstruction_error'
stopping_params['save_best_state_metric'] = 'reconstruction_error'
# initialize unsupervised trainer
utrainer = \
UnsupervisedTrainer(vae, singlet_scvi_data,
train_size=(1. - valid_pct),
frequency=2,
metrics_to_monitor=['reconstruction_error'],
use_cuda=args.gpu,
early_stopping_kwargs=stopping_params,
batch_size=batch_size)
utrainer.history['reconstruction_error_test_set'].append(0)
# initial epoch
utrainer.train(n_epochs=2000, lr=learning_rate)
# drop learning rate and continue
utrainer.early_stopping.wait = 0
utrainer.train(n_epochs=500, lr=0.5 * learning_rate)
# save VAE
torch.save(vae.state_dict(), os.path.join(args.out_dir, 'vae.pt'))
# save latent representation
full_posterior = utrainer.create_posterior(
utrainer.model,
singlet_scvi_data,
indices=np.arange(len(singlet_scvi_data)))
latent, _, _ = full_posterior.sequential(batch_size).get_latent()
np.save(os.path.join(args.out_dir, 'latent.npy'),
latent.astype('float32'))
##################################################
# simulate doublets
non_zero_indexes = np.where(singlet_scvi_data.X > 0)
cells = non_zero_indexes[0]
genes = non_zero_indexes[1]
cells_ids = defaultdict(list)
for cell_id, gene in zip(cells, genes):
cells_ids[cell_id].append(gene)
# choose doublets function type
if args.doublet_type == 'average':
doublet_function = create_average_doublet
elif args.doublet_type == 'sum':
doublet_function = create_summed_doublet
else:
doublet_function = create_multinomial_doublet
cell_depths = singlet_scvi_data.X.sum(axis=1)
num_doublets = int(args.doublet_ratio * singlet_num_cells)
if known_doublet_data is not None:
num_doublets -= known_doublet_data.X.shape[0]
# make sure we are making a non negative amount of doublets
assert num_doublets >= 0
in_silico_doublets = np.zeros((num_doublets, num_genes), dtype='float32')
# for desired # doublets
for di in range(num_doublets):
# sample two cells
i, j = np.random.choice(singlet_num_cells, size=2)
# generate doublets
in_silico_doublets[di, :] = \
doublet_function(singlet_scvi_data.X, i, j,
doublet_depth=args.doublet_depth,
cell_depths=cell_depths, cells_ids=cells_ids,
randomize_doublet_size=args.randomize_doublet_size)
# merge datasets
# we can maybe up sample the known doublets
# concatentate
classifier_data = GeneExpressionDataset()
classifier_data.populate_from_data(
X=np.vstack([scvi_data.X,
in_silico_doublets]),
labels=np.hstack([np.ravel(scvi_data.labels),
np.ones(in_silico_doublets.shape[0])]),
remap_attributes=False)
assert(len(np.unique(classifier_data.labels.flatten())) == 2)
##################################################
# classifier
# model
classifier = Classifier(n_input=(vae.n_latent + 1),
n_hidden=params['cl_hidden'],
n_layers=params['cl_layers'], n_labels=2,
dropout_rate=params['dropout_rate'])
# trainer
stopping_params['early_stopping_metric'] = 'accuracy'
stopping_params['save_best_state_metric'] = 'accuracy'
strainer = ClassifierTrainer(classifier, classifier_data,
train_size=(1. - valid_pct),
frequency=2, metrics_to_monitor=['accuracy'],
use_cuda=args.gpu,
sampling_model=vae, sampling_zl=True,
early_stopping_kwargs=stopping_params,
batch_size=batch_size)
# initial
strainer.train(n_epochs=1000, lr=learning_rate)
# drop learning rate and continue
strainer.early_stopping.wait = 0
strainer.train(n_epochs=300, lr=0.1 * learning_rate)
torch.save(classifier.state_dict(), os.path.join(args.out_dir, 'classifier.pt'))
##################################################
# post-processing
# use logits for predictions for better results
logits_classifier = Classifier(n_input=(vae.n_latent + 1),
n_hidden=params['cl_hidden'],
n_layers=params['cl_layers'], n_labels=2,
dropout_rate=params['dropout_rate'],
logits=True)
logits_classifier.load_state_dict(classifier.state_dict())
# using logits leads to better performance in for ranking
logits_strainer = ClassifierTrainer(logits_classifier, classifier_data,
train_size=(1. - valid_pct),
frequency=2,
metrics_to_monitor=['accuracy'],
use_cuda=args.gpu,
sampling_model=vae, sampling_zl=True,
early_stopping_kwargs=stopping_params,
batch_size=batch_size)
# models evaluation mode
vae.eval()
classifier.eval()
logits_classifier.eval()
print('Train accuracy: %.4f' % strainer.train_set.accuracy())
print('Test accuracy: %.4f' % strainer.test_set.accuracy())
# compute predictions manually
# output logits
train_y, train_score = strainer.train_set.compute_predictions(soft=True)
test_y, test_score = strainer.test_set.compute_predictions(soft=True)
# train_y == true label
# train_score[:, 0] == singlet score; train_score[:, 1] == doublet score
train_score = train_score[:, 1]
train_y = train_y.astype('bool')
test_score = test_score[:, 1]
test_y = test_y.astype('bool')
train_auroc = roc_auc_score(train_y, train_score)
test_auroc = roc_auc_score(test_y, test_score)
print('Train AUROC: %.4f' % train_auroc)
print('Test AUROC: %.4f' % test_auroc)
train_fpr, train_tpr, train_t = roc_curve(train_y, train_score)
test_fpr, test_tpr, test_t = roc_curve(test_y, test_score)
train_t = np.minimum(train_t, 1 + 1e-9)
test_t = np.minimum(test_t, 1 + 1e-9)
train_acc = np.zeros(len(train_t))
for i in range(len(train_t)):
train_acc[i] = np.mean(train_y == (train_score > train_t[i]))
test_acc = np.zeros(len(test_t))
for i in range(len(test_t)):
test_acc[i] = np.mean(test_y == (test_score > test_t[i]))
# write predictions
# softmax predictions
order_y, order_score = strainer.compute_predictions(soft=True)
_, order_pred = strainer.compute_predictions()
doublet_score = order_score[:, 1]
np.save(os.path.join(args.out_dir, 'no_updates_softmax_scores.npy'), doublet_score[:num_cells])
np.save(os.path.join(args.out_dir, 'no_updates_softmax_scores_sim.npy'), doublet_score[num_cells:])
# logit predictions
logit_y, logit_score = logits_strainer.compute_predictions(soft=True)
logit_doublet_score = logit_score[:, 1]
np.save(os.path.join(args.out_dir, 'logit_scores.npy'), logit_doublet_score[:num_cells])
np.save(os.path.join(args.out_dir, 'logit_scores_sim.npy'), logit_doublet_score[num_cells:])
# update threshold as a function of Solo's estimate of the number of
# doublets
# essentially a log odds update
# TODO put in a function
diff = np.inf
counter_update = 0
solo_scores = doublet_score[:num_cells]
logit_scores = logit_doublet_score[:num_cells]
d_s = (args.doublet_ratio / (args.doublet_ratio + 1))
while (diff > .01) | (counter_update < 5):
# calculate log odss calibration for logits
d_o = np.mean(solo_scores)
c = np.log(d_o/(1-d_o)) - np.log(d_s/(1-d_s))
# update soloe scores
solo_scores = 1 / (1+np.exp(-(logit_scores + c)))
# update while conditions
diff = np.abs(d_o - np.mean(solo_scores))
counter_update += 1
np.save(os.path.join(args.out_dir, 'softmax_scores.npy'),
solo_scores)
if args.expected_number_of_doublets is not None:
k = len(solo_scores) - args.expected_number_of_doublets
if args.expected_number_of_doublets / len(solo_scores) > .5:
print('''Make sure you actually expect more than half your cells
to be doublets. If not change your
-e | |
<reponame>BenTenmann/scirpy<gh_stars>0
import pandas as pd
import json
from anndata import AnnData
from ._datastructures import AirrCell
from typing import (
Iterable,
Mapping,
MutableMapping,
Sequence,
Union,
Collection,
Optional,
)
import numpy as np
from glob import iglob
import pickle
import os.path
from . import _tracerlib
import sys
from pathlib import Path
import airr
from ..util import _doc_params, _is_true, _is_true2, _translate_dna_to_protein, _is_na2
from ._convert_anndata import from_airr_cells, to_airr_cells, _sanitize_anndata
from ._util import doc_working_model, _IOLogger, _check_upgrade_schema
from .._compat import Literal
from airr import RearrangementSchema
import itertools
import re
from .. import __version__
# patch sys.modules to enable pickle import.
# see https://stackoverflow.com/questions/2121874/python-pckling-after-changing-a-modules-directory
sys.modules["tracerlib"] = _tracerlib
DEFAULT_AIRR_FIELDS = (
"productive",
"locus",
"v_call",
"d_call",
"j_call",
"c_call",
"junction",
"junction_aa",
"consensus_count",
"duplicate_count",
)
DEFAULT_10X_FIELDS = DEFAULT_AIRR_FIELDS + ("is_cell", "high_confidence")
DEFAULT_AIRR_CELL_ATTRIBUTES = ("is_cell", "high_confidence", "multi_chain")
def _cdr3_from_junction(junction_aa, junction_nt):
    """Derive the CDR3 sequences by trimming the conserved flanking residues.

    The CDR3 equals the junction without the conserved residues C and W/F,
    respectively. Should the conserved residues not equal C and W/F, the
    chain is non-productive and the corresponding CDR3 stays ``None``.
    See also https://github.com/icbi-lab/scirpy/pull/290.
    """
    cdr3_aa = None
    cdr3_nt = None
    # Amino-acid level: first residue must be C, last residue W or F.
    aa_ok = (
        not _is_na2(junction_aa)
        and junction_aa[0] == "C"
        and junction_aa[-1] in ("W", "F")
    )
    if aa_ok:
        cdr3_aa = junction_aa[1:-1]
    # Nucleotide level: translate the first and last codon and apply the
    # same conserved-residue check, then strip one codon from each end.
    nt_ok = (
        not _is_na2(junction_nt)
        and _translate_dna_to_protein(junction_nt[:3]) == "C"
        and _translate_dna_to_protein(junction_nt[-3:]) in ("W", "F")
    )
    if nt_ok:
        cdr3_nt = junction_nt[3:-3]
    return cdr3_aa, cdr3_nt
def _read_10x_vdj_json(
    path: Union[str, Path],
    filtered: bool = True,
    include_fields: Optional[Collection[str]] = None,
) -> AnnData:
    """Read IR data from a 10x genomics `all_contig_annotations.json` file.

    Parameters
    ----------
    path
        Path to the `all_contig_annotations.json` file.
    filtered
        If True, keep only contigs for which both ``is_cell`` and
        ``high_confidence`` are truthy.
    include_fields
        Passed through to :func:`from_airr_cells`; restricts which AIRR
        fields are kept in the returned object.
    """
    logger = _IOLogger()
    with open(path, "r") as f:
        cells = json.load(f)
    # One AirrCell per barcode; contigs sharing a barcode become its chains.
    airr_cells = {}
    for cell in cells:
        if filtered and not (cell["is_cell"] and cell["high_confidence"]):
            continue
        barcode = cell["barcode"]
        if barcode not in airr_cells:
            ir_obj = AirrCell(
                barcode,
                logger=logger,
                cell_attribute_fields=["is_cell", "high_confidence"],
            )
            airr_cells[barcode] = ir_obj
        else:
            ir_obj = airr_cells[barcode]
        # AIRR-compliant chain dict
        chain = AirrCell.empty_chain_dict()
        # Collect per-segment annotations: chain type, gene name and the
        # contig match coordinates for the V/D/J/C regions.
        genes = dict()
        mapping = {
            "L-REGION+V-REGION": "v",
            "D-REGION": "d",
            "J-REGION": "j",
            "C-REGION": "c",
        }
        for annot in cell["annotations"]:
            feat = annot["feature"]
            if feat["region_type"] in mapping:
                region = mapping[feat["region_type"]]
                # each region type may occur at most once per contig
                assert region not in genes, region
                genes[region] = dict()
                genes[region]["chain"] = feat["chain"]
                genes[region]["gene"] = feat["gene_name"]
                genes[region]["start"] = annot["contig_match_start"]
                genes[region]["end"] = annot["contig_match_end"]
        chain["v_call"] = genes["v"]["gene"] if "v" in genes else None
        chain["d_call"] = genes["d"]["gene"] if "d" in genes else None
        chain["j_call"] = genes["j"]["gene"] if "j" in genes else None
        chain["c_call"] = genes["c"]["gene"] if "c" in genes else None
        # check if chain type is consistent
        chain_types = [g["chain"] for g in genes.values()]
        chain_type = chain_types[0] if np.unique(chain_types).size == 1 else None
        # compute inserted nucleotides
        # VJ junction for TRA, TRG, IGK, IGL chains
        # VD + DJ junction for TRB, TRD, IGH chains
        #
        # Notes on indexing:
        # some tryouts have shown, that the indexes in the json file
        # seem to be python-type indexes (i.e. the 'end' index is exclusive).
        # Therefore, no `-1` needs to be subtracted when computing the number
        # of inserted nucleotides.
        chain["np1_length"] = None
        chain["np2_length"] = None
        if (
            chain_type in AirrCell.VJ_LOCI
            and chain["v_call"] is not None
            and chain["j_call"] is not None
        ):
            assert (
                chain["d_call"] is None
            ), "TRA, TRG or IG-light chains should not have a D region"
            chain["np1_length"] = genes["j"]["start"] - genes["v"]["end"]
        elif (
            chain_type in AirrCell.VDJ_LOCI
            and chain["v_call"] is not None
            and chain["j_call"] is not None
            and chain["d_call"] is not None
        ):
            chain["np1_length"] = genes["d"]["start"] - genes["v"]["end"]
            chain["np2_length"] = genes["j"]["start"] - genes["d"]["end"]
        chain["locus"] = chain_type
        chain["junction"] = cell["cdr3_seq"]
        chain["junction_aa"] = cell["cdr3"]
        chain["duplicate_count"] = cell["umi_count"]
        chain["consensus_count"] = cell["read_count"]
        chain["productive"] = cell["productive"]
        chain["is_cell"] = cell["is_cell"]
        chain["high_confidence"] = cell["high_confidence"]
        # additional cols from CR6 outputs: fwr{1,2,3,4}{,_nt} and cdr{1,2}{,_nt}
        fwrs = [f"fwr{i}" for i in range(1, 5)]
        cdrs = [f"cdr{i}" for i in range(1, 3)]
        for col in fwrs + cdrs:
            if col in cell.keys():
                chain[col] = cell[col].get("nt_seq") if cell[col] else None
                chain[col + "_aa"] = cell[col].get("aa_seq") if cell[col] else None
        # CDR3 = junction minus the conserved flanking residues (see helper).
        chain["cdr3_aa"], chain["cdr3"] = _cdr3_from_junction(
            chain["junction_aa"], chain["junction"]
        )
        ir_obj.add_chain(chain)
    return from_airr_cells(airr_cells.values(), include_fields=include_fields)
def _read_10x_vdj_csv(
    path: Union[str, Path],
    filtered: bool = True,
    include_fields: Optional[Collection[str]] = None,
) -> AnnData:
    """Read IR data from a 10x genomics `_contig_annotations.csv` file.

    Parameters
    ----------
    path
        Path to the `{all,filtered}_contig_annotations.csv` file.
    filtered
        If True, keep only rows where both `is_cell` and `high_confidence`
        are truthy.
    include_fields
        Passed through to :func:`from_airr_cells`; restricts which AIRR
        fields are kept in the returned object.
    """
    logger = _IOLogger()
    df = pd.read_csv(path)
    airr_cells = {}
    if filtered:
        df = df.loc[_is_true(df["is_cell"]) & _is_true(df["high_confidence"]), :]
    # One AirrCell per barcode; each CSV row becomes one chain of that cell.
    for barcode, cell_df in df.groupby("barcode"):
        ir_obj = AirrCell(
            barcode, logger=logger, cell_attribute_fields=("is_cell", "high_confidence")
        )
        for _, chain_series in cell_df.iterrows():
            chain_dict = AirrCell.empty_chain_dict()
            # Map the 10x column names onto the AIRR rearrangement fields.
            chain_dict.update(
                locus=chain_series["chain"],
                junction_aa=chain_series["cdr3"],
                junction=chain_series["cdr3_nt"],
                duplicate_count=chain_series["umis"],
                consensus_count=chain_series["reads"],
                productive=_is_true2(chain_series["productive"]),
                v_call=chain_series["v_gene"],
                d_call=chain_series["d_gene"],
                j_call=chain_series["j_gene"],
                c_call=chain_series["c_gene"],
                is_cell=chain_series["is_cell"],
                high_confidence=chain_series["high_confidence"],
            )
            # additional cols from CR6 outputs: fwr{1,2,3,4}{,_nt} and cdr{1,2}{,_nt}
            fwrs = [f"fwr{i}" for i in range(1, 5)]
            cdrs = [f"cdr{i}" for i in range(1, 3)]
            for col in fwrs + cdrs:
                if col in chain_series.index:
                    chain_dict[col + "_aa"] = chain_series.get(col)
                if col + "_nt" in chain_series.index:
                    chain_dict[col] = chain_series.get(col + "_nt")
            # CDR3 = junction minus the conserved flanking residues (see helper).
            chain_dict["cdr3_aa"], chain_dict["cdr3"] = _cdr3_from_junction(
                chain_dict["junction_aa"], chain_dict["junction"]
            )
            ir_obj.add_chain(chain_dict)
        airr_cells[barcode] = ir_obj
    return from_airr_cells(airr_cells.values(), include_fields=include_fields)
@_doc_params(doc_working_model=doc_working_model, include_fields=DEFAULT_10X_FIELDS)
def read_10x_vdj(
    path: Union[str, Path],
    filtered: bool = True,
    include_fields: Optional[Collection[str]] = DEFAULT_10X_FIELDS,
) -> AnnData:
    """\
    Read :term:`IR` data from 10x Genomics cell-ranger output.
    Both `all_contig_annotations.json` and
    `{{all,filtered}}_contig_annotations.csv` are supported.
    Prefer the `json` file when it is available: it additionally carries
    information about V(D)J-junction insertions. Apart from that, the two
    formats are equivalent.
    {doc_working_model}
    Parameters
    ----------
    path
        Path to `filterd_contig_annotations.csv`, `all_contig_annotations.csv` or
        `all_contig_annotations.json`.
    filtered
        Only keep filtered contig annotations (i.e. `is_cell` and `high_confidence`).
        If using `filtered_contig_annotations.csv` already, this option
        is futile.
    include_fields
        The fields to include in `adata`. The AIRR rearrangment schema contains
        can contain a lot of columns, most of which irrelevant for most analyses.
        Per default, this includes a subset of columns relevant for a typical
        scirpy analysis, to keep `adata.obs` a bit cleaner. Defaults to {include_fields}.
        Set this to `None` to include all columns.
    Returns
    -------
    AnnData object with IR data in `obs` for each cell. For more details see
    :ref:`data-structure`.
    """
    # Dispatch on the file extension; both readers share the same signature.
    path = Path(path)
    reader = _read_10x_vdj_json if path.suffix == ".json" else _read_10x_vdj_csv
    return reader(path, filtered, include_fields)
@_doc_params(doc_working_model=doc_working_model)
def read_tracer(path: Union[str, Path]) -> AnnData:
    """\
    Read data from `TraCeR <https://github.com/Teichlab/tracer>`_ (:cite:`Stubbington2016-kh`).
    Requires the TraCeR output directory which contains a folder for each cell.
    Unfortunately the results files generated by `tracer summarize` do not
    contain all required information.
    The function will read TCR information from the `filtered_TCR_seqs/<CELL_ID>.pkl`
    files.
    {doc_working_model}
    Parameters
    ----------
    path
        Path to the TraCeR output folder.
    Returns
    -------
    AnnData object with TCR data in `obs` for each cell. For more details see
    :ref:`data-structure`.
    """
    logger = _IOLogger()
    airr_cells = {}
    path = str(path)
    # Convert TraCeR recombinant objects of one locus into AIRR chain dicts.
    def _process_chains(chains, chain_type):
        for tmp_chain in chains:
            if tmp_chain.cdr3 == "N/A" or tmp_chain.cdr3nt == "N/A":
                # ignore chains that have no sequence
                continue
            # AIRR-rearrangement compliant chain dictionary
            chain_dict = AirrCell.empty_chain_dict()
            # `summary` layout differs depending on whether a D segment is
            # present (VDJ loci) or not (VJ loci); the asserts pin that layout.
            if tmp_chain.has_D_segment:
                assert chain_type in AirrCell.VDJ_LOCI
                assert len(tmp_chain.junction_details) == 5
                assert len(tmp_chain.summary) == 8
                # strip the allele suffix ("*01") from the gene calls
                chain_dict["v_call"] = tmp_chain.summary[0].split("*")[0]
                chain_dict["d_call"] = tmp_chain.summary[1].split("*")[0]
                chain_dict["j_call"] = tmp_chain.summary[2].split("*")[0]
            else:
                assert chain_type in AirrCell.VJ_LOCI
                assert len(tmp_chain.junction_details) == 3
                assert len(tmp_chain.summary) == 7
                chain_dict["v_call"] = tmp_chain.summary[0].split("*")[0]
                chain_dict["d_call"] = None
                chain_dict["j_call"] = tmp_chain.summary[1].split("*")[0]
            for call_key in ["v_call", "d_call", "j_call"]:
                if chain_dict[call_key] == "N/A":
                    chain_dict[call_key] = None
                if chain_dict[call_key] is not None:
                    # sanity check: 4th char of e.g. "TRBV..." is the segment letter
                    assert chain_dict[call_key][3] == call_key[0].upper()
            # inserted nucleotides between V and the next segment
            chain_dict["np1_length"] = (
                len(tmp_chain.junction_details[1])
                if tmp_chain.junction_details[1] != "N/A"
                else None
            )
            try:
                # only in VDJ
                chain_dict["np2_length"] = (
                    len(tmp_chain.junction_details[3])
                    if tmp_chain.junction_details[3] != "N/A"
                    else None
                )
            except IndexError:
                chain_dict["np2_length"] = None
            chain_dict["locus"] = chain_type
            chain_dict["consensus_count"] = tmp_chain.TPM
            chain_dict["productive"] = tmp_chain.productive
            chain_dict["junction"] = tmp_chain.cdr3nt
            chain_dict["junction_aa"] = tmp_chain.cdr3
            yield chain_dict
    # one pickle per cell: <CELL>/filtered_TCR_seqs/<CELL_ID>.pkl
    for summary_file in iglob(
        os.path.join(path, "**/filtered_TCR_seqs/*.pkl"), recursive=True
    ):
        cell_name = summary_file.split(os.sep)[-3]
        airr_cell = AirrCell(cell_name, logger=logger)
        try:
            with open(summary_file, "rb") as f:
                tracer_obj = pickle.load(f)
            chains = tracer_obj.recombinants["TCR"]
            for chain_id in "ABGD":
                if chain_id in chains and chains[chain_id] is not None:
                    for tmp_chain in _process_chains(
                        chains[chain_id], f"TR{chain_id}"
                    ):
                        airr_cell.add_chain(tmp_chain)
        # NOTE(review): only ImportError is wrapped here (unpickling can fail
        # to import tracerlib classes); the broader `except Exception` below
        # is commented out — presumably deliberate, but worth confirming.
        except ImportError as e:
            # except Exception as e:
            raise Exception(
                "Error loading TCR data from cell {}".format(summary_file)
            ) from e
        airr_cells[cell_name] = airr_cell
    if not len(airr_cells):
        raise IOError(
            "Could not find any TraCeR *.pkl files. Make sure you are "
            "using a TraCeR output folder that looks like "
            "<CELL>/filtered_TCR_seqs/*.pkl"
        )
    return from_airr_cells(airr_cells.values())
@_doc_params(
doc_working_model=doc_working_model,
cell_attributes=f"""`({",".join([f'"{x}"' for x in DEFAULT_AIRR_CELL_ATTRIBUTES])})`""",
include_fields=f"""`({",".join([f'"{x}"' for x in DEFAULT_AIRR_FIELDS])})`""",
)
def read_airr(
path: Union[str, Sequence[str], Path, Sequence[Path]],
use_umi_count_col: Union[bool, Literal["auto"]] = "auto",
infer_locus: bool = True,
cell_attributes: Collection[str] = DEFAULT_AIRR_CELL_ATTRIBUTES,
include_fields: Optional[Collection[str]] = DEFAULT_AIRR_FIELDS,
) -> AnnData:
"""\
Read data from `AIRR | |
# coding: utf-8
"""
Hydrogen Nucleus API
The Hydrogen Nucleus API # noqa: E501
OpenAPI spec version: 1.9.5
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from nucleus_api.configuration import Configuration
class Budget(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_id': 'str',
'aggregation_accounts': 'list[BudgetAggregationAccount]',
'budget': 'list[BudgetObject]',
'card_id': 'str',
'client_id': 'str',
'create_date': 'datetime',
'currency_code': 'str',
'end_date': 'date',
'frequency': 'int',
'frequency_unit': 'str',
'goal_id': 'str',
'id': 'str',
'is_active': 'bool',
'metadata': 'dict(str, str)',
'name': 'str',
'secondary_id': 'str',
'start_date': 'date',
'total_value': 'float',
'update_date': 'datetime'
}
attribute_map = {
'account_id': 'account_id',
'aggregation_accounts': 'aggregation_accounts',
'budget': 'budget',
'card_id': 'card_id',
'client_id': 'client_id',
'create_date': 'create_date',
'currency_code': 'currency_code',
'end_date': 'end_date',
'frequency': 'frequency',
'frequency_unit': 'frequency_unit',
'goal_id': 'goal_id',
'id': 'id',
'is_active': 'is_active',
'metadata': 'metadata',
'name': 'name',
'secondary_id': 'secondary_id',
'start_date': 'start_date',
'total_value': 'total_value',
'update_date': 'update_date'
}
    def __init__(self, account_id=None, aggregation_accounts=None, budget=None, card_id=None, client_id=None, create_date=None, currency_code=None, end_date=None, frequency=None, frequency_unit=None, goal_id=None, id=None, is_active=None, metadata=None, name=None, secondary_id=None, start_date=None, total_value=None, update_date=None, _configuration=None):  # noqa: E501
        """Budget - a model defined in Swagger"""  # noqa: E501
        # Fall back to a default client configuration when none is supplied.
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        # Backing fields for the properties defined below.
        self._account_id = None
        self._aggregation_accounts = None
        self._budget = None
        self._card_id = None
        self._client_id = None
        self._create_date = None
        self._currency_code = None
        self._end_date = None
        self._frequency = None
        self._frequency_unit = None
        self._goal_id = None
        self._id = None
        self._is_active = None
        self._metadata = None
        self._name = None
        self._secondary_id = None
        self._start_date = None
        self._total_value = None
        self._update_date = None
        self.discriminator = None
        # Optional fields are only assigned when provided; client_id,
        # currency_code, frequency_unit and name are assigned unconditionally
        # and therefore always pass through their property setters.
        if account_id is not None:
            self.account_id = account_id
        if aggregation_accounts is not None:
            self.aggregation_accounts = aggregation_accounts
        if budget is not None:
            self.budget = budget
        if card_id is not None:
            self.card_id = card_id
        self.client_id = client_id
        if create_date is not None:
            self.create_date = create_date
        self.currency_code = currency_code
        if end_date is not None:
            self.end_date = end_date
        if frequency is not None:
            self.frequency = frequency
        self.frequency_unit = frequency_unit
        if goal_id is not None:
            self.goal_id = goal_id
        if id is not None:
            self.id = id
        if is_active is not None:
            self.is_active = is_active
        if metadata is not None:
            self.metadata = metadata
        self.name = name
        if secondary_id is not None:
            self.secondary_id = secondary_id
        if start_date is not None:
            self.start_date = start_date
        if total_value is not None:
            self.total_value = total_value
        if update_date is not None:
            self.update_date = update_date
    @property
    def account_id(self):
        """Gets the account_id of this Budget (JSON key ``account_id`` / accountId).

        :return: The account_id of this Budget.
        :rtype: str
        """
        return self._account_id
    @account_id.setter
    def account_id(self, account_id):
        """Sets the account_id of this Budget (accountId). Optional field.

        :param account_id: The account_id of this Budget.
        :type: str
        """
        self._account_id = account_id
    @property
    def aggregation_accounts(self):
        """Gets the aggregation_accounts of this Budget.

        :return: The aggregation_accounts of this Budget.
        :rtype: list[BudgetAggregationAccount]
        """
        return self._aggregation_accounts
    @aggregation_accounts.setter
    def aggregation_accounts(self, aggregation_accounts):
        """Sets the aggregation_accounts of this Budget. Optional field.

        :param aggregation_accounts: The aggregation_accounts of this Budget.
        :type: list[BudgetAggregationAccount]
        """
        self._aggregation_accounts = aggregation_accounts
    @property
    def budget(self):
        """Gets the budget entries of this Budget.

        :return: The budget of this Budget.
        :rtype: list[BudgetObject]
        """
        return self._budget
    @budget.setter
    def budget(self, budget):
        """Sets the budget entries of this Budget. Optional field.

        :param budget: The budget of this Budget.
        :type: list[BudgetObject]
        """
        self._budget = budget
    @property
    def card_id(self):
        """Gets the card_id of this Budget (cardId).

        :return: The card_id of this Budget.
        :rtype: str
        """
        return self._card_id
    @card_id.setter
    def card_id(self, card_id):
        """Sets the card_id of this Budget (cardId). Optional field.

        :param card_id: The card_id of this Budget.
        :type: str
        """
        self._card_id = card_id
    @property
    def client_id(self):
        """Gets the client_id of this Budget (clientId).

        :return: The client_id of this Budget.
        :rtype: str
        """
        return self._client_id
@client_id.setter
def client_id(self, client_id):
"""Sets the client_id of this Budget.
clientId # noqa: E501
:param client_id: The client_id of this Budget. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and client_id is None:
raise ValueError("Invalid value for `client_id`, must not be `None`") # noqa: E501
self._client_id = client_id
    @property
    def create_date(self):
        """Gets the create_date of this Budget.

        :return: The create_date of this Budget.
        :rtype: datetime
        """
        return self._create_date
    @create_date.setter
    def create_date(self, create_date):
        """Sets the create_date of this Budget. Optional field.

        :param create_date: The create_date of this Budget.
        :type: datetime
        """
        self._create_date = create_date
    @property
    def currency_code(self):
        """Gets the currency_code of this Budget (currencyCode).

        :return: The currency_code of this Budget.
        :rtype: str
        """
        return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""Sets the currency_code of this Budget.
currencyCode # noqa: E501
:param currency_code: The currency_code of this Budget. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and currency_code is None:
raise ValueError("Invalid value for `currency_code`, must not be `None`") # noqa: E501
self._currency_code = currency_code
@property
def end_date(self):
"""Gets the end_date of this Budget. # noqa: E501
endDate # noqa: E501
:return: The end_date of this Budget. # noqa: E501
:rtype: date
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this Budget.
endDate # noqa: E501
:param end_date: The end_date of this Budget. # noqa: E501
:type: date
"""
self._end_date = end_date
@property
def frequency(self):
    """The recurrence frequency of this Budget.

    :rtype: int
    """
    return self._frequency

@frequency.setter
def frequency(self, frequency):
    """Store the recurrence frequency of this Budget.

    :param frequency: int frequency value to assign.
    """
    self._frequency = frequency

@property
def frequency_unit(self):
    """The recurrence unit (frequencyUnit) of this Budget.

    :rtype: str
    """
    return self._frequency_unit

@frequency_unit.setter
def frequency_unit(self, frequency_unit):
    """Store the recurrence unit (frequencyUnit) of this Budget.

    :param frequency_unit: str unit; must not be ``None`` when
        client-side validation is enabled.
    :raises ValueError: if client-side validation is on and
        *frequency_unit* is ``None``.
    """
    validation_on = self._configuration.client_side_validation
    if validation_on and frequency_unit is None:
        raise ValueError("Invalid value for `frequency_unit`, must not be `None`")  # noqa: E501
    self._frequency_unit = frequency_unit

@property
def goal_id(self):
    """The goal identifier (goalId) of this Budget.

    :rtype: str
    """
    return self._goal_id

@goal_id.setter
def goal_id(self, goal_id):
    """Store the goal identifier (goalId) of this Budget.

    :param goal_id: str goal identifier to assign.
    """
    self._goal_id = goal_id
@property
def id(self):
    """The unique identifier of this Budget.

    :rtype: str
    """
    return self._id

@id.setter
def id(self, id):
    """Store the unique identifier of this Budget.

    :param id: str identifier to assign.
    """
    self._id = id

@property
def is_active(self):
    """Whether this Budget is active.

    :rtype: bool
    """
    return self._is_active

@is_active.setter
def is_active(self, is_active):
    """Store the active flag of this Budget.

    :param is_active: bool flag to assign.
    """
    self._is_active = is_active

@property
def metadata(self):
    """The free-form metadata of this Budget.

    :rtype: dict(str, str)
    """
    return self._metadata

@metadata.setter
def metadata(self, metadata):
    """Store the free-form metadata of this Budget.

    :param metadata: dict(str, str) mapping to assign.
    """
    self._metadata = metadata
@property
def name(self):
"""Gets the name of this Budget. # noqa: E501
name # noqa: E501
:return: The name of | |
# Repository: berkalpay/EVcouplings
"""
Functions for handling evolutionary couplings data.
.. todo::
1. clean up
2. add Pompom score
3. add mapping tools (multidomain, complexes)
4. ECs to matrix
5. APC on subsets of positions (e.g. for complexes)
Authors:
<NAME>
<NAME> (original mixture model code)
<NAME> (skew normal mixture model)
<NAME> (EVComplex Score code)
"""
from math import ceil
from copy import deepcopy
from pkg_resources import resource_filename
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as op
from scipy import stats
from sklearn.linear_model import LogisticRegression
from evcouplings.utils.calculations import median_absolute_deviation
from evcouplings.utils.config import read_config_file
def read_raw_ec_file(filename, sort=True, score="cn"):
    """
    Read a raw EC file (e.g. from plmc) and sort
    by scores

    Parameters
    ----------
    filename : str
        File containing evolutionary couplings
    sort : bool, optional (default: True)
        If True, sort pairs by coupling score in
        descending order
    score : str, optional (default: "cn")
        Score column to be used for sorting

    Returns
    -------
    ecs : pd.DataFrame
        Table of evolutionary couplings
    """
    # plmc output is space-separated with a fixed column order:
    # position i, residue A_i, position j, residue A_j, FN score, CN score
    ecs = pd.read_csv(
        filename, sep=" ",
        names=["i", "A_i", "j", "A_j", "fn", "cn"]
    )

    if sort:
        ecs = ecs.sort_values(
            by=score, ascending=False
        )

    return ecs
def enrichment(ecs, num_pairs=1.0, score="cn", min_seqdist=6):
    """
    Calculate EC "enrichment" as first described in
    Hopf et al., Cell, 2012.

    .. todo::

        Make this handle segments if they are in EC table

    Parameters
    ----------
    ecs : pd.DataFrame
        Dataframe containing couplings
    num_pairs : int or float, optional (default: 1.0)
        Number of ECs to use for enrichment calculation.

        - If float, will be interpreted as fraction of the
          length of the sequence (e.g. 1.0*L)
        - If int, will be interpreted as
          absolute number of pairs
    score : str, optional (default: cn)
        Pair coupling score used for calculation
    min_seqdist : int, optional (default: 6)
        Minimum sequence distance of couplings that will
        be included in the calculation

    Returns
    -------
    enrichment_table : pd.DataFrame
        Sorted table with enrichment values for each
        position in the sequence (columns: i, A_i, enrichment)
    """
    # determine how many positions ECs are over
    pos = set(ecs.i.unique()) | set(ecs.j.unique())
    num_pos = len(pos)

    # calculate absolute number of pairs if
    # fraction of length is given
    if isinstance(num_pairs, float):
        num_pairs = int(ceil(num_pairs * num_pos))

    # get longrange ECs and sort by score
    sorted_ecs = ecs.query(
        "abs(i-j) >= {}".format(min_seqdist)
    ).sort_values(
        by=score, ascending=False
    )

    # select top EC pairs
    top_ecs = sorted_ecs.iloc[0:num_pairs]

    # stack dataframe so it contains each
    # EC twice as forward and backward pairs
    # (i, j) and (j, i)
    flipped = top_ecs.rename(
        columns={"i": "j", "j": "i", "A_i": "A_j", "A_j": "A_i"}
    )

    # DataFrame.append() was deprecated and removed in pandas 2.0;
    # pd.concat is the behavior-equivalent replacement
    stacked_ecs = pd.concat([top_ecs, flipped])

    # now sum cumulative strength of EC for each position
    ec_sums = pd.DataFrame(
        stacked_ecs.groupby(["i", "A_i"]).sum()
    )

    # average EC strength for top ECs
    avg_degree = top_ecs.loc[:, score].sum() / len(top_ecs)

    # "enrichment" is ratio how much EC strength on
    # an individual position exceeds average strength in top
    ec_sums.loc[:, "enrichment"] = ec_sums.loc[:, score] / avg_degree

    e = ec_sums.reset_index().loc[:, ["i", "A_i", "enrichment"]]
    return e.sort_values(by="enrichment", ascending=False)
class LegacyScoreMixtureModel:
    """
    Assign to each EC score the probability of being in the
    lognormal tail of a normal-lognormal mixture model.

    .. note::

        this is the original version of the score mixture model
        with a normal distribution noise component, this has been
        superseded by a model using a skew normal distribution
    """
    def __init__(self, x, clamp_mu=False, max_fun=10000, max_iter=1000):
        """
        Mixture model of evolutionary coupling scores to
        determine significant scores that are in high-scoring,
        positive tail of distribution.

        Parameters
        ----------
        x : np.array (or list-like)
            EC scores from which to infer the mixture model
        clamp_mu : bool, optional (default: False)
            Fix mean of Gaussian component to 0 instead of
            fitting it based on data
        max_fun : int
            Maximum number of function evaluations
        max_iter : int
            Maximum number of iterations
        """
        x = np.array(x)

        # Infer parameters of mixture model
        self.params = self._learn_params(x, clamp_mu, max_fun, max_iter)

    @classmethod
    def _learn_params(cls, x, clamp_mu, max_fun, max_iter):
        """
        Infer parameters of mixture model.

        Parameters
        ----------
        x : np.array (or list-like)
            EC scores from which to infer the mixture model
        clamp_mu : bool, optional (default: False)
            Fix mean of Gaussian component to 0 instead of
            fitting it based on data
        max_fun : int
            Maximum number of function evaluations
        max_iter : int
            Maximum number of iterations

        Returns
        -------
        mu : float
            Mean of normal distribution
        sigma : float
            Standard deviation of normal distribution
        q : float
            Relative weights of each distribution
        logmu : float
            Mean of lognormal distribution
        logsigma : float
            Standard deviation of lognormal distribution
        """
        # Initial starting parameters for normal
        # and lognormal distributions
        # q: relative contribution of each distribution
        mu = 0
        sigma = np.std(x)
        q = 1
        logsigma = 0.4
        # initialize lognormal location from upper quartile of scores,
        # shifted so the lognormal mean lands near that quartile
        logmu = np.percentile(x, 75) - logsigma**2 / 2

        param = np.array([mu, sigma, q, logmu, logsigma])

        # Target function for minimization:
        # negative log-likelihood of the mixture model
        def target_func(params):
            if clamp_mu:
                params[0] = 0

            return -np.sum(np.log(cls._gaussian_lognormal(x, params)))

        # Minimize function (Nelder-Mead simplex via scipy.optimize.fmin)
        coeff = op.fmin(
            target_func, param, maxfun=max_fun, maxiter=max_iter, disp=False
        )

        # If clamping mu, also set to 0 in the end, so this value
        # is used for probability calculations
        if clamp_mu:
            coeff[0] = 0

        q = coeff[2]

        # Check if fit worked: a weight q >= 1 (or non-finite) means the
        # lognormal tail component vanished
        if q >= 1 or np.isinf(q) or np.isneginf(q):
            raise ValueError(
                "No tail, fit failed. q={}".format(q)
            )

        return coeff

    @classmethod
    def _gaussian_lognormal(cls, x, params):
        """
        Gaussian-lognormal mixture probability
        density function.

        Parameters
        ----------
        x : np.array
            Scores for which PDF is calculated
        params : tuple
            Parameters of lognormal-Gaussian mixture
            (mu, sigma, class weight q, logmu, logsigma)

        Returns
        -------
        np.array
            Probabilities
        """
        return cls._gaussian(x, params) + cls._lognormal(x, params)

    @classmethod
    def _gaussian(cls, x, params):
        """
        Normal probability density (multiplied
        by class weight).

        Parameters
        ----------
        x : np.array
            Scores for which PDF is calculated
        params : tuple
            Parameters of lognormal-Gaussian mixture
            (mu, sigma, class weight q, logmu, logsigma)

        Returns
        -------
        np.array
            Probabilities
        """
        mu, sigma, q, logmu, logsigma = params
        return q * stats.norm.pdf(x, loc=mu, scale=sigma)

    @classmethod
    def _lognormal(cls, x, params):
        """
        Log normal probability density (multiplied
        by class weight).

        Parameters
        ----------
        x : np.array
            Scores for which PDF is calculated
        params : tuple
            Parameters of lognormal-Gaussian mixture
            (mu, sigma, class weight q, logmu, logsigma)

        Returns
        -------
        np.array
            Probabilities
        """
        mu, sigma, q, logmu, logsigma = params

        # only assign probability to defined (i.e. positive) values,
        # set all others to zero
        prob = np.zeros(len(x))
        xpos = x > 0
        # lognormal pdf written out explicitly: normal pdf of log(x),
        # divided by x (change of variables)
        prob[xpos] = (1 - q) * stats.norm.pdf(
            np.log(x[xpos]), loc=logmu, scale=logsigma
        ) / x[xpos]

        return prob

    def probability(self, x, plot=False):
        """
        Calculate posterior probability of EC pair to
        be located in positive (lognormal) tail of the
        distribution.

        Parameters
        ----------
        x : np.array (or list-like)
            List of scores
        plot : bool, optional (default: False)
            Plot score distribution and probabilities

        Returns
        -------
        posterior : np.array(float)
            Posterior probability of being in signal
            component of mixture model
        """
        x = np.array(x)
        p_lognormal = self._lognormal(x, self.params)
        p_gaussian = self._gaussian(x, self.params)

        # Bayes: P(signal | x) = p_signal(x) / (p_signal(x) + p_noise(x))
        # (class weights are already folded into the component densities)
        posterior = p_lognormal / (p_lognormal + p_gaussian)

        if plot:
            plt.figure(figsize=(12, 8))
            # fig = plt.figure(figsize=(4,4))
            c = "#fdc832"
            # empirical score histogram as a density estimate
            n_ECs, edges = np.histogram(x, 1000, density=True)
            mid = []
            for l, r in zip(edges[:-1], edges[1:]):
                mid.append((l + r) / 2)
            plt.plot(
                mid, n_ECs, '-', color=c, markerfacecolor=c,
                markeredgecolor='None', linewidth=1
            )
            plt.plot(x, posterior, '-k', linewidth=2)
            plt.plot(x, p_lognormal, 'r', linewidth=1)
            plt.plot(x, p_gaussian, 'b', linewidth=1)
            # first score where the signal density overtakes the noise density
            takeover = x[p_lognormal > p_gaussian].min()
            plt.axvline(takeover, color="grey")
            plt.axvline(x[posterior > 0.99].min(), color="grey", lw=1)
            pompom = abs(x.min())
            plt.axvline(-pompom, color="grey", ls="--")
            plt.axvline(pompom, color="grey", ls="--")
            plt.xlabel("EC scores")
            plt.ylabel("PDF")

        return posterior
class ScoreMixtureModel:
"""
Assign to each EC score the probability of being in the
lognormal tail of a skew normal-lognormal mixture model.
"""
def __init__(self, x):
"""
Mixture model of evolutionary coupling scores to
determine signifcant scores that are in high-scoring,
positive tail of distribution.
Parameters
----------
x : np.array (or list-like)
EC scores from which to infer the mixture model
"""
x = np.array(x)
# Infer parameters of mixture model
self.params = self._learn_params(x)
@classmethod
def skewnorm_pdf(cls, x, location, scale, skew):
"""
Probability density of skew normal distribution
(noise component)
Parameters
---------
x : np.array(float)
Data for which probability density should be calculated
location : float
Location parameter | |
# Repository: vishalbelsare/synapse (gh_stars: 0)
import os
import asyncio
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.lib.cell as s_cell
import synapse.lib.base as s_base
import synapse.lib.nexus as s_nexus
import synapse.lib.msgpack as s_msgpack
import synapse.lib.lmdbslab as s_lmdbslab
class JsonStor(s_base.Base):
    '''
    A filesystem like storage mechanism that allows hierarchical lookup of
    reference counted "objects" that have individually editable properties.

    #TODO json validation by path glob matches? (persists?)
    #TODO GUID ACCESS with index generation by type
    #TODO registered types jsonschema with optional write-back validation
    '''
    async def __anit__(self, slab, pref):

        await s_base.Base.__anit__(self)

        self.slab = slab
        self.pref = pref

        # buid -> item objects modified since the last slab commit
        self.dirty = {}

        # pathdb maps path keys -> 16-byte buid; itemdb maps buid -> msgpack item
        self.pathdb = self.slab.initdb(f'{pref}:paths')
        self.itemdb = self.slab.initdb(f'{pref}:items')

        # metadb holds per-object metadata such as the refcount (buid + b'refs')
        self.metadb = self.slab.initdb(f'{pref}:meta')
        self.fsinfo = self.slab.initdb(f'{pref}:fsinfo')

        # flush dirty items whenever the slab commits
        self.slab.on('commit', self._syncDirtyItems)

    async def _syncDirtyItems(self, mesg):
        # Persist every dirty item to the slab and clear the dirty cache.
        todo = list(self.dirty.items())
        for buid, item in todo:
            self.slab.put(buid, s_msgpack.en(item), db=self.itemdb)
            self.dirty.pop(buid, None)
            await asyncio.sleep(0)

    def _incRefObj(self, buid, valu=1):
        # Adjust the refcount for buid by valu; when it drops to zero,
        # remove the object and all of its metadata.
        refs = 0
        refsbyts = self.slab.get(buid + b'refs', db=self.metadb)
        if refsbyts:
            refs = s_msgpack.un(refsbyts)

        refs += valu
        if refs > 0:
            self.slab.put(buid + b'refs', s_msgpack.en(refs), db=self.metadb)
            return refs

        # remove the meta entries
        for lkey, byts in self.slab.scanByPref(buid, db=self.metadb):
            self.slab.pop(lkey, db=self.metadb)

        # remove the item data
        self.slab.pop(buid, db=self.itemdb)
        self.dirty.pop(buid, None)

    async def copyPathObj(self, oldp, newp):
        # Deep-copy semantics: newp gets a fresh object, not a link.
        item = await self.getPathObj(oldp)
        await self.setPathObj(newp, item)

    async def copyPathObjs(self, paths):
        # paths: iterable of (oldpath, newpath) pairs.
        for oldp, newp in paths:
            await self.copyPathObj(oldp, newp)
            await asyncio.sleep(0)

    async def setPathObj(self, path, item):
        '''
        Set (and/or reinitialize) the object at the given path.

        NOTE: This will break any links by creating a new object.
        '''
        buid = os.urandom(16)

        pkey = self._pathToPkey(path)
        oldb = self.slab.replace(pkey, buid, db=self.pathdb)
        if oldb is not None:
            # path previously pointed at another object; decref it
            self._incRefObj(oldb, -1)

        self.slab.put(buid + b'refs', s_msgpack.en(1), db=self.metadb)
        # item is written lazily on the next slab commit
        self.dirty[buid] = item

    async def getPathObj(self, path):
        # Returns the item at path, or None if the path does not exist.
        buid = self._pathToBuid(path)
        if buid is None:
            return None
        return self._getBuidItem(buid)

    def _getBuidItem(self, buid):
        # Dirty cache wins over the slab so un-flushed edits are visible.
        item = self.dirty.get(buid)
        if item is not None:
            return item

        byts = self.slab.get(buid, db=self.itemdb)
        if byts is not None:
            return s_msgpack.un(byts)

    def _pathToBuid(self, path):
        pkey = self._pathToPkey(path)
        return self.slab.get(pkey, db=self.pathdb)

    async def hasPathObj(self, path):
        pkey = self._pathToPkey(path)
        return self.slab.has(pkey, db=self.pathdb)

    async def delPathObj(self, path):
        '''
        Remove a path and decref the object it references.
        '''
        pkey = self._pathToPkey(path)
        buid = self.slab.pop(pkey, db=self.pathdb)
        if buid is not None:
            self._incRefObj(buid, valu=-1)

    async def setPathLink(self, srcpath, dstpath):
        '''
        Add a link from the given srcpath to the dstpath.

        NOTE: This causes the item at dstpath to be incref'd
        '''
        srcpkey = self._pathToPkey(srcpath)
        dstpkey = self._pathToPkey(dstpath)

        buid = self.slab.get(dstpkey, db=self.pathdb)
        if buid is None:
            raise s_exc.NoSuchPath(path=dstpath)

        # if srcpath already pointed somewhere, release that reference first
        oldb = self.slab.pop(srcpkey, db=self.pathdb)
        if oldb is not None:
            self._incRefObj(oldb, valu=-1)

        self._incRefObj(buid, valu=1)
        self.slab.put(srcpkey, buid, db=self.pathdb)

    async def getPathObjProp(self, path, prop):
        # Walk the nested dict item along prop ('/'-separated or tuple);
        # returns None if the path or any key is missing.
        item = await self.getPathObj(path)
        if item is None:
            return None

        for name in self._pathToTupl(prop):
            item = item.get(name, s_common.novalu)
            if item is s_common.novalu:
                return None

        return item

    def _pathToPkey(self, path):
        # keys are NUL-joined path elements, encoded to bytes
        path = self._pathToTupl(path)
        return ('\x00'.join(path)).encode()

    def _pathToTupl(self, path):
        # Accept either 'a/b/c' strings or already-split tuples.
        if isinstance(path, str):
            path = tuple(path.split('/'))
        return path

    def _tuplToPath(self, path):
        return '/'.join(path)

    def _pkeyToTupl(self, pkey):
        return tuple(pkey.decode().split('\x00'))

    async def getPathList(self, path):
        # Yield the relative path strings of all entries below path.
        path = self._pathToTupl(path)
        plen = len(path)
        pkey = self._pathToPkey(path)
        for lkey, buid in self.slab.scanByPref(pkey, db=self.pathdb):
            yield self._tuplToPath(self._pkeyToTupl(lkey)[plen:])

    async def getPathObjs(self, path):
        # Yield (relative-path-tuple, item) pairs for all entries below path.
        path = self._pathToTupl(path)
        plen = len(path)
        pkey = self._pathToPkey(path)
        for lkey, buid in self.slab.scanByPref(pkey, db=self.pathdb):
            yield self._pkeyToTupl(lkey)[plen:], self._getBuidItem(buid)

    async def setPathObjProp(self, path, prop, valu):
        # Set a (possibly nested) property on the object at path,
        # creating intermediate dicts as needed. Returns False if the
        # path does not resolve to an object.
        buid = self._pathToBuid(path)
        if buid is None:
            return False

        item = self._getBuidItem(buid)
        if item is None:
            return False

        step = item

        names = self._pathToTupl(prop)
        for name in names[:-1]:
            down = step.get(name)
            if down is None:
                down = step[name] = {}
            step = down

        name = names[-1]
        # short-circuit: no write (and no dirty mark) if value is unchanged
        if step.get(name, s_common.novalu) == valu:
            return True

        step[name] = valu
        self.dirty[buid] = item
        return True

    async def delPathObjProp(self, path, prop):
        # Remove a (possibly nested) property; missing leaf keys are a
        # no-op, but a missing intermediate key raises KeyError.
        buid = self._pathToBuid(path)
        if buid is None:
            return False

        item = self._getBuidItem(buid)
        if item is None:
            return False

        step = item
        names = self._pathToTupl(prop)
        for name in names[:-1]:
            step = step[name]

        step.pop(names[-1], None)
        self.dirty[buid] = item
        return True

    async def cmpDelPathObjProp(self, path, prop, valu):
        # Compare-and-delete: remove the property only if its current
        # value equals valu. Returns True only when the delete happened.
        buid = self._pathToBuid(path)
        if buid is None:
            return False

        item = self._getBuidItem(buid)
        if item is None:
            return False

        step = item
        names = self._pathToTupl(prop)
        for name in names[:-1]:
            step = step[name]

        name = names[-1]
        if step.get(name) != valu:
            return False

        step.pop(name, None)
        self.dirty[buid] = item
        return True

    async def popPathObjProp(self, path, prop, defv=None):
        # Remove and return a (possibly nested) property, or defv if the
        # path/property does not exist.
        buid = self._pathToBuid(path)
        if buid is None:
            return defv

        item = self._getBuidItem(buid)
        if item is None:
            return defv

        step = item
        names = self._pathToTupl(prop)
        for name in names[:-1]:
            step = step.get(name, s_common.novalu)
            if step is s_common.novalu:
                return defv

        retn = step.pop(names[-1], defv)
        self.dirty[buid] = item
        return retn
class JsonStorApi(s_cell.CellApi):
    '''
    Telepath API for the JsonStorCell: each call checks the calling user's
    permission against a ('json', <verb>, *path) rule before delegating to
    the cell. Paths are normalized to tuples so the perm check matches the
    stored form.
    '''
    async def popPathObjProp(self, path, prop):
        path = self.cell.jsonstor._pathToTupl(path)
        # pop mutates, so it requires the 'set' permission
        await self._reqUserAllowed(('json', 'set', *path))
        return await self.cell.popPathObjProp(path, prop)

    async def hasPathObj(self, path):
        path = self.cell.jsonstor._pathToTupl(path)
        await self._reqUserAllowed(('json', 'get', *path))
        return await self.cell.hasPathObj(path)

    async def copyPathObj(self, oldp, newp):
        oldp = self.cell.jsonstor._pathToTupl(oldp)
        newp = self.cell.jsonstor._pathToTupl(newp)
        # copy needs read access on the source and write access on the target
        await self._reqUserAllowed(('json', 'get', *oldp))
        await self._reqUserAllowed(('json', 'set', *newp))
        return await self.cell.copyPathObj(oldp, newp)

    async def copyPathObjs(self, paths):
        # validate permissions for every pair up front, then copy in bulk
        pathnorms = []
        for oldp, newp in paths:
            oldp = self.cell.jsonstor._pathToTupl(oldp)
            newp = self.cell.jsonstor._pathToTupl(newp)
            await self._reqUserAllowed(('json', 'get', *oldp))
            await self._reqUserAllowed(('json', 'set', *newp))
            pathnorms.append((oldp, newp))
        return await self.cell.copyPathObjs(pathnorms)

    async def getPathList(self, path):
        path = self.cell.jsonstor._pathToTupl(path)
        await self._reqUserAllowed(('json', 'list', *path))
        async for item in self.cell.getPathList(path):
            yield item

    async def getPathObj(self, path):
        path = self.cell.jsonstor._pathToTupl(path)
        await self._reqUserAllowed(('json', 'get', *path))
        return await self.cell.getPathObj(path)

    async def getPathObjs(self, path):
        path = self.cell.jsonstor._pathToTupl(path)
        await self._reqUserAllowed(('json', 'get', *path))
        async for item in self.cell.getPathObjs(path):
            yield item

    async def setPathObj(self, path, item):
        path = self.cell.jsonstor._pathToTupl(path)
        await self._reqUserAllowed(('json', 'set', *path))
        return await self.cell.setPathObj(path, item)

    async def delPathObj(self, path):
        path = self.cell.jsonstor._pathToTupl(path)
        await self._reqUserAllowed(('json', 'del', *path))
        return await self.cell.delPathObj(path)

    async def delPathObjProp(self, path, name):
        path = self.cell.jsonstor._pathToTupl(path)
        await self._reqUserAllowed(('json', 'set', *path))
        return await self.cell.delPathObjProp(path, name)

    async def cmpDelPathObjProp(self, path, name, valu):
        path = self.cell.jsonstor._pathToTupl(path)
        await self._reqUserAllowed(('json', 'set', *path))
        return await self.cell.cmpDelPathObjProp(path, name, valu)

    async def getPathObjProp(self, path, prop):
        path = self.cell.jsonstor._pathToTupl(path)
        await self._reqUserAllowed(('json', 'get', *path))
        return await self.cell.getPathObjProp(path, prop)

    async def setPathObjProp(self, path, prop, valu):
        path = self.cell.jsonstor._pathToTupl(path)
        await self._reqUserAllowed(('json', 'set', *path))
        return await self.cell.setPathObjProp(path, prop, valu)

    async def setPathLink(self, srcpath, dstpath):
        srcpath = self.cell.jsonstor._pathToTupl(srcpath)
        dstpath = self.cell.jsonstor._pathToTupl(dstpath)
        # linking needs read access on the target and write access on the source
        await self._reqUserAllowed(('json', 'get', *dstpath))
        await self._reqUserAllowed(('json', 'set', *srcpath))
        return await self.cell.setPathLink(srcpath, dstpath)

    async def addQueue(self, name, info):
        await self._reqUserAllowed(('queue', 'add', name))
        # stamp ownership/creation metadata before persisting
        info['owner'] = self.user.iden
        info['created'] = s_common.now()
        return await self.cell.addQueue(name, info)

    async def delQueue(self, name):
        await self._reqUserAllowed(('queue', 'del', name))
        return await self.cell.delQueue(name)

    async def cullQueue(self, name, offs):
        await self._reqUserAllowed(('queue', 'gets', name))
        return await self.cell.cullQueue(name, offs)

    async def putsQueue(self, name, items):
        await self._reqUserAllowed(('queue', 'puts', name))
        return await self.cell.putsQueue(name, items)

    async def getsQueue(self, name, offs, size=None, cull=True, wait=True):
        await self._reqUserAllowed(('queue', 'gets', name))
        async for item in self.cell.getsQueue(name, offs, size=size, cull=cull, wait=wait):
            yield item
class JsonStorCell(s_cell.Cell):
cellapi = JsonStorApi
async def initServiceStorage(self):
    # JsonStor and MultiQueue share the cell's LMDB slab; tear both
    # down when the cell is finalized.
    self.jsonstor = await JsonStor.anit(self.slab, 'jsonstor')
    self.multique = await s_lmdbslab.MultiQueue.anit(self.slab, 'multique')
    self.onfini(self.jsonstor.fini)
    self.onfini(self.multique.fini)

async def getPathList(self, path):
    # read-only: no nexus push needed
    async for item in self.jsonstor.getPathList(path):
        yield item

# NOTE: every mutating operation below is decorated with
# s_nexus.Pusher.onPushAuto so the change is replayed through the
# nexus log (mirroring/consistency); read-only calls delegate directly.
@s_nexus.Pusher.onPushAuto('json:pop:prop')
async def popPathObjProp(self, path, prop):
    return await self.jsonstor.popPathObjProp(path, prop)

async def hasPathObj(self, path):
    return await self.jsonstor.hasPathObj(path)

@s_nexus.Pusher.onPushAuto('json:copy')
async def copyPathObj(self, oldp, newp):
    return await self.jsonstor.copyPathObj(oldp, newp)

@s_nexus.Pusher.onPushAuto('json:copys')
async def copyPathObjs(self, paths):
    return await self.jsonstor.copyPathObjs(paths)

async def getPathObj(self, path):
    return await self.jsonstor.getPathObj(path)

async def getPathObjs(self, path):
    async for item in self.jsonstor.getPathObjs(path):
        yield item

async def getPathObjProp(self, path, prop):
    return await self.jsonstor.getPathObjProp(path, prop)

@s_nexus.Pusher.onPushAuto('json:set')
async def setPathObj(self, path, item):
    return await self.jsonstor.setPathObj(path, item)

@s_nexus.Pusher.onPushAuto('json:del')
async def delPathObj(self, path):
    return await self.jsonstor.delPathObj(path)

@s_nexus.Pusher.onPushAuto('json:del:prop')
async def delPathObjProp(self, path, name):
    return await self.jsonstor.delPathObjProp(path, name)

@s_nexus.Pusher.onPushAuto('json:cmp:del:prop')
async def cmpDelPathObjProp(self, path, name, valu):
    return await self.jsonstor.cmpDelPathObjProp(path, name, valu)

@s_nexus.Pusher.onPushAuto('json:set:prop')
async def setPathObjProp(self, path, prop, valu):
    return await self.jsonstor.setPathObjProp(path, prop, valu)

@s_nexus.Pusher.onPushAuto('json:link')
async def setPathLink(self, srcpath, dstpath):
    return await self.jsonstor.setPathLink(srcpath, dstpath)

@s_nexus.Pusher.onPushAuto('q:add')
async def addQueue(self, name, info):
    # returns True only when a new queue was actually created
    if not self.multique.exists(name):
        await self.multique.add(name, info)
        return True
    return False

@s_nexus.Pusher.onPushAuto('q:del')
async def delQueue(self, name):
    # returns True only when an existing queue was removed
    if not self.multique.exists(name):
        return False
    await self.multique.rem(name)
    return True

@s_nexus.Pusher.onPushAuto('q:puts')
async def putsQueue(self, name, items, reqid=None):
    # reqid allows idempotent replay through the nexus log
    return await self.multique.puts(name, items, reqid=reqid)

@s_nexus.Pusher.onPushAuto('q:cull')
async def cullQueue(self, name, offs):
    return await self.multique.cull(name, offs)
async def getsQueue(self, name, offs, size=None, cull=True, wait=True):
if cull and offs > 0:
await self.cullQueue(name, offs - 1)
async for item in | |
# gh_stars: 0
from django import forms
from django.apps import apps
from django.core.exceptions import PermissionDenied
from django.urls import reverse, NoReverseMatch
from django.template.context_processors import csrf
from django.db.models.base import ModelBase
from django.forms.forms import DeclarativeFieldsMetaclass
from django.forms.utils import flatatt
from django.template import loader
from django.http import Http404
from django.test.client import RequestFactory
from django.utils.encoding import force_text, smart_text
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.http import urlencode, urlquote
from django.views.decorators.cache import never_cache
from xadmin import widgets as exwidgets
from xadmin.layout import FormHelper
from xadmin.models import UserSettings, UserWidget
from xadmin.plugins.utils import get_context_dict
from xadmin.sites import site
from xadmin.views.base import CommAdminView, ModelAdminView, filter_hook, csrf_protect_m
from xadmin.views.edit import CreateAdminView
from xadmin.views.list import ListAdminView
from xadmin.util import unquote, DJANGO_11
import copy
class WidgetTypeSelect(forms.Widget):
    """Form widget rendering the available dashboard widget types as a
    Bootstrap pill list, backed by a hidden input holding the selection."""

    def __init__(self, widgets, attrs=None):
        super(WidgetTypeSelect, self).__init__(attrs)
        # candidate widget classes offered for selection
        self._widgets = widgets

    def render(self, name, value, attrs=None):
        if value is None:
            value = ''
        # build_attrs signature changed in Django 1.11
        if DJANGO_11:
            final_attrs = self.build_attrs(attrs, extra_attrs={'name': name})
        else:
            final_attrs = self.build_attrs(attrs, name=name)
        final_attrs['class'] = 'nav nav-pills nav-stacked'
        output = [u'<ul%s>' % flatatt(final_attrs)]
        options = self.render_options(force_text(value), final_attrs['id'])
        if options:
            output.append(options)
        output.append(u'</ul>')
        # hidden input carries the selected widget_type back on submit
        output.append('<input type="hidden" id="%s_input" name="%s" value="%s"/>' %
                      (final_attrs['id'], name, force_text(value)))
        return mark_safe(u'\n'.join(output))

    def render_option(self, selected_choice, widget, id):
        # mark the currently-selected widget type as active
        if widget.widget_type == selected_choice:
            selected_html = u' class="active"'
        else:
            selected_html = ''
        # NOTE: % binds tighter than +, so only the third fragment is
        # formatted with (id, widget.widget_type); the trailing % fills
        # the remaining placeholders of the concatenated template.
        return (u'<li%s><a onclick="' +
                'javascript:$(this).parent().parent().find(\'>li\').removeClass(\'active\');$(this).parent().addClass(\'active\');' +
                '$(\'#%s_input\').attr(\'value\', \'%s\')' % (id, widget.widget_type) +
                '"><h4><i class="%s"></i> %s</h4><p>%s</p></a></li>') % (
            selected_html,
            widget.widget_icon,
            widget.widget_title or widget.widget_type,
            widget.description)

    def render_options(self, selected_choice, id):
        # Normalize to strings.
        output = []
        for widget in self._widgets:
            output.append(self.render_option(selected_choice, widget, id))
        return u'\n'.join(output)
class UserWidgetAdmin(object):
    """xadmin admin options for UserWidget: creation happens through a
    two-step wizard (pick a type, then fill that type's parameter form),
    and non-superusers only see/manage their own widgets."""

    model_icon = 'fa fa-dashboard'
    list_display = ('widget_type', 'page_id', 'user')
    list_filter = ['user', 'widget_type', 'page_id']
    list_display_links = ('widget_type',)
    user_fields = ['user']
    hidden_menu = True

    # step 1: choose page/type; step 2: a form built dynamically from the
    # chosen widget class (see get_widget_params_form / convert_widget_params)
    wizard_form_list = (
        (_(u"Widget Type"), ('page_id', 'widget_type')),
        (_(u"Widget Params"), {'callback':
                               "get_widget_params_form", 'convert': "convert_widget_params"})
    )

    def formfield_for_dbfield(self, db_field, **kwargs):
        # replace the widget_type field with the pill-list selector
        if db_field.name == 'widget_type':
            widgets = widget_manager.get_widgets(self.request.GET.get('page_id', ''))
            form_widget = WidgetTypeSelect(widgets)
            return forms.ChoiceField(choices=[(w.widget_type, w.description) for w in widgets],
                                     widget=form_widget, label=_('Widget Type'))
        # page_id is fixed by the URL when provided; hide it from the form
        if 'page_id' in self.request.GET and db_field.name == 'page_id':
            kwargs['widget'] = forms.HiddenInput
        field = super(
            UserWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
        return field

    def get_widget_params_form(self, wizard):
        # build the step-2 form class from the chosen widget's declared fields
        data = wizard.get_cleaned_data_for_step(wizard.steps.first)
        widget_type = data['widget_type']
        widget = widget_manager.get(widget_type)
        fields = copy.deepcopy(widget.base_fields)
        if 'id' in fields:
            del fields['id']
        return DeclarativeFieldsMetaclass("WidgetParamsForm", (forms.Form,), fields)

    def convert_widget_params(self, wizard, cleaned_data, form):
        # serialize the step-2 form values into the UserWidget's value field
        widget = UserWidget()
        value = dict([(f.name, f.value()) for f in form])
        widget.set_value(value)
        cleaned_data['value'] = widget.value
        cleaned_data['user'] = self.user

    def get_list_display(self):
        # only superusers see the owner column
        list_display = super(UserWidgetAdmin, self).get_list_display()
        if not self.user.is_superuser:
            list_display.remove('user')
        return list_display

    def queryset(self):
        if self.user.is_superuser:
            return super(UserWidgetAdmin, self).queryset()
        return UserWidget.objects.filter(user=self.user)

    def update_dashboard(self, obj):
        # drop the widget's id from the owner's saved dashboard layout
        try:
            portal_pos = UserSettings.objects.get(
                user=obj.user, key="dashboard:%s:pos" % obj.page_id)
        except UserSettings.DoesNotExist:
            return
        # layout format: columns separated by '|', widget ids by ','
        pos = [[w for w in col.split(',') if w != str(
            obj.id)] for col in portal_pos.value.split('|')]
        portal_pos.value = '|'.join([','.join(col) for col in pos])
        portal_pos.save()

    def delete_model(self):
        self.update_dashboard(self.obj)
        super(UserWidgetAdmin, self).delete_model()

    def delete_models(self, queryset):
        for obj in queryset:
            self.update_dashboard(obj)
        super(UserWidgetAdmin, self).delete_models(queryset)


site.register(UserWidget, UserWidgetAdmin)
class WidgetManager(object):
    """Registry mapping each widget class's ``widget_type`` string to the
    class itself; ``register`` doubles as a class decorator."""

    _widgets = None

    def __init__(self):
        self._widgets = {}

    def register(self, widget_class):
        """Register *widget_class* under its ``widget_type`` and return it."""
        key = widget_class.widget_type
        self._widgets[key] = widget_class
        return widget_class

    def get(self, name):
        """Return the widget class registered as *name* (KeyError if absent)."""
        registry = self._widgets
        return registry[name]

    def get_widgets(self, page_id):
        """Return all registered widget classes (*page_id* is currently unused)."""
        return self._widgets.values()


widget_manager = WidgetManager()
class WidgetDataError(Exception):
    """Raised when a widget form fails validation; keeps a reference to the
    offending widget and its error collection."""

    def __init__(self, widget, errors):
        message = str(errors)
        super(WidgetDataError, self).__init__(message)
        self.widget = widget
        self.errors = errors
class BaseWidget(forms.Form):
    """Base class for dashboard widgets. A widget is simultaneously a form
    (its fields are the widget's persisted parameters) and a renderable
    dashboard panel; subclasses override template/context/has_perm."""

    template = 'xadmin/widgets/base.html'
    description = 'Base Widget, don\'t use it.'
    widget_title = None
    widget_icon = 'fa fa-plus-square'
    widget_type = 'base'
    base_title = None

    # parameters common to every widget instance
    id = forms.IntegerField(label=_('Widget ID'), widget=forms.HiddenInput)
    title = forms.CharField(label=_('Widget Title'), required=False, widget=exwidgets.AdminTextInputWidget)

    def __init__(self, dashboard, data):
        self.dashboard = dashboard
        self.admin_site = dashboard.admin_site
        self.request = dashboard.request
        self.user = dashboard.request.user
        # subclass hook to massage raw stored data before form binding
        self.convert(data)
        super(BaseWidget, self).__init__(data)

        if not self.is_valid():
            raise WidgetDataError(self, self.errors.as_text())

        self.setup()

    def setup(self):
        helper = FormHelper()
        helper.form_tag = False
        helper.include_media = False
        self.helper = helper

        self.id = self.cleaned_data['id']
        self.title = self.cleaned_data['title'] or self.base_title

        # superusers bypass the per-widget permission check
        if not (self.user.is_superuser or self.has_perm()):
            raise PermissionDenied

    @property
    def widget(self):
        # render this widget's template with its base context plus
        # whatever the subclass adds in context()
        context = {'widget_id': self.id, 'widget_title': self.title, 'widget_icon': self.widget_icon,
                   'widget_type': self.widget_type, 'form': self, 'widget': self}
        context.update(csrf(self.request))
        self.context(context)
        return loader.render_to_string(self.template, context)

    def context(self, context):
        # subclass hook: add template context entries
        pass

    def convert(self, data):
        # subclass hook: normalize stored data before form binding
        pass

    def has_perm(self):
        # subclasses must opt in; base widgets are never permitted
        return False

    def save(self):
        # persist current field values back onto the stored UserWidget
        value = dict([(f.name, f.value()) for f in self])
        user_widget = UserWidget.objects.get(id=self.id)
        user_widget.set_value(value)
        user_widget.save()

    def static(self, path):
        return self.dashboard.static(path)

    def vendor(self, *tags):
        return self.dashboard.vendor(*tags)

    def media(self):
        return forms.Media()
@widget_manager.register
class HtmlWidget(BaseWidget):
    """Widget that renders a user-supplied block of raw HTML."""
    widget_type = 'html'
    widget_icon = 'fa fa-file-o'
    description = _(
        u'Html Content Widget, can write any html content in widget.')
    content = forms.CharField(label=_(
        'Html Content'), widget=exwidgets.AdminTextareaWidget, required=False)
    def has_perm(self):
        # Any authenticated dashboard user may create an HTML widget.
        return True
    def context(self, context):
        context['content'] = self.cleaned_data['content']
class ModelChoiceIterator(object):
    """Lazily yield ``('app_label.model_name', verbose_name)`` choice pairs
    for every model registered with the global xadmin site."""

    def __init__(self, field):
        self.field = field

    def __iter__(self):
        # Imported lazily to avoid a circular import at module load time.
        from xadmin import site as g_admin_site
        for model, model_admin in g_admin_site._registry.items():
            opts = model._meta
            yield ('%s.%s' % (opts.app_label, opts.model_name),
                   opts.verbose_name)
class ModelChoiceField(forms.ChoiceField):
    """ChoiceField whose choices are the models registered with xadmin.

    Values convert between model classes and ``'app_label.model_name'``
    strings.
    """

    def __init__(self, required=True, widget=None, label=None, initial=None,
                 help_text=None, *args, **kwargs):
        # Call Field instead of ChoiceField __init__() because we don't need
        # ChoiceField.__init__() (it would copy a static choices list, while
        # ours is a lazy iterator property).
        # BUG FIX: the previous ``forms.Field.__init__(self)`` dropped every
        # constructor argument, so a caller-supplied widget/label (e.g. the
        # AdminSelectWidget used by ModelBaseWidget.model) was ignored.
        forms.Field.__init__(self, required=required, widget=widget,
                             label=label, initial=initial,
                             help_text=help_text, *args, **kwargs)
        self.widget.choices = self.choices

    def __deepcopy__(self, memo):
        result = forms.Field.__deepcopy__(self, memo)
        return result

    def _get_choices(self):
        # Fresh iterator each access so newly registered models show up.
        return ModelChoiceIterator(self)

    choices = property(_get_choices, forms.ChoiceField._set_choices)

    def to_python(self, value):
        """'app_label.model_name' -> model class (model classes pass through)."""
        if isinstance(value, ModelBase):
            return value
        app_label, model_name = value.lower().split('.')
        return apps.get_model(app_label, model_name)

    def prepare_value(self, value):
        """Model class -> 'app_label.model_name' string for rendering."""
        if isinstance(value, ModelBase):
            value = '%s.%s' % (value._meta.app_label, value._meta.model_name)
        return value

    def valid_value(self, value):
        value = self.prepare_value(value)
        for k, v in self.choices:
            if value == smart_text(k):
                return True
        return False
class ModelBaseWidget(BaseWidget):
    """Base for widgets operating on a single user-selected target model."""
    app_label = None
    model_name = None
    model_perm = 'change'
    model = ModelChoiceField(label=_(u'Target Model'), widget=exwidgets.AdminSelectWidget)

    def __init__(self, dashboard, data):
        # FIX: BaseWidget.__init__ already stores ``dashboard``; the former
        # duplicate ``self.dashboard = dashboard`` assignment was redundant
        # and has been removed.
        super(ModelBaseWidget, self).__init__(dashboard, data)

    def setup(self):
        """Resolve the chosen model before the base-class permission check."""
        self.model = self.cleaned_data['model']
        self.app_label = self.model._meta.app_label
        self.model_name = self.model._meta.model_name
        super(ModelBaseWidget, self).setup()

    def has_perm(self):
        return self.dashboard.has_model_perm(self.model, self.model_perm)

    def filte_choices_model(self, model, modeladmin):
        # (sic) misspelled name kept for API compatibility with callers.
        return self.dashboard.has_model_perm(model, self.model_perm)

    def model_admin_url(self, name, *args, **kwargs):
        """Reverse an admin view URL (e.g. 'changelist') for the target model."""
        return reverse(
            "%s:%s_%s_%s" % (self.admin_site.app_name, self.app_label,
                             self.model_name, name), args=args, kwargs=kwargs)
class PartialBaseWidget(BaseWidget):
    """Base for widgets that embed a partial admin view via a fake request."""

    def get_view_class(self, view_class, model=None, **opts):
        admin_class = self.admin_site._registry.get(model) if model else None
        return self.admin_site.get_view_class(view_class, admin_class, **opts)

    def get_factory(self):
        return RequestFactory()

    def setup_request(self, request):
        # Propagate identity and session so permission checks behave as if
        # the real user had issued the request.
        request.user = self.user
        request.session = self.request.session
        return request

    def make_get_request(self, path, data=None, **extra):
        # FIX: ``data=None`` sentinel instead of a mutable ``{}`` default
        # (shared-mutable-default anti-pattern); behavior is unchanged.
        req = self.get_factory().get(path, data if data is not None else {}, **extra)
        return self.setup_request(req)

    def make_post_request(self, path, data=None, **extra):
        req = self.get_factory().post(path, data if data is not None else {}, **extra)
        return self.setup_request(req)
@widget_manager.register
class QuickBtnWidget(BaseWidget):
    """Widget showing a row of shortcut buttons to arbitrary admin pages."""
    widget_type = 'qbutton'
    description = _(u'Quick button Widget, quickly open any page.')
    template = "xadmin/widgets/qbutton.html"
    base_title = _(u"Quick Buttons")
    widget_icon = 'fa fa-caret-square-o-right'

    def convert(self, data):
        # Button definitions live outside the regular form fields.
        self.q_btns = data.pop('btns', [])

    def get_model(self, model_or_label):
        """Accept either a model class or an 'app.model' label string."""
        if isinstance(model_or_label, ModelBase):
            return model_or_label
        return apps.get_model(*model_or_label.lower().split('.'))

    def context(self, context):
        buttons = []
        for cfg in self.q_btns:
            entry = {}
            if 'model' in cfg:
                model = self.get_model(cfg['model'])
                opts = model._meta
                # Hide model buttons the user may not view.
                if not self.user.has_perm("%s.view_%s" % (opts.app_label, opts.model_name)):
                    continue
                entry['url'] = reverse("%s:%s_%s_%s" % (self.admin_site.app_name, opts.app_label,
                                                        opts.model_name, cfg.get('view', 'changelist')))
                entry['title'] = opts.verbose_name
                entry['icon'] = self.dashboard.get_model_icon(model)
            else:
                # Treat the url as a view name first, fall back to a raw URL.
                try:
                    entry['url'] = reverse(cfg['url'])
                except NoReverseMatch:
                    entry['url'] = cfg['url']
                if 'title' in cfg:
                    entry['title'] = cfg['title']
                if 'icon' in cfg:
                    entry['icon'] = cfg['icon']
            buttons.append(entry)
        context.update({'btns': buttons})

    def has_perm(self):
        return True
@widget_manager.register
class ListWidget(ModelBaseWidget, PartialBaseWidget):
    """Widget embedding a small changelist of the target model."""
    widget_type = 'list'
    description = _(u'Any Objects list Widget.')
    template = "xadmin/widgets/list.html"
    model_perm = 'view'
    widget_icon = 'fa fa-align-justify'

    def convert(self, data):
        self.list_params = data.pop('params', {})
        self.list_count = data.pop('count', 10)

    def setup(self):
        super(ListWidget, self).setup()
        if not self.title:
            self.title = self.model._meta.verbose_name_plural
        fake_request = self.make_get_request("", self.list_params)
        self.list_view = self.get_view_class(ListAdminView, self.model)(fake_request)
        if self.list_count:
            self.list_view.list_per_page = self.list_count

    def context(self, context):
        view = self.list_view
        view.make_result_list()
        # Show at most the first five display columns.
        shown = view.base_list_display[:5]
        context['result_headers'] = [
            cell for cell in view.result_headers().cells
            if cell.field_name in shown
        ]
        context['results'] = [
            [cell for cell in row.cells if cell.field_name in shown]
            for row in view.results()
        ]
        context['result_count'] = view.result_count
        context['page_url'] = self.model_admin_url('changelist') + "?" + urlencode(self.list_params)
@widget_manager.register
class AddFormWidget(ModelBaseWidget, PartialBaseWidget):
    """Widget embedding a quick 'add object' form for the target model."""
    widget_type = 'addform'
    description = _(u'Add any model object Widget.')
    template = "xadmin/widgets/addform.html"
    model_perm = 'add'
    widget_icon = 'fa fa-plus'

    def setup(self):
        super(AddFormWidget, self).setup()
        if self.title is None:
            self.title = _('Add %s') % self.model._meta.verbose_name
        fake_request = self.make_get_request("")
        view_class = self.get_view_class(
            CreateAdminView, self.model, list_per_page=10)
        self.add_view = view_class(fake_request)
        self.add_view.instance_forms()

    def context(self, context):
        # Render the add form without its own <form> tag or media includes.
        form_helper = FormHelper()
        form_helper.form_tag = False
        form_helper.include_media = False
        context.update({
            'addform': self.add_view.form_obj,
            'addhelper': form_helper,
            'addurl': self.add_view.model_admin_url('add'),
            'model': self.model,
        })

    def media(self):
        return self.add_view.media + self.add_view.form_obj.media + self.vendor('xadmin.plugin.quick-form.js')
class Dashboard(CommAdminView):
widget_customiz = True
widgets = []
title = _(u"Dashboard")
icon = None
def get_page_id(self):
return self.request.path
def get_portal_key(self):
return "dashboard:%s:pos" % self.get_page_id()
@filter_hook
def get_widget(self, widget_or_id, data=None):
try:
if isinstance(widget_or_id, UserWidget):
widget = widget_or_id
else:
widget = UserWidget.objects.get(user=self.user, page_id=self.get_page_id(), id=widget_or_id)
wid = widget_manager.get(widget.widget_type)
class widget_with_perm(wid):
def context(self, context):
super(widget_with_perm, self).context(context)
context.update({'has_change_permission': self.request.user.has_perm('xadmin.change_userwidget')})
wid_instance = widget_with_perm(self, data or widget.get_value())
return wid_instance
except UserWidget.DoesNotExist:
return None
@filter_hook
def get_init_widget(self):
portal = []
widgets = self.widgets
for col in | |
small security factor for worst case:
Vcellrhomax = V_cell * rho_T.max()
vN_dm_max = np.abs(dm_io).max() * timestep / Vcellrhomax
# get maximum von Neumann stability condition values:
# get dividers for maximum stable timestep to increase or decrease
# stepsize:
vN_diff_mult = vN_diff_max / 0.5
vN_dm_mult = vN_dm_max / 1
# get biggest divider:
vN_div_max = max(vN_diff_mult, vN_dm_mult)
# check if any L2 stability conditions are violated:
if vN_div_max > 1:
# only do something if von Neumann checking is active, else just
# print an error but go on with the calculation:
# if check_vN:
if True:
# if not stable, set stable step bool to False
step_stable[0] = False
stability_breaches += 1 # increase breaches counter for this part
# calculate required timestep to make this part stable with a
# security factor of 0.95:
local_vN_max_step = timestep / vN_div_max * 0.95
# if this is the smallest step of all parts needed to make all
# parts stable save it to maximum von Neumann step:
if vN_max_step[0] > local_vN_max_step:
vN_max_step[0] = local_vN_max_step
# # increase error weight of this part by the factor of 1.1 to
# # avoid running into this error again:
# self._trnc_err_cell_weight *= 1.1 # NOT good
# adjust max factor if vN was violated:
if max_factor[0] > 1.05:
max_factor[0] = max_factor[0] ** 0.99
if max_factor[0] < 1.05: # clip to 1.05
max_factor[0] = 1.05
else:
print(
'\nVon Neumann stability violated at step',
stepnum,
'and part with id',
part_id,
'!',
)
raise ValueError
# return new values (or old values if unchanged):
return step_stable, vN_max_step, max_factor
@njit(nogil=GLOB_NOGIL, cache=True)
def _vonNeumann_stability_var(
    part_id,
    stability_breaches,
    UA_tb,
    UA_port,
    UA_amb_shell,
    dm_top,
    dm_bot,
    dm_port,
    rho_T,
    rhocp,
    grid_spacing,
    port_subs_gsp,
    A_cell,
    A_port,
    A_shell,  # areas to backcalc diffusivity from UA
    r_total,
    V_cell,
    step_stable,  # check_vN, , # system wide bools
    vN_max_step,
    max_factor,
    stepnum,
    timestep,  # system wide vars
):
    r"""
    Check for L2/von Neumann stability for diffusion and massflows.
    Massflows are checked for parts where the massflow is defined as NOT
    invariant, that means where all cells in the part may have different
    massflow!

    Notes
    -----
    Von Neumann stability for conduction:

    .. math::
        r = \frac{\alpha \Delta t}{(\Delta x)^2} \leq \frac{1}{2} \\
        \text{With the thermal diffusivity: } \alpha = \frac{
            \lambda}{\rho c_{p}}\\
        \text{and } \lambda = \frac{U\cdot A}{A} \cdot \Delta x \\
        \text{yields } r = \frac{(UA)}{\rho c_{p}} \frac{\Delta t}{A \Delta x}

    Von Neumann stability for advection: CFL-like condition on the maximum
    cell mass turnover per step (see ``vN_dm_max`` below).

    Returns
    -------
    tuple
        ``(step_stable, vN_max_step, max_factor)`` — the possibly downgraded
        stability flag (written in-place at index 0), the smallest stable
        timestep found so far and the possibly reduced step-increase factor.
        NOTE(review): ``stability_breaches += 1`` below only persists if
        ``stability_breaches`` is a mutable (array-like) value; a plain int
        increment would be local and lost, since it is not returned —
        confirm against the caller.
    """
    # save von Neumann stability values for cells by multiplying the cells
    # relevant total x-gridspacing with the maximum UA-value (this gives a
    # substitue heat conduction to get a total diffusion coefficient) and
    # the inverse maximum rho*cp value (of all cells! this may result in a
    # worst-case-result with a security factor of up to about 4.2%) to get
    # the substitute diffusion coefficient and then mult. with step and
    # div. by gridspacing**2 (not **2 since this is cut out with mult. by
    # it to get substitute diffusion from UA) and save to array:
    vN_diff = np.empty(3)
    # rhocpmax = rhocp.max()
    # For calculation see docstring
    # replaced old and faulty calculations with missing Area
    # vN_diff[0] = (UA_tb.max() / rhocpmax) * timestep / grid_spacing
    vN_diff[0] = (
        np.max(UA_tb[1:-1] / rhocp[1:]) * timestep / (A_cell * grid_spacing)
    )
    # for the next two with non-constant gridspacing, find max of UA/gsp:
    # vN_diff[1] = (UA_port / port_subs_gsp).max() / rhocpmax * timestep
    vN_diff[1] = (
        np.max(UA_port / (A_port * port_subs_gsp)) * timestep / rhocp.max()
    )
    # vN_diff[2] = UA_amb_shell.max() / r_total / rhocpmax * timestep
    vN_diff[2] = np.max(UA_amb_shell / rhocp) * timestep / (A_shell * r_total)
    # get maximum:
    vN_diff_max = vN_diff.max()
    # for massflow:
    # get maximum cfl number (this is the von Neumann stability condition
    # for massflow through cells), again with total max. of rho to get a
    # small security factor for worst case:
    Vcellrhomax = V_cell * rho_T.max()
    # NO checking for dims, since error probability of only having a critical
    # massflow sum at the port inflow cell and NOT at the next cell border is
    # extremely low AND this calculation would require either complicated
    # generated_jit functions OR keepdims support in sum! Thus just simple
    # check.
    # if UA_port.ndim == 1:
    vN_dm_max = (
        max(dm_top.max(), dm_bot.max(), np.abs(dm_port).max())
        * timestep
        / Vcellrhomax
    )
    # else:
    #     vN_dm = (
    #         max(dm_top.max(), dm_bot.max(),
    #             np.abs(dm_port.sum(axis=0, keepdims=True)).max())
    #         * timestep / Vcellrhomax)
    # get maximum von Neumann stability condition values:
    # get dividers for maximum stable timestep to increase or decrease
    # stepsize:
    vN_diff_mult = vN_diff_max / 0.5
    vN_dm_mult = vN_dm_max / 1
    # get biggest divider:
    vN_div_max = max(vN_diff_mult, vN_dm_mult)
    # check if any L2 stability conditions are violated:
    if vN_div_max > 1.0:
        # only do something if von Neumann checking is active, else just
        # print an error but go on with the calculation:
        # if check_vN:
        # NOTE(review): with the literal ``if True:`` the ``else`` branch
        # below (print + ValueError) is currently unreachable dead code.
        if True:
            # if not stable, set stable step bool to False
            step_stable[0] = False
            stability_breaches += 1  # increase breaches counter for this part
            # calculate required timestep to make this part stable with a
            # security factor of 0.95:
            local_vN_max_step = timestep / vN_div_max * 0.95
            # if this is the smallest step of all parts needed to make all
            # parts stable save it to maximum von Neumann step:
            if vN_max_step[0] > local_vN_max_step:
                vN_max_step[0] = local_vN_max_step
            # # increase error weight of this part by the factor of 1.1 to
            # # avoid running into this error again:
            # self._trnc_err_cell_weight *= 1.1  # NOT good
            # adjust max factor if vN was violated:
            if max_factor[0] > 1.05:
                max_factor[0] = max_factor[0] ** 0.99
                if max_factor[0] < 1.05:  # clip to 1.05
                    max_factor[0] = 1.05
        else:
            print(
                '\nVon Neumann stability violated at step',
                stepnum,
                'and part with id',
                part_id,
                '!',
            )
            raise ValueError
    # return new values (or old values if unchanged):
    return step_stable, vN_max_step, max_factor
# %% Simulation Env. part specific differential functions:
@njit(nogil=GLOB_NOGIL, cache=True)
def pipe1D_diff(
T_ext,
T_port,
T_s,
T_amb,
ports_all, # temperatures
dm_io,
dm_top,
dm_bot,
dm_port,
res_dm, # flows
cp_T,
lam_T,
rho_T,
ny_T,
lam_mean,
cp_port,
lam_port_fld,
mcp,
rhocp,
lam_wll,
lam_ins,
mcp_wll,
ui, # material properties.
alpha_i,
alpha_inf, # alpha values
UA_tb,
UA_tb_wll,
UA_amb_shell,
UA_port,
UA_port_wll, # UA values
port_own_idx,
port_link_idx, # indices
grid_spacing,
port_gsp,
port_subs_gsp,
d_i,
cell_dist, # lengths
flow_length,
r_total,
r_ln_wll,
r_ln_ins,
r_rins, # lengths
A_cell,
V_cell,
A_shell_i,
A_shell_ins,
A_p_fld_mean, # areas and vols
process_flows,
vertical,
step_stable, # bools
part_id,
stability_breaches,
vN_max_step,
max_factor, # misc.
stepnum, # step information
dT_cond,
dT_adv,
dT_total, # differentials
timestep,
):
process_flows[0] = _process_flow_invar(
process_flows=process_flows,
dm_io=dm_io,
dm_top=dm_top,
dm_bot=dm_bot,
dm_port=dm_port,
stepnum=stepnum,
res_dm=res_dm,
)
water_mat_props_ext_view(
T_ext=T_ext, cp_T=cp_T, lam_T=lam_T, rho_T=rho_T, ny_T=ny_T
)
# get mean lambda value between cells:
_lambda_mean_view(lam_T=lam_T, out=lam_mean)
UA_plate_tb(
A_cell=A_cell,
grid_spacing=grid_spacing,
lam_mean=lam_mean,
UA_tb_wll=UA_tb_wll,
out=UA_tb,
)
# for conduction between current cell and ambient:
# get outer pipe (insulation) surface temperature using a linearized
# approach assuming steady state (assuming surface temperature = const.
# for t -> infinity) and for cylinder shell (lids are omitted)
surface_temp_steady_state_inplace(
T=T_ext[1:-1],
T_inf=T_amb[0],
A_s=A_shell_ins,
alpha_inf=alpha_inf,
UA=UA_amb_shell,
T_s=T_s,
)
# get inner alpha value between fluid and wall from nusselt equations:
pipe_alpha_i(
dm_io, T_ext[1:-1], rho_T, ny_T, lam_T, A_cell, d_i, cell_dist, alpha_i
)
# get outer alpha value between insulation and surrounding air:
cylinder_alpha_inf( # for a cylinder
T_s=T_s,
T_inf=T_amb[0],
flow_length=flow_length,
vertical=vertical,
r_total=r_total,
alpha_inf=alpha_inf,
)
# get resulting UA to ambient:
UA_fld_wll_ins_amb_cyl(
A_i=A_shell_i,
r_ln_wll=r_ln_wll,
r_ln_ins=r_ln_ins,
r_rins=r_rins,
alpha_i=alpha_i,
alpha_inf=alpha_inf,
lam_wll=lam_wll,
lam_ins=lam_ins,
out=UA_amb_shell,
)
# precalculate values which are needed multiple times:
cell_temp_props_ext(
T_ext=T_ext,
V_cell=V_cell,
cp_T=cp_T,
rho_T=rho_T,
mcp_wll=mcp_wll,
rhocp=rhocp,
mcp=mcp,
ui=ui,
)
dT_cond_port = _process_ports_collapsed(
ports_all=ports_all,
port_link_idx=port_link_idx,
port_own_idx=port_own_idx,
T=T_ext[1:-1],
mcp=mcp,
UA_port=UA_port,
UA_port_wll=UA_port_wll,
A_p_fld_mean=A_p_fld_mean,
port_gsp=port_gsp,
grid_spacing=grid_spacing,
lam_T=lam_T,
cp_port=cp_port,
lam_port_fld=lam_port_fld,
T_port=T_port,
)
step_stable, vN_max_step, max_factor = _vonNeumann_stability_invar(
part_id=part_id,
stability_breaches=stability_breaches,
UA_tb=UA_tb,
UA_port=UA_port,
UA_amb_shell=UA_amb_shell,
dm_io=dm_io,
rho_T=rho_T,
rhocp=rhocp,
grid_spacing=grid_spacing,
port_subs_gsp=port_subs_gsp,
A_cell=A_cell,
A_port=A_p_fld_mean,
A_shell=A_shell_ins,
r_total=r_total,
V_cell=V_cell,
step_stable=step_stable,
vN_max_step=vN_max_step,
max_factor=max_factor,
stepnum=stepnum,
timestep=timestep,
)
# CALCULATE DIFFERENTIALS
# calculate heat transfer | |
# Repo: mak-ov/dlcourse_ai-master
import numpy as np
from numpy.core.numeric import zeros_like
def l2_regularization(W, reg_strength):
    '''
    Computes L2 regularization loss on weights and its gradient

    Arguments:
      W, np array - weights
      reg_strength - float value

    Returns:
      loss, single value - l2 regularization loss
      gradient, np.array same shape as W - gradient of weight by l2 loss
    '''
    # FIX: replaced the "Not implemented!" placeholder.
    # L2 loss is lambda * sum(W^2); its gradient w.r.t. W is 2 * lambda * W.
    loss = reg_strength * np.sum(W * W)
    grad = 2.0 * reg_strength * W
    return loss, grad
def softmax(predictions):
    '''
    Computes probabilities from scores

    Arguments:
      predictions, np array, shape is either (N) or (batch_size, N) -
        classifier output

    Returns:
      probs, np array of the same shape as predictions -
        probability for every class, 0..1
    '''
    # Subtract the per-row maximum first: softmax is shift-invariant and
    # this avoids overflow in exp for large scores.
    if predictions.ndim == 1:
        exps = np.exp(predictions - predictions.max())
        return exps / exps.sum()
    exps = np.exp(predictions - predictions.max(axis=1, keepdims=True))
    return exps / exps.sum(axis=1, keepdims=True)
def cross_entropy_loss(probs, target_index):
    '''
    Computes cross-entropy loss

    Arguments:
      probs, np array, shape is either (N) or (batch_size, N) -
        probabilities for every class
      target_index: np array of int, shape is (1) or (batch_size) -
        index of the true class for given sample(s)

    Returns:
      loss: single value
    '''
    # Vectorized (loop-free) via one-hot encoding of the targets.
    if probs.ndim == 1:
        one_hot = np.zeros(probs.shape)
        one_hot[target_index] = 1
        return -np.sum(one_hot * np.log(probs))
    # Batch case: accept targets of shape (batch,) or (batch, 1).
    idx = target_index if target_index.ndim == 1 else target_index[:, 0]
    one_hot = np.eye(probs.shape[1])[idx]
    return -np.sum(one_hot * np.log(probs)) / probs.shape[0]
def softmax_with_cross_entropy(preds, target_index):
    '''
    Computes softmax and cross-entropy loss for model predictions,
    including the gradient

    Arguments:
      predictions, np array, shape is either (N) or (batch_size, N) -
        classifier output
      target_index: np array of int, shape is (1) or (batch_size) -
        index of the true class for given sample(s)

    Returns:
      loss, single value - cross-entropy loss
      dprediction, np array same shape as predictions - gradient of
        predictions by loss value
    '''
    probs = softmax(preds)
    loss = cross_entropy_loss(probs, target_index)
    # Gradient of softmax+CE w.r.t. the scores is (probs - one_hot),
    # averaged over the batch in the 2-D case.
    if preds.ndim == 1:
        one_hot = np.zeros(preds.shape)
        one_hot[target_index] = 1
        return loss, probs - one_hot
    idx = target_index if target_index.ndim == 1 else target_index[:, 0]
    one_hot = np.eye(probs.shape[1])[idx]
    return loss, (probs - one_hot) / probs.shape[0]
class Param:
    '''
    Trainable parameter of the model
    Captures both parameter value and the gradient
    '''

    def __init__(self, value):
        self.value = value
        # Gradient starts at zero with the same shape/dtype as the value.
        self.grad = np.zeros_like(value)
class ReLULayer:
    '''Elementwise max(x, 0) activation with no trainable parameters.'''

    def __init__(self):
        pass

    def forward(self, X):
        # Keep the input around; backward needs its sign pattern.
        self.cache = X
        positive = (X > 0).astype(float)
        return X * positive

    def backward(self, d_out):
        # Gradient passes through only where the input was strictly positive.
        return d_out * (self.cache > 0)

    def params(self):
        return {}
class FullyConnectedLayer:
    '''Affine layer: forward computes X @ W + B.'''

    def __init__(self, n_input, n_output):
        # Small random init, bias as a (1, n_output) row for broadcasting.
        self.W = Param(0.001 * np.random.randn(n_input, n_output))
        self.B = Param(0.001 * np.random.randn(1, n_output))
        self.X = None

    def forward(self, X):
        # Cache the input batch for the backward pass.
        self.X = X
        return X @ self.W.value + self.B.value

    def backward(self, d_out):
        # Standard affine backprop; parameter grads are overwritten, the
        # input gradient is returned to the previous layer.
        self.W.grad = self.X.T @ d_out
        self.B.grad = d_out.sum(axis=0, keepdims=True)
        return d_out @ self.W.value.T

    def params(self):
        return {'W': self.W, 'B': self.B}
class ConvolutionalLayer:
    def __init__(self, in_channels, out_channels,
                 filter_size, padding):
        '''
        Initializes the layer
        Arguments:
        in_channels, int - number of input channels
        out_channels, int - number of output channels
        filter_size, int - size of the conv filter
        padding, int - number of 'pixels' to pad on each side
        '''
        self.filter_size = filter_size
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.W = Param(
            np.random.randn(filter_size, filter_size,
                            in_channels, out_channels)
        )
        self.B = Param(np.zeros(out_channels))
        # Cached (padded) input from the last forward pass.
        self.X = None
        self.padding = padding
    def forward(self, X):
        # X is NHWC: (batch, height, width, channels); stride is fixed at 1.
        batch_size, height, width, channels = X.shape
        # print(X.shape)
        if self.padding:
            # Zero-pad height and width per image/channel.  NOTE: self.X
            # below stores the PADDED input; backward builds its gradient
            # at the padded size and crops the borders at the end.
            X_pad = np.zeros((X.shape[0], X.shape[1]+2*self.padding, X.shape[2]+2*self.padding, X.shape[3]))
            for img in range(X.shape[0]):
                for ch in range(X.shape[-1]):
                    # print(X[img, :, :, ch].shape)
                    X_pad[img,:,:,ch] = np.pad(X[img,:,:,ch], ((self.padding,self.padding),(self.padding,self.padding)), 'constant')
            X = X_pad
        self.X = X
        # Output size from the UNpadded dimensions plus the padding term.
        out_height = (height - self.filter_size + 2 * self.padding) + 1
        out_width = (width - self.filter_size + 2 * self.padding) + 1
        output = np.zeros((batch_size, out_height, out_width, self.out_channels))
        # print(output.shape)
        # Flatten filters to (k*k*C_in, C_out) so each spatial location
        # reduces to a single matrix multiply.
        W = self.W.value.reshape(self.filter_size*self.filter_size*self.in_channels, self.W.value.shape[-1])
        # print('------------------')
        # print(f'X: {X}','\n', f'W: {W}\n')
        # TODO: Implement forward pass
        # Hint: setup variables that hold the result
        # and one x/y location at a time in the loop below
        # It's ok to use loops for going over width and height
        # but try to avoid having any other loops
        for y in range(out_height):
            for x in range(out_width):
                # im2col for one location: flatten the receptive field and
                # multiply by the flattened filter bank, then add the bias.
                output[:, y, x, :] = np.dot(X[:, y:(y+self.filter_size), x:(x+self.filter_size), :].reshape(X.shape[0], \
                    self.filter_size*self.filter_size*self.in_channels), W) + self.B.value
        return output
    def backward(self, d_out):
        # Hint: Forward pass was reduced to matrix multiply
        # You already know how to backprop through that
        # when you implemented FullyConnectedLayer
        # Just do it the same number of times and accumulate gradients
        batch_size, height, width, channels = self.X.shape
        _, out_height, out_width, out_channels = d_out.shape
        # print(d_out.shape)
        # Gradient w.r.t. the (padded) input; cropped before returning.
        d_input = np.zeros(self.X.shape)
        # print(d_input.shape)
        # self.W.grad = np.zeros_like(self.W.grad)
        # self.B.grad = np.zeros_like(self.B.grad)
        W = self.W.value.reshape(self.filter_size*self.filter_size*self.in_channels, self.W.value.shape[-1])
        # # Try to avoid having any other loops here too
        for y in range(out_height):
            for x in range(out_width):
                # Accumulate input and weight gradients over all output
                # locations (overlapping receptive fields must sum).
                d_input[:, y:(y+self.filter_size), x:(x+self.filter_size), :] += \
                    np.dot(d_out[:, y, x, :], W.T).reshape(batch_size, self.filter_size, self.filter_size, channels)
                self.W.grad += np.dot(self.X[:, y:(y+self.filter_size), x:(x+self.filter_size), :].reshape(self.X.shape[0], \
                    self.filter_size*self.filter_size*self.in_channels).T, d_out[:, y, x, :]).reshape(self.filter_size, self.filter_size, \
                    self.in_channels, self.out_channels)
        # NOTE(review): B.grad is overwritten with ``=`` while W.grad is
        # accumulated with ``+=`` across calls (the zeroing lines above are
        # commented out) — confirm whether cross-call accumulation is
        # intended by the surrounding training loop.
        self.B.grad = np.sum(d_out, axis=(0,1,2)).reshape(out_channels)
        if self.padding:
            # Strip the padded border rows/columns from the input gradient.
            d_input = np.delete(d_input, np.s_[0:self.padding], 1)
            d_input = np.delete(d_input, np.s_[-self.padding:], 1)
            d_input = np.delete(d_input, np.s_[0:self.padding], 2)
            d_input = np.delete(d_input, np.s_[-self.padding:], 2)
        # print(d_input.shape)
        return d_input
    def params(self):
        return { 'W': self.W, 'B': self.B }
class MaxPoolingLayer:
    def __init__(self, pool_size, stride):
        '''
        Initializes the max pool

        Arguments:
        pool_size, int - area to pool
        stride, int - step size between pooling windows
        '''
        self.pool_size = pool_size
        self.stride = stride
        self.X = None      # input of the last forward pass
        self.mask = None   # 1 where an element was a window maximum

    def forward(self, X):
        '''Slide a pool_size x pool_size max window over X with self.stride.

        X is (batch, height, width, channels); returns an array of shape
        (batch, out_height, out_width, channels).  Raises TypeError when the
        geometry does not yield integer output dimensions.
        '''
        batch_size, height, width, channels = X.shape
        self.X = X
        # FIX: integer-divisibility check replaces the old fragile test that
        # parsed the string repr of the float fractional part (which breaks
        # for values printed in scientific notation).
        if (height - self.pool_size) % self.stride != 0 \
                or (width - self.pool_size) % self.stride != 0:
            raise TypeError("The output of MaxPooling can only be integer numbers")
        out_height = (height - self.pool_size) // self.stride + 1
        out_width = (width - self.pool_size) // self.stride + 1
        output = np.zeros((batch_size, out_height, out_width, channels))
        self.mask = np.zeros_like(X)
        for y in range(out_height):
            for x in range(out_width):
                ys = y * self.stride
                xs = x * self.stride
                window = X[:, ys:ys + self.pool_size, xs:xs + self.pool_size, :]
                w_max = window.max(axis=(1, 2), keepdims=True)
                output[:, y, x, :] = w_max[:, 0, 0, :]
                # Mark every element equal to the window max; ties share the
                # gradient in backward (same behaviour as before).
                self.mask[:, ys:ys + self.pool_size, xs:xs + self.pool_size, :] = \
                    (window == w_max).astype(int)
        return output

    def backward(self, d_out):
        '''Route each upstream gradient back to the max position(s).'''
        # FIX: np.zeros_like instead of the bare ``zeros_like`` imported from
        # the deprecated ``numpy.core.numeric`` path.
        dX = np.zeros_like(self.X)
        for y in range(d_out.shape[1]):
            for x in range(d_out.shape[2]):
                ys = y * self.stride
                xs = x * self.stride
                # Broadcast the (batch, channels) gradient across the window;
                # overlapping windows accumulate via +=.
                dX[:, ys:ys + self.pool_size, xs:xs + self.pool_size, :] += \
                    d_out[:, y, x, :][:, np.newaxis, np.newaxis, :]
        return dX * self.mask

    def params(self):
        return {}
class Flattener:
def __init__(self):
self.X_shape | |
[('ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg', 'CallHomeTransMethod', '')])),
('destination_addr', (YLeaf(YType.str, 'destination-addr'), ['str'])),
('enable', (YLeaf(YType.boolean, 'enable'), ['bool'])),
])
self.method = None
self.destination_addr = None
self.enable = None
self._segment_path = lambda: "address" + "[method='" + str(self.method) + "']" + "[destination-addr='" + str(self.destination_addr) + "']"
self._is_frozen = True
                    def __setattr__(self, name, value):
                        # Generated ydk hook: route writes through validation;
                        # only the listed leafs are assignable once frozen.
                        self._perform_setattr(CallHome.Profiles.Profile.Addresses.Address, ['method', 'destination_addr', 'enable'], name, value)
                    @staticmethod
                    def _meta_info():
                        # Lazy import keeps the large meta table off the
                        # module import path until actually needed.
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_call_home_cfg as meta
                        return meta._meta_table['CallHome.Profiles.Profile.Addresses.Address']['meta_info']
                @staticmethod
                def _meta_info():
                    # Metadata accessor for the Addresses container class.
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_call_home_cfg as meta
                    return meta._meta_table['CallHome.Profiles.Profile.Addresses']['meta_info']
class SubscribeAlertGroup(_Entity_):
"""
Subscribe to alert\-group
.. attribute:: environment
environmental info
**type**\: :py:class:`Environment <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHome.Profiles.Profile.SubscribeAlertGroup.Environment>`
.. attribute:: configuration
configuration info
**type**\: :py:class:`Configuration <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHome.Profiles.Profile.SubscribeAlertGroup.Configuration>`
.. attribute:: snapshot
snapshot info
**type**\: :py:class:`Snapshot <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHome.Profiles.Profile.SubscribeAlertGroup.Snapshot>`
.. attribute:: inventory
inventory info
**type**\: :py:class:`Inventory <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHome.Profiles.Profile.SubscribeAlertGroup.Inventory>`
.. attribute:: crash
Crash info
**type**\: :py:class:`Crash <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHome.Profiles.Profile.SubscribeAlertGroup.Crash>`
.. attribute:: syslogs
syslog info
**type**\: :py:class:`Syslogs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHome.Profiles.Profile.SubscribeAlertGroup.Syslogs>`
"""
_prefix = 'call-home-cfg'
_revision = '2018-07-24'
                def __init__(self):
                    # Generated YANG binding initializer: wires up the six
                    # alert-group containers and freezes the instance.
                    if sys.version_info > (3,):
                        super().__init__()
                    else:
                        super(CallHome.Profiles.Profile.SubscribeAlertGroup, self).__init__()
                    self.yang_name = "subscribe-alert-group"
                    self.yang_parent_name = "profile"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    self._child_classes = OrderedDict([("environment", ("environment", CallHome.Profiles.Profile.SubscribeAlertGroup.Environment)), ("configuration", ("configuration", CallHome.Profiles.Profile.SubscribeAlertGroup.Configuration)), ("snapshot", ("snapshot", CallHome.Profiles.Profile.SubscribeAlertGroup.Snapshot)), ("inventory", ("inventory", CallHome.Profiles.Profile.SubscribeAlertGroup.Inventory)), ("crash", ("crash", CallHome.Profiles.Profile.SubscribeAlertGroup.Crash)), ("syslogs", ("syslogs", CallHome.Profiles.Profile.SubscribeAlertGroup.Syslogs))])
                    self._leafs = OrderedDict()
                    self.environment = CallHome.Profiles.Profile.SubscribeAlertGroup.Environment()
                    self.environment.parent = self
                    self._children_name_map["environment"] = "environment"
                    self.configuration = CallHome.Profiles.Profile.SubscribeAlertGroup.Configuration()
                    self.configuration.parent = self
                    self._children_name_map["configuration"] = "configuration"
                    self.snapshot = CallHome.Profiles.Profile.SubscribeAlertGroup.Snapshot()
                    self.snapshot.parent = self
                    self._children_name_map["snapshot"] = "snapshot"
                    self.inventory = CallHome.Profiles.Profile.SubscribeAlertGroup.Inventory()
                    self.inventory.parent = self
                    self._children_name_map["inventory"] = "inventory"
                    self.crash = CallHome.Profiles.Profile.SubscribeAlertGroup.Crash()
                    self.crash.parent = self
                    self._children_name_map["crash"] = "crash"
                    self.syslogs = CallHome.Profiles.Profile.SubscribeAlertGroup.Syslogs()
                    self.syslogs.parent = self
                    self._children_name_map["syslogs"] = "syslogs"
                    self._segment_path = lambda: "subscribe-alert-group"
                    # Freeze: subsequent attribute writes go through
                    # _perform_setattr validation.
                    self._is_frozen = True
def __setattr__(self, name, value):
    # Route all attribute writes through YDK validation; this container
    # has no leafs of its own, hence the empty leaf-name list.
    self._perform_setattr(CallHome.Profiles.Profile.SubscribeAlertGroup, [], name, value)
# Generated YDK model class binding the YANG "environment" container
# under subscribe-alert-group (edits here are normally regenerated).
class Environment(_Entity_):
    """
    environmental info
    .. attribute:: severity
    Severity
    **type**\: :py:class:`CallHomeEventSeverity <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHomeEventSeverity>`
    """

    _prefix = 'call-home-cfg'
    _revision = '2018-07-24'

    def __init__(self):
        # Python 2/3 compatible call into the YDK entity base class.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(CallHome.Profiles.Profile.SubscribeAlertGroup.Environment, self).__init__()
        self.yang_name = "environment"
        self.yang_parent_name = "subscribe-alert-group"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container
        self._leafs = OrderedDict([
            ('severity', (YLeaf(YType.enumeration, 'severity'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg', 'CallHomeEventSeverity', '')])),
        ])
        self.severity = None  # CallHomeEventSeverity enumeration leaf
        self._segment_path = lambda: "environment"
        # Must be assigned last: freezes the entity for validated writes.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # 'severity' is the only writable leaf on this container.
        self._perform_setattr(CallHome.Profiles.Profile.SubscribeAlertGroup.Environment, ['severity'], name, value)

    @staticmethod
    def _meta_info():
        # Lazy import avoids loading the (large) meta tables at module import.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_call_home_cfg as meta
        return meta._meta_table['CallHome.Profiles.Profile.SubscribeAlertGroup.Environment']['meta_info']
# Generated YDK model class binding the YANG "configuration" container
# under subscribe-alert-group; follows the same generated structure as
# the sibling alert-group classes.
class Configuration(_Entity_):
    """
    configuration info
    .. attribute:: periodic
    Periodic call\-home message
    **type**\: :py:class:`Periodic <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHome.Profiles.Profile.SubscribeAlertGroup.Configuration.Periodic>`
    .. attribute:: subscribe
    Subscribe the alert\-group
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    """

    _prefix = 'call-home-cfg'
    _revision = '2018-07-24'

    def __init__(self):
        # Python 2/3 compatible call into the YDK entity base class.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(CallHome.Profiles.Profile.SubscribeAlertGroup.Configuration, self).__init__()
        self.yang_name = "configuration"
        self.yang_parent_name = "subscribe-alert-group"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("periodic", ("periodic", CallHome.Profiles.Profile.SubscribeAlertGroup.Configuration.Periodic))])
        self._leafs = OrderedDict([
            ('subscribe', (YLeaf(YType.empty, 'subscribe'), ['Empty'])),
        ])
        self.subscribe = None  # presence leaf (Empty type)
        self.periodic = CallHome.Profiles.Profile.SubscribeAlertGroup.Configuration.Periodic()
        self.periodic.parent = self
        self._children_name_map["periodic"] = "periodic"
        self._segment_path = lambda: "configuration"
        # Must be assigned last: freezes the entity for validated writes.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # 'subscribe' is the only writable leaf on this container.
        self._perform_setattr(CallHome.Profiles.Profile.SubscribeAlertGroup.Configuration, ['subscribe'], name, value)

    class Periodic(_Entity_):
        """
        Periodic call\-home message
        .. attribute:: interval
        none
        **type**\: :py:class:`CallHomeMailSendInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHomeMailSendInterval>`
        .. attribute:: day
        Day
        **type**\: int
        **range:** 0..31
        .. attribute:: weekday
        Day of week
        **type**\: :py:class:`CallHomeDayOfWeek <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHomeDayOfWeek>`
        .. attribute:: hour
        Hour
        **type**\: int
        **range:** 0..23
        .. attribute:: minute
        Minute
        **type**\: int
        **range:** 0..59
        """

        _prefix = 'call-home-cfg'
        _revision = '2018-07-24'

        def __init__(self):
            # Python 2/3 compatible call into the YDK entity base class.
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(CallHome.Profiles.Profile.SubscribeAlertGroup.Configuration.Periodic, self).__init__()
            self.yang_name = "periodic"
            self.yang_parent_name = "configuration"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])  # leaf-only container
            self._leafs = OrderedDict([
                ('interval', (YLeaf(YType.enumeration, 'interval'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg', 'CallHomeMailSendInterval', '')])),
                ('day', (YLeaf(YType.uint32, 'day'), ['int'])),
                ('weekday', (YLeaf(YType.enumeration, 'weekday'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg', 'CallHomeDayOfWeek', '')])),
                ('hour', (YLeaf(YType.uint32, 'hour'), ['int'])),
                ('minute', (YLeaf(YType.uint32, 'minute'), ['int'])),
            ])
            # Schedule leafs; all unset until configured by the caller.
            self.interval = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self._segment_path = lambda: "periodic"
            # Must be assigned last: freezes the entity for validated writes.
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(CallHome.Profiles.Profile.SubscribeAlertGroup.Configuration.Periodic, ['interval', 'day', 'weekday', 'hour', 'minute'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_call_home_cfg as meta
            return meta._meta_table['CallHome.Profiles.Profile.SubscribeAlertGroup.Configuration.Periodic']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_call_home_cfg as meta
        return meta._meta_table['CallHome.Profiles.Profile.SubscribeAlertGroup.Configuration']['meta_info']
# Generated YDK model class binding the YANG "snapshot" container under
# subscribe-alert-group; unlike Configuration/Inventory it has no
# 'subscribe' leaf of its own, only the nested Periodic schedule.
class Snapshot(_Entity_):
    """
    snapshot info
    .. attribute:: periodic
    Periodic call\-home message
    **type**\: :py:class:`Periodic <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHome.Profiles.Profile.SubscribeAlertGroup.Snapshot.Periodic>`
    """

    _prefix = 'call-home-cfg'
    _revision = '2018-07-24'

    def __init__(self):
        # Python 2/3 compatible call into the YDK entity base class.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(CallHome.Profiles.Profile.SubscribeAlertGroup.Snapshot, self).__init__()
        self.yang_name = "snapshot"
        self.yang_parent_name = "subscribe-alert-group"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("periodic", ("periodic", CallHome.Profiles.Profile.SubscribeAlertGroup.Snapshot.Periodic))])
        self._leafs = OrderedDict()  # no leafs directly on this container
        self.periodic = CallHome.Profiles.Profile.SubscribeAlertGroup.Snapshot.Periodic()
        self.periodic.parent = self
        self._children_name_map["periodic"] = "periodic"
        self._segment_path = lambda: "snapshot"
        # Must be assigned last: freezes the entity for validated writes.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(CallHome.Profiles.Profile.SubscribeAlertGroup.Snapshot, [], name, value)

    class Periodic(_Entity_):
        """
        Periodic call\-home message
        .. attribute:: interval
        none
        **type**\: :py:class:`SnapshotInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.SnapshotInterval>`
        .. attribute:: day
        Day of month
        **type**\: int
        **range:** 0..31
        .. attribute:: weekday
        Day of week
        **type**\: :py:class:`CallHomeDayOfWeek <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHomeDayOfWeek>`
        .. attribute:: hour
        Hour
        **type**\: int
        **range:** 0..23
        .. attribute:: minute
        Minute
        **type**\: int
        **range:** 0..59
        """

        _prefix = 'call-home-cfg'
        _revision = '2018-07-24'

        def __init__(self):
            # Python 2/3 compatible call into the YDK entity base class.
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(CallHome.Profiles.Profile.SubscribeAlertGroup.Snapshot.Periodic, self).__init__()
            self.yang_name = "periodic"
            self.yang_parent_name = "snapshot"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])  # leaf-only container
            self._leafs = OrderedDict([
                ('interval', (YLeaf(YType.enumeration, 'interval'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg', 'SnapshotInterval', '')])),
                ('day', (YLeaf(YType.uint32, 'day'), ['int'])),
                ('weekday', (YLeaf(YType.enumeration, 'weekday'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg', 'CallHomeDayOfWeek', '')])),
                ('hour', (YLeaf(YType.uint32, 'hour'), ['int'])),
                ('minute', (YLeaf(YType.uint32, 'minute'), ['int'])),
            ])
            # Schedule leafs; all unset until configured by the caller.
            self.interval = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self._segment_path = lambda: "periodic"
            # Must be assigned last: freezes the entity for validated writes.
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(CallHome.Profiles.Profile.SubscribeAlertGroup.Snapshot.Periodic, ['interval', 'day', 'weekday', 'hour', 'minute'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_call_home_cfg as meta
            return meta._meta_table['CallHome.Profiles.Profile.SubscribeAlertGroup.Snapshot.Periodic']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_call_home_cfg as meta
        return meta._meta_table['CallHome.Profiles.Profile.SubscribeAlertGroup.Snapshot']['meta_info']
# Generated YDK model class binding the YANG "inventory" container under
# subscribe-alert-group; mirrors Configuration (subscribe leaf + Periodic).
class Inventory(_Entity_):
    """
    inventory info
    .. attribute:: periodic
    Periodic call\-home message
    **type**\: :py:class:`Periodic <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHome.Profiles.Profile.SubscribeAlertGroup.Inventory.Periodic>`
    .. attribute:: subscribe
    Subscribe the alert\-group
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    """

    _prefix = 'call-home-cfg'
    _revision = '2018-07-24'

    def __init__(self):
        # Python 2/3 compatible call into the YDK entity base class.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(CallHome.Profiles.Profile.SubscribeAlertGroup.Inventory, self).__init__()
        self.yang_name = "inventory"
        self.yang_parent_name = "subscribe-alert-group"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("periodic", ("periodic", CallHome.Profiles.Profile.SubscribeAlertGroup.Inventory.Periodic))])
        self._leafs = OrderedDict([
            ('subscribe', (YLeaf(YType.empty, 'subscribe'), ['Empty'])),
        ])
        self.subscribe = None  # presence leaf (Empty type)
        self.periodic = CallHome.Profiles.Profile.SubscribeAlertGroup.Inventory.Periodic()
        self.periodic.parent = self
        self._children_name_map["periodic"] = "periodic"
        self._segment_path = lambda: "inventory"
        # Must be assigned last: freezes the entity for validated writes.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # 'subscribe' is the only writable leaf on this container.
        self._perform_setattr(CallHome.Profiles.Profile.SubscribeAlertGroup.Inventory, ['subscribe'], name, value)

    class Periodic(_Entity_):
        """
        Periodic call\-home message
        .. attribute:: interval
        none
        **type**\: :py:class:`CallHomeMailSendInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHomeMailSendInterval>`
        .. attribute:: day
        Day of month
        **type**\: int
        **range:** 0..31
        .. attribute:: weekday
        Day of week
        **type**\: :py:class:`CallHomeDayOfWeek <ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg.CallHomeDayOfWeek>`
        .. attribute:: hour
        Hour
        **type**\: int
        **range:** 0..23
        .. attribute:: minute
        Minute
        **type**\: int
        **range:** 0..59
        """

        _prefix = 'call-home-cfg'
        _revision = '2018-07-24'

        def __init__(self):
            # Python 2/3 compatible call into the YDK entity base class.
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(CallHome.Profiles.Profile.SubscribeAlertGroup.Inventory.Periodic, self).__init__()
            self.yang_name = "periodic"
            self.yang_parent_name = "inventory"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])  # leaf-only container
            self._leafs = OrderedDict([
                ('interval', (YLeaf(YType.enumeration, 'interval'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg', 'CallHomeMailSendInterval', '')])),
                ('day', (YLeaf(YType.uint32, 'day'), ['int'])),
                ('weekday', (YLeaf(YType.enumeration, 'weekday'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_call_home_cfg', 'CallHomeDayOfWeek', '')])),
                ('hour', (YLeaf(YType.uint32, 'hour'), ['int'])),
                ('minute', (YLeaf(YType.uint32, 'minute'), ['int'])),
            ])
            # Schedule leafs; all unset until configured by the caller.
            self.interval = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self._segment_path = lambda: "periodic"
            # Must be assigned last: freezes the entity for validated writes.
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(CallHome.Profiles.Profile.SubscribeAlertGroup.Inventory.Periodic, ['interval', 'day', 'weekday', 'hour', 'minute'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_call_home_cfg as meta
            return meta._meta_table['CallHome.Profiles.Profile.SubscribeAlertGroup.Inventory.Periodic']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_call_home_cfg as meta
        return meta._meta_table['CallHome.Profiles.Profile.SubscribeAlertGroup.Inventory']['meta_info']
# Generated YDK model class binding the YANG "crash" container under
# subscribe-alert-group; a single presence leaf, no children.
class Crash(_Entity_):
    """
    Crash info
    .. attribute:: subscribe
    Subscribe crash group
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    """

    _prefix = 'call-home-cfg'
    _revision = '2018-07-24'

    def __init__(self):
        # Python 2/3 compatible call into the YDK entity base class.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(CallHome.Profiles.Profile.SubscribeAlertGroup.Crash, self).__init__()
        self.yang_name = "crash"
        self.yang_parent_name = "subscribe-alert-group"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container
        self._leafs = OrderedDict([
            ('subscribe', (YLeaf(YType.empty, 'subscribe'), ['Empty'])),
        ])
        self.subscribe = None  # presence leaf (Empty type)
        self._segment_path = lambda: "crash"
        # Must be assigned last: freezes the entity for validated writes.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # 'subscribe' is the only writable leaf on this container.
        self._perform_setattr(CallHome.Profiles.Profile.SubscribeAlertGroup.Crash, ['subscribe'], name, value)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_call_home_cfg as meta
        return meta._meta_table['CallHome.Profiles.Profile.SubscribeAlertGroup.Crash']['meta_info']
class Syslogs(_Entity_):
"""
syslog info
.. attribute:: syslog
Syslog | |
import numpy as np
import matplotlib.pyplot as plt
from os import makedirs
from os.path import isfile, exists
from scipy.constants import mu_0
# from numba import njit
def calcDipolMomentAnalytical(remanence, volume):
    """Return the magnetic dipole moment in A*m^2.

    The moment follows from the magnet remanence (in T) and its volume
    (in m^3) via m = Br * V / mu_0.
    """
    return remanence * volume / mu_0
def plotSimple(data, FOV, fig, ax, cbar=True, **args):
    """ Generate simple colorcoded plot of 2D grid data with contour. Returns axes object."""
    # Color-coded image of the grid; extra keyword args are forwarded to imshow.
    im = ax.imshow(data, extent=FOV, origin="lower", **args)
    # Dotted black contour lines drawn on top of the image.
    cs = ax.contour(data, colors="k", extent=FOV, origin="lower", linestyles="dotted")

    # Float subclass whose repr drops a trailing ".0", so contour labels show
    # "3" instead of "3.0" (labels are rendered via the %r format below).
    class nf(float):
        def __repr__(self):
            s = f"{self:.1f}"
            return f"{self:.0f}" if s[-1] == "0" else s

    cs.levels = [nf(val) for val in cs.levels]
    # Keep the format a raw string when LaTeX rendering is active.
    if plt.rcParams["text.usetex"]:
        fmt = r"%r"
    else:
        fmt = "%r"
    ax.clabel(cs, cs.levels, inline=True, fmt=fmt, fontsize=10)
    if cbar == True:
        fig.colorbar(im, ax=ax)
    # NOTE(review): the docstring says "returns axes object", but the imshow
    # image (AxesImage) is returned — confirm which is intended.
    return im
def centerCut(field, axis):
    """Return the central slice of ``field`` along the specified axis."""
    middle = np.shape(field)[axis] // 2
    return np.take(field, indices=middle, axis=axis)
def isHarmonic(field, sphericalMask, shellMask):
    """Check whether the masked field's extrema lie on the shell surface.

    A harmonic function attains its extrema on the boundary, so the
    peak-to-peak homogeneity over the full sphere must not exceed the one
    on the shell. Prints a short report and returns True when the field
    looks harmonic, False otherwise.

    Improvement over the original: the identical two-line report was
    duplicated in both branches; it is now printed once, and ptpPPM is
    evaluated once per field instead of twice.
    """
    fullField = np.multiply(field, sphericalMask)  # [T]
    reducedField = np.multiply(field, shellMask)
    ptpFull = ptpPPM(fullField)
    ptpShell = ptpPPM(reducedField)
    # Same report in either case; only the verdict below differs.
    print(
        "ptpPPM of field:",
        ptpFull,
        "ptpPPM on surface",
        ptpShell,
    )
    # Comparison is done on truncated integers, as in the original.
    if int(ptpFull) > int(ptpShell):
        print("Masked field is NOT a harmonic function...")
        return False
    print("Masked field is harmonic.")
    sizeSpherical = int(np.nansum(sphericalMask))
    sizeShell = int(np.nansum(shellMask))
    print(
        "Reduced size of field from {} to {} ({}%)".format(
            sizeSpherical, sizeShell, int(100 * sizeShell / sizeSpherical)
        )
    )
    return True
def genQmesh(field, resolution):
    """Generate squared coordinate meshes spanning ``field``.

    Returns a zero-filled mask with the shape of ``field`` plus the
    element-wise squares of the x, y and z coordinate meshes, where each
    axis is centered on 0 and spaced by ``resolution``.

    Improvement over the original: the three identical linspace
    constructions are factored into one helper.
    """

    def _axis(dim):
        # Symmetric coordinate axis for dimension `dim`, step = resolution.
        half = (np.size(field, dim) - 1) * resolution / 2
        return np.linspace(-half, half, np.size(field, dim))

    mask = np.zeros(np.shape(field))
    # NOTE(review): np.meshgrid defaults to indexing="xy", which swaps the
    # first two array axes; for non-cubic fields the returned meshes will not
    # match `field`'s shape. Kept as-is because downstream masks (notably the
    # ellipse slice) depend on this orientation — confirm before changing to
    # indexing="ij".
    xAxis, yAxis, zAxis = np.meshgrid(_axis(0), _axis(1), _axis(2))
    return mask, np.square(xAxis), np.square(yAxis), np.square(zAxis)
def genMask(
    field, resolution, diameter=False, shellThickness=False, axis=False, debug=False
):
    """Generate a mask for a spherical shell.

    Voxels whose radius lies between diameter/2 - shellThickness and
    diameter/2 are set to 1; every other voxel is NaN. If ``diameter`` or
    ``shellThickness`` is left at False, an all-NaN mask is returned.
    ``axis`` is accepted for interface compatibility but unused here.

    Improvement over the original: NaN is written as ``np.nan`` instead of
    the string "NaN" (identical behavior — NumPy converted the string).
    """
    mask, xAxisSquare, yAxisSquare, zAxisSquare = genQmesh(field, resolution)
    if (shellThickness != False) and (diameter != False):
        if debug == True:
            print(
                "Creating shell mask. (resolution = {}, diameter = {}, shellThickness = {})".format(
                    resolution, diameter, shellThickness
                )
            )
            print("The shell is added inside the sphere surface!")
        rAxisSquare = xAxisSquare + yAxisSquare + zAxisSquare
        # Compare squared radii so no square root is taken on the grid.
        innerRadiusSquare = (diameter / 2 - shellThickness) ** 2
        outerRadiusSquare = (diameter / 2) ** 2
        mask[
            (rAxisSquare <= outerRadiusSquare) & (rAxisSquare >= innerRadiusSquare)
        ] = 1
    mask[mask == 0] = np.nan
    return mask
def genSphericalMask(field, diameter, resolution):
    """Generate a spherical mask with ``diameter``
    for a ``field`` and a given ``resolution``.

    Voxels inside the sphere are 1, all others NaN.
    Improvement over the original: ``np.nan`` replaces the string "NaN"
    (identical behavior — NumPy converted the string).
    """
    mask, xAxisSquare, yAxisSquare, zAxisSquare = genQmesh(field, resolution)
    # Squared-radius comparison avoids square roots on the grid.
    mask[xAxisSquare + yAxisSquare + zAxisSquare <= (diameter / 2) ** 2] = 1
    mask[mask == 0] = np.nan
    return mask
def genSliceMask(field, diameter, resolution, axis="x"):
    """Generate a mask for a circular slice through the center
    with ``diameter`` for a ``field`` and a given ``resolution``.

    Every input variable has to have the same unit (mm or m or ...).
    Voxels inside the disc perpendicular to ``axis`` are 1, all others NaN.

    Improvements over the original: the three independent ``if`` tests are
    an ``if``/``elif`` chain (``axis`` can match at most one branch, so
    behavior is unchanged), and ``np.nan`` replaces the string "NaN".
    """
    mask, xAxisSquare, yAxisSquare, zAxisSquare = genQmesh(field, resolution)
    # The two in-plane squared radii select the disc; the `== 0` term pins
    # the slice to the central plane along `axis`.
    if axis == "z":
        mask[
            (xAxisSquare + yAxisSquare <= (diameter / 2) ** 2) & (zAxisSquare == 0)
        ] = 1
    elif axis == "y":
        mask[
            (xAxisSquare + zAxisSquare <= (diameter / 2) ** 2) & (yAxisSquare == 0)
        ] = 1
    elif axis == "x":
        mask[
            (yAxisSquare + zAxisSquare <= (diameter / 2) ** 2) & (xAxisSquare == 0)
        ] = 1
    mask[mask == 0] = np.nan
    return mask
def genEllipseSliceMask(field, a, b, resolution, axis="x"):
    """Generate a mask for an elliptical slice through the center.

    The ellipse has principal diameters ``a`` and ``b``; voxels inside the
    ellipse in the plane perpendicular to ``axis`` are 1, all others NaN.
    Every input variable has to have the same unit (mm or m or ...).

    Improvements over the original: the docstring no longer describes a
    circular slice with a single diameter (it was copied from
    genSliceMask, with a typo), and ``np.nan`` replaces the string "NaN".
    """
    mask, xAxisSquare, yAxisSquare, zAxisSquare = genQmesh(field, resolution)
    # Standard ellipse equation with semi-axes a/2 and b/2, evaluated on the
    # squared coordinate grids; the `== 0` term pins the central plane.
    if axis == "z":
        mask[
            (xAxisSquare / (a / 2) ** 2 + yAxisSquare / (b / 2) ** 2 <= 1)
            & (zAxisSquare == 0)
        ] = 1
    elif axis == "y":
        mask[
            (xAxisSquare / (a / 2) ** 2 + zAxisSquare / (b / 2) ** 2 <= 1)
            & (yAxisSquare == 0)
        ] = 1
    elif axis == "x":
        mask[
            (yAxisSquare / (a / 2) ** 2 + zAxisSquare / (b / 2) ** 2 <= 1)
            & (xAxisSquare == 0)
        ] = 1
    mask[mask == 0] = np.nan
    return mask
def ptpPPM(field):
    """Peak-to-peak field homogeneity in parts per million (NaN-aware)."""
    spread = np.nanmax(field) - np.nanmin(field)
    return 1e6 * spread / np.nanmean(field)
def saveParameters(parameters, folder):
    """Saving a dict to the file parameters.npy .

    If the file exists it is being updated, unless the parameters are
    already stored identically, or only conflicting keys are present (then
    nothing is written and a warning is printed).
    __future__: Fix usecase: Some parameters are in dict which are identical to the
    stored ones and some are new!
    """
    try:
        print("Saving parameters to file...", end=" ")
        print("\x1b[6;30;42m", *parameters.keys(), "\x1b[0m", end=" ")
        oldParameters = loadParameters(folder)
        if parameters.items() <= oldParameters.items():
            print(" ... the parameters are already saved and identical.")
        elif set(parameters).issubset(
            set(oldParameters)
        ):  # here just keys are compared!
            print(
                " ...\x1b[6;37;41m"
                + " parameters are NOT saved. Other parameters are stored. Please cleanup! "
                + "\x1b[0m"
            )
        else:
            oldParameters.update(parameters)
            np.save(folder + "/parameters", oldParameters)
            print(" ... added.")
    # BUG FIX: the original `except FileNotFoundError or AttributeError:`
    # only caught FileNotFoundError — `or` evaluates to its first operand.
    # A tuple catches both exceptions as intended (no stored file yet, or an
    # unreadable/invalid stored object).
    except (FileNotFoundError, AttributeError):
        np.save(folder + "/parameters", parameters)
        oldParameters = parameters
    # print('The following parameters are currently stored:\n', *oldParameters.keys())
def loadParameters(folder):
    """Load the parameter dict stored at <folder>/parameters.npy."""
    stored = np.load(folder + "/parameters.npy", allow_pickle=True)
    return stored.item()
def loadParameter(key, folder):
    """Return the single stored parameter named ``key`` from ``folder``."""
    parameters = loadParameters(folder)
    return parameters[key]
def displayParameters(folder):
    """Print the full parameter dict stored in ``folder``."""
    parameters = loadParameters(folder)
    print(parameters)
def createShimfieldsShimRingV2(
numMagnets=(32, 44),
rings=4,
radii=(0.074, 0.097),
zRange=(-0.08, -0.039, 0.039, 0.08),
resolution=1000,
kValue=2,
simDimensions=(0.04, 0.04, 0.04),
numRotations=2,
):
""" Calculating the magnetic field distributions for a single or multiple Halbach Rings.
This has to be multiplied with the magnetic moment amplitude of a magnet to get the real distribution
For every magnet position we set 4 different rotations: 0°, 45°, 90°, 135°. This has to be considered in the cost function
otherwise two magnets are placed in one position
resolution is the amount of sample points times data points in one dimension
"""
mu = mu_0
# positioning of the magnets in a circle
if len(zRange) == 2:
rings = np.linspace(zRange[0], zRange[1], rings)
elif rings == len(zRange):
rings = np.array(zRange)
else:
print("No clear definition how to place shims...")
rotation_elements = np.linspace(0, np.pi, numRotations, endpoint=False)
# create array to store field data
count = 0
if type(numMagnets) in (list, tuple):
totalNumMagnets = np.sum(numMagnets) * np.size(rings) * numRotations
else:
totalNumMagnets = numMagnets * np.size(rings) * numRotations * len(radii)
print(totalNumMagnets, numMagnets, np.size(rings), np.size(numRotations))
shimFields = np.zeros(
(
int(simDimensions[0] * resolution) + 1,
int(simDimensions[1] * resolution) + 1,
int(simDimensions[2] * resolution) + 1,
3,
totalNumMagnets,
),
dtype=np.float32,
)
for rotation in rotation_elements:
# create halbach array
for row in rings:
for i, radius in enumerate(radii):
angle_elements = np.linspace(
-np.pi, np.pi, numMagnets[i], endpoint=False
)
for angle in angle_elements:
print(
"Simulating magnet "
+ str(count + 1)
+ " of "
+ str(totalNumMagnets),
end="\t",
)
position = (row, radius * np.cos(angle), radius * np.sin(angle))
print(
"@ position {:2.2},\t {:2.2},\t {:2.2}".format(*position),
end="\r",
)
angle = kValue * angle + rotation
dip_vec = [0, np.sin(angle), -np.cos(angle)]
dip_vec = np.multiply(dip_vec, mu)
dip_vec = np.divide(dip_vec, 4 * np.pi)
# create mesh coordinates
x = np.linspace(
-simDimensions[0] / 2 + position[0],
simDimensions[0] / 2 + position[0],
int(simDimensions[0] * resolution) + 1,
dtype=np.float32,
)
y = np.linspace(
-simDimensions[1] / 2 + position[1],
simDimensions[1] / 2 + position[1],
int(simDimensions[1] * resolution) + 1,
dtype=np.float32,
)
z = np.linspace(
-simDimensions[2] / 2 + position[2],
simDimensions[2] / 2 + position[2],
int(simDimensions[2] * resolution) + 1,
dtype=np.float32,
)
x, y, z = np.meshgrid(x, y, z)
vec_dot_dip = 3 * (y * dip_vec[1] + z * dip_vec[2])
# calculate the distance of each mesh point to magnet, optimised for speed
# for improved memory performance move in to b0 calculations
vec_mag = np.square(x) + np.square(y) | |
= histogram.Histogram1D(bin_edges=bin_edges, y=y, errors_squared=errors_squared)
if access_attributes_which_are_stored:
# This attribute will be stored (but under "_x"), so we want to make sure that it
# doesn't disrupt the equality comparison.
h1.x
if not test_equality:
h1.bin_edges = np.array([5, 6, 7, 8, 9])
if test_equality:
assert h1 == h2
else:
assert h1 != h2
@dataclass
class HistInfo:
    """Convenience for storing hist testing information.
    Could reuse the ``Histogram1D`` object, but since we're testing it here, it seems better to use
    a separate object.
    """
    # Bin contents and squared errors, index-aligned with the bin edges
    # supplied at conversion time. (Annotations corrected: np.ndarray is the
    # array type; np.array is a factory function.)
    y: np.ndarray
    errors_squared: np.ndarray

    def convert_to_histogram_1D(self, bin_edges: np.ndarray) -> histogram.Histogram1D:
        """ Convert these stored values into a ``Histogram1D``. """
        return histogram.Histogram1D(
            bin_edges=bin_edges,
            y=self.y,
            errors_squared=self.errors_squared,
        )

    def convert_to_ROOT_hist(self, bin_edges: np.ndarray) -> Hist:
        """Convert these stored values in a ROOT.TH1F.
        This isn't very robust, which is why I'm not including it in ``Histogram1D``. However,
        something simple is sufficient for our purposes here.
        """
        # Local import so the module works when ROOT isn't installed.
        import ROOT
        hist = ROOT.TH1F("tempHist", "tempHist", len(bin_edges) - 1, bin_edges.astype(float))
        # Enable per-bin error tracking before setting contents.
        hist.Sumw2()
        # Exclude under- and overflow
        for i, (val, error_squared) in enumerate(zip(self.y, self.errors_squared), start=1):
            # ROOT hists are 1-indexed.
            hist.SetBinContent(i, val)
            hist.SetBinError(i, np.sqrt(error_squared))
        return hist
class TestHistogramOperators:
    """Test ``Histogram1D`` operators.
    In principle, we could refactor all of the tests by explicitly calling
    the functions. But since the expected values are different for each test,
    and the test code itself is very simple, there doesn't seem to be much point.
    """
    # Shared binning: 10 unit-width bins on [0, 10]; every fill scenario
    # below puts its content in bin index 2.
    _bin_edges = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    # Precomputed fill scenarios (y, errors_squared). Judging by the values,
    # errors_squared accumulates weight**2 per fill — e.g. one fill of
    # weight 2 gives y = 2 with errors_squared = 4.
    _filled_two_times = HistInfo(np.array([0, 0, 2.0, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 2.0, 0, 0, 0, 0, 0, 0, 0]))
    _filled_four_times = HistInfo(
        np.array([0, 0, 4.0, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 4.0, 0, 0, 0, 0, 0, 0, 0])
    )
    _filled_once_with_weight_of_2 = HistInfo(
        np.array([0, 0, 2.0, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 4.0, 0, 0, 0, 0, 0, 0, 0])
    )
    _filled_twice_with_weight_of_2 = HistInfo(
        np.array([0, 0, 4.0, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 8.0, 0, 0, 0, 0, 0, 0, 0])
    )
# Each param tuple: (first hist fills, second hist fills, expected sum);
# expected y values add linearly and errors_squared add linearly too.
@pytest.fixture(
    params=[  # type: ignore
        (
            _filled_two_times,
            _filled_four_times,
            HistInfo(np.array([0, 0, 6, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 6, 0, 0, 0, 0, 0, 0, 0])),
        ),
        (
            _filled_two_times,
            _filled_twice_with_weight_of_2,
            HistInfo(np.array([0, 0, 6, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 10, 0, 0, 0, 0, 0, 0, 0])),
        ),
        (
            _filled_once_with_weight_of_2,
            _filled_twice_with_weight_of_2,
            HistInfo(np.array([0, 0, 6, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 12, 0, 0, 0, 0, 0, 0, 0])),
        ),
    ],
    ids=["Standard filled", "One standard, one weighted", "Two weighted"],
)
def setup_addition(self, request: Any, logging_mixin: Any) -> Tuple[Any, ...]:
    """We want to share this parametrization between multiple tests, so we define it as a fixture.
    However, each test performs rather different steps, so there is little else to do here.
    Returns:
        Tuple[HistInfo, HistInfo, HistInfo, histogram.Histogram1D, histogram.Histogram1D] . Recorded here
        because mypy struggles with the typing information here.
    """
    # Setup: build the two Histogram1D operands from the parametrized fills.
    h1 = request.param[0].convert_to_histogram_1D(bin_edges=self._bin_edges)
    h2 = request.param[1].convert_to_histogram_1D(bin_edges=self._bin_edges)
    return (*request.param, h1, h2)
def test_addition(self, setup_addition: Any) -> None:
    """Adding two ``Histogram1D`` objects must match the precomputed sum."""
    h1_info, h2_info, expected, h1, h2 = setup_addition
    total = h1 + h2
    # Compare every stored array against the expectation from the fixture.
    for actual, reference in (
        (total.bin_edges, self._bin_edges),
        (total.y, expected.y),
        (total.errors_squared, expected.errors_squared),
    ):
        assert np.allclose(actual, reference)
@pytest.mark.ROOT  # type: ignore
def test_compare_addition_to_ROOT(self, setup_addition: Any) -> None:
    """ Compare the result of ``Histogram1D`` addition vs ROOT. """
    # Setup: build equivalent ROOT histograms from the same fill info.
    h1_info, h2_info, expected, h1, h2 = setup_addition
    h1_root = h1_info.convert_to_ROOT_hist(bin_edges=self._bin_edges)
    h2_root = h2_info.convert_to_ROOT_hist(bin_edges=self._bin_edges)
    # Operation
    h3 = h1 + h2
    h1_root.Add(h2_root)  # ROOT adds in place into h1_root
    # Check result
    assert check_hist(h1_root, h3)
def test_sum_function(self, setup_addition: Any) -> None:
    """Summing histograms via the builtin ``sum`` must behave like ``+``.

    ``sum`` starts from 0, so this also exercises right-addition with an int.
    """
    h1_info, h2_info, expected, h1, h2 = setup_addition
    combined = sum([h1, h2])
    # Help out mypy...
    assert isinstance(combined, histogram.Histogram1D)
    for actual, reference in (
        (combined.bin_edges, self._bin_edges),
        (combined.y, expected.y),
        (combined.errors_squared, expected.errors_squared),
    ):
        assert np.allclose(actual, reference)
# Each param tuple: (first, second, expected second-minus-first); y values
# subtract while errors_squared still ADD (uncorrelated error propagation).
@pytest.fixture(
    params=[  # type: ignore
        (
            _filled_two_times,
            _filled_four_times,
            HistInfo(np.array([0, 0, 2, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 6, 0, 0, 0, 0, 0, 0, 0])),
        ),
        (
            _filled_two_times,
            _filled_twice_with_weight_of_2,
            HistInfo(np.array([0, 0, 2, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 10, 0, 0, 0, 0, 0, 0, 0])),
        ),
        (
            _filled_once_with_weight_of_2,
            _filled_twice_with_weight_of_2,
            HistInfo(np.array([0, 0, 2, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 12, 0, 0, 0, 0, 0, 0, 0])),
        ),
    ],
    ids=["Standard filled", "One standard, one weighted", "Two weighted"],
)
def setup_subtraction(self, request: Any, logging_mixin: Any) -> Tuple[Any, ...]:
    """We want to share this parametrization between multiple tests, so we define it as a fixture.
    However, each test performs rather different steps, so there is little else to do here.
    """
    # Setup: build the two Histogram1D operands from the parametrized fills.
    h1 = request.param[0].convert_to_histogram_1D(bin_edges=self._bin_edges)
    h2 = request.param[1].convert_to_histogram_1D(bin_edges=self._bin_edges)
    return (*request.param, h1, h2)
def test_subtraction(self, setup_subtraction: Any) -> None:
    """Subtracting two ``Histogram1D`` objects must match the expectation."""
    h1_info, h2_info, expected, h1, h2 = setup_subtraction
    difference = h2 - h1
    # Compare every stored array against the expectation from the fixture.
    for actual, reference in (
        (difference.bin_edges, self._bin_edges),
        (difference.y, expected.y),
        (difference.errors_squared, expected.errors_squared),
    ):
        assert np.allclose(actual, reference)
@pytest.mark.ROOT  # type: ignore
def test_compare_subtraction_to_ROOT(self, setup_subtraction: Any) -> None:
    """ Compare the result of ``Histogram1D`` subtraction vs ROOT. """
    # Setup: build equivalent ROOT histograms from the same fill info.
    h1_info, h2_info, expected, h1, h2 = setup_subtraction
    h1_root = h1_info.convert_to_ROOT_hist(bin_edges=self._bin_edges)
    h2_root = h2_info.convert_to_ROOT_hist(bin_edges=self._bin_edges)
    # Operation
    h3 = h2 - h1
    h2_root.Add(h1_root, -1)  # ROOT subtracts via Add with weight -1
    # Check result
    assert check_hist(h2_root, h3)
# Each param tuple: (first, second, expected product); y values multiply and
# errors_squared follow the relative-error propagation
# err^2 = y2^2 * e1^2 + y1^2 * e2^2 (e.g. 2*4=8 with 16*2 + 4*4 = 48).
@pytest.fixture(
    params=[  # type: ignore
        (
            _filled_two_times,
            _filled_four_times,
            HistInfo(np.array([0, 0, 8, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 48, 0, 0, 0, 0, 0, 0, 0])),
        ),
        (
            _filled_two_times,
            _filled_twice_with_weight_of_2,
            HistInfo(np.array([0, 0, 8, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 64, 0, 0, 0, 0, 0, 0, 0])),
        ),
        (
            _filled_once_with_weight_of_2,
            _filled_twice_with_weight_of_2,
            HistInfo(np.array([0, 0, 8, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 96, 0, 0, 0, 0, 0, 0, 0])),
        ),
    ],
    ids=["Standard filled", "One standard, one weighted", "Two weighted"],
)
def setup_multiplication(self, request: Any, logging_mixin: Any) -> Tuple[Any, ...]:
    """We want to share this parametrization between multiple tests, so we define it as a fixture.
    However, each test performs rather different steps, so there is little else to do here.
    """
    # Setup: build the two Histogram1D operands from the parametrized fills.
    h1 = request.param[0].convert_to_histogram_1D(bin_edges=self._bin_edges)
    h2 = request.param[1].convert_to_histogram_1D(bin_edges=self._bin_edges)
    return (*request.param, h1, h2)
def test_multiplication(self, setup_multiplication: Any) -> None:
    """Multiplying two ``Histogram1D`` objects must match the expectation."""
    h1_info, h2_info, expected, h1, h2 = setup_multiplication
    product = h2 * h1
    # Compare every stored array against the expectation from the fixture.
    for actual, reference in (
        (product.bin_edges, self._bin_edges),
        (product.y, expected.y),
        (product.errors_squared, expected.errors_squared),
    ):
        assert np.allclose(actual, reference)
@pytest.mark.ROOT  # type: ignore
def test_compare_multiplication_to_ROOT(self, setup_multiplication: Any) -> None:
    """ Compare the result of ``Histogram1D`` multiplication vs ROOT. """
    # Setup: build equivalent ROOT histograms from the same fill info.
    h1_info, h2_info, expected, h1, h2 = setup_multiplication
    h1_root = h1_info.convert_to_ROOT_hist(bin_edges=self._bin_edges)
    h2_root = h2_info.convert_to_ROOT_hist(bin_edges=self._bin_edges)
    # Operation
    h3 = h2 * h1
    h2_root.Multiply(h1_root)  # ROOT multiplies in place into h2_root
    # Check result
    assert check_hist(h2_root, h3)
# Each param tuple: (hist fills, scalar, expected result); y scales by the
# scalar while errors_squared scales by scalar**2 (e.g. 2*9 = 18).
@pytest.fixture(
    params=[  # type: ignore
        (
            _filled_two_times,
            3,
            HistInfo(np.array([0, 0, 6, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 18, 0, 0, 0, 0, 0, 0, 0])),
        ),
        (
            _filled_twice_with_weight_of_2,
            3,
            HistInfo(np.array([0, 0, 12, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 72, 0, 0, 0, 0, 0, 0, 0])),
        ),
    ],
    ids=["Standard filled", "Weighing filled"],
)
def setup_scalar_multiplication(self, logging_mixin: Any, request: Any) -> Tuple[Any, ...]:
    """We want to share this parametrization between multiple tests, so we define it as a fixture.
    However, each test performs rather different steps, so there is little else to do here.
    """
    # Setup: only one histogram operand is needed for scalar scaling.
    h1 = request.param[0].convert_to_histogram_1D(bin_edges=self._bin_edges)
    return (*request.param, h1)
def test_scalar_multiplication(self, setup_scalar_multiplication: Any) -> None:
    """Scaling a ``Histogram1D`` by a scalar must match the expectation."""
    h1_info, scalar, expected, h1 = setup_scalar_multiplication
    scaled = h1 * scalar
    # Compare every stored array against the expectation from the fixture.
    for actual, reference in (
        (scaled.bin_edges, self._bin_edges),
        (scaled.y, expected.y),
        (scaled.errors_squared, expected.errors_squared),
    ):
        assert np.allclose(actual, reference)
@pytest.mark.ROOT  # type: ignore
def test_compare_scalar_multiplication_to_ROOT(self, setup_scalar_multiplication: Any) -> None:
    """ Compare the results of ``Histogram1D`` multiplication vs ROOT. """
    # Setup: build the equivalent ROOT histogram from the same fill info.
    h1_info, scalar, expected, h1 = setup_scalar_multiplication
    h1_root = h1_info.convert_to_ROOT_hist(bin_edges=self._bin_edges)
    # Operation
    h3 = h1 * scalar
    h1_root.Scale(scalar)  # ROOT scales in place
    # Check result
    assert check_hist(h1_root, h3)
@pytest.fixture(
params=[ # type: ignore
(
_filled_two_times,
_filled_four_times,
HistInfo(np.array([0, 0, 2, 0, 0, 0, 0, | |
<filename>silx/gui/icons.py<gh_stars>10-100
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Set of icons for buttons.
Use :func:`getQIcon` to create Qt QIcon from the name identifying an icon.
"""
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "07/01/2019"
import os
import logging
import weakref
from . import qt
import silx.resources
from silx.utils import weakref as silxweakref
from silx.utils.deprecation import deprecated
# Module-level logger for this icons module.
_logger = logging.getLogger(__name__)
"""Module logger"""

# Lazily created by getIconCache(); holds a weakref.WeakValueDictionary so
# icons are dropped once no caller keeps a strong reference.
_cached_icons = None
"""Cache loaded icons in a weak structure"""
def getIconCache():
    """Return the process-wide icon cache, creating it lazily on first use.

    :rtype: dict
    """
    global _cached_icons
    if _cached_icons is not None:
        return _cached_icons
    _cached_icons = weakref.WeakValueDictionary()
    # Release cached icons before Qt shuts down
    # (see https://github.com/silx-kit/silx/issues/1771)
    qt.QApplication.instance().aboutToQuit.connect(cleanIconCache)
    return _cached_icons
def cleanIconCache():
    """Clean up the icon cache.

    Connected to ``QApplication.aboutToQuit`` by :func:`getIconCache`, so the
    cached QIcons are released before Qt tears down.
    """
    _logger.debug("Clean up icon cache")
    _cached_icons.clear()
# Lazily populated list of icon file extensions, ordered by lookup priority.
_supported_formats = None
"""Order of file format extension to check"""
class AbstractAnimatedIcon(qt.QObject):
    """Store an animated icon.

    It provides an event containing the new icon everytime it is updated."""

    def __init__(self, parent=None):
        """Constructor

        :param qt.QObject parent: Parent of the QObject
        :raises: ValueError when name is not known
        """
        qt.QObject.__init__(self, parent)

        # Objects currently displaying this icon.  Stored weakly so that
        # registration alone never keeps a widget alive.
        self.__targets = silxweakref.WeakList()
        # Last icon pushed through `iconChanged` (None until the first update).
        self.__currentIcon = None

    iconChanged = qt.Signal(qt.QIcon)
    """Signal sent with a QIcon everytime the animation changed."""

    def register(self, obj):
        """Register an object to the AnimatedIcon.

        If no object is registered, the animation is paused.
        Objects are stored in a weak list.

        :param object obj: An object
        """
        if obj not in self.__targets:
            self.__targets.append(obj)
        self._updateState()

    def unregister(self, obj):
        """Remove the object from the registration.

        If no object is registered the animation is paused.

        :param object obj: A registered object
        """
        if obj in self.__targets:
            self.__targets.remove(obj)
        self._updateState()

    def hasRegistredObjects(self):
        """Returns true if any object is registered.

        :rtype: bool
        """
        # NOTE(review): actually returns the registration count (an int);
        # callers rely on its truthiness.
        return len(self.__targets)

    def isRegistered(self, obj):
        """Returns true if the object is registered in the AnimatedIcon.

        :param object obj: An object
        :rtype: bool
        """
        return obj in self.__targets

    def currentIcon(self):
        """Returns the icon of the current frame.

        :rtype: qt.QIcon
        """
        return self.__currentIcon

    def _updateState(self):
        """Update the object according to the connected objects."""
        # Default implementation does nothing; subclasses pause or resume
        # their animation here based on hasRegistredObjects().
        pass

    def _setCurrentIcon(self, icon):
        """Store the current icon and emit a `iconChanged` event.

        :param qt.QIcon icon: The current icon
        """
        self.__currentIcon = icon
        self.iconChanged.emit(self.__currentIcon)
class MovieAnimatedIcon(AbstractAnimatedIcon):
    """Store a looping QMovie to provide icons for each frames.

    Provides an event with the new icon everytime the movie frame
    is updated."""

    def __init__(self, filename, parent=None):
        """Constructor

        :param str filename: An icon name to an animated format
        :param qt.QObject parent: Parent of the QObject
        :raises: ValueError when name is not known
        """
        AbstractAnimatedIcon.__init__(self, parent)

        qfile = getQFile(filename)
        self.__movie = qt.QMovie(qfile.fileName(), qt.QByteArray(), parent)
        self.__movie.setCacheMode(qt.QMovie.CacheAll)
        # Connected before the jumpToFrame(0) below, so the initial frame
        # change is observed as well.
        self.__movie.frameChanged.connect(self.__frameChanged)
        # One QIcon per frame id, created lazily on first display.
        self.__cacheIcons = {}

        self.__movie.jumpToFrame(0)
        self.__updateIconAtFrame(0)

    def __frameChanged(self, frameId):
        """Callback everytime the QMovie frame change

        :param int frameId: Current frame id
        """
        self.__updateIconAtFrame(frameId)

    def __updateIconAtFrame(self, frameId):
        """
        Update the current stored QIcon

        :param int frameId: Current frame id
        """
        if frameId in self.__cacheIcons:
            icon = self.__cacheIcons[frameId]
        else:
            icon = qt.QIcon(self.__movie.currentPixmap())
            self.__cacheIcons[frameId] = icon
        self._setCurrentIcon(icon)

    def _updateState(self):
        """Update the movie play according to internal stat of the
        AnimatedIcon."""
        # Pause the underlying QMovie whenever nothing displays this icon.
        self.__movie.setPaused(not self.hasRegistredObjects())
class MultiImageAnimatedIcon(AbstractAnimatedIcon):
    """Store a looping sequence of image frames to provide icons.

    Provides an event with the new icon everytime the frame
    is updated."""

    def __init__(self, filename, parent=None):
        """Constructor

        :param str filename: An icon name to an animated format
        :param qt.QObject parent: Parent of the QObject
        :raises: ValueError when name is not known
        """
        AbstractAnimatedIcon.__init__(self, parent)

        self.__frames = []
        # Load numbered frames ("<filename>/00", "<filename>/01", ...) until
        # one is missing; 100 is an upper bound on the frame count.
        for i in range(100):
            try:
                frame_filename = os.sep.join((filename, ("%02d" %i)))
                frame_file = getQFile(frame_filename)
            except ValueError:
                break
            try:
                icon = qt.QIcon(frame_file.fileName())
            except ValueError:
                # NOTE(review): qt.QIcon is not documented to raise ValueError;
                # getQFile above is the expected failure point -- confirm.
                break
            self.__frames.append(icon)

        if len(self.__frames) == 0:
            raise ValueError("Animated icon '%s' do not exists" % filename)

        self.__frameId = -1
        self.__timer = qt.QTimer(self)
        self.__timer.timeout.connect(self.__increaseFrame)
        self.__updateIconAtFrame(0)

    def __increaseFrame(self):
        """Callback called every timer timeout to change the current frame of
        the animation
        """
        frameId = (self.__frameId + 1) % len(self.__frames)
        self.__updateIconAtFrame(frameId)

    def __updateIconAtFrame(self, frameId):
        """
        Update the current stored QIcon

        :param int frameId: Current frame id
        """
        self.__frameId = frameId
        icon = self.__frames[frameId]
        self._setCurrentIcon(icon)

    def _updateState(self):
        """Start or stop the frame timer depending on whether anything is
        displaying the icon."""
        if self.hasRegistredObjects():
            if not self.__timer.isActive():
                # Advance one frame every 100 ms.
                self.__timer.start(100)
        else:
            if self.__timer.isActive():
                self.__timer.stop()
class AnimatedIcon(MovieAnimatedIcon):
    """Deprecated alias of :class:`MovieAnimatedIcon`.

    Store a looping QMovie to provide icons for each frames.
    Provides an event with the new icon everytime the movie frame
    is updated.

    It may not be available anymore for the silx release 0.6.

    .. deprecated:: 0.5
        Use :class:`MovieAnimatedIcon` instead.
    """

    @deprecated
    def __init__(self, filename, parent=None):
        # @deprecated emits a warning, then we simply delegate.
        MovieAnimatedIcon.__init__(self, filename, parent=parent)
def getWaitIcon():
    """Returns a cached version of the waiting AbstractAnimatedIcon.

    Convenience wrapper around :func:`getAnimatedIcon` for the standard
    busy indicator.

    :rtype: AbstractAnimatedIcon
    """
    return getAnimatedIcon("process-working")
def getAnimatedIcon(name):
    """Create an AbstractAnimatedIcon from a resource name.

    The resource name can be prefixed by the name of a resource directory. For
    example "silx:foo.png" identifies the resource "foo.png" from the resource
    directory "silx".

    If no prefix is specified, the file is returned from the silx
    resource directory with a specific path "gui/icons".

    See also :func:`silx.resources.register_resource_directory`.

    Try to load a mng or a gif file, then try to load a multi-image animated
    icon.

    In Qt5 mng or gif are not used, because the transparency is not very well
    managed.

    :param str name: Name of the icon, in one of the defined icons
        in this module.
    :return: Corresponding AbstractAnimatedIcon
    :raises: ValueError when name is not known
    """
    key = name + "__anim"
    cached_icons = getIconCache()
    if key not in cached_icons:
        qtMajorVersion = int(qt.qVersion().split(".")[0])

        icon = None
        # ignore mng and gif in Qt5
        if qtMajorVersion != 5:
            try:
                icon = MovieAnimatedIcon(name)
            except ValueError:
                icon = None
        if icon is None:
            try:
                icon = MultiImageAnimatedIcon(name)
            except ValueError:
                icon = None
        if icon is None:
            # Bugfix: the message was previously passed as two ValueError
            # arguments ("...%s", name) and was never actually formatted.
            raise ValueError("Not an animated icon name: %s" % name)
        cached_icons[key] = icon
    else:
        icon = cached_icons[key]
    return icon
def getQIcon(name):
    """Create a QIcon from its name.

    The resource name can be prefixed by the name of a resource directory. For
    example "silx:foo.png" identifies the resource "foo.png" from the resource
    directory "silx".

    If no prefix is specified, the file is returned from the silx
    resource directory with a specific path "gui/icons".

    See also :func:`silx.resources.register_resource_directory`.

    :param str name: Name of the icon, in one of the defined icons
        in this module.
    :return: Corresponding QIcon
    :raises: ValueError when name is not known
    """
    cache = getIconCache()
    try:
        return cache[name]
    except KeyError:
        # Not cached (or already garbage collected): load and remember it.
        icon = qt.QIcon(getQFile(name).fileName())
        cache[name] = icon
        return icon
def getQPixmap(name):
    """Create a QPixmap from its name.

    The resource name can be prefixed by the name of a resource directory. For
    example "silx:foo.png" identifies the resource "foo.png" from the resource
    directory "silx".

    If no prefix is specified, the file is returned from the silx
    resource directory with a specific path "gui/icons".

    :param str name: Name of the icon, in one of the defined icons
        in this module.
    :return: Corresponding QPixmap
    :raises: ValueError when name is not known
    """
    resource_file = getQFile(name)
    pixmap = qt.QPixmap(resource_file.fileName())
    return pixmap
def getQFile(name):
"""Create a QFile from an icon name. Filename is found
according to supported Qt formats.
The resource name can be prefixed by the name of a resource directory. For
example "silx:foo.png" identify the resource "foo.png" from the resource
| |
import json
import logging
import re
from typing import Iterable, List, Optional, Type, TypeVar
from bs4 import BeautifulSoup
from sqlalchemy.orm import Session
from fallen_london_chronicler.images import get_or_cache_image, ImageType
from fallen_london_chronicler.model import Area, AreaType, Storylet, \
StoryletCategory, StoryletObservation, StoryletDistribution, \
StoryletFrequency, StoryletUrgency, Branch, BranchObservation, Challenge, \
ChallengeNature, ChallengeType, \
record_observation, Quality, QualityNature, \
BranchQualityRequirement, OutcomeObservation, OutcomeMessage, \
OutcomeMessageType, QualityRequirement, StoryletQualityRequirement, \
Setting, User, UserPossession
from fallen_london_chronicler.model.storylet import StoryletStickiness
from fallen_london_chronicler.model.utils import pairwise
from fallen_london_chronicler.schema import StoryletInfo, AreaInfo, \
BranchInfo, ChallengeInfo, QualityRequirementInfo, \
StoryletBranchOutcomeInfo, StoryletBranchOutcomeMessageInfo, PossessionInfo
from fallen_london_chronicler.schema.setting import SettingInfo
from fallen_london_chronicler.schema.storylet import CardInfo
from fallen_london_chronicler.utils import match_any
# Tooltip pattern families, tried in this order by record_quality_requirement.
# Named groups extracted from the tooltip HTML: quality, current,
# quantity_min, quantity_max, quantity, requirements.

# "Must have none of this quality" -> required maximum of 0.
TOOLTIPS_NONE = (
    re.compile(
        r"^You unlocked this by not having any "
        r"<span class='quality-name'>(?P<quality>.+)</span>$"
    ),
    re.compile(
        r"^You can't do this when you have "
        r"<span class='quality-name'>(?P<quality>.+)</span>$"
    ),
    re.compile(
        r"^Unlocked when you do not have "
        r"<span class='quality-name'>(?P<quality>.+)</span>$"
    ),
    re.compile(
        r"^You unlocked this by having no "
        r"<span class='quality-name'>(?P<quality>.+)</span>$"
    )
)

# "Must have at least one" -> required minimum of 1.
TOOLTIPS_AT_LEAST_ONE = (
    re.compile(
        r"^You need (?:an? )?<span class='quality-name'>(?P<quality>.+)</span>$"
    ),
    re.compile(
        r"^Unlocked when you have <span class='quality-name'>(?P<quality>.+)"
        r"</span>$"
    ),
    re.compile(
        r"^You need to be <span class='quality-name'>(?P<quality>.+)</span> "
        r"someone$"
    ),
    re.compile(
        r"^You unlocked this with (?:an? )?"
        r"<span class='quality-name'>(?P<quality>.+)</span> "
        r"<em>\(you have (?P<current>\d+) in all\)</em>$"
    ),
    # NOTE(review): "can't do this when you have any" reads like a
    # "requires none" condition (see TOOLTIPS_NONE) rather than
    # "at least one" -- confirm the intended semantics before moving it.
    re.compile(
        r"^You can't do this when you have any "
        r"<span class='quality-name'>(?P<quality>.+)</span>$"
    ),
    re.compile(r"^You must be (?P<quality>.+)\.$"),
    re.compile(r"^This is unlocked because you have the (?P<quality>.+)\.$")
)

# "Need at least N" -> required minimum of quantity_min.
TOOLTIPS_MINIMUM = (
    re.compile(
        r"^You unlocked this with "
        r"<span class='quality-name'>(?P<quality>.+)</span> (?P<current>\d+) "
        r"<em>\(you needed (?P<quantity_min>\d+)\)</em>$"
    ),
    re.compile(
        r"^You unlocked this with (?P<current>\d+) "
        r"<span class='quality-name'>(?P<quality>.+)</span> "
        r"<em>\(you needed (?P<quantity_min>\d+)\)</em>$"
    ),
    re.compile(
        r"^You need <span class='quality-name'>(?P<quality>.+)</span> "
        r"(?P<quantity_min>\d+)<em> \(you have (?P<current>\d+)\)</em>$"
    ),
    re.compile(
        r"^You need (?P<quantity_min>\d+) "
        r"<span class='quality-name'>(?P<quality>.+)</span> "
        r"<em>\(you have (?P<current>\d+)\)</em>$"
    ),
    re.compile(
        r"^You need (?P<quantity_min>\d+) "
        r"<span class='quality-name'>(?P<quality>.+)</span>$"
    ),
    re.compile(
        r"^You need <span class='quality-name'>(?P<quality>.+)</span> "
        r"(?P<quantity_min>\d+)$"
    ),
)

# "Need at most N" -> required maximum of quantity_max.
TOOLTIPS_MAXIMUM = (
    re.compile(
        r"^You can't do this when you have <span class='quality-name'>"
        r"(?P<quality>.+)</span> higher than (?P<quantity_max>\d+)"
        r"<em> \(you have (?P<current>\d+)\)</em>$"
    ),
    re.compile(
        r"^You unlocked this with "
        r"<span class='quality-name'>(?P<quality>.+)</span> (?P<current>\d+) "
        r"<em>\(you needed (?P<quantity_max>\d+) at most\)</em>$"
    ),
    re.compile(
        r"^You unlocked this by not having "
        r"<span class='quality-name'>(?P<quality>.+)</span> "
        r"<em>\(you needed (?P<quantity_max>\d+) at most\)</em>$"
    )
)

# "Need exactly N" -> minimum and maximum both set to quantity.
TOOLTIPS_EXACTLY = (
    re.compile(
        r"^You unlocked this with (?:an? )?"
        r"<span class='quality-name'>(?P<quality>.+)</span> (?P<current>\d+)"
        r"<em> \(you needed exactly (?P<quantity>\d+)\)</em>$"
    ),
    re.compile(
        r"^You need exactly "
        r"<span class='quality-name'>(?P<quality>.+)</span> (?P<quantity>\d+)"
        r"(?:<em> \(you have (?P<current>\d+)\)</em>)?$"
    ),
    re.compile(
        r"^You need <span class='quality-name'>(?P<quality>.+)</span> "
        r"exactly (?P<quantity>\d+)$"
    ),
    re.compile(
        r"^You unlocked this with any "
        r"<span class='quality-name'>(?P<quality>.+)</span>"
        r"<em> \(you needed exactly (?P<quantity>\d+)\)</em>$"
    ),
)

# "Need between N and M" -> both quantity_min and quantity_max.
TOOLTIPS_RANGE = (
    re.compile(
        r"^You unlocked this with "
        r"<span class='quality-name'>(?P<quality>.+)</span> (?P<current>\d+)"
        r"<em> \(you needed (?P<quantity_min>\d+)-(?P<quantity_max>\d+)\)</em>$"
    ),
    re.compile(
        r"^You need <span class='quality-name'>(?P<quality>.+)</span> "
        r"(?P<quantity_min>\d+)-(?P<quantity_max>\d+)"
        r"(?:<em> \(you have (?P<current>\d+)\)</em>)?$"
    )
)

# Requirement expressed as a list of allowed textual values.
TOOLTIPS_WORDY = (
    re.compile(
        r"^Unlocked when <span class='quality-name'>(?P<quality>.+)</span> is:"
        r"<ul class='wordy-list'>(?P<requirements>.+)</ul>$"
    ),
)

# Extracts the individual <li> entries from a TOOLTIPS_WORDY match.
TOOLTIPS_WORDY_ITEM = re.compile(
    r"<li(?: class='current')?>(?:<em>)?(.*?)(?:</em>)?</li>"
)

# Outcome-message patterns: quality quantity gained.
QUALITY_GAIN = (
    re.compile(
        r"^You've gained (?P<quantity>\d+) x (?P<quality>.+?)"
        r"(?: \(new total (?P<new_state>.+)\))?\.$"
    ),
    re.compile(r"^You now have (?P<quantity>\d+) x (?P<quality>.+)\.$"),
    re.compile(r"^You've gained (?P<quantity>\d+) x (?P<quality>.+)\.$"),
)

# Outcome-message patterns: quality quantity lost.
QUALITY_LOSS = (
    re.compile(
        r"^You've lost (?P<quantity>\d+) x (?P<quality>.+?)"
        r"(?: \(new total (?P<new_state>.+)\))?\.$"
    ),
)

# Outcome-message patterns: quality removed entirely.
QUALITY_SET_ZERO = (
    re.compile(r"^Your '(?P<quality>.+)' Quality has gone!$"),
    re.compile(
        r"^'(?P<quality>.+)' has been reset: a conclusion, or a "
        r"new beginning\?$"
    )
)

# Outcome-message patterns: quality set to an absolute value.
QUALITY_SET_TO = (
    re.compile(
        r"^An occurrence! Your '(?P<quality>.+)' Quality "
        r"is now (?P<quantity>\d+)!$"
    ),
)

# Type variable for record_quality_requirement: the concrete
# QualityRequirement subclass being instantiated.
T = TypeVar("T", bound=QualityRequirement)
def record_area(
    session: Session,
    area_info: AreaInfo,
    setting_id: Optional[int] = None
) -> Area:
    """Upsert an Area from scraped info, optionally linking it to a setting.

    :param session: active database session
    :param area_info: scraped data describing the area
    :param setting_id: when given, associate the area with this setting
    :return: the created or updated Area
    """
    recorded = Area.get_or_create(session, area_info.id)
    recorded.name = area_info.name
    recorded.description = fix_html(area_info.description)
    recorded.image = get_or_cache_image(ImageType.HEADER, area_info.image)
    recorded.type = AreaType(area_info.type)
    if setting_id is None:
        return recorded
    linked_setting = Setting.get_or_create(session, setting_id)
    if linked_setting not in recorded.settings:
        recorded.settings.append(linked_setting)
    return recorded
def record_setting(
    session: Session,
    setting_info: SettingInfo,
    area_id: Optional[int] = None
) -> Setting:
    """Upsert a Setting from scraped info, optionally linking it to an area.

    :param session: active database session
    :param setting_info: scraped data describing the setting
    :param area_id: when given, associate the setting with this area
    :return: the created or updated Setting
    """
    recorded = Setting.get_or_create(session, setting_info.id)
    recorded.name = setting_info.name
    recorded.can_change_outfit = setting_info.canChangeOutfit
    recorded.can_travel = setting_info.canTravel
    recorded.is_infinite_draw = setting_info.isInfiniteDraw
    recorded.items_usable_here = setting_info.itemsUsableHere
    if area_id is None:
        return recorded
    linked_area = Area.get_or_create(session, area_id)
    if linked_area not in recorded.areas:
        recorded.areas.append(linked_area)
    return recorded
def record_opportunities(
    session: Session,
    cards_info: Iterable[CardInfo],
    area_id: Optional[int],
    setting_id: Optional[int],
) -> List[Storylet]:
    """Record every opportunity card; returns the storylets in input order.

    record_card performs database side effects, so an explicit loop is used.
    """
    recorded = []
    for info in cards_info:
        recorded.append(record_card(session, info, area_id, setting_id))
    return recorded
def record_area_storylets(
    session: Session,
    area_id: int,
    setting_id: int,
    storylets_info: Iterable[StoryletInfo]
) -> List[Storylet]:
    """Record the storylets listed for an area, remembering display order.

    :return: the recorded storylets, all flagged as top level
    """
    recorded = []
    for info in storylets_info:
        storylet = record_storylet(session, info, area_id, setting_id)
        storylet.is_top_level = True
        recorded.append(storylet)
    # We never see the *full* ordering of an area's storylets, only the slice
    # currently visible, so we store pairwise "comes after" relations instead
    # of absolute positions.  Remove any stale reversed relation first.
    for earlier, later in pairwise(recorded):
        if later in earlier.before:
            earlier.before.remove(later)
        if later not in earlier.after:
            earlier.after.append(later)
    return recorded
def record_storylet(
    session: Session,
    storylet_info: StoryletInfo,
    area_id: Optional[int],
    setting_id: Optional[int],
) -> Storylet:
    """Upsert a Storylet, record an observation, and link area/setting.

    Optional scraped attributes only overwrite the stored row when the
    scrape actually provided them.
    """
    storylet = Storylet.get_or_create(session, storylet_info.id)
    if storylet_info.canGoBack is not None:
        storylet.can_go_back = storylet_info.canGoBack
    if storylet_info.distribution is not None:
        storylet.distribution = StoryletDistribution(
            str(storylet_info.distribution)
        )
    if storylet_info.frequency is not None:
        storylet.frequency = StoryletFrequency(storylet_info.frequency)
    if storylet_info.urgency is not None:
        storylet.urgency = StoryletUrgency(storylet_info.urgency)
    storylet.category = StoryletCategory(storylet_info.category)
    storylet.image = get_or_cache_image(ImageType.ICON, storylet_info.image)
    # Mutable, observation-tracked parts are recorded as a new observation.
    record_observation(
        storylet.observations,
        StoryletObservation,
        name=storylet_info.name,
        description=fix_html(storylet_info.description),
        teaser=fix_html(storylet_info.teaser),
        quality_requirements=[
            record_quality_requirement(
                session, StoryletQualityRequirement, requirement_info
            )
            for requirement_info in storylet_info.qualityRequirements
        ],
    )
    if storylet_info.childBranches is not None:
        for branch_info in storylet_info.childBranches:
            storylet.branches.append(record_branch(session, branch_info))
    if area_id is not None:
        Area.get_or_create(session, area_id).storylets.append(storylet)
    if setting_id is not None:
        Setting.get_or_create(session, setting_id).storylets.append(storylet)
    return storylet
def record_card(
    session: Session,
    card_info: CardInfo,
    area_id: Optional[int],
    setting_id: Optional[int],
) -> Storylet:
    """Upsert the Storylet backing an opportunity card and observe it."""
    card = Storylet.get_or_create(session, card_info.eventId)
    card.category = StoryletCategory(card_info.category)
    card.image = get_or_cache_image(ImageType.ICON, card_info.image)
    card.is_card = True
    card.is_autofire = card_info.isAutofire
    card.stickiness = StoryletStickiness(card_info.stickiness)
    # Mutable, observation-tracked parts are recorded as a new observation.
    record_observation(
        card.observations,
        StoryletObservation,
        name=card_info.name,
        teaser=fix_html(card_info.teaser),
        quality_requirements=[
            record_quality_requirement(
                session, StoryletQualityRequirement, requirement_info
            )
            for requirement_info in card_info.qualityRequirements
        ],
    )
    if area_id is not None:
        Area.get_or_create(session, area_id).storylets.append(card)
    if setting_id is not None:
        Setting.get_or_create(session, setting_id).storylets.append(card)
    return card
def record_branch(session: Session, branch_info: BranchInfo) -> Branch:
    """Upsert a Branch and record an observation of its mutable parts."""
    branch = Branch.get_or_create(session, branch_info.id)
    # Stable attributes are written directly on the row.
    branch.action_cost = branch_info.actionCost
    branch.button_text = branch_info.buttonText
    branch.image = get_or_cache_image(ImageType.ICON, branch_info.image)
    branch.ordering = branch_info.ordering
    record_observation(
        branch.observations,
        BranchObservation,
        currency_cost=branch_info.currencyCost,
        description=fix_html(branch_info.description),
        name=branch_info.name,
        challenges=[
            record_challenge(info) for info in branch_info.challenges
        ],
        quality_requirements=[
            record_quality_requirement(
                session, BranchQualityRequirement, info
            )
            for info in branch_info.qualityRequirements
        ],
    )
    return branch
def record_challenge(challenge_info: ChallengeInfo) -> Challenge:
    """Build a new Challenge from scraped info (not added to any session here)."""
    recorded = Challenge()
    recorded.game_id = challenge_info.id
    recorded.category = challenge_info.category
    recorded.name = challenge_info.name
    recorded.description = fix_html(challenge_info.description)
    recorded.image = get_or_cache_image(
        ImageType.ICON_SMALL, challenge_info.image
    )
    recorded.target = challenge_info.targetNumber
    recorded.nature = ChallengeNature(challenge_info.nature)
    recorded.type = ChallengeType(challenge_info.type)
    return recorded
def record_quality_requirement(
    session: Session,
    cls: Type[T],
    quality_requirement_info: QualityRequirementInfo
) -> T:
    """Create a quality requirement row of type *cls* from scraped info.

    The requirement's bounds are recovered by matching the human-readable
    tooltip against the known pattern families, tried in order: none /
    at-least-one / minimum / maximum / exactly / range / wordy value list.
    Unknown tooltips are logged and kept verbatim as ``fallback_text``.

    :param session: active database session
    :param cls: concrete QualityRequirement subclass to instantiate
    :param quality_requirement_info: scraped requirement data
    :return: the new (unpersisted) requirement instance
    """
    quality_requirement = cls()
    quality_requirement.game_id = quality_requirement_info.id
    quality_requirement.image = get_or_cache_image(
        ImageType.ICON_SMALL, quality_requirement_info.image
    )
    quality_requirement.is_cost = quality_requirement_info.isCost
    quality_requirement.quality = record_quality(
        session,
        game_id=quality_requirement_info.qualityId,
        name=quality_requirement_info.qualityName,
        category=quality_requirement_info.category,
        nature=quality_requirement_info.nature
    )
    # Copy the quality ID for diffing purposes
    quality_requirement.quality_id = quality_requirement.quality.id

    quantity_min = quantity_max = required_values = None
    # Consistency fix: the tooltip was bound to a local but the attribute was
    # re-read in every branch below; use the local throughout.
    tooltip = quality_requirement_info.tooltip
    if match_any(TOOLTIPS_NONE, tooltip):
        quantity_max = 0
    elif match_any(TOOLTIPS_AT_LEAST_ONE, tooltip):
        quantity_min = 1
    elif match := match_any(TOOLTIPS_MINIMUM, tooltip):
        quantity_min = int(match.group("quantity_min"))
    elif match := match_any(TOOLTIPS_MAXIMUM, tooltip):
        quantity_max = int(match.group("quantity_max"))
    elif match := match_any(TOOLTIPS_EXACTLY, tooltip):
        quantity_min = quantity_max = int(match.group("quantity"))
    elif match := match_any(TOOLTIPS_RANGE, tooltip):
        quantity_min = int(match.group("quantity_min"))
        quantity_max = int(match.group("quantity_max"))
    elif match := match_any(TOOLTIPS_WORDY, tooltip):
        # The wordy list carries escaped quotes; unescape each entry.
        required_values = [
            req.replace(r'\"', '"')
            for req in TOOLTIPS_WORDY_ITEM.findall(match.group("requirements"))
        ]
    else:
        logging.warning(f"Unknown tooltip: {tooltip}")
        quality_requirement.fallback_text = fix_html(tooltip)
    quality_requirement.required_quantity_min = quantity_min
    quality_requirement.required_quantity_max = quantity_max
    quality_requirement.required_values = json.dumps(required_values) \
        if required_values else None
    return quality_requirement
def record_quality(
    session: Session,
    game_id: int,
    name: str,
    category: str,
    nature: str,
    description: Optional[str] = None,
    storylet_id: Optional[int] = None,
) -> Quality:
    """Upsert a Quality row; optional fields are only written when provided.

    :return: the created or updated Quality
    """
    recorded = Quality.get_or_create(session, game_id)
    recorded.name = name
    recorded.category = category
    recorded.nature = QualityNature(nature)
    if description is not None:
        recorded.description = description
    if storylet_id is not None:
        recorded.storylet = Storylet.get_or_create(session, storylet_id)
    return recorded
def record_outcome(
user: User,
session: Session,
branch_id: int,
outcome_info: Optional[StoryletBranchOutcomeInfo],
messages: Optional[List[StoryletBranchOutcomeMessageInfo]],
redirect: Optional[StoryletInfo],
area_id: Optional[int],
setting_id: Optional[int],
) -> OutcomeObservation:
if messages is None:
messages = []
branch = Branch.get_or_create(session, branch_id)
redirect_area = redirect_setting = None
outcome_messages = []
for message_info in messages:
if message_info.message is None:
continue
outcome_message = record_outcome_message(
user, session, message_info
)
outcome_messages.append(outcome_message)
if message_info.area:
redirect_area = record_area(session, message_info.area)
elif message_info.setting:
redirect_setting = record_setting(session, message_info.setting)
# Create associations between areas and settings
# If the player is redirected to both a new area and a setting, the two will
# be linked; otherwise, the old area/setting will be associated with the
# redirect
if redirect_area and redirect_setting:
if redirect_setting not in redirect_area.settings:
redirect_area.settings.append(redirect_setting)
elif redirect_area:
if setting_id is not None:
setting = Setting.get_or_create(session, setting_id)
if | |
source_1)
source_2 = _AppendType(add_type, source_2)
if _UnsignedType(add_type):
self.EmitOp3('uaddw', destination, source_1, source_2)
else:
self.EmitOp3('saddw', destination, source_1, source_2)
def EmitVCvt(self, cvt_to, cvt_from, destination, source):
if cvt_to == 'f32' and cvt_from == 's32':
self.EmitOp2('scvtf', _AppendType('s32', destination),
_AppendType('s32', source))
elif cvt_to == 'f32' and cvt_from == 'u32':
self.EmitOp2('ucvtf', _AppendType('u32', destination),
_AppendType('u32', source))
else:
raise ArgumentError('Convert not supported, to: %s from: %s' % (cvt_to,
cvt_from))
def EmitVDup(self, dup_type, destination, source):
if (isinstance(source, _GeneralRegister) or
isinstance(source, _MappedParameter)):
self.EmitOp2('dup', _AppendType(dup_type, destination), _Cast(
_TypeBits(dup_type), source))
else:
self.EmitOp2('dup', _AppendType(dup_type, destination),
_AppendType(dup_type, source))
def EmitVMov(self, mov_type, destination, source):
if isinstance(source, _ImmediateConstant):
self.EmitOp2('movi', _AppendType(mov_type, destination), source)
elif (isinstance(source, _GeneralRegister) or
isinstance(source, _MappedParameter)):
self.EmitOp2('mov', _AppendType(mov_type, destination), _Cast(
_TypeBits(mov_type), source))
else:
self.EmitOp2('mov', _AppendType(8, destination), _AppendType(8, source))
def EmitVQmovn(self, mov_type, destination, source):
narrow_type = _NarrowType(mov_type)
if destination.register_bits * 2 == source.register_bits:
self.EmitOp2('sqxtn', _AppendType(narrow_type, destination),
_AppendType(mov_type, source))
elif destination.register_bits == source.register_bits:
self.EmitOp2('sqxtn', _AppendType(narrow_type,
_Cast(destination.register_bits / 2,
destination)),
_AppendType(mov_type, source))
def EmitVQmovn2(self, mov_type, destination, source_1, source_2):
narrow_type = _NarrowType(mov_type)
if (destination.register_bits != source_1.register_bits or
destination.register_bits != source_2.register_bits):
raise ArgumentError('Register sizes do not match.')
self.EmitOp2('sqxtn', _AppendType(narrow_type,
_Cast(destination.register_bits / 2,
destination)),
_AppendType(mov_type, source_1))
self.EmitOp2('sqxtn2', _AppendType(narrow_type, destination),
_AppendType(mov_type, source_2))
def EmitVQmovun(self, mov_type, destination, source):
narrow_type = _NarrowType(mov_type)
if destination.register_bits * 2 == source.register_bits:
self.EmitOp2('sqxtun', _AppendType(narrow_type, destination),
_AppendType(mov_type, source))
elif destination.register_bits == source.register_bits:
self.EmitOp2('sqxtun', _AppendType(narrow_type,
_Cast(destination.register_bits / 2,
destination)),
_AppendType(mov_type, source))
def EmitVMul(self, mul_type, destination, source_1, source_2):
destination, source_1, source_2 = _MakeCompatibleDown(destination, source_1,
source_2)
if _FloatType(mul_type):
self.EmitOp3('fmul', _AppendType(mul_type, destination),
_AppendType(mul_type, source_1),
_AppendType(mul_type, source_2))
else:
self.EmitOp3('mul', _AppendType(mul_type, destination),
_AppendType(mul_type, source_1),
_AppendType(mul_type, source_2))
def EmitVMulScalar(self, mul_type, destination, source_1, source_2):
self.EmitOp3('mul', _AppendType(mul_type, destination),
_AppendType(mul_type, source_1),
_AppendType(mul_type, source_2))
def EmitVMull(self, mul_type, destination, source_1, source_2):
wide_type = _WideType(mul_type)
if _UnsignedType(mul_type):
self.EmitOp3('umull', _AppendType(wide_type, destination),
_AppendType(mul_type, source_1),
_AppendType(mul_type, source_2))
else:
self.EmitOp3('smull', _AppendType(wide_type, destination),
_AppendType(mul_type, source_1),
_AppendType(mul_type, source_2))
def EmitVPadd(self, add_type, destination, source_1, source_2):
self.EmitOp3('addp', _AppendType(add_type, destination),
_AppendType(add_type, source_1),
_AppendType(add_type, source_2))
def EmitVPaddl(self, add_type, destination, source):
wide_type = _WideType(add_type)
if _UnsignedType(add_type):
self.EmitOp2('uaddlp', _AppendType(wide_type, destination),
_AppendType(add_type, source))
else:
self.EmitOp2('saddlp', _AppendType(wide_type, destination),
_AppendType(add_type, source))
def EmitVPadal(self, add_type, destination, source):
wide_type = _WideType(add_type)
if _UnsignedType(add_type):
self.EmitOp2('uadalp', _AppendType(wide_type, destination),
_AppendType(add_type, source))
else:
self.EmitOp2('sadalp', _AppendType(wide_type, destination),
_AppendType(add_type, source))
  def EmitVLoad(self, load_no, load_type, destination, source):
    """Load a single register from memory; thin wrapper over EmitVLoadA."""
    self.EmitVLoadA(load_no, load_type, [destination], source)
def EmitVLoadA(self, load_no, load_type, destinations, source):
if source.dereference_increment:
increment = sum([_LoadStoreSize(destination) for destination in
destinations]) / 8
self.EmitVLoadAPostIncrement(load_no, load_type, destinations, source,
self.ImmediateConstant(increment))
else:
self.EmitVLoadAPostIncrement(load_no, load_type, destinations, source,
None)
def EmitVLoadAPostIncrement(self, load_no, load_type, destinations, source,
increment):
"""Generate assembly to load memory to registers and increment source."""
if len(destinations) == 1 and destinations[0].lane is -1:
destination = '{%s}' % _AppendType(load_type, destinations[0])
if increment:
self.EmitOp3('ld%dr' % load_no, destination, source, increment)
else:
self.EmitOp2('ld%dr' % load_no, destination, source)
return
destination_list = _RegisterList(load_type, destinations)
if increment:
self.EmitOp3('ld%d' % load_no, destination_list, source, increment)
else:
self.EmitOp2('ld%d' % load_no, destination_list, source)
def EmitVLoadAE(self,
load_type,
elem_count,
destinations,
source,
alignment=None):
"""Generate assembly to load an array of elements of given size."""
bits_to_load = load_type * elem_count
min_bits = min([destination.register_bits for destination in destinations])
max_bits = max([destination.register_bits for destination in destinations])
if min_bits is not max_bits:
raise ArgumentError('Cannot mix double and quad loads.')
if len(destinations) * min_bits < bits_to_load:
raise ArgumentError('To few destinations: %d to load %d bits.' %
(len(destinations), bits_to_load))
leftover_loaded = 0
while bits_to_load > 0:
if bits_to_load >= 4 * min_bits:
self.EmitVLoadA(1, 32, destinations[:4],
self.DereferenceIncrement(source, alignment))
bits_to_load -= 4 * min_bits
destinations = destinations[4:]
elif bits_to_load >= 3 * min_bits:
self.EmitVLoadA(1, 32, destinations[:3],
self.DereferenceIncrement(source, alignment))
bits_to_load -= 3 * min_bits
destinations = destinations[3:]
elif bits_to_load >= 2 * min_bits:
self.EmitVLoadA(1, 32, destinations[:2],
self.DereferenceIncrement(source, alignment))
bits_to_load -= 2 * min_bits
destinations = destinations[2:]
elif bits_to_load >= min_bits:
self.EmitVLoad(1, 32, destinations[0],
self.DereferenceIncrement(source, alignment))
bits_to_load -= min_bits
destinations = destinations[1:]
elif bits_to_load >= 64:
self.EmitVLoad(1, 32, _Cast(64, destinations[0]),
self.DereferenceIncrement(source))
bits_to_load -= 64
leftover_loaded += 64
elif bits_to_load >= 32:
self.EmitVLoad(1, 32,
self.Lane(32, destinations[0], leftover_loaded / 32),
self.DereferenceIncrement(source))
bits_to_load -= 32
leftover_loaded += 32
elif bits_to_load >= 16:
self.EmitVLoad(1, 16,
self.Lane(16, destinations[0], leftover_loaded / 16),
self.DereferenceIncrement(source))
bits_to_load -= 16
leftover_loaded += 16
elif bits_to_load is 8:
self.EmitVLoad(1, 8, self.Lane(8, destinations[0], leftover_loaded / 8),
self.DereferenceIncrement(source))
bits_to_load -= 8
leftover_loaded += 8
else:
raise ArgumentError('Wrong leftover: %d' % bits_to_load)
def EmitVLoadE(self, load_type, count, destination, source, alignment=None):
self.EmitVLoadAE(load_type, count, [destination], source, alignment)
def EmitVLoadAllLanes(self, load_no, load_type, destination, source):
new_destination = destination.Copy()
new_destination.lane = -1
new_destination.lane_bits = load_type
self.EmitVLoad(load_no, load_type, new_destination, source)
def EmitPld(self, load_address_register):
self.EmitOp2('prfm', 'pldl1keep', '[%s]' % load_address_register)
def EmitPldOffset(self, load_address_register, offset):
self.EmitOp2('prfm', 'pldl1keep',
'[%s, %s]' % (load_address_register, offset))
def EmitVShl(self, shift_type, destination, source, shift):
self.EmitOp3('sshl', _AppendType(shift_type, destination),
_AppendType(shift_type, source), _AppendType('i32', shift))
def EmitVStore(self, store_no, store_type, source, destination):
self.EmitVStoreA(store_no, store_type, [source], destination)
def EmitVStoreA(self, store_no, store_type, sources, destination):
if destination.dereference_increment:
increment = sum([_LoadStoreSize(source) for source in sources]) / 8
self.EmitVStoreAPostIncrement(store_no, store_type, sources, destination,
self.ImmediateConstant(increment))
else:
self.EmitVStoreAPostIncrement(store_no, store_type, sources, destination,
None)
def EmitVStoreAPostIncrement(self, store_no, store_type, sources, destination,
increment):
source_list = _RegisterList(store_type, sources)
if increment:
self.EmitOp3('st%d' % store_no, source_list, destination, increment)
else:
self.EmitOp2('st%d' % store_no, source_list, destination)
def EmitVStoreAE(self,
store_type,
elem_count,
sources,
destination,
alignment=None):
"""Generate assembly to store an array of elements of given size."""
bits_to_store = store_type * elem_count
min_bits = min([source.register_bits for source in sources])
max_bits = max([source.register_bits for source in sources])
if min_bits is not max_bits:
raise ArgumentError('Cannot mix double and quad stores.')
if len(sources) * min_bits < bits_to_store:
raise ArgumentError('To few destinations: %d to store %d bits.' %
(len(sources), bits_to_store))
leftover_stored = 0
while bits_to_store > 0:
if bits_to_store >= 4 * min_bits:
self.EmitVStoreA(1, 32, sources[:4],
self.DereferenceIncrement(destination, alignment))
bits_to_store -= 4 * min_bits
sources = sources[4:]
elif bits_to_store >= 3 * min_bits:
self.EmitVStoreA(1, 32, sources[:3],
self.DereferenceIncrement(destination, alignment))
bits_to_store -= 3 * min_bits
sources = sources[3:]
elif bits_to_store >= 2 * min_bits:
self.EmitVStoreA(1, 32, sources[:2],
self.DereferenceIncrement(destination, alignment))
bits_to_store -= 2 * min_bits
sources = sources[2:]
elif bits_to_store >= min_bits:
self.EmitVStore(1, 32, sources[0],
self.DereferenceIncrement(destination, alignment))
bits_to_store -= min_bits
sources = sources[1:]
elif bits_to_store >= 64:
self.EmitVStore(1, 32, _Cast(64, sources[0]),
self.DereferenceIncrement(destination, alignment))
bits_to_store -= 64
leftover_stored += 64
elif bits_to_store >= 32:
self.EmitVStore(1, 32, self.Lane(32, sources[0], leftover_stored / 32),
self.DereferenceIncrement(destination))
bits_to_store -= 32
leftover_stored += 32
elif bits_to_store >= 16:
self.EmitVStore(1, 16, self.Lane(16, sources[0], leftover_stored / 16),
self.DereferenceIncrement(destination))
bits_to_store -= 16
leftover_stored += 16
elif bits_to_store >= 8:
self.EmitVStore(1, 8, self.Lane(8, sources[0], leftover_stored / 8),
self.DereferenceIncrement(destination))
bits_to_store -= 8
leftover_stored += 8
else:
raise ArgumentError('Wrong leftover: %d' % bits_to_store)
def EmitVStoreE(self, store_type, count, source, destination, alignment=None):
self.EmitVStoreAE(store_type, count, [source], destination, alignment)
def EmitVStoreOffset(self, store_no, store_type, source, destination, offset):
self.EmitVStoreOffsetA(store_no, store_type, [source], destination, offset)
def EmitVStoreOffsetA(self, store_no, store_type, sources, destination,
offset):
self.EmitOp3('st%d' % store_no, _RegisterList(store_type, sources),
destination, offset)
def EmitVStoreOffsetE(self, store_type, count, source, destination, offset):
if store_type is not 32:
raise ArgumentError('Unsupported store_type: %d' % store_type)
if count == 1:
self.EmitVStoreOffset(1, 32, self.Lane(32, source, 0),
self.Dereference(destination, None), offset)
elif count == 2:
self.EmitVStoreOffset(1, 32, _Cast(64, source),
self.Dereference(destination, None), offset)
elif count == 3:
self.EmitVStore(1, 32, _Cast(64, source),
self.DereferenceIncrement(destination, None))
self.EmitVStoreOffset(1, 32, self.Lane(32, source, 2),
self.Dereference(destination, None), offset)
self.EmitSub(destination, destination, self.ImmediateConstant(8))
elif count == 4:
self.EmitVStoreOffset(1, 32, source, self.Dereference(destination, None),
offset)
else:
raise ArgumentError('To many elements: %d' % count)
def EmitVSumReduce(self, reduce_type, elem_count, reduce_count, destinations,
sources):
"""Generate assembly to perform n-fold horizontal sum reduction."""
if reduce_type is not 'u32':
raise ArgumentError('Unsupported reduce: %s' % reduce_type)
if (elem_count + 3) / 4 > len(destinations):
raise ArgumentError('To few destinations: %d (%d needed)' %
(len(destinations), (elem_count + 3) / 4))
if elem_count * reduce_count > len(sources) * 4:
raise ArgumentError('To few sources: %d' % len(sources))
if reduce_count <= 1:
raise ArgumentError('Unsupported reduce_count: %d' % reduce_count)
sources = [_Cast(128, source) for source in sources]
destinations = [_Cast(128, destination) for destination in destinations]
while reduce_count > 1:
if len(sources) % 2 == 1:
sources.append(sources[-1])
if reduce_count == 2:
for i in range(len(destinations)):
self.EmitVPadd(reduce_type, destinations[i], sources[2 * i],
sources[2 * i + 1])
return
else:
sources_2 = []
for i in range(len(sources) / 2):
self.EmitVPadd(reduce_type, sources[2 * i], sources[2 * i],
sources[2 * i + 1])
sources_2.append(sources[2 * i])
reduce_count /= 2
sources = sources_2
def Dereference(self, value, unused_alignment=None):
# (unreadable chunk-boundary artifact)
%(veqinstr)s(compslice, datasliceright);
// Compare the results of the SIMD operation.
if ((__builtin_ia32_pmovmskb128((v16qi) resultslice) != 0xffff)) {
return 0;
}''',
'lt' : '''// Make sure they're not equal.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}
// Find the maximum values.
compslice = %(vmaxinstr)s(datasliceleft, datasliceright);
// If this is different from our compare parameter, then the test
// has failed.
resultslice = %(veqinstr)s(compslice, datasliceright);
// Compare the results of the SIMD operation.
if ((__builtin_ia32_pmovmskb128((v16qi) resultslice) != 0xffff)) {
return 0;
}''',
'ne' : '''// Compare for equality.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
// Compare the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}''',
}
# param_num_arr
# Comparison-op name -> C template for the number-vs-array form, unsigned
# integer types. Placeholders (%(veqinstr)s, %(vmininstr)s, %(vmaxinstr)s)
# are filled with SSE builtin names via %-substitution. SSE has no unsigned
# pcmpgt, so the ordered comparisons go through min/max plus equality.
SIMD_x86_uint_num_arr = {
'eq' : '''// Compare the slices.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'ge' : '''// Find the minimum values.
compslice = %(vmininstr)s(datasliceleft, datasliceright);
// If this is different from our compare parameter, then the test
// has failed.
resultslice = %(veqinstr)s(compslice, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'gt' : '''// Make sure they're not equal.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}
// Find the minimum values.
compslice = %(vmininstr)s(datasliceleft, datasliceright);
// If this is different from our compare parameter, then the test
// has failed.
resultslice = %(veqinstr)s(compslice, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'le' : '''// Find the maximum values.
compslice = %(vmaxinstr)s(datasliceleft, datasliceright);
// If this is different from our compare parameter, then the test
// has failed.
resultslice = %(veqinstr)s(compslice, datasliceright);
// Compare the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'lt' : '''// Make sure they're not equal.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}
// Find the maximum values.
compslice = %(vmaxinstr)s(datasliceleft, datasliceright);
// If this is different from our compare parameter, then the test
// has failed.
resultslice = %(veqinstr)s(compslice, datasliceright);
// Compare the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'ne' : '''// Compare for equality.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
// Compare the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}''',
}
# param_arr_arr
# Comparison-op name -> C template for the array-vs-array form, unsigned
# integer types. Same min/max + equality strategy as the other unsigned
# tables (no unsigned pcmpgt in SSE).
SIMD_x86_uint_arr_arr = {
'eq' : '''// Compare the slices.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'ge' : '''// Find the minimum values.
compslice = %(vmininstr)s(datasliceleft, datasliceright);
// If this is different from our compare parameter, then the test
// has failed.
resultslice = %(veqinstr)s(compslice, datasliceright);
// Compare the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'gt' : '''// Make sure they're not equal.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}
// Find the minimum values.
compslice = %(vmininstr)s(datasliceleft, datasliceright);
// If this is different from our compare parameter, then the test
// has failed.
resultslice = %(veqinstr)s(compslice, datasliceright);
// Compare the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'le' : '''// Find the maximum values.
compslice = %(vmaxinstr)s(datasliceleft, datasliceright);
// If this is different from our compare parameter, then the test
// has failed.
resultslice = %(veqinstr)s(compslice, datasliceright);
// Compare the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'lt' : '''// Make sure they're not equal.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}
// Find the maximum values.
compslice = %(vmaxinstr)s(datasliceleft, datasliceright);
// If this is different from our compare parameter, then the test
// has failed.
resultslice = %(veqinstr)s(compslice, datasliceright);
// Compare the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'ne' : '''// Compare for equality.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
// Compare the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}''',
}
# ==============================================================================
# SIMD code for x86. This set covers signed integer operations only.
# param_arr_num
# Signed types can use pcmpgt (%(vgtinstr)s) directly, so 'gt'/'le' need no
# min/max detour; 'ge' still goes via min + equality.
SIMD_x86_int_arr_num = {
'eq' : '''// Compare the slices.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'ge' : '''// Find the minimum values.
compslice = %(vmininstr)s(datasliceleft, datasliceright);
// If this is different from our compare parameter, then the test
// has failed.
resultslice = %(veqinstr)s(compslice, datasliceright);
// Check the results of the SIMD operation.
if ((__builtin_ia32_pmovmskb128((v16qi) resultslice) != 0xffff)) {
return 0;
}''',
'gt' : '''// Compare the slices.
resultslice = %(vgtinstr)s(datasliceleft, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'le' : '''// Compare the slices.
resultslice = %(vgtinstr)s(datasliceleft, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}''',
'lt' : '''// Make sure they're not equal.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}
// Make sure they're not greater than.
resultslice = %(vgtinstr)s(datasliceleft, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}''',
'ne' : '''// Compare for equality.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
// Compare the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}''',
}
# param_num_arr
# Number-vs-array form, signed integer types; same strategy as the
# arr_num signed table above.
SIMD_x86_int_num_arr = {
'eq' : '''// Compare the slices.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'ge' : '''// Find the minimum values.
compslice = %(vmininstr)s(datasliceleft, datasliceright);
// If this is different from our compare parameter, then the test
// has failed.
resultslice = %(veqinstr)s(compslice, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'gt' : '''// Compare the slices.
resultslice = %(vgtinstr)s(datasliceleft, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'le' : '''// Compare the slices.
resultslice = %(vgtinstr)s(datasliceleft, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}''',
'lt' : '''// Make sure they're not equal.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}
// Make sure they're not greater than.
resultslice = %(vgtinstr)s(datasliceleft, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}''',
'ne' : '''// Compare for equality.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
// Compare the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}''',
}
# param_arr_arr
# Array-vs-array form, signed integer types.
SIMD_x86_int_arr_arr = {
'eq' : '''// Compare the slices.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'ge' : '''// Find the minimum values.
compslice = %(vmininstr)s(datasliceleft, datasliceright);
// If this is different from our compare parameter, then the test
// has failed.
resultslice = %(veqinstr)s(compslice, datasliceright);
// Compare the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'gt' : '''// Compare the slices.
resultslice = %(vgtinstr)s(datasliceleft, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0xffff)) {
return 0;
}''',
'le' : '''// Compare the slices.
resultslice = %(vgtinstr)s(datasliceleft, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}''',
'lt' : '''// Make sure they're not equal.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}
// Make sure they're not greater than.
resultslice = %(vgtinstr)s(datasliceleft, datasliceright);
// Check the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}''',
'ne' : '''// Compare for equality.
resultslice = %(veqinstr)s(datasliceleft, datasliceright);
// Compare the results of the SIMD operation.
if (!(__builtin_ia32_pmovmskb128((v16qi) resultslice) == 0x0000)) {
return 0;
}''',
}
# ==============================================================================
# Templates for x86 SIMD.
# Bundle the per-call-form tables above; selected later by array type code
# ('b'/'h'/'i' signed, 'B'/'H'/'I' unsigned).
x86_unsigned_templates = {'arr_num' : SIMD_x86_uint_arr_num,
'num_arr' : SIMD_x86_uint_num_arr,
'arr_arr' : SIMD_x86_uint_arr_arr}
x86_signed_templates = {'arr_num' : SIMD_x86_int_arr_num,
'num_arr' : SIMD_x86_int_num_arr,
'arr_arr' : SIMD_x86_int_arr_arr}
SIMD_x86_SIMD_templates = {
'b' : x86_signed_templates,
'B' : x86_unsigned_templates,
'h' : x86_signed_templates,
'H' : x86_unsigned_templates,
'i' : x86_signed_templates,
'I' : x86_unsigned_templates,
}
# x86 SIMD attributes.
# GCC vector type used for each array type code (16 bytes per vector).
simdattr_x86 = {
'b' : 'v16qi',
'B' : 'v16qi',
'h' : 'v8hi',
'H' : 'v8hi',
'i' : 'v4si',
'I' : 'v4si',
'f' : 'v4sf',
'd' : 'v2df',
}
# x86 SIMD load instructions.
# Note: these are open call prefixes; the generator appends the address
# argument and closing parenthesis.
vldinstr_x86 = {
'b' : '(v16qi) __builtin_ia32_lddqu((char *) ',
'B' : '(v16qi) __builtin_ia32_lddqu((char *) ',
'h' : '(v8hi) __builtin_ia32_lddqu((char *) ',
'H' : '(v8hi) __builtin_ia32_lddqu((char *) ',
'i' : '(v4si) __builtin_ia32_lddqu((char *) ',
'I' : '(v4si) __builtin_ia32_lddqu((char *) ',
'f' : '(v4sf) __builtin_ia32_loadups( ',
'd' : '(v2df) __builtin_ia32_loadupd( ',
}
# Per-width equality compare builtins (signed/unsigned share the same one).
veqinstr_x86 = {
'b' : '__builtin_ia32_pcmpeqb128',
'B' : '__builtin_ia32_pcmpeqb128',
'h' : '__builtin_ia32_pcmpeqw128',
'H' : '__builtin_ia32_pcmpeqw128',
'i' : '__builtin_ia32_pcmpeqd128',
'I' : '__builtin_ia32_pcmpeqd128',
}
# Per-width signed/unsigned minimum builtins.
vmininstr_x86 = {
'b' : '__builtin_ia32_pminsb128',
'B' : '__builtin_ia32_pminub128',
'h' : '__builtin_ia32_pminsw128',
'H' : '__builtin_ia32_pminuw128',
'i' : '__builtin_ia32_pminsd128',
'I' : '__builtin_ia32_pminud128',
}
# Per-width signed/unsigned maximum builtins.
vmaxinstr_x86 = {
'b' : '__builtin_ia32_pmaxsb128',
'B' : '__builtin_ia32_pmaxub128',
'h' : '__builtin_ia32_pmaxsw128',
'H' : '__builtin_ia32_pmaxuw128',
'i' : '__builtin_ia32_pmaxsd128',
'I' : '__builtin_ia32_pmaxud128',
}
# Greater-than compare builtins; empty for unsigned codes because SSE has
# no unsigned pcmpgt (unsigned templates use min/max + eq instead).
vgtinstr_x86 = {
'b' : '__builtin_ia32_pcmpgtb128',
'B' : '',
'h' : '__builtin_ia32_pcmpgtw128',
'H' : '',
'i' : '__builtin_ia32_pcmpgtd128',
'I' : '',
}
# Which compare operations need an additional vector for intermediate results.
# This depends both upon array type and function.
compslice = ', compslice'
compslice_uint_x86 = {
'eq' : '',
'ge' : compslice,
'gt' : compslice,
'le' : compslice,
'lt' : compslice,
'ne' : ''
}
compslice_int_x86 = {
'eq' : '',
'ge' : compslice,
'gt' : '',
'le' : '',
'lt' : '',
'ne' : ''
}
SIMD_x86_compslice = {
'b' : compslice_int_x86,
'B' : compslice_uint_x86,
'h' : compslice_int_x86,
'H' : compslice_uint_x86,
'i' : compslice_int_x86,
'I' : compslice_uint_x86,
}
# ==============================================================================
# SIMD | |
"""
Module that contains many useful utilities
for validating data or function arguments
"""
from typing import Iterable, Union
import warnings
import numpy as np
from my_happy_pandas.core.dtypes.common import is_bool
def _check_arg_length(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether 'args' has length of at most 'compat_args'. Raises
a TypeError if that is not the case, similar to in Python when a
function is called with too many arguments.
"""
if max_fname_arg_count < 0:
raise ValueError("'max_fname_arg_count' must be non-negative")
if len(args) > len(compat_args):
max_arg_count = len(compat_args) + max_fname_arg_count
actual_arg_count = len(args) + max_fname_arg_count
argument = "argument" if max_arg_count == 1 else "arguments"
raise TypeError(
f"{fname}() takes at most {max_arg_count} {argument} "
f"({actual_arg_count} given)"
)
def _check_for_default_values(fname, arg_val_dict, compat_args):
    """
    Check that the keys in `arg_val_dict` are mapped to their
    default values as specified in `compat_args`.
    Note that this function is to be called only when it has been
    checked that arg_val_dict.keys() is a subset of compat_args

    Raises
    ------
    ValueError
        If any value in `arg_val_dict` differs from the default recorded
        for the same key in `compat_args`.
    """
    for key in arg_val_dict:
        # try checking equality directly with '=' operator,
        # as comparison may have been overridden for the left
        # hand object
        try:
            v1 = arg_val_dict[key]
            v2 = compat_args[key]
            # check for None-ness otherwise we could end up
            # comparing a numpy array vs None
            if (v1 is not None and v2 is None) or (v1 is None and v2 is not None):
                match = False
            else:
                match = v1 == v2
            # NOTE: a non-bool 'match' (e.g. an elementwise array result)
            # deliberately raises ValueError here so the except branch
            # below falls back to an identity comparison.
            if not is_bool(match):
                raise ValueError("'match' is not a boolean")
            # could not compare them directly, so try comparison
            # using the 'is' operator
        except ValueError:
            match = arg_val_dict[key] is compat_args[key]
        if not match:
            raise ValueError(
                f"the '{key}' parameter is not supported in "
                f"the pandas implementation of {fname}()"
            )
def validate_args(fname, args, max_fname_arg_count, compat_args):
    """
    Validate the `*args` tuple passed to a compatibility shim.

    Ensures `args` holds at most ``len(compat_args)`` values and that every
    value equals the corresponding default recorded in `compat_args`.

    Parameters
    ----------
    fname : str
        Name of the function receiving `args`; used in error messages.
    args : tuple
        The positional arguments that were passed in.
    max_fname_arg_count : int
        Number of arguments `fname` accepts besides `args`; must be
        non-negative. Only affects the counts shown in error messages.
    compat_args : dict
        Ordered mapping of allowed argument names to their default values.
        Being a dict (insertion-ordered) keeps the original keyword order
        even for buggy numpy signatures that pass keywords positionally.

    Raises
    ------
    TypeError
        If `args` contains more values than there are `compat_args`.
    ValueError
        If any value in `args` differs from its default in `compat_args`.
    """
    _check_arg_length(fname, args, max_fname_arg_count, compat_args)
    # Pair each positional value with its parameter name so the default
    # check can report exactly which parameter is unsupported.
    kwargs = dict(zip(compat_args, args))
    _check_for_default_values(fname, kwargs, compat_args)
def _check_for_invalid_keys(fname, kwargs, compat_args):
"""
Checks whether 'kwargs' contains any keys that are not
in 'compat_args' and raises a TypeError if there is one.
"""
# set(dict) --> set of the dictionary's keys
diff = set(kwargs) - set(compat_args)
if diff:
bad_arg = list(diff)[0]
raise TypeError(f"{fname}() got an unexpected keyword argument '{bad_arg}'")
def validate_kwargs(fname, kwargs, compat_args):
    """
    Validate the `**kwargs` dict passed to a compatibility shim.

    Every key must appear in `compat_args`, and every value must equal the
    default recorded there.

    Parameters
    ----------
    fname : str
        Name of the function receiving `kwargs`; used in error messages.
    kwargs : dict
        The keyword arguments that were passed in.
    compat_args : dict
        Mapping of allowed keys to their default values.

    Raises
    ------
    TypeError
        If `kwargs` contains a key not present in `compat_args`.
    ValueError
        If a value differs from its default in `compat_args`.
    """
    # Snapshot before the key check so the default-value pass operates on
    # exactly what the caller handed in.
    kwds = dict(kwargs)
    _check_for_invalid_keys(fname, kwargs, compat_args)
    _check_for_default_values(fname, kwds, compat_args)
def validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_args):
    """
    Validate the combined `*args` / `**kwargs` of a compatibility shim.

    Checks the total argument count, rejects names supplied both
    positionally and by keyword, and finally verifies every value against
    the defaults recorded in `compat_args`.

    Parameters
    ----------
    fname : str
        Name of the function receiving the arguments; used in messages.
    args : tuple
        The positional arguments that were passed in.
    kwargs : dict
        The keyword arguments that were passed in. NOTE: this dict is
        mutated in place — positional values are merged into it.
    max_fname_arg_count : int
        Number of arguments `fname` accepts besides `args`; must be
        non-negative. Only affects the counts shown in error messages.
    compat_args : dict
        Ordered mapping of allowed argument names to their default values.

    Raises
    ------
    TypeError
        If more arguments than `compat_args` entries were given, if a key
        is not in `compat_args`, or if a name is supplied both ways.
    ValueError
        If any value differs from its default in `compat_args`.

    See Also
    --------
    validate_args : Purely args validation.
    validate_kwargs : Purely kwargs validation.
    """
    # Total argument count (positional + keyword) may not exceed compat_args.
    _check_arg_length(
        fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args
    )
    # Positional values map onto parameter names in compat_args order; a
    # name supplied both ways is an error, mirroring real Python calls.
    positional = dict(zip(compat_args, args))
    for name in positional:
        if name in kwargs:
            raise TypeError(
                f"{fname}() got multiple values for keyword argument '{name}'"
            )
    # Intentionally mutates the caller's kwargs, matching prior behavior.
    kwargs.update(positional)
    validate_kwargs(fname, kwargs, compat_args)
def validate_bool_kwarg(value, arg_name):
    """ Ensures that argument passed in arg_name is of type bool. """
    # None is accepted alongside real booleans (the usual "not specified"
    # sentinel for these keyword arguments).
    if is_bool(value) or value is None:
        return value
    raise ValueError(
        f'For argument "{arg_name}" expected type bool, received '
        f"type {type(value).__name__}."
    )
def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
"""
Argument handler for mixed index, columns / axis functions
In an attempt to handle both `.method(index, columns)`, and
`.method(arg, axis=.)`, we have to do some bad things to argument
parsing. This translates all arguments to `{index=., columns=.}` style.
Parameters
----------
data : DataFrame
args : tuple
All positional arguments from the user
kwargs : dict
All keyword arguments from the user
arg_name, method_name : str
Used for better error messages
Returns
-------
kwargs : dict
A dictionary of keyword arguments. Doesn't modify ``kwargs``
inplace, so update them with the return value here.
Examples
--------
>>> df._validate_axis_style_args((str.upper,), {'columns': id},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
This emits a warning
>>> df._validate_axis_style_args((str.upper, id), {},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
"""
# TODO: Change to keyword-only args and remove all this
out = {}
# Goal: fill 'out' with index/columns-style arguments
# like out = {'index': foo, 'columns': bar}
# Start by validating for consistency
if "axis" in kwargs and any(x in kwargs for x in data._AXIS_TO_AXIS_NUMBER):
msg = "Cannot specify both 'axis' and any of 'index' or 'columns'."
raise TypeError(msg)
# First fill with explicit values provided by the user...
if arg_name in kwargs:
if args:
msg = f"{method_name} got multiple values for argument '{arg_name}'"
raise TypeError(msg)
axis = data._get_axis_name(kwargs.get("axis", 0))
out[axis] = kwargs[arg_name]
# More user-provided arguments, now from kwargs
for k, v in kwargs.items():
try:
ax = data._get_axis_name(k)
except ValueError:
pass
else:
out[ax] = v
# All user-provided kwargs have been handled now.
# Now we supplement with positional arguments, emitting warnings
# when there's ambiguity and raising when there's conflicts
if len(args) == 0:
pass # It's up to the function to decide if this is valid
elif len(args) == 1:
axis = data._get_axis_name(kwargs.get("axis", 0))
out[axis] = args[0]
elif len(args) == 2:
if "axis" in kwargs:
# Unambiguously wrong
msg = "Cannot specify both 'axis' and any of 'index' or 'columns'"
raise TypeError(msg)
msg = (
f"Interpreting call\n\t'.{method_name}(a, b)' as "
f"\n\t'.{method_name}(index=a, columns=b)'.\nUse named "
"arguments | |
# filename: datastore_utils.py
#Copyright July 2021 Ontocord LLC. Licensed under Apache v2 https://www.apache.org/licenses/LICENSE-2.0
#datastore_utils.py
# Standard library
import copy
import glob
import io
import json
import os
import pickle
import shutil
import threading
import time
from collections.abc import Iterable
from dataclasses import dataclass, field, fields
from pathlib import Path
from typing import Any, ClassVar, Dict, List, Optional, Sequence, Tuple, Union
from typing import TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Iterator, List, Optional, Tuple, Union

# Third-party
import dataset
import fsspec.compression
import indexed_gzip as igzip
import numpy as np
import pandas as pd
import pyarrow as pa
import six
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from six.moves.urllib.parse import parse_qs, urlparse
from sqlalchemy import create_engine
from sqlalchemy.pool import StaticPool
from torch import nn
#import zstandard, io
#from gzip_stream import GZIPCompressedStream
#from transformers import PreTrainedModel, PretrainedConfig

# HuggingFace datasets
from datasets import utils, Dataset
from datasets.info import DatasetInfo
from datasets.features import PandasArrayExtensionArray, PandasArrayExtensionDtype, Features, Value, cast_to_python_objects, pandas_types_mapper
from datasets.splits import NamedSplit
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence
from datasets.utils.typing import PathLike
from datasets.arrow_dataset import transmit_format# , replayable_table_alteration
from datasets.fingerprint import (
    fingerprint_transform,
    generate_fingerprint,
    generate_random_fingerprint,
    get_temporary_cache_files_directory,
    is_caching_enabled,
    update_fingerprint,
)
from datasets.dataset_dict import DatasetDict
def is_contiguous(arr):
    """Check whether an iterable of integers is a contiguous ascending run.

    :arg arr: iterable of integers (e.g. row ids or seek-point indices).
    :returns: tuple ``(contiguous, start, end)`` describing the half-open
        range ``[start, end)``.  For a non-contiguous input, ``end`` is one
        past the first out-of-sequence element.  For an empty input,
        returns ``(True, None, None)``.
    """
    start = None
    prev = None
    for i in arr:
        if start is None:
            start = i
        if prev is not None and i != prev + 1:
            # First break in the sequence: report the range up to and
            # including the offending element, matching the old behavior.
            return False, start, i + 1
        prev = i
    if start is None:
        # BUGFIX: empty input used to raise UnboundLocalError on `i`;
        # an empty run is vacuously contiguous with no range to report.
        return True, None, None
    return True, start, prev + 1
class TableExt(dataset.Table):
    """``dataset.Table`` subclass whose :meth:`find` additionally supports
    selecting a subset of columns via the ``_columns`` keyword."""

    def find(self, *_clauses, **kwargs):
        """Perform a simple search on the table similar to
        dataset.Table's find, except: optionally gets a result only
        for specific columns by passing in _columns keyword.
        # TODO, full text search
        Simply pass keyword arguments as ``filter``.
        ::
            results = table.find(country='France')
            results = table.find(country='France', year=1980)
        Using ``_limit``::
            # just return the first 10 rows
            results = table.find(country='France', _limit=10)
        You can sort the results by single or multiple columns. Append a minus
        sign to the column name for descending order::
            # sort results by a column 'year'
            results = table.find(country='France', order_by='year')
            # return all rows sorted by multiple columns (descending by year)
            results = table.find(order_by=['country', '-year'])
        To perform complex queries with advanced filters or to perform
        aggregation, use :py:meth:`db.query() <dataset.Database.query>`
        instead.
        """
        if not self.exists:
            return iter([])
        _fts = kwargs.pop('_fts', None)
        _columns = kwargs.pop('_columns', None)
        _limit = kwargs.pop('_limit', None)
        _offset = kwargs.pop('_offset', 0)
        order_by = kwargs.pop('order_by', None)
        _streamed = kwargs.pop('_streamed', False)
        # NOTE(review): QUERY_STEP and ResultIter are not defined in this
        # module -- confirm they are provided elsewhere in the package
        # (both exist in dataset.util of the upstream `dataset` library).
        _step = kwargs.pop('_step', QUERY_STEP)
        if _step is False or _step == 0:
            _step = None
        order_by = self._args_to_order_by(order_by)
        args = self._args_to_clause(kwargs, clauses=_clauses)
        if _fts:
            # we could run against a local sqlite database and join manually using a list of id's
            # FIXME(review): this branch references undefined names
            # (table_name, column, fts_q) and discards `res`; the
            # full-text-search join is unfinished.  Left untouched so
            # behavior is unchanged for callers that never pass _fts.
            res = self.fts_db.executable.execute(f"""SELECT id, rank
                            FROM {table_name}_idx
                            WHERE {column} MATCH {fts_q}
                            ORDER BY rank
                            LIMIT {_limit}""").fetchall()
        # BUGFIX: the column projection previously tested the undefined name
        # `columns` (NameError); it must use the `_columns` keyword popped above.
        if _columns is None:
            query = self.table.select(whereclause=args,
                                      limit=_limit,
                                      offset=_offset)
        else:
            query = self.table.select(_columns, whereclause=args,
                                      limit=_limit,
                                      offset=_offset)
        if len(order_by):
            query = query.order_by(*order_by)
        conn = self.db.executable
        if _streamed:
            # Streamed results need a dedicated connection with
            # server-side cursors enabled.
            conn = self.db.engine.connect()
            conn = conn.execution_options(stream_results=True)
        return ResultIter(conn.execute(query),
                          row_type=self.db.row_type,
                          step=_step)
class DatabaseExt(dataset.Database):
    """A DatabaseExt object represents a SQL database with multiple tables of
    type TableExt.

    Extends the dataset.Database class and adds a
    flask_sqlalchemy.SQLAlchemy reference. Connects to a
    flask_sqlalchemy.
    """

    def __init__(self, url, flask_app=None, schema=None, reflect_metadata=True,
                 engine_kwargs=None, reflect_views=True,
                 ensure_schema=True, row_type=dataset.util.row_type):
        """Configure and connect to the database.

        :arg url: SQLAlchemy database URL; falls back to the DATABASE_URL
            environment variable, then to in-memory sqlite.
        :arg flask_app: an existing Flask app, an import name for a new
            Flask app, or None for an app-less SQLAlchemy object.
        """
        if url is None:
            url = os.environ.get('DATABASE_URL', 'sqlite://')
        if engine_kwargs is None:
            engine_kwargs = {}
        parsed_url = urlparse(url)
        if type(flask_app) is Flask:
            app = flask_app
        else:
            # A non-Flask truthy value is treated as the import name
            # for a new Flask app.
            app = Flask(flask_app) if flask_app is not None else None
        if parsed_url.scheme.lower() in 'sqlite':
            # ref: https://github.com/pudo/dataset/issues/163
            if 'poolclass' not in engine_kwargs:
                # BUGFIX: was `engine_kwargs.config['poolclass'] = ...`;
                # dicts have no .config attribute (AttributeError).
                engine_kwargs['poolclass'] = StaticPool
        # BUGFIX: dropped `engine_kwargs['SQLALCHEMY_DATABASE_URI'] = url` --
        # engine_kwargs are forwarded to create_engine(), which does not
        # accept that key; the URI is configured on the app below instead.
        try:
            if app:
                app.config['SQLALCHEMY_DATABASE_URI'] = url
                self.flask_db = SQLAlchemy(app, engine_options=engine_kwargs)
            else:
                self.flask_db = SQLAlchemy(engine_options=engine_kwargs)
            # TODO: work out how to integrate the flask_sqlalchemy session.
            self.engine = self.flask_db.engine
            # We use a re-entrant lock because that's what dataset uses.
            self.flask_db._engine_lock = self.lock = threading.RLock()
        except Exception:
            # Fall back to a plain SQLAlchemy engine when flask_sqlalchemy
            # cannot initialize (e.g. no application context).
            # BUGFIX: the old fallback dereferenced self.flask_db, which is
            # never set on this path (AttributeError).
            self.flask_db = None
            self.engine = create_engine(url, **engine_kwargs)
            self.lock = threading.RLock()
        self.local = threading.local()
        if len(parsed_url.query):
            query = parse_qs(parsed_url.query)
            if schema is None:
                schema_qs = query.get('schema', query.get('searchpath', []))
                if len(schema_qs):
                    schema = schema_qs.pop()
        self.types = dataset.types.Types()
        self.schema = schema
        self.url = url
        self.row_type = row_type
        self.ensure_schema = ensure_schema
        self._tables = {}

    # will only work for sqlite.
    # different databases have different fts.
    def create_fts_index_column(self, table_name, column, stemmer="unicode61"):  # or "porter"
        """Create an FTS5 virtual table indexing *column* (sqlite only).

        NOTE(review): table_name/column/stemmer are interpolated directly
        into the SQL -- only call with trusted identifiers.
        """
        # maybe we create a mirror sqlite database called fts_db if the database we are opening is not of sqlite type.
        # the idea is we want to be able to locally attach fts with our datasets arrow files.
        # BUGFIX: was `self.db.executeable.execute('...')` -- `db`/`executeable`
        # are undefined here, and the literal was missing its f-prefix so the
        # {placeholders} were never interpolated.
        self.executable.execute(
            f'CREATE VIRTUAL TABLE {table_name}_idx USING FTS5(idx:INTEGER, {column}:VARCHAR, tokenize="{stemmer}");')

    def create_table(self, table_name, primary_id=None, primary_type=None):
        """Create a new table.
        Either loads a table or creates it if it doesn't exist yet. You can
        define the name and type of the primary key field, if a new table is to
        be created. The default is to create an auto-incrementing integer,
        ``id``. You can also set the primary key to be a string or big integer.
        The caller will be responsible for the uniqueness of ``primary_id`` if
        it is defined as a text type.
        Returns a :py:class:`Table <dataset.Table>` instance.
        ::
            table = db.create_table('population')
            # custom id and type
            table2 = db.create_table('population2', 'age')
            table3 = db.create_table('population3',
                                     primary_id='city',
                                     primary_type=db.types.text)
            # custom length of String
            table4 = db.create_table('population4',
                                     primary_id='city',
                                     primary_type=db.types.string(25))
            # no primary key
            table5 = db.create_table('population5',
                                     primary_id=False)
        """
        assert not isinstance(primary_type, six.string_types), \
            'Text-based primary_type support is dropped, use db.types.'
        try:
            self.flask_db.create_all()  # TODO, don't call this if we already called
        except Exception:
            # Best-effort: flask_db may be None or may already be initialized.
            pass
        table_name = dataset.util.normalize_table_name(table_name)
        with self.lock:
            if table_name not in self._tables:
                self._tables[table_name] = TableExt(self, table_name,
                                                    primary_id=primary_id,
                                                    primary_type=primary_type,
                                                    auto_create=True)
            return self._tables.get(table_name)

    def load_table(self, table_name):
        """Load a table.
        This will fail if the tables does not already exist in the database. If
        the table exists, its columns will be reflected and are available on
        the :py:class:`Table <dataset.Table>` object.
        Returns a :py:class:`Table <dataset.Table>` instance.
        ::
            table = db.load_table('population')
        """
        try:
            self.flask_db.create_all()  # TODO, don't call this if we already called. how to sync the ._tables variable with the corresponding variable in
        except Exception:
            # Best-effort: flask_db may be None or may already be initialized.
            pass
        table_name = dataset.util.normalize_table_name(table_name)
        with self.lock:
            if table_name not in self._tables:
                self._tables[table_name] = TableExt(self, table_name)
            return self._tables.get(table_name)
class IndexGzipFileExt(igzip.IndexedGzipFile):
"""This class inheriets from `` ingdex_gzip.IndexedGzipFile``. This class allows in addition to the functionality
of IndexedGzipFile, access to a specific line based on the seek point of the line, using the __getitem__ method.
Additionally, a (conginguous) list or slice can be used, which will be more efficient then doing line by line access.
The base IndexedGzipFile class allows for fast random access of a gzip
file by using the ``zran`` library to build and maintain an index of seek
points into the file.
``IndexedGzipFile`` is an ``io.BufferedReader`` which wraps an
:class:`_IndexedGzipFile` instance. By accessing the ``_IndexedGzipFile``
instance through an ``io.BufferedReader``, read performance is improved
through buffering, and access to the I/O methods is made thread-safe.
A :meth:`pread` method is also implemented, as it is not implemented by
the ``io.BufferedReader``.
"""
def __init__(self, *args, **kwargs):
"""Create an ``LineIndexGzipFile``. The file may be specified either
with an open file handle (``fileobj``), or with a ``filename``. If the
former, the file must have been opened in ``'rb'`` mode.
.. note:: The ``auto_build`` behaviour only takes place on calls to
:meth:`seek`.
:arg filename: File name or open file handle.
:arg fileobj: Open file handle.
:arg mode: Opening mode. Must be either ``'r'`` or ``'rb``.
:arg auto_build: If ``True`` (the default), the index is
automatically built on calls to :meth:`seek`.
:arg skip_crc_check: Defaults to ``False``. If ``True``, CRC/size
validation of the uncompressed data is not
performed.
:arg spacing: Number of bytes between index seek points.
:arg window_size: Number of bytes of uncompressed data stored with
each seek point.
:arg readbuf_size: Size of buffer in bytes for storing compressed
data read in from the file.
:arg readall_buf_size: Size of buffer in bytes used by :meth:`read`
when reading until EOF.
:arg drop_handles: Has no effect if an open ``fid`` is specified,
rather than a ``filename``. If ``True`` (the
default), a handle to the file is opened and
closed on every access. Otherwise the file is
opened | |
<reponame>ElvisNguyen/Hoshii-kun
import discord
import asyncio
import dateparser
import datetime
import math
import json
import pytz
import apscheduler
import pymongo
import dns
import requests
import arrow
import csv
import random
from difflib import SequenceMatcher
from discord.ext import commands, tasks
from discord.ext.commands.cooldowns import BucketType
from datetime import datetime, timedelta
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from pymongo import MongoClient
from ics import Calendar, Event
#Discord token goes here. It's possible to read the token in from an environment variable or another remote location if required
#SECURITY NOTE(review): prefer reading the token from os.environ so it is never committed to source control.
TOKEN = ""
#Discord client; all commands are registered against this bot with the '/' prefix.
client = commands.Bot(command_prefix = '/')
#Notification scheduler, used for event hosting notifications
notifier = AsyncIOScheduler(daemon=True)
notifier.start()
#MongoDB initialization, fill in pymongo.MongoClient("") with your mongoDB connection string
mongo_client = pymongo.MongoClient("")
db = mongo_client.events
collection = db.event_data
#12* unit dictionaries, these were used to load in armor data from csv files for quick access back in episode 5.
#Keyed by unit name -> csv row dict; populated by init_unit_data().
back_dict = {}
arms_dict = {}
legs_dict = {}
subs_dict = {}
#Event calendar, global so we don't need to request it every single time someone calls /uq
#Holds an ics timeline once init_calendar() runs; None until then.
cal = None
#************** init_calendar() ****************
#pulls scheduled events from SEGA's event calendar, deserializes the json file,
#adds it into an ics calendar, then converts it into timeline format.
#[It's an adapter so we can use the system we wrote from when SEGA used google calendar for their events]
def init_calendar():
    """Pull scheduled events from SEGA's event calendar, deserialize the
    JSON payload, add the events to an ics Calendar, and store the calendar's
    timeline in the module-level ``cal``.

    [It's an adapter so we can use the system we wrote from when SEGA used
    google calendar for their events.]
    """
    # Global variable so we don't have to pull the data every time someone calls /uq
    global cal
    # Create calendar object to store events in
    calendar = Calendar()
    # SEGA's PSO2 site has an age gate, so pretend we already passed it by
    # presenting the verification cookie.
    # Sidenote: In the future, we could read all the cookies and see which ones
    # contain the word 'age' and a T/F value and set them to true for
    # resilience to small name changes.
    # BUGFIX: removed a dead `requests.session()` assignment that was
    # immediately overwritten by the GET below.
    response = requests.get("https://pso2.com/news/LoadScheduleCampaigns",
                            cookies={'pso2-age.verification': 'true'})
    # BUGFIX: the old `if url is None` check could never fire -- requests.get()
    # returns a Response (or raises); check the HTTP status instead.
    if not response.ok:
        print("Trouble loading json file from SEGA website. Please check their API")
        return
    # Deserialize the JSON schedule payload.
    json_file = json.loads(response.content.decode())
    # Event times are published in US/Pacific.
    locale = pytz.timezone('US/Pacific')
    # For every event in the json file, add an Event with its details to the calendar.
    for event in json_file:
        title = event["title"]
        for occurrence in event["events"]:
            new_event = Event()
            new_event.name = title
            new_event.begin = dateparser.parse(occurrence["startDate"]).replace(tzinfo=locale)
            new_event.end = dateparser.parse(occurrence["endDate"]).replace(tzinfo=locale)
            calendar.events.add(new_event)
    cal = calendar.timeline
#************** init_unit_data() ****************
#This function reads stat information about armors in the game from a CSV file and stores it into a dictionary for quick access
def init_unit_data():
    """Load per-unit armor stats from the bundled CSV files into the
    module-level lookup dictionaries used by the unit commands.

    Each dictionary maps a unit name (the 'Unit' CSV column) to its full
    CSV row dict.
    """
    sources = (
        ('backs.csv', back_dict),
        ('arms.csv', arms_dict),
        ('legs.csv', legs_dict),
        ('subs.csv', subs_dict),
    )
    for csv_path, stats in sources:
        with open(csv_path, newline='') as handle:
            for record in csv.DictReader(handle):
                stats[record['Unit']] = record
#This class is used for the event hosting function of hoshii-kun
class GuildEvent:
    """A scheduled alliance event posted to a guild's 'event-hosting' channel.

    Instances are persisted to MongoDB (module-level ``collection``) keyed by
    the Discord message ID of the event post, so they are unique and easy to
    look up from reaction events.
    """
    #Every event will have an id, an associated guild, an event name, a party type, a start time, a host and a list of attendees.
    #Event IDs are just the discord message IDs for when the events are posted, this is so they're 1. Unique and 2. Easily referencable on the discord side of things.
    def __init__(self, guild, eventName, partyType, eventTime, host):
        # event_ID stays None until shareEvent() posts the message and adopts
        # its Discord message ID.
        self.event_ID = None
        self.guild = guild
        self.eventName = eventName
        self.partyType = partyType
        self.eventTime = eventTime
        self.host = host
        # Attendees who signed up via reaction; starts empty.
        self.playerList = []

    #************** listEventInfo() ****************
    #Prints event information to the console
    async def listEventInfo(self):
        """Print this event's details to the console (debug helper)."""
        print("\n[Alliance] {}\n[Event Name] {}\n[Party Size] {}\n[Event Time] {}\n[Host] {}".format(self.guild.name, self.eventName, self.partyType, self.eventTime, self.host))

    #************** eventToDB() ****************
    #Stores events in mongoDB for data persistence
    def eventToDB(self):
        """Upsert this event into MongoDB keyed by its Discord message ID.

        Silently does nothing unless all identifying fields (event_ID, guild,
        eventName, partyType, host) are truthy.
        """
        if self.event_ID and self.guild and self.eventName and self.partyType and self.host:
            j_eventID = self.event_ID
            j_guild = self.guild.id
            j_eventName = self.eventName
            j_partyType = self.partyType
            # eventTime is stored as a string; DBToEvent() parses it back.
            j_eventTime = str(self.eventTime)
            j_host = self.host
            j_playerList = self.playerList
            filter = {"_id": j_eventID}
            event = {"$set": {"_id": j_eventID, "guild": j_guild, "eventName": j_eventName, "partyType": j_partyType, "eventTime": j_eventTime, "host": j_host, "playerList": j_playerList} }
            x = collection.update_one(filter ,event, upsert=True)
        # Legacy JSON-file persistence, superseded by MongoDB:
        '''
        list = [j_eventID, j_guild, j_eventName, j_partyType, j_eventTime, j_host, j_playerList]
        json_string = json.dumps(list)
        file_name = str(self.event_ID)+".json"
        json_file = open(file_name, 'w+')
        json_file.write(json_string)
        json_file.truncate()
        json_file.close()
        '''

    #************** DBToEvent() ****************
    #Pulls event data from mongoDB using the event ID
    def DBToEvent(self, event_ID):
        """Populate this instance from the MongoDB document whose ``_id``
        equals *event_ID*.

        NOTE(review): assumes the document exists -- find_one() returning
        None would raise a TypeError below; confirm callers guarantee
        existence.
        """
        # Legacy JSON-file loading, superseded by MongoDB:
        '''
        file_name = str(event_ID)+".json"
        file = open(file_name, 'r')
        json_string = file.read()
        file.close()
        list = json.loads(json_string)
        '''
        filter = {"_id": event_ID}
        cursor = collection.find_one(filter)
        self.event_ID = event_ID
        self.guild = client.get_guild(cursor['guild'])
        self.eventName = cursor['eventName']
        self.partyType = cursor['partyType']
        self.eventTime = dateparser.parse(cursor['eventTime'])
        self.playerList = cursor['playerList']
        self.host = cursor['host']

    #************** shareEvent() ****************
    #Makes a post about the event on the 'event-hosting' guild channel. Creates the channel if it doesn't exist. [In the future, we could let them configure what channel they want it posted in]
    #Allows people to sign up for the event by reacting a heart to the event post
    #Sends people timezone conversions for the event if they react with the clock
    #Notes: If the system ever got converted to an embed based rather than message based system, we could just load the start time in the user's local time.
    #Unlimited party size hasn't been completed since release, but was left in at request. Unexpected behaviour is expected to occur if the message size for the event ever exceeds the character limit for the message.
    #Patchable by detecting a character limit overflow and adding function to list all the signees by reacting to a list.
    #Wishlist: A waitlist function
    async def shareEvent(self):
        """Post this event to the guild's 'event-hosting' channel (creating
        the read-only channel if it does not exist), add the signup/timezone
        reaction emojis, persist the event to the database, and return its
        Discord message ID."""
        #Grabs a list of channels from the event's guild
        channels = self.guild.text_channels
        #Channel to post event in
        event_channel = None
        #Convert event start time to UTC
        self.eventTime = self.eventTime.astimezone(pytz.utc)
        #Set the format to print the time out in as Y/M/D @ H:M UTC
        formatted_time = self.eventTime.strftime("%Y/%m/%d @ %H:%M %Z")
        #Scans channels for 'event-hosting' channel, sets event-channel to 'event-hosting' if it's found
        for i in channels:
            if(i.name) == ('event-hosting'):
                event_channel = i
        #If 'event-hosting' doesn't exist on the server, create the channel
        if event_channel is None:
            event_channel = await self.guild.create_text_channel('event-hosting', overwrites={self.guild.default_role: discord.PermissionOverwrite(send_messages=False)})
        #Post event data with reaction emojis, store event in DB and return event ID
        message = await event_channel.send("**[Event Name]** {}\n**[Hosted by]** {}\n**[Party size]** {}\n**[Date/Time]** {} ".format(self.eventName, self.host, self.partyType, formatted_time))
        self.event_ID = message.id
        await message.add_reaction('❤️')
        await message.add_reaction('🕒')
        self.eventToDB()
        return self.event_ID

    #Really disturbing redundant code removed. It must've made sense at one point in time -w-.
    '''
    if self.partyType == '4':
        message = await event_channel.send("**[Event Name]** {}\n**[Hosted by]** {}\n**[Party size]** {}\n**[Date/Time]** {} ".format(self.eventName, self.host, self.partyType, formatted_time))
        self.event_ID = message.id
        await message.add_reaction('❤️')
        await message.add_reaction('🕒')
        self.eventToDB()
        return self.event_ID
    elif self.partyType == '8':
        message = await event_channel.send("**[Event Name]** {}\n**[Hosted by]** {}\n**[Party size]** {}\n**[Date/Time]** {}".format(self.eventName, self.host, self.partyType, formatted_time))
        self.event_ID = message.id
        await message.add_reaction('❤️')
        await message.add_reaction('🕒')
        self.eventToDB()
        return self.event_ID
    elif self.partyType == '12':
        message = await event_channel.send("**[Event Name]** {}\n**[Hosted by]** {}\n**[Party size]** {}\n**[Date/Time]** {}".format(self.eventName, self.host, self.partyType, formatted_time))
        self.event_ID = message.id
        await message.add_reaction('❤️')
        await message.add_reaction('🕒')
        self.eventToDB()
        return self.event_ID
    elif self.partyType == 'unlimited':
        message = await event_channel.send("**[Event Name]** {}\n**[Hosted by]** {}\n**[Party size]** {}\n**[Date/Time]** {}".format(self.eventName, self.host, self.partyType, formatted_time))
        self.event_ID = message.id
        await message.add_reaction('❤️')
        await message.add_reaction('🕒')
        self.eventToDB()
        return self.event_ID
    '''
#************** on_ready() ****************
#Things that need to be run once before the bot can start itself
@client.event
async def on_ready():
#Loads armor data into their dictionaries from CSV files for the '/back', '/arms', '/legs', '/sub' and '/planunits' commands to have | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Bioindustrial-Park: BioSTEAM's Premier Biorefinery Models and Results
# Copyright (C) 2020-, <NAME> <<EMAIL>>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
'''
References
----------
[1] Humbird et al., Process Design and Economics for Biochemical Conversion of
Lignocellulosic Biomass to Ethanol: Dilute-Acid Pretreatment and Enzymatic
Hydrolysis of Corn Stover; Technical Report NREL/TP-5100-47764;
National Renewable Energy Lab (NREL), 2011.
https://www.nrel.gov/docs/fy11osti/47764.pdf
[2] Davis et al., Process Design and Economics for the Conversion of Lignocellulosic
Biomass to Hydrocarbon Fuels and Coproducts: 2018 Biochemical Design Case Update;
NREL/TP-5100-71949; National Renewable Energy Lab (NREL), 2018.
https://doi.org/10.2172/1483234
Naming conventions:
D = Distillation column
E = Evaporator
F = Flash tank
H = Heat exchange
M = Mixer
P = Pump (including conveying belt)
R = Reactor
S = Splitter (including solid/liquid separator)
T = Tank or bin for storage
U = Other units
    PS = Process specification, not physical units, but for adjusting streams
Processes:
100: Feedstock preprocessing
200: Pretreatment
300: Carbohydrate conversion
400: Carbohydrate product separation
500: Wastewater treatment
600: Facilities
700: Lignin conversion and separation
'''
# %%
import biosteam as bst
import flexsolve as fs
from biosteam import Stream, System
from biosteam.process_tools import UnitGroup
from biorefineries.ethanol_adipic import _units as units
from biorefineries.ethanol_adipic import _facilities as facilities
from biorefineries.ethanol_adipic._settings import price, CFs, \
_labor_2011to2016, set_feedstock_price
from biorefineries.ethanol_adipic._chemicals import chems, soluble_organics, \
solubles, insolubles, COD_chemicals, combustibles
from biorefineries.ethanol_adipic._utils import convert_ethanol_wt_2_mol, \
_ethanol_kg_2_gal, cell_mass_split, AD_split, MB_split
from biorefineries.ethanol_adipic._tea import EthanolAdipicTEA
from biorefineries.lactic._processes import (
create_pretreatment_process as create_acid_pretreatment_process,
create_wastewater_process as la_create_wastewater_process,
)
from biorefineries.ethanol_adipic._preprocessing import \
create_default_depot, PreprocessingCost
__all__ = (
'create_preprocessing_process',
'create_acid_pretreatment_process',
'create_base_pretreatment_process',
'create_ethanol_process',
'create_adipic_process',
'create_wastewater_process',
'create_facilities',
'create_biorefinery'
)
'''
TODOs:
Add include blowdown or not; recycle Na2SO4 or not in wastewater
Add options of ins/outs for connections between processes
'''
hasattr = hasattr
# %%
# =============================================================================
# Preprocessing
# =============================================================================
def create_preprocessing_process(kind='HMPP', with_AFEX=False):
    """Build and simulate the feedstock preprocessing depot.

    :param kind: depot type passed through to ``create_default_depot``
        (default ``'HMPP'``).
    :param with_AFEX: whether the depot includes AFEX pretreatment.
    :returns: tuple of the depot flowsheet and its ``PreprocessingCost``.
    """
    flowsheet = create_default_depot(kind=kind, with_AFEX=with_AFEX)
    depot_sys = flowsheet.system.prep_sys
    depot_sys.simulate()
    cost = PreprocessingCost(depot_sys=depot_sys,
                             labor_adjustment=_labor_2011to2016)
    # Update the feedstock price ($/Mg) with the simulated preprocessing cost.
    set_feedstock_price(flowsheet.stream.preprocessed,
                        preprocessing=cost.feedstock_unit_price)
    return flowsheet, cost
# %%
# =============================================================================
# Base pretreatment
# =============================================================================
def create_base_pretreatment_process(flowsheet, groups, feed):
    """Create the base (deacetylation and disc-refining) pretreatment process.

    Units are numbered in the 200-series per the module's naming convention.

    :param flowsheet: biosteam flowsheet to register units on.
    :param groups: list of UnitGroup objects; 'pretreatment_group' is
        appended in place.
    :param feed: feedstock stream entering the DeacetylationReactor.
    :returns: the (flowsheet, groups) pair for chaining.
    """
    bst.main_flowsheet.set_flowsheet(flowsheet)
    ######################## Streams ########################
    # Flows updated in DeacetylationReactor
    caustic_R201 = Stream('caustic_R201', units='kg/hr')
    water_R201 = Stream('water_R201', units='kg/hr')
    ######################## Units ########################
    # R201-0 (black liquor) is pumped by P201; R201-1 (pulp) is disc-milled.
    R201 = units.DeacetylationReactor('R201', ins=(feed, caustic_R201, water_R201))
    P201 = units.BlackLiquorPump('P201', ins=R201-0)
    U201 = units.DiscMill('U201', ins=R201-1)
    F201 = units.PretreatmentFlash('F201', ins=U201-0,
                                   outs=('F201_waste_vapor', 'F201_to_fermentation'),
                                   P=101325, Q=0)
    # Seems like don't need the condenser (no vapor per simulated by F201)
    # F201_H = bst.units.HXutility('F201_H', ins=F201-0, V=0, rigorous=True)
    P202 = units.HydrolysatePump('P202', ins=F201-1)
    ######################## Systems ########################
    pretreatment_sys = System('pretreatment_sys',
                              path=(R201, P201, U201, F201, P202))
    pretreatment_group = UnitGroup('pretreatment_group', units=pretreatment_sys.units)
    groups.append(pretreatment_group)
    return flowsheet, groups
# %%
# =============================================================================
# Carbohydrate conversion and separation
# =============================================================================
def create_ethanol_process(flowsheet, groups, feed,
                           cell_mass_split=cell_mass_split):
    """Create the carbohydrate conversion (300-series) and ethanol
    separation (400-series) process: enzymatic hydrolysis, co-fermentation,
    distillation, and molecular-sieve dehydration.

    :param flowsheet: biosteam flowsheet to register units on.
    :param groups: list of UnitGroup objects; 'ethanol_group' is appended
        in place.
    :param feed: pretreated stream sent to the enzymatic hydrolysate mixer.
    :param cell_mass_split: split fractions for the cell mass filter S401.
    :returns: the (flowsheet, groups) pair for chaining.
    """
    bst.main_flowsheet.set_flowsheet(flowsheet)
    ######################## Streams ########################
    # Flow updated in EnzymeHydrolysateMixer
    enzyme_M301 = Stream('enzyme_M301', units='kg/hr', price=price['Enzyme'])
    # Used to adjust enzymatic hydrolysis solid loading,
    # flow updated in EnzymeHydrolysateMixer
    water_M301 = Stream('water_M301', units='kg/hr')
    # Streams 311 and 309 from ref [1]
    CSL_R301 = Stream('CSL_R301', units='kg/hr')
    CSL_R302 = Stream('CSL_R302', units='kg/hr')
    # Streams 312 and 310 from ref [1]
    DAP_R301 = Stream('DAP_R301', units='kg/hr')
    DAP_R302 = Stream('DAP_R302', units='kg/hr')
    water_U401 = Stream('water_U401', units='kg/hr')
    ######################## Units ########################
    M301 = units.EnzymeHydrolysateMixer('M301', ins=(feed, enzyme_M301, water_M301),
                                        enzyme_loading=20, solid_loading=0.2)
    R301 = units.SaccharificationAndCoFermentation(
        'R301', ins=(M301-0, '', CSL_R301, DAP_R301),
        outs=('R301_g', 'effluent', 'side_draw'), C5_saccharification=False)
    # Followed ref [2], no sorbitol in the final seed fermenter as in ref [1]
    R302 = units.SeedTrain('R302', ins=(R301-2, CSL_R302, DAP_R302),
                           outs=('R302_g', 'seed'))
    T301 = units.SeedHoldTank('T301', ins=R302-1, outs=1-R301)
    M401 = bst.units.Mixer('M401', ins=(R301-0, R302-0), outs='fermentation_vapor')
    def update_U401_water():
        # Scrubber water scales with the total vapor flow entering M401.
        M401._run()
        # 26836 and 21759 from streams 524 and 523 in ref [1]
        water_U401.imass['Water'] = 26836/21759 * M401.F_mass_in
    M401.specification = update_U401_water
    U401 = bst.units.VentScrubber('U401', ins=(water_U401, M401-0),
                                  outs=('U401_vent', 'U401_recycled'),
                                  gas=('CO2', 'NH3', 'O2'))
    # Mixer crude ethanol beer
    M402 = bst.units.Mixer('M402', ins=(R301-1, U401-1))
    T401 = units.BeerTank('T401', ins=M402-0)
    # Heat up crude beer by exchanging heat with stillage
    H401 = bst.units.HXprocess('H401', ins=(T401-0, ''),
                               phase0='l', phase1='l', U=1.28)
    # Remove solids from fermentation broth, based on the pressure filter in ref [1]
    # Moisture content is 35% in ref [1] but 25% in ref [2], used 35% to be conservative
    S401 = units.CellMassFilter('S401', ins=H401-1, outs=('S401_cell_mass', 'S401_to_WWT'),
                                moisture_content=0.35, split=cell_mass_split)
    # Beer column
    xbot = convert_ethanol_wt_2_mol(0.00001)
    ytop = convert_ethanol_wt_2_mol(0.5)
    D401 = bst.units.BinaryDistillation('D401', ins=H401-0, k=1.25, Rmin=0.6,
                                        P=101325, y_top=ytop, x_bot=xbot,
                                        LHK=('Ethanol', 'Water'),
                                        tray_material='Stainless steel 304',
                                        vessel_material='Stainless steel 304')
    D401.boiler.U = 1.85
    D401_P = bst.units.Pump('D401_P', ins=D401-1, outs=1-H401)
    D401_P.BM = 3.1
    # Mix recycled ethanol
    M403 = bst.units.Mixer('M403', ins=(D401-0, ''))
    ytop = convert_ethanol_wt_2_mol(0.915)
    D402 = bst.units.BinaryDistillation('D402', ins=M403-0, k=1.25, Rmin=0.6,
                                        P=101325, y_top=ytop, x_bot=xbot,
                                        LHK=('Ethanol', 'Water'),
                                        tray_material='Stainless steel 304',
                                        vessel_material='Stainless steel 304',
                                        is_divided=True)
    D402.boiler.U = 1.85
    D402_P = bst.units.Pump('D402_P', ins=D402-1, outs='D402_to_WWT')
    D402_P.BM = 3.1
    # NOTE(review): 115+283.15 K looks like it may intend 115 degC + 273.15
    # (= 388.15 K) -- confirm against the design basis.
    D402_H = bst.units.HXutility('D402_H', ins=D402-0, T=115+283.15, V=1)
    # Molecular sieve, split based on streams 515 and 511 in ref [1]
    split_ethanol = 1 - 21673/27022
    split_water = 1 - 108/2164
    S402 = bst.units.MolecularSieve('S402', ins=D402_H-0, outs=(1-M403, ''),
                                    split=(split_ethanol, split_water),
                                    order=('Ethanol', 'Water'))
    # Condense ethanol product
    S402_H = bst.units.HXutility('S402_H', ins=S402-1, outs='ethanol_to_storage',
                                 V=0, T=350)
    ######################## Systems ########################
    ethanol_production_sys = System('ethanol_production_sys',
                                    path=(M301, R301, R302, T301), recycle=R302-1)
    ethanol_recycle = System('ethanol_recycle',
                             path=(M403, D402, D402_P, D402_H, S402, S402_H),
                             recycle=S402-0)
    # NOTE(review): H401 appears three times in this path, presumably so the
    # process-process heat exchanger re-runs as both of its sides update --
    # confirm against biosteam's System path semantics.
    ethanol_separation_sys = System('ethanol_separation_sys',
                                    path=(M401, U401, M402, T401, H401,
                                          D401, H401, D401_P, H401, S401,
                                          ethanol_recycle))
    ethanol_sys = System('ethanol_sys',
                         path=(ethanol_production_sys, ethanol_separation_sys))
    ethanol_group = UnitGroup('ethanol_group', units=ethanol_sys.units)
    groups.append(ethanol_group)
    return flowsheet, groups
# %%
# =============================================================================
# Lignin conversion and separation
# =============================================================================
# This process should be constructed and simulated before the wastewater process
# and facilities, but numbered in 700-series to keep the consistent 500-series
# for the wastewater process and 600-series for facilities
def create_adipic_process(flowsheet, groups, black_liquor, cell_mass):
    """Create the 700-series lignin conversion and separation process:
    alkaline pulping, muconic acid fermentation, crystallization, and
    hydrogenation to adipic acid.

    :param flowsheet: biosteam flowsheet to register units on.
    :param groups: list of UnitGroup objects; 'adipic_group' is appended
        in place.
    :param black_liquor: black liquor stream sent to storage T701.
    :param cell_mass: cell mass stream co-fed to the pulping reactor R701.
    :returns: the (flowsheet, groups) pair for chaining.
    """
    bst.main_flowsheet.set_flowsheet(flowsheet)
    ######################## Streams ########################
    # Used to maintain a minimum of 2 wt% caustic level
    caustic_R701 = Stream('caustic_R701', units='kg/hr')
    # Used to neutralize the deconstructed pulp
    sulfuric_acid_T702 = Stream('sulfuric_acid_T702', units='kg/hr')
    # Based on stream 708 in ref [2]
    water_R702 = Stream('water_R702', units='kg/hr')
    ammonia_R702 = Stream('ammonia_R702', units='kg/hr')
    caustic_R702 = Stream('caustic_R702', units='kg/hr')
    CSL_R702 = Stream('CSL_R702', units='kg/hr')
    DAP_R702 = Stream('DAP_R702', units='kg/hr')
    air_R702 = Stream('air_R702', phase='g', units='kg/hr')
    # Used to reacidify sodium muconate to muconic acid for crystallization
    sulfuric_acid_S702 = Stream('sulfuric_acid_S702', units='kg/hr')
    ethanol_T703 = Stream('ethanol_T703', units='kg/hr')
    hydrogen_R703 = Stream('hydrogen_R703', units='kg/hr', price=price['H2'])
    ######################## Units ########################
    T701 = units.BlackLiquorStorage('T701', ins=black_liquor)
    R701 = units.PulpingReactor('R701', ins=(T701-0, cell_mass, caustic_R701))
    T702 = units.NeutralizationTank('T702', ins=(R701-0, sulfuric_acid_T702))
    R702 = units.MuconicFermentation('R702', ins=(T702-0, water_R702, ammonia_R702,
                                                  caustic_R702, CSL_R702, DAP_R702,
                                                  air_R702),
                                     outs=('R702_vent', 'crude_muconic'))
    # Adjusting lignin conversion to meet titer requirement
    def titer_at_yield(lignin_yield):
        # Residual of the effluent titer vs. the target at the given yield;
        # root of this function is the yield that meets the target titer.
        R702.main_fermentation_rxns.X[-1] = lignin_yield
        R702._run()
        return R702.effluent_titer-R702.target_titer
    #!!! This needs reviewing, need to compare the non-adjusting yield
    def adjust_R702_titer():
        # Solve for the lignin conversion in [0, 1] that hits the target
        # titer using flexsolve's bounded interpolation root finder.
        R702.main_fermentation_rxns.X[-1] = fs.IQ_interpolation(
            f=titer_at_yield, x0=0, x1=1, xtol=0.001, ytol=0.01, maxiter=50,
            args=(), checkbounds=False)
        R702._run()
    PS701 = bst.units.ProcessSpecification(
        'PS701', ins=R702-1, specification=adjust_R702_titer)
    S701 = units.MuconicMembrane('S701', ins=PS701-0, outs=('S701_l', 'S701_to_WWT'))
    S702 = units.MuconicCrystallizer('S702', ins=(S701-0, sulfuric_acid_S702),
                                     outs=('S702_to_WWT', 'muconic'))
    T703 = units.MuconicDissolution('T703', ins=(S702-1, '', ethanol_T703))
    R703 = units.MuconicHydrogenation('R703', ins=(T703-0, hydrogen_R703),
                                      outs='crude_adipic')
    S703 = units.AdipicEvaporator('S703', ins=(R703-0, ''),
                                  outs=('ethanol_to_recycle', 'concentrated_adipic'))
    S704 = units.AdipicCrystallizer('S704', ins=S703-1,
                                    outs=(1-S703, 'adipic_to_storage'))
    H701 = units.AdipicCondenser('H701', ins=S703-0, outs=1-T703, V=0)
    ######################## Systems ########################
    adipic_recycle = System('adipic_recycle', path=(S703, S704), recycle=S704-0)
    solvent_recycle = System('solvent_recycle',
                             path=(T703, R703, adipic_recycle, H701),
                             recycle=H701-0)
    adipic_sys = System('adipic_sys',
                        path=(T701, R701, T702, R702, PS701, S701, S702,
                              solvent_recycle))
    adipic_group = UnitGroup('adipic_group', units=adipic_sys.units)
    groups.append(adipic_group)
    return flowsheet, groups
# %%
# =============================================================================
# Wastewater
# =============================================================================
def create_wastewater_process(flowsheet, groups, get_flow_tpd, wwt_streams,
                              need_ammonia, bypass_R501, recover_sodium_sulfate):
    """Build the wastewater treatment area and register its unit group.

    Delegates to ``la_create_wastewater_process`` (the lactic-acid biorefinery
    builder). AD_split, MB_split, COD_chemicals, soluble_organics, solubles and
    insolubles come from module scope -- presumably defined at import time;
    verify against the enclosing module.

    When ``recover_sodium_sulfate`` is True, a SodiumSulfateRecovery unit
    (S506) treating the brine stream is appended to the existing wastewater
    system path and to the last unit group in ``groups``.

    Returns the (flowsheet, groups) pair, mutated in place.
    """
    flowsheet, groups = la_create_wastewater_process(
        flowsheet, groups, get_flow_tpd, wwt_streams,
        AD_split=AD_split, MB_split=MB_split, COD_chemicals=COD_chemicals,
        soluble_organics=soluble_organics, solubles=solubles, insolubles=insolubles,
        need_ammonia=need_ammonia, bypass_R501=bypass_R501)
    if recover_sodium_sulfate:
        S506 = units.SodiumSulfateRecovery('S506', ins=flowsheet.stream.brine,
                                           outs=('S506_vent', 'residuals_to_CHP',
                                                 'sodium_sulfate_to_storage'))
        wwt_sys = flowsheet.system.wastewater_sys
        # NOTE(review): _set_path is a private System API -- confirm it is
        # still available in the installed biosteam version.
        wwt_sys._set_path(path=(*wwt_sys.path, S506))
        groups[-1].units = (*wwt_sys.units, S506)
    return flowsheet, groups
# %%
# =============================================================================
# Facilities
# =============================================================================
def create_facilities(flowsheet, groups, get_flow_tpd,
CHP_wastes, CHP_biogas='', CHP_side_streams=(),
process_water_streams={}, recycled_water='',
if_HXN=False, if_BDM=False):
bst.main_flowsheet.set_flowsheet(flowsheet)
s = flowsheet.stream
u = flowsheet.unit
######################## Streams ########################
# For products
ethanol = Stream('ethanol', units='kg/hr', price=price['Ethanol'])
denaturant = Stream('denaturant', units='kg/hr', price=price['Denaturant'])
# Process chemicals
caustic = Stream('caustic', units='kg/hr', price=price['NaOH'])
CSL = Stream('CSL', units='kg/hr', price=price['CSL'])
DAP = Stream('DAP', units='kg/hr', price=price['DAP'])
ammonia = Stream('ammonia', units='kg/hr', price=price['NH4OH'])
sulfuric_acid = Stream('sulfuric_acid', units='kg/hr', price=price['H2SO4'])
# Chemicals used/generated in CHP
lime_CHP = Stream('lime_CHP', units='kg/hr', price=price['Lime'])
# Scaled based on feedstock flow, 1054 from Table 33 in ref [2] as NH3
ammonia_CHP = Stream('ammonia_CHP', units='kg/hr',
NH4OH=1054*35.046/17.031*get_flow_tpd()/2205)
boiler_chems = Stream('boiler_chems', units='kg/hr', price=price['Boiler chems'])
baghouse_bag = Stream('baghouse_bag', units='kg/hr', price=price['Baghouse bag'])
# Supplementary natural gas for CHP if produced steam not enough for regenerating
# all steam streams required by the system
natural_gas = Stream('natural_gas', units='kg/hr', price=price['Natural gas'])
vent_CHP = Stream('vent_CHP', phase='g', units='kg/hr')
ash = Stream('ash', units='kg/hr', | |
= set()
for m in self._current_media:
locations_manager = m.GetLocationsManager()
if url in locations_manager.GetURLs():
removee_media.add( m )
if len( removee_media ) > 0:
removee_hashes = { m.GetHash() for m in removee_media }
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_URLS, HC.CONTENT_UPDATE_DELETE, ( ( url, ), removee_hashes ) )
for m in removee_media:
m.GetMediaResult().ProcessContentUpdate( CC.COMBINED_LOCAL_FILE_SERVICE_KEY, content_update )
self._pending_content_updates.append( content_update )
#
self._UpdateList()
def _SetSearchFocus( self ):
    # Move keyboard focus to the URL text-input box.
    self._url_input.setFocus( QC.Qt.OtherFocusReason )
def _UpdateList( self ):
    # Rebuild the listbox from the URLs known for the current media, tallying
    # how many media carry each URL.
    self._urls_listbox.clear()
    url_counter = collections.Counter(
        url
        for media in self._current_media
        for url in media.GetLocationsManager().GetURLs()
    )
    self._current_urls_count = url_counter
    # With a single media selected the count is redundant, so show bare URLs.
    single_media = len( self._current_media ) == 1
    for ( url, count ) in url_counter.items():
        label = url if single_media else '{} ({})'.format( url, count )
        list_item = QW.QListWidgetItem()
        list_item.setText( label )
        # stash the raw URL so selection handlers can fetch it back
        list_item.setData( QC.Qt.UserRole, url )
        self._urls_listbox.addItem( list_item )
def EventListDoubleClick( self, item ):
    # Remove every selected URL; if exactly one was selected, drop it into the
    # input box so the user can tweak and re-add it.
    selected_urls = [ QP.GetClientData( self._urls_listbox, index.row() ) for index in self._urls_listbox.selectedIndexes() ]
    for selected_url in selected_urls:
        self._RemoveURL( selected_url )
    if len( selected_urls ) == 1:
        self._url_input.setText( selected_urls[0] )
def EventListKeyDown( self, event ):
    # Delete keys remove the selected URLs; anything else is passed back to
    # the caller (truthy return == event not consumed here).
    ( modifier, key ) = ClientGUIShortcuts.ConvertKeyEventToSimpleTuple( event )
    if key not in ClientGUIShortcuts.DELETE_KEYS:
        return True # was: event.ignore()
    selected_urls = [ QP.GetClientData( self._urls_listbox, index.row() ) for index in self._urls_listbox.selectedIndexes() ]
    for selected_url in selected_urls:
        self._RemoveURL( selected_url )
def AddURL( self ):
    # Commit the typed URL. An empty box means 'done' -> close the dialog;
    # a scheme-less string is rejected before it reaches storage.
    url = self._url_input.text()
    if url == '':
        self.parentWidget().DoOK()
        return
    if urllib.parse.urlparse( url ).scheme == '':
        QW.QMessageBox.critical( self, 'Error', 'Could not parse that URL! Please make sure you include http:// or https://.' )
        return
    self._EnterURL( url )
    self._url_input.setText( '' )
def CommitChanges( self ):
    # Flush all queued URL add/delete content updates to the client database
    # in a single synchronous write; no-op when nothing was changed.
    if len( self._pending_content_updates ) > 0:
        service_keys_to_content_updates = { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : self._pending_content_updates }
        HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
def ProcessApplicationCommand( self, command ):
    # Handle simple application shortcuts aimed at this panel. Returns True
    # when the command was consumed, False so the caller can keep dispatching.
    if command.GetCommandType() != CC.APPLICATION_COMMAND_TYPE_SIMPLE:
        return False
    action = command.GetData()
    if action == 'manage_file_urls':
        self._OKParent()
    elif action == 'set_search_focus':
        self._SetSearchFocus()
    else:
        return False
    return True
class RepairFileSystemPanel( ClientGUIScrolledPanels.ManagePanel ):
def __init__( self, parent, missing_locations ):
    # Boot-time repair dialog for missing file storage subdirectories.
    # missing_locations: iterable of ( incorrect_location, prefix ) pairs.
    ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
    self._only_thumbs = True
    # prefix -> last-known (now wrong) base location
    self._incorrect_locations = {}
    # prefix -> ( candidate base location, exists? )
    self._correct_locations = {}
    for ( incorrect_location, prefix ) in missing_locations:
        self._incorrect_locations[ prefix ] = incorrect_location
        # 'f'-prefixed subdirectories hold original files; the rest ('t'/'r')
        # hold regenerable thumbnails
        if prefix.startswith( 'f' ):
            self._only_thumbs = False
    # explanatory banner text shown at the top of the dialog
    text = 'This dialog has launched because some expected file storage directories were not found. This is a serious error. You have two options:'
    text += os.linesep * 2
    text += '1) If you know what these should be (e.g. you recently remapped their external drive to another location), update the paths here manually. For most users, this will be clicking _add a possibly correct location_ and then select the new folder where the subdirectories all went. You can repeat this if your folders are missing in multiple locations. Check everything reports _ok!_'
    text += os.linesep * 2
    text += 'Although it is best if you can find everything, you only _have_ to fix the subdirectories starting with \'f\', which store your original files. Those starting \'t\' and \'r\' are for your thumbnails, which can be regenerated with a bit of work.'
    text += os.linesep * 2
    text += 'Then hit \'apply\', and the client will launch. You should double-check all your locations under database->migrate database immediately.'
    text += os.linesep * 2
    text += '2) If the locations are not available, or you do not know what they should be, or you wish to fix this outside of the program, hit \'cancel\' to gracefully cancel client boot. Feel free to contact hydrus dev for help.'
    if self._only_thumbs:
        text += os.linesep * 2
        text += 'SPECIAL NOTE FOR YOUR SITUATION: The only paths missing are thumbnail paths. If you cannot recover these folders, you can hit apply to create empty paths at the original or corrected locations and then run a maintenance routine to regenerate the thumbnails from their originals.'
    st = ClientGUICommon.BetterStaticText( self, text )
    st.setWordWrap( True )
    # ( column title, width ) pairs; -1 means stretch to fill
    columns = [ ( 'missing location', -1 ), ( 'expected subdirectory', 23 ), ( 'correct location', 36 ), ( 'now ok?', 9 ) ]
    self._locations = ClientGUIListCtrl.BetterListCtrl( self, 'repair_locations', 12, 36, columns, self._ConvertPrefixToListCtrlTuples, activation_callback = self._SetLocations )
    self._set_button = ClientGUICommon.BetterButton( self, 'set correct location', self._SetLocations )
    self._add_button = ClientGUICommon.BetterButton( self, 'add a possibly correct location (let the client figure out what it contains)', self._AddLocation )
    # add a button here for 'try to fill them in for me'. you give it a dir, and it tries to figure out and fill in the prefixes for you
    #
    self._locations.AddDatas( [ prefix for ( incorrect_location, prefix ) in missing_locations ] )
    self._locations.Sort( 0 )
    #
    vbox = QP.VBoxLayout()
    QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, self._locations, CC.FLAGS_EXPAND_PERPENDICULAR )
    QP.AddToLayout( vbox, self._set_button, CC.FLAGS_LONE_BUTTON )
    QP.AddToLayout( vbox, self._add_button, CC.FLAGS_LONE_BUTTON )
    self.widget().setLayout( vbox )
def _AddLocation( self ):
    # Ask for a candidate base directory and mark every known prefix that is
    # actually found under it as corrected.
    with QP.DirDialog( self, 'Select the potential correct location.' ) as dlg:
        if dlg.exec() != QW.QDialog.Accepted:
            return
        candidate_path = dlg.GetPath()
        for prefix in self._locations.GetData():
            if os.path.exists( os.path.join( candidate_path, prefix ) ):
                self._correct_locations[ prefix ] = ( candidate_path, True )
        self._locations.UpdateDatas()
def _ConvertPrefixToListCtrlTuples( self, prefix ):
    # Produce the ( display, sort ) row tuples the list control expects for
    # one storage prefix.
    incorrect_location = self._incorrect_locations[ prefix ]
    if prefix in self._correct_locations:
        ( correct_location, ok ) = self._correct_locations[ prefix ]
        pretty_ok = 'ok!' if ok else 'not found'
    else:
        correct_location = ''
        ok = None
        pretty_ok = ''
    display_tuple = ( incorrect_location, prefix, correct_location, pretty_ok )
    sort_tuple = ( incorrect_location, prefix, correct_location, ok )
    return ( display_tuple, sort_tuple )
def _SetLocations( self ):
    # Assign a user-chosen directory to every selected prefix, recording
    # whether the expected subdirectory actually exists there.
    selected_prefixes = self._locations.GetData( only_selected = True )
    if len( selected_prefixes ) == 0:
        return
    with QP.DirDialog( self, 'Select correct location.' ) as dlg:
        if dlg.exec() == QW.QDialog.Accepted:
            chosen_path = dlg.GetPath()
            for prefix in selected_prefixes:
                found = os.path.exists( os.path.join( chosen_path, prefix ) )
                self._correct_locations[ prefix ] = ( chosen_path, found )
            self._locations.UpdateDatas()
def CommitChanges( self ):
correct_rows = []
thumb_problems = False
for prefix in self._locations.GetData():
incorrect_location = self._incorrect_locations[ prefix ]
if prefix not in self._correct_locations:
if prefix.startswith( 'f' ):
raise HydrusExceptions.VetoException( 'You did not correct all the file locations!' )
else:
thumb_problems = True
correct_location = incorrect_location
else:
( correct_location, ok ) = self._correct_locations[ prefix ]
if not ok:
if prefix.startswith( 'f' ):
raise HydrusExceptions.VetoException( 'You did not find all the correct file locations!' )
else:
thumb_problems = True
correct_rows.append( ( incorrect_location, prefix, correct_location ) )
if thumb_problems:
message = 'Some or all of your incorrect paths have not been corrected, but they are all thumbnail paths.'
message += os.linesep * 2
message += 'Would you like instead to create new empty subdirectories at the previous (or corrected, if you have entered them) locations?'
message += os.linesep * 2
message += 'You can run database->regenerate->thumbnails to | |
(lnext[0] - lower[li][0])
ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
return xnext
def func_78cafb3f55af4821b4ab185596e77daf(li, unext, lnext, lower):
    """Fractional x-position of ``unext`` between ``lower[li]`` and ``lnext``.

    Assumes lnext[0] != lower[li][0] -- TODO confirm callers guarantee this.
    The original also interpolated the lower edge's y, which was never used;
    that dead computation has been removed.
    """
    xnext = unext[0]
    return 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
def func_1de6f325b48b4c2eb848027b942ddccc(li, unext, lnext, lower):
    """Linearly interpolate the lower edge's y at ``unext``'s x-coordinate."""
    base_x = lower[li][0]
    base_y = lower[li][1]
    frac = 1.0 * (unext[0] - base_x) / (lnext[0] - base_x)
    return base_y + frac * (lnext[1] - base_y)
def func_06d2eee39bff4319a01d802082235d31(li, unext, lnext, xnext, lower):
    """Interpolate the lower edge's y at x == ``xnext``.

    ``unext`` is no longer read (the original computed an unused gap from it);
    the parameter is kept for interface compatibility.
    """
    frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
    return lower[li][1] + frac * (lnext[1] - lower[li][1])
def func_de223f7766814ab486223883b850334e(li, unext, lnext, xnext, lower):
    """Vertical gap between ``unext`` and the lower edge interpolated at ``xnext``."""
    base_x = lower[li][0]
    base_y = lower[li][1]
    frac = 1.0 * (xnext - base_x) / (lnext[0] - base_x)
    interpolated_lower_y = base_y + frac * (lnext[1] - base_y)
    return unext[1] - interpolated_lower_y
def func_7745a84b6d30440ab598465094891377(li, unext, lnext, xnext, lower):
    """Fractional x-position of ``xnext`` between ``lower[li]`` and ``lnext``.

    The original also computed an interpolated y and a gap to ``unext``; both
    were unused and have been removed (``unext`` kept for interface
    compatibility).
    """
    return 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
def func_71eaa3d83e8b4437be7ff83366658fed(li, frac, unext, lnext, lower, ui=0):
    """Return the upper-edge index advanced by one.

    Bug fix: the original did ``ui += 1`` on a local that was never defined,
    raising UnboundLocalError on every call. ``ui`` is now a parameter with a
    backward-compatible default of 0. The dead interpolation computations
    were removed.
    """
    return ui + 1
def func_405bfda1d08549e386569a8579825ebb(li, frac, unext, lnext, lower):
    """Gap between ``unext``'s y and the lower edge interpolated at ``frac``.

    Bug fix: removed ``ui += 1`` -- ``ui`` was never defined, so the original
    raised UnboundLocalError on every call before it could return.
    """
    ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
    return unext[1] - ylower
def func_835ffb291b15465688cadf05371704d6(li, frac, unext, lnext, lower):
    """Interpolate the lower edge's y at fractional position ``frac``.

    Bug fix: removed ``ui += 1`` on a never-defined local (UnboundLocalError
    on every call) plus the dead gap computation; ``unext`` kept for
    interface compatibility.
    """
    return lower[li][1] + frac * (lnext[1] - lower[li][1])
def func_02b7045761be4c569715f5009733f9c0(li, unext, lnext, lower):
    """Interpolate the lower edge's y at ``unext``'s x-coordinate.

    The original also computed the unused gap ``h2``; removed as dead code.
    """
    xnext = unext[0]
    frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
    return lower[li][1] + frac * (lnext[1] - lower[li][1])
def func_738223841ada4beb839291e2bab629a5(li, unext, lnext, lower):
    """Return ``unext``'s x-coordinate.

    Every other computation in the original (frac, ylower, h2) was dead and
    has been removed; the extra parameters are kept for interface
    compatibility.
    """
    return unext[0]
def func_547180f651d84e1f98010ec35d8ae306(li, unext, lnext, lower):
    """Fractional x-position of ``unext`` between ``lower[li]`` and ``lnext``.

    Dead computations of the interpolated y and gap were removed.
    """
    xnext = unext[0]
    return 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
def func_dce2fe5516a8419aad7c944e070e4376(li, unext, lnext, lower):
    """Vertical gap between ``unext`` and the lower edge at ``unext``'s x."""
    base_x = lower[li][0]
    base_y = lower[li][1]
    frac = 1.0 * (unext[0] - base_x) / (lnext[0] - base_x)
    lower_y_at_x = base_y + frac * (lnext[1] - base_y)
    return unext[1] - lower_y_at_x
def func_ea12da6d49214a37a989be0a9d440491(li, unext, lnext, xnext, lower, ui=0):
    """Return the upper-edge index advanced by one.

    Bug fix: ``ui`` was incremented without ever being defined
    (UnboundLocalError on every call); it is now a parameter with a
    backward-compatible default of 0. Dead interpolation code removed.
    """
    return ui + 1
def func_00bcdfdce7d0440892755d9076192840(li, unext, lnext, xnext, lower):
    """Fractional x-position of ``xnext`` between ``lower[li]`` and ``lnext``.

    Bug fix: removed ``ui += 1`` on a never-defined local (UnboundLocalError
    on every call) and the dead gap computation.
    """
    return 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
def func_7646456bc6774d6e9ec0350241564073(li, unext, lnext, xnext, lower):
    """Gap between ``unext``'s y and the lower edge interpolated at ``xnext``.

    Bug fix: removed ``ui += 1`` on a never-defined local, which raised
    UnboundLocalError before the original could return.
    """
    frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
    ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
    return unext[1] - ylower
def func_173edb86cf6047a19b07c898ac7497e3(li, unext, lnext, xnext, lower):
    """Interpolate the lower edge's y at x == ``xnext``.

    Bug fix: removed ``ui += 1`` on a never-defined local (UnboundLocalError)
    and the dead gap computation; ``unext`` kept for interface compatibility.
    """
    frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
    return lower[li][1] + frac * (lnext[1] - lower[li][1])
def func_22949779e1054f73af6ad6010f924187(li, unext, lnext, lower):
    """Interpolate the lower edge's y at ``unext``'s x-coordinate.

    Bug fix: removed ``ui += 1`` on a never-defined local (UnboundLocalError
    on every call) and the dead gap computation.
    """
    xnext = unext[0]
    frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
    return lower[li][1] + frac * (lnext[1] - lower[li][1])
def func_080c419843f544bcad882a4d3924c476(li, unext, lnext, lower):
    """Vertical gap between ``unext`` and the lower edge at ``unext``'s x.

    Bug fix: removed ``ui += 1`` on a never-defined local, which raised
    UnboundLocalError before the original could return.
    """
    xnext = unext[0]
    frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
    ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
    return unext[1] - ylower
def func_72db9684fca1499e8ea0af8890799fd2(li, unext, lnext, lower):
    """Fractional x-position of ``unext`` between ``lower[li]`` and ``lnext``.

    Bug fix: removed ``ui += 1`` on a never-defined local (UnboundLocalError
    on every call); dead computations of ylower/h2 removed as well.
    """
    xnext = unext[0]
    return 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
def func_01d21ecec0a94cdf9a2f541a44f4ba70(li, unext, lnext, lower):
    """Return ``unext``'s x-coordinate.

    Bug fix: removed ``ui += 1`` on a never-defined local (UnboundLocalError
    on every call); all other computations were dead and have been removed.
    """
    return unext[0]
def func_d3833033353144beaaf3f73eba4c9b80(li, unext, lnext, lower, ui=0):
    """Return the upper-edge index advanced by one.

    Bug fix: ``ui`` was incremented and returned without ever being defined
    (UnboundLocalError on every call); it is now a parameter with a
    backward-compatible default of 0. Dead interpolation code removed.
    """
    return ui + 1
def func_04d4818806de4e34921847df3451b277(ui, li, upper, lower):
    """Return the lower-edge vertex following index ``li``.

    The original also fetched the next upper vertex, which was unused; that
    dead lookup was removed (``ui``/``upper`` kept for interface
    compatibility).
    """
    return lower[li + 1]
def func_2f94794af44c40ea844081582af6008f(ui, li, upper, lower):
    """Return the upper-edge vertex following index ``ui``.

    The original also fetched the next lower vertex, which was unused; that
    dead lookup was removed (``li``/``lower`` kept for interface
    compatibility).
    """
    return upper[ui + 1]
def func_04b627d8366e4c5e83a112bae6ebb479(lnext, upper, lower, ui=0, li=0):
    """Step the two-polyline sweep one event forward and return ``frac``.

    Bug fix: ``ui`` and ``li`` were read without ever being defined (NameError
    or UnboundLocalError on every call); they are now parameters with
    backward-compatible defaults of 0.

    NOTE(review): when lnext[0] == unext[0] the original never assigns
    ``frac`` and this still raises UnboundLocalError -- the intended value in
    that branch is unclear from the snippet; TODO confirm with the source
    algorithm before relying on it.
    """
    unext = upper[ui + 1]
    if lnext[0] == unext[0]:
        # both edges advance to the same x; no interpolation needed
        xnext = lnext[0]
        h2 = unext[1] - lnext[1]
        li += 1
        ui += 1
    elif lnext[0] < upper[ui + 1][0]:
        # lower vertex comes first: interpolate the upper edge at its x
        xnext = lnext[0]
        frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
        yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
        h2 = yupper - lnext[1]
        li += 1
    else:
        # upper vertex comes first: interpolate the lower edge at its x
        xnext = unext[0]
        frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
        ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
        h2 = unext[1] - ylower
        ui += 1
    return frac
def func_4e7e5c47f9ba418b88a5e8aedb8de95c(lnext, upper, lower, ui=0, li=0):
    """Step the two-polyline sweep one event forward and return ``xnext``,
    the x-coordinate of the next vertex event.

    Bug fix: ``ui`` and ``li`` were read without ever being defined (NameError
    or UnboundLocalError on every call); they are now parameters with
    backward-compatible defaults of 0.
    """
    unext = upper[ui + 1]
    if lnext[0] == unext[0]:
        # both edges advance to the same x
        xnext = lnext[0]
        h2 = unext[1] - lnext[1]
        li += 1
        ui += 1
    elif lnext[0] < upper[ui + 1][0]:
        # lower vertex comes first
        xnext = lnext[0]
        frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
        yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
        h2 = yupper - lnext[1]
        li += 1
    else:
        # upper vertex comes first
        xnext = unext[0]
        frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
        ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
        h2 = unext[1] - ylower
        ui += 1
    return xnext
def func_9e752507439e471a8540f19657497480(lnext, upper, lower, ui=0, li=0):
    """Step the two-polyline sweep one event forward and return ``h2``, the
    vertical gap between the upper and lower edges at the event x.

    Bug fix: ``ui`` and ``li`` were read without ever being defined (NameError
    or UnboundLocalError on every call); they are now parameters with
    backward-compatible defaults of 0.
    """
    unext = upper[ui + 1]
    if lnext[0] == unext[0]:
        # both edges advance to the same x
        h2 = unext[1] - lnext[1]
        li += 1
        ui += 1
    elif lnext[0] < upper[ui + 1][0]:
        # lower vertex comes first: interpolate the upper edge at its x
        xnext = lnext[0]
        frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
        yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
        h2 = yupper - lnext[1]
        li += 1
    else:
        # upper vertex comes first: interpolate the lower edge at its x
        xnext = unext[0]
        frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
        ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
        h2 = unext[1] - ylower
        ui += 1
    return h2
def func_b4ab78e1e0dc4daf9daa63384412fa48(lnext, upper, lower, ui=0, li=0):
    """Step the two-polyline sweep one event forward and return the updated
    upper-edge index ``ui`` (advanced only when the upper vertex is consumed).

    Bug fix: ``ui`` and ``li`` were read without ever being defined (NameError
    or UnboundLocalError on every call); they are now parameters with
    backward-compatible defaults of 0.
    """
    unext = upper[ui + 1]
    if lnext[0] == unext[0]:
        # both vertices consumed
        h2 = unext[1] - lnext[1]
        li += 1
        ui += 1
    elif lnext[0] < upper[ui + 1][0]:
        # only the lower vertex is consumed; ui is unchanged
        xnext = lnext[0]
        frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
        yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
        h2 = yupper - lnext[1]
        li += 1
    else:
        # only the upper vertex is consumed
        xnext = unext[0]
        frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
        ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
        h2 = unext[1] - ylower
        ui += 1
    return ui
def func_20ec254bfbd9463999ceaaccaa521d55(lnext, upper, lower, ui=0, li=0):
    """Step the two-polyline sweep one event forward and return ``unext``,
    the upper vertex following the (incoming) index ``ui``.

    Bug fix: ``ui`` and ``li`` were read without ever being defined (NameError
    or UnboundLocalError on every call); they are now parameters with
    backward-compatible defaults of 0.
    """
    unext = upper[ui + 1]
    if lnext[0] == unext[0]:
        # both edges advance to the same x
        xnext = lnext[0]
        h2 = unext[1] - lnext[1]
        li += 1
        ui += 1
    elif lnext[0] < upper[ui + 1][0]:
        # lower vertex comes first
        xnext = lnext[0]
        frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
        yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
        h2 = yupper - lnext[1]
        li += 1
    else:
        # upper vertex comes first
        xnext = unext[0]
        frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
        ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
        h2 = unext[1] - ylower
        ui += 1
    return unext
def func_d1d4de159dcd4ce594db05e3214c4138(lnext, upper, lower):
unext = upper[ui + 1]
if lnext[0] == unext[0]:
xnext = lnext[0]
h2 = unext[1] - lnext[1]
li += 1
ui += 1
elif lnext[0] < upper[ui + 1][0]:
xnext = lnext[0]
frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
h2 = yupper - lnext[1]
li += | |
}
},
TaskInvocationParameters={
'RunCommand': {
'Comment': 'string',
'DocumentHash': 'string',
'DocumentHashType': 'Sha256'|'Sha1',
'NotificationConfig': {
'NotificationArn': 'string',
'NotificationEvents': [
'All'|'InProgress'|'Success'|'TimedOut'|'Cancelled'|'Failed',
],
'NotificationType': 'Command'|'Invocation'
},
'OutputS3BucketName': 'string',
'OutputS3KeyPrefix': 'string',
'Parameters': {
'string': [
'string',
]
},
'ServiceRoleArn': 'string',
'TimeoutSeconds': 123
},
'Automation': {
'DocumentVersion': 'string',
'Parameters': {
'string': [
'string',
]
}
},
'StepFunctions': {
'Input': 'string',
'Name': 'string'
},
'Lambda': {
'ClientContext': 'string',
'Qualifier': 'string',
'Payload': b'bytes'
}
},
Priority=123,
MaxConcurrency='string',
MaxErrors='string',
LoggingInfo={
'S3BucketName': 'string',
'S3KeyPrefix': 'string',
'S3Region': 'string'
},
Name='string',
Description='string',
ClientToken='string'
)
**Response Syntax**
::
{
'WindowTaskId': 'string'
}
**Response Structure**
- *(dict) --*
- **WindowTaskId** *(string) --*
The ID of the task in the Maintenance Window.
:type WindowId: string
:param WindowId: **[REQUIRED]**
The ID of the Maintenance Window the task should be added to.
:type Targets: list
:param Targets: **[REQUIRED]**
The targets (either instances or Maintenance Window targets).
Specify instances using the following format:
``Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>``
Specify Maintenance Window targets using the following format:
``Key=<WindowTargetIds>,Values=<window-target-id-1>,<window-target-id-2>``
- *(dict) --*
An array of search criteria that targets instances using a Key,Value combination that you specify. ``Targets`` is required if you don\'t provide one or more instance IDs in the call.
- **Key** *(string) --*
User-defined criteria for sending commands that target instances that meet the criteria. ``Key`` can be ``tag:<Amazon EC2 tag>`` or ``InstanceIds`` . For more information about how to send commands that target instances using ``Key,Value`` parameters, see `Using Targets and Rate Controls to Send Commands to a Fleet <https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting>`__ in the *AWS Systems Manager User Guide* .
- **Values** *(list) --*
User-defined criteria that maps to ``Key`` . For example, if you specified ``tag:ServerRole`` , you could specify ``value:WebServer`` to run a command on instances that include Amazon EC2 tags of ``ServerRole,WebServer`` . For more information about how to send commands that target instances using ``Key,Value`` parameters, see `Using Targets and Rate Controls to Send Commands to a Fleet <https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html>`__ in the *AWS Systems Manager User Guide* .
- *(string) --*
:type TaskArn: string
:param TaskArn: **[REQUIRED]**
The ARN of the task to run.
:type ServiceRoleArn: string
:param ServiceRoleArn:
The role to assume when running the Maintenance Window task.
If you do not specify a service role ARN, Systems Manager will use your account\'s service-linked role for Systems Manager by default. If no service-linked role for Systems Manager exists in your account, it will be created when you run ``RegisterTaskWithMaintenanceWindow`` without specifying a service role ARN.
For more information, see `Service-Linked Role Permissions for Systems Manager <http://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions>`__ and `Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance Window Tasks? <http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role>`__ in the *AWS Systems Manager User Guide* .
:type TaskType: string
:param TaskType: **[REQUIRED]**
The type of task being registered.
:type TaskParameters: dict
:param TaskParameters:
The parameters that should be passed to the task when it is run.
.. note::
``TaskParameters`` has been deprecated. To specify parameters to pass to a task when it runs, instead use the ``Parameters`` option in the ``TaskInvocationParameters`` structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .
- *(string) --*
- *(dict) --*
Defines the values for a task parameter.
- **Values** *(list) --*
This field contains an array of 0 or more strings, each 1 to 255 characters in length.
- *(string) --*
:type TaskInvocationParameters: dict
:param TaskInvocationParameters:
The parameters that the task should use during execution. Populate only the fields that match the task type. All other fields should be empty.
- **RunCommand** *(dict) --*
The parameters for a RUN_COMMAND task type.
- **Comment** *(string) --*
Information about the command(s) to run.
- **DocumentHash** *(string) --*
The SHA-256 or SHA-1 hash created by the system when the document was created. SHA-1 hashes have been deprecated.
- **DocumentHashType** *(string) --*
SHA-256 or SHA-1. SHA-1 hashes have been deprecated.
- **NotificationConfig** *(dict) --*
Configurations for sending notifications about command status changes on a per-instance basis.
- **NotificationArn** *(string) --*
An Amazon Resource Name (ARN) for a Simple Notification Service (SNS) topic. Run Command pushes notifications about command status changes to this topic.
- **NotificationEvents** *(list) --*
The different events for which you can receive notifications. These events include the following: All (events), InProgress, Success, TimedOut, Cancelled, Failed. To learn more about these events, see `Configuring Amazon SNS Notifications for Run Command <http://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html>`__ in the *AWS Systems Manager User Guide* .
- *(string) --*
- **NotificationType** *(string) --*
Command: Receive notification when the status of a command changes. Invocation: For commands sent to multiple instances, receive notification on a per-instance basis when the status of a command changes.
- **OutputS3BucketName** *(string) --*
The name of the Amazon S3 bucket.
- **OutputS3KeyPrefix** *(string) --*
The Amazon S3 bucket subfolder.
- **Parameters** *(dict) --*
The parameters for the RUN_COMMAND task execution.
- *(string) --*
- *(list) --*
- *(string) --*
- **ServiceRoleArn** *(string) --*
The IAM service role to assume during task execution.
- **TimeoutSeconds** *(integer) --*
If this time is reached and the command has not already started running, it doesn\'t run.
- **Automation** *(dict) --*
The parameters for an AUTOMATION task type.
- **DocumentVersion** *(string) --*
The version of an Automation document to use during task execution.
- **Parameters** *(dict) --*
The parameters for the AUTOMATION task.
For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow and UpdateMaintenanceWindowTask .
.. note::
``LoggingInfo`` has been deprecated. To specify an S3 bucket to contain logs, instead use the ``OutputS3BucketName`` and ``OutputS3KeyPrefix`` options in the ``TaskInvocationParameters`` structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .
``TaskParameters`` has been deprecated. To specify parameters to pass to a task when it runs, instead use the ``Parameters`` option in the ``TaskInvocationParameters`` structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .
For AUTOMATION task types, Systems Manager ignores any values specified for these parameters.
- *(string) --*
- *(list) --*
- *(string) --*
- **StepFunctions** *(dict) --*
The parameters for a STEP_FUNCTION task type.
- **Input** *(string) --*
The inputs for the STEP_FUNCTION task.
- **Name** *(string) --*
The name of the STEP_FUNCTION task.
- **Lambda** *(dict) --*
The parameters for a LAMBDA task type.
- **ClientContext** *(string) --*
Pass client-specific information to the Lambda function that you are invoking. You can then process the client information in your Lambda function as you choose through the context variable.
- **Qualifier** *(string) --*
(Optional) Specify a Lambda function version or alias name. If you specify a function version, the action uses the qualified function ARN to invoke a specific Lambda function. If you specify an alias name, the action uses the alias ARN to invoke the Lambda function version to which the alias points.
- **Payload** *(bytes) --*
JSON to provide to your Lambda function as input.
:type Priority: integer
:param Priority:
The priority of the task in the Maintenance Window, the lower the number the higher the priority. Tasks in a Maintenance Window are scheduled in priority order with tasks that have the same priority scheduled in parallel.
:type MaxConcurrency: string
:param MaxConcurrency: **[REQUIRED]**
The maximum number of targets this task can be run for in parallel.
:type MaxErrors: string
:param MaxErrors: **[REQUIRED]**
The maximum number of errors allowed before this task stops being scheduled.
:type LoggingInfo: dict
:param LoggingInfo:
A structure containing information about an Amazon S3 bucket to write instance-level logs to.
.. note::
``LoggingInfo`` has been deprecated. To specify an S3 bucket to contain logs, instead use the ``OutputS3BucketName`` and ``OutputS3KeyPrefix`` options in the ``TaskInvocationParameters`` structure. For information about how Systems Manager handles these options for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters .
- | |
# Source repository: davidgcameron/arc
from __future__ import print_function
from __future__ import absolute_import
from .ControlCommon import *
import subprocess
import sys
import re
import pickle
import time
import pwd
import signal
# Python 2/3 compatibility shim: make input() behave like py2's raw_input()
try:
    input = raw_input # Redefine for Python 2
except NameError:
    pass # Python 3: raw_input does not exist and input() is already safe
def complete_job_owner(prefix, parsed_args, **kwargs):
    # argcomplete callback: candidate job owners for shell completion
    jobs_control = JobsControl(get_parsed_arcconf(parsed_args.config))
    return jobs_control.complete_owner(parsed_args)
def complete_job_id(prefix, parsed_args, **kwargs):
    # argcomplete callback: candidate job IDs for shell completion
    jobs_control = JobsControl(get_parsed_arcconf(parsed_args.config))
    return jobs_control.complete_job(parsed_args)
class JobsControl(ComponentControl):
def __init__(self, arcconfig):
    """Set up jobs control on top of A-REX's gm-jobs tool and parsed arc.conf.

    Exits the process (sys.exit(1)) when gm-jobs, the parsed config, or the
    configured controldir is missing -- jobs control is impossible without
    them.
    """
    self.logger = logging.getLogger('ARCCTL.Jobs')
    self.control_dir = None
    self.arcconfig = arcconfig
    # arcctl is inside arex package as well as gm-jobs
    self.gm_jobs = ARC_LIBEXEC_DIR + '/gm-jobs'
    if not os.path.exists(self.gm_jobs):
        # fixed typos in the original message ("It seams you A-REX install")
        self.logger.error('A-REX gm-jobs is not found at %s. It seems your A-REX install is broken.', self.gm_jobs)
        sys.exit(1)
    # config is mandatory
    if arcconfig is None:
        self.logger.error('Failed to get parsed arc.conf. Jobs control is not possible.')
        sys.exit(1)
    # controldir is mandatory. Bug fix: check for a missing value *before*
    # stripping the trailing slash -- the original called .rstrip('/') on a
    # possibly-None get_value() result and crashed with AttributeError
    # instead of reaching its own None check.
    control_dir = self.arcconfig.get_value('controldir', 'arex')
    if control_dir is None:
        self.logger.critical('Jobs control is not possible without controldir.')
        sys.exit(1)
    self.control_dir = control_dir.rstrip('/')
    self.logger.debug('Using controldir location: %s', self.control_dir)
    # construct the path to A-REX runtime configuration
    # using configuration other that A-REX has is not consistent
    arex_pidfile = self.arcconfig.get_value('pidfile', 'arex')
    controldir_fallback = True
    if arex_pidfile is not None:
        arex_runconf = arex_pidfile.rsplit('.', 1)[0] + '.cfg'
        if os.path.exists(arex_runconf):
            self.logger.debug('Using A-REX runtime configuration (%s) for managing jobs.', arex_runconf)
            controldir_fallback = False
            self.gm_jobs += ' -c {0}'.format(arex_runconf)
    if controldir_fallback:
        self.logger.warning('A-REX runtime configuration is not found. Falling back to directly using '
                            'configured controldir at %s', self.control_dir)
        self.gm_jobs += ' -d {0}'.format(self.control_dir)
    # jobs-list caching knobs and state
    self.cache_min_jobs = 1000
    self.cache_ttl = 30
    self.jobs = {}
    self.process_job_log_file = False  # dummy assignment for job_log follow
def __get_config_value(self, block, option, default_value=None):
    """Return the arc.conf option value, or default_value when unset."""
    value = self.arcconfig.get_value(option, block)
    return default_value if value is None else value
def __run_gmjobs(self, args, stderr=False):
    """Spawn gm-jobs with a log level mirroring arcctl's and extra args.

    Returns the Popen object; stderr is merged into stdout when requested.
    """
    loglevel_names = {50: 'FATAL', 40: 'ERROR', 30: 'WARNING', 20: 'INFO', 10: 'DEBUG'}
    loglevel = logging.getLogger('ARCCTL').getEffectiveLevel()
    command = self.gm_jobs.split() + ['-x', loglevel_names[loglevel]] + args.split()
    self.logger.debug('Running %s', ' '.join(command))
    if stderr:
        return subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return subprocess.Popen(command, stdout=subprocess.PIPE)
@staticmethod
def __xargs_jobs(joblist, cmdarg):
    """Yield '<cmdarg> <jobid>' argument strings batched to stay safely
    below the kernel's MAX_ARG_STRLEN (131072) per-argument limit."""
    _MAX_ARG_STRLEN_LIMIT = 100000
    batch = ''
    for job in joblist:
        batch += ' ' + cmdarg + ' ' + job['id']
        if len(batch) > _MAX_ARG_STRLEN_LIMIT:
            yield batch
            batch = ''
    # flush whatever remains (possibly an empty string, as in the original)
    yield batch
@staticmethod
def __list_job(job_dict, list_long=False):
if list_long:
print('{id} {state:>13}\t{name:<32}\t{userdn}'.format(**job_dict))
else:
print(job_dict['id'])
def __job_exists(self, jobid):
if jobid not in self.jobs:
self.logger.error('There is no such job %s', jobid)
sys.exit(1)
def __parse_job_attrs(self, jobid):
_KEYVALUE_RE = re.compile(r'^([^=]+)=(.*)$')
local_file = '{0}/job.{1}.local'.format(self.control_dir, jobid)
job_attrs = {}
if os.path.exists(local_file):
with open(local_file, 'r') as local_f:
for line in local_f:
kv = _KEYVALUE_RE.match(line)
if kv:
job_attrs[kv.group(1)] = kv.group(2)
job_attrs['mapped_account'] = pwd.getpwuid(os.stat(local_file).st_uid).pw_name
else:
self.__get_jobs()
self.__job_exists(jobid)
self.logger.error('Failed to open job attributes file: %s', local_file)
return job_attrs
def _service_log_print(self, log_path, jobid):
print('### ' + log_path + ':')
if os.path.exists(log_path):
with open(log_path, 'r') as log_f:
for line in log_f:
if jobid in line:
sys.stdout.write(line)
sys.stdout.flush()
else:
self.logger.error('Failed to open service log file: %s', log_path)
    def __get_jobs(self):
        """Populate self.jobs ({jobid: attribute dict}) from 'gm-jobs --longlist'.

        A pickled on-disk cache is reused when younger than self.cache_ttl
        seconds, and (re)written only when more than self.cache_min_jobs jobs
        were found.
        """
        # check cache first
        # NOTE(review): the cache sits at a fixed path in world-writable /tmp and
        # is read back with pickle.load(); a local user could plant a malicious
        # pickle there. Consider a per-user location and/or a safer format.
        __cache_file = '/tmp/.arcctl.jobs.cache'
        if os.path.exists(__cache_file):
            time_allowed = time.time() - self.cache_ttl
            if os.stat(__cache_file).st_mtime > time_allowed:
                with open(__cache_file, 'rb') as cfd:
                    self.logger.debug('Using cached jobs information (cache valid for %s seconds)', self.cache_ttl)
                    self.jobs = pickle.load(cfd)
                    return
        # invoke gm-jobs and parse the list of jobs
        self.jobs = {}
        # 'Job: <id>' starts a record; the attribute regexes match its detail lines
        __JOB_RE = re.compile(r'^Job:\s*(.*)\s*$')
        __JOB_ATTRS = {
            'state': re.compile(r'^\s*State:\s*(.*)\s*$'),
            'modified': re.compile(r'^\s*Modified:\s*(.*)\s*$'),
            'userdn': re.compile(r'^\s*User:\s*(.*)\s*$'),
            'lrmsid': re.compile(r'^\s*LRMS id:\s*(.*)\s*$'),
            'name': re.compile(r'^\s*Name:\s*(.*)\s*$'),
            'from': re.compile(r'^\s*From:\s*(.*)\s*$'),
        }
        gmjobs_out = self.__run_gmjobs('--longlist --notshowstates')
        job_dict = {}
        # NOTE(review): iter(readline, '') assumes the pipe yields str lines
        # (text mode); confirm the Popen in __run_gmjobs delivers str, not bytes.
        for line in iter(gmjobs_out.stdout.readline, ''):
            jobre = __JOB_RE.match(line)
            if jobre:
                if job_dict:
                    # flush the previous record, padding missing attributes
                    for attr in __JOB_ATTRS:
                        if attr not in job_dict:
                            job_dict[attr] = 'N/A'
                    self.jobs[job_dict['id']] = job_dict
                job_dict = {'id': jobre.group(1)}
                continue
            for attr, regex in __JOB_ATTRS.items():
                attr_re = regex.match(line)
                if attr_re:
                    job_dict[attr] = attr_re.group(1)
        if job_dict:
            # flush the final record (the loop only flushes on the next 'Job:')
            for attr in __JOB_ATTRS:
                if attr not in job_dict:
                    job_dict[attr] = 'N/A'
            self.jobs[job_dict['id']] = job_dict
        # dump jobs dictionary to cache in case there are many jobs
        if len(self.jobs) > self.cache_min_jobs:
            with open(__cache_file, 'wb') as cfd:
                self.logger.debug('Dumping jobs information to cache')
                pickle.dump(self.jobs, cfd)
def __filtered_jobs(self, args):
for job_d in self.jobs.values():
if hasattr(args, 'state'):
if args.state:
if job_d['state'] not in args.state:
continue
if hasattr(args, 'owner'):
if args.owner:
if job_d['userdn'] not in args.owner:
continue
yield job_d
def list(self, args):
self.__get_jobs()
for job_d in self.__filtered_jobs(args):
self.__list_job(job_d, args.long)
def kill_or_clean(self, args, action='-k'):
self.__get_jobs()
for jobid in args.jobid:
self.__job_exists(jobid)
__JOB_RE = re.compile(r'^Job:\s*')
gmjobs_out = self.__run_gmjobs('-J -S ' + action + ' ' + ' '.join(args.jobid))
for line in iter(gmjobs_out.stdout.readline, ''):
if __JOB_RE.match(line):
sys.stdout.write(line)
sys.stdout.flush()
def kill_or_clean_all(self, args, action='-k'):
# safe check when killing/cleaning all jobs
if not args.owner and not args.state:
reply = str(input('You have not specified any filters and operation will continue for ALL A-REX jobs. '
'Please type "yes" if it is desired behaviour: '))
if reply != 'yes':
sys.exit(0)
self.__get_jobs()
__JOB_RE = re.compile(r'^Job:\s*')
for argstr in self.__xargs_jobs(self.__filtered_jobs(args), action):
gmjobs_out = self.__run_gmjobs('-J -S' + argstr)
for line in iter(gmjobs_out.stdout.readline, ''):
if __JOB_RE.match(line):
sys.stdout.write(line)
sys.stdout.flush()
    def job_log_signal_handler(self, signum, frame):
        """SIGINT handler: stop the 'follow' loop in job_log gracefully."""
        self.process_job_log_file = False
def job_log(self, args):
error_log = '{0}/job.{1}.errors'.format(self.control_dir, args.jobid)
self.process_job_log_file = True
if os.path.exists(error_log):
el_f = open(error_log, 'r')
print_line = True
pos = 0
if args.follow:
signal.signal(signal.SIGINT, self.job_log_signal_handler)
while self.process_job_log_file:
el_f.seek(pos)
for line in el_f:
if line.startswith('----- starting submit'):
print_line = args.lrms
if line.startswith('----- exiting submit'):
print_line = True
if not args.lrms:
continue
if print_line:
sys.stdout.write(line)
sys.stdout.flush()
pos = el_f.tell()
if not args.follow:
self.process_job_log_file = False
else:
time.sleep(0.1)
el_f.close()
else:
self.__get_jobs()
self.__job_exists(args.jobid)
self.logger.error('Failed to find job log file: %s', error_log)
def job_service_logs(self, args):
# A-REX main log
arex_log = self.__get_config_value('arex', 'logfile', '/var/log/arc/arex.log')
self._service_log_print(arex_log, args.jobid)
# WS interface logs
if self.arcconfig.check_blocks('arex/ws'):
arexws_log = self.__get_config_value('arex/ws', 'logfile', '/var/log/arc/ws-interface.log')
self._service_log_print(arexws_log, args.jobid)
# GridFTP interface logs
if self.arcconfig.check_blocks('gridftpd'):
gridftpd_log = self.__get_config_value('gridftpd', 'logfile', '/var/log/arc/gridftpd.log')
self._service_log_print(gridftpd_log, args.jobid)
# A-REX jobs log
arexjob_log = self.__get_config_value('a-rex', 'joblog')
if arexjob_log is not None:
self._service_log_print(arexjob_log, args.jobid)
def jobinfo(self, args):
self.__get_jobs()
self.__job_exists(args.jobid)
print('Name\t\t: {name}\n' \
'Owner\t\t: {userdn}\n' \
'State\t\t: {state}\n' \
'LRMS ID\t\t: {lrmsid}\n' \
'Modified\t: {modified}'.format(**self.jobs[args.jobid]))
def job_getattr(self, args):
job_attrs = self.__parse_job_attrs(args.jobid)
if args.attr:
if args.attr in job_attrs:
print(job_attrs[args.attr])
else:
self.logger.error('There is no such attribute \'%s\' defined for job %s', args.attr, args.jobid)
else:
for k, v in job_attrs.items():
print('{0:<32}: {1}'.format(k, v))
    def job_stats(self, args):
        """Show job statistics parsed from 'gm-jobs -J' output.

        Three report flavors: per-state counters (default), totals
        (args.total) and data-staging counters (args.data_staging);
        args.long expands each with descriptions or extra detail.
        """
        # per-state lines look like 'STATE: <processing> (<waiting>)'
        __RE_JOBSTATES = re.compile(r'^\s*([A-Z]+):\s+([0-9]+)\s+\(([0-9]+)\)\s*$')
        # totals look like 'Name: <jobs>/<limit>'; limit -1 means unlimited
        __RE_TOTALSTATS = re.compile(r'^\s*([A-Za-z]+):\s+([0-9]+)/([-0-9]+)\s*$')
        # data staging counters: 'Processing: <downloads>+<uploads>'
        __RE_DATASTATS = re.compile(r'^\s*Processing:\s+([0-9]+)\+([0-9]+)\s*$')
        jobstates = []
        totalstats = []
        data_download = 0
        data_upload = 0
        # collect information from gm-jobs
        gmjobs_out = self.__run_gmjobs('-J')
        for line in iter(gmjobs_out.stdout.readline, ''):
            js_re = __RE_JOBSTATES.match(line)
            if js_re:
                state = js_re.group(1)
                jobstates.append({'processing': js_re.group(2), 'waiting': js_re.group(3), 'state': state})
                continue
            t_re = __RE_TOTALSTATS.match(line)
            if t_re:
                limit = t_re.group(3)
                if limit == '-1':
                    limit = 'unlimited'
                totalstats.append({'jobs': t_re.group(2), 'limit': limit, 'state': t_re.group(1)})
                continue
            ds_re = __RE_DATASTATS.match(line)
            if ds_re:
                data_download = ds_re.group(1)
                data_upload = ds_re.group(2)
        # show total stats if requested
        if args.total:
            for t in totalstats:
                if args.long:
                    # replace the short state tag with a human-readable description
                    if t['state'] == 'Accepted':
                        t['state'] = 'Total number of jobs accepted for further processing by A-REX'
                    elif t['state'] == 'Running':
                        t['state'] = 'Total number of jobs running in LRMS backend'
                    elif t['state'] == 'Total':
                        t['state'] = 'Total number of jobs managed by A-REX (including completed)'
                    print('{state}\n  Jobs: {jobs:>15}\n  Limit: {limit:>15}'.format(**t))
                else:
                    print('{state:>11}: {jobs:>8} of {limit}'.format(**t))
        # show datastaging stats
        elif args.data_staging:
            if args.long:
                print('Processing jobs in data-staging:')
                print('  Downloading: {0:>9}'.format(data_download))
                print('  Uploading: {0:>9}'.format(data_upload))
                # add detailed stats from gm-jobs on long output
                gmjobs_out = self.__run_gmjobs('-s')
                for line in iter(gmjobs_out.stdout.readline, ''):
                    print(line, end='')
            else:
                print('{0:>11}: {1:>8}'.format('Downloading', data_download))
                print('{0:>11}: {1:>8}'.format('Uploading', data_upload))
        # show general stats per-state by default
        else:
            for s in jobstates:
                if args.long:
                    print('{state}\n  Processing: {processing:>10}\n  Waiting: {waiting:>10}'.format(**s))
                else:
                    print('{state:>11}: {processing:>8} ({waiting})'.format(**s))
def control(self, args):
self.cache_ttl = args.cachettl
if args.action == 'list':
self.list(args)
elif args.action == 'killall':
self.kill_or_clean_all(args, '-k')
elif args.action == 'cleanall':
self.kill_or_clean_all(args, '-r')
elif args.action == 'kill':
self.kill_or_clean(args, '-k')
elif args.action == 'clean':
self.kill_or_clean(args, '-r')
elif args.action == 'info':
self.jobinfo(args)
elif args.action == 'log':
if args.service:
self.job_service_logs(args)
else:
self.job_log(args)
elif args.action == 'attr':
self.job_getattr(args)
elif args.action == 'stats':
self.job_stats(args)
def complete_owner(self, args):
owners = []
self.__get_jobs()
for job_d in self.__filtered_jobs(args):
if job_d['userdn'] not in owners:
owners.append(job_d['userdn'])
return owners
def complete_job(self, args):
self.__get_jobs()
joblist = []
for job_d in self.__filtered_jobs(args):
joblist.append(job_d['id'])
return joblist
| |
"""
Coupling
********
Functions to estimate coupling from twiss dataframes and different methods to calculate the closest tune
approach from the calculated coupling RDTs.
"""
import logging
from contextlib import suppress
from typing import Sequence, Tuple
import numpy as np
from pandas import DataFrame, Series
from tfs import TfsDataFrame
from optics_functions.constants import (ALPHA, BETA, GAMMA, X, Y, TUNE, DELTA,
MINIMUM, PI2, PHASE_ADV, S, LENGTH,
IMAG, REAL, F1010, F1001)
from optics_functions.rdt import calculate_rdts
from optics_functions.utils import split_complex_columns, timeit
COUPLING_RDTS = [F1001, F1010]
LOG = logging.getLogger(__name__)
# Coupling ---------------------------------------------------------------------
def coupling_via_rdts(df: TfsDataFrame, complex_columns: bool = True, **kwargs) -> TfsDataFrame:
    """Compute the coupling RDTs F1001 and F1010 via the generic RDT machinery.

    .. warning::
        The sign of the real part of the RDTs is flipped compared to
        [FranchiAnalyticFormulas2017]_ , to stay consistent with the RDT
        calculations from [CalagaBetatronCoupling2005]_ .

    Args:
        df (TfsDataFrame): Twiss Dataframe.
        complex_columns (bool): If ``True`` keep one complex-valued column per
            RDT, otherwise split each into two real-valued columns.

    Keyword Args:
        **kwargs: Forwarded to :func:`~optics_functions.rdt.calculate_rdts`,
            i.e. ``qx``, ``qy``, ``feeddown``, ``loop_phases``
            and ``hamiltionian_terms``.

    Returns:
        A new ``TfsDataFrame`` with Coupling Columns.
    """
    df_res = calculate_rdts(df, rdts=COUPLING_RDTS, **kwargs)
    for rdt in COUPLING_RDTS:
        # flipping only the sign of the real part is equivalent to f -> -conj(f)
        df_res.loc[:, rdt] = -np.conj(df_res[rdt].to_numpy())
    if not complex_columns:
        df_res = split_complex_columns(df_res, COUPLING_RDTS)
    return df_res
def coupling_via_cmatrix(df: DataFrame, complex_columns: bool = True,
                         output: Sequence[str] = ("rdts", "gamma", "cmatrix")) -> DataFrame:
    """Calculates C matrix then Coupling and Gamma from it.

    See [CalagaBetatronCoupling2005]_ .

    Args:
        df (DataFrame): Twiss Dataframe. Needs the R11..R22 coupling matrix
            columns as well as the alpha and beta columns for both planes.
        complex_columns (bool): Output complex values in single column of type complex.
            If ``False``, split complex columns into two real-valued columns.
        output (Sequence[str]): Combination of 'rdts', 'gamma' and 'cmatrix'.
            Specifies which parameters one wants to output.

    Returns:
        New TfsDataFrame with columns as specified in 'output'.
    """
    LOG.info("Calculating coupling from c-matrix.")
    df_res = DataFrame(index=df.index)
    with timeit("CMatrix calculation", print_fun=LOG.debug):
        n = len(df)
        # one 2x2 matrix per twiss row, stacked along the first axis
        gx, r, inv_gy = np.zeros((n, 2, 2)), np.zeros((n, 2, 2)), np.zeros((n, 2, 2))
        # Eq. (16) C = 1 / (1 + |R|) * -J R J
        # rs form after -J R^T J
        r[:, 0, 0] = df["R22"]
        r[:, 0, 1] = -df["R12"]
        r[:, 1, 0] = -df["R21"]
        r[:, 1, 1] = df["R11"]
        # NOTE(review): the comment above reads 1/(1+|R|) but the code scales by
        # 1/sqrt(1+det R) — confirm against Eq. (16) of the reference.
        r *= 1 / np.sqrt(1 + np.linalg.det(r)[:, None, None])
        # Cbar = Gx * C * Gy^-1, Eq. (5)
        sqrt_betax = np.sqrt(df[f"{BETA}{X}"])
        sqrt_betay = np.sqrt(df[f"{BETA}{Y}"])
        gx[:, 0, 0] = 1 / sqrt_betax
        gx[:, 1, 0] = df[f"{ALPHA}{X}"] * gx[:, 0, 0]
        gx[:, 1, 1] = sqrt_betax
        inv_gy[:, 1, 1] = 1 / sqrt_betay
        inv_gy[:, 1, 0] = -df[f"{ALPHA}{Y}"] * inv_gy[:, 1, 1]
        inv_gy[:, 0, 0] = sqrt_betay
        c = np.matmul(gx, np.matmul(r, inv_gy))
        gamma = np.sqrt(1 - np.linalg.det(c))
    if "rdts" in output:
        # Eq. (9) and Eq. (10)
        denom = 1 / (4 * gamma)
        df_res.loc[:, F1001] = denom * (+c[:, 0, 1] - c[:, 1, 0] + (c[:, 0, 0] + c[:, 1, 1]) * 1j)
        df_res.loc[:, F1010] = denom * (-c[:, 0, 1] - c[:, 1, 0] + (c[:, 0, 0] - c[:, 1, 1]) * 1j)
        LOG.info(f"Average coupling amplitude |F1001|: {df_res[F1001].abs().mean():g}")
        LOG.info(f"Average coupling amplitude |F1010|: {df_res[F1010].abs().mean():g}")
        # note: splitting applies only to the RDT columns requested here
        if not complex_columns:
            df_res = split_complex_columns(df_res, COUPLING_RDTS)
    if "cmatrix" in output:
        df_res["C11"] = c[:, 0, 0]
        df_res["C12"] = c[:, 0, 1]
        df_res["C21"] = c[:, 1, 0]
        df_res["C22"] = c[:, 1, 1]
    if "gamma" in output:
        df_res.loc[:, GAMMA] = gamma
        LOG.debug(f"Average gamma: {df_res[GAMMA].mean():g}")
    return df_res
# R-Matrix ---------------------------------------------------------------------
def rmatrix_from_coupling(df: DataFrame, complex_columns: bool = True) -> DataFrame:
    """Calculates the R-matrix from a DataFrame containing the coupling columns
    as well as alpha and beta columns. This is the inverse of
    :func:`optics_functions.coupling.coupling_via_cmatrix`.

    See [CalagaBetatronCoupling2005]_ .

    Args:
        df (DataFrame): Twiss Dataframe.
        complex_columns (bool): Tells the function if the coupling input columns
            are complex-valued or split into real and imaginary parts.

    Returns:
        A new ``DataFrame`` containing the R-columns.
    """
    LOG.info("Calculating r-matrix from coupling rdts.")
    df_res = DataFrame(index=df.index)
    with timeit("R-Matrix calculation", print_fun=LOG.debug):
        if complex_columns:
            # drop=False keeps the original complex columns alongside the
            # REAL/IMAG columns that the cbar assembly below relies on
            df = split_complex_columns(df, COUPLING_RDTS, drop=False)
        n = len(df)
        # From Eq. (5) in reference:
        inv_gx, jcj, gy = np.zeros((n, 2, 2)), np.zeros((n, 2, 2)), np.zeros((n, 2, 2))
        sqrt_betax = np.sqrt(df[f"{BETA}{X}"])
        sqrt_betay = np.sqrt(df[f"{BETA}{Y}"])
        inv_gx[:, 1, 1] = 1 / sqrt_betax
        inv_gx[:, 1, 0] = -df[f"{ALPHA}{X}"] * inv_gx[:, 1, 1]
        inv_gx[:, 0, 0] = sqrt_betax
        gy[:, 0, 0] = 1 / sqrt_betay
        gy[:, 1, 0] = df[f"{ALPHA}{Y}"] * gy[:, 0, 0]
        gy[:, 1, 1] = sqrt_betay
        # Eq. (15)
        if complex_columns:
            abs_squared_diff = df[F1001].abs()**2 - df[F1010].abs()**2
        else:
            abs_squared_diff = (df[f"{F1001}{REAL}"]**2 + df[f"{F1001}{IMAG}"]**2 -
                                df[f"{F1010}{REAL}"]**2 - df[f"{F1010}{IMAG}"]**2)
        gamma = np.sqrt(1.0 / (1.0 + 4.0 * abs_squared_diff))
        # Eq. (11) and Eq. (12)
        cbar = np.zeros((n, 2, 2))
        cbar[:, 0, 0] = (df[f"{F1001}{IMAG}"] + df[f"{F1010}{IMAG}"]).to_numpy()
        cbar[:, 0, 1] = -(df[f"{F1010}{REAL}"] - df[f"{F1001}{REAL}"]).to_numpy()
        cbar[:, 1, 0] = -(df[f"{F1010}{REAL}"] + df[f"{F1001}{REAL}"]).to_numpy()
        cbar[:, 1, 1] = (df[f"{F1001}{IMAG}"] - df[f"{F1010}{IMAG}"]).to_numpy()
        cbar = 2 * gamma.to_numpy()[:, None, None] * cbar
        # Gx^-1 * Cbar * Gy = C (Eq. (5) inverted)
        c = np.matmul(inv_gx, np.matmul(cbar, gy))
        # from above: -J R^T J == inv(R)*det|R| == C
        # therefore -J C^T J = R
        jcj[:, 0, 0] = c[:, 1, 1]
        jcj[:, 0, 1] = -c[:, 0, 1]
        jcj[:, 1, 0] = -c[:, 1, 0]
        jcj[:, 1, 1] = c[:, 0, 0]
        rmat = jcj * np.sqrt(1 / (1 - np.linalg.det(jcj))[:, None, None])
        df_res["R11"] = rmat[:, 0, 0]
        df_res["R12"] = rmat[:, 0, 1]
        df_res["R21"] = rmat[:, 1, 0]
        df_res["R22"] = rmat[:, 1, 1]
    return df_res
# Closest Tune Approach --------------------------------------------------------
def closest_tune_approach(
    df: TfsDataFrame, qx: float = None, qy: float = None, method: str = "teapot"
) -> TfsDataFrame:
    """Calculates the closest tune approach from coupling resonances.

    A complex F1001 column is assumed to be present in the DataFrame.
    This can be calculated by :func:`~optics_functions.rdt.calculate_rdts`,
    :func:`~optics_functions.coupling.coupling_via_rdts` or
    :func:`~optics_functions.coupling.coupling_via_cmatrix`.
    If F1010 is also present it is used, otherwise it is assumed 0.

    The closest tune approach is calculated by means of Eq. (27) in
    [CalagaBetatronCoupling2005]_ (method="teapot" or "calaga") by default,
    or approximated by
    Eq. (1) in [PerssonImprovedControlCoupling2014]_ (method="franchi"),
    Eq. (27) in [CalagaBetatronCoupling2005]_ with the Franchi approximation (method="teapot_franchi"),
    Eq. (2) in [PerssonImprovedControlCoupling2014]_ (method="persson"),
    the latter without the exp(i(Qx-Qy)s/R) term (method="persson_alt"),
    Eq. (14) in [HoydalsvikEvaluationOfTheClosestTuneApproach2021]_ (method="hoydalsvik"),
    or the latter without the exp(i(Qx-Qy)s/R) term (method="hoydalsvik_alt").

    For "persson[_alt]" and "hoydalsvik[_alt]" methods, also MUX and MUY columns
    are needed in the DataFrame as well as LENGTH (of the machine) and S column
    for the "persson" and "hoydalsvik" methods.

    Args:
        df (TfsDataFrame): Twiss Dataframe, needs to have complex-valued F1001 column.
        qx (float): Tune in X-Plane (if not given, header df.Q1 is assumed present).
        qy (float): Tune in Y-Plane (if not given, header df.Q2 is assumed present).
        method (str): Which method to use for evaluation.
                      Choices: "calaga", "teapot", "franchi", "teapot_franchi",
                      "persson", "persson_alt", "hoydalsvik" or "hoydalsvik_alt".

    Returns:
        A new ``TfsDataFrame`` with a closest tune approach (DELTAQMIN) column.
        The value is real for "calaga", "teapot", "teapot_franchi" and "franchi"
        methods. The actual closest tune approach value is the absolute value
        of the mean of this column.
    """
    if F1001 not in df.columns:
        raise KeyError(f"'{F1001}' column not in dataframe. Needed to calculated closest tune approach.")
    method_map = {
        "teapot": _cta_teapot,  # as named in [HoydalsvikEvaluationOfTheClosestTuneApproach2021]_
        "calaga": _cta_teapot,  # for compatibility reasons
        "teapot_franchi": _cta_teapot_franchi,
        "franchi": _cta_franchi,
        "persson": _cta_persson,
        "persson_alt": _cta_persson_alt,
        "hoydalsvik": _cta_hoydalsvik,
        "hoydalsvik_alt": _cta_hoydalsvik_alt,
    }
    if qx is None:
        qx = df.headers[f"{TUNE}1"]
    if qy is None:
        qy = df.headers[f"{TUNE}2"]
    # only the fractional parts of the tunes enter the resonance formulas
    qx_frac, qy_frac = qx % 1, qy % 1
    check_resonance_relation(df)
    dqmin_str = f"{DELTA}{TUNE}{MINIMUM}"
    df_res = TfsDataFrame(index=df.index, columns=[dqmin_str])
    df_res[dqmin_str] = method_map[method.lower()](df, qx_frac, qy_frac)
    LOG.info(f"({method.lower()}) |C-| = {np.abs(df_res[dqmin_str].dropna().mean())}")
    return df_res
def _cta_franchi(df: TfsDataFrame, qx_frac: float, qy_frac: float) -> Series:
    """Closest tune approach via Eq. (1) in [PerssonImprovedControlCoupling2014]_ ."""
    tune_split = qx_frac - qy_frac
    return 4 * tune_split * df[F1001].abs()
def _cta_persson_alt(df: TfsDataFrame, qx_frac: float, qy_frac: float) -> Series:
    """Closest tune approach via Eq. (2) in [PerssonImprovedControlCoupling2014]_ ,
    omitting the exp(i(Qx-Qy)s/R) term.
    """
    tune_split = qx_frac - qy_frac  # fractional tune split
    rotation = np.exp(-1j * PI2 * (df[f"{PHASE_ADV}{X}"] - df[f"{PHASE_ADV}{Y}"]))
    return 4 * tune_split * _get_weights_from_lengths(df) * df[F1001] * rotation
def _cta_persson(df: TfsDataFrame, qx_frac: float, qy_frac: float) | |
`{name}` is not valid")
if name in main_args:
Manager.exit(1, f"[{error_msg_name_part}] Configuration name `{name}` cannot be changed")
if not value:
removed_args.add(name)
elif ELIF_THEN_RE.search(name) or VAR_RE.search(value):
# if the value contains a variable, we only check the name
# same if it's a "elif" or "then" because it depends on the other configuration options
if name not in entity.allowed_args:
if "." not in name:
if isinstance(entity, KeyEvent) and cls.event_set_var_re.match(name):
pass
else:
Manager.exit(1, f"[{error_msg_name_part}] Configuration name `{name}` is not valid")
else:
main_name = name.split(".", 1)[0]
if main_name not in entity.allowed_partial_args or not entity.allowed_partial_args[
main_name
].match(name):
Manager.exit(1, f"[{error_msg_name_part}] Configuration name `{name}` is not valid")
entity.save_raw_arg(name, value, args)
else:
# check name and value
filename = base_filename + f";{name}={value}"
parsed_args = entity.raw_parse_filename(filename, False, entity.path.parent, {}).args
if parsed_args and (name in parsed_args or "VAR" in parsed_args):
entity.save_raw_arg(name, value, args)
else:
Manager.exit(1, f"[{error_msg_name_part}] Configuration `{name} {value}` is not valid")
return args, removed_args
    @classmethod
    def update_filename(cls, entity, names_and_values, main_override=None, error_msg_name_part=None):
        """Build the entity's new configuration filename after applying updates.

        The current name is split into its main part and ';'-separated config
        args; *main_override* is merged into the main part, *names_and_values*
        are validated, and the resulting filename is checked to still parse
        (skipped when it contains variables). Aborts arcctl-style via
        Manager.exit on an invalid result.
        """
        parts = entity.path.name.split(";")
        main_part = parts.pop(0)
        main = entity.main_part_re.match(main_part).groupdict()
        if main_override:
            main |= main_override
        # NOTE(review): the recomposed main_part is not passed to
        # make_new_filename — presumably it is read back from entity state;
        # verify that compose_main_part has that side effect.
        main_part = entity.compose_main_part(main)
        updated_args, removed_args = cls.validate_names_and_values(
            entity, main, names_and_values, error_msg_name_part or entity
        )
        filename = entity.make_new_filename(updated_args, removed_args)
        try:
            # ensure the new filename is valid, but only if it does not have vars
            if (
                VAR_PREFIX not in filename
                and not entity.parse_filename(filename, False, entity.parent, entity.get_available_vars()).main
            ):
                raise ValueError
        except Exception:
            Manager.exit(1, f"[{error_msg_name_part}] Configuration is not valid")
        return filename
@classmethod
def rename_entity(cls, entity, new_filename=None, new_path=None, dry_run=False):
return entity.rename(new_filename, new_path, check_only=dry_run)
@classmethod
def delete_entity(cls, entity, dry_run=False):
if not dry_run and entity.path.exists():
if entity.is_dir and not entity.path.is_symlink():
shutil.rmtree(entity.path)
else:
entity.path.unlink()
return entity.path
@classmethod
def check_new_path(cls, path, is_dir, error_msg_name_part):
if path.exists():
Manager.exit(
1,
f'[{error_msg_name_part}] Cannot create {"directory" if is_dir else "file"} "{path}" because it already exists',
)
@classmethod
def create_entity(
cls, entity_class, parent, identifier, main_args, names_and_values, link, error_msg_name_part, dry_run=False
):
entity = entity_class.create_basic(parent, main_args, identifier)
filename = cls.update_filename(entity, names_and_values, None, error_msg_name_part)
path = parent.path / filename
cls.check_new_path(path, entity_class.is_dir, error_msg_name_part)
if not dry_run:
if entity_class.is_dir:
path.mkdir()
elif link:
path.symlink_to(link)
else:
path.touch()
return path
@classmethod
def copy_entity(cls, entity, parent, main_override, names_and_values, error_msg_name_part, dry_run=False):
filename = cls.update_filename(entity, names_and_values, main_override, error_msg_name_part)
path = parent.path / filename
cls.check_new_path(path, entity.is_dir, error_msg_name_part)
if not dry_run:
if entity.is_dir:
shutil.copytree(entity.path, path, symlinks=True)
else:
shutil.copy2(entity.path, path, follow_symlinks=False)
return path
@classmethod
def move_entity(cls, entity, parent, main_override, names_and_values, error_msg_name_part, dry_run=False):
filename = cls.update_filename(entity, names_and_values, main_override, error_msg_name_part)
path = parent.path / filename
cls.check_new_path(path, entity.is_dir, error_msg_name_part)
if not dry_run:
cls.rename_entity(entity, new_path=path)
return path
@classmethod
def validate_number_expression(cls, ctx, param, value):
if not value:
return "first", None, None, value
try:
value = int(value)
if value <= 0:
raise click.BadParameter(f"{value} is not a positive integer.")
return "exact", int(value), None, value
except ValueError:
pass
for r in cls.validate_number_expression_regexs.values():
if match := r.match(value):
parts = match.groupdict()
return (
"random" if "random" in parts else "first",
int(parts["low"]) if "low" in parts else None,
int(parts["high"]) if "high" in parts else None,
value,
)
raise click.BadParameter(
f'{value} is not a positive integer or one of these expression: "", "NUMBER+", "NUMBER+NUMBER", "?", "NUMBER?", "?NUMBER" or "NUMBER?NUMBER"'
)
validate_number_expression_regexs = {
"first_after": re.compile(r"^(?P<low>\d+)(?P<first>\+)$"),
"first_between": re.compile(r"^(?P<low>\d+)(?P<first>\+)(?P<high>\d+)$"),
"random": re.compile(r"^(?P<random>\?)$"),
"ramdom_after": re.compile(r"^(?P<low>\d+)(?P<random>\?)$"),
"random_between": re.compile(r"^(?P<low>\d+)(?P<random>\?)(?P<high>\d+)$"),
"ramdom_before": re.compile(r"^(?P<random>\?)(?P<high>\d+)$"),
}
    @staticmethod
    def get_one_number(used, mode, low, high, min_low, max_high):
        """Pick a number according to *mode*, bounded by (low, high) exclusive.

        NOTE(review): 'first' returns the first number FROM *used* strictly
        between low and high, while 'random' returns a number NOT in *used* —
        the two modes search opposite sets. Confirm this asymmetry against
        the callers' expectations before touching the logic.
        """
        if low is None:
            low = min_low
        if high is None:
            high = max_high
        if mode == "exact":
            # no constraints when using exact mode
            return low
        if low < min_low or high > max_high:
            return None
        if mode == "first":
            for number in sorted(used):
                if number > low:
                    return number if number < high else None
        elif mode == "random":
            inc_low, inc_high = low + 1, high - 1
            used = set(number for number in used if number > low and number < high)
            # ensure we have at least one possible before launching our while loop
            if len(used) < high - low - 1:
                while True:
                    if (number := randint(inc_low, inc_high)) not in used:
                        return number
        # falls through to None when nothing matched (implicit in the original too)
        return None
@classmethod
def get_one_page(cls, deck, mode, low, high, original):
if number := cls.get_one_number(
(number for number, page in deck.pages.items() if page and page.is_renderable()),
mode,
low,
high,
0,
100000,
):
return number
Manager.exit(1, f'Cannot find an available page matching "{original}"')
@classmethod
def validate_key_expression(cls, ctx, param, value):
if not value:
return None
if value in ("?", "+"):
return value
if cls.validate_key_regex.match(value):
return tuple(map(int, value.split(",")))
raise click.BadParameter(f'{value} is not in the format "row,col", or one of "+" or "?"')
validate_key_regex = re.compile(r"^\d+,\d+$")
    @classmethod
    def get_one_key(cls, page, key):
        """Resolve a key spec to (row, col).

        Tuples pass through unchanged; '+' picks the first free position in
        row-major order, '?' picks a random free position. Aborts when the
        page grid has no free position left.
        """
        if not key:
            return None
        if isinstance(key, str):
            # positions already holding a renderable key are unavailable
            used_keys = set(
                row_col for row_col, page_key in page.keys.items() if page_key and page_key.is_renderable()
            )
            if len(used_keys) < page.deck.nb_cols * page.deck.nb_rows:
                if key == "+":  # get first available key
                    for row in range(1, page.deck.nb_rows + 1):
                        for col in range(1, page.deck.nb_cols + 1):
                            if (key := (row, col)) not in used_keys:
                                return key
                if key == "?":  # get random key
                    # the free-slot check above guarantees this loop terminates
                    while True:
                        if (key := (randint(1, page.deck.nb_rows), randint(1, page.deck.nb_cols))) not in used_keys:
                            return key
        else:
            return key
        Manager.exit(1, f'Cannot find an available key matching "{key}"')
@classmethod
def create_page(cls, deck, number, names_and_values, dry_run=False):
return cls.create_entity(
Page,
deck,
number,
{"page": number},
names_and_values,
None,
f"{deck}, NEW PAGE {number}",
dry_run=dry_run,
)
@classmethod
def copy_page(cls, page, to_number, names_and_values, dry_run=False):
return cls.copy_entity(
page,
page.deck,
{"page": to_number},
names_and_values,
f"{page.deck}, NEW PAGE {to_number}",
dry_run=dry_run,
)
@classmethod
def move_page(cls, page, to_number, names_and_values, dry_run=False):
return cls.move_entity(
page,
page.deck,
{"page": to_number},
names_and_values,
f"{page.deck}, NEW PAGE {to_number}",
dry_run=dry_run,
)
@classmethod
def create_key(cls, page, to_row, to_col, names_and_values, dry_run=False):
return cls.create_entity(
Key,
page,
(to_row, to_col),
{"row": to_row, "col": to_col},
names_and_values,
None,
f"{page}, NEW KEY",
dry_run=dry_run,
)
@classmethod
def copy_key(cls, key, to_page, to_row, to_col, names_and_values, dry_run=False):
return cls.copy_entity(
key,
to_page,
{"row": to_row, "col": to_col},
names_and_values,
f"{to_page}, NEW KEY {key}",
dry_run=dry_run,
)
@classmethod
def move_key(cls, key, to_page, to_row, to_col, names_and_values, dry_run=False):
return cls.move_entity(
key,
to_page,
{"row": to_row, "col": to_col},
names_and_values,
f"{to_page}, NEW KEY {key}",
dry_run=dry_run,
)
@classmethod
def create_layer(cls, key, names_and_values, link, dry_run=False):
return cls.create_entity(
KeyImageLayer, key, -1, {}, names_and_values, link, f"{key}, NEW LAYER", dry_run=dry_run
)
@classmethod
def copy_layer(cls, layer, to_key, names_and_values, dry_run=False):
return cls.copy_entity(layer, to_key, {}, names_and_values, f"{to_key}, NEW LAYER", dry_run=dry_run)
@classmethod
def move_layer(cls, layer, to_key, names_and_values, dry_run=False):
return cls.move_entity(layer, to_key, {}, names_and_values, f"{to_key}, NEW LAYER", dry_run=dry_run)
@classmethod
def create_text_line(cls, key, names_and_values, link, dry_run=False):
return cls.create_entity(
KeyTextLine, key, -1, {}, names_and_values, link, f"{key}, NEW TEXT LINE", dry_run=dry_run
)
@classmethod
def copy_text_line(cls, text_line, to_key, names_and_values, dry_run=False):
return cls.copy_entity(text_line, to_key, {}, names_and_values, f"{to_key}, NEW TEXT LINE", dry_run=dry_run)
@classmethod
def move_text_line(cls, text_line, to_key, names_and_values, dry_run=False):
return cls.move_entity(text_line, to_key, {}, names_and_values, f"{to_key}, NEW TEXT LINE", dry_run=dry_run)
@classmethod
def get_entity_container(cls, directory, page_filter, key_filter, kind):
assert kind in ("event", "var")
filtering = {
"page_filter": FILTER_DENY if page_filter is None else None,
"key_filter": FILTER_DENY if (key_filter is None or page_filter is None) else None,
"layer_filter": FILTER_DENY,
"text_line_filter": FILTER_DENY,
}
# we never set FILTER_DENY for `var_filter` because every kind of entity may need a var
if kind == "var":
filtering["event_filter"] = FILTER_DENY
deck = FC.get_deck(directory, **filtering)
if page_filter is None:
return deck
page = FC.find_page(deck, page_filter)
return page if key_filter is None else FC.find_key(page, key_filter)
@classmethod
def create_event(cls, container, kind, names_and_values, link, dry_run=False):
return cls.create_entity(
container.event_class,
container,
kind,
{"kind": kind},
names_and_values,
link,
f"{container}, NEW EVENT {kind}",
dry_run=dry_run,
)
@classmethod
def copy_event(cls, event, to_parent, to_kind, names_and_values, dry_run=False):
return cls.copy_entity(
event,
to_parent,
{"kind": to_kind},
names_and_values,
f"{to_parent}, NEW EVENT {to_kind}",
dry_run=dry_run,
)
@classmethod
def move_event(cls, event, to_parent, to_kind, names_and_values, dry_run=False):
return cls.move_entity(
event,
to_parent,
{"kind": to_kind},
names_and_values,
f"{to_parent}, NEW EVENT {to_kind}",
dry_run=dry_run,
)
@classmethod
def create_var(cls, container, name, names_and_values, link, dry_run=False):
return cls.create_entity(
container.var_class,
container,
name,
{"name": name},
names_and_values,
link,
f"{container}, NEW VAR {name}",
dry_run=dry_run,
)
@classmethod
def copy_var(cls, var, to_parent, to_name, names_and_values, dry_run=False):
return cls.copy_entity(
var,
to_parent,
{"name": to_name},
names_and_values,
f"{to_parent}, NEW VAR {to_name}",
dry_run=dry_run,
)
@classmethod
def move_var(cls, var, to_parent, to_name, names_and_values, dry_run=False):
return cls.move_entity(
var,
to_parent,
{"name": to_name},
names_and_values,
f"{to_parent}, NEW VAR {to_name}",
dry_run=dry_run,
)
@classmethod
def iter_content(cls, holder, entity_class, with_disabled):
entities = sorted(
getattr(holder, entity_class.parent_container_attr).items(),
key=entity_class.identifier_and_entity_sort_key,
)
for | |
is used
result = torch.linalg.matrix_rank(a, atol=tol_value, rtol=tol_value)
self.assertEqual(result, 2) # there are 2 singular values above max(0.81, 1.5*0.81)
    # CUDA 11.6 issue failure https://github.com/pytorch/pytorch/issues/75391
    @skipCUDAIf(torch.version.cuda is not None
                and torch.version.cuda.split(".") == ["11", "6"], "There's a bug in CUDA 11.6")
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_matrix_rank_empty(self, device, dtype):
        """matrix_rank of matrices with a zero-sized dimension is all-zero and
        consistent across the conjugate-transpose / hermitian / tolerance variants."""
        matrix_rank = torch.linalg.matrix_rank
        # NumPy doesn't work for input with no elements
        def run_test(shape0, shape1, batch):
            a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)
            rank_a = matrix_rank(a)
            # every batch entry of an empty matrix must have rank 0
            expected = torch.zeros(batch, dtype=torch.int64, device=device)
            # rank is invariant under conjugate transpose
            self.assertEqual(rank_a, matrix_rank(a.mH))
            aaH = torch.matmul(a, a.mH)
            rank_aaH = matrix_rank(aaH)
            # a*aH is hermitian, so the hermitian fast path must agree
            rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)
            self.assertEqual(rank_aaH, rank_aaH_hermitian)
            aHa = torch.matmul(a.mH, a)
            self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))
            self.assertEqual(rank_a, expected)
            # a positional tolerance must not change the all-zero result
            self.assertEqual(matrix_rank(a, 0.01), expected)
            self.assertEqual(rank_aaH, expected)
            self.assertEqual(matrix_rank(aaH, 0.01), expected)
            self.assertEqual(rank_aaH_hermitian, expected)
            self.assertEqual(matrix_rank(aaH, 0.01, True), expected)
        # cover unbatched, 1-D batch and 2-D batch shapes
        batches = ((), (4, ), (3, 5, ))
        for batch in batches:
            run_test(0, 0, batch)
            run_test(0, 3, batch)
            run_test(3, 0, batch)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_matrix_rank_out_errors_and_warnings(self, device, dtype):
    """Error/warning behaviour of the ``out=`` argument of matrix_rank."""
    # dtypes should be safely castable (Bool is not)
    a = torch.eye(2, dtype=dtype, device=device)
    out = torch.empty(0, dtype=torch.bool, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got result with dtype Bool"):
        torch.linalg.matrix_rank(a, out=out)
    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty(0, dtype=dtype, device=wrong_device)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.matrix_rank(a, out=out)
    # if out tensor with wrong shape is passed a warning is given (and out is resized)
    with warnings.catch_warnings(record=True) as w:
        out = torch.empty(3, dtype=dtype, device=device)
        # Trigger warning
        torch.linalg.matrix_rank(a, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_matrix_rank_basic(self, device, dtype):
    """Sanity check: rank of the identity, before and after zeroing one diagonal entry."""
    identity = torch.eye(10, dtype=dtype, device=device)
    self.assertEqual(torch.linalg.matrix_rank(identity).item(), 10)
    self.assertEqual(torch.linalg.matrix_rank(identity, hermitian=True).item(), 10)
    # zeroing a single diagonal entry drops the rank by exactly one
    identity[5, 5] = 0
    self.assertEqual(torch.linalg.matrix_rank(identity).item(), 9)
    self.assertEqual(torch.linalg.matrix_rank(identity, hermitian=True).item(), 9)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_matrix_rank(self, device, dtype):
    """Exercise the deprecated ``torch.matrix_rank`` and compare it against NumPy."""
    a = torch.eye(10, dtype=dtype, device=device)
    self.assertEqual(torch.matrix_rank(a).item(), 10)
    # bool second arg selects the symmetric/hermitian path (mirrors hermitian=True above)
    self.assertEqual(torch.matrix_rank(a, True).item(), 10)
    a[5, 5] = 0
    self.assertEqual(torch.matrix_rank(a).item(), 9)
    self.assertEqual(torch.matrix_rank(a, True).item(), 9)
    a = torch.randn(24, 42, dtype=dtype, device=device)
    # rank is invariant under transposition
    self.assertEqual(torch.matrix_rank(a), torch.matrix_rank(a.t()))
    aaT = torch.mm(a, a.conj().t())
    self.assertEqual(torch.matrix_rank(aaT), torch.matrix_rank(aaT, True))
    aTa = torch.mm(a.conj().t(), a)
    self.assertEqual(torch.matrix_rank(aTa), torch.matrix_rank(aTa, True))
    a = torch.randn(35, 75, dtype=dtype, device=device)
    # parity with numpy, with and without an explicit tolerance
    self.assertEqual(torch.matrix_rank(a), np.linalg.matrix_rank(a.cpu().numpy()))
    self.assertEqual(torch.matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01))
    aaT = torch.mm(a, a.conj().t())
    self.assertEqual(torch.matrix_rank(aaT), np.linalg.matrix_rank(aaT.cpu().numpy()))
    self.assertEqual(torch.matrix_rank(aaT, 0.01), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01))
    # numpy grew the hermitian flag in 1.14.0
    if np.lib.NumpyVersion(np.__version__) >= '1.14.0':
        self.assertEqual(torch.matrix_rank(aaT, True), np.linalg.matrix_rank(aaT.cpu().numpy(), True))
        self.assertEqual(torch.matrix_rank(aaT, 0.01, True), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01, True))
@onlyNativeDeviceTypes
@dtypes(torch.double)
# Covers only the behaviours where torch.chain_matmul differs from
# torch.linalg.multi_dot (chain_matmul is effectively an alias otherwise).
def test_chain_matmul(self, device, dtype):
    """chain_matmul-specific behaviour: single input, zero-input error, strict 2D inputs."""
    # Unlike multi_dot, a single matrix is accepted and returned as-is.
    single = make_tensor((2, 2), dtype=dtype, device=device)
    self.assertEqual(single, torch.chain_matmul(single))
    # Calling with no matrices at all must raise.
    with self.assertRaisesRegex(RuntimeError, r"chain_matmul\(\): Expected one or more matrices"):
        torch.chain_matmul()
    # Every input must be 2D (multi_dot would allow 1D at the ends).
    vec_a = make_tensor(1, dtype=dtype, device=device)
    vec_b = make_tensor(1, dtype=dtype, device=device)
    with self.assertRaisesRegex(RuntimeError, r"Tensor dimension is 1, expected 2 instead"):
        torch.chain_matmul(vec_a, vec_b)
@onlyNativeDeviceTypes
@dtypes(torch.double, torch.cdouble)
def test_multi_dot(self, device, dtype):
    """Compare torch.linalg.multi_dot against numpy.linalg.multi_dot on shared data."""
    def check(*shapes):
        # one tensor per shape; torch and numpy see the exact same values
        tensors = [make_tensor(shape, dtype=dtype, device=device) for shape in shapes]
        np_arrays = [tensor.cpu().numpy() for tensor in tensors]
        res = torch.linalg.multi_dot(tensors).cpu()
        ref = torch.from_numpy(np.array(np.linalg.multi_dot(np_arrays)))
        self.assertEqual(res, ref)
    # test for inputs with empty dimensions
    check([0], [0])
    check([2], [2, 0])
    check([1, 0], [0])
    check([0, 2], [2, 1])
    check([2, 2], [2, 0])
    check([2, 0], [0, 3])
    check([0, 0], [0, 1])
    check([4, 2], [2, 0], [0, 3], [3, 2])
    # test variable output shapes (1D at either end collapses that dimension)
    check([2], [2])
    check([1, 2], [2])
    check([2], [2, 1])
    check([1, 2], [2, 1])
    check([3, 2], [2, 4])
    # test multiple input tensors
    check([3], [3, 4], [4, 2], [2, 5], [5])
    check([1, 2], [2, 2], [2, 3], [3, 1])
    # test large tensors
    check([10, 100], [100, 5], [5, 50])
    check([10, 20], [20, 30], [30, 5])
@onlyNativeDeviceTypes
@dtypes(torch.float)
def test_multi_dot_errors(self, device, dtype):
    """Input validation of torch.linalg.multi_dot: arity, dims, dtype, device, shapes."""
    def check(tensors, out, msg):
        with self.assertRaisesRegex(RuntimeError, msg):
            torch.linalg.multi_dot(tensors, out=out)
    a = make_tensor(2, dtype=dtype, device=device)
    # arity: need at least two tensors
    check([], None, "expected at least 2 tensors")
    check([a], None, "expected at least 2 tensors")
    # only the first/last tensor may be 1D; middle ones must be 2D
    check([torch.tensor(1, device=device, dtype=dtype), a], None, "the first tensor must be 1D or 2D")
    check([a, torch.tensor(1, device=device, dtype=dtype)], None, "the last tensor must be 1D or 2D")
    check([a, a, a], None, "tensor 1 must be 2D")
    check([a, make_tensor((2, 2, 2), dtype=dtype, device=device), a], None, "tensor 1 must be 2D")
    # dtype mismatches (regex matches PyTorch's current wording verbatim)
    check([a, make_tensor(2, dtype=torch.double, device=device)], None, "all tensors must have be the same dtype")
    check([a, a], torch.empty(0, device=device, dtype=torch.double), "expected out tensor to have dtype")
    # device mismatches are only reachable when a second device exists
    if self.device_type == 'cuda':
        check([a, make_tensor(2, dtype=dtype, device="cpu")], None, "all tensors must be on the same device")
        check([a, a], torch.empty(0, dtype=dtype), "expected out tensor to be on device")
    # incompatible inner dimensions
    check([a, make_tensor(3, dtype=dtype, device=device)], None, "cannot be multiplied")
    check([a, make_tensor((3, 2), dtype=dtype, device=device), a], None, "cannot be multiplied")
@precisionOverride({torch.float32: 5e-6, torch.complex64: 5e-6})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_qr(self, device, dtype):
    """Validate the deprecated torch.qr: output shapes, A=QR reconstruction,
    out= variant, and orthogonality/triangularity of the factors."""
    def run_test(tensor_dims, some):
        A = torch.randn(*tensor_dims, dtype=dtype, device=device)
        Q, R = torch.qr(A, some=some)
        # Check0: Q[-2:] = (m, n_columns), R[-2:] = (n_columns, n)
        m, n = tensor_dims[-2:]
        n_columns = m if (not some) and m > n else min(m, n)
        self.assertEqual(Q.size(-2), m)
        self.assertEqual(R.size(-1), n)
        self.assertEqual(Q.size(-1), n_columns)
        A_ = A.cpu().numpy()
        Q_ = Q.cpu().numpy()
        R_ = R.cpu().numpy()
        # Check1: A = QR
        self.assertEqual(A_, np.matmul(Q_, R_))
        # Check2: A = QR (with out); out tensors pre-filled with NaN so stale
        # values would be detected
        Q_out, R_out = torch.full_like(Q, math.nan), torch.full_like(R, math.nan)
        torch.qr(A, some=some, out=(Q_out, R_out))
        Q_out_ = Q_out.cpu().numpy()
        R_out_ = R_out.cpu().numpy()
        self.assertEqual(A_, np.matmul(Q_out_, R_out_))
        # Check3: Q == Q_out, R == R_out
        self.assertEqual(Q_, Q_out_)
        self.assertEqual(R_, R_out_)
        # Check4: Q^{T}Q = I, triu(R) = R
        eye = torch.eye(n_columns, device=device, dtype=dtype).expand(Q.shape[:-2] + (n_columns, n_columns)).cpu().numpy()
        self.assertEqual(np.matmul(Q_.swapaxes(-1, -2).conj(), Q_), eye)
        self.assertEqual(R.triu(), R)
    tensor_dims_list = [(0, 5), (0, 0), (5, 0),  # Empty Tensors
                        (2, 1, 0, 5), (2, 1, 0, 0), (2, 1, 5, 0), (2, 0, 5, 5),  # Batched empty Tensors
                        (3, 5), (5, 5), (5, 3),  # Single matrix
                        (7, 3, 5), (7, 5, 5), (7, 5, 3),  # 3-dim Tensors
                        (7, 5, 3, 5), (7, 5, 5, 5), (7, 5, 5, 3)]  # 4-dim Tensors
    for tensor_dims, some in itertools.product(tensor_dims_list, [True, False]):
        run_test(tensor_dims, some)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_qr_vs_numpy(self, device, dtype):
    """
    test torch.linalg.qr vs numpy.linalg.qr on tall, wide and empty matrices
    """
    sizes_to_test = [
        (7, 5),
        (5, 7),
        (5, 0),  # empty
        (0, 5),  # empty
    ]
    for size in sizes_to_test:
        t = torch.randn(size, device=device, dtype=dtype)
        np_t = t.cpu().numpy()
        for mode in ['reduced', 'complete']:
            exp_q, exp_r = np.linalg.qr(np_t, mode=mode)
            q, r = torch.linalg.qr(t, mode=mode)
            self.assertEqual(q, exp_q)
            self.assertEqual(r, exp_r)
        #
        # for mode='r' we need a special logic because numpy returns only r
        exp_r = np.linalg.qr(np_t, mode='r')
        q, r = torch.linalg.qr(t, mode='r')
        # check that q is empty (torch returns a placeholder instead of omitting it)
        self.assertEqual(q.shape, (0,))
        self.assertEqual(q.dtype, t.dtype)
        self.assertEqual(q.device, t.device)
        # check r
        self.assertEqual(r, exp_r)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float)
def test_linalg_qr_autograd_errors(self, device, dtype):
    """Backward through qr must fail cleanly in the unsupported configurations."""
    # torch.linalg.qr(mode='r') returns only 'r' and discards 'q', but
    # without 'q' you cannot compute the backward pass. Check that
    # linalg_qr_backward complains cleanly in that case.
    inp = torch.randn((5, 7), device=device, dtype=dtype, requires_grad=True)
    q, r = torch.linalg.qr(inp, mode='r')
    self.assertEqual(q.shape, (0,))  # empty tensor
    b = torch.sum(r)
    with self.assertRaisesRegex(RuntimeError,
                                "The derivative of qr is not implemented when mode='r'"):
        b.backward()
    #
    # mode='complete' on a tall matrix is the other unsupported backward case
    inp = torch.randn((7, 5), device=device, dtype=dtype, requires_grad=True)
    q, r = torch.linalg.qr(inp, mode='complete')
    b = torch.sum(r)
    with self.assertRaisesRegex(RuntimeError,
                                "The derivative of qr is not implemented when mode='complete' and nrows > ncols"):
        b.backward()
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_qr_batched(self, device, dtype):
"""
test torch.linalg.qr vs numpy.linalg.qr. We need some special logic
because numpy does not support batched qr
"""
def np_qr_batched(a, mode):
"""poor's man batched version of np.linalg.qr"""
all_q = []
all_r = []
for matrix in a:
result = np.linalg.qr(matrix, mode=mode)
if mode == 'r':
all_r.append(result)
else:
q, r = result
all_q.append(q)
all_r.append(r)
if mode == 'r':
return np.array(all_r)
else:
return np.array(all_q), np.array(all_r)
t = torch.randn((3, 7, 5), device=device, dtype=dtype)
np_t = t.cpu().numpy()
for mode in ['reduced', 'complete']:
exp_q, exp_r = np_qr_batched(np_t, mode=mode)
q, r = torch.linalg.qr(t, mode=mode)
self.assertEqual(q, exp_q)
self.assertEqual(r, exp_r)
# for mode='r' we need a special logic because numpy returns only r
exp_r = np_qr_batched(np_t, mode='r')
q, r = torch.linalg.qr(t, mode='r')
# check that q is empty
self.assertEqual(q.shape, (0,))
self.assertEqual(q.dtype, t.dtype)
self.assertEqual(q.device, t.device)
# check r
self.assertEqual(r, | |
by using deploy_id and its stage (hostStage = 0 ~ 8)
def get_hosts_by_deploy(request, name, stage, deploy_id):
    """Render every alive host carrying ``deploy_id``, optionally filtered by host stage."""
    # default to the "total alive hosts" report when hostStage is absent
    hostStage = request.GET.get('hostStage', TOTAL_ALIVE_HOST_REPORT)
    envs = environs_helper.get_all_env_stages(request, name)
    stages, env = common.get_all_stages(envs, stage)
    progress = deploys_helper.update_progress(request, name, stage)
    agents_wrapper = agent_report.gen_agent_by_deploy(progress, deploy_id,
                                                      ALIVE_STAGE_HOST_REPORT, hostStage)
    context = {
        "envs": envs,
        "env": env,
        "stages": stages,
        "agents_wrapper": agents_wrapper,
        "title": "All hosts with deploy " + deploy_id,
    }
    return render(request, 'environs/env_hosts.html', context)
# reset all failed hosts for this env, this deploy
def reset_failed_hosts(request, name, stage, deploy_id):
    """Reset every failed agent of ``deploy_id`` so the deploy retries on them."""
    agents_helper.reset_failed_agents(request, name, stage, deploy_id)
    payload = json.dumps({'html': ''})
    return HttpResponse(payload, content_type="application/json")
# retry failed deploy stage for this env, this host
def reset_deploy(request, name, stage, host_id):
    """Retry the failed deploy stage on a single host."""
    agents_helper.retry_deploy(request, name, stage, host_id)
    payload = json.dumps({'html': ''})
    return HttpResponse(payload, content_type="application/json")
# pause deploy for this env, this host
def pause_deploy(request, name, stage, host_id):
    """Pause the deploy on a single host."""
    agents_helper.pause_deploy(request, name, stage, host_id)
    payload = json.dumps({'html': ''})
    return HttpResponse(payload, content_type="application/json")
# resume deploy stage for this env, this host
def resume_deploy(request, name, stage, host_id):
    """Resume a paused deploy on a single host."""
    agents_helper.resume_deploy(request, name, stage, host_id)
    payload = json.dumps({'html': ''})
    return HttpResponse(payload, content_type="application/json")
# pause hosts for this env and stage
def pause_hosts(request, name, stage):
    """Pause deploys on the POSTed hosts (all hosts when 'hostIds' is absent)."""
    hosts_str = request.POST.get('hostIds')
    host_ids = None if hosts_str is None else [x.strip() for x in hosts_str.split(',')]
    environs_helper.pause_hosts(request, name, stage, host_ids)
    return redirect('/env/{}/{}/'.format(name, stage))
# resume hosts for this env and stage
def resume_hosts(request, name, stage):
    """Resume deploys on the POSTed hosts (all hosts when 'hostIds' is absent)."""
    hosts_str = request.POST.get('hostIds')
    host_ids = None if hosts_str is None else [x.strip() for x in hosts_str.split(',')]
    environs_helper.resume_hosts(request, name, stage, host_ids)
    return redirect('/env/{}/{}/'.format(name, stage))
# reset hosts for this env and stage
def reset_hosts(request, name, stage):
    """Reset deploy state on the POSTed hosts, then return to the hosts page."""
    hosts_str = request.POST.get('hostIds')
    host_ids = None if hosts_str is None else [x.strip() for x in hosts_str.split(',')]
    environs_helper.reset_hosts(request, name, stage, host_ids)
    return redirect('/env/{}/{}/hosts'.format(name, stage))
# get total unknown(unreachable) hosts
def get_unknown_hosts(request, name, stage):
    """Render hosts that are unreachable (unknown) for the env's current deploy."""
    envs = environs_helper.get_all_env_stages(request, name)
    stages, env = common.get_all_stages(envs, stage)
    progress = deploys_helper.update_progress(request, name, stage)
    agents_wrapper = agent_report.gen_agent_by_deploy(progress, env['deployId'],
                                                      UNKNOWN_HOST_REPORT)
    # fixed user-facing typo: "Unknow hosts" -> "Unknown hosts"
    title = "Unknown hosts"
    return render(request, 'environs/env_hosts.html', {
        "envs": envs,
        "env": env,
        "stages": stages,
        "agents_wrapper": agents_wrapper,
        "title": title,
    })
# get provisioning hosts
def get_provisioning_hosts(request, name, stage):
    """Render hosts still being provisioned for the env's current deploy."""
    envs = environs_helper.get_all_env_stages(request, name)
    stages, env = common.get_all_stages(envs, stage)
    progress = deploys_helper.update_progress(request, name, stage)
    agents_wrapper = agent_report.gen_agent_by_deploy(progress, env['deployId'],
                                                      PROVISION_HOST_REPORT)
    context = {
        "envs": envs,
        "env": env,
        "stages": stages,
        "agents_wrapper": agents_wrapper,
        "title": "Provisioning hosts",
    }
    return render(request, 'environs/env_hosts.html', context)
# get total (unknown+alive) hosts
def get_all_hosts(request, name, stage):
    """Render every host (alive and unknown) for the env's current deploy."""
    envs = environs_helper.get_all_env_stages(request, name)
    stages, env = common.get_all_stages(envs, stage)
    progress = deploys_helper.update_progress(request, name, stage)
    agents_wrapper = agent_report.gen_agent_by_deploy(progress, env['deployId'],
                                                      TOTAL_HOST_REPORT)
    context = {
        "envs": envs,
        "env": env,
        "stages": stages,
        "agents_wrapper": agents_wrapper,
        "title": "All hosts",
    }
    return render(request, 'environs/env_hosts.html', context)
# get failed (but alive) hosts (agent status > 0)
def get_failed_hosts(request, name, stage):
    """Render failed-but-alive hosts for the current deploy, with retry support."""
    envs = environs_helper.get_all_env_stages(request, name)
    stages, env = common.get_all_stages(envs, stage)
    progress = deploys_helper.update_progress(request, name, stage)
    deploy_id = env['deployId']
    agents_wrapper = agent_report.gen_agent_by_deploy(progress, deploy_id,
                                                      FAILED_HOST_REPORT)
    # comma-separated id list consumed by the retry control in the template
    host_ids = ",".join(agent['hostId'] for agent in agents_wrapper[deploy_id])
    return render(request, 'environs/env_hosts.html', {
        "envs": envs,
        "env": env,
        "stages": stages,
        "agents_wrapper": agents_wrapper,
        "title": "Failed Hosts",
        "is_retryable": True,
        "host_ids": host_ids,
        "pinterest": IS_PINTEREST,
    })
def get_pred_deploys(request, name, stage):
    """Render the deploys on the predecessor stage whose builds are newer than
    the build currently deployed on this stage (HTML fragment)."""
    index = int(request.GET.get('page_index', '1'))
    size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
    env = environs_helper.get_env_by_stage(request, name, stage)
    env_promote = environs_helper.get_env_promotes_config(request, name, stage)
    predStage = env_promote.get('predStage')
    # a lock indicator is shown when auto-promotion from a predecessor is on
    show_lock = bool(env_promote['type'] != "MANUAL" and predStage)
    if not predStage or predStage == "BUILD":
        deploys = []
    else:
        pred_env = environs_helper.get_env_by_stage(request, name, predStage)
        result = deploys_helper.get_all(request, envId=[pred_env['id']], pageIndex=index,
                                        pageSize=size)
        deploys = result["deploys"]
    # publish date of the build currently running on this stage (0 when none)
    current_startDate = 0
    if env.get('deployId'):
        deploy = deploys_helper.get(request, env['deployId'])
        build = builds_helper.get_build(request, deploy['buildId'])
        current_startDate = build['publishDate']
    deploy_wrappers = []
    for candidate in deploys:
        candidate_build = builds_helper.get_build(request, candidate['buildId'])
        # offer only builds strictly newer than the currently deployed one
        if candidate_build['publishDate'] > current_startDate:
            deploy_wrappers.append({'deploy': candidate, 'build': candidate_build})
    html = render_to_string('deploys/simple_pred_deploys.tmpl', {
        "deploy_wrappers": deploy_wrappers,
        "envName": name,
        "stageName": predStage,
        "show_lock": show_lock,
        "current_startDate": current_startDate,
    })
    return HttpResponse(html)
def warn_for_deploy(request, name, stage, buildId):
    """Return a warning HTML fragment if:
    1. The build has been tagged as a bad build, or
    2. the build doesn't have a successful deploy on the preceding stage.
    Returns an empty response otherwise.
    TODO: we have to call the backend twice since the getAllDeploys call does not
    support filtering on multiple states; also, getAllDeploys returns all deploys
    with commits after the specific commit -- it would be good to have an option
    to return only the exact matched deploys.
    """
    build_info = builds_helper.get_build_and_tag(request, buildId)
    build = build_info["build"]
    tag = build_info.get("tag")
    if tag is not None and tag["value"] == tags_helper.TagValue.BAD_BUILD:
        html = render_to_string('warn_deploy_bad_build.tmpl', {
            'tag': tag,
        })
        return HttpResponse(html)
    env_promote = environs_helper.get_env_promotes_config(request, name, stage)
    pred_stage = env_promote.get('predStageName')
    if not pred_stage or pred_stage == BUILD_STAGE:
        return HttpResponse("")
    pred_env = environs_helper.get_env_by_stage(request, name, pred_stage)
    # renamed from ``filter`` to avoid shadowing the builtin
    query = {
        'envId': [pred_env['id']],
        'commit': build['commit'],
        'repo': build['repo'],
        'oldestFirst': True,
        'pageIndex': 1,
        'pageSize': 1,
    }
    # a deploy in either state counts as "successful on the predecessor stage"
    for state in ("SUCCEEDING", "SUCCEEDED"):
        query['deployState'] = state
        if deploys_helper.get_all(request, **query)['deploys']:
            return HttpResponse("")
    html = render_to_string('warn_no_success_deploy_in_pred.tmpl', {
        'envName': name,
        'predStageName': pred_stage,
    })
    return HttpResponse(html)
def get_env_config_history(request, name, stage):
    """Render the paged configuration-change history for an env/stage."""
    index = int(request.GET.get('page_index', '1'))
    size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
    env = environs_helper.get_env_by_stage(request, name, stage)
    configs = environs_helper.get_config_history(request, name, stage, index, size)
    # encode characters that would otherwise break the comparison links
    replacements = ((",", ", "), ("#", "%23"), ("\"", "%22"),
                    ("{", "%7B"), ("}", "%7D"), ("_", "%5F"))
    for config in configs:
        encoded = config["configChange"]
        for old, new in replacements:
            encoded = encoded.replace(old, new)
        config["replaced_config"] = encoded
    return render(request, 'configs/config_history.html', {
        "envName": name,
        "stageName": stage,
        "envId": env['id'],
        "configs": configs,
        "pageIndex": index,
        "pageSize": DEFAULT_PAGE_SIZE,
        "disablePrevious": index <= 1,
        "disableNext": len(configs) < DEFAULT_PAGE_SIZE,
    })
def _parse_config_comparison(query_dict):
    """Extract selected config-change ids from comparison checkbox fields.

    Checkbox keys look like ``chkbox_<id>`` with values ``<x>_<configChange>``;
    returns ``{id: configChange}``.

    Fixed: ``dict.iteritems()`` is Python-2-only and raises AttributeError on
    Python 3; ``items()`` behaves identically on both.
    """
    configs = {}
    for key, value in query_dict.items():
        if key.startswith('chkbox_'):
            config_id = key[len('chkbox_'):]
            # the second '_'-separated field carries the config change
            split_data = value.split('_')
            configs[config_id] = split_data[1]
    return configs
def get_config_comparison(request, name, stage):
    """Return, as JSON, the two selected config changes to diff.

    Fixed: ``configs.keys()`` was indexed directly, which raises TypeError on
    Python 3 (dict views are not subscriptable); materialize with ``list``.
    """
    configs = _parse_config_comparison(request.POST)
    if len(configs) > 1:
        ids = list(configs.keys())
        change1 = configs[ids[0]]
        change2 = configs[ids[1]]
        return HttpResponse(json.dumps({'change1': change1, 'change2': change2}),
                            content_type="application/json")
    return HttpResponse("", content_type="application/json")
def show_config_comparison(request, name, stage):
    """Render a side-by-side HTML diff of two configuration changes."""
    first = request.GET.get('change1')
    second = request.GET.get('change2')
    differ = GenerateDiff()
    diffs = differ.diff_main(first, second)
    # merge trivially-split edits for a human-readable diff
    differ.diff_cleanupSemantic(diffs)
    return render(request, 'configs/env_config_comparison_result.html', {
        "envName": name,
        "stageName": stage,
        "oldChange": differ.old_content(diffs),
        "newChange": differ.new_content(diffs),
    })
def get_deploy_schedule(request, name, stage):
    """Render the deploy-schedule page for an env/stage.

    Fixed: compared against ``None`` with ``!=`` (now ``is not None``) and
    removed a stray trailing semicolon.
    """
    env = environs_helper.get_env_by_stage(request, name, stage)
    envs = environs_helper.get_all_env_stages(request, name)
    schedule_id = env.get('scheduleId')
    if schedule_id is not None:
        schedule = schedules_helper.get_schedule(request, name, stage, schedule_id)
    else:
        schedule = None
    agent_number = agents_helper.get_agents_total_by_env(request, env["id"])
    return render(request, 'deploys/deploy_schedule.html', {
        "envs": envs,
        "env": env,
        "schedule": schedule,
        "agent_number": agent_number,
    })
class GenerateDiff(diff_match_patch):
    """Render diff_match_patch diffs as HTML, highlighting deletions/insertions.

    Fixed: the escaping chain replaced each HTML special character with itself
    ('&' -> '&', '<' -> '<', ...), i.e. it never escaped anything, so raw
    '&', '<' or '>' in a config change corrupted the rendered markup. The
    characters are now mapped to their HTML entities.
    """

    @staticmethod
    def _to_html(data):
        """Escape HTML specials, then turn newlines/commas into visual breaks."""
        return (data.replace("&", "&amp;")
                    .replace("<", "&lt;")
                    .replace(">", "&gt;")
                    .replace("\n", "<br>")
                    .replace(",", ",<br>"))

    def old_content(self, diffs):
        """HTML for the 'old' side: deletions highlighted red, equal runs plain."""
        html = []
        for (flag, data) in diffs:
            text = self._to_html(data)
            if flag == self.DIFF_DELETE:
                html.append("""<b style=\"background:#FFB5B5;
                \">%s</b>""" % text)
            elif flag == self.DIFF_EQUAL:
                html.append("<span>%s</span>" % text)
        return "".join(html)

    def new_content(self, diffs):
        """HTML for the 'new' side: insertions highlighted green, equal runs plain."""
        html = []
        for (flag, data) in diffs:
            text = self._to_html(data)
            if flag == self.DIFF_INSERT:
                html.append("""<b style=\"background:#97f697;
                \">%s</b>""" % text)
            elif flag == self.DIFF_EQUAL:
                html.append("<span>%s</span>" % text)
        return "".join(html)
def get_new_commits(request, name, stage):
    """Render the commits between the last completed deploy and the current one."""
    env = environs_helper.get_env_by_stage(request, name, stage)
    current_deploy = deploys_helper.get(request, env['deployId'])
    current_build = builds_helper.get_build(request, current_deploy['buildId'])
    startSha = current_build['commit']
    repo = current_build['repo']
    scm_url = systems_helper.get_scm_url(request)
    last_deploy = common.get_last_completed_deploy(request, env)
    if not last_deploy:
        # no previous deploy: diff the current commit against itself
        return render(request, 'deploys/deploy_commits.html', {
            "env": env,
            "title": "No previous deploy found!",
            "startSha": startSha,
            "endSha": startSha,
            "repo": repo,
            "diffUrl": "%s/%s/compare/%s...%s" % (scm_url, repo, startSha, startSha),
        })
    last_build = builds_helper.get_build(request, last_deploy['buildId'])
    endSha = last_build['commit']
    return render(request, 'deploys/deploy_commits.html', {
        "env": env,
        "startSha": startSha,
        "endSha": endSha,
        "repo": repo,
        "title": "Commits since last deploy",
        "diffUrl": "%s/%s/compare/%s...%s" % (scm_url, repo, endSha, startSha),
    })
def compare_deploys(request, name, stage):
    """Render the commit list between two deploys (HTML fragment)."""
    start_deploy_id = request.GET.get('start_deploy', None)
    start_deploy = deploys_helper.get(request, start_deploy_id)
    start_build = builds_helper.get_build(request, start_deploy['buildId'])
    startSha = start_build['commit']
    repo = start_build['repo']
    end_deploy_id = request.GET.get('end_deploy', None)
    if end_deploy_id:
        end_deploy = deploys_helper.get(request, end_deploy_id)
    else:
        # default: diff against the deploy immediately preceding start_deploy,
        # falling back to start_deploy itself when there is none
        env = environs_helper.get_env_by_stage(request, name, stage)
        end_deploy = common.get_previous_deploy(request, env, start_deploy) or start_deploy
    end_build = builds_helper.get_build(request, end_deploy['buildId'])
    endSha = end_build['commit']
    commits, truncated, new_start_sha = common.get_commits_batch(request, repo, startSha,
                                                                 endSha, keep_first=True)
    fragment = render_to_string('builds/commits.tmpl', {
        "commits": commits,
        "start_sha": new_start_sha,
        "end_sha": endSha,
        "repo": repo,
        "truncated": truncated,
        "show_checkbox": False,
    })
    return HttpResponse(fragment)
def compare_deploys_2(request, name, stage):
env = environs_helper.get_env_by_stage(request, name, stage)
configs = {}
for key, value in request.GET.iteritems():
if key.startswith('chkbox_'):
index = key[len('chkbox_'):]
configs[index] = value
indexes = configs.keys()
start_build_id = configs[indexes[0]]
end_build_id = configs[indexes[1]]
if int(indexes[0]) > int(indexes[1]):
start_build_id = configs[indexes[1]]
end_build_id = configs[indexes[0]]
start_build = builds_helper.get_build(request, start_build_id)
| |
which="psf", index=None, **kwargs):
""" kwargs goes to get_keys()
Parameters
----------
which: [string] -optional-
Source of magnitude measurements
- 'psf'
- 'ap'
- 'apbig'
"""
extra = "g" if which != "psf" else "" # strange ipac structure
return self.get_keys(["jd",f"mag{which}",f"sigma{extra}{which}","fid"], index=index, **kwargs)
def get_reference_timerange(self):
    """Reference-image time range (jdstartref/jdendref) per field and band."""
    # mean because unique sets as single element list
    grouped = self.data.groupby(["field", "fid"])
    return grouped[["jdstartref", "jdendref"]].mean()
def get_history_timerange(self, perband=True, groupby="field", **kwargs):
    """Detection-history time range (jdstarthist/jdendhist); defaults to usestat='mean'."""
    options = dict(usestat="mean")
    options.update(kwargs)  # caller kwargs override the default usestat
    return self.get_keys(["jdstarthist", "jdendhist"],
                         perband=perband, groupby=groupby, **options)
# -------- #
# PLOTTER #
# -------- #
def show_lc(self, ax=None, which="psf", index=None):
    """Plot the object's lightcurve (magnitude vs date), one errorbar series per band.

    Parameters
    ----------
    ax: [matplotlib Axes] -optional-
        axes to draw on; a new figure is created when None.
    which: [string] -optional-
        magnitude source: 'psf', 'ap' or 'apbig' (see get_lightcurve).
    index: -optional-
        passed through to get_lightcurve().
    """
    import matplotlib.pyplot as mpl
    from matplotlib import dates as mdates # fancy x-axis
    if ax is None:
        fig = mpl.figure(figsize=[7,4])
        ax = fig.add_subplot(111)
    else:
        fig = ax.figure
    # Data
    extra = "g" if which != "psf" else "" # strange ipac structure
    lc = self.get_lightcurve(which=which, index=index)
    #
    det_prop = dict(ls="None", marker="o", ms=8, ecolor="0.8", mec="0.8")
    # one series per filter id; colors come from the module-level ZTFCOLOR map
    for filt_ in lc["fid"].unique():
        data_ = lc[lc["fid"]==filt_]
        # NOTE(review): time.Time looks like astropy.time -- jd converted to datetime
        date_ = time.Time(data_["jd"], format="jd").datetime
        ax.errorbar(date_, data_[f"mag{which}"], yerr=data_[f"sigma{extra}{which}"],
                    color=ZTFCOLOR[FID_TO_NAME[filt_]], **det_prop)
    # low mag means bright
    ax.invert_yaxis()
    # Fancy matplotlib dates
    locator = mdates.AutoDateLocator()
    formatter = mdates.ConciseDateFormatter(locator)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)
    # Labels
    ax.set_ylabel("magnitude", fontsize="large")
    ax.set_xlabel("date", fontsize="large")
# ============== #
# Properties #
# ============== #
@property
def data(self):
    """Lightcurve data table (None until loaded)."""
    return getattr(self, "_data", None)
def has_data(self):
    """Return True once a data table has been attached."""
    current = self.data
    return current is not None
@property
def name(self):
    """Shortcut to :attr:`obj_id`."""
    return self.obj_id
@property
def obj_id(self):
    """Object identifier taken from the first data row (None when absent)."""
    if "objid" not in self.data:
        return None
    return self.data.iloc[0]["objid"]
class FritzAccess( object ):
def __init__(self, load_groups=False, **kwargs):
    """Build an (initially empty) access object.

    Parameters
    ----------
    load_groups: [bool] -optional-
        if True, immediately fetch the Fritz group list via load_groups(**kwargs).
    """
    if load_groups:
        self.load_groups(**kwargs)
# ============= #
# Method #
# ============= #
# --------- #
# I/O #
# --------- #
def store(self, use_id=False):
    """Store the individual samples by calling each FritzSample.store().

    Parameters
    ----------
    use_id: [bool] -optional-
        if True, each sample is stored under its key in ``self.samples``;
        otherwise under the sample's own groupid.

    Returns
    -------
    Void
    """
    for key, sample in self.samples.items():
        sample.store(groupname=key if use_id else None)
@classmethod
def load_local(cls, groupnames_or_id=None, force_dl=False, ignorenames_or_id=None):
    """Build an instance from locally stored samples only."""
    instance = cls()
    instance.load_samples(local_only=True,
                          force_dl=force_dl,
                          groupnames_or_id=groupnames_or_id,
                          ignorenames_or_id=ignorenames_or_id)
    return instance
# --------- #
# LOADER #
# --------- #
def load_groups(self, force_dl=False, token=None):
    """Fetch (or reload) the Fritz group list and cache it on the instance."""
    groups = FritzGroups.load(force_dl=force_dl, token=token, store=True)
    self._groups = groups
def load_samples(self, groupnames_or_id=None, local_only=False, force_dl=False, ignorenames_or_id=None,
                 reset=False):
    """Fetch the fritz samples and attach them to this instance.

    Parameters
    ----------
    groupnames_or_id: [string/int (or list of)] -optional-
        group names or ids to load; None means every group you have access
        to, or every locally stored group (see local_only).
    local_only: [bool] -optional-
        load only groups that are stored locally.
    force_dl: [bool] -optional-
        re-download samples even when a local copy exists.
    ignorenames_or_id: [string/int (or list of)] -optional-
        groups to skip, e.g. ignorenames_or_id=["RCF Junk and Variables"].
    reset: [bool] -optional-
        accepted for API compatibility; currently unused.

    Returns
    -------
    Void (see fetch_samples)
    """
    fetched = self.fetch_samples(groupnames_or_id=groupnames_or_id,
                                 local_only=local_only,
                                 force_dl=force_dl,
                                 ignorenames_or_id=ignorenames_or_id)
    for sample in fetched:
        self.set_sample(sample)
def load_sources(self, client=None, nprocess=4, **kwargs):
    """Load sources on every sample (delegates to _call_down_sample_)."""
    return self._call_down_sample_("load_sources", isfunc=True,
                                   client=client, nprocess=nprocess, **kwargs)
def fetch_samples(self, groupnames_or_id=None, load_sources=False,
                  local_only=False, update_sources=False,
                  ignorenames_or_id=None,
                  force_dl=False, store=False,
                  client=None):
    """ loads the individual samples using the FritzSample.from_group() class method.
    Parameters
    ----------
    groupnames_or_id: [string/int (or list of)] -optional-
        provide the list of group names or ids you want.
        If None, it will be the list of groups you have access to, or the
        list stored locally (see local_only).
    local_only: [bool] -optional-
        Load only the groups locally stored.
        Remark: if groupnames_or_id is not None, it will be the subset of
        groupnames_or_id that is locally stored.
    force_dl: [bool] -optional-
        Should the sample be updated while being loaded if it exists locally?
        (FritzSample.from_group option)
    client: [dask client or None] -optional-
        Use dask client to distribute the computations.
    ignorenames_or_id: [string/int (or list of)] -optional-
        similar to groupnames_or_id but for the samples to be ignored.
        for instance: ignorenames_or_id=["RCF Junk and Variables"]
    Returns
    -------
    list of FritzSample (dask futures when ``client`` is given)
    """
    if groupnames_or_id is not None:
        # accept names or numeric ids; names are resolved to ids first
        groupid = [g_ if str(g_).isdigit() else self.groupname_to_groupid(g_) for g_ in np.atleast_1d(groupnames_or_id)]
        if local_only:
            # restrict the requested ids to those with a local store
            glob_groupid = ",".join(np.asarray(groupid, dtype="str"))
            groupid = self.get_storedsamples(basename=f"fritz_sample_[{glob_groupid}]*", get_id=True)
    elif local_only:
        # no explicit request: take every locally stored sample
        groupid = self.get_storedsamples(basename=f"fritz_sample_*", get_id=True)
    else:
        # fall back to every group the user has access to
        groupid = self.get_mygroups("id")
    if ignorenames_or_id is not None:
        ignoredgroupid = [g_ if str(g_).isdigit() else self.groupname_to_groupid(g_) for g_ in np.atleast_1d(ignorenames_or_id)]
        groupid = [id_ for id_ in groupid if id_ not in ignoredgroupid]
    from_group_prop = dict(force_dl=force_dl, store=store, load_sources=load_sources, update_sources=update_sources)
    if client is not None:
        # distribute one from_group call per group; caller gathers the futures
        from dask import delayed
        d_fsample = [delayed(FritzSample.from_group)(id_, **from_group_prop) for id_ in groupid]
        return client.compute(d_fsample)
    return [FritzSample.from_group(id_, **from_group_prop) for id_ in groupid]
def fetch_data(self, fobject, names=None, store=True, force_dl=False,
               client=None, nprocess=4, show_progress=False, gather=True, **kwargs):
    """Bulk-download Fritz data for many targets, skipping what is already stored.

    Thin wrapper around ``bulk_download`` that defaults the target list to
    ``self.names`` and optionally gathers dask futures.

    Parameters
    ----------
    fobject: [string]
        What you want to download:
        - "lightcurve" (or "photometry"), "spectra" (or "spectrum"), "alerts", or "sources"
    names: [list of string] -optional-
        Targets to download data for; falls back to ``self.names`` when None.
    store: [bool] -optional-
        Should the downloaded data be stored ?
    force_dl: [bool] -optional-
        Re-download data even if it already exists locally.
    client: [dask client or None] -optional-
        Use a dask client to distribute the computations.
    nprocess: [int] -optional-
        Number of parallel download processes.
    show_progress: [bool] -optional-
        Display a progress bar while downloading.
    gather: [bool] -optional-
        When a dask client is used, gather the resulting futures before returning.

    **kwargs goes to bulk_download

    Returns
    -------
    Dictionary {name: fritz{fobject}}
    """
    targets = self.names if names is None else names
    download_options = dict(client=client, nprocess=nprocess,
                            show_progress=show_progress, store=store,
                            force_dl=force_dl, asdict=False)
    results = bulk_download(fobject, targets, **download_options, **kwargs)
    # With a dask client the call above returns futures; gather them on request.
    if client is not None and gather:
        results = client.gather(results, errors="skip")
    return results
# --------- #
# SETTER #
# --------- #
def set_sample(self, fritzsample, id_=None, overwrite=False):
    """Register a FritzSample under ``id_`` in ``self.samples``.

    When ``id_`` is None it falls back to the sample's own ``groupid``, or to a
    deduplicated "unknown_name" key. Raises TypeError on a non-FritzSample input
    and AttributeError when the key exists and ``overwrite`` is False.
    """
    # Accept FritzSample or any subclass of it (explicit MRO check).
    if FritzSample not in fritzsample.__class__.__mro__:
        raise TypeError(f"fritzsample must by a FritzSample object (or child of): type {type(fritzsample)} given")
    if id_ is None:
        # Prefer the sample's own group id; otherwise build a unique fallback key.
        if fritzsample.groupid is not None:
            id_ = fritzsample.groupid
        else:
            from .utils import tools
            id_ = tools.avoid_duplicate(list(self.samples_id) + ["unknown_name"])[-1]
    already_loaded = id_ in self.samples_id
    if already_loaded and not overwrite:
        raise AttributeError(f"{id_} is already a loaded sample. Set overwrite=True to overwrite it.")
    self.samples[id_] = fritzsample
def set_sources(self, sources, update_data=True):
    """Set the internal source list.

    Parameters
    ----------
    sources: [list of dict or list of FritzSource]
        Dicts are converted to FritzSource objects; FritzSource instances
        (or subclasses) are stored as given.
    update_data: [bool] -optional-
        Reload the derived data (``self._load_data_()``) after setting.

    Raises
    ------
    ValueError if ``sources`` is empty, TypeError on unsupported element types.
    """
    if len(sources) == 0:
        # Explicit error instead of an opaque IndexError on sources[0].
        raise ValueError("sources must be a non-empty list.")
    # isinstance (rather than exact type equality) accepts subclasses,
    # consistent with set_sample()'s MRO-based check.
    if isinstance(sources[0], FritzSource):
        self._sources = sources
    elif isinstance(sources[0], dict):
        self._sources = [FritzSource(s_) for s_ in sources]
    else:
        raise TypeError("Only list of dict or list of FritzSource accepted.")
    if update_data:
        self._load_data_()
# --------- #
# GETTER #
# --------- #
@staticmethod
def get_storedsamples(basename="fritz_sample*", get_id=False):
    """List locally stored sample files matching ``basename``.

    Returns the file paths, or the corresponding group ids when ``get_id``.
    """
    from glob import glob
    stored_files = glob(os.path.join(FRITZSOURCE, "sample", basename))
    if get_id:
        return [FritzSample._filename_to_groupid_(file_) for file_ in stored_files]
    return stored_files
# -------- #
# GROUPS #
# -------- #
def groupid_to_groupname(self, groupid, nickname=False, warn=True):
    """Convert a group id into its (nick)name.

    Shortcut to ``self.groups.groupid_to_groupname``.
    """
    groups = self.groups
    return groups.groupid_to_groupname(groupid, nickname=nickname, warn=warn)
def groupname_to_groupid(self, groupname, nickname_ok=True, warn=True):
    """Convert a group (nick)name into its id.

    Shortcut to ``self.groups.groupname_to_groupid``.
    """
    groups = self.groups
    return groups.groupname_to_groupid(groupname, nickname_ok=nickname_ok, warn=warn)
def get_mygroups(self, asformat="name"):
    """Return the name/nickname/id of the groups you have access to.

    ``asformat`` must be one of "name", "nickname" or "id"; anything else
    raises a ValueError.
    """
    accepted_formats = ["name", "nickname", "id"]
    if asformat not in accepted_formats:
        raise ValueError(f"asformat should be name, nickname or id, {asformat} given")
    return self.groups.accessible[asformat].values
# -------- #
# SAMPLE #
# -------- #
def get_sample(self, groupname_or_id):
    """Return the loaded FritzSample for the given group name or id."""
    # Numeric input is used directly as an id; otherwise resolve the name.
    if str(groupname_or_id).isdigit():
        groupid = int(groupname_or_id)
    else:
        groupid = self.groupname_to_groupid(groupname_or_id)
    return self.samples[groupid]
def get_sample_overlap(self, groupname_or_id_1, groupname_or_id_2):
    """Get the name list of targets that are in both samples.

    Parameters
    ----------
    groupname_or_id_1, groupname_or_id_2: [string or int]
        Group names or ids resolvable by ``self.get_sample``.

    Returns
    -------
    array of names present in both samples.
    """
    sample1 = self.get_sample(groupname_or_id_1)
    sample2 = self.get_sample(groupname_or_id_2)
    # np.isin supersedes the deprecated np.in1d (identical result on 1-D input).
    return sample1.names[np.isin(sample1.names, sample2.names)]
def get_names(self, sample):
    """Return the target names gathered from all loaded samples."""
    # NOTE(review): the `sample` argument is accepted but never used — this
    # always aggregates "names" across every loaded sample. Either the
    # parameter should filter to one sample or it should be removed; confirm
    # intent with callers before changing.
    return self._call_down_sample_("names", isfunc=False)
def get_samples_size(self, asgroup="name"):
    """Return the number of targets per loaded sample.

    Parameters
    ----------
    asgroup: [string] -optional-
        Key format of the returned dictionary:
        "id"/"groupid" (group ids), "name" (group names) or "nickname".

    Returns
    -------
    dict {group-key: number of names in that sample}
    """
    if asgroup not in ["id", "groupid", "name", "nickname"]:
        # FIX: error message now lists "groupid", which is accepted below.
        raise ValueError(f"asgroup should be id, groupid, name or nickname, {asgroup} given")
    groupid_map = self._map_down_sample(len, "names", isfunc=False)
    if asgroup in ["id", "groupid"]:
        return groupid_map
    if asgroup in ["name"]:
        return {self.groupid_to_groupname(id_): v for id_, v in groupid_map.items()}
    # asgroup == "nickname"
    # FIX: was `nickname=nickname`, a NameError (no local named `nickname`).
    return {self.groupid_to_groupname(id_, nickname=True): v for id_, v in groupid_map.items()}
#
# Internal | |
# modules/embedding.py
#!/usr/bin/env python
# -*- coding=utf-8 -*-
###########################################################################
# Copyright (C) 2013-2021 by Caspar. All rights reserved.
# File Name: embedding.py
# Author: <NAME>
# E-mail: <EMAIL>
# Created Time: 2021-03-29 17:19:30
###########################################################################
#
import os, copy, logging
import torch
from torch import nn
from allennlp.modules.conditional_random_field import ConditionalRandomField
from util import func as H
from . import transformer as T
class EmbeddingClfHead(T.BaseClfHead):
    """Classification head operating on static embeddings (word2vec / ELMo).

    Builds the input representation according to ``embed_type`` ('w2v',
    'elmo' or 'elmo_w2v'), then classifies through a configurable
    fully-connected stack, with optional CRF decoding for sequence
    labelling ('nmt') tasks and optional auxiliary LM loss.
    """

    def __init__(self, config, lm_model, lm_config, embed_type='w2v', w2v_path=None, iactvtn='relu', oactvtn='sigmoid', fchdim=0, extfc=False, sample_weights=False, num_lbs=1, mlt_trnsfmr=False, lm_loss=False, do_drop=True, pdrop=0.2, do_norm=True, norm_type='batch', do_lastdrop=True, do_crf=False, do_thrshld=False, constraints=None, initln=False, initln_mean=0., initln_std=0.02, task_params=None, **kwargs):
        from util import config as C
        # FIX: the defaults were mutable ([] / {}) and therefore shared across
        # instances; the None-sentinel form is backward compatible.
        constraints = [] if constraints is None else constraints
        task_params = {} if task_params is None else task_params
        super(EmbeddingClfHead, self).__init__(config, lm_model, lm_config, sample_weights=sample_weights, num_lbs=num_lbs, mlt_trnsfmr=config.task_type in ['entlmnt', 'sentsim'] and task_params.setdefault('sentsim_func', None) is not None, task_params=task_params, **kwargs)
        # NOTE: "dim_mulriple" (sic) is kept for subclass compatibility.
        self.dim_mulriple = 2 if self.task_type in ['entlmnt', 'sentsim'] and (self.task_params.setdefault('sentsim_func', None) is None or self.task_params['sentsim_func'] == 'concat') else 1
        self.embed_type = embed_type
        if embed_type.startswith('w2v'):
            from gensim.models import KeyedVectors
            from gensim.models.keyedvectors import Word2VecKeyedVectors
            self.w2v_model = w2v_path if type(w2v_path) is Word2VecKeyedVectors else (KeyedVectors.load(w2v_path, mmap='r') if w2v_path and os.path.isfile(w2v_path) else None)
            assert(self.w2v_model)
            self.n_embd = self.w2v_model.syn0.shape[1] + (self.n_embd if hasattr(self, 'n_embd') else 0)
            config.register_callback('mdl_trsfm', EmbeddingClfHead.callback_update_w2v_model(self))
        elif embed_type.startswith('elmo_w2v'):
            # FIX: this branch must be tested BEFORE the plain 'elmo' branch —
            # 'elmo_w2v'.startswith('elmo') is True, so in the original order
            # this combined branch was unreachable and self.w2v_model was
            # never set for 'elmo_w2v'.
            from gensim.models import KeyedVectors
            from gensim.models.keyedvectors import Word2VecKeyedVectors
            self.w2v_model = w2v_path if type(w2v_path) is Word2VecKeyedVectors else (KeyedVectors.load(w2v_path, mmap='r') if w2v_path and os.path.isfile(w2v_path) else None)
            assert(self.w2v_model)
            self.vocab_size = 793471
            self.n_embd = self.w2v_model.syn0.shape[1] + lm_config['elmoedim'] * 2 + (self.n_embd if hasattr(self, 'n_embd') else 0)
            config.register_callback('mdl_trsfm', EmbeddingClfHead.callback_update_w2v_model(self))
            config.register_callback('mdl_trsfm', EmbeddingClfHead.callback_update_elmo_config(self))
        elif embed_type.startswith('elmo'):
            self.vocab_size = 793471
            self.n_embd = lm_config['elmoedim'] * 2 + (self.n_embd if hasattr(self, 'n_embd') else 0) # two ELMo layer * ELMo embedding dimensions
            config.register_callback('mdl_trsfm', EmbeddingClfHead.callback_update_elmo_config(self))
        self.norm = C.NORM_TYPE_MAP[norm_type](self.maxlen) if self.task_type == 'nmt' else C.NORM_TYPE_MAP[norm_type](self.n_embd)
        self._int_actvtn = C.ACTVTN_MAP[iactvtn]
        self._out_actvtn = C.ACTVTN_MAP[oactvtn]
        self.fchdim = fchdim
        self.extfc = extfc
        self.hdim = self.dim_mulriple * self.n_embd if self.mlt_trnsfmr and self.task_type in ['entlmnt', 'sentsim'] else self.n_embd
        self.linear = self.__init_linear__()
        if (initln): self.linear.apply(H._weights_init(mean=initln_mean, std=initln_std))
        if self.do_extlin:
            self.extlinear = nn.Linear(self.n_embd, self.n_embd)
            if (initln): self.extlinear.apply(H._weights_init(mean=initln_mean, std=initln_std))
        self.crf = ConditionalRandomField(num_lbs) if do_crf else None

    def __init_linear__(self):
        """Build the classifier stack; its shape depends on task type, fchdim and sentsim_func."""
        use_gpu = next(self.parameters()).is_cuda
        linear = (nn.Sequential(nn.Linear(self.hdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.fchdim), self._int_actvtn(), *([] if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else [nn.Linear(self.fchdim, self.num_lbs), self._out_actvtn()])) if self.task_type in ['entlmnt', 'sentsim'] else nn.Sequential(nn.Linear(self.hdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.num_lbs))) if self.fchdim else (nn.Sequential(*([nn.Linear(self.hdim, self.hdim), self._int_actvtn()] if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else [nn.Linear(self.hdim, self.num_lbs), self._out_actvtn()])) if self.task_type in ['entlmnt', 'sentsim'] else nn.Linear(self.hdim, self.num_lbs))
        return linear.to('cuda') if use_gpu else linear

    def __lm_head__(self):
        """Return the language-model head paired with this classifier."""
        return EmbeddingHead(self)

    def _w2v(self, input_ids, use_gpu=False):
        """Look up word2vec embeddings for a batch of token-id sequences."""
        wembd_tnsr = torch.tensor([self.w2v_model.syn0[s] for s in input_ids])
        if use_gpu: wembd_tnsr = wembd_tnsr.to('cuda')
        return wembd_tnsr

    def _sentvec(self, input_ids, use_gpu=False):
        """Placeholder for sentence-vector lookup; not implemented (returns None)."""
        pass

    def forward(self, input_ids, *extra_inputs, labels=None, past=None, weights=None, embedding_mode=False, ret_mask=False):
        """Build the embedding features (and optional LM loss) for the input batch.

        Returns ``(clf_h[, mask])`` when ``labels`` is None, otherwise
        ``(clf_h, lm_loss[, mask])``. ``past`` and ``embedding_mode`` are
        currently unused here (kept for interface compatibility).
        """
        use_gpu = next(self.parameters()).is_cuda
        # The sample weights, when enabled, ride along as the last extra input.
        if self.sample_weights and len(extra_inputs) > 0:
            sample_weights = extra_inputs[-1]
            extra_inputs = extra_inputs[:-1]
        else:
            sample_weights = None
        # e.g. 'elmo_w2v' -> ['elmo', 'w2v']: each key selects an embedder.
        unsolved_input_keys, unsolved_inputs = self.embed_type.split('_'), [input_ids]+list(extra_inputs)
        extra_inputs_dict = dict(zip([x for x in self.input_keys if x != 'input_ids'], extra_inputs))
        mask = extra_inputs_dict['mask'] # mask of the original textual input
        clf_hs = []
        if self.task_type in ['entlmnt', 'sentsim']:
            # Paired inputs: embed each side separately, then concatenate per side.
            if (self.embed_type.startswith('elmo')):
                embeddings = (self.lm_model(input_ids[0]), self.lm_model(input_ids[1]))
                clf_hs.append((torch.cat(embeddings[0]['elmo_representations'], dim=-1), torch.cat(embeddings[1]['elmo_representations'], dim=-1)))
                del unsolved_input_keys[0]
                del unsolved_inputs[0]
            for input_key, input_tnsr in zip(unsolved_input_keys, unsolved_inputs):
                clf_hs.append([getattr(self, '_%s'%input_key)(input_tnsr[x], use_gpu=use_gpu) for x in [0,1]])
            clf_h = [torch.cat(embds, dim=-1) for embds in zip(*clf_hs)]
        else:
            if (self.embed_type.startswith('elmo')):
                embeddings = self.lm_model(input_ids)
                clf_hs.append(torch.cat(embeddings['elmo_representations'], dim=-1))
                del unsolved_input_keys[0]
                del unsolved_inputs[0]
            for input_key, input_tnsr in zip(unsolved_input_keys, unsolved_inputs):
                clf_hs.append(getattr(self, '_%s'%input_key)(input_tnsr, use_gpu=use_gpu))
            clf_h = torch.cat(clf_hs, dim=-1)
        if labels is None:
            return (clf_h, mask) if ret_mask else (clf_h,)
        # Calculate language model loss
        if (self.lm_loss):
            lm_logits, lm_target = self.lm_logit(input_ids, clf_h, extra_inputs_dict)
            lm_loss_func = nn.CrossEntropyLoss(ignore_index=-1, reduction='none')
            lm_loss = lm_loss_func(lm_logits.contiguous().view(-1, lm_logits.size(-1)), lm_target.contiguous().view(-1)).view(input_ids.size(0), -1)
            if sample_weights is not None: lm_loss *= sample_weights
        else:
            lm_loss = None
        return (clf_h, lm_loss, mask) if ret_mask else (clf_h, lm_loss)

    def _forward(self, clf_h, mask, labels=None, weights=None): # For fine-tune task
        """Classify pre-built features: returns logits (labels=None) or per-sample loss."""
        from torch.nn import functional as F  # FIX: `F` was used below but never imported
        if self.task_type in ['entlmnt', 'sentsim']:
            if self.do_norm: clf_h = [self.norm(clf_h[x]) for x in [0,1]]
            clf_h = [self.dropout(clf_h[x]) for x in [0,1]]
            if (self.task_type == 'entlmnt' or self.task_params.setdefault('sentsim_func', None) is None or self.task_params['sentsim_func'] == 'concat'):
                # FIX: `task_params` was referenced without `self.` (NameError at runtime).
                if self.task_params.setdefault('concat_strategy', 'normal') == 'diff':
                    clf_h = torch.cat(clf_h+[torch.abs(clf_h[0]-clf_h[1]), clf_h[0]*clf_h[1]], dim=-1)
                elif self.task_params.setdefault('concat_strategy', 'normal') == 'flipflop':
                    clf_h = (torch.cat(clf_h, dim=-1) + torch.cat(clf_h[::-1], dim=-1))
                else:
                    clf_h = torch.cat(clf_h, dim=-1)
                clf_logits = self.linear(clf_h) if self.linear else clf_h
            else:
                clf_logits = clf_h = F.pairwise_distance(self.linear(clf_h[0]), self.linear(clf_h[1]), 2, eps=1e-12) if self.task_params['sentsim_func'] == 'dist' else F.cosine_similarity(self.linear(clf_h[0]), self.linear(clf_h[1]), dim=1, eps=1e-12)
        else:
            if self.do_norm: clf_h = self.norm(clf_h)
            clf_h = self.dropout(clf_h)
            clf_logits = self.linear(clf_h)
        if self.do_lastdrop: clf_logits = self.last_dropout(clf_logits)
        if (labels is None):
            if self.crf:
                # FIX: the original referenced undefined `input_ids`/`use_gpu` here;
                # both are derived from the logits tensor instead.
                use_gpu = clf_logits.is_cuda
                emissions = clf_logits.view(clf_logits.size(0), -1, self.num_lbs)
                tag_seq, score = zip(*self.crf.viterbi_tags(emissions, torch.ones(*emissions.size()[:2]).int()))
                tag_seq = torch.tensor(tag_seq).to('cuda') if use_gpu else torch.tensor(tag_seq)
                # One-hot encode the Viterbi path so the output shape matches the logits.
                clf_logits = torch.zeros((*tag_seq.size(), self.num_lbs)).to('cuda') if use_gpu else torch.zeros((*tag_seq.size(), self.num_lbs))
                clf_logits = clf_logits.scatter(-1, tag_seq.unsqueeze(-1), 1)
                return clf_logits
            if (self.task_type == 'sentsim' and self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != self.task_params['ymode']): return 1 - clf_logits.view(-1, self.num_lbs)
            return clf_logits.view(-1, self.num_lbs)
        if self.crf:
            # FIX: batch size taken from the logits (was undefined `input_ids`).
            clf_loss = -self.crf(clf_logits.view(clf_logits.size(0), -1, self.num_lbs), mask.long())
        elif self.task_type == 'mltc-clf' or self.task_type == 'entlmnt' or self.task_type == 'nmt':
            loss_func = nn.CrossEntropyLoss(weight=weights, reduction='none')
            clf_loss = loss_func(clf_logits.view(-1, self.num_lbs), labels.view(-1))
        elif self.task_type == 'mltl-clf':
            loss_func = nn.BCEWithLogitsLoss(pos_weight=10*weights if weights is not None else None, reduction='none')
            clf_loss = loss_func(clf_logits.view(-1, self.num_lbs), labels.view(-1, self.num_lbs).float())
        elif self.task_type == 'sentsim':
            from util import config as C
            loss_cls = C.RGRSN_LOSS_MAP[self.task_params.setdefault('loss', 'contrastive')]
            loss_func = loss_cls(reduction='none', x_mode=C.SIM_FUNC_MAP.setdefault(self.task_params['sentsim_func'], 'dist'), y_mode=self.task_params.setdefault('ymode', 'sim')) if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else nn.MSELoss(reduction='none')
            clf_loss = loss_func(clf_logits.view(-1), labels.view(-1))
        return clf_loss

    def _filter_vocab(self):
        """Placeholder for vocabulary filtering; intentionally a no-op."""
        pass

    @classmethod
    def callback_update_w2v_model(cls, model):
        """Return a config callback that publishes this head's w2v model."""
        def _callback(config):
            from util import config as C
            setattr(config, 'w2v_model', model.w2v_model)
            config.delayed_update(C.Configurable.PREDEFINED_MODEL_CONFIG_DELAYED_UPDATES[config.model])
        return _callback

    @classmethod
    def callback_update_elmo_config(cls, model):
        """Return a config callback that publishes this head's ELMo lm_config."""
        def _callback(config):
            from util import config as C
            setattr(config, 'lm_config', model.lm_config)
            config.delayed_update(C.Configurable.PREDEFINED_MODEL_CONFIG_DELAYED_UPDATES[config.model])
        return _callback
class EmbeddingPool(EmbeddingClfHead):
    """Embedding classification head that reduces the token sequence with a
    2-D max/avg pooling layer (or, without a pooler, by gathering one vector
    per sequence at the mask-derived index)."""

    def __init__(self, config, lm_model, lm_config, pooler=None, pool_params={'kernel_size':8, 'stride':4}, embed_type='w2v', w2v_path=None, iactvtn='relu', oactvtn='sigmoid', fchdim=0, extfc=False, sample_weights=False, num_lbs=1, mlt_trnsfmr=False, lm_loss=False, do_drop=True, pdrop=0.2, do_norm=True, norm_type='batch', do_lastdrop=True, do_crf=False, do_thrshld=False, constraints=[], initln=False, initln_mean=0., initln_std=0.02, task_params={}, **kwargs):
        # NOTE(review): pool_params/constraints/task_params are mutable default
        # arguments shared across instances — confirm callers pass fresh objects.
        # Sequence labelling is unsupported: pooling collapses the sequence axis.
        assert(config.task_type != 'nmt')
        from util import config as C
        super(EmbeddingPool, self).__init__(config, lm_model, lm_config, embed_type=embed_type, w2v_path=w2v_path, iactvtn=iactvtn, oactvtn=oactvtn, fchdim=fchdim, extfc=extfc, sample_weights=sample_weights, num_lbs=num_lbs, mlt_trnsfmr=mlt_trnsfmr, lm_loss=lm_loss, do_drop=do_drop, pdrop=pdrop, do_norm=do_norm, norm_type=norm_type, do_lastdrop=do_lastdrop, do_crf=do_crf, do_thrshld=do_thrshld, constraints=constraints, initln=initln, initln_mean=initln_mean, initln_std=initln_std, task_params=task_params, **kwargs)
        self.maxlen = self.task_params.setdefault('maxlen', 128)
        if pooler:
            self.pooler = nn.MaxPool2d(**pool_params) if pooler == 'max' else nn.AvgPool2d(**pool_params)
            # Standard pooling output-size formula applied to both axes of the
            # (2*maxlen, n_embd/2) view produced in forward(); max pooling uses
            # the dilation-aware variant, avg pooling the plain one.
            encoder_odim = int((2 * self.maxlen + 2 * pool_params.setdefault('padding', 0) - pool_params.setdefault('dilation', 1) * (pool_params['kernel_size'] - 1) - 1) / pool_params['stride'] + 1) * int((int(0.5 * self.n_embd) + 2 * pool_params.setdefault('padding', 0) - pool_params.setdefault('dilation', 1) * (pool_params['kernel_size'] - 1) - 1) / pool_params['stride'] + 1) if pooler == 'max' else int((2 * self.maxlen + 2 * pool_params.setdefault('padding', 0) - pool_params['kernel_size']) / pool_params['stride'] + 1) * int((int(0.5 * self.n_embd) + 2 * pool_params.setdefault('padding', 0) - pool_params['kernel_size']) / pool_params['stride'] + 1)
            self.norm = C.NORM_TYPE_MAP[norm_type](encoder_odim)
            self.hdim = self.dim_mulriple * encoder_odim if self.task_type in ['entlmnt', 'sentsim'] else encoder_odim
        else:
            self.pooler = None
            self.norm = C.NORM_TYPE_MAP[norm_type](self.n_embd)
            self.hdim = self.n_embd
        # Rebuild the classifier: self.hdim changed after the parent ran.
        self.linear = self.__init_linear__()
        if (initln): self.linear.apply(H._weights_init(mean=initln_mean, std=initln_std))

    def forward(self, input_ids, *extra_inputs, labels=None, past=None, weights=None, embedding_mode=False):
        """Embed, pool to one vector per sequence, then classify.

        Returns ``(output, {})`` when labels is None, else ``(loss_output, lm_loss, {})``.
        """
        outputs = super(EmbeddingPool, self).forward(input_ids, *extra_inputs, labels=labels, past=past, weights=weights, embedding_mode=embedding_mode, ret_mask=True)
        if labels is None:
            clf_h, mask = outputs
        else:
            clf_h, lm_loss, mask = outputs
        # Index of the last unmasked position per sequence.
        # NOTE(review): for paired tasks this is indexed as pool_idx[x] below,
        # which assumes `mask` is a pair of tensors — confirm against the parent.
        pool_idx = mask.sum(1)
        if self.pooler:
            # Reshape (batch, seq, emb) -> (batch, 2*seq, emb/2) so the 2-D pooler
            # sees the geometry that encoder_odim was computed for.
            clf_h = [clf_h[x].view(clf_h[x].size(0), 2*clf_h[x].size(1), -1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else clf_h.view(clf_h.size(0), 2*clf_h.size(1), -1)
            clf_h = [self.pooler(clf_h[x]).view(clf_h[x].size(0), -1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else self.pooler(clf_h).view(clf_h.size(0), -1)
        else:
            # No pooler: gather the embedding at the last unmasked position.
            clf_h = [clf_h[x].gather(1, pool_idx[x].unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h[x].size(2))).squeeze(1) for x in [0,1]] if self.task_type in ['entlmnt', 'sentsim'] else clf_h.gather(1, pool_idx.unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h.size(2))).squeeze(1)
        return (self._forward(clf_h, mask, labels=labels, weights=weights),) + (({},) if labels is None else (lm_loss, {}))
class EmbeddingSeq2Vec(EmbeddingClfHead):
def __init__(self, config, lm_model, lm_config, seq2vec=None, s2v_params={'hdim':768}, embed_type='w2v', w2v_path=None, iactvtn='relu', oactvtn='sigmoid', fchdim=0, extfc=False, sample_weights=False, num_lbs=1, mlt_trnsfmr=False, lm_loss=False, do_drop=True, pdrop=0.2, do_norm=True, norm_type='batch', do_lastdrop=True, do_crf=False, do_thrshld=False, constraints=[], initln=False, initln_mean=0., initln_std=0.02, task_params={}, **kwargs):
assert(config.task_type != 'nmt')
from util import config as C
super(EmbeddingSeq2Vec, self).__init__(config, lm_model, lm_config, embed_type=embed_type, w2v_path=w2v_path, iactvtn=iactvtn, oactvtn=oactvtn, fchdim=fchdim, extfc=extfc, sample_weights=sample_weights, num_lbs=num_lbs, mlt_trnsfmr=mlt_trnsfmr, lm_loss=lm_loss, do_drop=do_drop, pdrop=pdrop, do_norm=do_norm, norm_type=norm_type, do_lastdrop=do_lastdrop, do_crf=do_crf, do_thrshld=do_thrshld, constraints=constraints, initln=initln, initln_mean=initln_mean, initln_std=initln_std, task_params=task_params, **kwargs)
if seq2vec:
params = {}
if seq2vec.startswith('pytorch-'):
pth_mdl = '-'.join(seq2vec.split('-')[1:])
_ = [params.update(x) for x in [C.SEQ2VEC_MDL_PARAMS.setdefault('pytorch', {}).setdefault(embed_type, {}), C.SEQ2VEC_TASK_PARAMS.setdefault('pytorch', {}).setdefault(self.task_type, {})]]
_ = [params.update({p:s2v_params[k]}) for k, p in C.SEQ2VEC_LM_PARAMS_MAP.setdefault('pytorch', []) if k in s2v_params]
if (embed_type == 'w2v'): params[pth_mdl]['input_size'] = self.w2v_model.syn0.shape[1]
if (embed_type == 'elmo_w2v'): params[pth_mdl]['input_size'] = params[pth_mdl]['input_size'] + | |
import unittest
from conda_build_all.conda_interface import MatchSpec
from conda_build_all.version_matrix import (parse_specifications,
special_case_version_matrix,
filter_cases,
keep_top_n_major_versions,
keep_top_n_minor_versions)
from conda_build_all.tests.unit.dummy_index import DummyPackage, DummyIndex
class Test_special_case_version_matrix(unittest.TestCase):
    """Tests for special_case_version_matrix: building the python/numpy/perl/R
    build-matrix cases from a package's dependencies and a package index."""

    def setUp(self):
        self.pkgs = {'a': DummyPackage('pkgA', ['python', 'numpy']),
                     'b': DummyPackage('b', ['c']),
                     'c': DummyPackage('c'),
                     'b_alt': DummyPackage('b', ['c', 'd']),
                     'd': DummyPackage('d')}
        self.index = DummyIndex()

    def test_no_case(self):
        # No cases should still give us a result with a single case in it.
        a = DummyPackage('pkgA', ['wibble'])
        self.index.add_pkg('python', '2.7.2')
        self.index.add_pkg('wibble', '3.5.0')
        r = special_case_version_matrix(a, self.index)
        self.assertEqual(r, set([()]))

    def test_python_itself(self):
        # Building python itself still yields a (major.minor) python case.
        a = DummyPackage('python', version="a.b.c")
        r = special_case_version_matrix(a, self.index)
        self.assertEqual(r, set(((('python', 'a.b'),),
                                 ))
                         )

    def test_python(self):
        a = DummyPackage('pkgA', ['python'])
        self.index.add_pkg('python', '2.7.2')
        self.index.add_pkg('python', '3.5.0')
        r = special_case_version_matrix(a, self.index)
        self.assertEqual(r, set(((('python', '2.7'),),
                                 (('python', '3.5'),),
                                 ))
                         )

    def test_noarch_python(self):
        # A noarch: python package needs no python version axis at all.
        a = DummyPackage('pkgA', ['python'])
        a.noarch = 'python'
        self.index.add_pkg('python', '2.7.2')
        self.index.add_pkg('python', '3.5.0')
        r = special_case_version_matrix(a, self.index)
        self.assertEqual(r, set(((), )))

    def test_constrained_python(self):
        a = DummyPackage('pkgA', ['python <3'])
        self.index.add_pkg('python', '2.7.2')
        self.index.add_pkg('python', '3.5.0')
        r = special_case_version_matrix(a, self.index)
        self.assertEqual(r, set(((('python', '2.7'),
                                  ),
                                 ))
                         )

    def test_numpy_simplest_case(self):
        a = DummyPackage('pkgA', ['python', 'numpy'])
        self.index.add_pkg('numpy', '1.8.0', 'py27', depends=['python'])
        self.index.add_pkg('python', '2.7.2')
        r = special_case_version_matrix(a, self.index)
        self.assertEqual(r, set([(('python', '2.7'), ('numpy', '1.8')),
                                 ])
                         )

    def test_numpy_without_python(self):
        # Conda recipes which do not depend on python, but do depend on numpy,
        # do not have the full conda metadata, but still need to be handled.
        a = DummyPackage('pkgA', ['numpy'])
        self.index.add_pkg('numpy', '1.8.0', 'py27', depends=['python'])
        self.index.add_pkg('python', '2.7.2')
        r = special_case_version_matrix(a, self.index)
        self.assertEqual(r, set([(('python', '2.7'), ('numpy', '1.8')),
                                 ])
                         )

    def test_numpy_repeated_python27(self):
        # Repeating python 2.7 will result in the latest version being found
        a = DummyPackage('pkgA', ['python', 'numpy'])
        self.index.add_pkg('numpy', '1.8.0', 'py27', depends=['python <3'])
        self.index.add_pkg('python', '2.7.2')
        self.index.add_pkg('python', '2.7.0')
        r = special_case_version_matrix(a, self.index)
        self.assertEqual(r, set([(('python', '2.7'), ('numpy', '1.8')),
                                 ])
                         )

    def test_numpy_repeated_python(self):
        a = DummyPackage('pkgA', ['python', 'numpy'])
        self.index.add_pkg('numpy', '1.8.0', 'py27', depends=['python <3'])
        self.index.add_pkg('numpy', '1.8.0', 'py35', depends=['python'])
        self.index.add_pkg('numpy', '1.9.0', 'py35', depends=['python >=3'])
        self.index.add_pkg('python', '2.7.2')
        self.index.add_pkg('python', '3.5.0')
        r = special_case_version_matrix(a, self.index)
        self.assertEqual(r, set(((('python', '2.7'), ('numpy', '1.8')),
                                 (('python', '3.5'), ('numpy', '1.8')),
                                 (('python', '3.5'), ('numpy', '1.9')),
                                 ))
                         )

    def test_dependency_on_py27(self):
        # If a dependency can't hit the python version, it should not
        # be considered a case.
        a = DummyPackage('pkgA', ['python', 'oldschool'])
        self.index.add_pkg('oldschool', '1.8.0', 'py27', depends=['python <3'])
        self.index.add_pkg('python', '2.7.2')
        self.index.add_pkg('python', '3.5.0')
        r = special_case_version_matrix(a, self.index)
        # No python 3 should be here.
        self.assertEqual(r, set([(('python', '2.7'),
                                  ),
                                 ]
                                ))

    def construct_numpy_index(self, python_versions, numpy_versions):
        """
        Set up an index with several versions of python and numpy.
        """
        for python_version in python_versions:
            python_build_string = 'py' + python_version.replace('.', '')
            self.index.add_pkg('python', python_version)
            for numpy_version in numpy_versions:
                # Add a patch version to each numpy since that is how the
                # versions are numbered.
                self.index.add_pkg('numpy',
                                   numpy_version + '.2',
                                   python_build_string,
                                   depends=['python ' + python_version])

    def test_numpy_xx_only(self):
        # Only a numpy x.x spec.
        # Build an index that contains numpy 1.9 and 1.10 on python 2.7 and
        # 3.5 for a total of 4 numpy/python combinations.
        pythons = ['2.7', '3.5']
        # Only major/minor in the numpy list here because that is what is used
        # for the build matrix.
        numpys = ['1.9', '1.10']
        self.construct_numpy_index(pythons, numpys)
        # Case 1: Only a numpy x.x spec...
        numpy_dep_case = ('numpy x.x', 'python')
        # ...expect all four cases to be in the matrix.
        expect_result = []
        for python in pythons:
            for numpy in numpys:
                expect_result.append((('python', python), ('numpy', numpy)))
        a = DummyPackage('pkgA', numpy_dep_case, numpy_dep_case)
        r = special_case_version_matrix(a, self.index)
        self.assertEqual(set(r), set(expect_result),
                         msg='got: {}\nexpected: {}'.format(r, expect_result))

    def test_numpy_xx_and_nonrestrictive_specifciation(self):
        # Case 2:
        # A numpy x.x spec and a numpy version restriction which does NOT
        # exclude any of the cases in the DummyIndex.
        # Build an index that contains numpy 1.9 and 1.10 on python 2.7 and
        # 3.5 for a total of 4 numpy/python combinations.
        pythons = ['2.7', '3.5']
        # Only major/minor in the numpy list here because that is what is used
        # for the build matrix.
        numpys = ['1.9', '1.10']
        self.construct_numpy_index(pythons, numpys)
        # A numpy x.x spec and a numpy version restriction which does NOT
        # exclude any of the cases in the DummyIndex.
        numpy_dep_case = ('numpy x.x', 'numpy >1.6', 'python')
        # As in case 1, expect all four cases to be in the matrix.
        expect_result = []
        for python in pythons:
            for numpy in numpys:
                expect_result.append((('python', python), ('numpy', numpy)))
        a = DummyPackage('pkgA', numpy_dep_case, numpy_dep_case)
        r = special_case_version_matrix(a, self.index)
        self.assertEqual(set(r), set(expect_result),
                         msg='got: {}\nexpected: {}'.format(r, expect_result))

    def test_numpy_xx_and_restrictive_specifcation(self):
        # Case 3:
        # A numpy x.x spec and a numpy version restriction which does
        # eliminate one the numpy versions in the DummyIndex.
        # Build an index that contains numpy 1.9 and 1.10 on python 2.7 and
        # 3.5 for a total of 4 numpy/python combinations.
        pythons = ['2.7', '3.5']
        # Only major/minor in the numpy list here because that is what is used
        # for the build matrix.
        numpys = ['1.9', '1.10']
        self.construct_numpy_index(pythons, numpys)
        # A numpy x.x spec and a numpy version restriction which does
        # eliminate one the numpy versions in the DummyIndex.
        numpy_dep_case = ('numpy x.x', 'numpy >=1.10', 'python')
        # Expect only the numpy 1.10 cases to survive (1.9 is excluded by >=1.10).
        expect_result = []
        for python in pythons:
            for numpy in numpys[1:]:
                expect_result.append((('python', python), ('numpy', numpy)))
        a = DummyPackage('pkgA', numpy_dep_case, numpy_dep_case)
        r = special_case_version_matrix(a, self.index)
        self.assertEqual(set(r), set(expect_result),
                         msg='got: {}\nexpected: {}'.format(r, expect_result))

    def test_perl_matrix(self):
        # Perl keeps its full version (no major.minor truncation as for python).
        a = DummyPackage('pkgA', ['perl'])
        self.index.add_pkg('perl', '4.5.6')
        self.index.add_pkg('perl', '4.5.7')
        r = special_case_version_matrix(a, self.index)
        self.assertEqual(r, set(((('perl', '4.5.6'),),
                                 (('perl', '4.5.7'),),
                                 ))
                         )

    def test_perl_and_python_matrix(self):
        a = DummyPackage('pkgA', ['perl', 'python'])
        self.index.add_pkg('perl', '4.5.6')
        self.index.add_pkg('perl', '4.5.7')
        self.index.add_pkg('python', '2.7')
        self.index.add_pkg('python', '3.5')
        r = special_case_version_matrix(a, self.index)
        expected = set(((('python', '3.5'), ('perl', '4.5.7')),
                        (('python', '2.7'), ('perl', '4.5.7')),
                        (('python', '2.7'), ('perl', '4.5.6')),
                        (('python', '3.5'), ('perl', '4.5.6'))))
        self.assertEqual(r, expected)

    def test_r_matrix(self):
        a = DummyPackage('pkgA', ['r-base'])
        self.index.add_pkg('r-base', '4.5.6')
        self.index.add_pkg('r-base', '4.5.7')
        r = special_case_version_matrix(a, self.index)
        self.assertEqual(r, set(((('r-base', '4.5.6'),),
                                 (('r-base', '4.5.7'),),
                                 ))
                         )

    def test_r_and_py_and_perl_matrix(self):
        # Full cross-product across all three special-cased languages.
        a = DummyPackage('pkgA', ['perl', 'python', 'r-base'])
        self.index.add_pkg('perl', '4.5.6')
        self.index.add_pkg('perl', '4.5.7')
        self.index.add_pkg('python', '2.7')
        self.index.add_pkg('python', '3.5')
        self.index.add_pkg('r-base', '1.2.3')
        self.index.add_pkg('r-base', '4.5.6')
        r = special_case_version_matrix(a, self.index)
        expected = set(((('python', '2.7'), ('perl', '4.5.6'), ('r-base', '1.2.3')),
                        (('python', '2.7'), ('perl', '4.5.6'), ('r-base', '4.5.6')),
                        (('python', '3.5'), ('perl', '4.5.6'), ('r-base', '1.2.3')),
                        (('python', '3.5'), ('perl', '4.5.7'), ('r-base', '1.2.3')),
                        (('python', '3.5'), ('perl', '4.5.7'), ('r-base', '4.5.6')),
                        (('python', '2.7'), ('perl', '4.5.7'), ('r-base', '4.5.6')),
                        (('python', '2.7'), ('perl', '4.5.7'), ('r-base', '1.2.3')),
                        (('python', '3.5'), ('perl', '4.5.6'), ('r-base', '4.5.6')),
                        ))
        self.assertEqual(r, expected)
class Test_parse_specification(unittest.TestCase):
    """Tests for parse_specifications' merging of multi-line dependency specs."""

    def _assert_parsed(self, raw_specs, combined_specs):
        # Helper: parsing raw_specs must yield the MatchSpec dict of combined_specs.
        expected = {ms.name: ms for ms in (MatchSpec(s) for s in combined_specs)}
        self.assertEqual(expected, parse_specifications(raw_specs))

    def test_specification_no_duplicates(self):
        # One-line specifications pass through unchanged.
        specs = ['numpy', 'scipy', 'python']
        self._assert_parsed(specs, specs)

    def test_specification_duplicates_with_version(self):
        # Duplicate package lines carrying version clauses are joined with a comma.
        self._assert_parsed(['numpy >=1.7', 'numpy <1.10', 'python'],
                            ['numpy >=1.7,<1.10', 'python'])

    def test_three_part_spec_preserved(self):
        # A full three-part conda spec (name version build) is kept intact.
        specs = ['numpy 1.8.1 py27_0', 'python']
        self._assert_parsed(specs, specs)

    def test_multiline_spec_with_one_three_part_spec(self):
        # Combining a three-part spec with another version clause yields a
        # comma-joined string. That is not a valid conda version specification,
        # but neither was the original multi-line form.
        self._assert_parsed(['numpy 1.8.1 py27_0', 'numpy 1.8*', 'python'],
                            ['numpy 1.8.1 py27_0,1.8*', 'python'])

    def test_specification_with_blank(self):
        # A bare package-name line combines correctly with versioned duplicates.
        self._assert_parsed(('numpy 1.9', 'numpy', 'numpy <1.11', 'python 2.7'),
                            ['numpy 1.9,<1.11', 'python 2.7'])
class CasesTestCase(unittest.TestCase):
    """Shared fixture: a pool of (package, version) pairs keyed by short label."""

    def setUp(self):
        # Same content and insertion order as the original literal.
        self.item = dict(
            py26=('python', '2.6'),
            py27=('python', '2.7'),
            py34=('python', '3.4'),
            py35=('python', '3.5'),
            o12=('other', '1.2'),
            o13=('other', '1.3'),
            np19=('numpy', '1.9'),
            np110=('numpy', '1.10'),
            np21=('numpy', '2.1'),
        )
class | |
run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_20_1 = settings_1_20_0
settings_1_20_2 = settings_1_20_1
settings_1_20_3 = settings_1_20_2
settings_1_20_4 = settings_1_20_3
settings_1_20_5 = settings_1_20_4
settings_1_21_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_21_1 = settings_1_21_0
settings_1_21_2 = settings_1_21_1
settings_1_21_3 = settings_1_21_2
settings_1_22_0 = settings_1_21_2
settings_1_22_1 = settings_1_22_0
settings_1_22_2 = settings_1_22_1
settings_1_22_3 = settings_1_22_2
settings_1_23_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", | |
returns a random hash function which returns hash values from 0..N-1.
"""
if not isinstance(keys, (list, tuple)):
raise TypeError("list or tuple expected")
NK = len(keys)
if NK != len(set(keys)):
raise ValueError("duplicate keys")
for key in keys:
if not isinstance(key, str):
raise TypeError("key a not string: %r" % key)
if NK > 10000 and Hash == StrSaltHash:
print("""\
WARNING: You have %d keys.
Using --hft=1 is likely to fail for so many keys.
Please use --hft=2 instead.
""" % NK)
# the number of vertices in the graph G
NG = NK + 1
if verbose:
print('NG = %d' % NG)
trial = 0 # Number of trial graphs so far
while True:
if (trial % trials) == 0: # trials failures, increase NG slightly
if trial > 0:
NG = max(NG + 1, int(1.05 * NG))
if verbose:
sys.stdout.write('\nGenerating graphs NG = %d ' % NG)
trial += 1
if NG > 100 * (NK + 1):
raise TooManyInterationsError("%d keys" % NK)
if verbose:
sys.stdout.write('.')
sys.stdout.flush()
G = Graph(NG) # Create graph with NG vertices
f1 = Hash(NG) # Create 2 random hash functions
f2 = Hash(NG)
# Connect vertices given by the values of the two hash functions
# for each key. Associate the desired hash value with each edge.
for hashval, key in enumerate(keys):
G.connect(f1(key), f2(key), hashval)
# Try to assign the vertex values. This will fail when the graph
# is cyclic. But when the graph is acyclic it will succeed and we
# break out, because we're done.
if G.assign_vertex_values():
break
if verbose:
print('\nAcyclic graph found after %d trials.' % trial)
print('NG = %d' % NG)
# Sanity check the result by actually verifying that all the keys
# hash to the right value.
for hashval, key in enumerate(keys):
assert hashval == (
G.vertex_values[f1(key)] + G.vertex_values[f2(key)]
) % NG
if verbose:
print('OK')
return f1, f2, G.vertex_values
class Format(object):
    """Render a list of values as a delimiter-joined, width-wrapped string.

    Non-sequence inputs are simply converted with ``str``.  Wrapping starts
    from an assumed column of 20 (the typical indent of the substitution
    point in the code template) and continues at ``indent`` spaces.
    """

    def __init__(self, width=76, indent=4, delimiter=', '):
        self.width = width
        self.indent = indent
        self.delimiter = delimiter

    def print_format(self):
        """Dump the formatting options (used in verbose mode)."""
        print("Format options:")
        for name in 'width', 'indent', 'delimiter':
            print(' %s: %r' % (name, getattr(self, name)))

    def __call__(self, data, quote=False):
        """Return *data* joined by the delimiter and wrapped to the width.

        When *quote* is true, each element is wrapped in double quotes.
        """
        if not isinstance(data, (list, tuple)):
            return str(data)
        sep_width = len(self.delimiter)
        out = StringIO()
        column = 20  # assumed starting column inside the template line
        last_index = len(data) - 1
        for idx, item in enumerate(data):
            text = ('"%s"' if quote else '%s') % item
            # Wrap before an element that would overflow the target width.
            if column + len(text) + sep_width > self.width:
                out.write('\n' + ' ' * self.indent)
                column = self.indent
            out.write(text)
            column += len(text)
            if idx != last_index:
                out.write(self.delimiter)
                column += sep_width
        # Trailing spaces left by delimiters at line ends are stripped.
        return '\n'.join(line.rstrip() for line in out.getvalue().split('\n'))
def generate_code(keys, Hash=StrSaltHash, template=None, options=None):
    """
    Generate a perfect hash for *keys* and substitute its parameter lists
    into *template*.

    *Hash* is the random hash function generator; *options*, when given,
    supplies formatting attributes (width, indent, delimiter).  When no
    template is supplied, the built-in template for *Hash* is used.
    Returns the substituted code template as a string.
    """
    f1, f2, G = generate_hash(keys, Hash)
    assert f1.N == f2.N == len(G)
    # IntSaltHash has integer salts (no len()); record None in that case.
    try:
        salt_len = len(f1.salt)
        assert salt_len == len(f2.salt)
    except TypeError:
        salt_len = None
    if template is None:
        template = builtin_template(Hash)
    fmt = Format() if options is None else Format(
        width=options.width, indent=options.indent, delimiter=options.delimiter)
    if verbose:
        fmt.print_format()
    mapping = {
        'NS': salt_len,
        'S1': fmt(f1.salt),
        'S2': fmt(f2.salt),
        'NG': len(G),
        'G': fmt(G),
        'NK': len(keys),
        'K': fmt(list(keys), quote=True),
    }
    return string.Template(template).substitute(**mapping)
def read_table(filename, options):
    """
    Read keys from a column of *filename*.

    Lines that are blank or start with ``options.comment`` are skipped, and
    trailing comments are stripped.  Each remaining line is split on
    ``options.splitby`` and the 1-based column ``options.keycol`` is taken
    as the key.  Exits the program with an error message when the file
    cannot be opened, a row is too short, or no keys are found.
    Returns the list of keys in file order.
    """
    if verbose:
        print("Reading table from file `%s' to extract keys." % filename)
    try:
        fi = open(filename)
    except IOError:
        sys.exit("Error: Could not open `%s' for reading." % filename)
    keys = []
    if verbose:
        print("Reader options:")
        for name in 'comment', 'splitby', 'keycol':
            print(' %s: %r' % (name, getattr(options, name)))
    # Context manager guarantees the file is closed even on the sys.exit paths.
    with fi:
        for n, line in enumerate(fi):
            line = line.strip()
            if not line or line.startswith(options.comment):
                continue
            if options.comment in line:  # strip content after a trailing comment
                line = line.split(options.comment)[0].strip()
            row = [col.strip() for col in line.split(options.splitby)]
            try:
                key = row[options.keycol - 1]
            except IndexError:
                sys.exit("%s:%d: Error: Cannot read key, not enough columns." %
                         (filename, n + 1))
            keys.append(key)
    if not keys:
        # Fix: use sys.exit (consistent with the rest of this module); the
        # original called the site-builtin exit(), which is not guaranteed
        # to exist (e.g. under `python -S`).
        sys.exit("Error: no keys found in file `%s'." % filename)
    return keys
def read_template(filename):
    """Return the full text of *filename*, exiting with an error message
    when the file cannot be read."""
    if verbose:
        print("Reading template from file `%s'" % filename)
    try:
        with open(filename, 'r') as stream:
            contents = stream.read()
    except IOError:
        sys.exit("Error: Could not open `%s' for reading." % filename)
    return contents
def run_code(code):
    """Write *code* to a temporary script and run it with the current
    interpreter; raise AssertionError if it exits non-zero.  The temporary
    directory is removed afterwards in any case."""
    workdir = tempfile.mkdtemp()
    script = join(workdir, 't.py')
    with open(script, 'w') as fo:
        fo.write(code)
    try:
        subprocess.check_call([sys.executable, script])
    except subprocess.CalledProcessError as exc:
        raise AssertionError(exc)
    finally:
        shutil.rmtree(workdir)
def main():
from optparse import OptionParser
usage = "usage: %prog [options] KEYS_FILE [TMPL_FILE]"
description = """\
Generates code for perfect hash functions from
a file with keywords and a code template.
If no template file is provided, a small built-in Python template
is processed and the output code is written to stdout.
"""
parser = OptionParser(usage = usage,
description = description,
prog = sys.argv[0],
version = "%prog: " + __version__)
parser.add_option("--delimiter",
action = "store",
default = ", ",
help = "Delimiter for list items used in output, "
"the default delimiter is '%default'",
metavar = "STR")
parser.add_option("--indent",
action = "store",
default = 4,
type = "int",
help = "Make INT spaces at the beginning of a "
"new line when generated list is wrapped. "
"Default is %default",
metavar = "INT")
parser.add_option("--width",
action = "store",
default = 76,
type = "int",
help = "Maximal width of generated list when "
"wrapped. Default width is %default",
metavar = "INT")
parser.add_option("--comment",
action = "store",
default = "#",
help = "STR is the character, or sequence of "
"characters, which marks the beginning "
"of a comment (which runs till "
"the end of the line), in the input "
"KEYS_FILE. "
"Default is '%default'",
metavar = "STR")
parser.add_option("--splitby",
action = "store",
default = ",",
help = "STR is the character by which the columns "
"in the input KEYS_FILE are split. "
"Default is '%default'",
metavar = "STR")
parser.add_option("--keycol",
action = "store",
default = 1,
type = "int",
help = "Specifies the column INT in the input "
"KEYS_FILE which contains the keys. "
"Default is %default, i.e. the first column.",
metavar = "INT")
parser.add_option("--trials",
action = "store",
default = 5,
type = "int",
help = "Specifies the number of trials before "
"NG is increased. A small INT will give "
"compute faster, but the array G will be "
"large. A large INT will take longer to "
"compute but G will be smaller. "
"Default is %default",
metavar = "INT")
parser.add_option("--hft",
action = "store",
default = 1,
type = "int",
help = "Hash function type INT. Possible values "
"are 1 (StrSaltHash) and 2 (IntSaltHash). "
"The default is %default",
metavar = "INT")
parser.add_option("-e", "--execute",
action = "store_true",
help = "Execute the generated code within "
"the Python interpreter.")
parser.add_option("-o", "--output",
action = "store",
help = "Specify output FILE explicitly. "
"`-o std' means standard output. "
"`-o no' means no output. "
"By default, the file name is obtained "
"from the name of the template file by "
"substituting `tmpl' to `code'.",
metavar = "FILE")
parser.add_option("-v", "--verbose",
action = "store_true",
help = "verbosity")
options, args = parser.parse_args()
if options.trials <= 0:
parser.error("trials before increasing N has to be larger than zero")
global trials
trials = options.trials
global verbose
verbose = options.verbose
if len(args) not in (1, 2):
parser.error("incorrect number of arguments")
if len(args) == 2 and not args[1].count('tmpl'):
parser.error("template filename does not contain 'tmpl'")
if options.hft == 1:
Hash = StrSaltHash
elif options.hft == 2:
Hash = IntSaltHash
else:
parser.error("Hash function %s not implemented." % options.hft)
# --------------------- end parsing and checking --------------
keys_file = args[0]
if verbose:
print("keys_file = %r" % keys_file)
keys = read_table(keys_file, options)
if verbose:
print("Number os keys: %d" % len(keys))
tmpl_file | |
<filename>project_6_starter.py
#!/usr/bin/env python
# coding: utf-8
# # Project 6: Analyzing Stock Sentiment from Twits
# ## Instructions
# Each problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment.
#
# ## Packages
# When you implement the functions, you'll only need to you use the packages you've used in the classroom, like [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/). These packages will be imported for you. We recommend you don't add any import statements, otherwise the grader might not be able to run your code.
#
# ### Load Packages
# In[1]:
import json
import nltk
import os
import random
import re
import torch
from torch import nn, optim
import torch.nn.functional as F
# ## Introduction
# When deciding the value of a company, it's important to follow the news. For example, a product recall or natural disaster in a company's product chain. You want to be able to turn this information into a signal. Currently, the best tool for the job is a Neural Network.
#
# For this project, you'll use posts from the social media site [StockTwits](https://en.wikipedia.org/wiki/StockTwits). The community on StockTwits is full of investors, traders, and entrepreneurs. Each message posted is called a Twit. This is similar to Twitter's version of a post, called a Tweet. You'll build a model around these twits that generate a sentiment score.
#
# We've collected a bunch of twits, then hand labeled the sentiment of each. To capture the degree of sentiment, we'll use a five-point scale: very negative, negative, neutral, positive, very positive. Each twit is labeled -2 to 2 in steps of 1, from very negative to very positive respectively. You'll build a sentiment analysis model that will learn to assign sentiment to twits on its own, using this labeled data.
#
# The first thing we should do is load the data.
#
# ## Import Twits
# ### Load Twits Data
# This JSON file contains a list of objects for each twit in the `'data'` field:
#
# ```
# {'data':
# {'message_body': 'Neutral twit body text here',
# 'sentiment': 0},
# {'message_body': 'Happy twit body text here',
# 'sentiment': 1},
# ...
# }
# ```
#
# The fields represent the following:
#
# * `'message_body'`: The text of the twit.
# * `'sentiment'`: Sentiment score for the twit, ranges from -2 to 2 in steps of 1, with 0 being neutral.
#
#
# To see what the data look like by printing the first 10 twits from the list.
# In[2]:
# Load the labeled twits: the JSON holds a single 'data' key with a list of
# {'message_body': str, 'sentiment': int in [-2, 2]} objects.
with open(os.path.join('..', '..', 'data', 'project_6_stocktwits', 'twits.json'), 'r') as f:
    twits = json.load(f)
print(twits['data'][:10])
# ### Length of Data
# Now let's look at the number of twits in dataset. Print the number of twits below.
# In[3]:
"""print out the number of twits"""
# TODO Implement
print(len(twits['data']))
# ### Split Message Body and Sentiment Score
# In[4]:
messages = [twit['message_body'] for twit in twits['data']]
# Shift the discrete sentiment scores from [-2, 2] to [0, 4] so they can be
# used directly as class indices by the network.
sentiments = [twit['sentiment'] + 2 for twit in twits['data']]
# ## Preprocessing the Data
# With our data in hand we need to preprocess our text. These twits are collected by filtering on ticker symbols where these are denoted with a leader $ symbol in the twit itself. For example,
#
# `{'message_body': 'RT @google Our annual look at the year in Google blogging (and beyond) http://t.co/sptHOAh8 $GOOG',
# 'sentiment': 0}`
#
# The ticker symbols don't provide information on the sentiment, and they are in every twit, so we should remove them. This twit also has the `@google` username, again not providing sentiment information, so we should also remove it. We also see a URL `http://t.co/sptHOAh8`. Let's remove these too.
#
# The easiest way to remove specific words or phrases is with regex using the `re` module. You can sub out specific patterns with a space:
#
# ```python
# re.sub(pattern, ' ', text)
# ```
# This will substitute a space with anywhere the pattern matches in the text. Later when we tokenize the text, we'll split appropriately on those spaces.
# ### Pre-Processing
# In[5]:
nltk.download('wordnet')
def preprocess(message):
    """
    Normalise a raw twit message into a list of cleaned tokens.

    Operations performed, in order:
        - lowercase
        - remove URLs
        - remove ticker symbols ($XYZ)
        - remove @usernames
        - replace everything that is not a letter with a space
        - tokenize by splitting on whitespace
        - drop single-character tokens
        - lemmatize with WordNet

    Parameters
    ----------
    message : The text message to be preprocessed.

    Returns
    -------
    tokens: The preprocessed text into tokens.
    """
    # Lowercase the twit message
    text = message.lower()
    # NOTE: the patterns below are unchanged from the original, but are now
    # raw strings -- the plain literals contained invalid escape sequences
    # ('\$', '\@', '\s') which raise SyntaxWarning on modern Python.
    # Remove URLs.
    pattern = r"(www|http:|https:)+[^\s]+[\w]"
    text = re.sub(pattern, '', text)
    # Remove ticker symbols: any $-prefixed word followed by whitespace.
    text = re.sub(r'\$[a-zA-Z0-9]+\s', ' ', text)
    # Remove StockTwits usernames: any @-prefixed word followed by whitespace.
    text = re.sub(r'\@[A-Za-z0-9]+\s', ' ', text)
    # Replace every non-letter run with a space.
    text = re.sub(r'[^a-zA-Z]+\s*', ' ', text)
    # Tokenize by splitting the string on whitespace into a list of words.
    tokens = text.split()
    # Drop single-character tokens, then lemmatize the remainder.
    wnl = nltk.stem.WordNetLemmatizer()
    tokens = [i for i in tokens if len(i) > 1]
    tokens = [wnl.lemmatize(i) for i in tokens]
    return tokens
# ### Preprocess All the Twits
# Now we can preprocess each of the twits in our dataset. Apply the function `preprocess` to all the twit messages.
# In[6]:
# TODO Implement
tokenize = [preprocess(i) for i in messages]
# ### Bag of Words
# Now with all of our messages tokenized, we want to create a vocabulary and count up how often each word appears in our entire corpus. Use the [`Counter`](https://docs.python.org/3.1/library/collections.html#collections.Counter) function to count up all the tokens.
# In[7]:
from collections import Counter
"""
Create a vocabulary by using Bag of words
"""
# Count how often each token appears across the whole corpus.
bow = Counter()
for message_tokens in tokenize:
    bow.update(message_tokens)
# ### Frequency of Words Appearing in Message
# Remove very common words (stop words) and very rare words: neither carries
# sentiment signal, and both add noise to the network's input.
# In[8]:
"""
Set the following variables:
    freqs
    low_cutoff
    high_cutoff
    K_most_common
"""
# Dictionary mapping each token to its frequency: corpus count divided by the
# number of messages.
freqs = {word: count / len(tokenize) for word, count in bow.items()}
# Float frequency cutoff: drop words with a frequency lower than or equal to
# this (words appearing in roughly 10 messages or fewer).
low_cutoff = 10 / len(tokenize)
# Integer cutoff for the most common words: drop the `high_cutoff` most
# common words.
high_cutoff = 20
# FIX: the original compared per-message frequencies (small floats) against
# the integer 20, so K_most_common was always empty and no stop words were
# ever dropped.  The stated intent is "drop the `high_cutoff` most common
# words", which is exactly Counter.most_common.
K_most_common = [word for word, _ in bow.most_common(high_cutoff)]
filtered_words = [word for word in freqs
                  if freqs[word] > low_cutoff and word not in K_most_common]
# ### Updating Vocabulary by Removing Filtered Words
# Let's creat three variables that will help with our vocabulary.
# In[9]:
from tqdm import tqdm as tqdm
"""
Set the following variables:
vocab
id2vocab
filtered
"""
#TODO Implement
# A dictionary for the `filtered_words`. The key is the word and value is an id that represents the word.
vocab = {}
count=0
for i in filtered_words:
count+=1
vocab[i]=count
# Reverse of the `vocab` dictionary. The key is word id and value is the word.
id2vocab = {}
for i in filtered_words:
count+=1
id2vocab[count]=i
# tokenized with the words not in `filtered_words` removed.
# Knowledge post https://knowledge.udacity.com/questions/105006
filtered = [[word for | |
<reponame>dianshao-embedded/dianshao
from datetime import datetime
import os
import socket
import json
from sqlite3 import Timestamp
from celery import shared_task
from progressui.backend import ProgressSend
from git.repo.base import Repo
from tools import shell, git, bbcommand, patch, bbfile, dishes
from tools import migration
from tools.migration import Migration
from .models import MetaLayer, MyMachine, MyPackages, Project
# TODO: improve robustness later -- the tasks should be able to recover
# automatically from any mis-operation.
def _fetch_submodule(server, progress_send, target_path, path, name, url,
                     version, percentage, description):
    """Add *url* as git submodule *name* (at *version*) under *target_path*,
    retrying until *path* exists.

    Clone progress arrives as JSON datagrams on *server* (sent by the
    git helper thread) and is relayed through *progress_send* with the given
    outer *percentage* and *description*.
    """
    while os.path.exists(path) == False:
        submodule = git.git_submodule(target_path, name, url, version)
        submodule.start()
        while submodule.is_alive():
            try:
                server.settimeout(5)
                byte, addr = server.recvfrom(1024)
            except:
                # recvfrom timed out; keep polling while the clone thread runs.
                continue
            gitMessage = json.loads(byte.decode('ascii'))
            sub = [{'percentage': int(gitMessage['cur_count'] * 100 / gitMessage['max_count']),
                    'description': gitMessage['message']}]
            progress_send.send_progress(percentage=percentage, subProgress=sub,
                                        description=description)
        if os.path.exists(path):
            break
        else:
            print("git clone failed\n")


@shared_task(bind=True)
def project_initial_task(self, project_id, project_path, project_version, project_name):
    """Create a new Yocto project: copy the project template, register the
    default machine, init a git repo and add the bitbake / openembedded-core /
    meta-yocto / meta-openembedded / meta-rauc submodules, then source the
    build environment once so the default conf files are generated.

    Raises Exception on any failure; returns a success message otherwise.
    """
    # UDP socket on which the git helper threads report clone progress.
    server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server.bind(('', 8866))
    progress_send = ProgressSend(self)
    template_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'project_template')
    target_path = os.path.join(project_path, project_name)
    r, err = shell.shell_cmd('cp -rp %s/. %s' % (template_path, target_path), os.curdir)
    if err == True:
        server.close()  # fix: the original leaked the bound socket here
        raise Exception("project template build error: %s" % (r))
    project = Project.objects.get(id=project_id)
    MyMachine.objects.create(project=project, name='dianshao', base='none', initial_method='Systemd', flash='SDCard',
                             distro_version='1.0.0', description='my machine generate by dianshao',
                             machine_include='{}', distro_include='{}')
    # TODO: auto-generate the distro, image, machine, bblayer and conf.sample
    # files from the project name.
    Repo.init(target_path)
    progress_send.send_progress(percentage='10', description='Add Bitbake Submodule')
    # Map the UI project version to the (yocto branch, bitbake release) pair.
    versions = {
        'HARDKNOTT': ('hardknott', '1.50'),
        'GATESGARTH': ('gatesgarth', '1.48'),
        'DUNFELL': ('dunfell', '1.46'),
        'ZEUS': ('zeus', '1.44'),
    }
    try:
        yocto_version, bitbake_version = versions[project_version]
    except KeyError:
        # Fix: the original if/elif chain had no else, so an unknown version
        # caused a NameError later instead of a clear error.
        server.close()
        raise Exception("unknown project version: %s" % (project_version))
    path = os.path.join(project_path, project_name, 'bitbake')
    _fetch_submodule(server, progress_send, target_path, path, 'bitbake',
                     'https://github.com/openembedded/bitbake.git',
                     bitbake_version, '10', 'Add Bitbake Submodule')
    # Overlay the locally patched bitbake on top of the submodule checkout.
    bitbake_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'bitbake')
    r, err = shell.shell_cmd(command=('cp -r %s %s' % (bitbake_path, target_path)), cwd=target_path)
    if err == True:
        server.close()
        raise Exception("project template build error: %s" % (r))
    progress_send.send_progress(percentage='30', description='Add Openembedded-Core Submodule')
    path = os.path.join(project_path, project_name, 'openembedded-core')
    _fetch_submodule(server, progress_send, target_path, path, 'openembedded-core',
                     'https://github.com/openembedded/openembedded-core.git',
                     yocto_version, '30', 'Add Openembedded-Core Submodule')
    project = Project.objects.get(id=project_id)
    MetaLayer.objects.create(project=project,
                             name='openembedded-core',
                             url='https://github.com/openembedded/openembedded-core.git',
                             remote_or_local='remote')
    progress_send.send_progress(percentage='50', description='Add Meta-Yocto Submodule')
    path = os.path.join(project_path, project_name, 'meta-yocto')
    _fetch_submodule(server, progress_send, target_path, path, 'meta-yocto',
                     'https://git.yoctoproject.org/meta-yocto.git',
                     yocto_version, '50', 'Add Meta-Yocto Submodule')
    MetaLayer.objects.create(project=project,
                             name='meta-yocto',
                             url='https://git.yoctoproject.org/meta-yocto.git',
                             remote_or_local='remote',
                             sub='meta-poky')
    MetaLayer.objects.create(project=project,
                             name='meta-yocto',
                             url='https://git.yoctoproject.org/meta-yocto.git',
                             remote_or_local='remote',
                             sub='meta-yocto-bsp')
    # Fix: the original progress messages here said 'Meta-Yocto' /
    # 'Meta-Openembedded' while actually adding meta-openembedded / meta-rauc.
    progress_send.send_progress(percentage='70', description='Add Meta-Openembedded Submodule')
    path = os.path.join(project_path, project_name, 'meta-openembedded')
    _fetch_submodule(server, progress_send, target_path, path, 'meta-openembedded',
                     'https://github.com/openembedded/meta-openembedded.git',
                     yocto_version, '70', 'Add Meta-Openembedded Submodule')
    for layer_sub in ('meta-oe', 'meta-python', 'meta-networking'):
        MetaLayer.objects.create(project=project,
                                 name='meta-openembedded',
                                 url='https://github.com/openembedded/meta-openembedded.git',
                                 remote_or_local='remote',
                                 sub=layer_sub)
    progress_send.send_progress(percentage='90', description='Add Meta-Rauc Submodule')
    path = os.path.join(project_path, project_name, 'meta-rauc')
    _fetch_submodule(server, progress_send, target_path, path, 'meta-rauc',
                     'https://github.com/rauc/meta-rauc.git',
                     yocto_version, '90', 'Add Meta-Rauc Submodule')
    MetaLayer.objects.create(project=project,
                             name='meta-rauc',
                             url='https://github.com/rauc/meta-rauc.git',
                             remote_or_local='remote')
    # Source the build environment once so the default conf files are created.
    ret, err = shell.shell_cmd(command=('unset BBPATH; bash -c \"source %s %s;\"'
                               % (os.path.join(target_path, 'oe-init-build-env'), os.path.join(target_path, 'build'))),
                               cwd=target_path)
    if err == True:
        server.close()
        raise Exception("auto create configure file error: %s" % (ret))
    server.close()
    # Sanity check: the three mandatory layers must exist on disk.
    bb_path = os.path.join(project_path, project_name, 'bitbake')
    oe_path = os.path.join(project_path, project_name, 'openembedded-core')
    yocto_path = os.path.join(project_path, project_name, 'meta-yocto')
    if os.path.exists(bb_path) == False or os.path.exists(oe_path) == False or os.path.exists(yocto_path) == False:
        raise Exception('Project is not complete')
    return "Project Create Success"
@shared_task(bind=True)
def project_import_task(self, project_path, project_name, url):
    """Celery task: clone an existing project repository, copy the bundled
    bitbake template into it, and import the project into the database.

    INPUTS:
        project_path - directory the project is cloned into
        project_name - name of the cloned directory
        url          - git URL of the project to clone
    Raises Exception when the clone never produces the target directory or
    the template copy fails.
    """
    server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server.bind(('', 8866))  # git_clone reports its progress to this UDP port
    try:
        progress_send = ProgressSend(self)
        progress_send.send_progress(percentage=50, description='Clone Project')
        project_repo = git.git_clone(url, project_path, project_name)
        project_repo.start()
        path = os.path.join(project_path, project_name)
        i = 0
        # NOTE(review): the clone thread is started only once, so the retry
        # counter below only re-checks the path, it does not re-clone --
        # confirm that is the intended behaviour.
        while not os.path.exists(path) and i < 3:
            # Relay the clone thread's UDP progress datagrams to the UI.
            while project_repo.is_alive():
                try:
                    server.settimeout(5)
                    byte, addr = server.recvfrom(1024)
                except OSError:
                    # Timeout (or transient socket error): no progress message
                    # yet, keep polling the clone thread.
                    continue
                gitMessage = json.loads(byte.decode('ascii'))
                sub = [{'percentage': int(gitMessage['cur_count'] * 100 / gitMessage['max_count']),
                        'description': gitMessage['message']}]
                progress_send.send_progress(percentage=50, subProgress=sub,
                                            description='Start Clone Project')
            if os.path.exists(path):
                break
            i += 1
        if i == 3:
            raise Exception('git clone error')
        target_path = os.path.join(project_path, project_name)
        bitbake_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'bitbake')
        r, err = shell.shell_cmd(command=('cp -r %s %s' % (bitbake_path, target_path)),
                                 cwd=target_path)
        if err:
            raise Exception("project template build error: %s" % (r))
        m = Migration()
        m.project_import(project_path, project_name)
    finally:
        # BUGFIX: the UDP socket was previously leaked on the success path and
        # on the 'git clone error' path; always release it.
        server.close()
@shared_task(bind=True)
def meta_clone_task(self, name, url, remote_or_local, subd, project_id):
    """Celery task: add a meta layer to a project.

    Remote layers are cloned as git submodules (with UDP progress relayed to
    the UI); both remote and local layers are then recorded as a MetaLayer
    row and registered with bitbake.

    INPUTS:
        name            - layer name (also the submodule directory name)
        url             - git URL of the layer (remote layers)
        remote_or_local - 'remote' clones a submodule, anything else skips it
        subd            - optional sub-directory of the layer to register
        project_id      - primary key of the owning Project
    """
    # TODO: meta add sub directory, meta add without download
    progress_send = ProgressSend(self)
    server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server.bind(('', 8866))  # git_submodule reports clone progress to this UDP port
    try:
        progress_send.send_progress(percentage=0, description='Check the Exist Meta')
        metas = MetaLayer.objects.filter(project__id=project_id)
        project = Project.objects.get(id=project_id)
        for m in metas:
            if m.name == name and m.sub == subd:
                raise Exception("meta is already exist")
        progress_send.send_progress(33, description='Meta Adding...')
        if project.project_version == 'HARDKNOTT':
            yocto_version = 'hardknott'
        elif project.project_version == 'GATESGARTH':
            yocto_version = 'gatesgarth'
        elif project.project_version == 'DUNFELL':
            yocto_version = 'dunfell'
        elif project.project_version == 'ZEUS':
            yocto_version = 'zeus'
        # NOTE(review): yocto_version stays unbound for any other
        # project_version value, so a remote clone would raise NameError --
        # confirm the version set above is closed.
        if remote_or_local == 'remote':
            path = os.path.join(project.project_path, project.project_name, name)
            i = 0
            while not os.path.exists(path) and i < 3:
                submodule = git.git_submodule(os.path.join(project.project_path, project.project_name),
                                              name, url, yocto_version)
                submodule.start()
                # Relay the submodule thread's UDP progress messages to the UI.
                while submodule.is_alive():
                    try:
                        server.settimeout(5)
                        byte, addr = server.recvfrom(1024)
                    except OSError:
                        continue  # no datagram within the timeout; keep polling
                    gitMessage = json.loads(byte.decode('ascii'))
                    sub = [{'percentage': int(gitMessage['cur_count'] * 100 / gitMessage['max_count']),
                            'description': gitMessage['message']}]
                    progress_send.send_progress(subProgress=sub)
                if os.path.exists(path):
                    break
                i += 1
            if i == 3:
                raise Exception('git clone error')
        progress_send.send_progress(66, description='Save Meta-Layer')
        try:
            MetaLayer.objects.create(project=project,
                                     name=name, url=url, remote_or_local=remote_or_local, sub=subd)
        except Exception:
            raise Exception("meta model create err")
        if subd != '':
            meta_name = name + '/' + subd
        else:
            meta_name = name
        bbcommand.bitbake_addlayer(os.path.join(project.project_path, project.project_name),
                                   os.path.join(project.project_path, project.project_name, meta_name))
        return 'meta add success'
    finally:
        # BUGFIX: the socket was previously leaked when 'git clone error' (or
        # any unexpected exception) was raised; close it on every exit path.
        server.close()
@shared_task(bind=True)
def bitbake_progress(self, project_path, project_name, target, command):
# TODO: 增加一个锁,确保同一个时刻只有一个 Bitbake 进程
# TODO: 任务恢复,每次进入任务查询是否有 Bitbake 任务在进行中, 并默认不显示,点击按钮后显示任务进度
progress_send = ProgressSend(self)
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind(('', 6688))
bitbake = bbcommand.BitbakeThread(os.path.join(project_path, project_name), target, command)
bitbake.start()
progress_send = ProgressSend(self)
lock_file = os.path.join(project_path, project_path, 'build/bitbake.lock')
if os.path.exists(lock_file):
raise Exception('Another Bitbake Process')
start_time = datetime.now().timestamp()
while True:
bbprogress_byte, addr = server.recvfrom(8192)
bbprogress = json.loads(bbprogress_byte.decode('ascii'))
if bbprogress['event_type'] == 'dianshao_ui_start':
print('dianshao ui has already started')
if bbprogress['event_type'] == 'TaskList':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
sub = []
# TODO: 处理 progress < 0
for task in bbprogress['tasks']:
if task['progress'] < 0:
sub.append({'percentage': 0, 'description': ('%s pending' % task['title'])})
else:
sub.append({'percentage': task['progress'], 'description': (('%s:%s') %(task['title'], task['rate']))})
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s), subProgress=sub)
continue
if bbprogress['event_type'] == 'Ping':
# TODO: Server Command
# TODO: ping interval
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s))
if bbprogress['event_type'] == 'End':
if bbprogress['total_error'] > 0:
progress_send.send_progress(header='Bitbake Failed', description=bbprogress['summary'])
raise Exception('Bitbake Failed, With %s errors' % bbprogress['total_error'])
elif bbprogress['total_task_failures'] > 0:
progress_send.send_progress(header='Bitbake Failed', description=bbprogress['summary'])
raise Exception('Bitbake Failed, With %s errors' % bbprogress['total_task_failures'])
else:
progress_send.send_progress(header='Bitbake Success', description=bbprogress['summary'])
return ('Bitbake Success with %s Warnings' % bbprogress['total_warning'])
if bbprogress['event_type'] == 'CommandFailed':
raise Exception('Bitbake Failed, Please Find Details in dianshao_bitbake.log')
if bbprogress['event_type'] == 'CommandExit':
break
if bbprogress['event_type'] == 'CommandCompleted':
break
if bbprogress['event_type'] == 'logging.LogRecord':
print(bbprogress['msg'])
if bbprogress['event_type'] == 'CacheLoadStarted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=0, description='cache data load started')
if bbprogress['event_type'] == 'CacheLoadProgress':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=int(int(bbprogress['current'])*100/int(bbprogress['total'])), description='cache data loading')
if bbprogress['event_type'] == 'CacheLoadCompleted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=100, description='cache data load succes with %d retry times' % bbprogress['num_entries'])
if bbprogress['event_type'] == 'ProcessStarted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=0, description='%s process started' % bbprogress['processname'])
if bbprogress['event_type'] == 'ProcessProgress':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=int(bbprogress['progress']), description='%s process excuting' % bbprogress['processname'])
# TODO: Add Parse Progress
if bbprogress['event_type'] == 'ProcessFinished':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=100, description='%s process finished' % bbprogress['processname'])
if bbprogress['event_type'] == 'runQueueTaskStarted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=int(int(bbprogress['current'])*100/int(bbprogress['total'])), description='%s scene queue task started' % bbprogress['taskstring'])
if bbprogress['event_type'] == 'ParseStarted':
| |
<gh_stars>0
'''
Neural networks for standard systems
'''
import numpy as np
import collections
import tensorflow as tf
from tensorflow import math as tfm
from tensorflow import linalg as tfl
from tools import vae_utils
from neural_networks import layers
from neural_networks import NN_utils
class Classifier(object):
    '''
    Simple feed-forward classifier, taking input x and outputting
    class probabilities p(y|x).
    '''

    def __init__(self, name, n_x, n_y, N_h):
        '''
        Initialisation
        INPUTS:
            name - name to assign to the classifier
            n_x - dimensionality of the input
            n_y - number of output classes
            N_h - array of hidden units' dimensionalities in the format [Nhx,Nh1,Nh2,...,Nhn]
        '''
        self.n_x = n_x
        self.n_y = n_y
        self.N_h = N_h
        self.name = name
        self.bias_start = 0.0

        network_weights = self._create_weights()
        self.weights = network_weights

        # Choice of non-linearity (tf.nn.relu/tf.nn.leaky_relu/tf.nn.elu)
        self.nonlinearity = tf.nn.leaky_relu

    def compute_py(self, x):
        '''
        compute probability for each class
        INPUTS:
            x - input
        OUTPUTS:
            py - histogram of probabilities for each class (rows sum to 1)
        '''
        hidden1_pre = tfm.add(tfl.matmul(x, self.weights['W_x_to_h1']), self.weights['b_x_to_h1'])
        hidden_post = self.nonlinearity(hidden1_pre)
        # BUGFIX: initialise ni so the output-layer weight lookup below works
        # even when there are no middle layers (len(N_h) == 1); previously
        # that case raised a NameError.
        ni = 1
        num_layers_middle = np.shape(self.N_h)[0]-1
        for i in range(num_layers_middle):
            ni = i+2
            hidden_pre = tfm.add(tfl.matmul(hidden_post, self.weights['W_h{}_to_h{}'.format(ni-1,ni)]), self.weights['b_h{}_to_h{}'.format(ni-1,ni)])
            hidden_post = self.nonlinearity(hidden_pre)
        p_un = tfm.add(tfl.matmul(hidden_post, self.weights['W_h{}_to_py'.format(ni)]), self.weights['b_h{}_to_py'.format(ni)])
        # sigmoid + epsilon keeps every unnormalised score strictly positive
        p_un = tf.nn.sigmoid(p_un) + 1e-6
        # normalise per sample so the class probabilities sum to one
        py = tfm.divide(p_un, tf.tile(tf.expand_dims(tfm.reduce_sum(p_un, axis=1), axis=1), [1, self.n_y]))
        return py

    def _create_weights(self):
        '''
        Initialise weights: Xavier-initialised W matrices and zero biases,
        keyed by the same layer names compute_py looks up.
        '''
        all_weights = collections.OrderedDict()
        all_weights['W_x_to_h1'] = tf.Variable(vae_utils.xavier_init(self.n_x, self.N_h[0]), dtype=tf.float32)
        all_weights['b_x_to_h1'] = tf.Variable(tf.zeros([self.N_h[0]], dtype=tf.float32) * self.bias_start)
        # BUGFIX: ni defaults to 1 so the output-layer weights are created
        # (and named consistently with compute_py) when len(N_h) == 1.
        ni = 1
        num_layers_middle = np.shape(self.N_h)[0]-1
        for i in range(num_layers_middle):
            ni = i+2
            all_weights['W_h{}_to_h{}'.format(ni-1,ni)] = tf.Variable(vae_utils.xavier_init(self.N_h[ni-2], self.N_h[ni-1]), dtype=tf.float32)
            all_weights['b_h{}_to_h{}'.format(ni-1,ni)] = tf.Variable(tf.zeros([self.N_h[ni-1]], dtype=tf.float32) * self.bias_start)
        all_weights['W_h{}_to_py'.format(ni)] = tf.Variable(vae_utils.xavier_init(self.N_h[ni-1], self.n_y), dtype=tf.float32)
        all_weights['b_h{}_to_py'.format(ni)] = tf.Variable(tf.zeros([self.n_y], dtype=tf.float32) * self.bias_start)
        return all_weights
class Gaussian_NN(object):
    '''
    Class for Gaussian neural network, taking input x and outputting
    Gaussian distribution p(y|x)
    '''

    def __init__(self, name, n_x, n_y, N_h):
        '''
        Initialisation
        INPUTS:
            name - name to assign to the decoder
            n_x - dimensionality of the input
            n_y - dimensionality of output
            N_h - array of hidden units' dimensionalities in the format [Nhx,Nh1,Nh2,...,Nhn]
        '''
        self.n_x = n_x
        self.n_y = n_y
        self.N_h = N_h
        self.name = name
        self.bias_start = 0.0

        network_weights = self._create_weights()
        self.weights = network_weights

        # Choice of non-linearity (tf.nn.relu/tf.nn.leaky_relu/tf.nn.elu)
        self.nonlinearity = tf.nn.leaky_relu

    def compute_moments(self, x):
        '''
        compute moments of output Gaussian distribution
        INPUTS:
            x - input
        OUTPUTS:
            mu_y - mean of output Gaussian distribution (sigmoid-squashed into (0,1))
            log_sig_sq_y - log variance of output Gaussian distribution (bounded to (-50,50))
        '''
        hidden1_pre = tfm.add(tfl.matmul(x, self.weights['W_x_to_h1']), self.weights['b_x_to_h1'])
        hidden_post = self.nonlinearity(hidden1_pre)
        # BUGFIX: initialise ni so the output-layer weight lookups below work
        # even when there are no middle layers (len(N_h) == 1); previously
        # that case raised a NameError.
        ni = 1
        num_layers_middle = np.shape(self.N_h)[0]-1
        for i in range(num_layers_middle):
            ni = i+2
            hidden_pre = tfm.add(tfl.matmul(hidden_post, self.weights['W_h{}_to_h{}'.format(ni-1,ni)]), self.weights['b_h{}_to_h{}'.format(ni-1,ni)])
            hidden_post = self.nonlinearity(hidden_pre)
        mu_y = tfm.add(tfl.matmul(hidden_post, self.weights['W_h{}_to_muy'.format(ni)]), self.weights['b_h{}_to_muy'.format(ni)])
        mu_y = tf.nn.sigmoid(mu_y)
        log_sig_sq_y = tfm.add(tfl.matmul(hidden_post, self.weights['W_h{}_to_sy'.format(ni)]), self.weights['b_h{}_to_sy'.format(ni)])
        # smooth, bounded parameterisation of the log variance
        log_sig_sq_y = 100*(tf.nn.sigmoid(log_sig_sq_y/100)-0.5)
        return mu_y, log_sig_sq_y

    def _create_weights(self):
        '''
        Initialise weights: Xavier-initialised W matrices and zero biases,
        keyed by the same layer names compute_moments looks up.
        '''
        all_weights = collections.OrderedDict()
        all_weights['W_x_to_h1'] = tf.Variable(vae_utils.xavier_init(self.n_x, self.N_h[0]), dtype=tf.float32)
        all_weights['b_x_to_h1'] = tf.Variable(tf.zeros([self.N_h[0]], dtype=tf.float32) * self.bias_start)
        # BUGFIX: ni defaults to 1 so the output-layer weights are created
        # (and named consistently with compute_moments) when len(N_h) == 1.
        ni = 1
        num_layers_middle = np.shape(self.N_h)[0]-1
        for i in range(num_layers_middle):
            ni = i+2
            all_weights['W_h{}_to_h{}'.format(ni-1,ni)] = tf.Variable(vae_utils.xavier_init(self.N_h[ni-2], self.N_h[ni-1]), dtype=tf.float32)
            all_weights['b_h{}_to_h{}'.format(ni-1,ni)] = tf.Variable(tf.zeros([self.N_h[ni-1]], dtype=tf.float32) * self.bias_start)
        all_weights['W_h{}_to_muy'.format(ni)] = tf.Variable(vae_utils.xavier_init(self.N_h[ni-1], self.n_y), dtype=tf.float32)
        all_weights['b_h{}_to_muy'.format(ni)] = tf.Variable(tf.zeros([self.n_y], dtype=tf.float32) * self.bias_start)
        all_weights['W_h{}_to_sy'.format(ni)] = tf.Variable(vae_utils.xavier_init(self.N_h[ni-1], self.n_y), dtype=tf.float32)
        all_weights['b_h{}_to_sy'.format(ni)] = tf.Variable(tf.zeros([self.n_y], dtype=tf.float32) * self.bias_start)
        return all_weights
class Gaussian_CNN_2D(object):
    '''
    Class for Gaussian convolutional neural network, taking input x and
    outputting Gaussian distribution p(y|x).

    Architecture: conv stack 1 -> flatten (+2 extra scalars appended) ->
    fully-connected stack -> reshape to image -> conv stack 2 -> two conv
    heads producing the mean and log-variance maps.
    '''
    def __init__(self, name, n_x, n_y, N_h1, NF_h, N_h2, St1, St2, sz_f, sz_im):
        '''
        Initialisation
        INPUTS:
            name - name to assign to the decoder
            n_x - channels of the input
            n_y - channels of output
            N_h1 - channels of the hidden conv units before the fully-connected part
            NF_h - dimensionalities of the fully-connected hidden units
            N_h2 - channels of the hidden conv units after the fully-connected part
            St1 - strides for the first conv stack
            St2 - strides for the second conv stack
            sz_f - filters sizes in the format [H,W]
            sz_im - input image size (fed to NN_utils.compute_size)
        '''
        self.n_x = n_x
        self.n_y = n_y
        self.N_h1 = N_h1
        self.N_h2 = N_h2
        self.NF_h = NF_h
        # Per-layer spatial sizes of the two conv stacks, derived from the strides.
        self.Sz1 = NN_utils.compute_size(sz_im,St1)
        self.Sz2 = NN_utils.compute_size(self.Sz1[-1],St2)
        # Global stride list: fully-connected layers get stride 1 placeholders.
        self.St = np.concatenate((St1,np.ones(np.shape(NF_h)[0]),St2),0)
        self.sz_f = sz_f
        self.sz_im = sz_im
        self.name = name
        self.bias_start = 0.0
        network_weights = self._create_weights()
        self.weights = network_weights
        # Choice of non-linearity (tf.nn.relu/tf.nn.leaky_relu/tf.nn.elu)
        self.nonlinearity = tf.nn.leaky_relu
    def compute_moments(self,xl):
        '''
        compute moments of output Gaussian distribution
        INPUTS:
            xl - flattened image batch with extra scalars l appended
                 (split apart by NN_utils.reshape_and_extract)
        OUTPUTS:
            mu_y - mean of output Gaussian distribution (flattened, with a
                   zero column appended)
            log_sig_sq_y - log variance of output Gaussian distribution
                           (flattened, zero column appended, bounded to (-50,50))

        NOTE(review): ni keeps counting across all three stacks so the weight
        keys stay globally numbered; this assumes len(N_h1) >= 2 or
        len(NF_h) >= 1, otherwise ni is unbound at the output heads -- same
        latent issue as the other classes in this file. TODO confirm.
        '''
        x, l = NN_utils.reshape_and_extract(xl,self.sz_im)
        # --- conv stack 1 ---
        hidden_post = layers.tf_conv_layer(x,self.weights['W_x_to_h1'],self.weights['b_x_to_h1'],self.St[0],self.nonlinearity)
        # print(tf.shape(hidden_post).numpy())
        num_layers_1 = np.shape(self.N_h1)[0]-1
        for i in range(num_layers_1):
            ni = i+2
            hidden_post = layers.tf_conv_layer(hidden_post,self.weights['W_h{}_to_h{}'.format(ni-1,ni)],self.weights['b_h{}_to_h{}'.format(ni-1,ni)],self.St[ni-1],self.nonlinearity)
            # print(tf.shape(hidden_post).numpy())
        # --- fully-connected stack over flattened features plus the l scalars ---
        hidden_post = NN_utils.flatten(hidden_post)
        hidden_post = tf.concat([hidden_post,l],axis=1)
        # print(tf.shape(hidden_post).numpy())
        num_layers_F = np.shape(self.NF_h)[0]
        for i in range(num_layers_F):
            ni = ni+1
            hidden_pre = tfm.add(tfl.matmul(hidden_post, self.weights['W_h{}_to_h{}'.format(ni-1,ni)]), self.weights['b_h{}_to_h{}'.format(ni-1,ni)])
            hidden_post = self.nonlinearity(hidden_pre)
        # print(tf.shape(hidden_post).numpy())
        # --- back to image space, conv stack 2 ---
        hidden_post = NN_utils.reshape_to_images(hidden_post,self.Sz2[0,:])
        # print(tf.shape(hidden_post).numpy())
        num_layers_2 = np.shape(self.N_h2)[0]
        for i in range(num_layers_2):
            ni = ni+1
            hidden_post = layers.tf_conv_layer(hidden_post,self.weights['W_h{}_to_h{}'.format(ni-1,ni)],self.weights['b_h{}_to_h{}'.format(ni-1,ni)],self.St[ni-1],self.nonlinearity)
            # print(tf.shape(hidden_post).numpy())
        # Mean head: sigmoid squashes outputs into (0,1).
        mu_y = layers.tf_conv_layer(hidden_post,self.weights['W_h{}_to_muy'.format(ni)],self.weights['b_h{}_to_muy'.format(ni)],1,self.nonlinearity)
        mu_y = tf.nn.sigmoid(mu_y)
        # Log-variance head: smooth bounded parameterisation in (-50,50).
        log_sig_sq_y = layers.tf_conv_layer(hidden_post,self.weights['W_h{}_to_sy'.format(ni)],self.weights['b_h{}_to_sy'.format(ni)],1,self.nonlinearity)
        log_sig_sq_y = 100*(tf.nn.sigmoid(log_sig_sq_y/100)-0.5)
        # Flatten and append a zero column to each output (presumably a
        # placeholder matching the l slot of the input layout -- TODO confirm).
        mu_y = NN_utils.flatten(mu_y)
        mu_y = tf.concat([mu_y,tf.zeros([tf.shape(mu_y)[0],1])],axis=1)
        log_sig_sq_y = NN_utils.flatten(log_sig_sq_y)
        log_sig_sq_y = tf.concat([log_sig_sq_y,tf.zeros([tf.shape(log_sig_sq_y)[0],1])],axis=1)
        return mu_y, log_sig_sq_y
    def _create_weights(self):
        '''
        Initialise weights for the three stacks, keyed by the same globally
        numbered layer names compute_moments looks up.
        '''
        # Flattened sizes per layer; +2 accounts for the l scalars appended
        # after the first conv stack.
        SZ1 = self.N_h1*self.Sz1[:,0]*self.Sz1[:,1]
        SZ1[-1] = SZ1[-1]+2
        SZF = self.NF_h
        SZ2 = self.N_h2*self.Sz2[:,0]*self.Sz2[:,1]
        SZ = np.concatenate((SZ1,SZF,SZ2),axis=0)
        # Channel count implied by reshaping the last FC layer to Sz2[0] images.
        nf = np.asarray(self.NF_h)
        nfi = np.rint(self.NF_h[-1]/(self.Sz2[0,0]*self.Sz2[0,1]))
        nfi = nfi.astype(int)
        nf[-1] = nfi
        N_h = tf.concat([self.N_h1,nf,self.N_h2],0)
        # print(N_h.numpy())
        # print(SZ)
        all_weights = collections.OrderedDict()
        # NOTE(review): the conv biases below are created with the full filter
        # shape rather than a per-channel shape -- verify against
        # layers.tf_conv_layer's expectations.
        all_weights['W_x_to_h1'] = tf.Variable(tf.random.uniform([self.sz_f[0],self.sz_f[1],self.n_x,N_h[0]]), dtype=tf.float32)
        all_weights['b_x_to_h1'] = tf.Variable(tf.zeros([self.sz_f[0],self.sz_f[1],self.n_x,N_h[0]], dtype=tf.float32))
        num_layers_1 = np.shape(self.N_h1)[0]-1
        for i in range(num_layers_1):
            ni = i+2
            all_weights['W_h{}_to_h{}'.format(ni-1,ni)] = tf.Variable(tf.random.uniform([self.sz_f[0],self.sz_f[1], N_h[ni-2], N_h[ni-1]]), dtype=tf.float32)
            all_weights['b_h{}_to_h{}'.format(ni-1,ni)] = tf.Variable(tf.zeros([self.sz_f[0],self.sz_f[1],N_h[ni-2], N_h[ni-1]], dtype=tf.float32))
        num_layers_F = np.shape(self.NF_h)[0]
        for i in range(num_layers_F):
            ni = ni+1
            all_weights['W_h{}_to_h{}'.format(ni-1,ni)] = tf.Variable(vae_utils.xavier_init(SZ[ni-2], SZ[ni-1]), dtype=tf.float32)
            all_weights['b_h{}_to_h{}'.format(ni-1,ni)] = tf.Variable(tf.zeros([SZ[ni-1]], dtype=tf.float32) * self.bias_start)
        num_layers_2 = np.shape(self.N_h2)[0]
        for i in range(num_layers_2):
            ni = ni+1
            all_weights['W_h{}_to_h{}'.format(ni-1,ni)] = tf.Variable(tf.random.uniform([self.sz_f[0],self.sz_f[1], N_h[ni-2], N_h[ni-1]]), dtype=tf.float32)
            all_weights['b_h{}_to_h{}'.format(ni-1,ni)] = tf.Variable(tf.zeros([self.sz_f[0],self.sz_f[1],N_h[ni-2], N_h[ni-1]], dtype=tf.float32))
        all_weights['W_h{}_to_muy'.format(ni)] = tf.Variable(tf.random.uniform([self.sz_f[0],self.sz_f[1], N_h[ni-1], self.n_y]), dtype=tf.float32)
        all_weights['b_h{}_to_muy'.format(ni)] = tf.Variable(tf.zeros([self.sz_f[0],self.sz_f[1], N_h[ni-1], self.n_y], dtype=tf.float32))
        all_weights['W_h{}_to_sy'.format(ni)] = tf.Variable(tf.random.uniform([self.sz_f[0],self.sz_f[1], N_h[ni-1], self.n_y]), dtype=tf.float32)
        all_weights['b_h{}_to_sy'.format(ni)] = tf.Variable(tf.zeros([self.sz_f[0],self.sz_f[1], N_h[ni-1], self.n_y], dtype=tf.float32))
        return all_weights
class Classification_CNN_2D(object):
'''
    Class for classification convolutional neural network, taking input x and
    outputting class probabilities p(y|x)
'''
def __init__(self, name, n_x, n_y, N_h1, NF_h, St1, sz_f, sz_im):
'''
Initialisation
INPUTS:
name - name to assign to the decoder
n_x - channels of the input
n_y - channels of output
N_h - array of number of channels in the hidden units [Nhx,Nh1,Nh2,...,Nhn]
St - array of strides to use every operation (must be one longer then the above)
sz_f - filters sizes in the format [H,W]
'''
self.n_x = n_x
self.n_y = n_y
self.N_h1 = N_h1
self.NF_h = NF_h
self.Sz1 = NN_utils.compute_size(sz_im,St1)
self.St = np.concatenate((St1,np.ones(np.shape(NF_h)[0])),0)
self.sz_f = sz_f
self.sz_im = sz_im
self.name = name
self.bias_start = 0.0
network_weights = self._create_weights()
self.weights = network_weights
# Choice of non-linearity (tf.nn.relu/tf.nn.leaky_relu/tf.nn.elu)
self.nonlinearity = tf.nn.leaky_relu
def compute_py(self,xl):
'''
compute moments of output Gaussian distribution
INPUTS:
x - input
OUTPUTS:
mu_y - mean of output Gaussian distribution
log_sig_sq_y - log variance of output Gaussian distribution
'''
x, _ = NN_utils.reshape_and_extract(xl,self.sz_im)
hidden_post = layers.tf_conv_layer(x,self.weights['W_x_to_h1'],self.weights['b_x_to_h1'],self.St[0],self.nonlinearity)
# print(tf.shape(hidden_post).numpy())
num_layers_1 = np.shape(self.N_h1)[0]-1
for i in range(num_layers_1):
ni = i+2
hidden_post = layers.tf_conv_layer(hidden_post,self.weights['W_h{}_to_h{}'.format(ni-1,ni)],self.weights['b_h{}_to_h{}'.format(ni-1,ni)],self.St[ni-1],self.nonlinearity)
# print(tf.shape(hidden_post).numpy())
hidden_post = NN_utils.flatten(hidden_post)
# print(tf.shape(hidden_post).numpy())
num_layers_F = np.shape(self.NF_h)[0]
for i in range(num_layers_F):
ni = ni+1
hidden_pre = tfm.add(tfl.matmul(hidden_post, self.weights['W_h{}_to_h{}'.format(ni-1,ni)]), self.weights['b_h{}_to_h{}'.format(ni-1,ni)])
hidden_post = self.nonlinearity(hidden_pre)
# print(tf.shape(hidden_post).numpy())
p_un = tfm.add(tfl.matmul(hidden_post, self.weights['W_h{}_to_py'.format(ni)]), self.weights['b_h{}_to_py'.format(ni)])
p_un = tf.nn.sigmoid(p_un) + 1e-6
py = tfm.divide(p_un,tf.tile(tf.expand_dims(tfm.reduce_sum(p_un,axis=1),axis=1),[1,self.n_y]))
return py
def _create_weights(self):
'''
Initialise weights
'''
SZ1 = self.N_h1*self.Sz1[:,0]*self.Sz1[:,1]
SZF = self.NF_h
SZ = np.concatenate((SZ1,SZF),axis=0)
N_h = tf.concat([self.N_h1,self.NF_h],0)
# print(N_h.numpy())
# print(SZ)
all_weights = collections.OrderedDict()
all_weights['W_x_to_h1'] = tf.Variable(tf.random.uniform([self.sz_f[0],self.sz_f[1],self.n_x,N_h[0]]), dtype=tf.float32)
all_weights['b_x_to_h1'] = tf.Variable(tf.zeros([self.sz_f[0],self.sz_f[1],self.n_x,N_h[0]], dtype=tf.float32))
num_layers_1 = np.shape(self.N_h1)[0]-1
for i in range(num_layers_1):
ni = i+2
all_weights['W_h{}_to_h{}'.format(ni-1,ni)] = tf.Variable(tf.random.uniform([self.sz_f[0],self.sz_f[1], N_h[ni-2], N_h[ni-1]]), dtype=tf.float32)
all_weights['b_h{}_to_h{}'.format(ni-1,ni)] = tf.Variable(tf.zeros([self.sz_f[0],self.sz_f[1],N_h[ni-2], N_h[ni-1]], dtype=tf.float32))
num_layers_F = np.shape(self.NF_h)[0]
for i in range(num_layers_F):
ni = ni+1
all_weights['W_h{}_to_h{}'.format(ni-1,ni)] = tf.Variable(vae_utils.xavier_init(SZ[ni-2], SZ[ni-1]), dtype=tf.float32)
all_weights['b_h{}_to_h{}'.format(ni-1,ni)] = tf.Variable(tf.zeros([SZ[ni-1]], dtype=tf.float32) * self.bias_start)
all_weights['W_h{}_to_py'.format(ni)] = tf.Variable(vae_utils.xavier_init(SZ[ni-1], self.n_y), dtype=tf.float32)
all_weights['b_h{}_to_py'.format(ni)] = tf.Variable(tf.zeros([self.n_y], dtype=tf.float32) * self.bias_start)
return | |
"""
This Is My Deeeep.io Version Using Pymunk
So That The Collision And Stuff Like That Is Not Crappy.
"""
# Import Librarys And Modules
import arcade
import random
import math
import pymunk
from typing import Optional
from Pymunk_Helping_Code import follow_sprite
from arcade.pymunk_physics_engine import PymunkPhysicsEngine
# Screen Properties
SCREEN_WIDTH = 1200
SCREEN_HEIGHT = 800
SCREEN_TITLE = "Deeeep.io(Remake) Using Pymunk"
# Healthbar Setup: size of the bar and how many pixels above a sprite's
# centre it is drawn (see Health_Sprite.draw_health_bar).
HEALTHBAR_WIDTH = 50
HEALTHBAR_HEIGHT = 10
HEALTHBAR_OFFSET_Y = 50
# Different Scales for sprites
TINY_SCALE = 0.7
SCALE = 0.4
SUPER_SCALE = 0.2
# Movement Forces For Different Sprites In The Physic Engine
PLAYER_MOVE_FORCE = 4000
AI_MOVE_FORCE = 4000
# Animal Dictionary
# NOTE: the list order must stay in sync with the 1-based random index used
# in Game.setup / Game.AI (random.randint(1, 15) - 1).
animal_name_list = [
    'Alligator_Snapping_Turtle',
    'Blue_Whale',
    'Elephant_Seal',
    'Goblin_Shark',
    'Humpback_Whale',
    'Leatherback_Turtle',
    'Manta_Ray',
    'Marlin',
    'Orca',
    'Polar_Bear',
    'Sleeper_Shark',
    'Sperm_Whale',
    'Sunfish',
    'Tiger_Shark',
    'Walrus', ]
# Per-animal stats: 'health' = max hit points, 'speed' = movement speed,
# 'damage' = attack strength (presumably applied on hit -- used outside this
# section), 'scale' = sprite scale.
animals = {
    # 1
    'Alligator_Snapping_Turtle': {
        'health': 800,
        'speed': 90,
        'damage': 140,
        'scale': 0.5
    },
    # 2
    'Blue_Whale': {
        'health': 1500,
        'speed': 90,
        'damage': 120,
        'scale': 0.6
    },
    # 3
    "Elephant_Seal": {
        'health': 1000,
        'speed': 90,
        'damage': 120,
        'scale': 0.45
    },
    # 4
    'Goblin_Shark': {
        'health': 750,
        'speed': 100,
        'damage': 140,
        'scale': 0.4
    },
    # 5
    'Humpback_Whale': {
        'health': 1200,
        'speed': 90,
        'damage': 100,
        'scale': 0.55
    },
    # 6
    'Leatherback_Turtle': {
        'health': 900,
        'speed': 95,
        'damage': 130,
        'scale': 0.4
    },
    # 7
    'Manta_Ray': {
        'health': 1000,
        'speed': 100,
        'damage': 120,
        'scale': 0.5
    },
    # 8
    'Marlin': {
        'health': 700,
        'speed': 125,
        'damage': 100,
        'scale': 0.3
    },
    # 9
    'Orca': {
        'health': 900,
        'speed': 100,
        'damage': 160,
        'scale': 0.4
    },
    # 10
    'Polar_Bear': {
        'health': 900,
        'speed': 100,
        'damage': 160,
        'scale': 0.4
    },
    # 11
    'Sleeper_Shark': {
        'health': 1000,
        'speed': 80,
        'damage': 160,
        'scale': 0.4
    },
    # 12
    'Sperm_Whale': {
        'health': 1200,
        'speed': 85,
        'damage': 160,
        'scale': 0.53
    },
    # 13
    'Sunfish': {
        'health': 900,
        'speed': 100,
        'damage': 140,
        'scale': 0.4
    },
    # 14
    'Tiger_Shark': {
        'health': 800,
        'speed': 100,
        'damage': 160,
        'scale': 0.4
    },
    # 15
    'Walrus': {
        'health': 900,
        'speed': 90,
        'damage': 140,
        'scale': 0.32
    },
}
# Classes
class Health_Sprite(arcade.Sprite):
    """Sprite that tracks hit points and can render a health bar above itself."""

    def __init__(self, image, scale, max_health):
        super().__init__(image, scale)
        # Start at full health; cur_health is decremented elsewhere.
        self.max_health = max_health
        self.cur_health = max_health

    def draw_health_bar(self):
        """Draw a black background bar and a green bar sized by remaining health.

        Nothing is drawn while the sprite is at full health.
        """
        if self.cur_health >= self.max_health:
            return
        bar_y = self.center_y + HEALTHBAR_OFFSET_Y
        # Background (full-width, black).
        arcade.draw_rectangle_filled(center_x=self.center_x,
                                     center_y=bar_y,
                                     width=HEALTHBAR_WIDTH,
                                     height=HEALTHBAR_HEIGHT,
                                     color=arcade.color.BLACK)
        # Foreground (green), left-aligned with the background.
        remaining = HEALTHBAR_WIDTH * (self.cur_health / self.max_health)
        arcade.draw_rectangle_filled(center_x=self.center_x - 0.5 * (HEALTHBAR_WIDTH - remaining),
                                     center_y=bar_y,
                                     width=remaining,
                                     height=HEALTHBAR_HEIGHT,
                                     color=arcade.color.GREEN)
class Game(arcade.Window):
"""Game"""
    def __init__(self):
        """Create the game window and declare sprite containers.

        The sprite lists are created lazily in setup(); here they are only
        declared so the attributes exist on the instance.
        """
        super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
        # Sprite lists (populated in setup()).
        self.orb_list = None
        self.orb_list2 = None
        self.fish_list = None
        self.AI_list = None
        self.player_list = None
        # Static pymunk boundary segments added in setup().
        self.static_lines = []
        # Physics engine (created in setup()).
        self.physics_engine: Optional[PymunkPhysicsEngine] = None
    def setup(self):
        """Initialise a new round: pick the player's animal, spawn food and
        AI, then build the pymunk physics world and its boundary walls."""
        arcade.set_background_color(arcade.color.OCEAN_BOAT_BLUE)
        # Set Random Player Animal At The Start Of The Game
        animal_index = random.randint(1, 15)
        animal_name = animal_name_list[animal_index - 1]
        animal_attributes = animals[animal_name]
        # Keep the chosen animal on self so other methods can use it.
        self.animal_name = animal_name
        self.animal_attributes = animal_attributes
        # AI animal attributes (filled in by self.AI()).
        self.AI_animal_attributes = None
        # Boost variables
        self.boost_timer = 0
        self.boost_timer_start = False
        # Frame count
        self.frame_count = 0
        # Player sprite with the chosen animal's health and scale.
        self.player = Health_Sprite(
            f"images/Deeeep.io/{animal_name}.png", animal_attributes["scale"], max_health=animal_attributes["health"])
        self.player.center_x = random.randint(100, 1790)
        self.player.center_y = random.randint(100, 790)
        # Add The Player Weapon
        self.player_weapon = arcade.Sprite(
            "images/Deeeep.io/Weapon.png", animal_attributes['scale'])
        # Add The AI Weapon
        self.AI_weapon = arcade.Sprite(
            "images/Deeeep.io/Weapon.png", animal_attributes['scale'])
        # Movement speed derived from the animal's speed stat.
        self.speed = animal_attributes['speed'] / 9 / 2
        # Food Setup / Activate Lists
        self.orb_list = arcade.SpriteList()
        self.orb_list2 = arcade.SpriteList()
        self.fish_list = arcade.SpriteList()
        self.AI_list = arcade.SpriteList()
        self.player_list = arcade.SpriteList()
        # Spawn Food
        for i in range(50):
            self.GreenOrb()
        for i in range(50):
            self.BlueOrb()
        for i in range(5):
            self.fish()
        # Spawn AI
        self.AI()
        # Win And Lose Function Variables
        self.You_Won = None
        # Score
        self.score = 0
        # Check the win/lose condition every 15 seconds.
        arcade.schedule(self.check_win_lose, 15)
        # The player list holds the player sprite and its weapon sprite.
        self.player_list.append(self.player)
        self.player_list.append(self.player_weapon)
        # --- Pymunk Physics Engine Setup ---
        # The default damping for every object controls the percent of velocity
        # the object will keep each second. A value of 1.0 is no speed loss,
        # 0.9 is 10% per second, 0.1 is 90% per second.
        # For top-down games, this is basically the friction for moving objects.
        # For platformers with gravity, this should probably be set to 1.0.
        # Default value is 1.0 if not specified.
        damping = 0.7
        # Set the gravity. (0, 0) is good for outer space and top-down.
        gravity = (0, 0)
        # Create the physics engine
        self.physics_engine = PymunkPhysicsEngine(damping=damping,
                                                  gravity=gravity)
        self.physics_engine.space.collision_bias = 0
        # Add the player.
        # For the player, we set the damping to a lower value, which increases
        # the damping rate. This prevents the character from traveling too far
        # after the player lets off the movement keys.
        # Setting the moment to PymunkPhysicsEngine.MOMENT_INF prevents it from
        # rotating.
        # Friction normally goes between 0 (no friction) and 1.0 (high friction)
        # Friction is between two objects in contact. It is important to remember
        # in top-down games that friction moving along the 'floor' is controlled
        # by damping.
        self.physics_engine.add_sprite(self.player, mass=2,
                                       friction=0.01,
                                       elasticity=1,
                                       damping=1,
                                       collision_type="player",
                                       max_velocity=400)
        # Register the AI sprites with the engine as well.
        # Mass controls, well, the mass of an object. Defaults to 1.
        self.physics_engine.add_sprite_list(self.AI_list,
                                            mass=0.01,
                                            friction=0.01,
                                            elasticity=1,
                                            damping=1,
                                            collision_type="player")
        # Static boundary walls. NOTE(review): the walls span a 1900x1080/1500
        # area while the window is 1200x800 -- presumably the map is larger
        # than the viewport; confirm.
        # Add bottom floor
        floor_height = 20
        body = pymunk.Body(body_type=pymunk.Body.STATIC)
        shape = pymunk.Segment(body, [0, floor_height], [
                               1900, floor_height], 0.0)
        shape.friction = 10
        self.static_lines.append(shape)
        self.physics_engine.space.add(shape, body)
        # Add top floor
        body = pymunk.Body(body_type=pymunk.Body.STATIC)
        shape = pymunk.Segment(
            body,
            [0, 1080 - floor_height],  # [x1, y1] starting point
            [1900, 1080 - floor_height],  # [x2, y2], ending point
            0.0
        )
        shape.friction = 10
        self.static_lines.append(shape)
        self.physics_engine.space.add(shape, body)
        # Add left floor
        body = pymunk.Body(body_type=pymunk.Body.STATIC)
        shape = pymunk.Segment(
            body,
            [floor_height, 0],
            [floor_height, 1500],
            0.0
        )
        shape.friction = 10
        self.static_lines.append(shape)
        self.physics_engine.space.add(shape, body)
        # Add right floor
        body = pymunk.Body(body_type=pymunk.Body.STATIC)
        shape = pymunk.Segment(
            body,
            [1900 - floor_height, 0],  # [x1, y1] starting point
            [1900 - floor_height, 1500],  # [x2, y2], ending point
            0.0
        )
        shape.friction = 10
        self.static_lines.append(shape)
        self.physics_engine.space.add(shape, body)
def AI(self):
"""AI Shark"""
# Index, Name, And Attributes
animal_index = random.randint(1, 15)
animal_name = animal_name_list[animal_index - 1]
animal_attributes = animals[animal_name]
# Set AI Attributes
self.AI_animal_attributes = animal_attributes
# AI Setup
AI_Shark = Health_Sprite(
f"images/Deeeep.io/{animal_name}.png", animal_attributes["scale"], animal_attributes["health"])
AI_Shark.center_x = random.randint(10, 1190)
AI_Shark.center_y = random.randint(10, 790)
AI_Shark.change_x = 0
AI_Shark.change_y = 0
# Place AI's center x and y in varibles
self.ai_center_x = AI_Shark.center_x
self.ai_center_y = AI_Shark.center_y
# Add to AI list
self.AI_list.append(AI_Shark)
def player_movement(self, x, y):
"""Player Movement"""
# Get from the mouse the destination location for the bullet
# IMPORTANT! If you have a scrolling screen, you will also need
# to add in self.view_bottom and self.view_left.
dest_x = x
dest_y = y
# Do math to calculate how to get the bullet to the destination.
# Calculation the angle in radians between the start points
# and end points. This is the angle the bullet will travel.
x_diff = dest_x - self.player.center_x
y_diff = dest_y - self.player.center_y
angle = math.atan2(y_diff, x_diff)
# Taking into account the angle, calculate our change_x
# and change_y. Velocity is how fast the bullet travels.
self.physics_engine.set_velocity(
self.player,
(math.cos(angle) * self.speed * 50, math.sin(angle) * self.speed * 50)
)
# Angle the bullet sprite so it doesn't look like it is flying
# sideways.
spriteAngle = math.degrees(angle) - 90
# Sync up the angle in the physics world too!!!!
self.player.angle = spriteAngle
physicsBody = self.physics_engine.get_physics_object(self.player).body
physicsBody.angle = math.radians(spriteAngle)
def GreenOrb(self):
"""Orb"""
# Green Orb Random Spawn Position
center_x = random.randint(10, 1890)
center_y = random.randint(10, 1040)
# Green Orb Setup
orb = arcade.Sprite("images/Orb.png", SCALE / 4)
orb.center_x = center_x
orb.center_y = center_y
# Add To List To Draw
self.orb_list.append(orb)
def BlueOrb(self):
"""Orb"""
# Blue Orb Random Spawn Position
center_x | |
# gh_stars: 1-10 (dataset artifact, kept as a comment so the file parses)
""" Lazy-CSeq Sequentialization module
(swarm version of the corresponding name)
maintained by <NAME>, University of Southampton.
"""
VERSION = 'lazyseqnewscheduleswarm-0.1-2016.09.14'  # module version identifier (name-date convention used across cseq modules)
"""
Transformation:
implements the lazy sequentialization schema
(see Inverso, Tomasco, Fischer, <NAME>, Parlato, CAV'14)
Prerequisites:
- all functions should have been inlined, except the main(), all thread functions, all __CSEQ_atomic_ functions, and function __CSEQ_assert
    - all loops should have been unrolled
- no two threads refers to the same thread function (use module duplicator.py)
TODO:
- get rid of _init_scalar() (see ext. notes)
- check the STOP() inserting mechanism
- this schema assumes no mutex_lock() in main() - is this fine?
- handle typedef in guessing numbit
Changelog:
2016.09.21 fix small bug that causes the injection of GUARD in atomic function
2016.09.14 Initial version (starting from lazyseqnewschedule.py (2016.08.12))
"""
import math, re, os.path
from time import gmtime, strftime
import pycparser.c_parser, pycparser.c_ast, pycparser.c_generator
import core.common, core.module, core.parser, core.utils
class lazyseqcbmcswarm(core.module.Translator):
__lines = {} # lines for each thread
__threadName = ['main'] # name of threads, as they are found in pthread_create(s) - the threads all have different names
__threadIndex = {} # index of the thread = value of threadcount when the pthread_create to that thread was discovered
__threadCount = 0 # pthread create()s found so far
__labelLine = {} # statement number where labels are defined [function, label]
__gotoLine = {} # statement number where goto to labels appear [function, label]
__maxInCompound = 0 # max label within a compound
__labelLength = 55 # for labels to have all the same length, adding padding when needed
__startChar = 't' # special char to distinguish between labeled and non-labelled lines
__stmtCount = -1 # thread statement counter (to build thread labels)
__currentThread = '' # name of the current thread (also used to build thread labels)
__threadbound = 0 # bound on the number of threads
__firstThreadCreate = False # set once the first thread creation is met
__globalMemoryAccessed = False # used to limit context-switch points (when global memory is not accessed, no need to insert them)
__first = False
__atomic = False # no context-switch points between atomic_start() and atomic_end()
_bitwidth = {} # custom bitwidth for specific int variables, e.g. ['main','var'] = 4
_deadlockcheck = False
__decomposepc = False # decompose pc
__roundrobin = False
__preanalysis = {}
__visiting_struct = False
__struct_stack = [] # stack of struct name
#swarm
__showThreadLines = False
    def init(self):
        """Declare this module's input and output parameters."""
        self.addInputParam('rounds', 'round-robin schedules', 'r', '1', False)
        self.addInputParam('threads', 'max no. of thread creations (0 = auto)', 't', '0', False)
        self.addInputParam('deadlock', 'check for deadlock', '', default=False, optional=True)
        self.addInputParam('decomposepc', 'use seperate variable for each pc', '', default=False, optional=True)
        self.addInputParam('robin', 'use round robin schedule', '', default=False, optional=True)
        self.addInputParam('preanalysis', 'use preanalysis input from abstract interpretation backend', 'u', default=None, optional=True)
        # Outputs consumed by later modules in the translation chain.
        self.addOutputParam('bitwidth')
        self.addOutputParam('header')
    def loadfromstring(self, string, env):
        """Translate the input program and compute per-thread metadata.

        Reads the module's input parameters, runs the parent translator over
        `string`, then emits the sequentialization header, per-thread pc
        bitwidths, and the goto/label fix-ups.
        """
        if self.getInputParamValue('deadlock') is not None:
            self._deadlockcheck = True
        threads = int(self.getInputParamValue('threads'))
        rounds = int(self.getInputParamValue('rounds'))
        # NOTE(review): 'backend' is read but never used in this method.
        backend = self.getInputParamValue('backend')
        if self.getInputParamValue("preanalysis") is not None:
            self.__preanalysis = self.getInputParamValue("preanalysis")
            if env.debug:
                # In debug mode, dump the preanalysis next to the output file.
                seqfile = core.utils.rreplace(env.inputfile, '/', '/_cs_', 1) if '/' in env.inputfile else '_cs_' + env.inputfile
                if env.outputfile is not None and env.outputfile != '':
                    seqfile = env.outputfile
                logfile = seqfile + '.framac.log.extract'
                with open(logfile, "w") as logfile:
                    logfile.write(str(self.__preanalysis))
        if self.getInputParamValue('decomposepc') is not None:
            self.__decomposepc = True
        if self.getInputParamValue('robin') is not None:
            self.__roundrobin = True
        self.__threadbound = threads
        # Swarm-specific state, reset for every run of this module.
        self.__intervals = {}        # intervals for each thread
        self.__currInterval = []     # current intervals of a thread for this module
        self.__realStmtCount = -1    # real statement counter
        self.__showThreadLines = env.contextswitch
        self.__intervals = env.intervals if hasattr(env, 'intervals') else {}
        # Run the actual translation (populates __lines, __threadName, output, ...).
        super(self.__class__, self).loadfromstring(string, env)
        if self.__showThreadLines:
            # Dump per-thread statement counts and exit (no code generation).
            self.showThreadLines()
        # Add the new main().
        # if self.__roundrobin:
        #     if self.__decomposepc:
        #         self.output += self.__createMainRoundRobinDecomposePC(rounds)
        #     else:
        #         self.output += self.__createMainRoundRobin(rounds)
        # else:
        #     if self.__decomposepc:
        #         self.output += self.__createMainDecomposePC(rounds)
        #     else:
        #         self.output += self.__createMain(rounds)
        # Insert the thread sizes (i.e. number of visible statements).
        lines = ''
        i = maxsize = 0
        for t in self.__threadName:
            if i <= self.__threadbound:
                if i > 0: lines += ', '
                lines += str(self.__lines[t])
                maxsize = max(int(maxsize), int(self.__lines[t]))
                #print "CONFRONTO %s %s " % (int(maxsize), int(self.__lines[t]))
                i += 1
        # Pad the remaining slots with -1.
        # NOTE(review): this looks like it should be a loop over the remaining
        # slots up to __threadbound; as written it appends at most one '-1'.
        # Confirm against the non-swarm lazyseqnewschedule module.
        ones = ''
        if i <= self.__threadbound:
            if i > 0: ones += ', '
            ones += '-1'
            i += 1
        # Generate the header.
        #
        # The first part is not parsable (contains macros),
        # so it is passed to the next module as a header...
        if self.__decomposepc:
            header = core.utils.printFile('modules/lazyseqAdecomposepc.c')
        else:
            header = core.utils.printFile('modules/lazyseqA.c')
        header = header.replace('<insert-maxthreads-here>', str(threads))
        header = header.replace('<insert-maxrounds-here>', str(rounds))
        self.setOutputParam('header', header)
        # Per-thread pc declarations with minimal bitwidths (k bits are
        # enough to count up to threadsize; +1 for the context-switch copy).
        i = 0
        pc_decls = ''
        pc_cs_decls = ''
        join_replace = ''
        for t in self.__threadName:
            if i <= self.__threadbound:
                threadsize = self.__lines[t]
                k = int(math.floor(math.log(threadsize, 2))) + 1
                pc_decls += 'unsigned int __cs_pc_%s;\n' % i
                self._bitwidth['', '__cs_pc_%s' % i] = k
                pc_cs_decls += 'unsigned int __cs_pc_cs_%s;\n' % i
                self._bitwidth['', '__cs_pc_cs_%s' % i] = k + 1
                join_replace += 'if (__cs_id == %s) __CSEQ_assume(__cs_pc_%s == __cs_thread_lines[%s]);\n' % (i, i, i)
                i += 1
        join_replace += 'if (__cs_id >= %s) __CSEQ_assume(0);\n' % (i)
        # ...this is parsable and is added on top of the output code,
        # as the next module is able to parse it.
        # if not self._deadlockcheck:
        #     if self.__decomposepc:
        #         header = core.utils.printFile('modules/lazyseqBnewscheduledecomposepc.c').replace('<insert-threadsizes-here>',lines)
        #         header = header.replace('<insert-pc-decls-here>', pc_decls + pc_cs_decls)
        #         header = header.replace('<insert-join_replace-here>', join_replace)
        #     else:
        #         header = core.utils.printFile('modules/lazyseqBnewschedule.c').replace('<insert-threadsizes-here>',lines)
        # else:
        #     header = core.utils.printFile('modules/lazyseqBdeadlock.c').replace('<insert-threadsizes-here>',lines)
        # header = header.replace('<insert-all1-here>', ones)
        # self.insertheader(header)
        # Calculate exact bitwidth size for a few integer control variables of
        # the seq. schema, good in case the backend handles bitvectors.
        self._bitwidth['', '__cs_active_thread'] = 1
        k = int(math.floor(math.log(maxsize, 2))) + 1
        if self.__decomposepc is False:
            self._bitwidth['', '__cs_pc'] = k
            self._bitwidth['', '__cs_pc_cs'] = k + 1
        self._bitwidth['', '__cs_thread_lines'] = k
        k = int(math.floor(math.log(self.__threadbound, 2))) + 1
        self._bitwidth['', '__cs_last_thread'] = k
        self._bitwidth[core.common.changeID['pthread_mutex_lock'], '__cs_thread_index'] = k
        self._bitwidth[core.common.changeID['pthread_mutex_unlock'], '__cs_thread_index'] = k
        self.setOutputParam('__cs_bitwidth', self._bitwidth)
        # Fix gotos by inserting ASS_GOTO(..) blocks before each goto,
        # excluding gotos whose destination is the line below.
        for (a, b) in self.__labelLine:
            if (a, b) in self.__gotoLine and (self.__labelLine[a, b] == self.__gotoLine[a, b] + 1):
                self.output = self.output.replace('<%s,%s>' % (a, b), '')
            else:
                self.output = self.output.replace('<%s,%s>' % (a, b), 'ASS_GOTO(%s)' % self.__labelLine[a, b])
        self.setOutputParam('bitwidth', self._bitwidth)
def visit_Decl(self,n,no_type=False):
# no_type is used when a Decl is part of a DeclList, where the type is
# explicitly only for the first declaration in a list.
#
s = n.name if no_type else self._generate_decl(n)
if 'scalar' in self.__preanalysis and n.name in self.__preanalysis['scalar']:
self._bitwidth[self.__currentThread, n.name] = self.__preanalysis['scalar'][n.name]
if 'pointer' in self.__preanalysis and n.name in self.__preanalysis['pointer']:
self._bitwidth[self.__currentThread, n.name] = self.__preanalysis['pointer'][n.name]
if 'array' in self.__preanalysis and n.name in self.__preanalysis['array']:
self._bitwidth[self.__currentThread, n.name] = self.__preanalysis['array'][n.name]
if (self.__visiting_struct and
'struct' in self.__preanalysis and
self.__struct_stack[-1] in self.__preanalysis['struct'] and
n.name in self.__preanalysis['struct'][self.__struct_stack[-1]]
):
# TODO: remember that for a field in struct, only multiple of 8bits is acceptable
numbit = self.__preanalysis['struct'][self.__struct_stack[-1]][n.name]
self._bitwidth[self.__struct_stack[-1], n.name] = numbit
if n.bitsize: s += ' : ' + self.visit(n.bitsize)
if n.init:
s += ' = ' + self._visit_expr(n.init)
return s
def _generate_struct_union(self, n, name):
""" Generates code for structs and unions. name should be either
'struct' or union.
"""
s = name + ' ' + (n.name or '')
# There should be no anonymous struct, handling in workarounds module
self.__visiting_struct = True
if n.name:
self.__struct_stack.append(n.name)
if n.decls:
s += '\n'
s += self._make_indent()
self.indent_level += 2
s += '{\n'
for decl in n.decls:
s += self._generate_stmt(decl)
self.indent_level -= 2
s += self._make_indent() + '}'
self.__visiting_struct = False
self.__struct_stack.pop()
return s
def showThreadLines(self):
print "Number of context-switch of each thread:"
for t in self.__threadName:
if t in self.__lines:
print t, ':', str(self.__lines[t])
else:
self.error('This thread %s is misinterpreted' % t)
import sys
sys.exit(0)
def visit_Compound(self, n):
s = self._make_indent() + '{\n'
self.indent_level += 1
# Insert the labels at the beginning of each statement,
# with a few exclusions to reduce context-switch points...
#
if n.block_items:
for stmt in n.block_items:
# Case 1: last statement in a thread (must correspond to last label)
if type(stmt) == pycparser.c_ast.FuncCall and stmt.name.name == core.common.changeID['pthread_exit']: ##if type(stmt) == pycparser.c_ast.FuncCall and self._parenthesize_unless_simple(stmt.name) == core.common.changeID['pthread_exit']:
self.__stmtCount += 1
self.__realStmtCount += 1
self.__maxInCompound = self.__realStmtCount
| |
# filename: self_play.py (gh_stars: 0 — dataset artifact, kept as a comment so the file parses)
import time
import math
import numpy
import ray
import torch
import models
@ray.remote
class SelfPlay:
"""
Class which runs in a dedicated thread to play games and save them to the replay buffer.
"""
def __init__(self, initial_checkpoint, Game, config, seed):
self.config = config
self.game = Game(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
# Initialize MuZero's neural nets
self.model = models.MuZeroNetwork(self.config)
self.model.set_weights(initial_checkpoint["weights"])
self.model.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
self.model.eval()
### And that's it, pretty simple init.
### I tried running it again, but this time I got this error:
### self.replay_buffer = replay_buffer.Reanalyse.options(
### AttributeError: module 'replay_buffer' has no attribute 'Reanalyse'
### So I went back to replay_buffer.py to implement the init for Reanalyze. By the way, he spelled reanalyze wrong lol.
    # Worker loop: continuously play games, saving them to the replay buffer for training.
    def continuous_self_play(self, shared_storage, replay_buffer, test_mode=False):
        """Play games until training finishes or termination is requested.

        Args:
            shared_storage: ray actor handle holding training state
                (weights, step counters, statistics).
            replay_buffer: ray actor handle that stores finished games.
            test_mode: when True, play greedily (temperature 0) and only
                report statistics instead of feeding the replay buffer.
        """
        # Loop until the trainer reaches its step budget or asks us to stop.
        while (
            ray.get(shared_storage.get_info.remote("training_step")) < self.config.training_steps
            and not ray.get(shared_storage.get_info.remote("terminate"))
        ):
            # Refresh the local network with the latest trained weights.
            self.model.set_weights(ray.get(shared_storage.get_info.remote("weights")))
            # If not testing, play the game and save it to the replay buffer;
            # otherwise take the best action (no exploration) in test mode.
            if not test_mode:
                game_history = self.play_game( # Play the game
                    # The softmax temperature reshapes the visit-count
                    # distribution so action selection becomes greedier as
                    # training progresses: the smaller the temperature, the
                    # likelier the most-visited action is chosen.
                    self.config.visit_softmax_temperature_fn(
                        trained_steps=ray.get(
                            shared_storage.get_info.remote("training_step")
                        )
                    ),
                    self.config.temperature_threshold,
                    False,
                    "self",
                    0
                )
                replay_buffer.save_game.remote(game_history, shared_storage) # Save the game to the replay buffer
            else:
                # Take the best action (no exploration) in test mode
                game_history = self.play_game(
                    0,
                    self.config.temperature_threshold,
                    False,
                    "self" if len(self.config.players) == 1 else self.config.opponent,
                    self.config.muzero_player # Turn on which MuZero plays (0: MuZero first, 1: MuZero second).
                )
                # Save episode statistics to shared storage.
                shared_storage.set_info.remote(
                    {
                        "episode_length": len(game_history.action_history) - 1,
                        "total_reward": sum(game_history.reward_history),
                        "mean_value": numpy.mean([value for value in game_history.root_values if value])
                    }
                )
                # For multiplayer games, record MuZero's and the opponents' rewards separately.
                if 1 < len(self.config.players):
                    shared_storage.set_info.remote(
                        {
                            "muzero_reward": sum(
                                # Rewards earned on MuZero's turns.
                                reward for i, reward in enumerate(game_history.reward_history)
                                if game_history.to_play_history[i - 1] == self.config.muzero_player
                            ),
                            "opponent_reward": sum(
                                # Rewards earned on every non-MuZero turn.
                                # NOTE(review): with 3+ players this lumps all
                                # opponents' rewards together — confirm intended.
                                reward for i, reward in enumerate(game_history.reward_history)
                                if game_history.to_play_history[i - 1] != self.config.muzero_player
                            )
                        }
                    )
            # Throttle self-play relative to training to avoid
            # over/underfitting (mirrors the ratio check in trainer.py).
            if not test_mode and self.config.self_play_delay:
                time.sleep(self.config.self_play_delay)
            if not test_mode and self.config.ratio:
                while ( # Sleep while the training/self-play ratio is below target.
                    ray.get(shared_storage.get_info.remote("training_step")) / max(1, ray.get(shared_storage.get_info.remote("num_played_steps"))) < self.config.ratio
                    and ray.get(shared_storage.get_info.remote("training_step")) < self.config.training_steps
                    and not ray.get(shared_storage.get_info.remote("terminate"))
                ):
                    time.sleep(0.5)
        self.close_game()
    def play_game(self, temperature, temperature_threshold, render, opponent, muzero_player):
        """
        Play one game with actions based on Monte Carlo Tree Search at each move.

        :param temperature: softmax temperature used when sampling an action
            from the root visit counts (0 = greedy).
        :param temperature_threshold: number of moves after which the
            temperature is forced to 0 (falsy = never).
        :param render: Boolean, whether or not to render the game.
        :param opponent: who plays the non-MuZero turns: "self", "human",
            "expert" or "random".
        :param muzero_player: which turn MuZero plays (0: first, 1: second).
        :return: game_history — the full trajectory of the finished game.
        """
        game_history = GameHistory()
        observation = self.game.reset()
        # Seed the histories with a dummy first step so that index i of each
        # history refers to the state *before* action i.
        game_history.action_history.append(0)
        game_history.observation_history.append(observation)
        game_history.reward_history.append(0)
        game_history.to_play_history.append(self.game.to_play())
        done = False
        # Render the initial state of the game/environment.
        if render:
            self.game.render()
        # Inference only: no gradients are needed while generating games.
        with torch.no_grad():
            # Main gameplay loop, capped at config.max_moves.
            while not done and len(game_history.action_history) <= self.config.max_moves:
                # The network's representation input expects a 3-D observation
                # (see config.observation_shape below).
                assert (len(numpy.array(observation).shape) == 3), f"Observation should be 3 dimensional instead of {len(numpy.array(observation).shape)} dimensional. Got observation of shape: {numpy.array(observation).shape}"
                # The observation must match the shape declared in MuZeroConfig.
                assert (numpy.array(observation).shape == self.config.observation_shape), f"Observation should match the observation_shape defined in MuZeroConfig. Expected {self.config.observation_shape} but got {numpy.array(observation).shape}."
                # Stack the last config.stacked_observations observations/actions onto the current one.
                stacked_observations = game_history.get_stacked_observations(-1, self.config.stacked_observations)
                # Choose the action: MCTS when it's MuZero's turn (or pure
                # self-play), otherwise delegate to the configured opponent.
                if opponent == "self" or muzero_player == self.game.to_play():
                    root, mcts_info = MCTS(self.config).run(self.model, stacked_observations, self.game.legal_actions(), self.game.to_play(), True) # Create and run the MCTS.
                    # select_action() samples from the root's children visit counts.
                    action = self.select_action(root, temperature if not temperature_threshold or len(game_history.action_history) < temperature_threshold else 0)
                    # When rendering, also report MCTS diagnostics.
                    if render:
                        print(f'Tree depth: {mcts_info["max_tree_depth"]}')
                        print(f"Root value for player {self.game.to_play()}: {root.value():.2f}")
                else:
                    action, root = self.select_opponent_action(opponent, stacked_observations)
                # Execute the action in the environment.
                observation, reward, done = self.game.step(action)
                # Print the action taken and render the resulting state.
                if render:
                    print(f"Played action: {self.game.action_to_string(action)}")
                    self.game.render()
                # Store tree search statistics of this move in game_history.
                game_history.store_search_statistics(root, self.config.action_space)
                # Append the new transition.
                game_history.action_history.append(action)
                game_history.observation_history.append(observation)
                game_history.reward_history.append(reward)
                game_history.to_play_history.append(self.game.to_play())
        # The caller saves this trajectory to the replay buffer / shared storage.
        return game_history
    # Thin wrapper so callers don't reach into self.game directly.
    def close_game(self):
        """Release the underlying game/environment resources."""
        self.game.close()
# Select the action for the opponent playing against muzero.
def select_opponent_action(self, opponent, stacked_observations):
"""
Select the action for the opponent playing against muzero.
:param opponent: The opponent playing against muzero.
:param stacked_observations:
:return: action
"""
# Select action depending on the type of opponent.
if opponent == "human":
# Create a MuZero MCTS so that MuZero can suggest a move to the human player.
root, mcts_info = MCTS(self.config).run(
self.model,
stacked_observations,
self.game.legal_actions(),
self.game.to_play(),
True,
)
# Print out info about the MCTS, let MuZero suggest its move to the human player.
print(f'Tree depth: {mcts_info["max_tree_depth"]}')
print(f"Root value for player {self.game.to_play()}: {root.value():.2f}")
print(f"Player {self.game.to_play()} turn. MuZero suggests {self.game.action_to_string(self.select_action(root, 0))}")
return self.game.human_to_action(), root
elif opponent == "expert":
return self.game.expert_agent(), None
elif opponent == "random":
# Make sure legal_actions are valid.
assert (self.game.legal_actions()), f"Legal actions should not be an empty array. Got {self.game.legal_actions()}."
assert set(self.game.legal_actions()).issubset(set(self.config.action_space)), "Legal actions should be a subset of the action space."
return numpy.random.choice(self.game.legal_actions()), None
else:
raise NotImplementedError(f'Wrong argument: "opponent" argument should be "self", "human", "expert" or "random". "{opponent}" is not an implemented opponent.')
# Select MuZero's action based on the visit counts of each node in the MCTS. But where do the neural nets | |
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import database
import inflect
import re
from exceptions import ValidationError, InvalidSchemaException, \
InvalidReloadException
from pymongo import DESCENDING
from bson import ObjectId
from copy import deepcopy
# Shared inflect engine, used to pluralize collection names (see collection_name).
inflect_engine = inflect.engine()

# Maps JSON-schema type names to the Python types accepted for them.
# (Python 2 code: "string" accepts both str and unicode via basestring.)
ValidTypes = {
    "integer": int,
    "boolean": bool,
    "number": float,
    "string": basestring,
    "object_id": ObjectId,
    "date": datetime
}
class Model(object):
def __init__(self, fields={}, from_find=False, *args, **kwargs):
''' Creates an instance of the object.'''
self._from_find = from_find
fields = deepcopy(fields)
# populate any default fields for objects that haven't come from the DB
if not from_find:
for field, details in self._schema["properties"].items():
if "default" in details and not field in fields:
fields[field] = details["default"]
self._fields = self.cast(fields)
self.validate()
def reload(self):
''' Reload this object's data from the DB. '''
result = self.__class__.find_by_id(self._id)
# result will be None in the case that this object hasn't yet been
# saved to the DB, or if the object has been deleted since it was
# fetched
if result:
self._fields = self.cast(result._fields)
else:
raise InvalidReloadException("No object in the database with ID %s" % self._id)
def save(self, *args, **kwargs):
''' Saves an object to the database. '''
self.validate()
# set safe to True by default, older versions of pymongo didn't do that
if not "safe" in kwargs:
kwargs["safe"] = True
self._id = self.collection().save(self._fields, *args, **kwargs)
def delete(self):
''' Removes an object from the database. '''
if self._id:
self.collection().remove({"_id": ObjectId(str(self._id))})
def get(self, field, default=None):
''' Get a field if it exists, otherwise return the default. '''
return self._fields.get(field, default)
@classmethod
def bulk_create(cls, objects, *args, **kwargs):
''' Create a number of objects (yay performance). '''
docs = [obj._fields for obj in objects]
return cls.collection().insert(docs)
@classmethod
def find_or_create(cls, query, *args, **kwargs):
''' Retrieve an element from the database. If it doesn't exist, create
it. Calling this method is equivalent to calling find_one and then
creating an object. Note that this method is not atomic. '''
result = cls.find_one(query, *args, **kwargs)
if result is None:
default = cls._schema.get("default", {})
default.update(query)
result = cls(default, *args, **kwargs)
return result
@classmethod
def find(cls, *args, **kwargs):
''' Grabs a set of elements from the DB.
Note: This returns a generator, so you can't to do an efficient count.
To get a count, use the count() function which accepts the same
arguments as find() with the exception of non-query fields like sort,
limit, skip.
'''
options = {}
for option in ["sort", "limit", "skip", "batch_size"]:
if option in kwargs:
options[option] = kwargs[option]
del options[option]
if "batch_size" in options and "skip" not in options and "limit" not in options:
# run things in batches
current_skip = 0
limit = options["batch_size"]
found_something = True
while found_something:
found_something = False
result = cls.collection().find(*args, **kwargs)
result = result.skip(current_skip).limit(limit)
if "sort" in options:
result = result.sort(options["sort"])
for obj in result:
found_something = True
yield cls(obj, from_find=True)
current_skip += limit
else:
result = cls.collection().find(*args, **kwargs)
if "sort" in options:
result = result.sort(options["sort"])
if "skip" in options:
result = result.skip(options["skip"])
if "limit" in options:
result = result.limit(options["limit"])
for obj in result:
yield cls(obj, from_find=True)
@classmethod
def find_by_id(cls, id, **kwargs):
''' Finds a single object from this collection. '''
if isinstance(id, basestring):
id = ObjectId(id)
args = {"_id": id}
result = cls.collection().find_one(args, **kwargs)
if result is not None:
return cls(result, from_find=True)
return None
@classmethod
def find_latest(cls, *args, **kwargs):
''' Finds the latest one by _id and returns it. '''
kwargs["limit"] = 1
kwargs["sort"] = [("_id", DESCENDING)]
result = cls.collection().find(*args, **kwargs)
if result.count() > 0:
return cls(result[0], from_find=True)
return None
@classmethod
def find_one(cls, *args, **kwargs):
''' Finds a single object from this collection. '''
result = cls.collection().find_one(*args, **kwargs)
if result is not None:
return cls(result)
return None
@classmethod
def count(cls, *args, **kwargs):
''' Counts the number of items:
- not the same as pymongo's count, this is the equivalent to:
collection.find(*args, **kwargs).count()
'''
return cls.collection().find(*args, **kwargs).count()
@classmethod
def collection(cls):
''' Get the pymongo collection object for this model. Useful for
features not supported by Warmongo like aggregate queries and
map-reduce. '''
return database.get_collection(collection=cls.collection_name(),
database=cls.database_name())
@classmethod
def collection_name(cls):
''' Get the collection associated with this class. The convention is
to take the lowercase of the class name and pluralize it. '''
global inflect_engine
if cls._schema.get("collectionName"):
return cls._schema.get("collectionName")
elif cls._schema.get("name"):
name = cls._schema.get("name")
else:
name = cls.__name__
# convert to snake case
name = (name[0] + re.sub('([A-Z])', r'_\1', name[1:])).lower()
# pluralize
return inflect_engine.plural(name)
@classmethod
def database_name(cls):
''' Get the database associated with this class. Meant to be overridden
in subclasses. '''
if cls._schema.get("databaseName"):
return cls._schema.get("databaseName")
return None
def to_dict(self):
''' Convert the object to a dict. '''
return self._fields
def validate(self):
''' Validate `schema` against a dict `obj`. '''
self.validate_field("", self._schema, self._fields)
def validate_field_type(self, key, value_schema, value, value_type):
if isinstance(value_type, list):
for subtype in value_type:
try:
self.validate_field_type(key, value_schema, value, subtype)
# We got this far, so break
break
except ValidationError:
# Ignore it
pass
else:
# None of them passed,
raise ValidationError("Field '%s' must be one of the following types: '%s', received '%s' (%s)" %
(key, ", ".join(value_type), str(value), type(value)))
elif value_type == "array":
self.validate_array(key, value_schema, value)
elif value_type == "object":
self.validate_object(key, value_schema, value)
elif value_type == "null":
self.validate_null(key, value_schema, value)
else:
self.validate_simple(key, value_type, value)
def validate_field(self, key, value_schema, value):
''' Validate a single field in `value` named `key` against `value_schema`. '''
# check the type
value_type = value_schema.get("type", "object")
self.validate_field_type(key, value_schema, value, value_type)
def validate_array(self, key, value_schema, value):
if not isinstance(value, list):
raise ValidationError("Field '%s' is of type 'array', received '%s' (%s)" %
(key, str(value), type(value)))
if value_schema.get("items"):
for item in value:
self.validate_field(key, value_schema["items"], item)
else:
# no items, this is an untyped array
pass
def validate_object(self, key, value_schema, value):
if not isinstance(value, dict):
raise ValidationError("Field '%s' is of type 'object', received '%s' (%s)" %
(key, str(value), type(value)))
if not value_schema.get("properties"):
# no validation on this object
return
for subkey, subvalue in value_schema["properties"].items():
if subkey in value:
self.validate_field(subkey, subvalue, value[subkey])
elif subvalue.get("required", False) and not self._from_find:
# if the field is required and we haven't pulled from find,
# throw an exception
raise ValidationError("Field '%s' is required but not found!" %
subkey)
# Check for additional properties
if not value_schema.get("additionalProperties", True):
extra = set(value.keys()) - set(value_schema["properties"].keys())
if len(extra) > 0:
raise ValidationError("Additional properties are not allowed: %s" %
', '.join(list(extra)))
def validate_null(self, key, value_schema, value):
if value is not None:
raise ValidationError("Field '%s' is expected to be null!" % key)
def validate_simple(self, key, value_type, value):
''' Validate a simple field (not an object or array) against a schema. '''
if value_type == "any":
# can be anything
pass
elif value_type == "number" or value_type == "integer":
# special case: can be an int or a float
valid_types = [int, float, long]
matches = [klass for klass in valid_types if isinstance(value, klass)]
if len(matches) == 0:
raise ValidationError("Field '%s' is of type '%s', received '%s' (%s)" %
(key, value_type, str(value), type(value)))
elif value_type in ValidTypes:
if not isinstance(value, ValidTypes[value_type]):
raise ValidationError("Field '%s' is of type '%s', received '%s' (%s)" %
(key, value_type, str(value), type(value)))
# TODO: check other things like maximum, etc.
else:
# unknown type
raise InvalidSchemaException("Unknown type '%s'!" % value_type)
def cast(self, fields, schema=None):
''' Cast the fields from Mongo into our format - necessary to convert
floats into ints since Javascript doesn't support ints. '''
if schema is None:
schema = self._schema
value_type = schema.get("type", "object")
if value_type == "object" and isinstance(fields, dict) and schema.get("properties"):
return {
key: self.cast(value, schema["properties"].get(key, {})) for key, value | |
# repository: EmPlatts/FRB (dataset artifact, kept as a comment so the file parses)
""" Module for IGM calculations
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os
from IPython import embed
from pkg_resources import resource_filename
from scipy.interpolate import interp1d
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
from astropy import units
from astropy.table import Table
from astropy.utils import isiterable
from astropy.cosmology import Planck15
from astropy import constants
from frb import halos
from frb import mw
def fukugita04_dict():
    """
    Baryon mass-density components from Fukugita (2004), Table 1.

    Covers stellar spheroid/disk mass, neutral and molecular hydrogen,
    and stellar remnants (white dwarfs, neutron stars, black holes,
    brown dwarfs).

    Returns:
        f04_dict (dict): component name -> density value.
    """
    return {
        'M_sphere': 0.0015,
        'M_disk': 0.00055,
        'M_HI': 0.00062,
        'M_H2': 0.00016,
        'M_WD': 0.00036,
        'M_NS': 0.00005,
        'M_BH': 0.00007,
        'M_BD': 0.00014,
    }
def average_fHI(z, z_reion=7.):
    """
    Average neutral-hydrogen fraction: 1 = neutral, 0 = fully ionized.

    Modeled as a step function at the reionization redshift.

    Args:
        z (float or ndarray): redshift
        z_reion (float, optional): redshift of reionization.

    Returns:
        fHI (float or ndarray): same shape as the input.
    """
    z, flg_z = z_to_array(z)
    fHI = np.zeros_like(z)
    # Fully neutral above the reionization redshift
    fHI[z > z_reion] = 1.
    return fHI if flg_z else fHI[0]
def average_He_nume(z, z_HIreion=7.):
    """
    Average number of free electrons contributed per He nucleus.

    Follows Kulkarni, Worseck & Hennawi 2018
    (https://arxiv.org/abs/1807.09774): HeI is singly ionized at H
    reionization; HeII is progressively doubly ionized according to the
    packaged Q_HeIII table.

    Args:
        z (float or ndarray): Redshift
        z_HIreion (float, optional): Hydrogen (and HeI) reionization redshift.

    Returns:
        neHe (float or ndarray): free electrons per He nucleus.
    """
    z, flg_z = z_to_array(z)
    # Kulkarni et al. HeIII volume-filling fractions, shipped with the package
    He_file = resource_filename('frb', os.path.join('data','IGM','qheIII.txt'))
    qHeIII = Table.read(He_file, format='ascii')
    # First redshift at which He is fully doubly ionized
    first_ionized = np.where(qHeIII['Q_HeIII_18'] >= 1.)[0][0]
    z_HeIIreion = qHeIII['z'][first_ionized]
    # Neutral fractions of HeI and HeII
    fHeI = np.zeros_like(z)
    fHeII = np.zeros_like(z)
    # HeI is neutral above H reionization
    fHeI[z > z_HIreion] = 1.
    # Between the two reionization epochs, HeII is partially ionized
    between = (z > z_HeIIreion) & (z < z_HIreion)
    fi_HeIII = interp1d(qHeIII['z'], qHeIII['Q_HeIII_18'])
    fHeII[between] = 1. - fi_HeIII(z[between])
    # First electron from HeI ionization, second from HeII ionization
    # (no factor 2 on the second term: the first term already supplies one)
    neHe = (1. - fHeI) + (1. - fHeII)
    return neHe if flg_z else neHe[0]
def z_from_DM(DM, cosmo=Planck15, coord=None, corr_nuisance=True):
    """
    Estimate a redshift from an input IGM dispersion measure.

    Any contributions from the Galaxy and/or host need to have been
    'removed'; optionally the Galactic ISM (via coord) and a 100 pc/cm^3
    nuisance term (MW halo + host) are subtracted here first.

    Args:
        DM (Quantity): Dispersion measure.
        cosmo (Cosmology, optional): Cosmology of the universe
            (Planck15 LambdaCDM by default).
        coord (SkyCoord, optional): If provided, use it to remove the ISM.
        corr_nuisance (bool, optional): If True, correct for the MW halo
            and the host with 100 DM units.

    Returns:
        z (float): Redshift
    """
    DM_use = DM if coord is None else DM - mw.ismDM(coord)
    if corr_nuisance:
        DM_use -= 100 * units.pc / units.cm**3
    # Build a cumulative <DM>(z) lookup table out to z=20
    all_DM, zeval = average_DM(20., cosmo=cosmo, neval=20000, cumul=True)
    # Invert DM(z) -> z by interpolation and evaluate
    z_of_DM = interp1d(all_DM.value, zeval)
    return z_of_DM(DM_use.to('pc/cm**3').value)
def f_diffuse(z, cosmo=Planck15, return_rho = False):
    """
    Cosmic fraction of baryons in the diffuse gas phase, based on
    empirical knowledge of baryon distributions and their ionization state.

    Args:
        z (float or ndarray): Redshift
        cosmo (Cosmology, optional): Cosmology of the universe.
        return_rho (bool, optional): If true, also return the physical
            diffuse gas density.

    Returns:
        f_diffuse (float, ndarray): Diffuse gas baryon fraction.
        rho_diffuse (Quantity): Physical diffuse gas density
            (only when return_rho is true).
    """
    # Comoving baryon mass density
    rho_b = cosmo.Ob0 * cosmo.critical_density0.to('Msun/Mpc**3')
    # Dense phases: stars (including remnants) and the ISM
    rho_dense = avg_rhoMstar(z, remnants=True) + avg_rhoISM(z, cosmo=cosmo)
    # Everything that is not dense is diffuse
    frac = 1 - (rho_dense / rho_b).value
    if return_rho:
        # (1+z)^3 converts the comoving density to a physical one
        return frac, rho_b * frac * (1 + z)**3
    return frac
def ne_cosmic(z, cosmo = Planck15, mu = 4./3):
    """
    Average cosmic electron number density as a function of redshift.

    Args:
        z (float or ndarray): Redshift
        cosmo (Cosmology, optional): Cosmology in which the calculations
            are to be performed.
        mu (float): Reduced mass

    Returns:
        ne_cosmic (Quantity): Average physical electron number density
            in cm^-3.
    """
    # Physical diffuse-gas mass density at z
    _, rho_diffuse = f_diffuse(z, cosmo=cosmo, return_rho=True)
    # Hydrogen and helium nuclei number densities
    n_H = (rho_diffuse / constants.m_p / mu).to('cm**-3')
    n_He = n_H / 12.  # 25% He mass fraction
    # Electrons from ionized H plus electrons contributed per He nucleus
    return n_H * (1. - average_fHI(z)) + n_He * average_He_nume(z)
def average_DM(z, cosmo = Planck15, cumul=False, neval=10000, mu=4/3):
    """
    Calculate the average cosmic DM 'expected' based on our empirical
    knowledge of baryon distributions and their ionization state.
    This includes both the IGM and galactic halos, i.e. any and all diffuse gas

    Args:
        z (float): Redshift
        cosmo (Cosmology, optional): Cosmology used for n_e(z) and H(z).
        cumul (bool, optional): Return the DM as a function of z
        neval (int, optional): Number of redshift evaluation points.
        mu (float): Reduced mass correction for He when calculating n_H

    Returns:
        DM (Quantity or Quantity array): DM values evaluated at
            the required redshifts.  An array is returned only if
            cumul is True.
        zeval (ndarray): evaluation redshifts.  Only returned if
            cumul is True.
    """
    # Init
    zeval, dz = np.linspace(0., z, neval, retstep=True)
    # Get n_e as a function of z.  Forward cosmo and mu explicitly:
    # previously these arguments were accepted but silently ignored and
    # ne_cosmic always ran with its own defaults.
    n_e = ne_cosmic(zeval, cosmo=cosmo, mu=mu)
    # Cosmology -- 2nd term is the (1+z) factor for DM
    denom = cosmo.H(zeval) * (1+zeval) * (1+zeval)
    # Time to Sum
    DM_cum = (constants.c * np.cumsum(n_e * dz / denom)).to('pc/cm**3')
    # Return
    if cumul:
        return DM_cum, zeval
    else:
        return DM_cum[-1]
def average_DMhalos(z, cosmo = Planck15, f_hot = 0.75, rmax=1., logMmin=10.3, neval = 10000, cumul=False):
    """
    Average DM_halos term from halos along the sightline to an FRB.

    Args:
        z (float): Redshift of the FRB
        cosmo (Cosmology): Cosmology in which the calculations
            are to be performed.
        f_hot (float, optional): Fraction of the halo baryons in diffuse phase.
        rmax (float, optional): Size of a halo in units of r200
        logMmin (float, optional): Lowest mass halos to consider.
            Cannot be much below 10.3 or the Halo code barfs.
            The code deals with h^-1 factors, i.e. do not impose it yourself.
        neval (int, optional): Number of redshift values between
            0 and z the function is evaluated at.
        cumul (bool, optional): Return a cumulative evaluation?

    Returns:
        DM_halos (Quantity or Quantity array): One value if cumul=False
            else evaluated at a series of z
        zeval (ndarray): Evaluation redshifts if cumul=True
    """
    zeval, dz = np.linspace(0, z, neval, retstep=True)
    # Total cosmic electron density and diffuse-gas fraction on the fine grid
    n_e_total = ne_cosmic(zeval, cosmo=cosmo)
    diffuse_frac = f_diffuse(zeval, cosmo=cosmo)
    # Fraction of matter in halos, computed coarsely then splined onto zeval
    z_coarse = np.linspace(0, z, 20)
    halo_frac = halos.frac_in_halos(z_coarse, Mlow=10**logMmin, Mhigh=1e16, rmax=rmax)
    halo_frac_fine = IUS(z_coarse, halo_frac)(zeval)
    # Electrons residing in (hot) halo gas only
    n_e_halos = n_e_total * halo_frac_fine * f_hot / diffuse_frac
    # Cosmology -- 2nd term is the (1+z) factor for DM
    denom = cosmo.H(zeval) * (1+zeval) * (1+zeval)
    # Cumulative DM from halo gas
    dm_cum = (constants.c * np.cumsum(n_e_halos * dz / denom)).to('pc/cm**3')
    return (dm_cum, zeval) if cumul else dm_cum[-1]
def average_DMIGM(z, cosmo = Planck15, f_hot = 0.75, rmax=1., logMmin=10.3, neval = 10000, cumul=False):
    """
    Estimate DM_IGM: the total cosmic DM with the halo contribution removed.

    Args:
        z (float): Redshift of the FRB
        cosmo (Cosmology, optional): Cosmology in which the calculations
            are to be performed.  LambdaCDM with Planck15 parameters
            assumed by default.
        f_hot (float, optional): Fraction of the halo baryons
            in diffuse phase.
        rmax (float, optional): Size of a halo in units of r200
        logMmin (float, optional): Lowest mass halos to consider.
            Cannot be much below 10.3 or the Halo code barfs.  The code
            deals with h^-1 factors, i.e. do not impose it yourself.
        neval (int, optional): Number of redshift values between
            0 and z the function is evaluated at.
        cumul (bool, optional): Return a cumulative evaluation?

    Returns:
        DM (Quantity or Quantity array): One value if cumul=False
            else evaluated at a series of z
        zeval (ndarray, optional): Evaluation redshifts if cumul=True
    """
    # Total (IGM + halos) cosmic DM
    total, zeval = average_DM(z, cosmo=cosmo, cumul=True, neval=neval)
    # Halo-only contribution on the same grid
    in_halos, _ = average_DMhalos(z, cosmo=cosmo, logMmin=logMmin,
                                  f_hot=f_hot, cumul=True, rmax=rmax, neval=neval)
    # The IGM is what remains
    dm_igm = total - in_halos
    if cumul:
        return dm_igm, zeval
    return dm_igm[-1]
def avg_rhoISM(z, cosmo=Planck15):
"""
Co-moving Mass density of the ISM
Interpolates | |
- NON-SKILLED", max_length=2)
SKILLED = models.CharField("TYPE OF HLTH CARE WRKR - SKILLED", max_length=2)
SKILLWOS = models.CharField("SPECIFY TYPE OF SKILLED WORKER", max_length=25)
OTHCW = models.CharField("TYPE OF HLTH CARE WRKR - SOME OTHER", max_length=2)
OTHCWOS = models.CharField("SPECIFY OTHER TYPE HEALTH CARE WORKER", max_length=25)
HOSPITAL = models.CharField("ANY HH CARE SVCE DUE TO HOSPITALIZATION", max_length=2)
VSTRELCN = models.CharField("ANY HH CARE SVCE RELATED TO HLTH COND", max_length=2)
TREATMT = models.CharField("PERSON RECEIVED MEDICAL TREATMENT", max_length=2)
MEDEQUIP = models.CharField("PERSON WAS TAUGHT USE OF MED EQUIPMENT", max_length=2)
DAILYACT = models.CharField("PERSON WAS HELPED WITH DAILY ACTIVITIES", max_length=2)
COMPANY = models.CharField("PERSON RECEIVED COMPANIONSHIP SERVICES", max_length=2)
OTHSVCE = models.CharField("PERSON RECEIVED OTH HOME CARE SERVICES", max_length=2)
OTHSVCOS = models.CharField("SPECIFY OTHER HOME CARE SRVCE RECEIVED", max_length=25)
FREQCY = models.CharField("PROVIDER HELPED EVERY WEEKSOME WEEKS", max_length=2)
DAYSPWK = models.CharField("# DAYS WEEK PROVIDER CAME", max_length=2)
DAYSPMO = models.CharField("# DAYS MONTH PROVIDER CAME", max_length=2)
HOWOFTEN = models.CharField("PROV CAME ONCE PER DAYMORE THAN ONCE", max_length=2)
TMSPDAY = models.CharField("TIMESDAY PROVIDER CAME TO HOME TO HELP", max_length=2)
HRSLONG = models.CharField("HOURS EACH VISIT LASTED", max_length=2)
MINLONG = models.CharField("MINUTES EACH VISIT LASTED", max_length=2)
SAMESVCE = models.CharField("ANY OTH MONS PER RECEIVED SAME SERVICES", max_length=2)
HHDAYS = models.CharField("DAYS PER MONTH IN HOME HEALTH, 2010", max_length=2)
HHSF10X = models.CharField("AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
HHMR10X = models.CharField("AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
HHMD10X = models.CharField("AMOUNT PAID, MEDICAID (IMPUTED)", max_length=8)
HHPV10X = models.CharField("AMOUNT PAID, PRIVATE INSURANCE (IMPUTED)", max_length=7)
HHVA10X = models.CharField("AMOUNT PAID, VETERANSCHAMPVA(IMPUTED)", max_length=6)
HHTR10X = models.CharField("AMOUNT PAID, TRICARE(IMPUTED)", max_length=7)
HHOF10X = models.CharField("AMOUNT PAID, OTHER FEDERAL (IMPUTED)", max_length=5)
HHSL10X = models.CharField("AMOUNT PAID, STATE & LOCAL GOV (IMPUTED)", max_length=7)
HHWC10X = models.CharField("AMOUNT PAID, WORKERS COMP (IMPUTED)", max_length=8)
HHOR10X = models.CharField("AMOUNT PAID, OTHER PRIVATE (IMPUTED)", max_length=7)
HHOU10X = models.CharField("AMOUNT PAID, OTHER PUBLIC (IMPUTED)", max_length=7)
HHOT10X = models.CharField("AMOUNT PAID, OTHER INSURANCE (IMPUTED)", max_length=7)
HHXP10X = models.CharField("SUM OF HHSF10X - HHOT10X (IMPUTED)", max_length=8)
HHTC10X = models.CharField("HHLD REPORTED TOTAL CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT10F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2010", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2010", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2010", max_length=1)
# Methods
def __str__(self):
"""String for representing a HomeHealth10 object"""
return f"{self.DUPERSID}"
class HomeHealth09(models.Model):
    """ Defines the HomeHealth Model for 2009, derived from the model class.

    One row per home-health event for survey year 2009.  All columns are
    stored as raw fixed-width strings (CharField), mirroring the source
    survey file layout.
    """
    # Metadata
    class Meta:
        """ Set parameters for admin app"""
        ordering = ["DUPERSID"]
        verbose_name_plural = "HomeHealth09"
    # --- Respondent / event identifiers and event dates ---
    DUID = models.CharField("DWELLING UNIT ID", max_length=5)
    PID = models.CharField("PERSON NUMBER", max_length=3)
    DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
    EVNTIDX = models.CharField("EVENT ID", max_length=12)
    EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
    PANEL = models.CharField("PANEL NUMBER", max_length=2)
    HHDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
    HHDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
    MPCELIG = models.CharField("MPC ELIGIBILITY FLAG", max_length=1)
    SELFAGEN = models.CharField("DOES PROVIDER WORK FOR AGENCY OR SELF", max_length=2)
    HHTYPE = models.CharField("HOME HEALTH EVENT TYPE", max_length=1)
    # --- Type-of-health-care-worker flags ---
    CNA = models.CharField("TYPE OF HLTH CARE WRKR - CERT NURSE ASST", max_length=2)
    COMPANN = models.CharField("TYPE OF HLTH CARE WRKR - COMPANION", max_length=2)
    DIETICN = models.CharField("TYPE OF HLTH CARE WRKR - DIETITIANNUTRT", max_length=2)
    HHAIDE = models.CharField("TYPE OF HLTH CARE WRKR - HOME CARE AIDE", max_length=2)
    HOSPICE = models.CharField("TYPE OF HLTH CARE WRKR - HOSPICE WORKER", max_length=2)
    HMEMAKER = models.CharField("TYPE OF HLTH CARE WRKR - HOMEMAKER", max_length=2)
    IVTHP = models.CharField("TYPE OF HLTH CARE WRKR - IV THERAPIST", max_length=2)
    MEDLDOC = models.CharField("TYPE OF HLTH CARE WRKR - MEDICAL DOCTOR", max_length=2)
    NURPRACT = models.CharField("TYPE OF HLTH CARE WRKR - NURSEPRACTR", max_length=2)
    NURAIDE = models.CharField("TYPE OF HLTH CARE WRKR - NURSE S AIDE", max_length=2)
    OCCUPTHP = models.CharField("TYPE OF HLTH CARE WRKR - OCCUP THERAP", max_length=2)
    PERSONAL = models.CharField("TYPE OF HLTH CARE WRKR - PERS CARE ATTDT", max_length=2)
    PHYSLTHP = models.CharField("TYPE OF HLTH CARE WRKR - PHYSICL THERAPY", max_length=2)
    RESPTHP = models.CharField("TYPE OF HLTH CARE WRKR - RESPIRA THERAPY", max_length=2)
    SOCIALW = models.CharField("TYPE OF HLTH CARE WRKR - SOCIAL WORKER", max_length=2)
    SPEECTHP = models.CharField("TYPE OF HLTH CARE WRKR - SPEECH THERAPY", max_length=2)
    OTHRHCW = models.CharField("TYPE OF HLTH CARE WRKR - OTHER", max_length=2)
    NONSKILL = models.CharField("TYPE OF HLTH CARE WRKR - NON-SKILLED", max_length=2)
    SKILLED = models.CharField("TYPE OF HLTH CARE WRKR - SKILLED", max_length=2)
    SKILLWOS = models.CharField("SPECIFY TYPE OF SKILLED WORKER", max_length=25)
    OTHCW = models.CharField("TYPE OF HLTH CARE WRKR - SOME OTHER", max_length=2)
    OTHCWOS = models.CharField("SPECIFY OTHER TYPE HEALTH CARE WORKER", max_length=25)
    # --- Reason for / nature of the services received ---
    HOSPITAL = models.CharField("ANY HH CARE SVCE DUE TO HOSPITALIZATION", max_length=2)
    VSTRELCN = models.CharField("ANY HH CARE SVCE RELATED TO HLTH COND", max_length=2)
    TREATMT = models.CharField("PERSON RECEIVED MEDICAL TREATMENT", max_length=2)
    MEDEQUIP = models.CharField("PERSON WAS TAUGHT USE OF MED EQUIPMENT", max_length=2)
    DAILYACT = models.CharField("PERSON WAS HELPED WITH DAILY ACTIVITIES", max_length=2)
    COMPANY = models.CharField("PERSON RECEIVED COMPANIONSHIP SERVICES", max_length=2)
    OTHSVCE = models.CharField("PERSON RECEIVED OTH HOME CARE SERVICES", max_length=2)
    OTHSVCOS = models.CharField("SPECIFY OTHER HOME CARE SRVCE RECEIVED", max_length=25)
    # --- Visit frequency and duration ---
    FREQCY = models.CharField("PROVIDER HELPED EVERY WEEKSOME WEEKS", max_length=2)
    DAYSPWK = models.CharField("# DAYS WEEK PROVIDER CAME", max_length=2)
    DAYSPMO = models.CharField("# DAYS MONTH PROVIDER CAME", max_length=2)
    HOWOFTEN = models.CharField("PROV CAME ONCE PER DAYMORE THAN ONCE", max_length=2)
    TMSPDAY = models.CharField("TIMESDAY PROVIDER CAME TO HOME TO HELP", max_length=2)
    HRSLONG = models.CharField("HOURS EACH VISIT LASTED", max_length=2)
    MINLONG = models.CharField("MINUTES EACH VISIT LASTED", max_length=2)
    SAMESVCE = models.CharField("ANY OTH MONS PER RECEIVED SAME SERVICES", max_length=2)
    HHDAYS = models.CharField("DAYS PER MONTH IN HOME HEALTH, 2009", max_length=2)
    # --- Imputed amounts paid, by payment source (2009) ---
    HHSF09X = models.CharField("AMOUNT PAID, FAMILY (IMPUTED)", max_length=8)
    HHMR09X = models.CharField("AMOUNT PAID, MEDICARE (IMPUTED)", max_length=8)
    HHMD09X = models.CharField("AMOUNT PAID, MEDICAID (IMPUTED)", max_length=8)
    HHPV09X = models.CharField("AMOUNT PAID, PRIVATE INSURANCE (IMPUTED)", max_length=7)
    HHVA09X = models.CharField("AMOUNT PAID, VETERANSCHAMPVA(IMPUTED)", max_length=7)
    HHTR09X = models.CharField("AMOUNT PAID, TRICARE(IMPUTED)", max_length=7)
    HHOF09X = models.CharField("AMOUNT PAID, OTHER FEDERAL (IMPUTED)", max_length=7)
    HHSL09X = models.CharField("AMOUNT PAID, STATE & LOCAL GOV (IMPUTED)", max_length=7)
    HHWC09X = models.CharField("AMOUNT PAID, WORKERS COMP (IMPUTED)", max_length=7)
    HHOR09X = models.CharField("AMOUNT PAID, OTHER PRIVATE (IMPUTED)", max_length=7)
    HHOU09X = models.CharField("AMOUNT PAID, OTHER PUBLIC (IMPUTED)", max_length=6)
    HHOT09X = models.CharField("AMOUNT PAID, OTHER INSURANCE (IMPUTED)", max_length=7)
    # Derived totals over the payment-source columns above
    HHXP09X = models.CharField("SUM OF HHSF09X - HHOT09X (IMPUTED)", max_length=8)
    HHTC09X = models.CharField("HHLD REPORTED TOTAL CHARGE (IMPUTED)", max_length=8)
    # --- Survey design: imputation flag, person weight, variance estimation ---
    IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
    PERWT09F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2009", max_length=12)
    VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2009", max_length=4)
    VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2009", max_length=1)
    # Methods
    def __str__(self):
        """String for representing a HomeHealth09 object"""
        return f"{self.DUPERSID}"
class HomeHealth08(models.Model):
""" Defines the HomeHealth Model for 2008, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "HomeHealth08"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
HHDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
HHDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
MPCELIG = models.CharField("MPC ELIGIBILITY FLAG", max_length=1)
SELFAGEN = models.CharField("DOES PROVIDER WORK FOR AGENCY OR SELF", max_length=2)
HHTYPE = models.CharField("HOME HEALTH EVENT TYPE", max_length=1)
CNA = models.CharField("TYPE OF HLTH CARE WRKR - CERT NURSE ASST", max_length=2)
COMPANN = models.CharField("TYPE OF HLTH CARE WRKR - COMPANION", max_length=2)
DIETICN = models.CharField("TYPE OF HLTH CARE WRKR - DIETITIANNUTRT", max_length=2)
HHAIDE = models.CharField("TYPE OF HLTH CARE WRKR - HOME CARE AIDE", max_length=2)
HOSPICE = models.CharField("TYPE OF HLTH CARE WRKR - HOSPICE WORKER", max_length=2)
HMEMAKER = models.CharField("TYPE OF HLTH CARE WRKR - HOMEMAKER", max_length=2)
IVTHP = models.CharField("TYPE OF HLTH CARE WRKR - IV THERAPIST", max_length=2)
MEDLDOC = models.CharField("TYPE OF HLTH CARE WRKR - MEDICAL DOCTOR", max_length=2)
NURPRACT = models.CharField("TYPE OF HLTH CARE WRKR - NURSEPRACTR", max_length=2)
NURAIDE = models.CharField("TYPE OF HLTH CARE WRKR - NURSE S AIDE", max_length=2)
OCCUPTHP = models.CharField("TYPE OF HLTH CARE WRKR - OCCUP THERAP", max_length=2)
PERSONAL = models.CharField("TYPE OF HLTH CARE WRKR - PERS CARE ATTDT", max_length=2)
PHYSLTHP = models.CharField("TYPE OF HLTH CARE WRKR - PHYSICL THERAPY", max_length=2)
RESPTHP = models.CharField("TYPE OF HLTH CARE WRKR - RESPIRA THERAPY", max_length=2)
SOCIALW = models.CharField("TYPE OF HLTH CARE WRKR - SOCIAL WORKER", max_length=2)
SPEECTHP = models.CharField("TYPE OF HLTH CARE WRKR - SPEECH THERAPY", max_length=2)
OTHRHCW = models.CharField("TYPE OF HLTH CARE WRKR - OTHER", max_length=2)
NONSKILL = models.CharField("TYPE OF HLTH CARE WRKR - NON-SKILLED", max_length=2)
SKILLED = models.CharField("TYPE OF HLTH CARE WRKR - SKILLED", max_length=2)
SKILLWOS | |
else:
c = nv.quantil(1-Rational(alpha, 2))
krit1 = p0 - c*sqrt(p0*(1-p0)/n)
krit2 = p0 + c*sqrt(p0*(1-p0)/n)
krit1 = ceiling(n*krit1) - 1
krit2 = floor(n*krit2) + 1
ab= {i for i in range(krit1+1)}.union({i for i in range(krit2, n+1)})
return ab
def ab_ber_(self, **kwargs):
"""ebenso; zugehörige Methode"""
if kwargs.get('h'):
print("\nZusatz g=ja grafische Darstellung\n")
return
ab_ber, om = self.ab_ber, self.bv.omega
mark = []
for k in om:
if k in ab_ber:
mark += [k]
if kwargs.get('g'):
balken1(self.bv._vert, typ='W', titel='Annahmebereich (hell) und ' + \
'Ablehnungsbereich (dunkel)\nvon $H_0$\n', mark=mark)
return
return self.ab_ber
abBer = ab_ber
AbBer = ab_ber_
@property
def an_ber(self):
"""Annahmebereich von :math:`H_0`"""
ab, om = self.ab_ber, self.bv.omega
return om.difference(ab)
def an_ber_(self, **kwargs):
"""ebenso; zugehörige Methode"""
if kwargs.get('h'):
print("\nZusatz g=ja grafische Darstellung\n")
return
ab_ber, om = self.ab_ber, self.bv.omega
mark = []
for k in om:
if k in ab_ber:
mark += [k]
if kwargs.get('g'):
balken1(self.bv._vert, typ='W', titel='Annahmebereich (hell) und ' + \
'Ablehnungsbereich (dunkel)\nvon $H_0$\n', mark=mark)
return
return self.an_ber
anBer = an_ber
AnBer = an_ber_
    @property
    def k(self):
        """Critical number(s) of the test: a single bound for one-sided
        tests, a (lower, upper) pair for two-sided tests."""
        ab, bv, alpha, seite, verf = self.ab_ber, self.bv, self.args[1], \
            self.args[2], self.args[4]
        if seite in ('l', 'links'):
            # left-sided: largest value still rejected
            return max(ab)
        elif seite in ('r', 'rechts'):
            # right-sided: value just below the smallest rejected one
            return min(ab) - 1
        else:
            # two-sided test; the method depends on the chosen procedure
            if verf in ('S', 'Sigma'):
                # sigma-environment rule; factor matches alpha = 0.1 / 0.05
                fakt = 1.64 if alpha == 0.1 else 1.96
                K = ceiling(bv.erw - fakt*bv.sigma)
                L = floor(bv.erw + fakt*bv.sigma)
                return K, L
            elif verf in ('B', 'BV'):
                # exact binomial: bounds from acceptance region and quantile
                K = max(self.an_ber)
                L = ceiling(bv.quantil(1-Rational(alpha, 2)) + 1)
                return K, L
            else:
                # normal approximation for the relative frequency
                p0, n = self.args[0], self.args[3]
                nv = NormalVerteilung(0, 1)
                c = nv.quantil(1-Rational(alpha, 2))
                krit1 = p0 - c*sqrt(p0*(1-p0)/n)
                krit2 = p0 + c*sqrt(p0*(1-p0)/n)
                # convert to absolute counts, rounding in the safe direction
                krit1 = ceiling(n*krit1) - 1
                krit2 = floor(n*krit2) + 1
                return krit1, krit2
    K = k
    def guete(self, *args, **kwargs):
        """Power function (Güte) of the significance test.

        Called with a single probability p in [0, 1]; returns the
        probability of rejecting H0 when the true parameter is p.
        NOTE(review): the inner closure returns None implicitly when p
        lies on the H0 side of a one-sided test -- oc() relies on that.
        """
        if kwargs.get('h'):
            # help text requested
            print("\nGüte-Funktion (SignifikanzTestP)\n")
            print("Aufruf t . güte( p )\n")
            print(" t SignifikanzTestP")
            print(" p Zahl aus [0,1]\n")
            print("Zusatz g=ja Graf der Funktion\n")
            return
        if kwargs.get('g'):
            # graphical display instead of a value
            return _grafik_guete(self)
        if len(args) != 1:
            print('zufall: ein Argument angeben')
            return
        p = float(args[0])
        if not (isinstance(p, (Rational, float, Float)) and 0<=p<=1):
            print('zufall: Zahl aus dem Intervall [0,1] angeben')
            return
        # K is a scalar for one-sided tests, a pair for two-sided tests
        K, seite, n = self.k, self.args[2], self.args[3]
        p0 = float(self.args[0])
        def guete(p):
            # rejection probability under Bin(n, p)
            F = BinomialVerteilung(n, p).F
            if seite in ('l', 'links'):
                if p <= p0:
                    return float(F(K))
            elif seite in ('r', 'rechts'):
                if p >= p0:
                    return float(1 - F(K-1))
            else:
                return float(F(K[0]) + 1 - F(K[1]-1))
        return guete(p)
def oc(self, *args, **kwargs):
"""Operationscharakteristik"""
if kwargs.get('h'):
print("\nOperationscharakteristik-Funktion (SignifikanzTestP)\n")
print("Aufruf st . oc( p )\n")
print(" st SignifikanzTestP")
print(" p Zahl aus [0,1]\n")
print("Zusatz g=ja Graf der Funktion\n")
return
if kwargs.get('g'):
return _grafik_oc(self)
if len(args) != 1:
print('zufall: ein Argument angeben')
return
p = float(args[0])
if not (isinstance(p, (Rational, float, Float)) and 0<=p<=1):
print('zufall: Zahl aus dem Intervall [0,1] angeben')
return
if self.guete(p):
return 1.0 - self.guete(p)
beta = oc
@property
def regel(self):
"""Entscheidungsregel"""
def dm(x):
return display(Math(x))
print('')
dm('\mathrm{Ermittlung\; der\; Entscheidungsregel\; für\; den\; Signifikanztest}')
seite, verf = self.args[2], self.args[4]
p0, alpha, n = self.args[0], self.args[1], self.args[3]
if verf in ('B', 'BV', 'N', 'NV'):
bv = self.bv
K = self.K
if verf in ('N', 'NV'):
nv0 = NormalVerteilung(0, 1)
sp, sa = str(p0), str(alpha)
if seite in ('l', 'links'):
ss = 'links'
elif seite in ('r', 'rechts'):
ss = 'rechts'
else:
ss = 'zwei'
smy = '{0:.2f}'.format(float(self.bv.erw))
ssi = '{0:.4f}'.format(float(self.bv.sigma))
if verf in ('S', 'Sigma') and alpha == 0.1:
g1 = '{0:.2f}'.format(float(self.bv.erw - 1.64*self.bv.sigma))
g2 = '{0:.2f}'.format(float(self.bv.erw + 1.64*self.bv.sigma))
elif verf in ('S', 'Sigma') and alpha == 0.05:
g1 = '{0:.2f}'.format(float(self.bv.erw - 1.96*self.bv.sigma))
g2 = '{0:.2f}'.format(float(self.bv.erw + 1.96*self.bv.sigma))
if seite in ('l', 'links'):
if verf in ('S', 'Sigma'):
dm('\mathrm{Gegeben}')
dm('\\qquad \mathrm{H_0}:\;p \ge' + sp)
dm('\\qquad\\alpha=' + sa + ',\;n=' + str(n) + ',\;' + \
'\mathrm{' + ss + 'seitiger\;Test}')
dm('\\qquad\mathrm{Prüfgröße}\;\; X=\,\mathrm{"Anzahl\;Treffer", \;\;} \\mu=' + smy + \
',\;\\sigma=' + ssi)
dm('\mathrm{1.64\,\\sigma-Umgebung\;des\;Erwartungswertes}')
dm('\\qquad [\, \\mu-1.64\,\\sigma,\; \\mu+1.64\,\\sigma\, ] = [' + g1 + ',\;' + g2 + ']')
dm('\mathrm{Kritische\;Zahl\;\;}' + str(floor(g1)))
dm('\mathrm{Ablehnungsbereich\;für\;H_0\;\;} X \\le' + str(floor(g1)))
dm('\mathrm{Entscheidungsregel}')
dm('\mathrm{Liegt\;der\;Wert\;der\;Prüfgröße\;im\;Ablehnungsbereich,\;wird\;H_0\;abgelehnt,\;sonst}')
dm('\mathrm{wird\;H_0\;beibehalten}')
elif verf in ('B', 'BV'):
dm('\mathrm{Gegeben}')
dm('\\qquad \mathrm{H_0}:\;p \ge' + sp)
dm('\\qquad\\alpha=' + sa + ',\;n=' + str(n) + ',\;' + \
'\mathrm{' + ss + 'seitiger\;Test}')
dm('\\qquad\\text{Prüfgröße} \;\; X=\\text{"Anzahl Treffer"}')
dm('\\qquad\\text{Verteilung von }X\;\;' + \
latex(bv))
dm('\mathrm{Kritische\;Zahl\;\;}' + str(K))
dm('\mathrm{Ablehnungsbereich\;für\;}H_0\;\; X \\le' + str(K))
dm('\mathrm{Entscheidungsregel}')
dm('\mathrm{Liegt\;der\;Wert\;der\;Prüfgröße\;im\;Ablehnungsbereich,\;wird\;} H_0\\text{ mit einer' + \
' Irrtums-}')
dm('\mathrm{wahrscheinlichkeit\;von\;höchstens\;}'+ sa + '\mathrm{\;abgelehnt,\;sonst\;' + \
'wird\;}H_0\\text{ beibehalten}')
else:
q = nv0.quantil(1-alpha)
g = p0 - q*sqrt(p0*(1-p0)/n)
sq = '{0:.5}'.format(float(q))
sg = '{0:.5}'.format(float(g))
dm('\mathrm{Gegeben}')
dm('\\qquad \mathrm{H_0}:\;p \ge' + sp)
dm('\\qquad\\alpha=' + sa + ',\;n=' + str(n) + ',\;' + \
'\mathrm{' + ss + 'seitiger\;Test}')
dm('\\qquad\mathrm{Prüfgröße\;\;} X=\,\mathrm{"Relative\;Trefferhäufigkeit"}')
dm('\\qquad\mathrm{Verteilung\;von}\;X' + \
'\\quad ' + latex('NormalVerteilung(' + str(p0) + ',\,' + \
'{0:.4f}'.format(float(p0*(1-p0)/n)) + ')'))
print(' ')
dm('\mathrm{Die\;Bestimmung\;von\;}c_\\alpha\mathrm{\;aus\;der\;Gleichung\;}' + \
'\\Phi(c_\\alpha) =' + latex(1-alpha) + '\mathrm{\;ergibt\;\;}' + \
'c_\\alpha =' +'{0:.4f}'.format(q))
dm('\mathrm{(siehe\;Quantile\;der\;Normalverteilung\;oder\;untere\;Grafik;\;sie\;zeigt\;die\;' + \
'Vertei-}')
dm('\mathrm{lungsfunktion\;der\;(0,1)-Normalverteilung\;(Ausschnitt)})')
print(' ')
_grafik_nv()
dm('\mathrm{Die\;Berechnung\;der\;Grenze\;des\;Ablehnungsbereiches\;ergibt\;\;}')
dm('\\qquad ' + str(p0) + '-' + \
sq + '\,\\sqrt{\\frac{' + str(p0) + '\\cdot' + str(1-p0) +'}{' + str(n) + \
'}} =' + sg)
print(' ')
dm('\mathrm{Entscheidungsregel}')
dm('\mathrm{Ist\;die\;relative\;Trefferhäufigkeit\;} \\gt' + sg + \
'\mathrm{,\;so\;wird\;H_0} \mathrm{\;mit\;einer\;Irrtums-}')
dm('\mathrm{wahrscheinlichkeit\;von\;}' + str(alpha)+ '\mathrm{\;abgelehnt,\;sonst\;wird\; H_0} ' + \
'\mathrm{\;beibehalten}')
print(' ')
dm('\mathrm{oder\;\;\;(Verwendung\;der\;absoluten\;Trefferhäufigkeit)}')
dm('\mathrm{Multiplikation\;des\;Grenzwertes\;mit\;} n =' + str(n) + '\mathrm{\;ergibt\;}' + \
str(floor(g*n)) + '\mathrm{\;(Rundung\;in\;sicherer\;Richtung)}')
dm('\mathrm{Ablehnungsbereich\;\;} X \\le' + str(floor(g*n)))
dm('\mathrm{Fällt\;die\;absolute\;Trefferhäufigkeit\;} X \mathrm{\;in\;den\;' + \
'Ablehnungsbereich,\;so\;wird\;H_0} \mathrm{\;mit\;der}')
dm('\mathrm{Irrtumswahrscheinlichkeit\;}' + str(alpha) + \
'\mathrm{\;abgelehnt,\;sonst\;wird\; H_0} \mathrm{\;beibehalten}')
elif seite in ('r', 'rechts'):
if verf in ('S', 'Sigma'):
dm('\mathrm{Gegeben}')
dm('\\qquad \mathrm{H_0}:\;p \le' + sp)
dm('\\qquad\\alpha=' + sa + ',\;n=' + str(n) + ',\;' + \
'\mathrm{' + ss + 'seitiger\;Test}')
dm('\\qquad\mathrm{Prüfgröße}\;\; X=\,\mathrm{"Anzahl\;Treffer", \;\;} \\mu=' + smy + \
',\;\\sigma=' + ssi)
dm('\mathrm{1.64\,\\sigma-Umgebung\;des\;Erwartungswertes}')
dm('\\qquad [\, \\mu-1.64\,\\sigma,\; \\mu+1.64\,\\sigma\, ] = [' + g1 + ',\;' + g2 + ']')
dm('\mathrm{Kritische\;Zahl\;\;}' + str(ceiling(g2)))
dm('\mathrm{Ablehnungsbereich\;für\;H_0\;\;} X \\ge' + str(ceiling(g2)))
dm('\mathrm{Entscheidungsregel}')
dm('\mathrm{Liegt\;der\;Wert\;der\;Prüfgröße\;im\;Ablehnungsbereich,\;wird\;}H_0 \
\mathrm{\;abgelehnt,\;sonst}')
dm('\mathrm{wird\;}H_0\mathrm{\;beibehalten}')
elif verf in ('B', 'BV'):
dm('\mathrm{Gegeben}')
dm('\\qquad \mathrm{H_0}:\;p \le' + sp)
dm('\\qquad\\alpha=' + sa + ',\;n=' + str(n) + ',\;' + \
'\mathrm{' + ss + 'seitiger\;Test}')
dm('\\qquad\mathrm{Prüfgröße\;\;} X=\,\mathrm{"Anzahl\;Treffer"}')
dm('\mathrm{Verteilung\;von}\;X\;\;' + latex(bv))
dm('\mathrm{Kritische\;Zahl\;\;}' + str(self.K))
dm('\mathrm{Ablehnungsbereich\;für\;H_0\;\;} X \\ge' + str(self.K + 1))
dm('\mathrm{Entscheidungsregel}')
dm('\mathrm{Liegt\;der\;Wert\;der\;Prüfgröße\;im\;Ablehnungsbereich,\;wird\;H_0\;mit\;einer}')
dm('\mathrm{Irrtumswahrscheinlichkeit\;von\;höchstens\;}'+ sa + '\mathrm{\;abgelehnt,\;sonst\;}')
dm('\mathrm{wird\;H_0\;beibehalten}')
else:
q = nv0.quantil(1-alpha)
g = p0 + q*sqrt(p0*(1-p0)/n)
sq = '{0:.5}'.format(float(q))
sg = '{0:.5}'.format(float(g))
dm('\mathrm{Gegeben}')
dm('\\qquad \mathrm{H_0:} \;p \le' + sp)
dm('\\qquad\\alpha=' + sa + ',\;n=' + str(n) + ',\;' + \
'\mathrm{' + ss + 'seitiger\;Test}')
dm('\\qquad\mathrm{Prüfgröße\;\;} X=\,\mathrm{"Relative\;Trefferhäufigkeit"}')
dm('\\qquad\mathrm{Verteilung\;von}\;X' + \
'\\quad ' + latex('NormalVerteilung(' + str(p0) + ',' + '\, {0:.4f}'.format(float(p0*(1-p0)/n)) + \
')'))
print(' ')
dm('\mathrm{Die\;Bestimmung\;von\;}c_\\alpha\mathrm{\;aus\;der\;Gleichung\;}' + \
'\\Phi(c_\\alpha) =' + latex(1-alpha) + '\mathrm{\;ergibt\;\;}' + \
'c_\\alpha =' +'{0:.4f}'.format(q))
dm('\mathrm{(siehe\;Quantile\;der\;Normalverteilung\;oder\;untere\;Grafik;\;sie\;zeigt\;die\;' + \
'Vertei-}')
dm('\mathrm{lungsfunktion\;der\;(0,1)-Normalverteilung\;(Ausschnitt)})')
print(' ')
_grafik_nv()
dm('\mathrm{Die\;Berechnung\;der\;Grenze\;des\;Ablehnungsbereiches\;ergibt\;\;}')
dm('\\qquad ' + str(p0) + '+' + \
sq + '\,\\sqrt{\\frac{' + str(p0) + '\\cdot' + str(1-p0) +'}{' + str(n) + \
'}} =' + sg)
print(' ')
dm('\mathrm{Entscheidungsregel}')
dm('\mathrm{Ist\;die\;relative\;Trefferhäufigkeit\;} \\gt' + sg + \
'\mathrm{,\;so\;wird \;H_0} \mathrm{\;mit\;einer\;Irrtums-}')
dm('\mathrm{wahrscheinlichkeit\;von\;}' + str(alpha)+ '\mathrm{\;abgelehnt,\;sonst\;wird\; H_0} ' + \
'\mathrm{\;beibehalten}')
print(' ')
dm('\mathrm{oder\;\;\;(Verwendung\;der\;absoluten\;Trefferhäufigkeit)}')
dm('\mathrm{Multiplikation\;des\;Grenzwertes\;mit\;} n =' + str(n) + '\mathrm{\;ergibt\;}' + \
str(ceiling(g*n)) + '\mathrm{\;(Rundung\;in\;sicherer\;Richtung)}')
dm('\mathrm{Ablehnungsbereich\;\;} X \\ge' + str(ceiling(g*n)))
dm('\mathrm{Fällt\;die\;absolute\;Trefferhäufigkeit\;} X \mathrm{\;in\;den\;' + \
'Ablehnungsbereich,\;so\;wird\; H_0} \mathrm{\;mit\;der}')
dm('\mathrm{Irrtumswahrscheinlichkeit\;}' + str(alpha) + \
'\mathrm{\;abgelehnt,\;sonst\;wird \;H_0} \mathrm{\;beibehalten}')
else:
if verf in ('S', 'Sigma'):
dm('\mathrm{Gegeben}')
dm('\\qquad \mathrm{H_0}:\;p=' + sp)
dm('\\qquad\\alpha=' + sa + ',\;n=' + str(n) + ',\;' + \
'\mathrm{' + ss + 'seitiger\;Test}')
dm('\\qquad\mathrm{Prüfgröße}\;\; X=\,\mathrm{"Anzahl\;Treffer", \;\;} \\mu=' + smy + ',\;\\sigma=' + ssi)
if alpha ==0.1:
dm('\mathrm{1.64\,\\sigma-Umgebung\;des\;Erwartungswertes}')
dm('\\qquad [\, \\mu-1.64\,\\sigma,\; \\mu+1.64\,\\sigma\, ] = [' + g1 + ',\;' + g2 + ']')
elif alpha == 0.05:
dm('\mathrm{1.96\,\\sigma-Umgebung\;des\;Erwartungswertes}')
dm('\\qquad [\, \\mu-1.96\,\\sigma,\; \\mu+1.96\,\\sigma\, ] = [' + g1 + ',\;' + g2 + ']')
dm('\mathrm{Kritische\;Zahlen\;\;}' + str(floor(g1)) + ',\;' + str(ceiling(g2)))
dm('\mathrm{Ablehnungsbereich\;für\;H_0\;\;} X \\le' + str(floor(g1)) + \
'\mathrm{\;\;oder\;\; X \\ge}' + | |
import csv
import cv2
import numpy as np
import pandas as pd
import sys
from datetime import datetime
from numpy.random import RandomState
import keras
import tensorflow as tf
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D
def DrivingNetV1():
    """Baseline steering regressor: crop, normalize, flatten, single neuron.

    Input is a 160x320x3 frame; output is one steering value. Note the
    model is returned *uncompiled* — the caller chooses loss/optimizer.
    """
    layers = [
        # Drop 90 rows of sky from the top and 20 rows of hood from the bottom.
        Cropping2D(cropping=((90, 20), (0, 0)), input_shape=(160, 320, 3)),
        # Scale pixels from [0, 255] into [-0.5, 0.5].
        Lambda(lambda frame: frame / 255.0 - 0.5),
        Flatten(),
        Dense(1),
    ]
    net = Sequential(layers)
    return net
def NVIDIANetV0(lr=1e-3):
    """NVIDIA end-to-end driving CNN with a linear fully-connected head.

    Args:
        lr: Adam learning rate.

    Returns:
        A compiled (MSE loss, Adam optimizer) Sequential model mapping a
        160x320x3 frame to a single steering value.
    """
    layers = [
        # Normalize pixels from [0, 255] into [-0.5, 0.5].
        Lambda(lambda frame: frame / 255.0 - 0.5, input_shape=(160, 320, 3)),
        # Remove sky (top 70 rows) and car hood (bottom 25 rows).
        Cropping2D(cropping=((70, 25), (0, 0))),
        Conv2D(24, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(36, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(48, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(64, 3, activation='relu', padding='valid'),
        Conv2D(64, 3, activation='relu', padding='valid'),
        Flatten(),
        Dense(100, activation='linear'),
        Dense(50, activation='linear'),
        Dense(10, activation='linear'),
        Dense(1, activation='linear'),
    ]
    net = Sequential(layers, name="NVIDIANetV0")
    net.compile(loss='mse', optimizer=keras.optimizers.Adam(learning_rate=lr))
    return net
def NVIDIANetV1(lr=1e-3):
    """NVIDIA driving CNN, tanh dense head, curvature-to-angle output.

    The final dense neuron is interpreted as curvature r = 1/R; the last
    Lambda converts it to a steering angle in degrees via
    alpha = atan(wheelbase * r) * 57.3, assuming a 2 m wheelbase.

    Args:
        lr: Adam learning rate.

    Returns:
        A compiled (MSE loss, Adam optimizer) Sequential model.
    """
    layers = [
        # Normalize pixels from [0, 255] into [-0.5, 0.5].
        Lambda(lambda frame: frame / 255.0 - 0.5, input_shape=(160, 320, 3)),
        # Remove sky (top 70 rows) and car hood (bottom 25 rows).
        Cropping2D(cropping=((70, 25), (0, 0))),
        Conv2D(24, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(36, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(48, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(64, 3, activation='relu', padding='valid'),
        Conv2D(64, 3, activation='relu', padding='valid'),
        Flatten(),
        Dense(100, activation='tanh'),
        Dense(50, activation='tanh'),
        Dense(10, activation='tanh'),
        Dense(1, activation='linear'),
        # Curvature r = 1/R -> steering angle [deg]: atan(2 * r) * 57.3.
        Lambda(lambda r: tf.multiply(tf.atan(tf.multiply(r, 2)), 57.3)),
    ]
    net = Sequential(layers, name="NVIDIANetV1")
    net.compile(loss='mse', optimizer=keras.optimizers.Adam(learning_rate=lr))
    return net
def NVIDIANetV2(lr=1e-3):
    """NVIDIA driving CNN, linear dense head, curvature-to-angle output.

    Same backbone as NVIDIANetV1 but with linear activations in the dense
    layers. The final Lambda maps curvature r = 1/R to a steering angle in
    degrees (alpha = atan(wheelbase * r) * 57.3, wheelbase assumed 2 m).

    Args:
        lr: Adam learning rate.

    Returns:
        A compiled (MSE loss, Adam optimizer) Sequential model.
    """
    layers = [
        # Normalize pixels from [0, 255] into [-0.5, 0.5].
        Lambda(lambda frame: frame / 255.0 - 0.5, input_shape=(160, 320, 3)),
        # Remove sky (top 70 rows) and car hood (bottom 25 rows).
        Cropping2D(cropping=((70, 25), (0, 0))),
        Conv2D(24, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(36, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(48, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(64, 3, activation='relu', padding='valid'),
        Conv2D(64, 3, activation='relu', padding='valid'),
        Flatten(),
        Dense(100, activation='linear'),
        Dense(50, activation='linear'),
        Dense(10, activation='linear'),
        Dense(1, activation='linear'),
        # Curvature r = 1/R -> steering angle [deg]: atan(2 * r) * 57.3.
        Lambda(lambda r: tf.multiply(tf.atan(tf.multiply(r, 2)), 57.3)),
    ]
    net = Sequential(layers, name="NVIDIANetV2")
    net.compile(loss='mse', optimizer=keras.optimizers.Adam(learning_rate=lr))
    return net
def NVIDIANetV3(lr=1e-3):
    """NVIDIANetV1 variant with Dropout(0.5) after every hidden dense layer.

    The final Lambda maps curvature r = 1/R to a steering angle in degrees
    (alpha = atan(wheelbase * r) * 57.3, wheelbase assumed 2 m).

    Args:
        lr: Adam learning rate.

    Returns:
        A compiled (MSE loss, Adam optimizer) Sequential model.
    """
    layers = [
        # Normalize pixels from [0, 255] into [-0.5, 0.5].
        Lambda(lambda frame: frame / 255.0 - 0.5, input_shape=(160, 320, 3)),
        # Remove sky (top 70 rows) and car hood (bottom 25 rows).
        Cropping2D(cropping=((70, 25), (0, 0))),
        Conv2D(24, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(36, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(48, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(64, 3, activation='relu', padding='valid'),
        Conv2D(64, 3, activation='relu', padding='valid'),
        Flatten(),
        Dense(100, activation='tanh'),
        Dropout(0.5),
        Dense(50, activation='tanh'),
        Dropout(0.5),
        Dense(10, activation='tanh'),
        Dropout(0.5),
        Dense(1, activation='linear'),
        # Curvature r = 1/R -> steering angle [deg]: atan(2 * r) * 57.3.
        Lambda(lambda r: tf.multiply(tf.atan(tf.multiply(r, 2)), 57.3)),
    ]
    net = Sequential(layers, name="NVIDIANetV3")
    net.compile(loss='mse', optimizer=keras.optimizers.Adam(learning_rate=lr))
    return net
def NVIDIANetV4(lr=1e-3):
    """NVIDIANetV3 variant with decaying dropout rates (0.5 / 0.25 / 0.125).

    The final Lambda maps curvature r = 1/R to a steering angle in degrees
    (alpha = atan(wheelbase * r) * 57.3, wheelbase assumed 2 m).

    Args:
        lr: Adam learning rate.

    Returns:
        A compiled (MSE loss, Adam optimizer) Sequential model.
    """
    layers = [
        # Normalize pixels from [0, 255] into [-0.5, 0.5].
        Lambda(lambda frame: frame / 255.0 - 0.5, input_shape=(160, 320, 3)),
        # Remove sky (top 70 rows) and car hood (bottom 25 rows).
        Cropping2D(cropping=((70, 25), (0, 0))),
        Conv2D(24, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(36, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(48, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(64, 3, activation='relu', padding='valid'),
        Conv2D(64, 3, activation='relu', padding='valid'),
        Flatten(),
        Dense(100, activation='tanh'),
        Dropout(0.5),
        Dense(50, activation='tanh'),
        Dropout(0.25),
        Dense(10, activation='tanh'),
        Dropout(0.125),
        Dense(1, activation='linear'),
        # Curvature r = 1/R -> steering angle [deg]: atan(2 * r) * 57.3.
        Lambda(lambda r: tf.multiply(tf.atan(tf.multiply(r, 2)), 57.3)),
    ]
    net = Sequential(layers, name="NVIDIANetV4")
    net.compile(loss='mse', optimizer=keras.optimizers.Adam(learning_rate=lr))
    return net
def NVIDIANetV5(lr=1e-3):
    """NVIDIANetV4 variant: dropout (0.5 / 0.25) after the first two dense layers only.

    The final Lambda maps curvature r = 1/R to a steering angle in degrees
    (alpha = atan(wheelbase * r) * 57.3, wheelbase assumed 2 m).

    Args:
        lr: Adam learning rate.

    Returns:
        A compiled (MSE loss, Adam optimizer) Sequential model.
    """
    layers = [
        # Normalize pixels from [0, 255] into [-0.5, 0.5].
        Lambda(lambda frame: frame / 255.0 - 0.5, input_shape=(160, 320, 3)),
        # Remove sky (top 70 rows) and car hood (bottom 25 rows).
        Cropping2D(cropping=((70, 25), (0, 0))),
        Conv2D(24, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(36, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(48, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(64, 3, activation='relu', padding='valid'),
        Conv2D(64, 3, activation='relu', padding='valid'),
        Flatten(),
        Dense(100, activation='tanh'),
        Dropout(0.5),
        Dense(50, activation='tanh'),
        Dropout(0.25),
        Dense(10, activation='tanh'),
        Dense(1, activation='linear'),
        # Curvature r = 1/R -> steering angle [deg]: atan(2 * r) * 57.3.
        Lambda(lambda r: tf.multiply(tf.atan(tf.multiply(r, 2)), 57.3)),
    ]
    net = Sequential(layers, name="NVIDIANetV5")
    net.compile(loss='mse', optimizer=keras.optimizers.Adam(learning_rate=lr))
    return net
def NVIDIANetV6(lr=1e-3):
    """NVIDIANetV5 variant with an extra Dropout(0.5) right after Flatten.

    The final Lambda maps curvature r = 1/R to a steering angle in degrees
    (alpha = atan(wheelbase * r) * 57.3, wheelbase assumed 2 m).

    Args:
        lr: Adam learning rate.

    Returns:
        A compiled (MSE loss, Adam optimizer) Sequential model.
    """
    layers = [
        # Normalize pixels from [0, 255] into [-0.5, 0.5].
        Lambda(lambda frame: frame / 255.0 - 0.5, input_shape=(160, 320, 3)),
        # Remove sky (top 70 rows) and car hood (bottom 25 rows).
        Cropping2D(cropping=((70, 25), (0, 0))),
        Conv2D(24, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(36, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(48, 5, strides=2, activation='relu', padding='valid'),
        Conv2D(64, 3, activation='relu', padding='valid'),
        Conv2D(64, 3, activation='relu', padding='valid'),
        Flatten(),
        Dropout(0.5),
        Dense(100, activation='tanh'),
        Dropout(0.5),
        Dense(50, activation='tanh'),
        Dropout(0.25),
        Dense(10, activation='tanh'),
        Dense(1, activation='linear'),
        # Curvature r = 1/R -> steering angle [deg]: atan(2 * r) * 57.3.
        Lambda(lambda r: tf.multiply(tf.atan(tf.multiply(r, 2)), 57.3)),
    ]
    net = Sequential(layers, name="NVIDIANetV6")
    net.compile(loss='mse', optimizer=keras.optimizers.Adam(learning_rate=lr))
    return net
def ModNVIDIANetV1( lr=1e-3):
model = Sequential( name = "ModNVIDIANetV1" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
    #Keeping padding as "same" and applying a max pool after each conv
model.add( Conv2D( 24, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 36, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 48, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 64, 3, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 64, 3, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( pool_size=(4, 2) ) ) #forcing to this output to become an "flat"
model.add( Flatten( ) )
#model.add( Dense(1164, activation='relu' ) )
#model.add( Dropout(0.2))
model.add( Dense(300, activation='tanh' ) )
model.add( Dense(100, activation='tanh' ) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dense(1, activation='linear' ) )
    #Converting curvature to angle, assuming wheelbase of 2 meters, then going from rad to deg
    #The network output was supposed to be 1/R (r); the function then converts it to steering angle (alpha) [deg]
    # alpha = atan(l*r)*57.3. l = wheelbase, supposed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt | |
: "uk.gov.gchq.gaffer.operation.impl.export.set.ExportToSet"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.DiscardOutput"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.set.GetSetExport",
"start" : 2,
"end" : 4
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(),
g.ExportToSet(),
g.DiscardOutput(),
g.GetSetExport(
end=4,
start=2
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.OperationChain",
"operations" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.set.ExportToSet",
"key" : "edges"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.DiscardOutput"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.set.ExportToSet",
"key" : "entities"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.DiscardOutput"
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.GetExports",
"getExports" : [ {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.set.GetSetExport",
"key" : "edges",
"start" : 0
}, {
"class" : "uk.gov.gchq.gaffer.operation.impl.export.set.GetSetExport",
"key" : "entities",
"start" : 0
} ]
} ]
}
''',
g.OperationChain(
operations=[
g.GetAllElements(),
g.ExportToSet(
key="edges"
),
g.DiscardOutput(),
g.GetAllElements(),
g.ExportToSet(
key="entities"
),
g.DiscardOutput(),
g.GetExports(
get_exports=[
g.GetSetExport(
start=0,
key="edges"
),
g.GetSetExport(
start=0,
key="entities"
)
]
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.generate.GenerateElements",
"elementGenerator" : {
"class" : "uk.gov.gchq.gaffer.doc.operation.generator.ElementGenerator"
},
"input" : [ "1,1", "1,2,1" ]
}
''',
g.GenerateElements(
element_generator=g.ElementGenerator(
fields={},
class_name="uk.gov.gchq.gaffer.doc.operation.generator.ElementGenerator"
),
input=[
"1,1",
"1,2,1"
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.generate.GenerateElements",
"elementGenerator" : {
"class" : "uk.gov.gchq.gaffer.doc.operation.GenerateElementsExample$DomainObjectGenerator"
},
"input" : [ {
"class" : "uk.gov.gchq.gaffer.doc.operation.GenerateElementsExample$DomainObject1",
"a" : 1,
"c" : 1
}, {
"class" : "uk.gov.gchq.gaffer.doc.operation.GenerateElementsExample$DomainObject2",
"a" : 1,
"b" : 2,
"c" : 1
} ]
}
''',
g.GenerateElements(
element_generator=g.ElementGenerator(
class_name="uk.gov.gchq.gaffer.doc.operation.GenerateElementsExample$DomainObjectGenerator",
fields={}
),
input=[
{'c': 1,
'class': 'uk.gov.gchq.gaffer.doc.operation.GenerateElementsExample$DomainObject1',
'a': 1},
{'b': 2, 'c': 1,
'class': 'uk.gov.gchq.gaffer.doc.operation.GenerateElementsExample$DomainObject2',
'a': 1}
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.generate.GenerateObjects",
"elementGenerator" : {
"class" : "uk.gov.gchq.gaffer.doc.operation.generator.ObjectGenerator"
},
"input" : [ {
"group" : "entity",
"vertex" : 6,
"properties" : {
"count" : 1
},
"class" : "uk.gov.gchq.gaffer.data.element.Entity"
}, {
"group" : "edge",
"source" : 5,
"destination" : 6,
"directed" : true,
"properties" : {
"count" : 1
},
"class" : "uk.gov.gchq.gaffer.data.element.Edge"
} ]
}
''',
g.GenerateObjects(
input=[
g.Entity(
properties={'count': 1},
vertex=6,
group="entity"
),
g.Edge(
directed=True,
source=5,
properties={'count': 1},
group="edge",
destination=6
)
],
element_generator=g.ElementGenerator(
fields={},
class_name="uk.gov.gchq.gaffer.doc.operation.generator.ObjectGenerator"
)
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.generate.GenerateObjects",
"elementGenerator" : {
"class" : "uk.gov.gchq.gaffer.doc.operation.GenerateObjectsExample$DomainObjectGenerator"
},
"input" : [ {
"group" : "entity",
"vertex" : 6,
"properties" : {
"count" : 1
},
"class" : "uk.gov.gchq.gaffer.data.element.Entity"
}, {
"group" : "edge",
"source" : 5,
"destination" : 6,
"directed" : true,
"properties" : {
"count" : 1
},
"class" : "uk.gov.gchq.gaffer.data.element.Edge"
} ]
}
''',
g.GenerateObjects(
element_generator=g.ElementGenerator(
class_name="uk.gov.gchq.gaffer.doc.operation.GenerateObjectsExample$DomainObjectGenerator",
fields={}
),
input=[
g.Entity(
properties={'count': 1},
vertex=6,
group="entity"
),
g.Edge(
directed=True,
group="edge",
properties={'count': 1},
source=5,
destination=6
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAdjacentIds",
"input" : [ {
"vertex" : 2,
"class" : "uk.gov.gchq.gaffer.operation.data.EntitySeed"
} ]
}
''',
g.GetAdjacentIds(
input=[
g.EntitySeed(
vertex=2
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAdjacentIds",
"includeIncomingOutGoing" : "OUTGOING",
"input" : [ {
"vertex" : 2,
"class" : "uk.gov.gchq.gaffer.operation.data.EntitySeed"
} ]
}
''',
g.GetAdjacentIds(
input=[
g.EntitySeed(
vertex=2
)
],
include_incoming_out_going="OUTGOING"
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAdjacentIds",
"view" : {
"edges" : {
"edge" : {
"preAggregationFilterFunctions" : [ {
"predicate" : {
"class" : "uk.gov.gchq.koryphe.impl.predicate.IsMoreThan",
"orEqualTo" : false,
"value" : 1
},
"selection" : [ "count" ]
} ]
}
},
"entities" : { }
},
"includeIncomingOutGoing" : "OUTGOING",
"input" : [ {
"vertex" : 2,
"class" : "uk.gov.gchq.gaffer.operation.data.EntitySeed"
} ]
}
''',
g.GetAdjacentIds(
view=g.View(
entities=[
],
edges=[
g.ElementDefinition(
pre_aggregation_filter_functions=[
g.PredicateContext(
selection=[
"count"
],
predicate=g.IsMoreThan(
value=1,
or_equal_to=False
)
)
],
group="edge"
)
]
),
input=[
g.EntitySeed(
vertex=2
)
],
include_incoming_out_going="OUTGOING"
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements"
}
''',
g.GetAllElements()
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements",
"view" : {
"edges" : {
"edge" : {
"preAggregationFilterFunctions" : [ {
"predicate" : {
"class" : "uk.gov.gchq.koryphe.impl.predicate.IsMoreThan",
"orEqualTo" : false,
"value" : 2
},
"selection" : [ "count" ]
} ]
}
},
"entities" : {
"entity" : {
"preAggregationFilterFunctions" : [ {
"predicate" : {
"class" : "uk.gov.gchq.koryphe.impl.predicate.IsMoreThan",
"orEqualTo" : false,
"value" : 2
},
"selection" : [ "count" ]
} ]
}
}
}
}
''',
g.GetAllElements(
view=g.View(
entities=[
g.ElementDefinition(
pre_aggregation_filter_functions=[
g.PredicateContext(
selection=[
"count"
],
predicate=g.IsMoreThan(value=2,
or_equal_to=False)
)
],
group="entity"
)
],
edges=[
g.ElementDefinition(
pre_aggregation_filter_functions=[
g.PredicateContext(
selection=[
"count"
],
predicate=g.IsMoreThan(value=2,
or_equal_to=False)
)
],
group="edge"
)
]
)
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.job.GetAllJobDetails"
}
''',
g.GetAllJobDetails()
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetElements",
"input" : [ {
"vertex" : 2,
"class" : "uk.gov.gchq.gaffer.operation.data.EntitySeed"
}, {
"source" : 2,
"destination" : 3,
"directedType" : "EITHER",
"matchedVertex" : "SOURCE",
"class" : "uk.gov.gchq.gaffer.operation.data.EdgeSeed"
} ],
"seedMatching": "EQUAL"
}
''',
g.GetElements(
input=[
g.EntitySeed(
vertex=2
),
g.EdgeSeed(
directed_type="EITHER",
source=2,
destination=3,
matched_vertex="SOURCE"
)
],
seed_matching=g.SeedMatchingType.EQUAL
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetElements",
"input" : [ {
"vertex" : 2,
"class" : "uk.gov.gchq.gaffer.operation.data.EntitySeed"
}],
"view": {
"allEdges": true,
"allEntities": true
}
}
''',
g.GetElements(
input=[
g.EntitySeed(
vertex=2
)
],
view=g.View(
all_edges=True,
all_entities=True
)
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetElements",
"input" : [ {
"vertex" : 2,
"class" : "uk.gov.gchq.gaffer.operation.data.EntitySeed"
}],
"view": {
"allEdges": true
}
}
''',
g.GetElements(
input=[
g.EntitySeed(
vertex=2
)
],
view=g.View(
all_edges=True,
all_entities=False
)
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetElements",
"input" : [ {
"vertex" : 2,
"class" : "uk.gov.gchq.gaffer.operation.data.EntitySeed"
}],
"view": {
"allEntities": true
}
}
''',
g.GetElements(
input=[
g.EntitySeed(
vertex=2
)
],
view=g.View(
all_entities=True
)
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetElements",
"view" : {
"edges" : {
"edge" : {
"preAggregationFilterFunctions" : [ {
"predicate" : {
"class" : "uk.gov.gchq.koryphe.impl.predicate.IsMoreThan",
"orEqualTo" : false,
"value" : 1
},
"selection" : [ "count" ]
} ]
}
},
"entities" : {
"entity" : {
"preAggregationFilterFunctions" : [ {
"predicate" : {
"class" : "uk.gov.gchq.koryphe.impl.predicate.IsMoreThan",
"orEqualTo" : false,
"value" : 1
},
"selection" : [ "count" ]
} ]
}
}
},
"input" : [ {
"vertex" : 2,
"class" : "uk.gov.gchq.gaffer.operation.data.EntitySeed"
}, {
"source" : 2,
"destination" : 3,
"directedType" : "EITHER",
"matchedVertex" : "SOURCE",
"class" : "uk.gov.gchq.gaffer.operation.data.EdgeSeed"
} ]
}
''',
g.GetElements(
view=g.View(
edges=[
g.ElementDefinition(
group="edge",
pre_aggregation_filter_functions=[
g.PredicateContext(
selection=[
"count"
],
predicate=g.IsMoreThan(value=1,
or_equal_to=False)
)
]
)
],
entities=[
g.ElementDefinition(
group="entity",
pre_aggregation_filter_functions=[
g.PredicateContext(
selection=[
"count"
],
predicate=g.IsMoreThan(value=1,
or_equal_to=False)
)
]
)
]
),
input=[
g.EntitySeed(
vertex=2
),
g.EdgeSeed(
source=2,
matched_vertex="SOURCE",
directed_type="EITHER",
destination=3
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetElements",
"input" : [ {
"vertex" : 2,
"class" : "uk.gov.gchq.gaffer.operation.data.EntitySeed"
} ]
}
''',
g.GetElements(
input=[
g.EntitySeed(
vertex=2
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetElements",
"input" : [ {
"source" : 1,
"destination" : 2,
"directedType" : "EITHER",
"matchedVertex" : "SOURCE",
"class" : "uk.gov.gchq.gaffer.operation.data.EdgeSeed"
} ]
}
''',
g.GetElements(
input=[
g.EdgeSeed(
source=1,
directed_type="EITHER",
matched_vertex="SOURCE",
destination=2
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetElements",
"view" : {
"edges" : {
"edge" : {
"preAggregationFilterFunctions" : [ {
"predicate" : {
"class" : "uk.gov.gchq.koryphe.impl.predicate.IsMoreThan",
"orEqualTo" : false,
"value" : 1
},
"selection" : [ "count" ]
} ]
}
},
"entities" : {
"entity" : {
"preAggregationFilterFunctions" : [ {
"predicate" : {
"class" : "uk.gov.gchq.koryphe.impl.predicate.IsMoreThan",
"orEqualTo" : false,
"value" : 1
},
"selection" : [ "count" ]
} ]
}
}
},
"input" : [ {
"source" : 1,
"destination" : 2,
"directedType" : "EITHER",
"matchedVertex" : "SOURCE",
"class" : "uk.gov.gchq.gaffer.operation.data.EdgeSeed"
} ]
}
''',
g.GetElements(
view=g.View(
edges=[
g.ElementDefinition(
group="edge",
pre_aggregation_filter_functions=[
g.PredicateContext(
predicate=g.IsMoreThan(
value=1,
or_equal_to=False
),
selection=[
"count"
]
)
]
)
],
entities=[
g.ElementDefinition(
group="entity",
pre_aggregation_filter_functions=[
g.PredicateContext(
selection=[
"count"
],
predicate=g.IsMoreThan(value=1,
or_equal_to=False)
)
]
)
]
),
input=[
g.EdgeSeed(
matched_vertex="SOURCE",
source=1,
directed_type="EITHER",
destination=2
)
]
)
],
[
'''
{
"class" : "uk.gov.gchq.gaffer.operation.impl.get.GetElements",
"view" : {
"edges" : { },
"entities" : {
"entity" : {
"preAggregationFilterFunctions" : [ {
"predicate" : {
"class" : "uk.gov.gchq.koryphe.impl.predicate.Or",
"predicates" : [ {
"class" : "uk.gov.gchq.koryphe.impl.predicate.IsLessThan",
"orEqualTo" : false,
"value" : 2
}, {
"class" : "uk.gov.gchq.koryphe.impl.predicate.IsMoreThan",
"orEqualTo" : false,
"value" : 5
} ]
},
"selection" : [ "count" ]
} ]
}
}
},
"input" : [ {
"vertex" : | |
<reponame>yisibl/picosvg
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from math import degrees, pi
from picosvg.svg_transform import *
from typing import Tuple
@pytest.mark.parametrize(
    "transform, expected_result",
    [
        # translate(tx)
        ("translate(-5)", Affine2D(1, 0, 0, 1, -5, 0)),
        # translate(tx ty)
        ("translate(3.5, -0.65)", Affine2D(1, 0, 0, 1, 3.5, -0.65)),
        # scale(sx)
        ("scale(2)", Affine2D(2, 0, 0, 2, 0, 0)),
        # scale(sx,sy)
        ("scale(-2 -3)", Affine2D(-2, 0, 0, -3, 0, 0)),
        # rotate(angle)
        (f"rotate({degrees(pi / 4)})", Affine2D(0.707, 0.707, -0.707, 0.707, 0, 0)),
        # rotate(angle cx cy)
        (f"rotate({degrees(pi / 2)}, 5, 6)", Affine2D(0, 1, -1, 0, 11, 1)),
        # skewX(angle)
        (f"skewx({degrees(pi / 8)})", Affine2D(1, 0, 0.414, 1, 0, 0)),
        # skewY(angle)
        (f"skewY({degrees(pi / 8)})", Affine2D(1, 0.414, 0, 1, 0, 0)),
        # example from FontTools
        (
            "matrix(2, 0, 0, 3, 1, 6) matrix(4, 3, 2, 1, 5, 6)",
            Affine2D(8, 9, 4, 3, 11, 24),
        ),
        # svg spec example
        # 255 decimal expected part changed from 03 to 061
        (
            "translate(50 90),rotate(-45) translate(130,160)",
            Affine2D(0.707, -0.707, 0.707, 0.707, 255.061, 111.213),
        ),
        # example from Noto
        (
            "rotate(150)translate(0,6)rotate(66)",
            Affine2D(a=-0.809, b=-0.588, c=0.588, d=-0.809, e=-2.999, f=-5.196),
        ),
        # Crafted example
        (
            # "rotate (180)\ttranslate\t(0 6)\n\t",
            "rotate (180)\ttranslate(0 6)\n\t",
            Affine2D(-1, 0, 0, -1, 0, -6),
        ),
        # found in the wild, contains some odd spacing
        (
            "matrix( -1,0,0,1,3717.75,0 )",
            Affine2D(-1, 0, 0, 1, 3717.75, 0),
        ),
    ],
)
def test_parse_svg_transform(transform: str, expected_result: Affine2D):
    """Each SVG transform list must parse/compose into the expected Affine2D.

    NOTE(review): annotation corrected — the expected values above are
    Affine2D instances, not tuples of str. Comparison uses a relative
    tolerance because expected matrices are rounded to ~3 decimals.
    """
    actual = parse_svg_transform(transform)
    # Printed output is only shown by pytest on failure; kept as a debug aid.
    print(f"A: {actual}")
    print(f"E: {expected_result}")
    assert actual == pytest.approx(expected_result, rel=1e-3)
class TestAffine2D:
def test_map_point(self):
t = Affine2D(2, 0, 0, 1, 10, 20)
p = t.map_point((-3, 4))
assert isinstance(p, Point)
assert p == Point(4, 24)
assert Affine2D(1, 0.5, -0.5, 1, 0, 0).map_point(Point(2, 2)) == Point(1.0, 3.0)
def test_map_vector(self):
v = Affine2D(2, 0, 0, -1, 0, 0).map_vector((1, 1))
assert isinstance(v, Vector)
assert v == Vector(2, -1)
# vectors are unaffected by translation
v = Vector(-3, 4)
assert Affine2D(1, 0, 0, 1, 40, -50).map_vector(v) == v
def test_determinant(self):
assert Affine2D(1, 2, 3, 4, 0, 0).determinant() == (1 * 4 - 2 * 3)
def test_is_degenerate(self):
assert not Affine2D(1, 2, 3, 4, 5, 6).is_degenerate()
assert not Affine2D.identity().is_degenerate()
assert Affine2D.degenerate().is_degenerate()
assert Affine2D(-1, 2 / 3, 3 / 2, -1, 0, 0).is_degenerate()
assert Affine2D(
float_info.epsilon,
float_info.epsilon,
float_info.epsilon,
float_info.epsilon,
0,
0,
).is_degenerate()
def test_scale_0_is_degenerate(self):
assert not Affine2D.identity().scale(1, 1).is_degenerate()
assert Affine2D.identity().scale(0, 1).is_degenerate()
assert Affine2D.identity().scale(1, 0).is_degenerate()
assert Affine2D.identity().scale(0, 0).is_degenerate()
def test_inverse(self):
t = Affine2D.identity().translate(2, 3).scale(4, 5)
p0 = Point(12, 34)
p1 = t.map_point(p0)
it = t.inverse()
p2 = it.map_point(p1)
assert p2 == p0
assert Affine2D.degenerate().inverse() == Affine2D.degenerate()
t = Affine2D(1, 1, 1, 1, 0, 0).inverse()
assert t.is_degenerate()
@pytest.mark.parametrize(
"src, dest, preserveAspectRatio, expected",
[
((0, 0, 10, 10), (0, 0, 1000, 1000), "none", (100, 0, 0, 100, 0, 0)),
((0, 10, 10, -10), (0, 0, 1000, 1000), "none", (100, 0, 0, -100, 0, 1000)),
((0, 0, 0, 0), (0, 0, 1000, 1000), "none", (1, 0, 0, 1, 0, 0)),
((0, 0, 10, 10), (0, 0, 0, 1000), "none", (0, 0, 0, 0, 0, 0)),
# src.w > src.h, no preserveAspectRatio, scale_y > scale_x
((0, 0, 10, 5), (0, 0, 100, 100), "none", (10, 0, 0, 20, 0, 0)),
# src.w < src.h, no preserveAspectRatio, scale_x > scale_y
((0, 0, 5, 10), (0, 0, 100, 100), "none", (20, 0, 0, 10, 0, 0)),
# src.w > src.h, preserveAspectRatio ("meet" default): scale = min(sx, sy)
((0, 0, 10, 5), (0, 0, 100, 100), "xMinYMin", (10, 0, 0, 10, 0, 0)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMidYMin", (10, 0, 0, 10, 0, 0)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMaxYMin", (10, 0, 0, 10, 0, 0)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMinYMid", (10, 0, 0, 10, 0, 25)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMidYMid", (10, 0, 0, 10, 0, 25)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMaxYMid", (10, 0, 0, 10, 0, 25)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMinYMax", (10, 0, 0, 10, 0, 50)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMidYMax", (10, 0, 0, 10, 0, 50)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMaxYMax", (10, 0, 0, 10, 0, 50)),
# src.w < src.h, preserveAspectRatio ("meet" default): scale = min(sx, sy)
((0, 0, 5, 10), (0, 0, 100, 100), "xMinYMin", (10, 0, 0, 10, 0, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMidYMin", (10, 0, 0, 10, 25, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMaxYMin", (10, 0, 0, 10, 50, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMinYMid", (10, 0, 0, 10, 0, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMidYMid", (10, 0, 0, 10, 25, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMaxYMid", (10, 0, 0, 10, 50, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMinYMax", (10, 0, 0, 10, 0, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMidYMax", (10, 0, 0, 10, 25, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMaxYMax", (10, 0, 0, 10, 50, 0)),
# src.w > src.h, preserveAspectRatio (explicit "meet"): scale = min(sx, sy)
((0, 0, 10, 5), (0, 0, 100, 100), "xMinYMin meet", (10, 0, 0, 10, 0, 0)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMidYMin meet", (10, 0, 0, 10, 0, 0)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMaxYMin meet", (10, 0, 0, 10, 0, 0)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMinYMid meet", (10, 0, 0, 10, 0, 25)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMidYMid meet", (10, 0, 0, 10, 0, 25)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMaxYMid meet", (10, 0, 0, 10, 0, 25)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMinYMax meet", (10, 0, 0, 10, 0, 50)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMidYMax meet", (10, 0, 0, 10, 0, 50)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMaxYMax meet", (10, 0, 0, 10, 0, 50)),
# src.w < src.h, preserveAspectRatio (explicit "meet"): scale = min(sx, sy)
((0, 0, 5, 10), (0, 0, 100, 100), "xMinYMin meet", (10, 0, 0, 10, 0, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMidYMin meet", (10, 0, 0, 10, 25, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMaxYMin meet", (10, 0, 0, 10, 50, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMinYMid meet", (10, 0, 0, 10, 0, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMidYMid meet", (10, 0, 0, 10, 25, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMaxYMid meet", (10, 0, 0, 10, 50, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMinYMax meet", (10, 0, 0, 10, 0, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMidYMax meet", (10, 0, 0, 10, 25, 0)),
((0, 0, 5, 10), (0, 0, 100, 100), "xMaxYMax meet", (10, 0, 0, 10, 50, 0)),
# src.w > src.h, preserveAspectRatio ("slice"): scale = max(sx, sy)
((0, 0, 10, 5), (0, 0, 100, 100), "xMinYMin slice", (20, 0, 0, 20, 0, 0)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMidYMin slice", (20, 0, 0, 20, -50, 0)),
(
(0, 0, 10, 5),
(0, 0, 100, 100),
"xMaxYMin slice",
(20, 0, 0, 20, -100, 0),
),
((0, 0, 10, 5), (0, 0, 100, 100), "xMinYMid slice", (20, 0, 0, 20, 0, 0)),
((0, 0, 10, 5), (0, 0, 100, 100), "xMidYMid | |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fix cloudpickle compatible problem we known.
import compatible_trick
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
os.environ['XPARL'] = 'True'
import argparse
import cloudpickle
import pickle
import psutil
import re
import sys
import tempfile
import threading
import time
import traceback
import zmq
from multiprocessing import Process, Pipe
from parl.utils import to_str, to_byte, get_ip_address, logger
from parl.utils.communication import loads_argument, loads_return,\
dumps_argument, dumps_return
from parl.remote import remote_constants
from parl.utils.exceptions import SerializeError, DeserializeError
from parl.remote.message import InitializedJob
class Job(object):
"""Base class for the job.
After establishing connection with the remote object, the job will
create a remote class instance locally and enter an infinite loop
in a separate process, waiting for commands from the remote object.
"""
    def __init__(self, worker_address):
        """Spawn the job process, then block until it exits.

        This constructor does not return in the usual sense: it starts
        ``self.run`` in a child process, wires up the control sockets,
        ``join``s the child, notifies the worker to kill this job, and
        finally terminates the whole process via ``os._exit(1)``.

        Args:
            worker_address(str): worker_address for sending job information(e.g, pid)
        Attributes:
            pid (int): Job process ID.
            max_memory (float): Maximum memory (MB) can be used by each remote instance.
        """
        self.max_memory = None
        # Pipe used by the child process to send back its reply-socket address.
        self.job_address_receiver, job_address_sender = Pipe()
        self.worker_address = worker_address
        self.job_ip = get_ip_address()
        self.pid = os.getpid()
        # Guards kill_job_socket, which is shared with heartbeat threads.
        self.lock = threading.Lock()
        # The actual remote-instance loop runs in a separate process.
        self.run_job_process = Process(
            target=self.run, args=(job_address_sender, ))
        self.run_job_process.start()
        self._create_sockets()
        # Baseline memory usage (MB) of this process, measured after startup.
        process = psutil.Process(self.pid)
        self.init_memory = float(process.memory_info()[0]) / (1024**2)
        # Block until the child finishes (normally or on failure) ...
        self.run_job_process.join()
        # ... then ask the worker to clean up this job before exiting.
        with self.lock:
            self.kill_job_socket.send_multipart(
                [remote_constants.KILLJOB_TAG,
                 to_byte(self.job_address)])
            try:
                _ = self.kill_job_socket.recv_multipart()
            except zmq.error.Again as e:
                # Worker did not reply in time; proceed to exit regardless.
                pass
        # Hard-exit so daemon threads and sockets are torn down with us.
        os._exit(1)
    def _create_sockets(self):
        """Create five sockets for each job in main process.
        (1) job_socket(functional socket): sends job_address and heartbeat_address to worker.
        (2) ping_heartbeat_socket: replies ping message of client.
        (3) worker_heartbeat_socket: replies heartbeat message of worker.
        (4) client_heartbeat_socket: replies heartbeat message of client.
        (5) kill_job_socket: sends a command to the corresponding worker to kill the job.
        """
        # wait for another process to create reply socket
        self.job_address = self.job_address_receiver.recv()
        self.ctx = zmq.Context()
        # create the job_socket
        self.job_socket = self.ctx.socket(zmq.REQ)
        self.job_socket.connect("tcp://{}".format(self.worker_address))
        # a thread that reply ping signals from the client
        # (timeout=False: a job may wait arbitrarily long for its first client)
        ping_heartbeat_socket, ping_heartbeat_address = self._create_heartbeat_server(
            timeout=False)
        ping_thread = threading.Thread(
            target=self._reply_ping, args=(ping_heartbeat_socket, ))
        ping_thread.setDaemon(True)
        ping_thread.start()
        # a thread that reply heartbeat signals from the worker
        worker_heartbeat_socket, worker_heartbeat_address = self._create_heartbeat_server(
        )
        worker_thread = threading.Thread(
            target=self._reply_worker_heartbeat,
            args=(worker_heartbeat_socket, ))
        worker_thread.setDaemon(True)
        # a thread that reply heartbeat signals from the client
        client_heartbeat_socket, client_heartbeat_address = self._create_heartbeat_server(
        )
        # NOTE: client_thread is started later, by _reply_ping, once the
        # client has sent its first ping (which also carries the memory limit).
        self.client_thread = threading.Thread(
            target=self._reply_client_heartbeat,
            args=(client_heartbeat_socket, ))
        self.client_thread.setDaemon(True)
        # sends job information to the worker
        initialized_job = InitializedJob(
            self.job_address, worker_heartbeat_address,
            client_heartbeat_address, ping_heartbeat_address, None, self.pid)
        self.job_socket.send_multipart(
            [remote_constants.NORMAL_TAG,
             cloudpickle.dumps(initialized_job)])
        message = self.job_socket.recv_multipart()
        # Worker acknowledged the job; only now start answering its heartbeats.
        worker_thread.start()
        tag = message[0]
        assert tag == remote_constants.NORMAL_TAG
        # create the kill_job_socket
        kill_job_address = to_str(message[1])
        self.kill_job_socket = self.ctx.socket(zmq.REQ)
        self.kill_job_socket.setsockopt(
            zmq.RCVTIMEO, remote_constants.HEARTBEAT_TIMEOUT_S * 1000)
        self.kill_job_socket.connect("tcp://{}".format(kill_job_address))
def _check_used_memory(self):
"""Check if the memory used by this job exceeds self.max_memory."""
stop_job = False
if self.max_memory is not None:
process = psutil.Process(self.pid)
used_memory = float(process.memory_info()[0]) / (1024**2)
if used_memory > self.max_memory + self.init_memory:
stop_job = True
return stop_job
    def _reply_ping(self, socket):
        """Create a socket server that reply the ping signal from client.
        This signal is used to make sure that the job is still alive.

        The first ping also carries the client's memory limit (as a string,
        'None' meaning unlimited) and triggers the client-heartbeat thread.
        """
        message = socket.recv_multipart()
        max_memory = to_str(message[1])
        if max_memory != 'None':
            self.max_memory = float(max_memory)
        socket.send_multipart([remote_constants.HEARTBEAT_TAG])
        # A client is alive and configured; start answering its heartbeats.
        self.client_thread.start()
        socket.close(0)
def _create_heartbeat_server(self, timeout=True):
"""Create a socket server that will raises timeout exception.
"""
heartbeat_socket = self.ctx.socket(zmq.REP)
if timeout:
heartbeat_socket.setsockopt(
zmq.RCVTIMEO, remote_constants.HEARTBEAT_RCVTIMEO_S * 1000)
heartbeat_socket.linger = 0
heartbeat_port = heartbeat_socket.bind_to_random_port(addr="tcp://*")
heartbeat_address = "{}:{}".format(self.job_ip, heartbeat_port)
return heartbeat_socket, heartbeat_address
    def _reply_client_heartbeat(self, socket):
        """Create a socket that replies heartbeat signals from the client.
        If the job loses connection with the client, it will exit too.
        """
        while True:
            try:
                message = socket.recv_multipart()
                # Piggy-back the memory check on the heartbeat so the client
                # learns when this job is about to stop itself.
                stop_job = self._check_used_memory()
                socket.send_multipart([
                    remote_constants.HEARTBEAT_TAG,
                    to_byte(str(stop_job)),
                    to_byte(self.job_address)
                ])
                if stop_job == True:
                    logger.error(
                        "Memory used by this job exceeds {}. This job will exist."
                        .format(self.max_memory))
                    # Give the reply a chance to reach the client first.
                    time.sleep(5)
                    socket.close(0)
                    os._exit(1)
            except zmq.error.Again as e:
                # Receive timed out: the client stopped sending heartbeats.
                logger.warning(
                    "[Job] Cannot connect to the client. This job will exit and inform the worker."
                )
                break
        socket.close(0)
        # Ask the worker to reclaim this job before terminating.
        with self.lock:
            self.kill_job_socket.send_multipart(
                [remote_constants.KILLJOB_TAG,
                 to_byte(self.job_address)])
        try:
            _ = self.kill_job_socket.recv_multipart()
        except zmq.error.Again as e:
            pass
        logger.warning("[Job]lost connection with the client, will exit")
        os._exit(1)
def _reply_worker_heartbeat(self, socket):
"""create a socket that replies heartbeat signals from the worker.
If the worker has exited, the job will exit automatically.
"""
while True:
try:
message = socket.recv_multipart()
socket.send_multipart([remote_constants.HEARTBEAT_TAG])
except zmq.error.Again as e:
logger.warning("[Job] Cannot connect to the worker{}. ".format(
self.worker_address) + "Job will quit.")
break
socket.close(0)
os._exit(1)
def wait_for_files(self, reply_socket, job_address):
"""Wait for python files from remote object.
When a remote object receives the allocated job address, it will send
the python files to the job. Later, the job will save these files to a
temporary directory and add the temporary diretory to Python's working
directory.
Args:
reply_socket (sockert): main socket to accept commands of remote object.
job_address (String): address of reply_socket.
Returns:
A temporary directory containing the python files.
"""
message = reply_socket.recv_multipart()
tag = message[0]
if tag == remote_constants.SEND_FILE_TAG:
pyfiles = pickle.loads(message[1])
# save python files to temporary directory
envdir = tempfile.mkdtemp()
for file, code in pyfiles['python_files'].items():
file = os.path.join(envdir, file)
with open(file, 'wb') as code_file:
code_file.write(code)
# save other files to current directory
for file, content in pyfiles['other_files'].items():
# create directory (i.e. ./rom_files/)
if '/' in file:
try:
os.makedirs(os.path.join(*file.rsplit('/')[:-1]))
except OSError as e:
pass
with open(file, 'wb') as f:
f.write(content)
logger.info('[job] reply')
reply_socket.send_multipart([remote_constants.NORMAL_TAG])
return envdir
else:
logger.error("NotImplementedError:{}, received tag:{}".format(
job_address, ))
raise NotImplementedError
    def wait_for_connection(self, reply_socket):
        """Wait for connection from the remote object.
        The remote object will send its class information and initialization
        arguments to the job, these parameters are then used to create a
        local instance in the job process.

        Args:
            reply_socket (socket): main socket to accept commands of remote object.

        Returns:
            A local instance of the remote class object, or None when the
            instantiation raised (the error is reported back to the client).
        """
        message = reply_socket.recv_multipart()
        tag = message[0]
        obj = None
        if tag == remote_constants.INIT_OBJECT_TAG:
            try:
                # message[1]: pickled class; message[2]: pickled (args, kwargs).
                cls = cloudpickle.loads(message[1])
                args, kwargs = cloudpickle.loads(message[2])
                obj = cls(*args, **kwargs)
            except Exception as e:
                # Forward the constructor failure (with traceback) verbatim.
                traceback_str = str(traceback.format_exc())
                error_str = str(e)
                logger.error("traceback:\n{}".format(traceback_str))
                reply_socket.send_multipart([
                    remote_constants.EXCEPTION_TAG,
                    to_byte(error_str + "\ntraceback:\n" + traceback_str)
                ])
                return None
            reply_socket.send_multipart([remote_constants.NORMAL_TAG])
        else:
            logger.error("Message from job {}".format(message))
            reply_socket.send_multipart([
                remote_constants.EXCEPTION_TAG,
                b"[job]Unkonwn tag when tried to receive the class definition"
            ])
            raise NotImplementedError
        return obj
    def run(self, job_address_sender):
        """An infinite loop waiting for a new task.

        Runs in the child process: binds the REP socket, receives the client's
        source files and class definition, then serves remote calls until the
        task ends or fails.

        Args:
            job_address_sender(sending end of multiprocessing.Pipe): send job address of reply_socket to main process.
        """
        ctx = zmq.Context()
        # create the reply_socket
        reply_socket = ctx.socket(zmq.REP)
        job_port = reply_socket.bind_to_random_port(addr="tcp://*")
        reply_socket.linger = 0
        job_ip = get_ip_address()
        job_address = "{}:{}".format(job_ip, job_port)
        # Unblocks _create_sockets() in the parent process.
        job_address_sender.send(job_address)
        try:
            # receive source code from the actor and append them to the environment variables.
            envdir = self.wait_for_files(reply_socket, job_address)
            sys.path.append(envdir)
            obj = self.wait_for_connection(reply_socket)
            assert obj is not None
            self.single_task(obj, reply_socket, job_address)
        except Exception as e:
            logger.error(
                "Error occurs when running a single task. We will reset this job. Reason:{}"
                .format(e))
            traceback_str = str(traceback.format_exc())
            logger.error("traceback:\n{}".format(traceback_str))
def single_task(self, obj, reply_socket, job_address):
"""An infinite loop waiting for commands from the remote object.
Each job will receive two kinds of message from the remote object:
1. When the remote object calls a function, job will run the
function on the local instance and return the results to the
remote object.
2. When the remote object is deleted, the job will quit and release
related computation resources.
Args:
reply_socket (sockert): main socket to accept commands of remote object.
job_address (String): address of reply_socket.
"""
while True:
message = reply_socket.recv_multipart()
tag = message[0]
if tag == remote_constants.CALL_TAG:
try:
function_name = to_str(message[1])
data = message[2]
args, kwargs = loads_argument(data)
ret = getattr(obj, function_name)(*args, **kwargs)
ret = dumps_return(ret)
reply_socket.send_multipart(
[remote_constants.NORMAL_TAG, ret])
except Exception as e:
# reset the job
error_str = str(e)
logger.error(error_str)
if type(e) == AttributeError:
reply_socket.send_multipart([
remote_constants.ATTRIBUTE_EXCEPTION_TAG,
to_byte(error_str)
])
raise AttributeError
elif type(e) == SerializeError:
reply_socket.send_multipart([
remote_constants.SERIALIZE_EXCEPTION_TAG,
to_byte(error_str)
])
raise SerializeError
elif type(e) == DeserializeError:
reply_socket.send_multipart([
remote_constants.DESERIALIZE_EXCEPTION_TAG,
to_byte(error_str)
])
raise DeserializeError
else:
traceback_str = str(traceback.format_exc())
logger.error("traceback:\n{}".format(traceback_str))
reply_socket.send_multipart([
remote_constants.EXCEPTION_TAG,
to_byte(error_str | |
board
Attributes
----------
map : list
a 2d list that is the game board
currentShips : dict
A dictionary of all ships currently on the board and how many
        hits they have received
hits : list
A list of coordinates that have been engaged
sunkShips : list
A list of ships that have been sunk
Methods
-------
generateBoard(x, y)
Generates a board of size `x` `y`
addShip(size, posX, posY, rotDir, maxX, maxY, symbol)
Adds a ship of size `size` starting at `posX` `posY`
addRandom(x, y)
Adds all the required ships in random positions on the board
printBoard()
Prints the game board
printBoardHidden()
Prints the gameboard but hides all except hits and misses
engage(posX, posY)
Engages at a specific position
won()
Checks if all ships have been destroyed
"""
def __init__(self) -> None:
"""
Returns
-------
None
"""
self.hits: list[tuple[int, int]] = []
self.map = None
self.sunkShips:list[str] = []
self.ships = {
'A':{
'name':'Aircraft Carrier',
'size':5,
'hits':0
},
'B':{
'name':'Battleship',
'size':4,
'hits':0
},
'C':{
'name':'Cruiser',
'size':3,
'hits':0
},
'S':{
'name':'Submarine',
'size':3,
'hits':0
},
'D':{
'name':'Destroyer',
'size':2,
'hits':0
},
}
return
def generateBoard(self, x:int = 10, y:int = 10) -> None:
"""Creates a board
Creates a board of size `x` `y` and set self.map to
the generated board
Parameters
----------
x : int, optional
The width of the game board (default is 10)
y : int, optional
The height of the game board (default is 10)
Returns
-------
None
"""
self.currentShips = copy.deepcopy(self.ships) #Don't use dict.copy() as it is shallow so doesn't account for nested items
self.sunkShips:list[str] = []
self.hits:list = []
# self.hitShip:list = []
self.map: list = [[0 for i in range(x)] for j in range(y)]
return
def addShip(self, size: int, posX: int, posY: int, rotDir: bool, maxX: int, maxY: int, symbol: str) -> None:
"""Adds a ship of specified size to board starting at specified coordinates
Parameters
----------
size : int
The size of the ship.
posX : int
The x coordinate for the start of the ship
posY : int
The y coordinate for the start of the ship
rotDir : bool
The direction of the ship. True is vertical. False is horizontal.
maxX : int
The width of the board
maxY : int
The height of the board
symbol : string
The symbol to be placed on the board
Raises
------
PositionAlreadyPopulatedError
If position for ship is already taken.
OutOfBoundsError
If the position for the ship is not within the confines of the
game board.
Returns
-------
None
"""
#Check that way is clear for ship
if rotDir:
#Two seperate for loops to avoid only half placing ships
for i in range(posY, posY+size):
try:
if self.map[i][posX] != 0:
raise PositionAlreadyPopulatedError
return
except IndexError:
raise OutOfBoundsError
return
for i in range(posY, posY+size):
self.map[i][posX] = symbol
else:
for i in range(posX, posX+size):
try:
if self.map[posY][i] != 0:
raise PositionAlreadyPopulatedError
return
except IndexError:
raise OutOfBoundsError
return
for i in range(posX, posX+size):
self.map[posY][i] = symbol
return
def addRandom(self, x:int, y:int) -> None:
for key in self.ships:
while True:
self.startPos = (random.randint(0,x), random.randint(0, y))
self.rotDirection = bool(random.getrandbits(1))
try:
self.addShip(self.ships[key]['size'], self.startPos[0], self.startPos[1], self.rotDirection, x, y, key)
break
except (PositionAlreadyPopulatedError, OutOfBoundsError):
continue
def printBoard(self) -> None:
"""Prints the game board
Outputs the game board with X and Y headers
Returns
-------
None
"""
# Print x heading
print(f"|{' ':^3}|", end='')
for i in range(len(self.map[0])):
print(f'{i+1:^3}|', end='')
# Print rows with y heading
for i in range(len(self.map)):
print(f'\n|{i+1:^3}|', end='')
for j in range(len(self.map[i])):
print(f'{self.map[i][j]:^3}|', end='')
return
def printBoardHidden(self) -> None:
"""Prints the game board
This function prints out the gameboard but all items except for hits and
misses are redacted.
Returns
-------
None
"""
#temporary for debugging. remove for production
self.printBoard()
return
# Print x heading
print(f"|{' ':^3}|", end='')
for i in range(len(self.map[0])):
print(f'{i+1:^3}|', end='')
# Print rows with y heading
for i in range(len(self.map)):
print(f'\n|{i+1:^3}|', end='')
for j in range(len(self.map[i])):
if (self.map[i][j] == 'H' or self.map[i][j] == 'M'):
print(f'{self.map[i][j]:^3}|', end='')
else:
print(f"{'#':^3}|", end='')
return
def engage(self, posX: int, posY: int) -> str:
"""Engages a ship at specified position
Engages the position specified. This checks if the position has
aleady been engaged and if not engages the position. It then
returns the result of that engagement as a string.
Parameters
----------
posX : int
The x coordinate to engage
posY : int
The y coordinate to engage
Returns
-------
string
The type of ship that has been hit
"""
posX -= 1 #Account for list starting at 0 but board starting at 1
posY -= 1
if [posX, posY] in self.hits:
print('You have already engaged this position!')
return 'AE'
else:
self.hits.append([posX, posY])
self.hitShip = self.map[posY][posX]
if self.hitShip == 0:
self.map[posY][posX] = 'M'
return 'miss'
else:
self.map[posY][posX] = 'H'
self.currentShips[self.hitShip]['hits'] += 1
return self.hitShip
def isSunk(self, ship:str) -> bool:
"""Checks if ship has been sunk
Checks if the specified ship has been sunk and returns it
as a boolean value.
Parameters
----------
ship : string
The ship to check
Returns
-------
boolean
If the specified ship has been sunk or not
"""
if self.currentShips[ship]['size'] == self.currentShips[ship]['hits']:
self.sunkShips.append(ship)
return True
else:
return False
def won(self) -> bool:
"""Checks if all ships have been sunk
Checks if all the ships on the board have been sunk and
returns the status it as a boolean value.
Returns
-------
boolean
If all of the ships on the board have been sunk
"""
if len(self.sunkShips) >= 5:
return True
else:
return False
#End class Board()
class Scoring():
    """This class handles the scoring and saving of scores

    Attributes
    ----------
    score : int
        The current user's score
    scoresSave : dict
        Mapping of player name to score, loaded from scores.json

    Methods
    -------
    getScores(ordered)
        return the saved scores, optionally ordered by score ascending
    showScores()
        print a list of top 10 scores
    addScore(name, saveLocation, force)
        write the current score to the scores file
    """
    def __init__(self, saveLocation: str) -> None:
        """Load previously saved scores from disk.

        Parameters
        ----------
        saveLocation : str
            The path to the current save location
        """
        self.score = 0
        with open(os.path.join(saveLocation, 'scores.json'), 'r') as data:
            self.scoresSave = json.load(data)

    def getScores(self, ordered: bool = False) -> dict:
        """Return the saved scores.

        Parameters
        ----------
        ordered : bool, optional
            When True, return a new dict ordered by score, lowest first
            (default False returns the raw mapping).

        Returns
        -------
        dict
            Mapping of player name to score.
        """
        if ordered:
            return {k: v for k, v in sorted(self.scoresSave.items(), key=lambda item: item[1])}
        return self.scoresSave

    def showScores(self) -> None:
        """Prints a list of the top 10 scores, highest first.

        Returns
        -------
        None
        """
        # Bug fix: the scores were sorted ascending and then the first ten
        # taken, which displayed the *lowest* ten scores instead of the top
        # ten; sort descending before slicing.
        top = sorted(self.scoresSave.items(), key=lambda item: item[1], reverse=True)[:10]
        print('Scores:')
        for i, (name, score) in enumerate(top, start=1):
            print(f'[{i}] {name}: {score}')
        Helpers.anyKey()

    def addScore(self, name: str, saveLocation: str, force: bool = False) -> dict:
        """Adds a score to scores file

        Adds a score to the scores file with the user's name as the key.
        Unless ``force`` is set, refuses to overwrite an existing entry.

        Parameters
        ----------
        name : str
            The name to write the score under
        saveLocation : str
            The path to the current save location
        force : bool, optional
            Bypass overwriting check (default False)

        Returns
        -------
        dict : {'status':bool, 'errCd':str}
            status True with errCd 'ok' on success; status False with
            errCd 'ovrwrt' when the entry exists and force is False.
        """
        if not force and name in self.scoresSave:
            return {'status': False, 'errCd': 'ovrwrt'}
        self.scoresSave[name] = self.score
        with open(os.path.join(saveLocation, 'scores.json'), 'w') as data:
            json.dump(self.scoresSave, data)
        return {'status': True, 'errCd': 'ok'}
#End class Scoring()
class GameSave():
"""This class handles the saving and loading of game files
Methods
-------
listSave()
return a list of all saved games
saveGame()
Saves the current game to disk
loadGame()
Loads a game from disk
deleteGame()
Deletes a game from disk
"""
def __init__(self, saveLocation:str) -> None:
"""
Parameters
----------
saveLocation : string
The path to the current save location
Returns
-------
None
"""
self.defaultReturn:tuple = (None, | |
"""Exit context manager.
.. versionchanged:: 1.0.0 disconnect enforced on close
.. versionchanged:: 1.1.0 release lock on exit
.. versionchanged:: 1.2.1 disconnect enforced on close only not in keepalive mode
"""
if not self.__keepalive_period:
self.close()
super().__exit__(exc_type, exc_val, exc_tb)
@property
def sudo_mode(self) -> bool:
"""Persistent sudo mode for connection object.
:rtype: bool
"""
return self.__sudo_mode
@sudo_mode.setter
def sudo_mode(self, mode: bool) -> None:
"""Persistent sudo mode change for connection object.
:param mode: sudo status: enabled | disabled
:type mode: bool
"""
self.__sudo_mode = bool(mode)
@property
def keepalive_period(self) -> int:
"""Keepalive period for connection object.
:rtype: int
If 0 - close connection on exit from context manager.
"""
return self.__keepalive_period
@keepalive_period.setter
def keepalive_period(self, period: KeepAlivePeriodT) -> None:
"""Keepalive period change for connection object.
:param period: keepalive period change
:type period: typing.Union[int, bool]
If 0 - close connection on exit from context manager.
"""
self.__keepalive_period = int(period)
transport: paramiko.Transport = self._ssh_transport
transport.set_keepalive(int(period))
    def reconnect(self) -> None:
        """Reconnect SSH session."""
        # Serialize with other users of this connection: tear the current
        # session down, then dial again with the stored parameters.
        with self.lock:
            self.close()
            self.__connect()
def sudo(self, enforce: bool | None = None) -> _SudoContext:
"""Call contextmanager for sudo mode change.
:param enforce: Enforce sudo enabled or disabled. By default: None
:type enforce: typing.Optional[bool]
:return: context manager with selected sudo state inside
:rtype: typing.ContextManager[None]
"""
return _SudoContext(ssh=self, enforce=enforce)
def keepalive(self, enforce: KeepAlivePeriodT = 1) -> _KeepAliveContext:
"""Call contextmanager with keepalive period change.
:param enforce: Enforce keepalive period.
:type enforce: typing.Union[int, bool]
:return: context manager with selected keepalive state inside
:rtype: typing.ContextManager[None]
.. Note:: Enter and exit ssh context manager is produced as well.
.. versionadded:: 1.2.1
"""
return _KeepAliveContext(ssh=self, enforce=int(enforce))
def _prepare_command(self, cmd: str, chroot_path: str | None = None) -> str:
"""Prepare command: cower chroot and other cases.
:param cmd: main command
:param chroot_path: path to make chroot for execution
:return: final command, includes chroot, if required
"""
if not self.sudo_mode:
return super()._prepare_command(cmd=cmd, chroot_path=chroot_path)
quoted_command: str = shlex.quote(cmd)
if chroot_path is None and self._chroot_path is None:
return f'sudo -S sh -c {shlex.quote(f"eval {quoted_command}")}'
if chroot_path is not None:
target_path: str = shlex.quote(chroot_path)
else:
target_path = shlex.quote(self._chroot_path) # type: ignore[arg-type]
return f'chroot {target_path} sudo sh -c {shlex.quote(f"eval {quoted_command}")}'
    # noinspection PyMethodOverriding
    def _execute_async(
        self,
        command: str,
        *,
        stdin: OptionalStdinT = None,
        open_stdout: bool = True,
        open_stderr: bool = True,
        chroot_path: str | None = None,
        get_pty: bool = False,
        width: int = 80,
        height: int = 24,
        timeout: OptionalTimeoutT = None,
        **kwargs: typing.Any,
    ) -> SshExecuteAsyncResult:
        """Execute command in async mode and return channel with IO objects.

        :param command: Command for execution
        :type command: str
        :param stdin: pass STDIN text to the process
        :type stdin: typing.Union[bytes, str, bytearray, None]
        :param open_stdout: open STDOUT stream for read
        :type open_stdout: bool
        :param open_stderr: open STDERR stream for read
        :type open_stderr: bool
        :param chroot_path: chroot path override
        :type chroot_path: typing.Optional[str]
        :param get_pty: Get PTY for connection
        :type get_pty: bool
        :param width: PTY width
        :type width: int
        :param height: PTY height
        :type height: int
        :param timeout: timeout before stop execution with TimeoutError (will be set on channel)
        :type timeout: typing.Union[int, float, None]
        :param kwargs: additional parameters for call.
        :type kwargs: typing.Any
        :return: Tuple with control interface and file-like objects for STDIN/STDERR/STDOUT
        :rtype: typing.NamedTuple(
                    'SshExecuteAsyncResult',
                    [
                        ('interface', paramiko.Channel),
                        ('stdin', paramiko.ChannelFile),
                        ('stderr', typing.Optional[paramiko.ChannelFile]),
                        ('stdout', typing.Optional[paramiko.ChannelFile]),
                        ("started", datetime.datetime),
                    ]
                )

        .. versionchanged:: 1.2.0 open_stdout and open_stderr flags
        .. versionchanged:: 1.2.0 stdin data
        .. versionchanged:: 1.2.0 get_pty moved to `**kwargs`
        .. versionchanged:: 2.1.0 Use typed NamedTuple as result
        .. versionchanged:: 3.2.0 Expose pty options as optional keyword-only arguments
        .. versionchanged:: 4.1.0 support chroot
        """
        warnings.warn("_execute_async is deprecated and will be removed soon", DeprecationWarning)
        chan: paramiko.Channel = self._ssh_transport.open_session()
        if timeout is not None:
            chan.settimeout(timeout)
        if get_pty:
            # Open PTY
            chan.get_pty(term="vt100", width=width, height=height, width_pixels=0, height_pixels=0)
        # STDIN is always opened; STDOUT/STDERR are opened and then dropped
        # below when the corresponding flag is False.
        _stdin: paramiko.ChannelFile = chan.makefile("wb")  # type: ignore[name-defined]
        stdout: paramiko.ChannelFile = chan.makefile("rb")  # type: ignore[name-defined]
        if open_stderr:
            stderr: paramiko.ChannelFile | None = chan.makefile_stderr("rb")  # type: ignore[name-defined]
        else:
            stderr = None
        cmd = f"{self._prepare_command(cmd=command, chroot_path=chroot_path)}\n"
        started = datetime.datetime.utcnow()
        if self.sudo_mode:
            chan.exec_command(cmd)  # nosec  # Sanitize on caller side
            if not stdout.channel.closed:
                # Feed the sudo password prompt before any user stdin.
                # noinspection PyTypeChecker
                self.auth.enter_password(_stdin)
                _stdin.flush()
        else:
            chan.exec_command(cmd)  # nosec  # Sanitize on caller side
        if stdin is not None:
            if not _stdin.channel.closed:
                stdin_str: bytes = self._string_bytes_bytearray_as_bytes(stdin)
                _stdin.write(stdin_str)
                _stdin.flush()
            else:
                self.logger.warning("STDIN Send failed: closed channel")
        if open_stdout:
            res_stdout = stdout
        else:
            stdout.close()
            res_stdout = None
        # noinspection PyArgumentList
        return SshExecuteAsyncResult(
            interface=chan,
            stdin=_stdin,
            stderr=stderr,
            stdout=res_stdout,
            started=started,
        )
    def _exec_command(  # type: ignore[override]
        self,
        command: str,
        async_result: SshExecuteAsyncResult,
        timeout: OptionalTimeoutT,
        *,
        verbose: bool = False,
        log_mask_re: LogMaskReT = None,
        stdin: OptionalStdinT = None,
        log_stdout: bool = True,
        log_stderr: bool = True,
        **kwargs: typing.Any,
    ) -> exec_result.ExecResult:
        """Get exit status from channel with timeout.

        :param command: executed command (for logs)
        :type command: str
        :param async_result: execute_async result
        :type async_result: SshExecuteAsyncResult
        :param timeout: timeout before stop execution with TimeoutError
        :type timeout: typing.Union[int, float, None]
        :param verbose: produce log.info records for STDOUT/STDERR
        :type verbose: bool
        :param log_mask_re: regex lookup rule to mask command for logger.
               all MATCHED groups will be replaced by '<*masked*>'
        :type log_mask_re: typing.Optional[str]
        :param stdin: pass STDIN text to the process
        :type stdin: typing.Union[bytes, str, bytearray, None]
        :param log_stdout: log STDOUT during read
        :type log_stdout: bool
        :param log_stderr: log STDERR during read
        :type log_stderr: bool
        :param kwargs: additional parameters for call.
        :type kwargs: typing.Any
        :return: Execution result
        :rtype: ExecResult
        :raises ExecHelperTimeoutError: Timeout exceeded

        .. versionchanged:: 1.2.0 log_mask_re regex rule for masking cmd
        """

        def poll_streams() -> None:
            """Poll FIFO buffers if data available."""
            if async_result.stdout and async_result.interface.recv_ready():
                result.read_stdout(src=async_result.stdout, log=self.logger if log_stdout else None, verbose=verbose)
            if async_result.stderr and async_result.interface.recv_stderr_ready():
                result.read_stderr(src=async_result.stderr, log=self.logger if log_stderr else None, verbose=verbose)

        def poll_pipes() -> None:
            """Polling task for FIFO buffers."""
            # Drain output incrementally until the remote command reports its
            # exit status, then do a final full read and record the exit code.
            while not async_result.interface.status_event.is_set():
                time.sleep(0.1)
                if async_result.stdout or async_result.stderr:
                    poll_streams()
            result.read_stdout(src=async_result.stdout, log=self.logger, verbose=verbose)
            result.read_stderr(src=async_result.stderr, log=self.logger, verbose=verbose)
            result.exit_code = async_result.interface.exit_status

        # channel.status_event.wait(timeout)
        cmd_for_log: str = self._mask_command(cmd=command, log_mask_re=log_mask_re)
        # Store command with hidden data
        result = exec_result.ExecResult(cmd=cmd_for_log, stdin=stdin, started=async_result.started)
        # Run the poller in a worker thread so this thread can enforce the
        # overall timeout with concurrent.futures.wait.
        with concurrent.futures.ThreadPoolExecutor(thread_name_prefix="exec-helpers_ssh_poll_") as executor:
            future: concurrent.futures.Future[None] = executor.submit(poll_pipes)
            concurrent.futures.wait([future], timeout)
            # Process closed?
            if async_result.interface.status_event.is_set():
                async_result.interface.close()
                return result
            # Timed out: tear the channel down and unblock/cancel the poller.
            async_result.interface.close()
            async_result.interface.status_event.set()
            future.cancel()
            concurrent.futures.wait([future], 0.001)
        result.set_timestamp()
        wait_err_msg: str = _log_templates.CMD_WAIT_ERROR.format(result=result, timeout=timeout)
        self.logger.debug(wait_err_msg)
        raise exceptions.ExecHelperTimeoutError(result=result, timeout=timeout)  # type: ignore[arg-type]
def open_execute_context(
self,
command: str,
*,
stdin: OptionalStdinT = None,
open_stdout: bool = True,
open_stderr: bool = True,
chroot_path: str | None = None,
get_pty: bool = False,
width: int = 80,
height: int = 24,
timeout: OptionalTimeoutT = None,
**kwargs: typing.Any,
) -> _SSHExecuteContext:
"""Get execution context manager.
:param command: Command for execution
:type command: typing.Union[str, typing.Iterable[str]]
:param stdin: pass STDIN text to the process
:type stdin: typing.Union[bytes, str, bytearray, None]
:param open_stdout: open STDOUT stream for read
:type open_stdout: bool
:param open_stderr: open STDERR stream for read
:type open_stderr: bool
:param chroot_path: chroot path override
:type chroot_path: typing.Optional[str]
:param get_pty: Get PTY for connection
:type get_pty: bool
:param width: PTY width
:type width: int
:param height: PTY height
:type height: int
:param timeout: Timeout for **connection open**.
:type timeout: typing.Union[int, float, None]
:param kwargs: additional parameters for call.
:type kwargs: typing.Any
:return: Execute context
:rtype: _SSHExecuteContext
.. versionadded:: 8.0.0
"""
return _SSHExecuteContext(
transport=self._ssh_transport,
command=f"{self._prepare_command(cmd=command, chroot_path=chroot_path)}\n",
stdin=None if stdin is None else self._string_bytes_bytearray_as_bytes(stdin),
open_stdout=open_stdout,
open_stderr=open_stderr,
get_pty=get_pty,
width=width,
height=height,
timeout=timeout,
sudo_mode=self.sudo_mode,
auth=self.auth,
logger=self.logger,
**kwargs,
)
def execute(
self,
command: CommandT,
verbose: bool = False,
timeout: OptionalTimeoutT = constants.DEFAULT_TIMEOUT,
*,
log_mask_re: LogMaskReT = None,
stdin: OptionalStdinT = None,
open_stdout: bool = True,
log_stdout: bool = True,
open_stderr: bool = True,
log_stderr: bool = True,
chroot_path: str | None = None,
get_pty: bool = False,
width: int = 80,
height: int = 24,
**kwargs: typing.Any,
) -> exec_result.ExecResult:
"""Execute command and wait for return code.
:param command: Command for execution
:type command: typing.Union[str, typing.Iterable[str]]
:param verbose: Produce log.info records for command call and output
:type verbose: bool
:param timeout: Timeout for command execution.
:type timeout: typing.Union[int, float, None]
:param log_mask_re: regex lookup rule to mask command for logger.
all MATCHED groups will be replaced by '<*masked*>'
:type log_mask_re: typing.Optional[str]
:param stdin: pass | |
the variable
"""
if not self._bound_target:
self._metadata_buffer[name] = value
else:
self._bound_target.setncattr(name, value)
def _dump_metadata_buffer(self):
"""
Dump the metadata buffer to file
"""
if self._bound_target is None:
raise UnboundLocalError("Cannot dump the metadata buffer to target since no target exists!")
self._bound_target.setncatts(self._metadata_buffer)
self._metadata_buffer = {}
@staticmethod
def _convert_netcdf_store_type(stored_type):
"""
Convert the stored NetCDF data type from string to type without relying on unsafe eval() function
Parameters
----------
stored_type : string
Read from ncfile.Variable.type
Returns
-------
proper_type : type
Python or module type
"""
try:
# Check if it's a builtin type
try: # Python 2
module = importlib.import_module('__builtin__')
except ImportError: # Python 3
module = importlib.import_module('builtins')
proper_type = getattr(module, stored_type)
except AttributeError:
# if not, separate module and class
module, stored_type = stored_type.rsplit(".", 1)
module = importlib.import_module(module)
proper_type = getattr(module, stored_type)
return proper_type
@property
def _output_mode(self):
"""
Set the write and append flags. Code should only call this after being bound to a variable
Returns
-------
output_mode : string
Either 'a' for append or 'w' for write
"""
if self._bound_target.getncattr('IODriver_Appendable'):
output_mode = 'a'
else:
output_mode = 'w'
return output_mode
def _attempt_storage_read(self):
"""
This is a helper function to try and read the target from the disk then do some validation checks common to
every _bind_read call. Helps cut down on recoding.
Returns
-------
None, but should try to set _bound_target from disk
"""
self._bound_target = getattr(self._storage_object, self.storage_type)[self._target]
# Ensure that the target we bind to matches the type of driver
try:
if self._bound_target.getncattr('IODriver_Type') != self.dtype_string():
raise TypeError("Storage target on NetCDF file is of type {} but this driver is designed to handle "
"type {}!".format(self._bound_target.getncattr('IODriver_Type'), self.dtype_string()))
except AttributeError:
warnings.warn("This Codec cannot detect storage type from on-disk variable. .write() and .append() "
"operations will not work and .read() operations may work", RuntimeWarning)
def _check_storage_mode(self, expected_mode):
"""
Check to see if the data stored at this codec is actually compatible with the type of write operation that was
performed (write vs. append)
Parameters
----------
expected_mode : string, either "w' or "a"
Raises
------
TypeError if ._output_mode != expected mode
"""
# String fill in, uses the opposite of expected mode to raise warnings
saved_as = {'w': 'appendable', 'a': 'statically written'}
cannot = {'w': 'write', 'a': 'append'}
must_use = {'w': 'append() or the to_index keyword of write()', 'a': 'write()'}
if self._output_mode != expected_mode:
raise TypeError("{target} at {type} was saved as {saved_as} data! Cannot {cannot}, must use "
"{must_use}".format(target=self._target,
type=self.dtype_string(),
saved_as=saved_as[expected_mode],
cannot=cannot[expected_mode],
must_use=must_use[expected_mode])
)
    def _write_to_append_at_index(self, data, index):
        """
        Try to write data to a specific site on an append variable. This is a method which should be called in
        every `write` call if the index is defined by something other than None.

        Parameters
        ----------
        data : Data to write to location on a previously appended variable
        index : Int,
            Index to write the data at, replacing what is already there
            If index > size of written data, crash

        Raises
        ------
        IOError
            If no data exists on disk to index into
        ValueError
            If index is not an int, or is beyond the currently appended length
        """
        if self._bound_target is None:
            try:
                self._bind_read()
            except KeyError:
                # Trap the NetCDF Key Error to raise an issue that data must exist first
                raise IOError("Cannot write to a specific index for data that does not exist!")
        if type(index) is not int:
            raise ValueError("to_index must be an integer!")
        self._check_storage_mode('a') # We want this in append mode
        self._check_data_shape_matching(data)
        # Determine current length and therefore if the index is too large
        length = self._bound_target.shape[0]
        # Must actually compare to full length so people don't fill an infinite variable with garbage that is just
        # masked from empty entries
        if index >= length or abs(index) > length:
            raise ValueError("Cannot choose an index beyond the maximum length of the "
                             "appended data of {}".format(length))
        # Replace the whole slot at `index` along the append (first) dimension
        self._bound_target[index, :] = self._encoder(data)
# =============================================================================
# NETCDF NON-COMPOUND TYPE CODECS
# =============================================================================
# Decoders: Convert from NC variable to python type
# Encoders: Decompose Python Type into something NC storable data
def nc_string_decoder(nc_variable):
    """Decode a NetCDF variable into a string (scalar) or an array of strings."""
    shape = nc_variable.shape
    if shape == ():
        # True scalar storage
        return str(nc_variable.getValue())
    if shape == (1,):
        # Single-entry vector storage
        return str(nc_variable[0])
    # Multi-entry storage: cast the whole array to strings
    return nc_variable[:].astype(str)
def nc_string_encoder(data):
    """Box a string into a 1-element object array so NetCDF can store it."""
    wrapped = np.empty(1, dtype='O')
    wrapped[0] = data
    return wrapped
# There really isn't anything that needs to happen here, arrays are the ideal type
# Leaving these as explicit codecs in case we need to change them later
def nc_numpy_array_decoder(nc_variable):
    """Read the NetCDF variable out as-is; arrays need no further conversion."""
    contents = nc_variable[:]
    return contents
# List and tuple iterables, assumes contents are the same type.
# Use dictionaries for compound types
def nc_iterable_decoder(nc_variable):
    """Decode a list/tuple (or an array of them) from a NetCDF variable.

    Assumes homogeneous contents; compound types should use dictionaries.
    """
    shape = nc_variable.shape
    output_type = NCVariableCodec._convert_netcdf_store_type(nc_variable.getncattr('type'))
    if len(shape) == 1:
        # Single iterable stored flat
        return output_type(nc_variable[:])
    # Long-form storage: one iterable per leading-dimension entry
    output = np.empty(shape[0], dtype=output_type)
    for entry in range(shape[0]):
        output[entry] = output_type(nc_variable[entry])
    return output
# Encoder for float, int, iterable, and numpy arrays
def simple_encoder(data):
    """Identity encoder: floats, ints, iterables, and numpy arrays store as-is."""
    return data
# Works for float and int
def scalar_decoder_generator(casting_type):
    """Build a decoder that casts NetCDF scalar data to `casting_type` (e.g. int, float)."""
    def _scalar_decoder(nc_variable):
        raw = nc_variable[:]
        if raw.shape == (1,):
            # Single value: unwrap to a true Python scalar
            return casting_type(raw[0])
        # Vector data: cast element-wise
        return raw.astype(casting_type)
    return _scalar_decoder
# =============================================================================
# HDF5 CHUNK SIZE ROUTINES
# =============================================================================
def determine_appendable_chunk_size(data, max_iteration=128, max_memory=104857600):
    """
    Determine the chunk size of the appendable dimension, it will either be max_iterations in count or max_memory in
    bytes where the function will try to reduce the number of iterations until it is under the max chunk size down to
    a single iteration.

    Parameters
    ----------
    data : Data that will be saved to disk of shape that will be saved
        This is a sample of what will be written at any one point in time.
    max_iteration : int, Default: 128
        Maximum number of iterations that will be chunked, either this limit or max_memory will be hit first, reducing
        the max iterations by a factor of 2 until we are below the memory limit, to a minimum of 1
    max_memory : int (bytes), Default: 104857600 (100MB)
        Maximum number of bytes the chunk is allowed to have; if max_iteration iterations exceed this size, then we
        reduce the number of iterations by half until we are below the memory limit

    Returns
    -------
    iteration_chunk : int
        Chunksize of the iteration dimension

    Raises
    ------
    ValueError
        If max_iteration is not an integer, or is less than 1
    """
    # Validate the type FIRST: the original compared `max_iteration < 1` before the
    # isinstance check, so a non-numeric input raised a confusing TypeError instead
    # of the intended ValueError. The message also claimed "greater than 1" while 1
    # was (correctly) accepted.
    if not isinstance(max_iteration, int) or max_iteration < 1:
        raise ValueError("max_iteration was {} but must be an integer greater than or "
                         "equal to 1!".format(max_iteration))
    iteration_chunk = int(max_iteration)
    data_size = getsizeof(data)
    # Halve the iteration count until the chunk fits in memory (to a minimum of 1).
    # Float division is intentional: the final ceil preserves the historic results.
    while iteration_chunk * data_size > max_memory and iteration_chunk > 1:
        iteration_chunk /= 2
    # Ceiling and int since np.ceil returns a float
    return int(np.ceil(iteration_chunk))
# =============================================================================
# REAL Codecs
# =============================================================================
# Generic codecs for non-compound data types: inf, float, string
class NCScalar(NCVariableCodec, ABC):
    """
    Codec base class for very basic scalar data types (int, str, float) to minimize
    code duplication.

    It is itself an abstract class and requires the following to be completed by
    subclasses:
        dtype (@property)
        dtype_string (@staticmethod)
    """
    def _bind_write(self, data):
        # Bind to an existing on-disk variable if present; otherwise create a
        # static (non-appendable) scalar variable for this target.
        # `data` itself is not needed for binding, only for the later write.
        try:
            self._bind_read()
        except KeyError:
            self._parent_driver.check_scalar_dimension()
            self._bound_target = self._storage_object.createVariable(self._target, self._on_disk_dtype,
                                                                     dimensions='scalar',
                                                                     chunksizes=(1,))
            # Second argument 0: mark the variable as statically written (not appendable)
            self._common_bind_output_actions(typename(self.dtype), 0)
        self._dump_metadata_buffer()
    def _bind_append(self, data):
        # Bind to an existing on-disk variable if present; otherwise create an
        # appendable variable along the driver's infinite dimension.
        try:
            self._bind_read()
        except KeyError:
            self._parent_driver.check_scalar_dimension()
            infinite_name = self._parent_driver.generate_infinite_dimension()
            # Chunk size along the append dimension is bounded by memory use of `data`
            appendable_chunk_size = determine_appendable_chunk_size(data)
            self._bound_target = self._storage_object.createVariable(self._target, self._on_disk_dtype,
                                                                     dimensions=[infinite_name, 'scalar'],
                                                                     chunksizes=(appendable_chunk_size, 1))
            # Second argument 1: mark the variable as appendable
            self._common_bind_output_actions(typename(self.dtype), 1)
        self._dump_metadata_buffer()
        return
    def _check_data_shape_matching(self, data):
        # Scalars have no shape to validate; intentionally a no-op.
        pass
    @property
    def storage_type(self):
        # Scalars always live in the NetCDF 'variables' collection.
        return 'variables'
    @property
    def _on_disk_dtype(self):
        """
        Allow overwriting the dtype for storage for extending this method to cast data as a different type on disk
        This is the property to overwrite the cast dtype if it is different than the input/output dtype
        """
        return self.dtype
class NCInt(NCScalar):
    """NetCDF codec for Python integers stored as scalar variables."""

    @property
    def dtype(self):
        # Python-side type this codec produces
        return int

    @staticmethod
    def dtype_string():
        # Type tag written to/read from disk
        return "int"

    @property
    def _encoder(self):
        # Ints are NetCDF-native; store as-is
        return simple_encoder

    @property
    def _decoder(self):
        # Unwrap back to a true Python int on read
        return scalar_decoder_generator(int)
class NCFloat(NCScalar):
    """NetCDF codec for Python floats stored as scalar variables."""

    @property
    def dtype(self):
        # Python-side type this codec produces
        return float

    @staticmethod
    def dtype_string():
        # Type tag written to/read from disk
        return "float"

    @property
    def _encoder(self):
        # Floats are NetCDF-native; store as-is
        return simple_encoder

    @property
    def _decoder(self):
        # Unwrap back to a true Python float on read
        return scalar_decoder_generator(float)
class NCString(NCScalar):
    """NetCDF codec for strings stored as scalar variables."""

    @property
    def dtype(self):
        # Python-side type this codec produces
        return str

    @staticmethod
    def dtype_string():
        # Type tag written to/read from disk
        return "str"

    @property
    def _encoder(self):
        # Strings are boxed into a 1-element object array for storage
        return nc_string_encoder

    @property
    def _decoder(self):
        # Unbox back to str (or an array of str) on read
        return nc_string_decoder
# Array
class NCArray(NCVariableCodec):
"""
NetCDF Codec for numpy arrays
"""
@property
def _encoder(self):
return simple_encoder
@property
def _decoder(self):
return nc_numpy_array_decoder
@property
def dtype(self):
return np.ndarray
@staticmethod
def dtype_string():
return "numpy.ndarray"
def _bind_write(self, data):
try:
self._bind_read()
except KeyError:
data_shape, data_base_type, data_type_name = self._determine_data_information(data)
dims = []
for length in | |
"dtype"):
rhs_dtype = rhs.dtype
else:
try:
rhs_dtype = np.dtype(rhs)
except:
rhs_dtype = None
if dtype is None:
dtype = np.result_type(self.dtype, rhs_dtype)
elif dtype == "float":
dtype = (
np.float32
if rhs_dtype == np.float32 and self.dtype == np.float32
else np.float64
)
if isinstance(new_array_size, tuple):
if len(new_array_size) > 0:
new_ndarray = empty(new_array_size, local_border=lb, dtype=dtype)
else:
res = getattr(selfview, op)(rhsview)
if isinstance(res, np.ndarray):
return fromarray(res)
elif isinstance(res, numbers.Number) and new_array_size == ():
return array(res)
else:
return res
else: # must be scalar output
res = getattr(selfview, op)(rhsview)
return res
if reverse:
deferred_op.add_op(
["", new_ndarray, " = ", rhsview, optext, selfview],
new_ndarray,
imports=imports,
)
else:
deferred_op.add_op(
["", new_ndarray, " = ", selfview, optext, rhsview],
new_ndarray,
imports=imports,
)
t1 = timer()
dprint(4, "BINARY_OP:", optext, "time", (t1 - t0) * 1000)
return new_ndarray
    def __setitem__(self, key, value):
        """Assign into the distributed array via the deferred-op queue.

        Only full-integer indexing (unsupported) and slice/partial indexing with a
        scalar or size-matched ndarray RHS are handled; anything else aborts.
        """
        dprint(1, "ndarray::__setitem__:", key, type(key), value, type(value))
        if self.readonly:
            raise ValueError("assignment destination is read-only")
        if not isinstance(key, tuple):
            key = (key,)
        # Per-element assignment (all-int index of full rank) is not implemented.
        if all([isinstance(i, int) for i in key]) and len(key) == len(self.size):
            print("Setting individual element is not handled yet!")
            assert 0
        # Slice (or partial) indexing: build a view and queue the assignment.
        if any([isinstance(i, slice) for i in key]) or len(key) < len(self.size):
            view = self[key]
            if isinstance(value, (int, bool, float, complex)):
                # Scalar broadcast into the view
                deferred_op.add_op(["", view, " = ", value, ""], view)
                return
            elif view.size == value.size:
                # avoid adding code in case of inplace operations
                # (same backing array and identical distribution means a no-op copy)
                if not (
                    view.gid == value.gid
                    and shardview.dist_is_eq(view.distribution, value.distribution)
                ):
                    deferred_op.add_op(["", view, " = ", value, ""], view)
                return
            else:
                # TODO: Should try to broadcast value to view.size before giving up
                print("Mismatched sizes", view.size, value.size)
                assert 0
        """
        if isinstance(key[0], slice):
            if key.start is None and key.stop is None: # a[:] = b
                if isinstance(value, ndarray):
                    if self.size == value.size:
                        deferred_op.add_op(["", self, " = ", value, ""], self)
                        #[remote_states[i].setitem1.remote(self.gid, value.gid) for i in range(num_workers)]
                        return
                elif isinstance(value, (int, float, complex)):
                    deferred_op.add_op(["", self, " = ", value, ""], self)
                    return
            else: # a[s:e] = b
                view = self[key]
                if view.size == value.size:
                    deferred_op.add_op(["", view, " = ", value, ""], view)
                    return
        """
        # Need to handle all possible remaining cases.
        print("Don't know how to set index", key, " of dist array of size", self.size)
        assert 0
def __getitem__(self, index):
indhash = pickle.dumps(index)
if indhash not in self.getitem_cache:
self.getitem_cache[indhash] = self.__getitem__real(index)
return self.getitem_cache[indhash]
    def __getitem__real(self, index):
        """Uncached indexing: fetch one element remotely or construct a sliced view.

        Full-rank integer indexing returns the element from its owning worker;
        slice/partial indexing returns a zero-copy ndarray view with a remapped
        distribution. Anything else aborts.
        """
        dprint(1, "ndarray::__getitem__:", index, type(index))
        if not isinstance(index, tuple):
            index = (index,)
        # If all the indices are integers and the number of indices equals the number of array dimensions.
        if all([isinstance(i, int) for i in index]) and len(index) == len(self.size):
            # Flush pending deferred ops so the element value is up to date
            deferred_op.do_ops()
            owner = shardview.find_index(
                self.distribution, index
            )  # find_index(self.distribution, index)
            dprint(2, "owner:", owner)
            # ret = ray.get(remote_states[owner].getitem_global.remote(self.gid, index, self.distribution[owner]))
            ret = remote_call(
                owner, "getitem_global", self.gid, index, self.distribution[owner]
            )
            return ret
        # If any of the indices are slices or the number of indices is less than the number of array dimensions.
        if any([isinstance(i, slice) for i in index]) or len(index) < len(self.size):
            # check for out-of-bounds
            for i in range(len(index)):
                if isinstance(index[i], int) and index[i] >= self.size[i]:
                    raise IndexError(
                        "index "
                        + str(index[i])
                        + " is out of bounds for axis "
                        + str(i)
                        + " with size "
                        + str(self.size[i])
                    )
            # make sure array distribution can't change (ie, not flexible or is already constructed)
            if self.bdarray.flex_dist or not self.bdarray.remote_constructed:
                deferred_op.do_ops()
            num_dim = len(self.size)
            # Normalize the index into full-rank slices with explicit start/stop
            cindex = canonical_index(index, self.size)
            dim_sizes = tuple([max(0, x.stop - x.start) for x in cindex])
            dprint(2, "getitem slice:", cindex, dim_sizes)
            # sdistribution = [
            #    [[max(self.distribution[i][0][j], cindex[j].start) for j in range(num_dim)],
            #     [max(max(self.distribution[i][0][j]-1, cindex[j].start), min(self.distribution[i][1][j], cindex[j].stop-1)) for j in range(num_dim)]]
            #    for i in range(len(self.distribution))]
            sdistribution = shardview.slice_distribution(cindex, self.distribution)
            # reduce dimensionality as needed
            # axismap keeps only the axes that remain (sliced axes or trailing
            # unindexed axes); integer-indexed axes are dropped.
            axismap = [
                i
                for i in range(len(dim_sizes))
                if i >= len(index) or isinstance(index[i], slice)
            ]
            if len(axismap) < len(dim_sizes):
                dim_sizes, sdistribution = shardview.remap_axis(
                    dim_sizes, sdistribution, axismap
                )
            dprint(2, "getitem slice:", dim_sizes, sdistribution)
            # deferred_op.add_op(["", self, " = ", value, ""])
            # return ndarray(self.gid, tuple(dim_sizes), np.asarray(sdistribution))
            # Note: slices have local border set to 0 -- otherwise may corrupt data in the array
            return ndarray(
                dim_sizes,
                gid=self.gid,
                distribution=sdistribution,
                local_border=0,
                readonly=self.readonly,
                dtype=self.dtype,
            )
        print("Don't know how to get index", index, " of dist array of size", self.size)
        assert 0  # Handle other types
    def get_remote_ranges(self, required_division):
        # Delegate to the module-level get_remote_ranges with this array's distribution.
        return get_remote_ranges(self.distribution, required_division)
    def __matmul__(self, rhs):
        # Support the `a @ b` operator via the module-level matmul.
        return matmul(self, rhs)
    def reshape(self, newshape):
        # Delegate to the module-level reshape.
        return reshape(self, newshape)
    def reshape_copy(self, newshape):
        # Delegate to the module-level reshape_copy (copying variant of reshape).
        return reshape_copy(self, newshape)
def mean(self, axis=None, dtype=None):
n = np.prod(self.shape) if axis is None else self.shape[axis]
s = 1 / n
rv = s * self.sum(axis=axis, dtype=dtype)
if dtype is not None:
if isinstance(rv, ndarray):
rv = rv.astype(dtype)
else:
rv = np.mean([rv], dtype=dtype)
return rv
# def __len__(self):
# return self.size[0]
def __array_function__(self, func, types, args, kwargs):
dprint(
2,
"__array_function__",
func,
types,
args,
kwargs,
func in HANDLED_FUNCTIONS,
)
for arg in args:
dprint(4, "arg:", arg, type(arg))
new_args = []
if func not in HANDLED_FUNCTIONS:
return NotImplemented
hf = HANDLED_FUNCTIONS[func]
if hf[1]:
new_args.append(self)
for arg in args:
if isinstance(arg, np.ndarray):
new_args.append(fromarray(arg))
elif isinstance(arg, ndarray):
new_args.append(arg)
else:
return NotImplemented
return hf[0](*new_args, **kwargs)
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        """NumPy __array_ufunc__ protocol hook.

        __call__ is routed to this class's operator methods (e.g. multiply ->
        __mul__/__rmul__); reduce is routed to sreduce. Other methods are
        unsupported.
        """
        dprint(2, "__array_ufunc__", ufunc, type(ufunc), method, len(inputs), kwargs)
        real_args = []
        if method == "__call__":
            # Convert inputs: plain NumPy arrays are promoted to distributed arrays,
            # distributed arrays and numbers pass through, anything else bails out.
            for arg in inputs:
                if isinstance(arg, np.ndarray):
                    real_args.append(fromarray(arg))
                elif isinstance(arg, ndarray):
                    real_args.append(arg)
                elif isinstance(arg, numbers.Number):
                    real_args.append(arg)
                else:
                    print(type(arg))
                    return NotImplemented
            # If the first operand is not one of ours, use the reflected operator
            isreversed = not isinstance(inputs[0], ndarray)
            # Translate ufunc name to operator stem where they differ (see ufunc_map)
            if ufunc.__name__ in ufunc_map:
                mapname = ufunc_map[ufunc.__name__]
            else:
                mapname = ufunc.__name__
            if isreversed:
                mapname = "r" + mapname
                real_args = real_args[::-1]
            # Lookup order: dunder form, then bare stem, then the raw ufunc name
            attrres = getattr(real_args[0], "__" + mapname + "__", None)
            if attrres is None:
                attrres = getattr(real_args[0], mapname, None)
            if attrres is None:
                attrres = getattr(real_args[0], ufunc.__name__, None)
            assert attrres is not None
            dprint(2, "attrres:", attrres, type(attrres), real_args)
            return attrres(*real_args[1:], **kwargs)
        elif method == "reduce":
            return sreduce(lambda x: x, ufunc, *inputs)
        else:
            return NotImplemented
# We only have to put functions here where the ufunc name is different from the
# Python operation name.
# Maps NumPy ufunc names to the dunder-method stems used by __array_ufunc__
# (e.g. "multiply" -> "mul", so "__mul__" / "__rmul__" can be looked up).
ufunc_map = {
    "multiply": "mul",
    "subtract": "sub",
    "divide": "div",
    "true_divide": "truediv",
    "floor_divide": "floordiv",
}
def dot(a, b, out=None):
    """
    Dot product of two distributed arrays.

    Only 1-D and 2-D operands are supported, where dot is equivalent to matmul.

    Parameters
    ----------
    a, b : arrays with a .shape attribute
        Operands; each must have at most 2 dimensions.
    out : optional
        Output array, forwarded to matmul.

    Raises
    ------
    NotImplementedError
        If either operand has more than 2 dimensions.
    """
    ashape = a.shape
    bshape = b.shape
    if len(ashape) <= 2 and len(bshape) <= 2:
        return matmul(a, b, out=out)
    # Was `print(...)` + `assert 0`: assert is stripped under -O, which would have
    # silently returned None for unsupported inputs. Raise explicitly instead.
    raise NotImplementedError(
        "dot for matrices higher than 2 dimensions not currently supported."
    )
# --------------- Global variables to hold timings of parts of Ramba -------------
# The dict value {0:(0,0)} is explained as follows:
# The first 0 is an identifier. If it is 0 then it corresponds to the overall time for
# that key entry. Other key values indicate sub-parts of that time.
# In the (0,0) tuple, the first zero is a counter of how many times have been recorded
# and the second 0 is the total time.
# Accumulators are reset by reset_timing() and read by get_timing()/get_timing_str().
time_dict = {
    "matmul_b_c_not_dist": {0: (0, 0)},
    "matmul_c_not_dist_a_b_dist_match": {0: (0, 0)},
    "matmul_c_not_dist_a_b_dist_non_match": {0: (0, 0)},
    "matmul_general": {0: (0, 0)},
}
# Numba >= 0.53 exposes the event API; record compile events on the driver so
# get_timing() can report compilation cost.
if in_driver() and numba.version_info.short >= (0, 53):
    global compile_recorder  # NOTE(review): `global` at module scope is redundant; kept as-is
    compile_recorder = numba.core.event.RecordingListener()
    numba.core.event.register("numba:compile", compile_recorder)
def reset_timing():
    """Zero all timing accumulators, locally and on the remote workers."""
    for key in list(time_dict):
        time_dict[key] = {0: (0, 0)}
    if numba.version_info.short >= (0, 53):
        # Compile-event recording only exists on Numba >= 0.53
        compile_recorder.buffer = []
    remote_exec_all("reset_compile_stats")
    remote_exec_all("reset_def_ops_stats")
def get_timing(details=False):
    """Collect timing data, folding in the slowest worker's compile/deferred-op stats."""
    def slowest(stats):
        # Keep the entry with the largest accumulated time (tuple index 1);
        # ties keep the later entry, matching the original reduce behavior.
        return functools.reduce(lambda best, cur: best if best[1] > cur[1] else cur, stats)

    if numba.version_info.short >= (0, 53):
        driver_summary = rec_buf_summary(compile_recorder.buffer)
        worker_max = slowest(remote_call_all("get_compile_stats"))
        time_dict["numba_compile_time"] = {
            0: tuple(map(operator.add, driver_summary, worker_max))
        }
    worker_max = slowest(remote_call_all("get_def_ops_stats"))
    time_dict["remote_deferred_ops"] = {0: worker_max}
    if details:
        return time_dict
    # Summary view: just the overall (key 0) entry per timer
    return {key: time_dict[key][0] for key in time_dict.keys()}
def get_timing_str(details=False):
    """Render timing data as text: one line per timer (details) or a single line."""
    timings = get_timing(details=details)
    parts = []
    if details:
        for name, entry in timings.items():
            parts.append(name + ": " + str(entry[0][1]) + "s(" + str(entry[0][0]) + ")\n")
            # Indented sub-timers follow their parent entry
            for sub_key, sub_val in entry.items():
                if sub_key != 0:
                    parts.append("    " + sub_key + ": " + str(sub_val[1]) + "s(" + str(sub_val[0]) + ")\n")
    else:
        for name, entry in timings.items():
            parts.append(name + ": " + str(entry[1]) + "s(" + str(entry[0]) + ") ")
    return "".join(parts)
def add_time(time_name, val):
tindex = time_dict[time_name]
assert isinstance(tindex, dict)
cur_val = tindex[0]
assert isinstance(cur_val, tuple)
tindex[0] = (cur_val[0] + 1, cur_val[1] + | |
7.25162339277303*(1.27323954473516*m.x117)**2.435*(m.x14 - m.x21) == 0)
# Generated nonlinear head-loss constraints: SignPower(flow, 1.852) balanced
# against a pipe coefficient times (1.27323954473516*area)**2.435 times the
# head difference between the pipe's end nodes (1.27323954473516 = 4/pi).
# NOTE(review): exponents 1.852 / 2.435 suggest a Hazen-Williams-style pipe
# model -- confirm against the model generator.
m.c60 = Constraint(expr=SignPower(m.x60,1.852) - 2.58578563053754*(1.27323954473516*m.x118)**2.435*(m.x15 - m.x16) == 0)
m.c61 = Constraint(expr=SignPower(m.x61,1.852) - 5.84921218726644*(1.27323954473516*m.x119)**2.435*(m.x15 - m.x22) == 0)
m.c62 = Constraint(expr=SignPower(m.x62,1.852) - 5.74472389081144*(1.27323954473516*m.x120)**2.435*(m.x16 - m.x25) == 0)
m.c63 = Constraint(expr=SignPower(m.x63,1.852) - 3.47546926168853*(1.27323954473516*m.x121)**2.435*(m.x16 - m.x29) == 0)
m.c64 = Constraint(expr=SignPower(m.x64,1.852) - 1.26169886029378*(1.27323954473516*m.x122)**2.435*(m.x17 - m.x2) == 0)
m.c65 = Constraint(expr=SignPower(m.x65,1.852) - 2.01506107832427*(1.27323954473516*m.x123)**2.435*(m.x17 - m.x18) == 0)
m.c66 = Constraint(expr=SignPower(m.x66,1.852) - 5.63045889679459*(1.27323954473516*m.x124)**2.435*(m.x18 - m.x10) == 0)
m.c67 = Constraint(expr=SignPower(m.x67,1.852) - 4.74441317718661*(1.27323954473516*m.x125)**2.435*(m.x19 - m.x12) == 0)
m.c68 = Constraint(expr=SignPower(m.x68,1.852) - 1.90579854454696*(1.27323954473516*m.x126)**2.435*(m.x19 - m.x20) == 0)
m.c69 = Constraint(expr=SignPower(m.x69,1.852) - 4.34297959554316*(1.27323954473516*m.x127)**2.435*(m.x20 - m.x15) == 0)
m.c70 = Constraint(expr=SignPower(m.x70,1.852) - 2.25014674175293*(1.27323954473516*m.x128)**2.435*(m.x21 - m.x6) == 0)
m.c71 = Constraint(expr=SignPower(m.x71,1.852) - 3.20345143982431*(1.27323954473516*m.x129)**2.435*(m.x21 - m.x22) == 0)
m.c72 = Constraint(expr=SignPower(m.x72,1.852) - 1.38643672163203*(1.27323954473516*m.x130)**2.435*(m.x22 - m.x7) == 0)
m.c73 = Constraint(expr=SignPower(m.x73,1.852) - 4.5542709920508*(1.27323954473516*m.x131)**2.435*(m.x22 - m.x23) == 0)
m.c74 = Constraint(expr=SignPower(m.x74,1.852) - 6.29639489843998*(1.27323954473516*m.x132)**2.435*(m.x23 - m.x25) == 0)
m.c75 = Constraint(expr=SignPower(m.x75,1.852) - 3.56699108862049*(1.27323954473516*m.x133)**2.435*(m.x24 - m.x8) == 0)
m.c76 = Constraint(expr=SignPower(m.x76,1.852) - 2.24097335375622*(1.27323954473516*m.x134)**2.435*(m.x24 - m.x23) == 0)
m.c77 = Constraint(expr=SignPower(m.x77,1.852) - 3.20518902281425*(1.27323954473516*m.x135)**2.435*(m.x25 - m.x8) == 0)
m.c78 = Constraint(expr=SignPower(m.x78,1.852) - 4.15407143211664*(1.27323954473516*m.x136)**2.435*(m.x26 - m.x15) == 0)
m.c79 = Constraint(expr=SignPower(m.x79,1.852) - 2.62207182303441*(1.27323954473516*m.x137)**2.435*(m.x26 - m.x27) == 0)
m.c80 = Constraint(expr=SignPower(m.x80,1.852) - 4.76209659489144*(1.27323954473516*m.x138)**2.435*(m.x27 - m.x16) == 0)
m.c81 = Constraint(expr=SignPower(m.x81,1.852) - 2.69026479043292*(1.27323954473516*m.x139)**2.435*(m.x28 - m.x9) == 0)
m.c82 = Constraint(expr=SignPower(m.x82,1.852) - 3.17164259627557*(1.27323954473516*m.x140)**2.435*(m.x28 - m.x29) == 0)
m.c83 = Constraint(expr=SignPower(m.x83,1.852) - 8.337448482802*(1.27323954473516*m.x141)**2.435*(m.x29 - m.x30) == 0)
m.c84 = Constraint(expr=SignPower(m.x84,1.852) - 2.19824844908102*(1.27323954473516*m.x142)**2.435*(m.x29 - m.x33) == 0)
m.c85 = Constraint(expr=SignPower(m.x85,1.852) - 3.80990755137712*(1.27323954473516*m.x143)**2.435*(m.x30 - m.x9) == 0)
m.c86 = Constraint(expr=SignPower(m.x86,1.852) - 3.94042951550282*(1.27323954473516*m.x144)**2.435*(m.x30 - m.x35) == 0)
m.c87 = Constraint(expr=SignPower(m.x87,1.852) - 2.60573987969834*(1.27323954473516*m.x145)**2.435*(m.x31 - m.x10) == 0)
m.c88 = Constraint(expr=SignPower(m.x88,1.852) - 7.88809158976929*(1.27323954473516*m.x146)**2.435*(m.x31 - m.x34) == 0)
m.c89 = Constraint(expr=SignPower(m.x89,1.852) - 6.02208062388374*(1.27323954473516*m.x147)**2.435*(m.x32 - m.x27) == 0)
m.c90 = Constraint(expr=SignPower(m.x90,1.852) - 3.27286990428464*(1.27323954473516*m.x148)**2.435*(m.x32 - m.x33) == 0)
m.c91 = Constraint(expr=SignPower(m.x91,1.852) - 13.6077527050913*(1.27323954473516*m.x149)**2.435*(m.x33 - m.x34) == 0)
m.c92 = Constraint(expr=SignPower(m.x92,1.852) - 2.85346368669568*(1.27323954473516*m.x150)**2.435*(m.x34 - m.x35) == 0)
m.c93 = Constraint(expr=SignPower(m.x93,1.852) - 5.68394047102168*(1.27323954473516*m.x151)**2.435*(m.x35 - m.x36) == 0)
m.c94 = Constraint(expr=SignPower(m.x94,1.852) - 2.2432064580757*(1.27323954473516*m.x152)**2.435*(m.x36 - m.x1) == 0)
m.c95 = Constraint(expr=SignPower(m.x95,1.852) - 472.733328974874*(1.27323954473516*m.x153)**2.435*(m.x37 - m.x1) == 0)
# Absolute-value linearization: for each flow variable x_i (i = 38..95), the
# paired variable x_{i+58} bounds its magnitude via the pair of constraints
# x_i - x_{i+58} <= 0 and x_i + x_{i+58} >= 0, i.e. |x_i| <= x_{i+58}.
m.c96 = Constraint(expr= m.x38 - m.x96 <= 0)
m.c97 = Constraint(expr= m.x39 - m.x97 <= 0)
m.c98 = Constraint(expr= m.x40 - m.x98 <= 0)
m.c99 = Constraint(expr= m.x41 - m.x99 <= 0)
m.c100 = Constraint(expr= m.x42 - m.x100 <= 0)
m.c101 = Constraint(expr= m.x43 - m.x101 <= 0)
m.c102 = Constraint(expr= m.x44 - m.x102 <= 0)
m.c103 = Constraint(expr= m.x45 - m.x103 <= 0)
m.c104 = Constraint(expr= m.x46 - m.x104 <= 0)
m.c105 = Constraint(expr= m.x47 - m.x105 <= 0)
m.c106 = Constraint(expr= m.x48 - m.x106 <= 0)
m.c107 = Constraint(expr= m.x49 - m.x107 <= 0)
m.c108 = Constraint(expr= m.x50 - m.x108 <= 0)
m.c109 = Constraint(expr= m.x51 - m.x109 <= 0)
m.c110 = Constraint(expr= m.x52 - m.x110 <= 0)
m.c111 = Constraint(expr= m.x53 - m.x111 <= 0)
m.c112 = Constraint(expr= m.x54 - m.x112 <= 0)
m.c113 = Constraint(expr= m.x55 - m.x113 <= 0)
m.c114 = Constraint(expr= m.x56 - m.x114 <= 0)
m.c115 = Constraint(expr= m.x57 - m.x115 <= 0)
m.c116 = Constraint(expr= m.x58 - m.x116 <= 0)
m.c117 = Constraint(expr= m.x59 - m.x117 <= 0)
m.c118 = Constraint(expr= m.x60 - m.x118 <= 0)
m.c119 = Constraint(expr= m.x61 - m.x119 <= 0)
m.c120 = Constraint(expr= m.x62 - m.x120 <= 0)
m.c121 = Constraint(expr= m.x63 - m.x121 <= 0)
m.c122 = Constraint(expr= m.x64 - m.x122 <= 0)
m.c123 = Constraint(expr= m.x65 - m.x123 <= 0)
m.c124 = Constraint(expr= m.x66 - m.x124 <= 0)
m.c125 = Constraint(expr= m.x67 - m.x125 <= 0)
m.c126 = Constraint(expr= m.x68 - m.x126 <= 0)
m.c127 = Constraint(expr= m.x69 - m.x127 <= 0)
m.c128 = Constraint(expr= m.x70 - m.x128 <= 0)
m.c129 = Constraint(expr= m.x71 - m.x129 <= 0)
m.c130 = Constraint(expr= m.x72 - m.x130 <= 0)
m.c131 = Constraint(expr= m.x73 - m.x131 <= 0)
m.c132 = Constraint(expr= m.x74 - m.x132 <= 0)
m.c133 = Constraint(expr= m.x75 - m.x133 <= 0)
m.c134 = Constraint(expr= m.x76 - m.x134 <= 0)
m.c135 = Constraint(expr= m.x77 - m.x135 <= 0)
m.c136 = Constraint(expr= m.x78 - m.x136 <= 0)
m.c137 = Constraint(expr= m.x79 - m.x137 <= 0)
m.c138 = Constraint(expr= m.x80 - m.x138 <= 0)
m.c139 = Constraint(expr= m.x81 - m.x139 <= 0)
m.c140 = Constraint(expr= m.x82 - m.x140 <= 0)
m.c141 = Constraint(expr= m.x83 - m.x141 <= 0)
m.c142 = Constraint(expr= m.x84 - m.x142 <= 0)
m.c143 = Constraint(expr= m.x85 - m.x143 <= 0)
m.c144 = Constraint(expr= m.x86 - m.x144 <= 0)
m.c145 = Constraint(expr= m.x87 - m.x145 <= 0)
m.c146 = Constraint(expr= m.x88 - m.x146 <= 0)
m.c147 = Constraint(expr= m.x89 - m.x147 <= 0)
m.c148 = Constraint(expr= m.x90 - m.x148 <= 0)
m.c149 = Constraint(expr= m.x91 - m.x149 <= 0)
m.c150 = Constraint(expr= m.x92 - m.x150 <= 0)
m.c151 = Constraint(expr= m.x93 - m.x151 <= 0)
m.c152 = Constraint(expr= m.x94 - m.x152 <= 0)
m.c153 = Constraint(expr= m.x95 - m.x153 <= 0)
m.c154 = Constraint(expr= m.x38 + m.x96 >= 0)
m.c155 = Constraint(expr= m.x39 + m.x97 >= 0)
m.c156 = Constraint(expr= m.x40 + m.x98 >= 0)
m.c157 = Constraint(expr= m.x41 + m.x99 >= 0)
m.c158 = Constraint(expr= m.x42 + m.x100 >= 0)
m.c159 = Constraint(expr= m.x43 + m.x101 >= 0)
m.c160 = Constraint(expr= m.x44 + m.x102 >= 0)
m.c161 = Constraint(expr= m.x45 + m.x103 >= 0)
m.c162 = Constraint(expr= m.x46 + m.x104 >= 0)
m.c163 = Constraint(expr= m.x47 + m.x105 >= 0)
m.c164 = Constraint(expr= m.x48 + m.x106 >= 0)
m.c165 = Constraint(expr= m.x49 + m.x107 >= 0)
m.c166 = Constraint(expr= m.x50 + m.x108 >= 0)
m.c167 = Constraint(expr= m.x51 + m.x109 >= 0)
m.c168 = Constraint(expr= m.x52 + m.x110 >= 0)
m.c169 = Constraint(expr= m.x53 + m.x111 >= 0)
m.c170 = Constraint(expr= m.x54 + m.x112 >= 0)
m.c171 = Constraint(expr= m.x55 + m.x113 >= 0)
m.c172 = Constraint(expr= m.x56 + m.x114 >= 0)
m.c173 = Constraint(expr= m.x57 + m.x115 >= 0)
m.c174 = Constraint(expr= m.x58 + m.x116 >= 0)
m.c175 = Constraint(expr= m.x59 + m.x117 >= 0)
m.c176 = Constraint(expr= m.x60 + m.x118 >= 0)
m.c177 = Constraint(expr= m.x61 + m.x119 >= 0)
m.c178 = Constraint(expr= m.x62 + m.x120 >= 0)
m.c179 = Constraint(expr= m.x63 + m.x121 >= 0)
m.c180 = Constraint(expr= m.x64 + m.x122 >= 0)
m.c181 = Constraint(expr= m.x65 + m.x123 >= 0)
m.c182 = Constraint(expr= m.x66 + m.x124 >= 0)
m.c183 = Constraint(expr= m.x67 + m.x125 >= 0)
m.c184 = Constraint(expr= m.x68 + m.x126 >= 0)
m.c185 = Constraint(expr= m.x69 + m.x127 >= 0)
m.c186 = Constraint(expr= m.x70 + m.x128 >= 0)
m.c187 = Constraint(expr= m.x71 + m.x129 >= 0)
m.c188 = Constraint(expr= m.x72 + m.x130 >= 0)
m.c189 = Constraint(expr= m.x73 + m.x131 >= 0)
m.c190 = Constraint(expr= m.x74 + m.x132 >= 0)
m.c191 = Constraint(expr= m.x75 + m.x133 >= 0)
m.c192 = Constraint(expr= m.x76 + m.x134 >= 0)
m.c193 = Constraint(expr= m.x77 + m.x135 >= 0)
m.c194 = Constraint(expr= m.x78 + m.x136 >= 0)
m.c195 = Constraint(expr= m.x79 + m.x137 >= 0)
m.c196 = Constraint(expr= m.x80 + m.x138 >= 0)
m.c197 = Constraint(expr= m.x81 + m.x139 >= 0)
m.c198 = Constraint(expr= m.x82 + m.x140 >= 0)
m.c199 = Constraint(expr= m.x83 + m.x141 >= 0)
m.c200 = Constraint(expr= m.x84 + m.x142 >= 0)
m.c201 = Constraint(expr= m.x85 + m.x143 >= 0)
m.c202 = Constraint(expr= m.x86 + m.x144 >= 0)
m.c203 = Constraint(expr= m.x87 + m.x145 >= 0)
m.c204 = Constraint(expr= m.x88 + m.x146 >= 0)
m.c205 = Constraint(expr= m.x89 + m.x147 >= 0)
m.c206 = Constraint(expr= m.x90 + m.x148 >= 0)
m.c207 = Constraint(expr= m.x91 + m.x149 >= 0)
m.c208 = Constraint(expr= m.x92 + m.x150 >= 0)
m.c209 = Constraint(expr= m.x93 + m.x151 >= 0)
m.c210 = Constraint(expr= m.x94 + m.x152 >= 0)
m.c211 = Constraint(expr= m.x95 + m.x153 >= 0)
# Each x_96..x_104 equals a sum of binary selectors b_k weighted by seven fixed
# coefficients. NOTE(review): the coefficients equal pi*d^2/4 for
# d in {0.06, 0.08, 0.125, 0.15, 0.2, 0.25, 0.3}, i.e. candidate pipe
# cross-sectional areas -- confirm against the model generator.
m.c212 = Constraint(expr= m.x96 - 0.00282743338823081*m.b154 - 0.00502654824574367*m.b155 - 0.0122718463030851*m.b156
                          - 0.0176714586764426*m.b157 - 0.0314159265358979*m.b158 - 0.0490873852123405*m.b159
                          - 0.0706858347057703*m.b160 == 0)
m.c213 = Constraint(expr= m.x97 - 0.00282743338823081*m.b161 - 0.00502654824574367*m.b162 - 0.0122718463030851*m.b163
                          - 0.0176714586764426*m.b164 - 0.0314159265358979*m.b165 - 0.0490873852123405*m.b166
                          - 0.0706858347057703*m.b167 == 0)
m.c214 = Constraint(expr= m.x98 - 0.00282743338823081*m.b168 - 0.00502654824574367*m.b169 - 0.0122718463030851*m.b170
                          - 0.0176714586764426*m.b171 - 0.0314159265358979*m.b172 - 0.0490873852123405*m.b173
                          - 0.0706858347057703*m.b174 == 0)
m.c215 = Constraint(expr= m.x99 - 0.00282743338823081*m.b175 - 0.00502654824574367*m.b176 - 0.0122718463030851*m.b177
                          - 0.0176714586764426*m.b178 - 0.0314159265358979*m.b179 - 0.0490873852123405*m.b180
                          - 0.0706858347057703*m.b181 == 0)
m.c216 = Constraint(expr= m.x100 - 0.00282743338823081*m.b182 - 0.00502654824574367*m.b183 - 0.0122718463030851*m.b184
                          - 0.0176714586764426*m.b185 - 0.0314159265358979*m.b186 - 0.0490873852123405*m.b187
                          - 0.0706858347057703*m.b188 == 0)
m.c217 = Constraint(expr= m.x101 - 0.00282743338823081*m.b189 - 0.00502654824574367*m.b190 - 0.0122718463030851*m.b191
                          - 0.0176714586764426*m.b192 - 0.0314159265358979*m.b193 - 0.0490873852123405*m.b194
                          - 0.0706858347057703*m.b195 == 0)
m.c218 = Constraint(expr= m.x102 - 0.00282743338823081*m.b196 - 0.00502654824574367*m.b197 - 0.0122718463030851*m.b198
                          - 0.0176714586764426*m.b199 - 0.0314159265358979*m.b200 - 0.0490873852123405*m.b201
                          - 0.0706858347057703*m.b202 == 0)
m.c219 = Constraint(expr= m.x103 - 0.00282743338823081*m.b203 - 0.00502654824574367*m.b204 - 0.0122718463030851*m.b205
                          - 0.0176714586764426*m.b206 - 0.0314159265358979*m.b207 - 0.0490873852123405*m.b208
                          - 0.0706858347057703*m.b209 == 0)
m.c220 = Constraint(expr= m.x104 - 0.00282743338823081*m.b210 - 0.00502654824574367*m.b211 - 0.0122718463030851*m.b212
                          - 0.0176714586764426*m.b213 - 0.0314159265358979*m.b214 - 0.0490873852123405*m.b215
                          - 0.0706858347057703*m.b216 == 0)
m.c221 = Constraint(expr= m.x105 - | |
input climate data
###Convert thiessen polygons to raster by group
arcpy.PolygonToRaster_conversion(outFspPoly,"SS_GROUP",spgpMASK,"CELL_CENTER","NONE","#")
###Clip theissen groups by bias file, then save to ascii
arcpy.gp.Times_sa(BiFiIn,spgpMASK,BiFiOut)
gp.AddMessage("Generating regional bias files for spatial jackknifing: " + outFoldsp)
###Reclassify masks to leave one of the five groups out, producing one
###leave-one-out ASCII bias grid per excluded group (NODATA masks the group out)
arcpy.gp.Reclassify_sa(BiFiOut,"Value","1 1;2 1;3 1;4 1;5 NODATA",BiFiT,"DATA")
arcpy.RasterToASCII_conversion(BiFiT,BiFiABCD)
arcpy.gp.Reclassify_sa(BiFiOut,"Value","1 NODATA;2 1;3 1;4 1;5 1",BiFiT,"DATA")
arcpy.RasterToASCII_conversion(BiFiT,BiFiBCDE)
arcpy.gp.Reclassify_sa(BiFiOut,"Value","1 1;2 NODATA;3 1;4 1;5 1",BiFiT,"DATA")
arcpy.RasterToASCII_conversion(BiFiT,BiFiACDE)
arcpy.gp.Reclassify_sa(BiFiOut,"Value","1 1;2 1;3 NODATA;4 1;5 1",BiFiT,"DATA")
arcpy.RasterToASCII_conversion(BiFiT,BiFiABDE)
arcpy.gp.Reclassify_sa(BiFiOut,"Value","1 1;2 1;3 1;4 NODATA;5 1",BiFiT,"DATA")
arcpy.RasterToASCII_conversion(BiFiT,BiFiABCE)
gp.AddMessage("Generating test and training CSV files for spatial jackknifing: " + outFoldsp)
#create spatial groups inSHP: join the SS_GROUP assignment onto a copy of the
#input points, then clone it once per training (leave-one-out) and test (single
#group) shapefile; the clones are trimmed to their groups in the next step
arcpy.CopyFeatures_management(inSHP,outFsp2)
arcpy.JoinField_management(outFsp2,"UN_ID",outFsp,"UN_ID","SS_GROUP")
arcpy.CopyFeatures_management(outFsp2,outFspABCD)
arcpy.CopyFeatures_management(outFsp2,outFspBCDE)
arcpy.CopyFeatures_management(outFsp2,outFspACDE)
arcpy.CopyFeatures_management(outFsp2,outFspABDE)
arcpy.CopyFeatures_management(outFsp2,outFspABCE)
arcpy.CopyFeatures_management(outFsp2,outFspA)
arcpy.CopyFeatures_management(outFsp2,outFspB)
arcpy.CopyFeatures_management(outFsp2,outFspC)
arcpy.CopyFeatures_management(outFsp2,outFspD)
arcpy.CopyFeatures_management(outFsp2,outFspE)
#trim shps to groups
#Each copied shapefile is reduced to its jackknife subset by deleting the
#rows that do not belong.  The 4-group training shapefiles drop their one
#excluded SS_GROUP; the single-group test shapefiles drop every OTHER group.
#(Data-driven replacement for ten near-identical copy-pasted cursor loops;
#the original #ABDE comment on the ABCE block was also wrong.)
#Training subsets: (shapefile, excluded group) — same order as before:
#ABDE drops C(3), BCDE drops A(1), ACDE drops B(2), ABCE drops D(4), ABCD drops E(5).
for shp, excluded in ((outFspABDE, 3), (outFspBCDE, 1), (outFspACDE, 2),
                      (outFspABCE, 4), (outFspABCD, 5)):
    cursor = arcpy.da.UpdateCursor(shp, ["SS_GROUP"])
    for row in cursor:
        if row[0] == excluded:
            cursor.deleteRow()
    del cursor
#Test subsets: (shapefile, kept group) — delete rows from all other groups.
for shp, kept in ((outFspA, 1), (outFspB, 2), (outFspC, 3),
                  (outFspD, 4), (outFspE, 5)):
    cursor = arcpy.da.UpdateCursor(shp, ["SS_GROUP"])
    for row in cursor:
        if row[0] != kept:
            cursor.deleteRow()
    del cursor
gp.AddMessage("Writing test and training CSV files for spatial jackknifing: " + outFoldsp)
#Dump each *Sp_Occ.dbf attribute table to a raw CSV ("<name>T.csv"):
#header row of field names, then one comma-joined line per record.
theFilesB = glob.glob(outFoldGIS+"/*Sp_Occ.dbf")
for Z in theFilesB:
    inShp = str(Z).replace("\\","/")
    outNameA = os.path.split(inShp)[1]
    outNamespA = outNameA[:-4]          #strip ".dbf"
    CSVFile = outFoldGIS +"/" +outNamespA +"T.csv"
    #skip the geometry field; only attribute columns go to the CSV
    #(was "f.type <> 'Geometry'" — "!=" is equivalent and not Py2-only syntax)
    fieldnames = [f.name for f in arcpy.ListFields(inShp) if f.type != 'Geometry']
    with open(CSVFile, 'w') as f:
        f.write(','.join(fieldnames)+'\n') #csv headers
        with arcpy.da.SearchCursor(inShp, fieldnames) as cursor:
            for row in cursor:
                f.write(','.join([str(r) for r in row])+'\n')
    del cursor
#Rewrite each raw CSV ("*Sp_OccT.csv") keeping only columns 1, 3, 2 — the
#reduced 3-column layout MaxEnt expects (presumably id/species, Y, X —
#TODO confirm against the .dbf schema).  Binary modes are the Python 2
#csv-module convention this script targets.
theFilesC = glob.glob(outFoldGIS+"/*Sp_OccT.csv")
for Z in theFilesC:
    inCSV = str(Z).replace("\\","/")
    outNameA = os.path.split(inCSV)[1]
    outNamespA = outNameA[:-5]          #strip "T.csv"
    CSVFile = outFoldGIS +"/" +outNamespA +".csv"
    with open(inCSV,"rb") as source:
        rdr = csv.reader(source)
        with open(CSVFile,"wb") as result:
            wtr = csv.writer(result)
            for r in rdr:
                wtr.writerow((r[1], r[3], r[2]))
gp.AddMessage("Successfully created GIS layers for: " + outFoldsp)
gp.AddMessage("************************************************")
gp.AddMessage("************************************************")
gp.AddMessage("***Step 2 of 2 *********************************")
gp.AddMessage("Generating batch code for running MaxEnt models: " + outFoldsp)
#define text-folder path variants for the generated scripts:
#  *i  -> backslash separators (Windows batch style)
#  *ii -> forward-slash separators (Python style)
outFoldspFi =outFoldspF.replace("/","\\")
outFoldGISi=outFoldGIS.replace("/","\\")
outFoldspFii =outFoldspF.replace("\\","/")
outFoldGISii=outFoldGIS.replace("\\","/")
###Create Python Script 1 (importMask.py): copy the input bias file into the
###climate data folder as MaskTemp.asc so MaxEnt can pick it up as a layer.
OutBfin=BiFiIn.replace("/","\\")
PythSMoBF1 ="import shutil, os\n"
PythSMoBF1 +="shutil.copyfile('"+OutBfin+"','"+climatedataFolder+"\\MaskTemp.asc')\n"
#write Python Script 1 (separators normalized to "/" for the generated file)
newLine = PythSMoBF1.replace("\\","/")
#NOTE(review): "file" shadows the builtin; left as-is (doc-only edit)
file = open(outFoldGIS+"/importMask.py", "w")
file.write(newLine)
file.close()
#create the batch-file snippet that launches Script 1 (also starts myCommand)
RunPySpt1 = ("##start"+str(int(nSppRLabel))+"\n")
RunPySpt1+=("start "+PyLocT+" "+outFoldGIS+"/importMask.py").replace("/","\\")
RunPySpt1+=" \n"
myCommandOLD=RunPySpt1
###Create Python Script 2 (deleteMask.py): remove MaskTemp.asc and MaxEnt's
###cached .mxe/.info copies from the climate data folder after the runs.
PythSDeBF2 ="import os\n"
PythSDeBF2 +="os.remove('"+climatedataFolder+"\\MaskTemp.asc')\n"
PythSDeBF2 +="os.remove('"+climatedataFolder+"\\maxent.cache\\MaskTemp.mxe')\n"
PythSDeBF2 +="os.remove('"+climatedataFolder+"\\maxent.cache\\MaskTemp.info')\n"
#Write Python Script 2
newLine = PythSDeBF2.replace("\\","/")
file = open(outFoldGIS+"/deleteMask.py", "w")
file.write(newLine)
file.close()
#create the batch-file snippet that launches Script 2
RunPySpt2=("start "+PyLocT+" "+outFoldGIS+"\\deleteMask.py").replace("/","\\")
RunPySpt2+=" \n"
RunPySpt2+= ("##end"+str(int(nSppRLabel))+"\n")
#****************************************************
#Build the ranking script (PythAHd4, later written out as FinalRank.py):
#it prepends a header row to the pooled SUMSTATS_ALL.csv, then sorts the
#candidate models and saves the ranked list.  Everything below accumulates
#generated Python source as a string.
PythAHd4="import numpy as np \n"
PythAHd4+="import csv, os, operator \n"
PythAHd4+="csvfile = '"+outFoldspFii+"/"+outFoldsp+"_SUMSTATS_ALL.csv' \n"
PythAHd4+='resA = "Species,Regularization Multiplier, Feature Type, Feature N, weighted PR, AUC"\n'
PythAHd4+='file = open(csvfile, "r") \n'
PythAHd4+="filedata = file.read() \n"
PythAHd4+="file.close() \n"
PythAHd4+="newLine = resA+os.linesep+filedata \n"
PythAHd4+='file = open(csvfile, "w") \n'
PythAHd4+="file.write(newLine) \n"
PythAHd4+="file.close() \n"
#****************************************************
###NEWCODEsort — three alternative ranking strategies, selected by OERthenAUC
###NEWCODEsort-default (OER then AUC): sort by weighted PR (col 4) then AUC
###(col 5), per the resA header above; rows whose PR+AUC sum == 1.5 are
###dropped first (presumably a degenerate/default score — TODO confirm).
if OERthenAUC =='OERtoAUC':
    PythAHd4+="dataDO = np.genfromtxt('"+outFoldspFii+"/"+outFoldsp+"_SUMSTATS_ALL.csv', delimiter=',', usecols=(4,5),skiprows=1)\n"
    PythAHd4+="dataDO = dataDO[~np.isnan(dataDO).any(1)]\n"
    PythAHd4+="Dn = dataDO.shape[0]\n"
    PythAHd4+="dataDO2 = dataDO.sum(axis=1)\n"
    PythAHd4+="dataDO2 = dataDO2.reshape(Dn,1)\n"
    PythAHd4+="dataRO = np.genfromtxt('"+outFoldspFii+"/"+outFoldsp+"_SUMSTATS_ALL.csv', delimiter=',', usecols=(1,3,4,5),skiprows=1)\n"
    PythAHd4+="dataRO = dataRO[~np.isnan(dataRO).any(1)]\n"
    PythAHd4+="dataRO = np.concatenate((dataRO,dataDO2),axis=1)\n"
    PythAHd4+="dataRO = dataRO[~(dataRO[:,4]==1.5), :]\n"
    PythAHd4+="list1 = sorted(dataRO, key=operator.itemgetter(1))\n"
    PythAHd4+="list1 = sorted(list1, key=operator.itemgetter(3), reverse=True)\n"
    PythAHd4+="list1 = sorted(list1, key=operator.itemgetter(2), reverse=True)\n"
    PythAHd4+="np.savetxt('"+outFoldspFii+"/"+outFoldsp+"_SUMSTATS_RANKED_MODELS.csv', list1,delimiter=',',fmt='%1.15s')\n"
###Use this block for AUC first: same ranking but AUC outranks weighted PR
if OERthenAUC =='AUCtoOER':
    PythAHd4+="dataRO = np.genfromtxt('"+outFoldspFii+"/"+outFoldsp+"_SUMSTATS_ALL.csv', delimiter=',', usecols=(1,3,4,5),skiprows=1)\n"
    PythAHd4+="dataRO = dataRO[~np.isnan(dataRO).any(1)]\n"
    PythAHd4+="list1 = sorted(dataRO, key=operator.itemgetter(1))\n"
    PythAHd4+="list1 = sorted(list1, key=operator.itemgetter(2), reverse=True)\n"
    PythAHd4+="list1 = sorted(list1, key=operator.itemgetter(3), reverse=True)\n"
    PythAHd4+="np.savetxt('"+outFoldspFii+"/"+outFoldsp+"_SUMSTATS_RANKED_MODELS.csv', list1,delimiter=',',fmt='%1.15s')\n"
###maxPRandAUC: rank by the SUM of weighted PR and AUC (appended as col 4)
if OERthenAUC =='maxPRandAUC':
    PythAHd4+="dataDO = np.genfromtxt('"+outFoldspFii+"/"+outFoldsp+"_SUMSTATS_ALL.csv', delimiter=',', usecols=(4,5),skiprows=1)\n"
    PythAHd4+="dataDO = dataDO[~np.isnan(dataDO).any(1)]\n"
    PythAHd4+="Dn = dataDO.shape[0]\n"
    PythAHd4+="dataDO2 = dataDO.sum(axis=1)\n"
    PythAHd4+="dataDO2 = dataDO2.reshape(Dn,1)\n"
    PythAHd4+="dataRO = np.genfromtxt('"+outFoldspFii+"/"+outFoldsp+"_SUMSTATS_ALL.csv', delimiter=',', usecols=(1,3,4,5),skiprows=1)\n"
    PythAHd4+="dataRO = dataRO[~np.isnan(dataRO).any(1)]\n"
    PythAHd4+="dataRO = np.concatenate((dataRO,dataDO2),axis=1)\n"
    PythAHd4+="dataRO = dataRO[~(dataRO[:,4]==1.5), :]\n"
    PythAHd4+="list1 = sorted(dataRO, key=operator.itemgetter(1))\n"
    PythAHd4+="list1 = sorted(list1, key=operator.itemgetter(4), reverse=True)\n"
    PythAHd4+="np.savetxt('"+outFoldspFii+"/"+outFoldsp+"_SUMSTATS_RANKED_MODELS.csv', list1,delimiter=',',fmt='%1.15s')\n"
#****************************************************
#Continue FinalRank.py: re-open the ranked CSV and read the top (best) row's
#regularization and feature values into Reg/Feat for the Step 2 batch file.
PythAHd4+="input_file = open('"+outFoldspFii+"/"+outFoldsp+"_SUMSTATS_RANKED_MODELS.csv', 'r')\n"
PythAHd4+="data = csv.reader(input_file, delimiter=',', quoting=csv.QUOTE_NONE)\n"
PythAHd4+="line = next(data)\n"
PythAHd4+="Reg=line[0]\nFeat=line[1]\nRegN=float(Reg)\nFeatN=float(Feat)\n"
#Write Python Script 4
#python script 4 copy-write text for Step 2: prepend resA to the Step 2 batch
#file (these generated lines are indented — they sit inside a block of the
#generated script).
PythAHd4wr="    file = open('"+outFolderPy+"/Step2_Run_Optimized_MaxEnt_Models.bat', 'r')\n"
PythAHd4wr+='    filedata = file.read()\n    file.close()\n    newLine =resA+os.linesep+filedata \n'
PythAHd4wr+="    file = open('"+outFolderPy+"/Step2_Run_Optimized_MaxEnt_Models.bat', 'w')\n"
PythAHd4wr+='    file.write(newLine)\n    file.close()\n'
#****************************************************
#batch-file snippet that launches FinalRank.py (with short waits around it)
RunPyAHd4=("timeout 1 \nstart "+PyLocT+" "+outFoldGIS+"\\FinalRank.py").replace("/","\\")
RunPyAHd4+=" \ntimeout 1 \n"
##create (reset to empty) the pooled summary-stats CSV
csvfile = outFoldspF+"/"+outFoldsp+"_SUMSTATS_ALL.csv"
newLine = ""
file = open(csvfile, "w")
file.write(newLine)
file.close()
###Build DeleteASCIIs.py (PythDel5): generated code that walks each training
###subset folder and deletes every .asc it finds (post-run cleanup).
PythDel5="import os, fnmatch \n"
PythDel5+="src="+"'"+outFoldspABCD.replace("\\","/")+"'\n"
PythDel5+="for root, dirnames, filenames in os.walk(src):\n"
PythDel5+="    for filename in fnmatch.filter(filenames, '*.asc'):\n"
PythDel5+="        delFile=os.path.join(root, filename)\n"
PythDel5+="        os.remove(delFile)\n"
PythDel5+="src="+"'"+outFoldspACDE.replace("\\","/")+"'\n"
PythDel5+="for root, dirnames, filenames in os.walk(src):\n"
PythDel5+="    for filename in fnmatch.filter(filenames, '*.asc'):\n"
PythDel5+="        delFile=os.path.join(root, filename)\n"
PythDel5+="        os.remove(delFile)\n"
PythDel5+="src="+"'"+outFoldspBCDE.replace("\\","/")+"'\n"
PythDel5+="for root, dirnames, filenames in os.walk(src):\n"
PythDel5+="    for filename in fnmatch.filter(filenames, '*.asc'):\n"
PythDel5+="        delFile=os.path.join(root, filename)\n"
PythDel5+="        os.remove(delFile)\n"
PythDel5+="src="+"'"+outFoldspABDE.replace("\\","/")+"'\n"
PythDel5+="for root, dirnames, filenames in os.walk(src):\n"
PythDel5+="    for filename in fnmatch.filter(filenames, '*.asc'):\n"
PythDel5+="        delFile=os.path.join(root, filename)\n"
PythDel5+="        os.remove(delFile)\n"
PythDel5+="src="+"'"+outFoldspABCE.replace("\\","/")+"'\n"
PythDel5+="for root, dirnames, filenames in os.walk(src):\n"
PythDel5+="    for filename in fnmatch.filter(filenames, '*.asc'):\n"
PythDel5+="        delFile=os.path.join(root, filename)\n"
PythDel5+="        os.remove(delFile)\n"
###iterate through beta values (loop itself starts below)
#batch-file snippet that launches DeleteASCIIs.py
RunPyDel5=("start "+PyLocT+" "+outFoldGIS+"\\DeleteASCIIs.py").replace("/","\\")
RunPyDel5+=" \n"
#Write Python Script 5 (DeleteASCIIs.py)
newLine = PythDel5
file = open(outFoldGIS+"/DeleteASCIIs.py", "w")
file.write(newLine)
file.close()
###***************************
###iterate through beta values; "AutomaticSettings" means a single run at
###MaxEnt's default betamultiplier of 1
if BVal=="AutomaticSettings":
    BVal="1"
for Bn in BVal.split(';'):
Bvalx = str(Bn)
Bvalf = "b"+str(Bn).replace(".","_")
gp.AddMessage("Creating batch files for Beta value: "+Bvalx)
arcpy.CreateFolder_management(outFoldspABCD,Bvalf)
arcpy.CreateFolder_management(outFoldspBCDE,Bvalf)
arcpy.CreateFolder_management(outFoldspACDE,Bvalf)
arcpy.CreateFolder_management(outFoldspABDE,Bvalf)
arcpy.CreateFolder_management(outFoldspABCE,Bvalf)
arcpy.CreateFolder_management(outFoldspABCDE,Bvalf)
outFoldspABCD_B = outFoldspABCD+"/"+Bvalf
outFoldspBCDE_B = outFoldspBCDE+"/"+Bvalf
outFoldspACDE_B = outFoldspACDE+"/"+Bvalf
outFoldspABCDE_B = outFoldspABCDE+"/"+Bvalf
outFoldspABDE_B = outFoldspABDE+"/"+Bvalf
outFoldspABCE_B = outFoldspABCE+"/"+Bvalf
arcpy.CreateFolder_management(outFoldspABCD_B,"L")
arcpy.CreateFolder_management(outFoldspABCD_B,"LQ")
arcpy.CreateFolder_management(outFoldspABCD_B,"H")
arcpy.CreateFolder_management(outFoldspABCD_B,"LQH")
arcpy.CreateFolder_management(outFoldspABCD_B,"LQHPT")
arcpy.CreateFolder_management(outFoldspACDE_B,"L")
arcpy.CreateFolder_management(outFoldspACDE_B,"LQ")
arcpy.CreateFolder_management(outFoldspACDE_B,"H")
arcpy.CreateFolder_management(outFoldspACDE_B,"LQH")
arcpy.CreateFolder_management(outFoldspACDE_B,"LQHPT")
arcpy.CreateFolder_management(outFoldspBCDE_B,"L")
arcpy.CreateFolder_management(outFoldspBCDE_B,"LQ")
arcpy.CreateFolder_management(outFoldspBCDE_B,"H")
arcpy.CreateFolder_management(outFoldspBCDE_B,"LQH")
arcpy.CreateFolder_management(outFoldspBCDE_B,"LQHPT")
arcpy.CreateFolder_management(outFoldspABCDE_B,"L")
arcpy.CreateFolder_management(outFoldspABCDE_B,"LQ")
arcpy.CreateFolder_management(outFoldspABCDE_B,"H")
arcpy.CreateFolder_management(outFoldspABCDE_B,"LQH")
arcpy.CreateFolder_management(outFoldspABCDE_B,"LQHPT")
arcpy.CreateFolder_management(outFoldspABDE_B,"L")
arcpy.CreateFolder_management(outFoldspABDE_B,"LQ")
arcpy.CreateFolder_management(outFoldspABDE_B,"H")
arcpy.CreateFolder_management(outFoldspABDE_B,"LQH")
arcpy.CreateFolder_management(outFoldspABDE_B,"LQHPT")
arcpy.CreateFolder_management(outFoldspABCE_B,"L")
arcpy.CreateFolder_management(outFoldspABCE_B,"LQ")
arcpy.CreateFolder_management(outFoldspABCE_B,"H")
arcpy.CreateFolder_management(outFoldspABCE_B,"LQH")
arcpy.CreateFolder_management(outFoldspABCE_B,"LQHPT")
#Placeholders in the command template: TrAiNiNgDaT = location of the training CSV file (ABCD, BCDE, ACDE, ABDE, or ABCE); oUtFoLdEr = output folder (group folder + beta value); FeTuRe2R = feature classes (Linear, Linear+Quadratic, Hinge, Linear+Quadratic+Hinge); BiAsFiLeLoc = bias file
myCommandi = "java -mx512m -jar " + MaxEntJar + " -e " + climatedataFolder +" -s TrAiNiNgDaT -o oUtFoLdEr noautofeature FeTuRe2R pictures=true biasfile=BiAsFiLeLoc biastype=3 betamultiplier="+Bvalx+"eXtRaPaR"
# add options
if optSkipifExists == 'true':
myCommandi += " -S"
if int(pReps) > 1:
myCommandi +=pRepsSJ
if int(Nthreads) > 1:
myCommandi +=" threads="+str(Nthreads)
if doThres == "no threshold":
myCommandi +=' "applythresholdrule=10 percentile training presence"'
if doThres != "no threshold":
myCommandi +=' "applythresholdrule='+str(doThres)+'"'
if excVars != "":
myCommandi +=str(excVarsT)
if catVars != "":
myCommandi +=str(catVarsT)
if GUIsil == 'true' and supressWarnings == 'true':
myCommandi +=" -z warnings=false"
if GUIsil == 'true' and supressWarnings == 'false':
myCommandi +=" -z warnings=false"
if GUIsil == 'false' and supressWarnings == 'true':
myCommandi +="warnings=false"
if GUIsil == 'false' and supressWarnings == 'false':
myCommandi +="warnings=true"
myCommandi +=" -a \n"
myCommandiShT=myCommandi[:-2]
if int(pReps) <= 1:
myCommandi +="java -cp "+ MaxEntJar +" density.AUC TeStCsV oUtFoLdEr\\"
myCommandi +=outFoldsp+ ".asc >> "+ outFoldGISi +"\\tempAUC.csv \n"
myCommandi +="java -cp "+ MaxEntJar +" density.Getval TeStCsV oUtFoLdEr\\"
myCommandi +=outFoldsp+"_thresholded.asc >> "+outFoldGISi+"\\tempOC.csv | |
self.btn_execution.setMinimumSize(QSize(0, 45))
self.btn_execution.setFont(font)
self.btn_execution.setCursor(QCursor(Qt.PointingHandCursor))
self.btn_execution.setLayoutDirection(Qt.LeftToRight)
self.btn_execution.setStyleSheet(u"background-image: url(:/icons/images/icons/cil-terminal.png);")
self.verticalLayout_8.addWidget(self.btn_execution)
self.btn_monitoring = QPushButton(self.topMenu)
self.btn_monitoring.setObjectName(u"btn_monitoring")
sizePolicy2.setHeightForWidth(self.btn_monitoring.sizePolicy().hasHeightForWidth())
self.btn_monitoring.setSizePolicy(sizePolicy2)
self.btn_monitoring.setMinimumSize(QSize(0, 45))
self.btn_monitoring.setFont(font)
self.btn_monitoring.setCursor(QCursor(Qt.PointingHandCursor))
self.btn_monitoring.setLayoutDirection(Qt.LeftToRight)
self.btn_monitoring.setStyleSheet(u"background-image: url(:/icons/images/icons/cil-check.png);")
self.verticalLayout_8.addWidget(self.btn_monitoring)
self.verticalMenuLayout.addWidget(self.topMenu, 0, Qt.AlignTop)
self.bottomMenu = QFrame(self.leftMenuFrame)
self.bottomMenu.setObjectName(u"bottomMenu")
self.bottomMenu.setFrameShape(QFrame.NoFrame)
self.bottomMenu.setFrameShadow(QFrame.Raised)
self.verticalLayout_9 = QVBoxLayout(self.bottomMenu)
self.verticalLayout_9.setSpacing(0)
self.verticalLayout_9.setObjectName(u"verticalLayout_9")
self.verticalLayout_9.setContentsMargins(0, 0, 0, 0)
self.verticalMenuLayout.addWidget(self.bottomMenu, 0, Qt.AlignBottom)
self.verticalLayout_3.addWidget(self.leftMenuFrame)
self.appLayout.addWidget(self.leftMenuBg)
self.contentBox = QFrame(self.bgApp)
self.contentBox.setObjectName(u"contentBox")
self.contentBox.setFrameShape(QFrame.NoFrame)
self.contentBox.setFrameShadow(QFrame.Raised)
self.verticalLayout_2 = QVBoxLayout(self.contentBox)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName(u"verticalLayout_2")
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.contentTopBg = QFrame(self.contentBox)
self.contentTopBg.setObjectName(u"contentTopBg")
self.contentTopBg.setMinimumSize(QSize(0, 50))
self.contentTopBg.setMaximumSize(QSize(16777215, 50))
self.contentTopBg.setFrameShape(QFrame.NoFrame)
self.contentTopBg.setFrameShadow(QFrame.Raised)
self.horizontalLayout = QHBoxLayout(self.contentTopBg)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.horizontalLayout.setContentsMargins(0, 0, 10, 0)
self.leftBox = QFrame(self.contentTopBg)
self.leftBox.setObjectName(u"leftBox")
sizePolicy3 = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
sizePolicy3.setHorizontalStretch(0)
sizePolicy3.setVerticalStretch(0)
sizePolicy3.setHeightForWidth(self.leftBox.sizePolicy().hasHeightForWidth())
self.leftBox.setSizePolicy(sizePolicy3)
self.leftBox.setFrameShape(QFrame.NoFrame)
self.leftBox.setFrameShadow(QFrame.Raised)
self.horizontalLayout_3 = QHBoxLayout(self.leftBox)
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setObjectName(u"horizontalLayout_3")
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.titleRightInfo = QLabel(self.leftBox)
self.titleRightInfo.setObjectName(u"titleRightInfo")
sizePolicy.setHeightForWidth(self.titleRightInfo.sizePolicy().hasHeightForWidth())
self.titleRightInfo.setSizePolicy(sizePolicy)
self.titleRightInfo.setMaximumSize(QSize(16777215, 45))
font3 = QFont()
font3.setFamilies([u"URW Gothic"])
font3.setPointSize(13)
font3.setBold(False)
font3.setItalic(False)
self.titleRightInfo.setFont(font3)
self.titleRightInfo.setStyleSheet(u"font: 63 13pt \"URW Gothic\";")
self.titleRightInfo.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
self.horizontalLayout_3.addWidget(self.titleRightInfo)
self.horizontalLayout.addWidget(self.leftBox)
self.rightButtons = QFrame(self.contentTopBg)
self.rightButtons.setObjectName(u"rightButtons")
self.rightButtons.setMinimumSize(QSize(0, 28))
self.rightButtons.setFrameShape(QFrame.NoFrame)
self.rightButtons.setFrameShadow(QFrame.Raised)
self.horizontalLayout_2 = QHBoxLayout(self.rightButtons)
self.horizontalLayout_2.setSpacing(5)
self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.settingsTopBtn = QPushButton(self.rightButtons)
self.settingsTopBtn.setObjectName(u"settingsTopBtn")
self.settingsTopBtn.setMinimumSize(QSize(28, 28))
self.settingsTopBtn.setMaximumSize(QSize(28, 28))
self.settingsTopBtn.setFont(font)
self.settingsTopBtn.setCursor(QCursor(Qt.PointingHandCursor))
self.settingsTopBtn.setStyleSheet(u"")
icon = QIcon()
icon.addFile(u":/icons/images/icons/cil-loop-circular.png", QSize(), QIcon.Normal, QIcon.Off)
self.settingsTopBtn.setIcon(icon)
self.settingsTopBtn.setIconSize(QSize(20, 20))
self.horizontalLayout_2.addWidget(self.settingsTopBtn)
self.minimizeAppBtn = QPushButton(self.rightButtons)
self.minimizeAppBtn.setObjectName(u"minimizeAppBtn")
self.minimizeAppBtn.setMinimumSize(QSize(28, 28))
self.minimizeAppBtn.setMaximumSize(QSize(28, 28))
self.minimizeAppBtn.setCursor(QCursor(Qt.PointingHandCursor))
icon1 = QIcon()
icon1.addFile(u":/icons/images/icons/icon_minimize.png", QSize(), QIcon.Normal, QIcon.Off)
self.minimizeAppBtn.setIcon(icon1)
self.minimizeAppBtn.setIconSize(QSize(20, 20))
self.horizontalLayout_2.addWidget(self.minimizeAppBtn)
self.maximizeRestoreAppBtn = QPushButton(self.rightButtons)
self.maximizeRestoreAppBtn.setObjectName(u"maximizeRestoreAppBtn")
self.maximizeRestoreAppBtn.setMinimumSize(QSize(28, 28))
self.maximizeRestoreAppBtn.setMaximumSize(QSize(28, 28))
font4 = QFont()
font4.setFamilies([u"Segoe UI"])
font4.setPointSize(10)
font4.setBold(False)
font4.setItalic(False)
font4.setStyleStrategy(QFont.PreferDefault)
self.maximizeRestoreAppBtn.setFont(font4)
self.maximizeRestoreAppBtn.setCursor(QCursor(Qt.PointingHandCursor))
icon2 = QIcon()
icon2.addFile(u":/icons/images/icons/icon_maximize.png", QSize(), QIcon.Normal, QIcon.Off)
self.maximizeRestoreAppBtn.setIcon(icon2)
self.maximizeRestoreAppBtn.setIconSize(QSize(20, 20))
self.horizontalLayout_2.addWidget(self.maximizeRestoreAppBtn)
self.closeAppBtn = QPushButton(self.rightButtons)
self.closeAppBtn.setObjectName(u"closeAppBtn")
self.closeAppBtn.setMinimumSize(QSize(28, 28))
self.closeAppBtn.setMaximumSize(QSize(28, 28))
self.closeAppBtn.setCursor(QCursor(Qt.PointingHandCursor))
icon3 = QIcon()
icon3.addFile(u":/icons/images/icons/icon_close.png", QSize(), QIcon.Normal, QIcon.Off)
self.closeAppBtn.setIcon(icon3)
self.closeAppBtn.setIconSize(QSize(20, 20))
self.horizontalLayout_2.addWidget(self.closeAppBtn)
self.horizontalLayout.addWidget(self.rightButtons, 0, Qt.AlignRight)
self.verticalLayout_2.addWidget(self.contentTopBg)
self.contentBottom = QFrame(self.contentBox)
self.contentBottom.setObjectName(u"contentBottom")
self.contentBottom.setFrameShape(QFrame.NoFrame)
self.contentBottom.setFrameShadow(QFrame.Raised)
self.verticalLayout_6 = QVBoxLayout(self.contentBottom)
self.verticalLayout_6.setSpacing(0)
self.verticalLayout_6.setObjectName(u"verticalLayout_6")
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.content = QFrame(self.contentBottom)
self.content.setObjectName(u"content")
self.content.setFrameShape(QFrame.NoFrame)
self.content.setFrameShadow(QFrame.Raised)
self.horizontalLayout_4 = QHBoxLayout(self.content)
self.horizontalLayout_4.setSpacing(0)
self.horizontalLayout_4.setObjectName(u"horizontalLayout_4")
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.pagesContainer = QFrame(self.content)
self.pagesContainer.setObjectName(u"pagesContainer")
self.pagesContainer.setStyleSheet(u"")
self.pagesContainer.setFrameShape(QFrame.NoFrame)
self.pagesContainer.setFrameShadow(QFrame.Raised)
self.verticalLayout_15 = QVBoxLayout(self.pagesContainer)
self.verticalLayout_15.setSpacing(0)
self.verticalLayout_15.setObjectName(u"verticalLayout_15")
self.verticalLayout_15.setContentsMargins(10, 10, 10, 10)
self.stackedWidget = QStackedWidget(self.pagesContainer)
self.stackedWidget.setObjectName(u"stackedWidget")
self.stackedWidget.setStyleSheet(u"background: transparent;")
self.home = QWidget()
self.home.setObjectName(u"home")
self.verticalLayout_5 = QVBoxLayout(self.home)
self.verticalLayout_5.setObjectName(u"verticalLayout_5")
self.gridLayout_15 = QGridLayout()
self.gridLayout_15.setSpacing(20)
self.gridLayout_15.setObjectName(u"gridLayout_15")
self.label_2 = QLabel(self.home)
self.label_2.setObjectName(u"label_2")
self.label_2.setMaximumSize(QSize(1280, 400))
self.label_2.setPixmap(QPixmap(u"images/images/IM-FIT_Mimari.jpg"))
self.label_2.setScaledContents(True)
self.gridLayout_15.addWidget(self.label_2, 0, 1, 1, 1)
self.horizontalSpacer = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.gridLayout_15.addItem(self.horizontalSpacer, 0, 0, 1, 1)
self.horizontalSpacer_2 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.gridLayout_15.addItem(self.horizontalSpacer_2, 0, 2, 1, 1)
self.gridLayout_24 = QGridLayout()
self.gridLayout_24.setObjectName(u"gridLayout_24")
self.gridLayout_24.setHorizontalSpacing(20)
self.textEdit_35 = QTextEdit(self.home)
self.textEdit_35.setObjectName(u"textEdit_35")
self.textEdit_35.setMaximumSize(QSize(16777215, 16777215))
self.textEdit_35.setStyleSheet(u"background-color: rgb(52, 59, 72);")
self.gridLayout_24.addWidget(self.textEdit_35, 0, 1, 1, 1)
self.textEdit_36 = QTextEdit(self.home)
self.textEdit_36.setObjectName(u"textEdit_36")
self.textEdit_36.setMaximumSize(QSize(16777215, 16777215))
self.textEdit_36.setStyleSheet(u"background-color: rgb(52, 59, 72);")
self.gridLayout_24.addWidget(self.textEdit_36, 0, 3, 1, 1)
self.textEdit_37 = QTextEdit(self.home)
self.textEdit_37.setObjectName(u"textEdit_37")
self.textEdit_37.setMaximumSize(QSize(16777215, 16777215))
self.textEdit_37.setStyleSheet(u"background-color: rgb(52, 59, 72);")
self.gridLayout_24.addWidget(self.textEdit_37, 0, 4, 1, 1)
self.textEdit_38 = QTextEdit(self.home)
self.textEdit_38.setObjectName(u"textEdit_38")
self.textEdit_38.setMaximumSize(QSize(16777215, 16777215))
self.textEdit_38.setStyleSheet(u"background-color: rgb(52, 59, 72);")
self.gridLayout_24.addWidget(self.textEdit_38, 0, 0, 1, 1)
self.textEdit_39 = QTextEdit(self.home)
self.textEdit_39.setObjectName(u"textEdit_39")
self.textEdit_39.setMaximumSize(QSize(16777215, 16777215))
self.textEdit_39.setStyleSheet(u"background-color: rgb(52, 59, 72);")
self.gridLayout_24.addWidget(self.textEdit_39, 0, 2, 1, 1)
self.gridLayout_15.addLayout(self.gridLayout_24, 1, 0, 1, 3)
self.verticalLayout_5.addLayout(self.gridLayout_15)
self.btn_go_start = QPushButton(self.home)
self.btn_go_start.setObjectName(u"btn_go_start")
sizePolicy.setHeightForWidth(self.btn_go_start.sizePolicy().hasHeightForWidth())
self.btn_go_start.setSizePolicy(sizePolicy)
self.btn_go_start.setMinimumSize(QSize(300, 30))
self.btn_go_start.setMaximumSize(QSize(16777215, 30))
self.btn_go_start.setCursor(QCursor(Qt.PointingHandCursor))
self.btn_go_start.setStyleSheet(u"background-color: rgb(52, 59, 72);")
icon4 = QIcon()
icon4.addFile(u":/icons/images/icons/cil-arrow-circle-right.png", QSize(), QIcon.Normal, QIcon.Off)
self.btn_go_start.setIcon(icon4)
self.verticalLayout_5.addWidget(self.btn_go_start)
self.stackedWidget.addWidget(self.home)
self.start = QWidget()
self.start.setObjectName(u"start")
self.start.setStyleSheet(u"b")
self.verticalLayout = QVBoxLayout(self.start)
self.verticalLayout.setSpacing(10)
self.verticalLayout.setObjectName(u"verticalLayout")
self.verticalLayout.setContentsMargins(10, 10, 10, 10)
self.gridLayout = QGridLayout()
self.gridLayout.setObjectName(u"gridLayout")
self.gridLayout.setSizeConstraint(QLayout.SetDefaultConstraint)
self.gridLayout.setHorizontalSpacing(20)
self.gridLayout.setVerticalSpacing(6)
self.gridLayout_22 = QGridLayout()
self.gridLayout_22.setSpacing(10)
self.gridLayout_22.setObjectName(u"gridLayout_22")
self.listWidget_8 = QListWidget(self.start)
self.listWidget_8.setObjectName(u"listWidget_8")
self.listWidget_8.setStyleSheet(u"background-color: rgb(52, 59, 72);\n"
"")
self.gridLayout_22.addWidget(self.listWidget_8, 6, 4, 9, 1)
self.checkBox_3 = QCheckBox(self.start)
self.checkBox_3.setObjectName(u"checkBox_3")
font5 = QFont()
font5.setFamilies([u"Ubuntu"])
font5.setPointSize(13)
font5.setBold(False)
font5.setItalic(False)
self.checkBox_3.setFont(font5)
self.checkBox_3.setStyleSheet(u"font: 13pt \"Ubuntu\";")
self.gridLayout_22.addWidget(self.checkBox_3, 15, 3, 1, 1)
self.btn_select_snippet = QPushButton(self.start)
self.btn_select_snippet.setObjectName(u"btn_select_snippet")
self.btn_select_snippet.setMinimumSize(QSize(0, 30))
self.btn_select_snippet.setStyleSheet(u"background-color: rgb(52, 59, 72);\n"
"")
icon5 = QIcon()
icon5.addFile(u":/icons/images/icons/cil-hand-point-up.png", QSize(), QIcon.Normal, QIcon.Off)
self.btn_select_snippet.setIcon(icon5)
self.gridLayout_22.addWidget(self.btn_select_snippet, 14, 0, 1, 3)
self.textEdit_24 = QTextEdit(self.start)
self.textEdit_24.setObjectName(u"textEdit_24")
self.textEdit_24.setStyleSheet(u"background-color: rgb(52, 59, 72);\n"
"")
self.gridLayout_22.addWidget(self.textEdit_24, 6, 3, 9, 1)
self.label_54 = QLabel(self.start)
self.label_54.setObjectName(u"label_54")
self.label_54.setStyleSheet(u"")
self.gridLayout_22.addWidget(self.label_54, 5, 0, 1, 3)
self.label_55 = QLabel(self.start)
self.label_55.setObjectName(u"label_55")
self.label_55.setStyleSheet(u"")
self.gridLayout_22.addWidget(self.label_55, 5, 3, 1, 1)
self.gridLayout_50 = QGridLayout()
self.gridLayout_50.setSpacing(10)
self.gridLayout_50.setObjectName(u"gridLayout_50")
self.pushButton_4 = QPushButton(self.start)
self.pushButton_4.setObjectName(u"pushButton_4")
sizePolicy4 = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)
sizePolicy4.setHorizontalStretch(0)
sizePolicy4.setVerticalStretch(0)
sizePolicy4.setHeightForWidth(self.pushButton_4.sizePolicy().hasHeightForWidth())
self.pushButton_4.setSizePolicy(sizePolicy4)
self.pushButton_4.setMinimumSize(QSize(100, 30))
self.pushButton_4.setStyleSheet(u"background-color: rgb(52, 59, 72);\n"
"")
icon6 = QIcon()
icon6.addFile(u":/icons/images/icons/cil-save.png", QSize(), QIcon.Normal, QIcon.Off)
self.pushButton_4.setIcon(icon6)
self.gridLayout_50.addWidget(self.pushButton_4, 2, 1, 1, 1)
self.test_case_terminal = QTextEdit(self.start)
self.test_case_terminal.setObjectName(u"test_case_terminal")
sizePolicy3.setHeightForWidth(self.test_case_terminal.sizePolicy().hasHeightForWidth())
self.test_case_terminal.setSizePolicy(sizePolicy3)
self.test_case_terminal.setMinimumSize(QSize(0, 30))
self.test_case_terminal.setMaximumSize(QSize(16777215, 100))
self.test_case_terminal.setStyleSheet(u"background-color: rgb(52, 59, 72);\n"
"")
self.test_case_terminal.setLineWrapMode(QTextEdit.NoWrap)
self.test_case_terminal.setReadOnly(True)
self.gridLayout_50.addWidget(self.test_case_terminal, 1, 0, 1, 2)
self.checkBox_15 = QCheckBox(self.start)
self.checkBox_15.setObjectName(u"checkBox_15")
sizePolicy4.setHeightForWidth(self.checkBox_15.sizePolicy().hasHeightForWidth())
self.checkBox_15.setSizePolicy(sizePolicy4)
self.checkBox_15.setMinimumSize(QSize(100, 30))
self.checkBox_15.setStyleSheet(u"background-color: rgb(52, 59, 72);\n"
"")
icon7 = QIcon()
icon7.addFile(u":/icons/images/icons/cil-pencil.png", QSize(), QIcon.Normal, QIcon.Off)
self.checkBox_15.setIcon(icon7)
self.gridLayout_50.addWidget(self.checkBox_15, 2, 0, 1, 1)
self.gridLayout_22.addLayout(self.gridLayout_50, 4, 0, 1, 3)
self.test_case_content = QTextEdit(self.start)
self.test_case_content.setObjectName(u"test_case_content")
self.test_case_content.setStyleSheet(u"background-color: rgb(52, 59, 72);\n"
"")
self.gridLayout_22.addWidget(self.test_case_content, 1, 0, 2, 3)
self.gridLayout_51 = QGridLayout()
self.gridLayout_51.setObjectName(u"gridLayout_51")
self.gridLayout_51.setHorizontalSpacing(10)
self.label_110 = QLabel(self.start)
self.label_110.setObjectName(u"label_110")
self.label_110.setStyleSheet(u"")
self.gridLayout_51.addWidget(self.label_110, 0, 0, 1, 1)
self.btn_open_tc = QPushButton(self.start)
self.btn_open_tc.setObjectName(u"btn_open_tc")
sizePolicy4.setHeightForWidth(self.btn_open_tc.sizePolicy().hasHeightForWidth())
self.btn_open_tc.setSizePolicy(sizePolicy4)
self.btn_open_tc.setMinimumSize(QSize(80, 30))
self.btn_open_tc.setMaximumSize(QSize(80, 30))
self.btn_open_tc.setStyleSheet(u"background-color: rgb(52, 59, 72);")
icon8 = QIcon()
icon8.addFile(u":/icons/images/icons/cil-folder-open.png", QSize(), QIcon.Normal, QIcon.Off)
self.btn_open_tc.setIcon(icon8)
self.gridLayout_51.addWidget(self.btn_open_tc, 0, 2, 1, 1)
self.test_case_directory_text = QTextEdit(self.start)
self.test_case_directory_text.setObjectName(u"test_case_directory_text")
self.test_case_directory_text.setMinimumSize(QSize(0, 30))
self.test_case_directory_text.setMaximumSize(QSize(16777215, 30))
self.test_case_directory_text.setStyleSheet(u"background-color: rgb(52, 59, 72);\n"
"")
self.test_case_directory_text.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.test_case_directory_text.setLineWrapMode(QTextEdit.NoWrap)
self.test_case_directory_text.setReadOnly(True)
self.gridLayout_51.addWidget(self.test_case_directory_text, 0, 1, 1, 1)
self.gridLayout_22.addLayout(self.gridLayout_51, 0, 0, 1, 3)
self.label_56 = QLabel(self.start)
self.label_56.setObjectName(u"label_56")
self.label_56.setStyleSheet(u"")
self.gridLayout_22.addWidget(self.label_56, 5, 4, 1, 1)
self.gridLayout_47 = QGridLayout()
self.gridLayout_47.setObjectName(u"gridLayout_47")
self.gridLayout_47.setHorizontalSpacing(10)
self.btn_create_code = QPushButton(self.start)
self.btn_create_code.setObjectName(u"btn_create_code")
sizePolicy5 = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
sizePolicy5.setHorizontalStretch(0)
sizePolicy5.setVerticalStretch(0)
sizePolicy5.setHeightForWidth(self.btn_create_code.sizePolicy().hasHeightForWidth())
self.btn_create_code.setSizePolicy(sizePolicy5)
self.btn_create_code.setMinimumSize(QSize(0, 30))
self.btn_create_code.setStyleSheet(u"background-color: rgb(52, 59, 72);\n"
"")
icon9 = QIcon()
icon9.addFile(u":/icons/images/icons/cil-code.png", QSize(), QIcon.Normal, QIcon.Off)
self.btn_create_code.setIcon(icon9)
self.gridLayout_47.addWidget(self.btn_create_code, 0, 0, 1, 1)
self.btn_add_custom = QPushButton(self.start)
self.btn_add_custom.setObjectName(u"btn_add_custom")
sizePolicy5.setHeightForWidth(self.btn_add_custom.sizePolicy().hasHeightForWidth())
self.btn_add_custom.setSizePolicy(sizePolicy5)
self.btn_add_custom.setMinimumSize(QSize(0, 30))
self.btn_add_custom.setStyleSheet(u"background-color: rgb(52, 59, 72);\n"
"")
icon10 = QIcon()
icon10.addFile(u":/icons/images/icons/cil-plus.png", QSize(), QIcon.Normal, QIcon.Off)
self.btn_add_custom.setIcon(icon10)
self.gridLayout_47.addWidget(self.btn_add_custom, 0, 1, 1, 2)
self.gridLayout_22.addLayout(self.gridLayout_47, 15, 0, 1, 3)
self.gridLayout_52 = QGridLayout()
self.gridLayout_52.setObjectName(u"gridLayout_52")
self.gridLayout_52.setHorizontalSpacing(10)
self.btn_select_workload = QPushButton(self.start)
self.btn_select_workload.setObjectName(u"btn_select_workload")
self.btn_select_workload.setEnabled(True)
sizePolicy4.setHeightForWidth(self.btn_select_workload.sizePolicy().hasHeightForWidth())
self.btn_select_workload.setSizePolicy(sizePolicy4)
self.btn_select_workload.setMinimumSize(QSize(80, 30))
self.btn_select_workload.setMaximumSize(QSize(80, 30))
self.btn_select_workload.setFont(font)
self.btn_select_workload.setCursor(QCursor(Qt.PointingHandCursor))
self.btn_select_workload.setStyleSheet(u"background-color: rgb(52, 59, 72);")
self.btn_select_workload.setIcon(icon8)
self.gridLayout_52.addWidget(self.btn_select_workload, 0, 2, 1, 1)
self.label_42 = QLabel(self.start)
self.label_42.setObjectName(u"label_42")
self.label_42.setMaximumSize(QSize(16777215, 30))
self.label_42.setStyleSheet(u"")
self.gridLayout_52.addWidget(self.label_42, 0, 0, 1, 1)
self.textEdit_46 = QTextEdit(self.start)
self.textEdit_46.setObjectName(u"textEdit_46")
self.textEdit_46.setMinimumSize(QSize(0, 30))
self.textEdit_46.setMaximumSize(QSize(16777215, 30))
self.textEdit_46.setStyleSheet(u"background-color: rgb(52, 59, 72);\n"
"")
self.textEdit_46.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.textEdit_46.setLineWrapMode(QTextEdit.NoWrap)
self.textEdit_46.setReadOnly(True)
self.gridLayout_52.addWidget(self.textEdit_46, 0, 1, 1, 1)
self.gridLayout_22.addLayout(self.gridLayout_52, 0, 3, 1, 2)
self.btn_remove_snip = QPushButton(self.start)
self.btn_remove_snip.setObjectName(u"btn_remove_snip")
sizePolicy5.setHeightForWidth(self.btn_remove_snip.sizePolicy().hasHeightForWidth())
self.btn_remove_snip.setSizePolicy(sizePolicy5)
self.btn_remove_snip.setMinimumSize(QSize(0, 30))
self.btn_remove_snip.setMaximumSize(QSize(16777215, 16777215))
self.btn_remove_snip.setStyleSheet(u"background-color: rgb(52, 59, 72);\n"
"")
icon11 = QIcon()
icon11.addFile(u":/icons/images/icons/cil-minus.png", QSize(), QIcon.Normal, QIcon.Off)
self.btn_remove_snip.setIcon(icon11)
self.gridLayout_22.addWidget(self.btn_remove_snip, 15, 4, 1, 1)
self.code_snippet_list = QListWidget(self.start)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
QListWidgetItem(self.code_snippet_list)
font6 = QFont()
font6.setPointSize(20)
__qlistwidgetitem = QListWidgetItem(self.code_snippet_list)
__qlistwidgetitem.setFont(font6);
__qlistwidgetitem.setFlags(Qt.NoItemFlags);
self.code_snippet_list.setObjectName(u"code_snippet_list")
sizePolicy.setHeightForWidth(self.code_snippet_list.sizePolicy().hasHeightForWidth())
self.code_snippet_list.setSizePolicy(sizePolicy)
self.code_snippet_list.setMinimumSize(QSize(0, 0))
self.code_snippet_list.setMaximumSize(QSize(16777215, 16777215))
self.code_snippet_list.setStyleSheet(u"background-color: rgb(52, 59, 72);")
self.gridLayout_22.addWidget(self.code_snippet_list, 6, 0, 8, 3)
self.gridLayout_49 = QGridLayout()
self.gridLayout_49.setSpacing(10)
self.gridLayout_49.setObjectName(u"gridLayout_49")
self.btn_clear_workload = QPushButton(self.start)
self.btn_clear_workload.setObjectName(u"btn_clear_workload")
sizePolicy4.setHeightForWidth(self.btn_clear_workload.sizePolicy().hasHeightForWidth())
self.btn_clear_workload.setSizePolicy(sizePolicy4)
self.btn_clear_workload.setMinimumSize(QSize(100, 30))
self.btn_clear_workload.setStyleSheet(u"background-color: rgb(52, 59, 72);")
icon12 = QIcon()
icon12.addFile(u":/icons/images/icons/cil-x.png", QSize(), QIcon.Normal, QIcon.Off)
self.btn_clear_workload.setIcon(icon12)
self.gridLayout_49.addWidget(self.btn_clear_workload, 1, 2, 1, 1)
self.checkBox_5 = QCheckBox(self.start)
self.checkBox_5.setObjectName(u"checkBox_5")
sizePolicy4.setHeightForWidth(self.checkBox_5.sizePolicy().hasHeightForWidth())
self.checkBox_5.setSizePolicy(sizePolicy4)
self.checkBox_5.setMinimumSize(QSize(100, 30))
self.checkBox_5.setMaximumSize(QSize(16777215, 16777215))
self.checkBox_5.setStyleSheet(u"background-color: rgb(52, 59, 72);")
self.checkBox_5.setIcon(icon7)
self.gridLayout_49.addWidget(self.checkBox_5, 1, 1, 1, 1)
self.btn_create_workload = QPushButton(self.start)
self.btn_create_workload.setObjectName(u"btn_create_workload")
sizePolicy4.setHeightForWidth(self.btn_create_workload.sizePolicy().hasHeightForWidth())
self.btn_create_workload.setSizePolicy(sizePolicy4)
self.btn_create_workload.setMinimumSize(QSize(100, 30))
self.btn_create_workload.setCursor(QCursor(Qt.PointingHandCursor))
self.btn_create_workload.setStyleSheet(u"background-color: rgb(52, 59, 72);")
self.btn_create_workload.setIcon(icon10)
self.gridLayout_49.addWidget(self.btn_create_workload, 1, 0, 1, 1)
self.textEdit_3 = QTextEdit(self.start)
self.textEdit_3.setObjectName(u"textEdit_3")
self.textEdit_3.setMinimumSize(QSize(0, 150))
self.textEdit_3.setStyleSheet(u"background-color: rgb(52, 59, 72);")
self.textEdit_3.setReadOnly(True)
self.gridLayout_49.addWidget(self.textEdit_3, 0, 0, 1, 3)
self.gridLayout_22.addLayout(self.gridLayout_49, 1, 3, 4, 2)
self.try_test_case = QPushButton(self.start)
self.try_test_case.setObjectName(u"try_test_case")
sizePolicy4.setHeightForWidth(self.try_test_case.sizePolicy().hasHeightForWidth())
self.try_test_case.setSizePolicy(sizePolicy4)
self.try_test_case.setMinimumSize(QSize(30, 0))
self.try_test_case.setMaximumSize(QSize(16777215, 30))
self.try_test_case.setStyleSheet(u"background-color: rgb(52, 59, 72);\n"
"")
icon13 = QIcon()
icon13.addFile(u":/icons/images/icons/cil-chevron-double-right.png", QSize(), QIcon.Normal, QIcon.Off)
self.try_test_case.setIcon(icon13)
self.gridLayout_22.addWidget(self.try_test_case, 3, 0, 1, 3)
self.gridLayout.addLayout(self.gridLayout_22, 0, 1, 4, 4)
self.gridLayout_48 = QGridLayout()
self.gridLayout_48.setSpacing(10)
self.gridLayout_48.setObjectName(u"gridLayout_48")
self.source_code_directory_text = QTextEdit(self.start)
self.source_code_directory_text.setObjectName(u"source_code_directory_text")
sizePolicy.setHeightForWidth(self.source_code_directory_text.sizePolicy().hasHeightForWidth())
self.source_code_directory_text.setSizePolicy(sizePolicy)
self.source_code_directory_text.setMinimumSize(QSize(0, 30))
self.source_code_directory_text.setMaximumSize(QSize(16777215, 30))
self.source_code_directory_text.setStyleSheet(u"background-color: rgb(52, 59, 72);\n"
"")
self.source_code_directory_text.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.source_code_directory_text.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.source_code_directory_text.setLineWrapMode(QTextEdit.NoWrap)
self.source_code_directory_text.setReadOnly(True)
self.gridLayout_48.addWidget(self.source_code_directory_text, 1, 1, 1, 1)
self.gridLayout_34 = QGridLayout()
self.gridLayout_34.setObjectName(u"gridLayout_34")
self.checkBox_8 = QCheckBox(self.start)
self.checkBox_8.setObjectName(u"checkBox_8")
sizePolicy4.setHeightForWidth(self.checkBox_8.sizePolicy().hasHeightForWidth())
self.checkBox_8.setSizePolicy(sizePolicy4)
self.checkBox_8.setMinimumSize(QSize(0, 30))
self.checkBox_8.setMaximumSize(QSize(16777215, 30))
self.checkBox_8.setStyleSheet(u"background-color: rgb(52, 59, 72);")
self.checkBox_8.setIcon(icon7)
self.gridLayout_34.addWidget(self.checkBox_8, 0, 0, 1, 1)
self.btn_clear_codes = QPushButton(self.start)
self.btn_clear_codes.setObjectName(u"btn_clear_codes")
sizePolicy4.setHeightForWidth(self.btn_clear_codes.sizePolicy().hasHeightForWidth())
self.btn_clear_codes.setSizePolicy(sizePolicy4)
self.btn_clear_codes.setMinimumSize(QSize(0, 30))
self.btn_clear_codes.setMaximumSize(QSize(16777215, 30))
self.btn_clear_codes.setStyleSheet(u"background-color: rgb(52, 59, 72);")
self.btn_clear_codes.setIcon(icon12)
self.gridLayout_34.addWidget(self.btn_clear_codes, 0, 1, 1, 1)
self.gridLayout_48.addLayout(self.gridLayout_34, 3, 0, 1, 3)
self.label_9 = QLabel(self.start)
self.label_9.setObjectName(u"label_9")
self.label_9.setMaximumSize(QSize(16777215, 30))
self.label_9.setStyleSheet(u"")
self.gridLayout_48.addWidget(self.label_9, 1, 0, 1, 1)
self.btn_open_folder = QPushButton(self.start)
self.btn_open_folder.setObjectName(u"btn_open_folder")
sizePolicy6 = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
sizePolicy6.setHorizontalStretch(0)
sizePolicy6.setVerticalStretch(0)
sizePolicy6.setHeightForWidth(self.btn_open_folder.sizePolicy().hasHeightForWidth())
self.btn_open_folder.setSizePolicy(sizePolicy6)
self.btn_open_folder.setMinimumSize(QSize(80, 30))
self.btn_open_folder.setMaximumSize(QSize(80, 30))
self.btn_open_folder.setFont(font)
self.btn_open_folder.setCursor(QCursor(Qt.PointingHandCursor))
self.btn_open_folder.setStyleSheet(u"background-color: rgb(52, 59, 72);")
self.btn_open_folder.setIcon(icon8)
self.gridLayout_48.addWidget(self.btn_open_folder, 1, 2, 1, | |
heapq.heappop(lNodes)
lVisitedNodes.append((h, tNode))
for plot in plots.surrounding(tNode):
if plotFunction(location(plot)):
if plot in targets: return True
tTuple = (targets.closest_distance(plot), location(plot))
if not tTuple in lVisitedNodes and not tTuple in lNodes:
heapq.heappush(lNodes, tTuple)
return False
def isConnectedByTradeRoute(iPlayer, lStarts, lTargets):
	"""Return True if any city among lStarts reaches lTargets over a route network.

	A plot is traversable when it is owned by iPlayer or by the owner of the
	starting city, and is either a city or carries a road-like route type.
	"""
	lRouteTypes = [iRouteRoad, iRouteRailroad, iRouteRomanRoad, iRouteHighway]
	for tStart in lStarts:
		startPlot = plot(tStart)
		# only an actual city can anchor a trade route
		if not startPlot.isCity():
			continue
		lAllowedOwners = [iPlayer, startPlot.getOwner()]
		def plotFunction(tPlot):
			currentPlot = plot(tPlot)
			if currentPlot.getOwner() not in lAllowedOwners:
				return False
			return currentPlot.isCity() or currentPlot.getRouteType() in lRouteTypes
		if isConnected(tStart, lTargets, plotFunction):
			return True
	return False
def isConnectedByRailroad(iPlayer, tStart, lTargets):
	"""Return True if iPlayer's city at tStart is railroad-connected to lTargets.

	Requires the player to know the railroad tech and to own a city at tStart;
	only plots the player owns that are cities or carry a railroad count.
	"""
	if not team(iPlayer).isHasTech(iRailroad):
		return False
	startPlot = plot(tStart)
	if not startPlot.isCity():
		return False
	if startPlot.getOwner() != iPlayer:
		return False
	def plotFunction(tPlot):
		currentPlot = plot(tPlot)
		if currentPlot.getOwner() != iPlayer:
			return False
		return currentPlot.isCity() or currentPlot.getRouteType() == iRouteRailroad
	return isConnected(tStart, lTargets, plotFunction)
def countPlayersWithAttitudeAndCriteria(iPlayer, eAttitude, function):
	"""Count contactable major players (excluding iPlayer) whose attitude
	towards iPlayer is at least eAttitude and who satisfy the predicate.

	function: predicate taking a player id and returning a truthy value.
	"""
	# bugfix: the trailing .count() was missing, so a filtered collection was
	# returned even though the name and every caller expect an integer
	# (all sibling count* helpers in this module end with .count())
	return players.major().without(iPlayer).where(lambda p: player(iPlayer).canContact(p) and player(p).AI_getAttitude(iPlayer) >= eAttitude and function(p)).count()
def countPlayersWithAttitudeAndReligion(iPlayer, eAttitude, iReligion):
	"""Count players at attitude >= eAttitude towards iPlayer that own at
	least one city where iReligion is present."""
	# bugfix: previously forwarded the undefined name iAttitude instead of the
	# eAttitude parameter, which raises a NameError whenever this is called
	return countPlayersWithAttitudeAndCriteria(iPlayer, eAttitude, lambda p: cities.owner(p).religion(iReligion).any())
def countPlayersWithAttitudeInGroup(iPlayer, eAttitude, lOtherCivs):
	"""Count non-vassal players from lOtherCivs with attitude >= eAttitude towards iPlayer."""
	def isIndependentGroupMember(p):
		return civ(p) in lOtherCivs and not team(p).isAVassal()
	return countPlayersWithAttitudeAndCriteria(iPlayer, eAttitude, isIndependentGroupMember)
def getLargestCities(iPlayer, iNumCities):
	"""Return iPlayer's iNumCities most populous cities."""
	byPopulation = lambda city: city.getPopulation()
	return cities.owner(iPlayer).highest(iNumCities, byPopulation)

def countCitiesOfSize(iPlayer, iThreshold):
	"""Count iPlayer's cities with population of at least iThreshold."""
	bigEnough = lambda city: city.getPopulation() >= iThreshold
	return cities.owner(iPlayer).where(bigEnough).count()

def countCitiesWithCultureLevel(iPlayer, iThreshold):
	"""Count iPlayer's cities whose culture level is at least iThreshold."""
	culturedEnough = lambda city: city.getCultureLevel() >= iThreshold
	return cities.owner(iPlayer).where(culturedEnough).count()
def countAcquiredResources(iPlayer, lResources):
	"""Total number of the listed bonus resources available to iPlayer."""
	pPlayer = player(iPlayer)
	return sum(pPlayer.getNumAvailableBonuses(iBonus) for iBonus in lResources)
def isRoad(iPlayer, lPlots):
	"""Return True if iPlayer owns every plot in lPlots and each plot is a
	city or carries a road."""
	for tPlot in lPlots:
		# local renamed from 'plot' to avoid shadowing the plot() helper
		pPlot = plot_(tPlot)
		if pPlot.getOwner() != iPlayer:
			return False
		if not (pPlot.isCity() or pPlot.getRouteType() == iRouteRoad):
			return False
	return True
def countCityWonders(iPlayer, tPlot, bIncludeObsolete=False):
	"""Count wonders present in iPlayer's city at tPlot.

	tPlot: (x, y) tuple locating the city.
	bIncludeObsolete: when True, wonders whose obsoleting tech the player
	already knows are counted as well.
	Returns 0 when there is no city at tPlot or it belongs to another player.
	"""
	# unpack in the body instead of the Python 2-only tuple parameter syntax
	# (removed by PEP 3113); the call signature is unchanged for callers,
	# which pass an (x, y) tuple either way
	x, y = tPlot
	city = city_(x, y)
	if not city:
		return 0
	if city.getOwner() != iPlayer:
		return 0
	iCount = 0
	for iWonder in lWonders:
		iObsoleteTech = infos.building(iWonder).getObsoleteTech()
		# skip wonders already obsoleted for this player unless requested
		if not bIncludeObsolete and iObsoleteTech != -1 and team(iPlayer).isHasTech(iObsoleteTech):
			continue
		if city.isHasRealBuilding(iWonder):
			iCount += 1
	return iCount
def isCultureControlled(iPlayer, lPlots):
	"""True if every plot in lPlots is owned by iPlayer or unowned (-1)."""
	for (x, y) in lPlots:
		if plot(x, y).getOwner() not in [iPlayer, -1]:
			return False
	return True

def controlsCity(iPlayer, tPlot):
	"""Cities owned by iPlayer in the area surrounding tPlot."""
	return cities.surrounding(tPlot).owner(iPlayer)

def getTotalCulture(lCivs):
	"""Combined total culture of all players belonging to the given civs."""
	totalCulture = lambda p: player(p).countTotalCulture()
	return players.civs(*lCivs).sum(totalCulture)
def countImprovements(iPlayer, iImprovement):
	"""Number of iImprovement tiles owned by iPlayer; 0 for non-positive ids."""
	if iImprovement > 0:
		return player(iPlayer).getImprovementCount(iImprovement)
	return 0
def controlsAllCities(iPlayer, area):
	"""True if every city in the given area belongs to iPlayer."""
	def ownedByPlayer(city):
		return city.getOwner() == iPlayer
	return area.cities().all(ownedByPlayer)

def isAtPeace(iPlayer):
	"""True if iPlayer is at war with no living major player."""
	def atWarWith(p):
		return team(iPlayer).isAtWar(p)
	return players.major().alive().none(atWarWith)
def getHappiest():
	"""Living major player with the highest approval rating."""
	return players.major().alive().maximum(getApprovalRating)

def isHappiest(iPlayer):
	"""True if iPlayer currently tops the approval ranking."""
	return iPlayer == getHappiest()

def getHealthiest():
	"""Living major player with the highest life expectancy rating."""
	return players.major().alive().maximum(getLifeExpectancyRating)

def isHealthiest(iPlayer):
	"""True if iPlayer currently tops the life expectancy ranking."""
	return iPlayer == getHealthiest()
def countReligionCities(iPlayer):
	"""Count iPlayer's cities that contain at least one religion."""
	hasAnyReligion = lambda city: city.getReligionCount() > 0
	return cities.owner(iPlayer).where(hasAnyReligion).count()
def isCompleteTechTree(iPlayer):
	"""True if iPlayer's team holds every tech, once the player has reached
	the global era (earlier eras always return False)."""
	if player(iPlayer).getCurrentEra() < iGlobal:
		return False
	tPlayer = team(iPlayer)
	for iTech in range(iNumTechs):
		if tPlayer.isHasTech(iTech):
			continue
		if tPlayer.getTechCount(iTech) > 0:
			continue
		return False
	return True
def countFirstDiscovered(iPlayer, iEra):
	"""Count techs of iEra that iPlayer was the first to discover."""
	return sum(1 for iTech in range(iNumTechs) if infos.tech(iTech).getEra() == iEra and data.lFirstDiscovered[iTech] == iPlayer)
def isFirstDiscoveredPossible(iPlayer, iEra, iRequired):
	"""True if iPlayer can still reach iRequired first discoveries in iEra.

	Techs not yet discovered by anyone (recorded under player -1) are
	treated as still winnable.
	"""
	iAlready = countFirstDiscovered(iPlayer, iEra)
	iStillOpen = countFirstDiscovered(-1, iEra)
	return iAlready + iStillOpen >= iRequired
def isWonder(iBuilding):
	"""True if iBuilding falls inside the wonder range of building ids."""
	return iBuilding >= iBeginWonders and iBuilding < iNumBuildings
def countReligionPlayers(iReligion):
	"""Return (players with iReligion as state religion, living major players)."""
	iReligionPlayers = 0
	iTotalPlayers = 0
	for iPlayer in players.major().alive():
		iTotalPlayers += 1
		if player(iPlayer).getStateReligion() == iReligion:
			iReligionPlayers += 1
	return iReligionPlayers, iTotalPlayers
def countCivicPlayers(iCivic):
	"""Return (living major players running iCivic, living major players)."""
	iCivicPlayers = 0
	iTotalPlayers = 0
	for iPlayer in players.major().alive():
		iTotalPlayers += 1
		if has_civic(iPlayer, iCivic):
			iCivicPlayers += 1
	return iCivicPlayers, iTotalPlayers
def getBestCities(function):
	"""All cities in the world, sorted best-first by the given metric."""
	return cities.all().sort(function, True)

def countBestCitiesReligion(iReligion, function, iNumCities):
	"""Among the top iNumCities cities by the given metric, count those that
	have iReligion present and whose owner runs it as state religion."""
	iCount = 0
	for city in getBestCities(function)[:iNumCities]:
		if not city.isHasReligion(iReligion):
			continue
		if player(city).getStateReligion() == iReligion:
			iCount += 1
	return iCount
def getReligiousLand(iReligion):
	"""Total land percentage held by players with iReligion as state religion."""
	# explicit 0.0 start keeps the float return type even with no matches
	return sum((getLandPercent(iPlayer) for iPlayer in players.major().alive() if player(iPlayer).getStateReligion() == iReligion), 0.0)
def countLivingPlayers():
	"""Number of living major players."""
	return players.major().alive().count()
def countGoodRelationPlayers(iPlayer, iAttitudeThreshold):
	"""Count met major players whose attitude towards iPlayer meets the threshold."""
	tPlayer = team(iPlayer)
	iCount = 0
	for iLoopPlayer in players.major().without(iPlayer):
		# unmet players cannot contribute either way
		if not tPlayer.isHasMet(iLoopPlayer):
			continue
		if player(iLoopPlayer).AI_getAttitude(iPlayer) >= iAttitudeThreshold:
			iCount += 1
	return iCount
def countUnitsOfType(iPlayer, lTypes, bIncludeObsolete=False):
	"""Count iPlayer's units whose combat type is one of lTypes.

	Unit types the player can no longer train are skipped unless
	bIncludeObsolete is set.
	"""
	iCount = 0
	pPlayer = player(iPlayer)
	for iUnit in range(iNumUnits):
		if not (bIncludeObsolete or pPlayer.canTrain(iUnit, False, False)):
			continue
		unitInfo = infos.unit(iUnit)
		if unitInfo.getUnitCombatType() not in lTypes:
			continue
		iCount += pPlayer.getUnitClassCount(unitInfo.getUnitClassType())
	return iCount
def calculateShrineIncome(iPlayer, iReligion):
	"""Income of iPlayer's shrine for iReligion, capped by a threshold.

	The cap is raised from 20 to 40 while the player owns the Dome of the
	Rock and has not yet discovered Liberalism. Returns 0 without the shrine.
	"""
	if getNumBuildings(iPlayer, iShrine + 4*iReligion) == 0:
		return 0
	iThreshold = 20
	bDomeOfTheRock = getNumBuildings(iPlayer, iDomeOfTheRock) > 0
	if bDomeOfTheRock and not team(iPlayer).isHasTech(iLiberalism):
		iThreshold = 40
	return min(iThreshold, game.countReligionLevels(iReligion))
def countWorldBuildings(iBuilding):
	"""Total copies of iBuilding across all living major players, counting
	each civilization's unique version of the building."""
	return sum(getNumBuildings(iPlayer, unique_building(iPlayer, iBuilding)) for iPlayer in players.major().alive())
def countReligionWonders(iPlayer, iReligion):
	"""Count wonders requiring iReligion that iPlayer has built."""
	iCount = 0
	for iWonder in lWonders:
		if infos.building(iWonder).getPrereqReligion() != iReligion:
			continue
		if getNumBuildings(iPlayer, iWonder) > 0:
			iCount += 1
	return iCount
def countCivicBuildings(iCivic, iBuilding):
	"""Total copies of iBuilding (per-civ unique version) owned by living
	major players who run iCivic."""
	iCount = 0
	for iPlayer in players.major().alive():
		if not has_civic(iPlayer, iCivic):
			continue
		iCount += getNumBuildings(iPlayer, unique_building(iPlayer, iBuilding))
	return iCount
def getApostolicVotePercent(iPlayer):
	"""iPlayer's share of the vote, in percent; 0.0 when no votes exist.

	NOTE(review): the literal arguments (16, 1) presumably select the
	Apostolic Palace vote source — confirm against the getVotes API.
	"""
	iTotal = sum(player(iLoopPlayer).getVotes(16, 1) for iLoopPlayer in players.major())
	if iTotal == 0:
		return 0.0
	return player(iPlayer).getVotes(16, 1) * 100.0 / iTotal
def countNativeCulture(iPlayer, iPercent):
	"""Sum iPlayer's culture over own cities where that culture makes up at
	least iPercent of the city's total culture."""
	iPlayerCulture = 0
	for city in cities.owner(iPlayer):
		iCulture = city.getCulture(iPlayer)
		iTotal = sum(city.getCulture(iLoopPlayer) for iLoopPlayer in players.all().barbarian())
		# integer division on purpose: the percentage is truncated, exactly
		# as in the original arithmetic
		if iTotal > 0 and iCulture * 100 / iTotal >= iPercent:
			iPlayerCulture += iCulture
	return iPlayerCulture
def isTradeConnected(iPlayer):
	"""True if iPlayer can contact and trade with at least one other major player."""
	def tradePartner(p):
		return player(iPlayer).canContact(p) and player(iPlayer).canTradeNetworkWith(p)
	return players.major().without(iPlayer).any(tradePartner)

def countUnitsOfLevel(iPlayer, iLevel):
	"""Count iPlayer's units at experience level iLevel or higher."""
	experienced = lambda unit: unit.getLevel() >= iLevel
	return units.owner(iPlayer).where(experienced).count()

def countControlledTerrain(iPlayer, iTerrain):
	"""Count iPlayer-owned plots of the given terrain type."""
	matchesTerrain = lambda p: p.getTerrainType() == iTerrain
	return plots.all().owner(iPlayer).where(matchesTerrain).count()

def countControlledFeatures(iPlayer, iFeature, iImprovement):
	"""Count iPlayer-owned plots with both the given feature and improvement."""
	def matches(p):
		return p.getFeatureType() == iFeature and p.getImprovementType() == iImprovement
	return plots.all().owner(iPlayer).where(matches).count()

def countControlledPeaks(iPlayer):
	"""Count peak plots owned by iPlayer."""
	return plots.all().owner(iPlayer).where(lambda p: p.isPeak()).count()
def getGlobalTreasury():
	"""Combined gold of all major players."""
	return sum(player(iPlayer).getGold() for iPlayer in players.major())
def countFirstGreatPeople(iPlayer):
	"""Count great person unit types whose first instance was born to iPlayer."""
	iCount = 0
	for iGreatPerson in lGreatPeopleUnits:
		if getFirstBorn(iGreatPerson) == iPlayer:
			iCount += 1
	return iCount

def countReligionSpecialistCities(iPlayer, iReligion, iSpecialist):
	"""Count iPlayer's cities with iReligion present and at least one free iSpecialist."""
	def qualifies(city):
		return city.isHasReligion(iReligion) and city.getFreeSpecialistCount(iSpecialist) > 0
	return cities.owner(iPlayer).where(qualifies).count()
def calculateAlliedPercent(iPlayer, function):
	"""Percentage of the world total of a metric held by iPlayer's side.

	A player counts as allied when it is iPlayer itself, has a defensive
	pact with iPlayer's team, or is a vassal whose master qualifies.
	function: metric per player id. Returns 0 when the world total is zero.
	"""
	pTeam = team(iPlayer)
	def isAllied(iLoopPlayer):
		if iLoopPlayer == iPlayer:
			return True
		if pTeam.isDefensivePact(player(iLoopPlayer).getTeam()):
			return True
		iMaster = master(iLoopPlayer)
		# NOTE(review): truthiness check skips a master with id 0 — confirm
		# master() cannot return player 0 here
		if iMaster:
			return iMaster == iPlayer or pTeam.isDefensivePact(player(iMaster).getTeam())
		return False
	iAlliedValue = 0
	iTotalValue = 0
	for iLoopPlayer in players.major().alive():
		iValue = function(iLoopPlayer)
		iTotalValue += iValue
		if isAllied(iLoopPlayer):
			iAlliedValue += iValue
	if iTotalValue == 0:
		return 0
	return 100.0 * iAlliedValue / iTotalValue
def calculateAlliedCommercePercent(iPlayer):
	"""Share of world commerce held by iPlayer and its allies."""
	def commerce(p):
		return player(p).calculateTotalCommerce()
	return calculateAlliedPercent(iPlayer, commerce)

def calculateAlliedPowerPercent(iPlayer):
	"""Share of world power held by iPlayer and its allies."""
	def power(p):
		return player(p).getPower()
	return calculateAlliedPercent(iPlayer, power)
def countRegionReligion(iReligion, lRegions):
	"""Count cities in the given regions where iReligion is present."""
	return cities.regions(*lRegions).religion(iReligion).count()

def findBestCityWith(iPlayer, filter, metric):
	"""iPlayer's city maximising metric among those passing filter.

	The parameter name 'filter' shadows the builtin but is kept for
	interface compatibility with existing callers.
	"""
	return cities.owner(iPlayer).where(filter).maximum(metric)

def countVassals(iPlayer, lCivs=None, iReligion=-1):
	"""Count iPlayer's vassals, optionally restricted to civs in lCivs
	and/or to a given state religion (when iReligion >= 0)."""
	result = players.vassals(iPlayer)
	result = result.where(lambda p: not lCivs or civ(p) in lCivs)
	result = result.where(lambda p: iReligion < 0 or player(p).getStateReligion() == iReligion)
	return result.count()
### UHV HELP SCREEN ###
def getIcon(bVal):
	"""Return the success or failure font glyph for a boolean condition."""
	if bVal:
		symbol = FontSymbols.SUCCESS_CHAR
	else:
		symbol = FontSymbols.FAILURE_CHAR
	return u"%c" % CyGame().getSymbolID(symbol)
def getURVHelp(iPlayer, iGoal):
pPlayer = player(iPlayer)
iVictoryType = getReligiousVictoryType(iPlayer)
aHelp = []
if checkReligiousGoal(iPlayer, iGoal) == 1:
aHelp.append(getIcon(True) + text("TXT_KEY_VICTORY_GOAL_ACCOMPLISHED"))
return aHelp
elif checkReligiousGoal(iPlayer, iGoal) == 0:
aHelp.append(getIcon(False) + text("TXT_KEY_VICTORY_GOAL_FAILED"))
return aHelp
if iVictoryType == iJudaism:
if iGoal == 0:
iProphets = countReligionSpecialists(iJudaism, iSpecialistGreatProphet)
iScientists = countReligionSpecialists(iJudaism, iSpecialistGreatScientist)
iStatesmen = countReligionSpecialists(iJudaism, iSpecialistGreatStatesman)
aHelp.append(getIcon(iProphets + iScientists + iStatesmen) + text("TXT_KEY_VICTORY_JEWISH_SPECIALISTS", iProphets + iScientists + iStatesmen, 15))
elif iGoal == 1:
holyCity = game.getHolyCity(iJudaism)
aHelp.append(getIcon(holyCity.getOwner() == iPlayer) + text("TXT_KEY_VICTORY_CONTROL_HOLY_CITY", holyCity.getName()) + ' ' + getIcon(holyCity.getCultureLevel() >= 6) + text("TXT_KEY_VICTORY_LEGENDARY_CULTURE_CITY", holyCity.getName()))
elif iGoal == 2:
iFriendlyRelations = countPlayersWithAttitudeAndReligion(iPlayer, AttitudeTypes.ATTITUDE_FRIENDLY, iJudaism)
aHelp.append(getIcon(iFriendlyRelations >= 6) + text("TXT_KEY_VICTORY_FRIENDLY_RELIGION", infos.religion(iJudaism).getAdjectiveKey(), iFriendlyRelations, 6))
elif iVictoryType == iOrthodoxy:
if iGoal == 0:
iOrthodoxCathedrals = getNumBuildings(iPlayer, iOrthodoxCathedral)
aHelp.append(getIcon(iOrthodoxCathedrals >= 4) + text("TXT_KEY_VICTORY_ORTHODOX_CATHEDRALS", iOrthodoxCathedrals, 4))
elif iGoal == 1:
lCultureCities = getBestCities(cityCulture)[:5]
iCultureCities = countBestCitiesReligion(iOrthodoxy, cityCulture, 5)
for city in lCultureCities:
aHelp.append(getIcon(city.isHasReligion(iOrthodoxy) and player(city).getStateReligion() == iOrthodoxy) + city.getName())
elif iGoal == 2:
bNoCatholics = countReligionPlayers(iCatholicism)[0] == 0
aHelp.append(getIcon(bNoCatholics) + text("TXT_KEY_VICTORY_NO_CATHOLICS"))
elif iVictoryType == iCatholicism:
if iGoal == 0:
iPopeTurns = data.iPopeTurns
aHelp.append(getIcon(iPopeTurns >= turns(100)) + text("TXT_KEY_VICTORY_POPE_TURNS", iPopeTurns, turns(100)))
elif iGoal == 1:
bShrine = pPlayer.countNumBuildings(iCatholicShrine) > 0
iSaints = countReligionSpecialists(iCatholicism, iSpecialistGreatProphet)
aHelp.append(getIcon(bShrine) + text("TXT_KEY_BUILDING_CATHOLIC_SHRINE") + ' ' + getIcon(iSaints >= 12) + text("TXT_KEY_VICTORY_CATHOLIC_SAINTS", iSaints, 12))
elif iGoal == 2:
fLandPercent = getReligiousLand(iCatholicism)
aHelp.append(getIcon(fLandPercent >= 50.0) + text("TXT_KEY_VICTORY_CATHOLIC_WORLD_TERRITORY", "%.2f%%" % fLandPercent, '50'))
elif iVictoryType == iProtestantism:
if iGoal == 0:
bCivilLiberties = data.lFirstDiscovered[iCivilLiberties] == iPlayer
bConstitution = data.lFirstDiscovered[iSocialContract] == iPlayer
bEconomics = data.lFirstDiscovered[iEconomics] == iPlayer
aHelp.append(getIcon(bCivilLiberties) + text("TXT_KEY_TECH_CIVIL_LIBERTIES") + ' ' + getIcon(bConstitution) + text("TXT_KEY_TECH_CONSTITUTION") + ' ' + getIcon(bEconomics) + text("TXT_KEY_TECH_ECONOMICS"))
elif iGoal == 1:
iMerchants = countReligionSpecialists(iProtestantism, iSpecialistGreatMerchant)
iEngineers = countReligionSpecialists(iProtestantism, iSpecialistGreatEngineer)
aHelp.append(getIcon(iMerchants >= 5) + text("TXT_KEY_VICTORY_PROTESTANT_MERCHANTS", iMerchants, 5) + ' ' + getIcon(iEngineers >= 5) + text("TXT_KEY_VICTORY_PROTESTANT_ENGINEERS", iEngineers, 5))
elif iGoal == 2:
iProtestantCivs, iTotal = countReligionPlayers(iProtestantism)
iSecularCivs, iTotal = countCivicPlayers(iSecularism)
iNumProtestantCivs = iProtestantCivs + iSecularCivs
aHelp.append(getIcon(2 * iNumProtestantCivs >= iTotal) + text("TXT_KEY_VICTORY_PROTESTANT_CIVS", iNumProtestantCivs, iTotal))
elif iVictoryType == iIslam:
if iGoal == 0:
fReligionPercent = game.calculateReligionPercent(iIslam)
aHelp.append(getIcon(fReligionPercent >= 40.0) + text("TXT_KEY_VICTORY_SPREAD_RELIGION_PERCENT", infos.religion(iIslam).getTextKey(), "%.2f%%" % fReligionPercent, 40))
elif iGoal == 1:
iCount = 0
pHolyCity = game.getHolyCity(iIslam)
for iGreatPerson in lGreatPeople:
iCount += pHolyCity.getFreeSpecialistCount(iGreatPerson)
aHelp.append(getIcon(iCount >= 7) + text("TXT_KEY_VICTORY_CITY_GREAT_PEOPLE", game.getHolyCity(iIslam).getName(), iCount, 7))
elif iGoal == 2:
iCount = 0
for iReligion in range(iNumReligions):
iCount += getNumBuildings(iPlayer, iShrine + 4*iReligion)
aHelp.append(getIcon(iCount >= 5) + text("TXT_KEY_VICTORY_NUM_SHRINES", iCount, 5))
elif iVictoryType == iHinduism:
if iGoal == 0:
iCount = 0
pHolyCity = game.getHolyCity(iHinduism)
for iGreatPerson in lGreatPeople:
if pHolyCity.getFreeSpecialistCount(iGreatPerson) > 0:
iCount += 1
aHelp.append(getIcon(iCount >= 5) + text("TXT_KEY_VICTORY_CITY_DIFFERENT_GREAT_PEOPLE", game.getHolyCity(iHinduism).getName(), iCount, 5))
elif iGoal == 1:
iGoldenAgeTurns = data.iHinduGoldenAgeTurns
aHelp.append(getIcon(iGoldenAgeTurns >= turns(24)) + text("TXT_KEY_VICTORY_GOLDEN_AGE_TURNS", iGoldenAgeTurns, turns(24)))
elif iGoal == 2:
iLargestCities = countBestCitiesReligion(iHinduism, cityPopulation, 5)
aHelp.append(getIcon(iLargestCities >= 5) + text("TXT_KEY_VICTORY_HINDU_LARGEST_CITIES", iLargestCities, 5))
elif iVictoryType == iBuddhism:
if iGoal == 0:
iPeaceTurns = data.iBuddhistPeaceTurns
aHelp.append(getIcon(iPeaceTurns >= turns(100)) + text("TXT_KEY_VICTORY_PEACE_TURNS", iPeaceTurns, turns(100)))
elif iGoal == 1:
iHappinessTurns = data.iBuddhistHappinessTurns
aHelp.append(getIcon(iHappinessTurns >= turns(100)) + text("TXT_KEY_VICTORY_HAPPINESS_TURNS", iHappinessTurns, turns(100)))
elif iGoal == 2:
iGoodRelations = countGoodRelationPlayers(iPlayer, AttitudeTypes.ATTITUDE_CAUTIOUS)
iTotalPlayers = countLivingPlayers()-1
aHelp.append(getIcon(iGoodRelations >= iTotalPlayers) + text("TXT_KEY_VICTORY_CAUTIOUS_OR_BETTER_RELATIONS", iGoodRelations, iTotalPlayers))
elif iVictoryType == iConfucianism:
if iGoal == 0:
iFriendlyCivs = countGoodRelationPlayers(iPlayer, AttitudeTypes.ATTITUDE_FRIENDLY)
aHelp.append(getIcon(iFriendlyCivs >= 5) + text("TXT_KEY_VICTORY_FRIENDLY_CIVS", iFriendlyCivs, 5))
elif iGoal == 1:
holyCity = game.getHolyCity(iConfucianism)
iCount = countCityWonders(iPlayer, (holyCity.getX(), holyCity.getY()), True)
aHelp.append(getIcon(holyCity.getOwner() == iPlayer) + text("TXT_KEY_VICTORY_CONTROL_HOLY_CITY", holyCity.getName()) + ' ' + getIcon(iCount >= 5) + text("TXT_KEY_VICTORY_HOLY_CITY_WONDERS", holyCity.getName(), iCount, 5))
elif iGoal == 2:
iUnitCombatMelee = infos.type('UNITCOMBAT_MELEE')
iUnitCombatGunpowder = infos.type('UNITCOMBAT_GUN')
iCount = countUnitsOfType(iPlayer, [iUnitCombatMelee, iUnitCombatGunpowder])
aHelp.append(getIcon(iCount >= 200) + text("TXT_KEY_VICTORY_CONTROL_NUM_UNITS", iCount, 200))
elif iVictoryType == iTaoism:
if iGoal == 0:
iHealthTurns = data.iTaoistHealthTurns
aHelp.append(getIcon(iHealthTurns >= turns(100)) + text("TXT_KEY_VICTORY_HEALTH_TURNS", iHealthTurns, turns(100)))
elif iGoal == 1:
bConfucianShrine = getNumBuildings(iPlayer, iConfucianShrine) > 0
bTaoistShrine = getNumBuildings(iPlayer, iTaoistShrine) > 0
iConfucianIncome = calculateShrineIncome(iPlayer, iConfucianism)
iTaoistIncome = calculateShrineIncome(iPlayer, iTaoism)
aHelp.append(getIcon(bConfucianShrine) + text("TXT_KEY_BUILDING_CONFUCIAN_SHRINE") + ' ' + getIcon(bTaoistShrine) + text("TXT_KEY_BUILDING_TAOIST_SHRINE") + ' ' + getIcon(iConfucianIncome + iTaoistIncome >= 40) + text("TXT_KEY_VICTORY_CHINESE_SHRINE_INCOME", iConfucianIncome + iTaoistIncome, 40))
elif iGoal == 2:
holyCity = game.getHolyCity(iTaoism)
aHelp.append(getIcon(holyCity.getOwner() == iPlayer) + text("TXT_KEY_VICTORY_CONTROL_HOLY_CITY", holyCity.getName()) + ' ' + getIcon(holyCity.getCultureLevel() >= 6) + text("TXT_KEY_VICTORY_LEGENDARY_CULTURE_CITY", holyCity.getName()))
elif iVictoryType == iZoroastrianism:
if iGoal == 0:
iNumIncense = pPlayer.getNumAvailableBonuses(iIncense)
aHelp.append(getIcon(iNumIncense >= 6) + text("TXT_KEY_VICTORY_AVAILABLE_INCENSE_RESOURCES", iNumIncense, 6))
elif iGoal == 1:
fReligionPercent = game.calculateReligionPercent(iZoroastrianism)
aHelp.append(getIcon(fReligionPercent >= 10.0) + text("TXT_KEY_VICTORY_SPREAD_RELIGION_PERCENT", infos.religion(iZoroastrianism).getTextKey(), "%.2f%%" % fReligionPercent, 10))
elif iGoal == 2:
holyCity = game.getHolyCity(iZoroastrianism)
aHelp.append(getIcon(holyCity.getOwner() | |
the serial port
:rtype: str
**Command**
+----------+------------+------------+----------------------------------------------------------------------------------+
| Byte 1 | Byte 2-3 | Byte 4-5 | Byte 6 | Byte 7 |
+==========+============+============+==================================================================================+
| I | 00 | KX | 0 | \* |
+----------+------------+------------+----------------------------------------------------------------------------------+
|
| **Command breakdown**
+--------------+----------------------------+------------------------------------------------------------------------------------------+
| Byte | Setting | Description |
+==============+============================+==========================================================================================+
| Byte 1 | I=Instant Command | Sets command to either Instant or Buffer. |
| | B=Buffer Command | |
+--------------+----------------------------+------------------------------------------------------------------------------------------+
| Byte 2-3 | 0-99 | Optional Command ID |
+--------------+----------------------------+------------------------------------------------------------------------------------------+
| Byte 4-5 | | KX = Set Limit Switch X | Set Limit Switch or Emergency Stop Enable/Disable |
| | | KY = Set Limit Switch Y | |
| | | KZ = Set Limit Switch Z | |
| | | KE = Set Limit Switch E | |
+--------------+----------------------------+------------------------------------------------------------------------------------------+
| Byte 6 | 0-1 | | Switches selected Limit Switch to Enable/Disable |
| | | | 0=Disable |
| | | | 1=Enable |
+--------------+----------------------------+------------------------------------------------------------------------------------------+
| Byte 7 | \* | End of Command |
+--------------+----------------------------+------------------------------------------------------------------------------------------+
|
| **Reply**
The PTHAT will send back a reply when it receives a command and also when it has completed a command.
If the Command sent ID number was set for bytes 2-3, then this will be returned in the reply.
+--------------+--------------+--------------+--------------+---------------+---------------+---------------+---------------+---------------+---------------+
| | X Limit | | Y Limit | | Z Limit | | E Limit | | Emergency | | X Limit | | Y Limit | | Z Limit | | E Limit | | Emergency |
| | Received | | Received | | Received | | Received | | Stop | | Completed | | Completed | | Completed | | Completed | | Stop |
| | | | | | Received | | | | | | Completed |
+==============+==============+==============+==============+===============+===============+===============+===============+===============+===============+
| R00KX* | R00KY* | R00KZ* | R00KE* | R00KS* | C00KX* | C00KY* | C00KZ* | C00KE* | C00KS* |
+--------------+--------------+--------------+--------------+---------------+---------------+---------------+---------------+---------------+---------------+
"""
if not self._validate_command():
return False
command = f"{self.command_type}{self.command_id:02}{self.__enable_disable_limit_switches_command}{self.axis}0" \
f"{self._command_end}"
if self.debug:
print(f"disable_limit_switches command: {command}")
if self.auto_send_command:
self.send_command(command=command)
return command
    def enable_emergency_stop(self):
        """
        Enable the Emergency Stop input on the PTHat.

        A reset on the PTHat returns the Limit Switch / Emergency Stop
        inputs to their default of Disabled.

        Command format (7 bytes): ``<I|B><id><KS><1>*``

        * Byte 1: ``I`` = Instant command, ``B`` = Buffer command
        * Bytes 2-3: optional command ID, ``00``-``99``
        * Bytes 4-5: ``KS`` selects the Emergency Stop input (``KX``/``KY``/
          ``KZ``/``KE`` select the per-axis limit switches)
        * Byte 6: ``1`` = Enable (``0`` would disable)
        * Byte 7: ``*`` end of command

        The PTHat replies ``R<id>KS*`` when it receives the command and
        ``C<id>KS*`` when it has completed it; the ID echoes bytes 2-3.

        :returns: the command string to send to the serial port, or False if
            the current command settings fail validation
        :rtype: str or bool
        """
        if not self._validate_command():
            return False
        # Shared "K" prefix plus "S1" selects the Emergency Stop and enables it.
        command = f"{self.command_type}{self.command_id:02}{self.__enable_disable_limit_switches_command}S1" \
                  f"{self._command_end}"
        if self.debug:
            print(f"enable_emergency_stop command: {command}")
        if self.auto_send_command:
            self.send_command(command=command)
        return command
def disable_emergency_stop(self):
"""
When this request is sent, it will Disable Limit Switch or Emergency Stop inputs. A reset on the PTHAT
will set them to default of Disable
:returns: the command to send to the serial port
:rtype: str
**Command**
+----------+------------+------------+----------------------------------------------------------------------------------+
| Byte 1 | Byte 2-3 | Byte 4-5 | Byte 6 | Byte 7 |
+==========+============+============+==================================================================================+
| I | 00 | KS | 0 | \* |
+----------+------------+------------+----------------------------------------------------------------------------------+
|
| **Command breakdown**
+--------------+----------------------------+------------------------------------------------------------------------------------------+
| Byte | Setting | Description |
+==============+============================+==========================================================================================+
| Byte 1 | I=Instant Command | Sets command to either Instant or Buffer. |
| | B=Buffer Command | |
+--------------+----------------------------+------------------------------------------------------------------------------------------+
| Byte 2-3 | 0-99 | Optional Command ID |
+--------------+----------------------------+------------------------------------------------------------------------------------------+
| Byte 4-5 | | KX = Set Limit Switch X | Set Limit Switch or Emergency Stop Enable/Disable |
| | | KY = Set Limit Switch Y | |
| | | KZ = Set Limit Switch Z | |
| | | KE = Set Limit Switch E | |
+--------------+----------------------------+------------------------------------------------------------------------------------------+
| Byte 6 | 0-1 | | Switches selected Limit Switch to Enable/Disable |
| | | | 0=Disable |
| | | | 1=Enable |
+--------------+----------------------------+------------------------------------------------------------------------------------------+
| Byte 7 | \* | End of Command |
+--------------+----------------------------+------------------------------------------------------------------------------------------+
|
| **Reply**
The PTHAT will send back a reply when it receives a command and also when it has completed a command.
If the Command sent ID number was set for bytes 2-3, then this will be returned in the reply.
+--------------+--------------+--------------+--------------+---------------+---------------+---------------+---------------+---------------+---------------+
| | X Limit | | Y Limit | | Z Limit | | E Limit | | Emergency | | X Limit | | Y Limit | | Z Limit | | E Limit | | Emergency |
| | Received | | Received | | Received | | Received | | Stop | | Completed | | Completed | | Completed | | Completed | | Stop |
| | | | | | Received | | | | | | Completed |
+==============+==============+==============+==============+===============+===============+===============+===============+===============+===============+
| R00KX* | R00KY* | R00KZ* | R00KE* | R00KS* | C00KX* | C00KY* | C00KZ* | C00KE* | C00KS* |
+--------------+--------------+--------------+--------------+---------------+---------------+---------------+---------------+---------------+---------------+
"""
if not self._validate_command():
return False
command = f"{self.command_type}{self.command_id:02}{self.__enable_disable_limit_switches_command}S0" \
f"{self._command_end}"
if self.debug:
print(f"disable_emergency_stop command: {command}")
if self.auto_send_command:
self.send_command(command=command)
return command
    def reset(self):
        """
        Call reset on the parent class and then reset all the variables
        for this axis back to their defaults.
        """
        super().reset()
        # Pulse-train configuration.
        self.frequency = 0.0
        self.pulse_count = 0
        self.direction = 0
        # Ramp (acceleration/deceleration) configuration.
        self.start_ramp = 0
        self.finish_ramp = 0
        self.ramp_divide = 0
        self.ramp_pause = 0
        # Auxiliary options.
        self.link_to_adc = 0
        self.enable_line_polarity = 0
        self.pulse_count_change_direction = 0
        self.pulse_counts_sent_back = 0
        # NOTE(review): X pulse-count replies reset to enabled (1) while the
        # Y/Z/E replies reset to 0 — confirm this asymmetry is intentional.
        self.enable_disable_x_pulse_count_replies = 1
        self.enable_disable_y_pulse_count_replies = 0
        self.enable_disable_z_pulse_count_replies = 0
        self.enable_disable_e_pulse_count_replies = 0
        self.pause_all_return_x_pulse_count = 0
        self.pause_all_return_y_pulse_count = 0
        self.pause_all_return_z_pulse_count = 0
        self.pause_all_return_e_pulse_count = 0
        # Internal run-state flags (name-mangled, private to this class).
        self.__paused = False
        self.__started = False
def _validate_command(self):
"""
Validate command settings that are the same for every axis command
:returns: true if the command settings are valid, otherwise false
:rtype: bool
"""
if not self.axis == "X" and not self.axis == "Y" and not self.axis == "Z" and not self.axis == "E":
if self.debug:
print(f"Invalid axis {self.axis}")
return False
return super()._validate_command()
class ADC(PTHat):
"""
.. class:: ADC
This is an ADC class containing info about an ADC. It inherits from PTHat so contains all functionality
needed to communicate with the PTHat via the serial interface.
:param adc_number: ADC | |
The video is not in VAL so in contexts that do and don't allow cache misses we should always get a fallback
result = self.get_result(allow_cache_miss)
self.verify_result_with_fallback_and_youtube(result)
    @ddt.data(
        ({}, '', [], ['en']),
        ({}, '', ['de'], ['de']),
        ({}, '', ['en', 'de'], ['en', 'de']),
        ({}, 'en-subs', ['de'], ['en', 'de']),
        ({'uk': 1}, 'en-subs', ['de'], ['en', 'uk', 'de']),
        ({'uk': 1, 'de': 1}, 'en-subs', ['de', 'en'], ['en', 'uk', 'de']),
    )
    @ddt.unpack
    @patch('xmodule.video_module.transcripts_utils.edxval_api.get_available_transcript_languages')
    def test_student_view_with_val_transcripts_enabled(self, transcripts, english_sub, val_transcripts,
                                                       expected_transcripts, mock_get_transcript_languages):
        """
        Test `student_view_data` with edx-val transcripts enabled.

        The reported transcript languages should be the union (order ignored)
        of the module's own `transcripts`/`sub` settings and the languages
        available from VAL (mocked here).
        """
        mock_get_transcript_languages.return_value = val_transcripts
        self.video.transcripts = transcripts
        self.video.sub = english_sub
        student_view_response = self.get_result()
        # assertCountEqual: same elements, any order.
        self.assertCountEqual(list(student_view_response['transcripts'].keys()), expected_transcripts)
@ddt.ddt
class VideoBlockTest(TestCase, VideoBlockTestBase):
"""
Tests for video descriptor that requires access to django settings.
"""
    def setUp(self):
        """
        Mock the runtime pieces not under test and create a temporary
        on-disk course directory for export/import round-trips.
        """
        super().setUp()
        # Handler URL and course id are not exercised by these tests.
        self.descriptor.runtime.handler_url = MagicMock()
        self.descriptor.runtime.course_id = MagicMock()
        # Temporary export directory, removed again after each test.
        self.temp_dir = mkdtemp()
        file_system = OSFS(self.temp_dir)
        self.file_system = file_system.makedir(EXPORT_IMPORT_COURSE_DIR, recreate=True)
        self.addCleanup(shutil.rmtree, self.temp_dir)
def get_video_transcript_data(self, video_id, language_code='en', file_format='srt', provider='Custom'):
return dict(
video_id=video_id,
language_code=language_code,
provider=provider,
file_format=file_format,
)
    def test_get_context(self):
        """
        Test get_context.

        This test is located here and not in xmodule.tests because get_context calls editable_metadata_fields.
        Which, in turn, uses settings.LANGUAGES from django settings.
        """
        correct_tabs = [
            {
                'name': "Basic",
                'template': "video/transcripts.html",
                'current': True
            },
            {
                'name': 'Advanced',
                'template': 'tabs/metadata-edit-tab.html'
            }
        ]
        rendered_context = self.descriptor.get_context()
        self.assertListEqual(rendered_context['tabs'], correct_tabs)
        # Assert that the Video ID field is present in basic tab metadata context.
        assert rendered_context['transcripts_basic_tab_metadata']['edx_video_id'] ==\
               self.descriptor.editable_metadata_fields['edx_video_id']
def test_export_val_data_with_internal(self):
"""
Tests that exported VAL videos are working as expected.
"""
language_code = 'ar'
transcript_file_name = 'test_edx_video_id-ar.srt'
expected_transcript_path = combine(
combine(self.temp_dir, EXPORT_IMPORT_COURSE_DIR),
combine(EXPORT_IMPORT_STATIC_DIR, transcript_file_name)
)
self.descriptor.edx_video_id = 'test_edx_video_id'
create_profile('mobile')
create_video({
'edx_video_id': self.descriptor.edx_video_id,
'client_video_id': 'test_client_video_id',
'duration': 111.0,
'status': 'dummy',
'encoded_videos': [{
'profile': 'mobile',
'url': 'http://example.com/video',
'file_size': 222,
'bitrate': 333,
}],
})
create_or_update_video_transcript(
video_id=self.descriptor.edx_video_id,
language_code=language_code,
metadata={
'provider': 'Cielo24',
'file_format': 'srt'
},
file_data=ContentFile(TRANSCRIPT_FILE_SRT_DATA)
)
actual = self.descriptor.definition_to_xml(resource_fs=self.file_system)
expected_str = """
<video url_name="SampleProblem" transcripts='{transcripts}'>
<video_asset client_video_id="test_client_video_id" duration="111.0" image="">
<encoded_video profile="mobile" url="http://example.com/video" file_size="222" bitrate="333"/>
<transcripts>
<transcript file_format="srt" language_code="{language_code}" provider="Cielo24"/>
</transcripts>
</video_asset>
<transcript language="{language_code}" src="{transcript_file}"/>
</video>
""".format(
language_code=language_code,
transcript_file=transcript_file_name,
transcripts=json.dumps({language_code: transcript_file_name})
)
parser = etree.XMLParser(remove_blank_text=True)
expected = etree.XML(expected_str, parser=parser)
self.assertXmlEqual(expected, actual)
# Verify transcript file is created.
assert [transcript_file_name] == self.file_system.listdir(EXPORT_IMPORT_STATIC_DIR)
# Also verify the content of created transcript file.
expected_transcript_content = File(open(expected_transcript_path)).read()
transcript = get_video_transcript_data(video_id=self.descriptor.edx_video_id, language_code=language_code)
assert transcript['content'].decode('utf-8') == expected_transcript_content
@ddt.data(
(['en', 'da'], 'test_sub', ''),
(['da'], 'test_sub', 'test_sub')
)
@ddt.unpack
def test_export_val_transcripts_backward_compatibility(self, languages, sub, expected_sub):
"""
Tests new transcripts export for backward compatibility.
"""
self.descriptor.edx_video_id = 'test_video_id'
self.descriptor.sub = sub
# Setup VAL encode profile, video and transcripts
create_profile('mobile')
create_video({
'edx_video_id': self.descriptor.edx_video_id,
'client_video_id': 'test_client_video_id',
'duration': 111.0,
'status': 'dummy',
'encoded_videos': [{
'profile': 'mobile',
'url': 'http://example.com/video',
'file_size': 222,
'bitrate': 333,
}],
})
for language in languages:
create_video_transcript(
video_id=self.descriptor.edx_video_id,
language_code=language,
file_format=Transcript.SRT,
content=ContentFile(TRANSCRIPT_FILE_SRT_DATA)
)
# Export the video module into xml
video_xml = self.descriptor.definition_to_xml(resource_fs=self.file_system)
# Assert `sub` and `transcripts` attribute in the xml
assert video_xml.get('sub') == expected_sub
expected_transcripts = {
language: "{edx_video_id}-{language}.srt".format(
edx_video_id=self.descriptor.edx_video_id,
language=language
)
for language in languages
}
self.assertDictEqual(json.loads(video_xml.get('transcripts')), expected_transcripts)
# Assert transcript content from course OLX
for language in languages:
expected_transcript_path = combine(
combine(self.temp_dir, EXPORT_IMPORT_COURSE_DIR),
combine(EXPORT_IMPORT_STATIC_DIR, expected_transcripts[language])
)
expected_transcript_content = File(open(expected_transcript_path)).read()
transcript = get_video_transcript_data(video_id=self.descriptor.edx_video_id, language_code=language)
assert transcript['content'].decode('utf-8') == expected_transcript_content
def test_export_val_data_not_found(self):
"""
Tests that external video export works as expected.
"""
self.descriptor.edx_video_id = 'nonexistent'
actual = self.descriptor.definition_to_xml(resource_fs=self.file_system)
expected_str = """<video url_name="SampleProblem"/>"""
parser = etree.XMLParser(remove_blank_text=True)
expected = etree.XML(expected_str, parser=parser)
self.assertXmlEqual(expected, actual)
@patch('xmodule.video_module.transcripts_utils.get_video_ids_info')
def test_export_no_video_ids(self, mock_get_video_ids_info):
"""
Tests export when there is no video id. `export_to_xml` only works in case of video id.
"""
mock_get_video_ids_info.return_value = True, []
actual = self.descriptor.definition_to_xml(resource_fs=self.file_system)
expected_str = '<video url_name="SampleProblem"></video>'
parser = etree.XMLParser(remove_blank_text=True)
expected = etree.XML(expected_str, parser=parser)
self.assertXmlEqual(expected, actual)
def test_import_val_data_internal(self):
"""
Test that import val data internal works as expected.
"""
create_profile('mobile')
module_system = DummySystem(load_error_modules=True)
edx_video_id = 'test_edx_video_id'
sub_id = '0CzPOIIdUsA'
external_transcript_name = 'The_Flash.srt'
external_transcript_language_code = 'ur'
val_transcript_language_code = 'ar'
val_transcript_provider = 'Cielo24'
external_transcripts = {
external_transcript_language_code: external_transcript_name
}
# Create static directory in import file system and place transcript files inside it.
module_system.resources_fs.makedirs(EXPORT_IMPORT_STATIC_DIR, recreate=True)
# Create VAL transcript.
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
'test_edx_video_id-ar.srt',
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
# Create self.sub and self.transcripts transcript.
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
subs_filename(sub_id, self.descriptor.transcript_language),
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
external_transcript_name,
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
xml_data = """
<video edx_video_id='{edx_video_id}' sub='{sub_id}' transcripts='{transcripts}'>
<video_asset client_video_id="test_client_video_id" duration="111.0">
<encoded_video profile="mobile" url="http://example.com/video" file_size="222" bitrate="333"/>
<transcripts>
<transcript file_format="srt" language_code="{val_transcript_language_code}" provider="{val_transcript_provider}"/>
</transcripts>
</video_asset>
</video>
""".format(
edx_video_id=edx_video_id,
sub_id=sub_id,
transcripts=json.dumps(external_transcripts),
val_transcript_language_code=val_transcript_language_code,
val_transcript_provider=val_transcript_provider
)
id_generator = Mock()
id_generator.target_course_id = "test_course_id"
video = self.descriptor.from_xml(xml_data, module_system, id_generator)
assert video.edx_video_id == 'test_edx_video_id'
video_data = get_video_info(video.edx_video_id)
assert video_data['client_video_id'] == 'test_client_video_id'
assert video_data['duration'] == 111.0
assert video_data['status'] == 'imported'
assert video_data['courses'] == [{id_generator.target_course_id: None}]
assert video_data['encoded_videos'][0]['profile'] == 'mobile'
assert video_data['encoded_videos'][0]['url'] == 'http://example.com/video'
assert video_data['encoded_videos'][0]['file_size'] == 222
assert video_data['encoded_videos'][0]['bitrate'] == 333
# Verify that VAL transcript is imported.
self.assertDictContainsSubset(
self.get_video_transcript_data(
edx_video_id,
language_code=val_transcript_language_code,
provider=val_transcript_provider
),
get_video_transcript(video.edx_video_id, val_transcript_language_code)
)
# Verify that transcript from sub field is imported.
self.assertDictContainsSubset(
self.get_video_transcript_data(
edx_video_id,
language_code=self.descriptor.transcript_language
),
get_video_transcript(video.edx_video_id, self.descriptor.transcript_language)
)
# Verify that transcript from transcript field is imported.
self.assertDictContainsSubset(
self.get_video_transcript_data(
edx_video_id,
language_code=external_transcript_language_code
),
get_video_transcript(video.edx_video_id, external_transcript_language_code)
)
def test_import_no_video_id(self):
"""
Test that importing a video with no video id, creates a new external video.
"""
xml_data = """<video><video_asset></video_asset></video>"""
module_system = DummySystem(load_error_modules=True)
id_generator = Mock()
# Verify edx_video_id is empty before.
assert self.descriptor.edx_video_id == ''
video = self.descriptor.from_xml(xml_data, module_system, id_generator)
# Verify edx_video_id is populated after the import.
assert video.edx_video_id != ''
video_data = get_video_info(video.edx_video_id)
assert video_data['client_video_id'] == 'External Video'
assert video_data['duration'] == 0.0
assert video_data['status'] == 'external'
def test_import_val_transcript(self):
"""
Test that importing a video with val transcript, creates a new transcript record.
"""
edx_video_id = 'test_edx_video_id'
val_transcript_language_code = 'es'
val_transcript_provider = 'Cielo24'
xml_data = """
<video edx_video_id='{edx_video_id}'>
<video_asset client_video_id="test_client_video_id" duration="111.0">
<transcripts>
<transcript file_format="srt" language_code="{val_transcript_language_code}" provider="{val_transcript_provider}"/>
</transcripts>
</video_asset>
</video>
""".format(
edx_video_id=edx_video_id,
val_transcript_language_code=val_transcript_language_code,
val_transcript_provider=val_transcript_provider
)
module_system = DummySystem(load_error_modules=True)
id_generator = Mock()
# Create static directory in import file system and place transcript files inside it.
module_system.resources_fs.makedirs(EXPORT_IMPORT_STATIC_DIR, recreate=True)
# Create VAL transcript.
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
'test_edx_video_id-es.srt',
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
# Verify edx_video_id is empty before.
assert self.descriptor.edx_video_id == ''
video = self.descriptor.from_xml(xml_data, module_system, id_generator)
# Verify edx_video_id is populated after the import.
assert video.edx_video_id != ''
video_data = get_video_info(video.edx_video_id)
assert video_data['status'] == 'external'
# Verify that VAL transcript is imported.
self.assertDictContainsSubset(
self.get_video_transcript_data(
edx_video_id,
language_code=val_transcript_language_code,
provider=val_transcript_provider
),
get_video_transcript(video.edx_video_id, val_transcript_language_code)
)
@ddt.data(
(
'test_sub_id',
{'en': 'The_Flash.srt'},
'<transcripts><transcript file_format="srt" language_code="en" provider="Cielo24"/></transcripts>',
# VAL transcript takes priority
{
'video_id': 'test_edx_video_id',
'language_code': 'en',
'file_format': 'srt',
'provider': 'Cielo24'
}
),
(
'',
{'en': 'The_Flash.srt'},
'<transcripts><transcript file_format="srt" language_code="en" provider="Cielo24"/></transcripts>',
# VAL transcript takes priority
{
'video_id': 'test_edx_video_id',
'language_code': 'en',
'file_format': 'srt',
'provider': 'Cielo24'
}
),
(
'test_sub_id',
{},
'<transcripts><transcript file_format="srt" language_code="en" provider="Cielo24"/></transcripts>',
# VAL transcript takes priority
{
'video_id': 'test_edx_video_id',
'language_code': 'en',
'file_format': 'srt',
'provider': 'Cielo24'
}
),
(
'test_sub_id',
{'en': 'The_Flash.srt'},
'',
# self.sub transcript takes priority
{
'video_id': 'test_edx_video_id',
'language_code': 'en',
'file_format': 'sjson',
'provider': 'Custom'
}
),
(
'',
{'en': 'The_Flash.srt'},
'',
# self.transcripts would be saved.
{
'video_id': 'test_edx_video_id',
'language_code': 'en',
'file_format': 'srt',
'provider': 'Custom'
}
)
)
@ddt.unpack
def test_import_val_transcript_priority(self, sub_id, external_transcripts, val_transcripts, expected_transcript):
"""
Test that importing a video with different type of transcripts for same language,
creates expected transcript record.
"""
edx_video_id = 'test_edx_video_id'
language_code = 'en'
module_system = DummySystem(load_error_modules=True)
id_generator = Mock()
# Create static directory in import file system and place transcript files inside it.
module_system.resources_fs.makedirs(EXPORT_IMPORT_STATIC_DIR, recreate=True)
xml_data = "<video edx_video_id='test_edx_video_id'"
# Prepare self.sub transcript data.
if sub_id:
create_file_in_fs(
TRANSCRIPT_FILE_SJSON_DATA,
subs_filename(sub_id, language_code),
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
xml_data += " sub='{sub_id}'".format(
sub_id=sub_id
)
# Prepare self.transcripts transcripts data.
if external_transcripts:
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
external_transcripts['en'],
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
xml_data += " transcripts='{transcripts}'".format(
transcripts=json.dumps(external_transcripts),
)
xml_data += '><video_asset client_video_id="test_client_video_id" duration="111.0">'
# Prepare VAL transcripts data.
if val_transcripts:
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
'{edx_video_id}-{language_code}.srt'.format(
edx_video_id=edx_video_id,
language_code=language_code
),
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
xml_data += val_transcripts
xml_data += '</video_asset></video>'
# Verify edx_video_id is empty before import.
assert self.descriptor.edx_video_id == ''
video = self.descriptor.from_xml(xml_data, module_system, id_generator)
# Verify edx_video_id is not empty after import.
assert video.edx_video_id != ''
video_data = get_video_info(video.edx_video_id)
assert video_data['status'] == 'external'
# Verify that correct transcripts are imported.
self.assertDictContainsSubset(
expected_transcript,
get_video_transcript(video.edx_video_id, language_code)
)
def test_import_val_data_invalid(self):
create_profile('mobile')
module_system = DummySystem(load_error_modules=True)
# Negative file_size is invalid
xml_data = """
<video edx_video_id="test_edx_video_id">
<video_asset client_video_id="test_client_video_id" duration="111.0">
| |
with different weights. Conceptually it
computes:
output= [f(theta[i], input[i]) for i in range(p.repeat)]
where f is the fprop function of the sublayer, theta[i] and input[i] are
the weights and inputs of the i-th sublayer.
"""
def _InferOutShapes(self, args):
input_shapes = [
None if arg is None else tshape.Shape(arg.get_shape().as_list()[1:])
for arg in args
]
out_shapes = self.body.FPropMeta(self.body.params, *input_shapes).out_shapes
return [None if s is None else s.ToTensorShape() for s in out_shapes]
  def FProp(self, theta, *args):
    """Runs p.repeat copies of self.body.FProp independently.

    Args:
      theta: Layer model parameters. The shape of each variable in theta is
        always [p.repeat, ...]. And the i-th slice theta[i] becomes theta of
        the i-th copy of self.body.
      *args: Input arguments. The shape of each tensor in args is always
        [p.repeat, ....]. And the list [arg[i] for arg in args] becomes inputs
        to the i-th copy of self.body.FProp.

    Returns:
      The accumulated output_tensors. Each tensor t in the return has the
      shape [p.repeat, ....] and the tuple (t[i] for i in output_tensors) is
      the return tuple of the i-th self.body.FProp.
    """
    p = self.params
    for arg in args:
      if arg is not None:
        # Shape check only: the rebinding of the loop variable is deliberate
        # and the checked tensor is otherwise discarded.
        arg = py_utils.HasShape(arg, [p.repeat], ndims=1)
    theta_stack = _MaybeStackExtraTheta(theta.body, self.body.vars, p.repeat)
    inputs = py_utils.NestedMap(theta=theta_stack, args=list(args))
    # Infer out_shapes from FPropMeta.
    out_shapes = self._InferOutShapes(args)

    def _CellFn(unused_theta, unused_state0, inputs):
      """Recurrent cell function wrapper of body.FProp."""
      # Sets shapes for both theta and inputs to self.body.FProp.
      for dst, src in zip(inputs.args + inputs.theta.Flatten(),
                          list(args) + theta_stack.Flatten()):
        if src is not None:
          # Per-copy shape: the leading repeat dimension is sliced off.
          dst.set_shape(tf.TensorShape(src.shape.as_list()[1:]))

      # Runs the actual body.FProp
      fprop_outputs = self.body.FProp(inputs.theta, *inputs.args)
      fprop_outputs = _ToTuple(fprop_outputs)
      assert len(fprop_outputs) == len(out_shapes)

      # Passes fprop outputs to the next layer through state.
      state1 = py_utils.NestedMap(outputs=list(fprop_outputs))
      return state1, py_utils.NestedMap()

    with tf.name_scope(p.name):
      # Initiate state0 with inferred output shapes.
      # NOTE(review): assumes args[0] is a tensor (its dtype seeds the
      # zero-initialized state) — confirm callers never pass None first.
      state0 = py_utils.NestedMap(
          outputs=[tf.zeros(shape, args[0].dtype) for shape in out_shapes])
      # Runs body.FProp p.repeat times using Recurrent.
      acc_states, _ = recurrent.Recurrent(
          theta=py_utils.NestedMap(),
          state0=state0,
          inputs=inputs,
          cell_fn=_CellFn)
      # Retrieves fprop outputs from state1 and sets shapes.
      output_tensors = tuple(acc_states.outputs)
      for out_idx in range(len(output_tensors)):
        output_tensors[out_idx].set_shape(
            tf.TensorShape([p.repeat] + out_shapes[out_idx].as_list()))
      # NOTE(review): unwraps based on len(args) rather than the number of
      # outputs — verify a single-input body cannot return multiple outputs.
      return output_tensors[0] if len(args) == 1 else tuple(output_tensors)
@classmethod
def FPropMeta(cls, p, *args):
py_utils.CheckShapes(args)
input_shapes = [
None if arg is None else tshape.Shape(arg.get_shape().as_list()[1:])
for arg in args
]
meta = p.body.cls.FPropMeta(p.body, *input_shapes)
py_utils.CheckShapes(meta.out_shapes)
total = meta.flops * p.repeat
out_shapes = [
None if s is None else tshape.Shape([p.repeat] + s[:])
for s in meta.out_shapes
]
return py_utils.NestedMap(flops=total, out_shapes=tuple(out_shapes))
class SequentialLayer(base_layer.BaseLayer):
  """A layer which connects a few layers in a sequence."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('sub', [], 'A list of layers\' params.')
    p.Define('repeat', 1, 'Repeat layers specified in \'sub\' '
             'this many times.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    if p.repeat <= 1:
      # Single pass: create each sub layer once; _seq records (name, child)
      # in call order.
      self._seq = []
      for sub in p.sub:
        self.CreateChild(sub.name, sub)
        self._seq.append((sub.name, self.children[sub.name]))
    else:
      # We create 'repeat' number of sub layers. Each sub layer is a
      # sequential layer specified by 'sub'. This allows us to name each
      # repetition with a unique name.
      children = []
      for i in range(p.repeat):
        children.append(p.Copy().Set(name='%03d' % i, repeat=1))
      self.CreateChildren('rep', children)

  def FProp(self, theta, *args):
    p = self.params
    with tf.name_scope(p.name):
      tf.logging.vlog(1, 'layer %s', self.params.name)
      if p.repeat <= 1:
        # Thread args through each named child; each child's output becomes
        # the next child's input.
        for (name, ch) in self._seq:
          th = theta[name]
          args = _ToTuple(args)
          tf.logging.vlog(1, 'SequentialLayer: call %s %s %d %s',
                          ch.params.name, ch, len(args), str(args))
          args = ch.FProp(th, *args)
      else:
        # Repeated case: each element of self.rep is itself a repeat=1
        # SequentialLayer built from the same 'sub' params.
        for (ch, th) in zip(self.rep, theta.rep):
          args = _ToTuple(args)
          tf.logging.vlog(1, ' call %s %s %d %s', ch.params.name, ch,
                          len(args), str(args))
          args = ch.FProp(th, *args)
      args = _ToTuple(args)
    return args[0] if len(args) == 1 else args

  @classmethod
  def FPropMeta(cls, p, *args):
    py_utils.CheckShapes(args)
    total = 0
    # Chain shapes through 'sub' once per repetition; flops accumulate.
    for _ in range(p.repeat):
      for sub in p.sub:
        tf.logging.vlog(1, ' seq abs fprop %s %s %d %s', sub.name, sub.cls,
                        len(args), str(args))
        meta = sub.cls.FPropMeta(sub, *args)
        py_utils.CheckShapes(meta.out_shapes)
        total += meta.flops
        args = meta.out_shapes
    return py_utils.NestedMap(flops=total, out_shapes=args)
class UnarySequentialLayer(base_layer.BaseLayer):
  """Chains sub layers that each take and produce a single value.

  Each sub layer's FProp must accept exactly one input arg (besides theta);
  its return value feeds the next sub layer, and the last sub layer's
  return value is this layer's output.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('sub', [], 'A list of layers\' params.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    # _seq holds (name, child) pairs in call order.
    self._seq = []
    for sub_params in p.sub:
      self.CreateChild(sub_params.name, sub_params)
      self._seq.append((sub_params.name, self.children[sub_params.name]))

  def FProp(self, theta, x):
    tf.logging.vlog(1, 'layer %s', self.params.name)
    with tf.name_scope(self.params.name):
      out = x
      for (child_name, child) in self._seq:
        tf.logging.vlog(1, ' call %s %s %s', child.params.name, child, out)
        out = child.FProp(theta[child_name], out)
      return out

  @classmethod
  def FPropMeta(cls, p, x):
    flops = 0
    shape = x
    # Chain the shape through each sub layer's metadata.
    for sub in p.sub:
      tf.logging.vlog(1, ' seq abs fprop %s %s %s', sub.name, sub.cls, shape)
      meta = sub.cls.FPropMeta(sub, shape)
      flops += meta.flops
      shape = meta.out_shapes
    return py_utils.NestedMap(flops=flops, out_shapes=shape)
class GraphTensors:
  """A registry of named tensors (or NestedMaps of tensors).

  Items are addressed by dotted paths such as 'a.b.c'.
  """

  def __init__(self):
    self._named_tensors = py_utils.NestedMap()

  def StoreTensor(self, path, tensor):
    """Stores `tensor` under dotted `path`.

    Intermediate NestedMaps are created on demand (or extended if they
    already exist); an existing leaf at `path` is never overwritten.

    Args:
      path: A path input a NestedMap, e.g. 'a.b.c'.
      tensor: The item to store (may be a NestedMap or a tensor).

    Raises:
      ValueError: If something is already stored at `path`.
    """
    keys = path.strip().split('.')
    node = self._named_tensors
    # Walk (creating as needed) every key except the last.
    for key in keys[:-1]:
      assert isinstance(node, py_utils.NestedMap), node
      if key not in node:
        node[key] = py_utils.NestedMap()
      node = node[key]
    leaf = keys[-1]
    if leaf in node:
      raise ValueError('A tensor named "%s" (%s) already exists.' % (leaf, path))
    node[leaf] = tensor

  def GetTensor(self, path):
    """Returns the item stored at dotted `path`.

    Args:
      path: A path through a series of NestedMaps, e.g. 'a.b.c'.
    """
    node = self._named_tensors
    for key in path.strip().split('.'):
      assert isinstance(node, py_utils.NestedMap), node
      assert key in node, '%s not found in %s' % (key, node)
      node = node[key]
    return node
class GraphSignature:
"""Represents the input/output signature of a GraphLayer.
A signature is of the form:
input->output
or, optionally just an input:
input
The output part may be:
out1 (a tensor name)
out1.a.b (a path into a NestedMap)
out1,myvar,out2.x (or a sequence of these things)
The input part may be:
in1 (a tensor name)
in1.b (a path into a NestedMap)
[in1,myvar.c] (a list)
(a=in1,b=in2) (a NestedMap, using the syntax of the NestedMap constructor)
in1,[a,b] (or a sequence of these things)
In BNF::
key := [A-Za-z_][A-Za-z0-9_]*
path := key | path '.' key
path_seq := <empty> | path | path_seq ',' path
item := path | list | map
list := '[' item_seq ']'
item_seq := <empty> | item | item_seq ',' item
map := '(' map_pair_seq ')'
map_pair := key '=' item
map_pair_seq := <empty> | map_pair | map_pair_seq ',' map_pair
input := item_seq
output := path_seq
"""
  def __init__(self, signature):
    """Parses `signature` into input and output structures.

    Args:
      signature: A string of the form 'inputs' or 'inputs->outputs',
        e.g. '[a,b],c->out.x'. See the class docstring for the grammar.
    """
    self.signature = signature
    # Punctuation recognized by the input tokenizer.
    self.symbols = set(['[', ']', '=', '(', ')', ','])
    parts = self.signature.split('->')
    self._input_signature = parts[0]
    # Output part is optional; None when no '->' is present.
    self._output_signature = None
    if len(parts) > 1:
      self._output_signature = parts[1]
    # Tokenize first: _ParseInputs takes no arguments, so it presumably
    # consumes self._tokens — confirm before reordering these calls.
    self._tokens = self._TokenizeInputs(self._input_signature)
    self._outputs = self._ParseOutputs(self._output_signature)
    self._inputs = self._ParseInputs()
def __str__(self):
return '(%s->%s)' % (self._inputs, self.outputs)
  @property
  def inputs(self):
    # Parsed input structure produced by _ParseInputs at construction time.
    return self._inputs
  @property
  def outputs(self):
    # List of parsed output paths ([] when the signature had no '->' part).
    return self._outputs
def _ParseOutputs(self, outputs):
"""Splits the output spec into comma-delimited parts.
Args:
outputs: A string containing the output specification, like, 'a,b'.
Returns:
The parsed representation, e.g. ['a', 'b']
"""
if outputs is None:
return []
o_tensors = [x.strip() for x in outputs.split(',')]
for x in o_tensors:
assert x
return o_tensors
def _TokenizeInputs(self, inputs):
"""Splits the input signature into tokens (not data structures).
Args:
inputs: A string containing the input speficiation, like "[a,b],c"
Returns:
The tokenized representation, like ['[', 'a', ',', 'b', ']', ',', 'c']
"""
start = -1
tokens = | |
# program to extract data from FITS files into files to repressent a box of the stellar atmosphere, including turbulent velocity.
import pyfits
import numpy as np
import os.path
import os
# Currently using Slice_rot_3D, set vbswitch to 0 if v and B files are not required.
def Slice(locin, locout, star, mag, num, dz):
    """Writes per-x-index slice files (T, P, rho, plus v and B files) along
    one horizontal axis of a MURaM cube.

    Fix: the three output files are now opened with context managers so they
    are closed even if a write fails part-way through (the originals leaked
    handles on error).

    Args:
        locin: Root directory containing the input FITS cubes.
        locout: Root directory for the output slice files.
        star: Spectral-type label, e.g. 'G2'.
        mag: Magnetic-field label, e.g. '500G'.
        num: Snapshot number string used in the FITS file names.
        dz: Height resolution in km —
            11.25 for F3V, 10.03 for G2V, 6 for K0V, 4 for M0V.
            For width/length (dx=dy): 58.5938 km for F3V, 17.5781 km for
            G2V, 11.7188 km for K0V, 4.8828 km for M0V.
    """
    folderin = locin+'/'+star+'/'+star+'_'+mag
    folderout = locout+'/'+ star+'/'+star+'_'+mag+'/1000/'+num+'/'
    pref = star+'_'+mag+'_'+num+'_'
    dataT = pyfits.getdata(folderin + '/eosT.' + num + '.fits')
    dataP = pyfits.getdata(folderin + '/eosP.' + num + '.fits')
    datarho = pyfits.getdata(folderin + '/result_0.' + num + '.fits')
    datavz = pyfits.getdata(folderin + '/result_2.' + num + '.fits')
    dataB = pyfits.getdata(folderin + '/result_6.' + num + '.fits')
    # For disk centre the line-of-sight velocity is v2 (vz), so result_1 and
    # result_3 are not needed. MURaM stores momentum: divide by density.
    v = datavz/datarho
    maxind = dataT.shape[1]-1
    minind = 0  # 300
    for xi in range(0, dataT.shape[0]):
        print(str(xi))
        with open(folderout+pref + str(xi), 'w') as f, \
             open(folderout+'v/v_'+pref + str(xi), 'w') as fv, \
             open(folderout+'B/B_'+pref + str(xi), 'w') as fB:
            # Headers: number of columns, then number of depth points.
            f.write(str(dataT.shape[2]) + '\n')
            f.write(str(dataT.shape[1]-minind) + '\n')
            fv.write(str(dataT.shape[2]) + '\n')
            fv.write(str(dataT.shape[1]-minind) + '\n')
            fB.write(str(dataT.shape[2]) + '\n')
            fB.write(str(dataT.shape[1]-minind) + '\n')
            for yi in range(0, dataT.shape[2]):
                # Depth runs from the top of the box (zi = maxind) downwards.
                for zi in range(maxind, minind-1, -1):
                    f.write('{0:12.6}{1:12.6}{2:12.6}{3:12.6}\n'.format(
                        dz*(maxind-zi), dataT[xi,zi,yi], dataP[xi,zi,yi],
                        datarho[xi,zi,yi]))
                    fv.write(str(v[xi,zi,yi])+"\n")
                    fB.write(str(dataB[xi,zi,yi])+"\n")
    return
def Slice_rot(locin, locout, star, mag, num, dz):
    """Writes per-x-index slice files (T, P, rho, plus v; B only when
    mag != 'hydro') along the second horizontal axis of a MURaM cube,
    matching the orientation of Kok Leng's slices.
    """
    # Slice along the other horizontal axis. Generally now used as it matches Kok Leng's slices.
    # dz = height resolution is 11.25 km for F3V
    # 10 km for G2V
    # 6 km for K0V
    # 4 km for M0V
    # for the width and length, if you wish dx=dy, the resolution is
    # 58.5938 km for F3V,
    # 17.5781 km for G2V,
    # 11.7188 km for K0V 4.8828 km for M0V.
    #star = 'G2'
    #mag = '500G'
    folderin = locin+'/'+star+'/'+star+'_'+mag
    folderout = locout+'/'+star+'/'+star+'_'+mag+'/1000/'+num+'/'
    # Create the output directory tree on first use.
    if os.path.exists(folderout):
        print(folderout)
    else:
        os.makedirs(folderout)
        os.makedirs(folderout+'/v')
        # Hydrodynamic runs produce no magnetic-field output.
        if mag!='hydro':
            os.makedirs(folderout+'/B')
        print('created:')
        print(folderout)
    pref = star+'_'+mag+'_'+num+'_'
    dataT = pyfits.getdata(folderin + '/eosT.' + num + '.fits')
    dataP = pyfits.getdata(folderin + '/eosP.' + num + '.fits')
    datarho = pyfits.getdata(folderin + '/result_0.' + num + '.fits')
    datavz = pyfits.getdata(folderin + '/result_2.' + num + '.fits')
    print(dataT.shape)
    if mag!='hydro':
        dataB = pyfits.getdata(folderin + '/result_6.' + num + '.fits')
    """ for disk centre v line of sight = v2 (vz)
    therefore datav1 = pyfits.getdata(loc + '/result_1.' + num + '.fits')
    datav3 = pyfits.getdata(loc + '/result_3.' + num + '.fits')
    and calculation for v line of sight not required
    """
    # MURaM stores momentum; divide by density to get velocity.
    v = datavz/datarho
    maxind = dataT.shape[1]-1
    minind = 0
    for xi in range(0,dataT.shape[2]):#0,dataT.shape[2]):
        print(str(xi))
        f = open(folderout+pref + 'rot_'+str(xi),'w')
        fv = open(folderout+'v/v_'+pref + 'rot_' + str(xi),'w')
        if mag!='hydro':
            fB = open(folderout+'B/B_'+pref + 'rot_' + str(xi),'w')
        # Headers: number of columns, then number of depth points.
        f.write(str(dataT.shape[0]) + '\n')
        f.write(str(dataT.shape[1]-minind) + '\n')
        fv.write(str(dataT.shape[0]) + '\n')
        fv.write(str(dataT.shape[1]-minind) + '\n')
        if mag!='hydro':
            fB.write(str(dataT.shape[0]) + '\n')
            fB.write(str(dataT.shape[1]-minind) + '\n')
        for yi in range(0,dataT.shape[0]):
            # Depth runs from the top of the box (zi = maxind) downwards.
            for zi in range(maxind,minind-1,-1):
                #print(str(zi))
                f.write('{0:12.6}{1:12.6}{2:12.6}{3:12.6}\n'.format(dz*(maxind-zi), dataT[yi,zi,xi], dataP[yi,zi,xi], datarho[yi,zi,xi]))
                fv.write(str(v[yi,zi,xi])+"\n")
                if mag!='hydro':
                    fB.write(str(dataB[yi,zi,xi])+"\n")
        f.close()
        fv.close()
        if mag!='hydro':
            fB.close()
    """print(str(dataT[0,zi,0]) + ' ' + str(dataP[0,zi,0]) + ' ' + str(datarho[0,zi,0]) + ' ' + str(vturb[0,zi,0]))"""
    return
def Slice_rot_3D(locin, locout, star, mag, num, dz, vbswitch, minind=0, endstr=''):
    """
    Extracts relevant info from eosT, eosP and result MURaM files and writes out slice files ready to be used by modcon.
    e.g. ss.Slice_rot_3D('/media/BENJAMINS SOLAR CUBES/New_Solar_Cubes_untar','/media/Stellar3/slices_v','M0','hydro','030000',4.0,1)
    Same as Slice_rot, but slices v and B with all 3 dimensions, if vbswitch is 1.
    dz = height resolution:
    11.25 km for F3V
    10 km for G2V
    6 km for K0V
    4 km for M0V
    3.2 km for M2V
    vbswitch = 1 to extract 3D v and B files
    minind = 0 (for most stars) = 300 (for F3 stars) how many points to cut off the bottom.
    for the width and length, dx=dy, the resolution is
    58.5938 km for F3V,
    17.5781 km for G2V,
    11.7188 km for K0V,
    4.8828 km for M0V,
    3.0469 km for M2V.
    """
    folderin = locin+'/'+star+'/'+star+'_'+mag
    folderout = locout+'/'+star+'/'+star+'_'+mag+'/1000/'+num+'/'
    # Create the output directory on first use.
    if os.path.exists(folderout):
        print(folderout)
    else:
        os.makedirs(folderout)
        print('created:')
        print(folderout)
    pref = star+'_'+mag+'_'+num+'_'
    dataT = pyfits.getdata(folderin + '/eosT.' + num + '.fits')
    dataP = pyfits.getdata(folderin + '/eosP.' + num + '.fits')
    datarho = pyfits.getdata(folderin + '/result_0.' + num + '.fits')
    print(dataT.shape)
    maxind = dataT.shape[1]-1
    for xi in range(0,dataT.shape[2]):#0,dataT.shape[2]):
        print(str(xi))
        f = open(folderout+pref + 'rot_'+endstr+str(xi),'w')
        # Headers: number of columns, then number of depth points kept.
        f.write(str(dataT.shape[0]) + '\n')
        f.write(str(dataT.shape[1]-minind) + '\n')
        #print(str(dataT.shape[1]-minind))
        for yi in range(0,dataT.shape[0]):
            # Depth runs from the top of the box (zi = maxind) downwards.
            for zi in range(maxind,minind-1,-1):
                #print(str(zi))
                f.write('{0:12.6}{1:12.6}{2:12.6}{3:12.6}\n'.format(dz*(maxind-zi), dataT[yi,zi,xi], dataP[yi,zi,xi], datarho[yi,zi,xi]))
        f.close()
    # Optionally extract the full 3-component velocity (and, for magnetic
    # runs, field) cubes as separate slice files.
    if vbswitch:
        Slice_rot_v(locin, locout, star, mag, num, datarho, minind, endstr)
        if mag!='hydro':
            Slice_rot_B(locin, locout, star, mag, num, minind, endstr)
    return
def Slice_rot_B(locin, locout, star, mag, num, minind, endstr=''):
    """Writes per-x-index magnetic-field slice files (Bx, By, Bz per line).

    Fix: output files are opened with a context manager so the handle is
    closed even if a write fails (the original leaked it on error).

    Args:
        locin: Root directory containing the input FITS cubes.
        locout: Root directory for the output slice files.
        star: Spectral-type label, e.g. 'G2'.
        mag: Magnetic-field label, e.g. '500G'.
        num: Snapshot number string used in the FITS file names.
        minind: Number of depth points cut off the bottom of the box.
        endstr: Optional extra tag inserted into the output file names.
    """
    folderin = locin+'/'+star+'/'+star+'_'+mag
    folderout = locout+'/'+star+'/'+star+'_'+mag+'/1000/'+num+'/B'
    # Create the output directory on first use.
    if os.path.exists(folderout):
        print(folderout)
    else:
        os.makedirs(folderout)
        print('created:')
        print(folderout)
    pref = star+'_'+mag+'_'+num+'_'
    dataBx = pyfits.getdata(folderin + '/result_5.' + num + '.fits')
    dataBz = pyfits.getdata(folderin + '/result_6.' + num + '.fits')
    dataBy = pyfits.getdata(folderin + '/result_7.' + num + '.fits')
    maxind = dataBx.shape[1]-1
    for xi in range(0, dataBx.shape[2]):
        with open(folderout+'/B_'+pref + 'rot_'+endstr + str(xi), 'w') as fB:
            # Headers: number of columns, then number of depth points kept.
            fB.write(str(dataBx.shape[0]) + '\n')
            fB.write(str(dataBx.shape[1]-minind) + '\n')
            fB.write('x \t y \t z \n')
            for yi in range(0, dataBx.shape[0]):
                # Depth runs from the top of the box (zi = maxind) downwards.
                for zi in range(maxind, minind-1, -1):
                    fB.write(str(dataBx[yi,zi,xi])+'\t'+str(dataBy[yi,zi,xi])+'\t'+str(dataBz[yi,zi,xi])+"\n")
    return
def Slice_rot_v(locin, locout, star, mag, num, datarho, minind, endstr=''):
    """Writes per-x-index velocity slice files (vx, vy, vz per line).

    Fix: output files are opened with a context manager so the handle is
    closed even if a write fails (the original leaked it on error).

    Args:
        locin: Root directory containing the input FITS cubes.
        locout: Root directory for the output slice files.
        star: Spectral-type label, e.g. 'G2'.
        mag: Magnetic-field label, e.g. '500G' or 'hydro'.
        num: Snapshot number string used in the FITS file names.
        datarho: Density cube used to convert the stored momenta to velocity.
        minind: Number of depth points cut off the bottom of the box.
        endstr: Optional extra tag inserted into the output file names.
    """
    folderin = locin+'/'+star+'/'+star+'_'+mag
    folderout = locout+'/'+star+'/'+star+'_'+mag+'/1000/'+num+'/v'
    # Create the output directory on first use.
    if os.path.exists(folderout):
        print(folderout)
    else:
        os.makedirs(folderout)
        print('created:')
        print(folderout)
    pref = star+'_'+mag+'_'+num+'_'
    datavx = pyfits.getdata(folderin + '/result_1.' + num + '.fits')
    datavz = pyfits.getdata(folderin + '/result_2.' + num + '.fits')
    datavy = pyfits.getdata(folderin + '/result_3.' + num + '.fits')
    maxind = datavx.shape[1]-1
    # MURaM stores momentum; divide by density to get velocity components.
    vx = datavx/datarho
    vz = datavz/datarho
    vy = datavy/datarho
    for xi in range(0, datavx.shape[2]):
        print(xi)
        with open(folderout+'/v_'+pref + 'rot_'+endstr + str(xi), 'w') as fv:
            # Headers: number of columns, then number of depth points kept.
            fv.write(str(datavx.shape[0]) + '\n')
            fv.write(str(datavx.shape[1]-minind) + '\n')
            fv.write('x \t y \t z \n')
            for yi in range(0, datavx.shape[0]):
                # Depth runs from the top of the box (zi = maxind) downwards.
                for zi in range(maxind, minind-1, -1):
                    fv.write(str(vx[yi,zi,xi])+'\t'+str(vy[yi,zi,xi])+'\t'+str(vz[yi,zi,xi])+"\n")
    return
def writefile(filenam, num, dz, dataT, dataP, datarho):
    """Writes a 1-D average atmosphere column to '<pref>avg2'.

    Bug fix: in the original, the assignments of star/mag/folder/pref had
    been swallowed into the docstring, so `folder` and `pref` were undefined
    and the function raised NameError on first use. They are restored as
    code below. The output directory is also created when missing, matching
    the other writers in this module, and the file is opened with a context
    manager so it is closed even if a write fails.

    Args:
        filenam: Unused; kept for interface compatibility with callers.
        num: Snapshot number string used in the output path.
        dz: Height step in km (see module comments for per-star values).
        dataT: 1-D temperature profile (indexed by depth).
        dataP: 1-D pressure profile.
        datarho: 1-D density profile.
    """
    star = 'G2'
    mag = 'hydro'
    folder = './slices_v/'+star+'/'+star+'_'+mag+'/1000/'+num+'/'
    pref = star+'_'+mag+'_'+num+'_'
    if not os.path.exists(folder):
        os.makedirs(folder)
    with open(folder+pref+'avg2', 'w') as f:
        # Header: one column, then the number of depth points.
        f.write(str(1) + '\n')
        f.write(str(dataT.shape[0]) + '\n')
        for zi in range(0, dataT.shape[0]):
            f.write('{0:12.6}{1:12.6}{2:12.6}{3:12.6}\n'.format(
                dz*(zi), dataT[zi], dataP[zi], datarho[zi]))
    return
def writefile_newres(filenam, num, dz, numx, numy, numdepths):
# dz = height resolution is 11.25 km for F3V
# 10 km for G2V
# 6 km for K0V
# 4 km for M0V
# for the width and length, if you wish dx=dy, the resolution is
# 58.5938 km for F3V,
# 17.5781 km for G2V,
# 11.7188 km for K0V 4.8828 km for M0V.
star = 'G2'
mag = '500G'
folder = './slices_v/'+star+'/'+star+'_'+mag+'/1000/'+num+'/'
pref = star+'_'+mag+'_'+num+'_'
""" for disk centre v line of sight = v2 (vz)
therefore datav1 = pyfits.getdata(loc + '/result_1.' + num + '.fits')
datav3 = pyfits.getdata(loc + '/result_3.' + num + '.fits')
and calculation for v line of sight not required
"""
for ix in range(0,numx):
data = np.genfromtxt(folder+pref+str(ix),dtype = 'float',skip_header = 2)
f = open(folder+pref+'_newres_'+str(ix),'w')
f.write(str(numy) + | |
atol=1e-14)
np.testing.assert_allclose(np.diff(grid1.x)[nx-1], -dx*(nx-1))
np.testing.assert_allclose(np.diff(grid1.y)[nx-1], dx)
# Another set, but with odd nx
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
while (nx%2) == 0:
nx = rng.integers(10, 21)
lx = rng.uniform(1.0, 10.0)
dx = lx/(nx-1)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
grid1 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=lx, dirCos=dirCos
)
grid2 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, dx=dx, dirCos=dirCos
)
grid3 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0), dirCos=dirCos
)
# ... but the following is not equivalent, since default is to always
# infer an even nx and ny
# grid4 = batoid.RayVector.asGrid(
# backDist=backDist, wavelength=wavelength,
# dx=1/9, lx=1.0, dirCos=dirCos
# )
rays_allclose(grid1, grid2)
rays_allclose(grid1, grid3)
cridx = (nx*nx-1)//2
obs_dist = np.sqrt(np.dot(grid1.r[cridx], grid1.r[cridx]))
np.testing.assert_allclose(obs_dist, backDist)
np.testing.assert_allclose(grid1.t, 0)
np.testing.assert_allclose(grid1.wavelength, wavelength)
np.testing.assert_allclose(grid1.vignetted, False)
np.testing.assert_allclose(grid1.failed, False)
np.testing.assert_allclose(grid1.vx, dirCos[0])
np.testing.assert_allclose(grid1.vy, dirCos[1])
np.testing.assert_allclose(grid1.vz, dirCos[2])
# Check distribution of points propagated to entrance pupil
pupil = batoid.Plane()
pupil.intersect(grid1)
np.testing.assert_allclose(np.diff(grid1.x)[0], dx)
np.testing.assert_allclose(np.diff(grid1.y)[0], 0, atol=1e-14)
np.testing.assert_allclose(np.diff(grid1.x)[nx-1], -dx*(nx-1))
np.testing.assert_allclose(np.diff(grid1.y)[nx-1], dx)
for _ in range(10):
# Check nrandom
rays = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
lx=1.0, nx=1,
nrandom=1000, dirCos=dirCos
)
np.testing.assert_allclose(rays.t, 0)
np.testing.assert_allclose(rays.wavelength, wavelength)
np.testing.assert_allclose(rays.vignetted, False)
np.testing.assert_allclose(rays.failed, False)
np.testing.assert_allclose(rays.vx, dirCos[0])
np.testing.assert_allclose(rays.vy, dirCos[1])
np.testing.assert_allclose(rays.vz, dirCos[2])
# Check that projected points are inside region
pupil = batoid.Plane()
pupil.intersect(rays)
np.testing.assert_allclose(rays.z, 0.0)
np.testing.assert_array_less(rays.x, 0.5)
np.testing.assert_array_less(rays.y, 0.5)
np.testing.assert_array_less(-0.5, rays.x)
np.testing.assert_array_less(-0.5, rays.y)
assert len(rays) == 1000
@timer
def test_asPolar():
    """asPolar: ray metadata, ray count multiple of 6, and the inner=0 case."""
    rng = np.random.default_rng(5772156)
    for _ in range(10):
        backDist = rng.uniform(9.0, 11.0)
        wavelength = rng.uniform(300e-9, 1100e-9)
        inner = rng.uniform(1.0, 3.0)
        outer = inner + rng.uniform(1.0, 3.0)
        nrad = rng.integers(1, 11)
        naz = rng.integers(10, 21)
        dirCos = np.array([
            rng.uniform(-0.1, 0.1),
            rng.uniform(-0.1, 0.1),
            rng.uniform(-1.2, -0.8),
        ])
        dirCos /= np.sqrt(np.dot(dirCos, dirCos))

        rays = batoid.RayVector.asPolar(
            backDist=backDist, wavelength=wavelength,
            outer=outer, inner=inner,
            nrad=nrad, naz=naz,
            dirCos=dirCos
        )
        np.testing.assert_allclose(rays.t, 0)
        np.testing.assert_allclose(rays.wavelength, wavelength)
        np.testing.assert_allclose(rays.vignetted, False)
        np.testing.assert_allclose(rays.failed, False)
        for component, expected in zip((rays.vx, rays.vy, rays.vz), dirCos):
            np.testing.assert_allclose(component, expected)
        # With a non-zero inner radius the ray count comes in multiples of 6.
        assert len(rays) % 6 == 0

        # With inner=0 one extra ray is appended; it should land exactly on
        # the pupil origin after propagation to the entrance pupil plane.
        rays = batoid.RayVector.asPolar(
            backDist=backDist, wavelength=wavelength,
            outer=outer, inner=0.0,
            nrad=nrad, naz=naz,
            dirCos=dirCos
        )
        assert len(rays) % 6 == 1
        pupil = batoid.Plane()
        pupil.intersect(rays)
        for coord in (rays.x[-1], rays.y[-1], rays.z[-1]):
            np.testing.assert_allclose(coord, 0, atol=1e-14)
@timer
def test_asSpokes():
    """asSpokes: counts and geometry for uniform, explicit, and GQ spacing."""
    rng = np.random.default_rng(5772156)
    for _ in range(10):
        backDist = rng.uniform(9.0, 11.0)
        wavelength = rng.uniform(300e-9, 1100e-9)
        inner = rng.uniform(1.0, 3.0)
        outer = inner + rng.uniform(1.0, 3.0)
        rings = rng.integers(1, 11)
        spokes = rng.integers(10, 21)
        dirCos = np.array([
            rng.uniform(-0.1, 0.1),
            rng.uniform(-0.1, 0.1),
            rng.uniform(-1.2, -0.8),
        ])
        dirCos /= np.sqrt(np.dot(dirCos, dirCos))
        rays = batoid.RayVector.asSpokes(
            backDist=backDist, wavelength=wavelength,
            outer=outer, inner=inner,
            spokes=spokes, rings=rings,
            dirCos=dirCos
        )
        np.testing.assert_allclose(rays.t, 0)
        np.testing.assert_allclose(rays.wavelength, wavelength)
        np.testing.assert_allclose(rays.vignetted, False)
        np.testing.assert_allclose(rays.failed, False)
        np.testing.assert_allclose(rays.vx, dirCos[0])
        np.testing.assert_allclose(rays.vy, dirCos[1])
        np.testing.assert_allclose(rays.vz, dirCos[2])
        assert len(rays) == spokes*rings
        pupil = batoid.Plane()
        pupil.intersect(rays)
        radii = np.hypot(rays.x, rays.y)
        ths = np.arctan2(rays.y, rays.x)
        # Ray ordering is ring-fastest: rays [rings*i : rings*(i+1)] form
        # spoke i, radii running linearly from inner to outer.
        for i in range(spokes):
            np.testing.assert_allclose(
                radii[rings*i:rings*(i+1)],
                np.linspace(inner, outer, rings, endpoint=True)
            )
        for i in range(rings):
            checkAngle(ths[i::rings], np.linspace(0, 2*np.pi, spokes, endpoint=False))
        # Check explicit rings and spokes
        # NOTE: `rings`/`spokes` are rebound here from counts to explicit
        # radius/angle arrays.
        rings = rng.uniform(inner, outer, rings)
        spokes = rng.uniform(0, 2*np.pi, spokes)
        rays = batoid.RayVector.asSpokes(
            backDist=backDist, wavelength=wavelength,
            outer=outer, inner=inner,
            rings=rings, spokes=spokes,
            dirCos=dirCos
        )
        pupil = batoid.Plane()
        pupil.intersect(rays)
        radii = np.hypot(rays.x, rays.y)
        ths = np.arctan2(rays.y, rays.x)
        for i in range(len(spokes)):
            np.testing.assert_allclose(
                radii[len(rings)*i:len(rings)*(i+1)],
                rings
            )
        for i in range(len(rings)):
            checkAngle(
                ths[i::len(rings)],
                spokes
            )
        # Check Gaussian Quadrature
        rings = rng.integers(5, 11)
        spokes = 2*rings+1
        rays = batoid.RayVector.asSpokes(
            backDist=backDist, wavelength=wavelength,
            outer=outer,
            rings=rings,
            spacing='GQ',
            dirCos=dirCos
        )
        assert len(rays) == spokes*rings
        pupil = batoid.Plane()
        pupil.intersect(rays)
        radii = np.hypot(rays.x, rays.y)
        ths = np.arctan2(rays.y, rays.x)
        # Expected GQ radii/weights derived from the Legendre abscissae.
        Li, w = np.polynomial.legendre.leggauss(rings)
        rings = np.sqrt((1+Li)/2)*outer
        flux = w*np.pi/(2*spokes)
        spokes = np.linspace(0, 2*np.pi, spokes, endpoint=False)
        for i in range(len(spokes)):
            np.testing.assert_allclose(
                radii[len(rings)*i:len(rings)*(i+1)],
                rings
            )
            np.testing.assert_allclose(
                rays.flux[len(rings)*i:len(rings)*(i+1)],
                flux
            )
        for i in range(len(rings)):
            checkAngle(
                ths[i::len(rings)],
                spokes
            )
    # Sanity check GQ grids against literature
    # Values from Forbes JOSA Vol. 5, No. 11 (1988) Table 1
    rings = [1, 2, 3, 4, 5, 6]
    rad = [
        [0.70710678],
        [0.45970084, 0.88807383],
        [0.33571069, 0.70710678, 0.94196515],
        [0.26349923, 0.57446451, 0.81852949, 0.96465961],
        [0.21658734, 0.48038042, 0.70710678, 0.87706023, 0.97626324],
        [0.18375321, 0.41157661, 0.61700114, 0.78696226, 0.91137517, 0.98297241]
    ]
    w = [
        [0.5],
        [0.25, 0.25],
        [0.13888889, 0.22222222, 0.13888889],
        [0.08696371, 0.16303629, 0.16303629, 0.08696371],
        [0.05923172, 0.11965717, 0.14222222, 0.11965717, 0.05923172],
        [0.04283112, 0.09019039, 0.11697848, 0.11697848, 0.09019039, 0.04283112]
    ]
    for rings_, rad_, w_ in zip(rings, rad, w):
        rays = batoid.RayVector.asSpokes(
            backDist=backDist, wavelength=wavelength,
            outer=1,
            rings=rings_,
            spacing='GQ',
            dirCos=[0,0,-1]
        )
        spokes = rings_*2+1
        radii = np.hypot(rays.x, rays.y)
        for i in range(spokes):
            np.testing.assert_allclose(
                radii[rings_*i:rings_*(i+1)],
                rad_
            )
            np.testing.assert_allclose(
                rays.flux[rings_*i:rings_*(i+1)]*spokes/(2*np.pi),
                w_
            )
@timer
def test_factory_optic():
    """Factory methods given `optic=` must match explicitly-parameterized calls."""
    telescope = batoid.Optic.fromYaml("LSST_r.yaml")

    via_optic = batoid.RayVector.asGrid(
        optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
        nx=16
    )
    via_params = batoid.RayVector.asGrid(
        wavelength=500e-9, theta_x=0.1, theta_y=0.1,
        backDist=telescope.backDist, stopSurface=telescope.stopSurface,
        medium=telescope.inMedium, lx=telescope.pupilSize,
        nx=16
    )
    rays_allclose(via_optic, via_params)

    via_optic = batoid.RayVector.asPolar(
        optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
        naz=100, nrad=20
    )
    via_params = batoid.RayVector.asPolar(
        wavelength=500e-9, theta_x=0.1, theta_y=0.1,
        backDist=telescope.backDist, stopSurface=telescope.stopSurface,
        medium=telescope.inMedium, outer=telescope.pupilSize/2,
        inner=telescope.pupilSize/2*telescope.pupilObscuration,
        naz=100, nrad=20
    )
    rays_allclose(via_optic, via_params)

    via_optic = batoid.RayVector.asSpokes(
        optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
        rings=10, spokes=21
    )
    via_params = batoid.RayVector.asSpokes(
        wavelength=500e-9, theta_x=0.1, theta_y=0.1,
        backDist=telescope.backDist, stopSurface=telescope.stopSurface,
        medium=telescope.inMedium, outer=telescope.pupilSize/2,
        rings=10, spokes=21
    )
    rays_allclose(via_optic, via_params)
@timer
def test_getitem():
    """Single-item, slice, fancy, boolean indexing and iteration of RayVector.

    Fixes a duplicated `rv6.r` assertion in the reversed-iteration branch of
    the original, and factors the attribute-by-attribute comparisons into two
    local helpers to remove the heavy copy-paste repetition.
    """
    telescope = batoid.Optic.fromYaml("LSST_r.yaml")
    rv = batoid.RayVector.asPolar(
        optic=telescope, wavelength=625e-9,
        theta_x=np.deg2rad(1.0), theta_y=np.deg2rad(0.2),
        nrad=10, naz=60
    )
    telescope.trace(rv)

    # Per-ray scalar attributes checked on every sub-vector (r and v, the
    # 3-vector attributes, are checked separately).
    attrs = [
        "x", "y", "z", "vx", "vy", "vz",
        "t", "wavelength", "flux", "vignetted", "failed",
    ]

    def _check_single(rv1, i):
        # rv1 must be a length-1 RayVector equal to ray i of rv.
        np.testing.assert_equal(rv1.r[0], rv.r[i])
        np.testing.assert_equal(rv1.v[0], rv.v[i])
        for attr in attrs:
            np.testing.assert_equal(getattr(rv1, attr)[0], getattr(rv, attr)[i])
        assert rv1.r.flags.f_contiguous
        assert rv1.v.flags.f_contiguous

    def _check_multi(rvN, idx):
        # rvN must equal rv[idx] attribute-by-attribute.
        np.testing.assert_equal(rvN.r, rv.r[idx])
        np.testing.assert_equal(rvN.v, rv.v[idx])
        for attr in attrs:
            np.testing.assert_equal(getattr(rvN, attr), getattr(rv, attr)[idx])
        assert rvN.r.flags.f_contiguous
        assert rvN.v.flags.f_contiguous

    # Single item indexing, including negative indices.
    for i in range(-len(rv), len(rv)):
        _check_single(rv[i], i)

    # Slice indexing.
    for i in range(-len(rv)//10, len(rv)//10):
        slc = slice(i*10, (i+1)*10, 2)
        _check_multi(rv[slc], slc)

    # Integer array indexing.
    idx = [0, -1, 1, -2, 2, -3, 50]
    _check_multi(rv[idx], idx)

    # Boolean array indexing.
    mask = np.zeros(len(rv), dtype=bool)
    mask[[0, -1, 5]] = True
    _check_multi(rv[mask], mask)

    # Iteration, forwards and reversed.
    for i, rv5 in enumerate(rv):
        _check_single(rv5, i)
    for i, rv6 in enumerate(reversed(rv)):
        _check_single(rv6, -i-1)

    # Out-of-range indices raise IndexError.
    with np.testing.assert_raises(IndexError):
        rv[len(rv)]
    with np.testing.assert_raises(IndexError):
        rv[-len(rv)-1]
def test_fromStop():
    """fromStop must reproduce the full-telescope trace of each ray no matter
    which optional keyword (backDist, medium, stopSurface, dirCos) is given
    explicitly alongside the defaults."""
    telescope = batoid.Optic.fromYaml("LSST_r.yaml")
    rv = batoid.RayVector.asPolar(
        optic=telescope, wavelength=625e-9,
        theta_x=np.deg2rad(1.0), theta_y=np.deg2rad(0.2),
        nrad=4, naz=10
    )
    rv_traced = telescope.trace(rv.copy())
    rv_stop = telescope.stopSurface.interact(rv.copy())
    field_kwargs = dict(theta_x=np.deg2rad(1.0), theta_y=np.deg2rad(0.2))
    # Each entry supplies one optional keyword explicitly; {} relies on defaults.
    extra_kwargs = [
        {},
        dict(backDist=telescope.backDist),
        dict(medium=telescope.inMedium),
        dict(stopSurface=telescope.stopSurface),
    ]
    for rv1, rv_traced1, rv_stop1 in zip(rv, rv_traced, rv_stop):
        probes = [
            batoid.RayVector.fromStop(
                rv_stop1.x[0], rv_stop1.y[0],
                optic=telescope, wavelength=625e-9,
                **field_kwargs, **extra
            )
            for extra in extra_kwargs
        ]
        # Same ray specified via a direction cosine instead of field angles.
        probes.append(batoid.RayVector.fromStop(
            rv_stop1.x[0], rv_stop1.y[0],
            optic=telescope, stopSurface=telescope.stopSurface,
            wavelength=625e-9,
            dirCos=batoid.utils.fieldToDirCos(np.deg2rad(1.0), np.deg2rad(0.2))
        ))
        for probe in probes:
            telescope.trace(probe)
            for attr in ("x", "y", "z", "vx", "vy", "vz"):
                np.testing.assert_allclose(
                    getattr(probe, attr), getattr(rv_traced1, attr),
                    rtol=0, atol=1e-14
                )
    # A few more coverage checks
    with np.testing.assert_raises(ValueError):
        rv = batoid.RayVector.fromStop(
            0, 0, theta_x=0.0, theta_y=0.0
        )
    rv = batoid.RayVector.fromStop(
        0, 0, theta_x=0.0, theta_y=0.0, wavelength=625e-9
    )
    rv2 = batoid.RayVector.fromStop(
        0, 0, theta_x=0.0, theta_y=0.0, wavelength=625e-9,
        backDist=40.0,
        stopSurface=batoid.Interface(batoid.Plane()),
        medium=batoid.vacuum
    )
    assert rv == rv2
def test_fromFieldAngles():
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
thx = np.linspace(-0.5, 0.5, 10)
thy = np.linspace(-0.5, 0.5, 10)
rv = batoid.RayVector.fromFieldAngles(
np.deg2rad(thx), np.deg2rad(thy),
optic=telescope, wavelength=625e-9,
)
rv_traced = telescope.trace(rv.copy())
rv_stop = telescope.stopSurface.interact(rv.copy())
for rv1, rv_traced1, rv_stop1 in zip(rv, rv_traced, rv_stop):
dc = rv_stop1.v[0]/np.sqrt(np.sum(np.square(rv_stop1.v)))
thx, thy = batoid.utils.dirCosToField(*dc)
rv_test1 = batoid.RayVector.fromStop(
0.0, 0.0,
optic=telescope, wavelength=625e-9,
theta_x=thx, theta_y=thy
)
rv_test2 = batoid.RayVector.fromStop(
0.0, 0.0,
optic=telescope, backDist=telescope.backDist, wavelength=625e-9,
theta_x=thx, theta_y=thy
| |
<gh_stars>10-100
#!/usr/bin/python3
# COPYRIGHT 2021 BY EXTRAHOP NETWORKS, INC.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE', which is part of this source code package.
import json
import boto3
import os
from netaddr import IPNetwork, IPAddress
# Shared EC2 API client, created once at module import time (Lambda cold start).
EC2_CLIENT = boto3.client("ec2")
# The ID of the traffic mirror filter, read from the Lambda environment.
FILTER_ID = os.environ["filter_id"]
# The IDs of the traffic mirror targets for your ExtraHop sensors.
TARGETS = ["tmt-abcdefg0123456789"]
# Determines whether the function will mirror traffic across availability
# zones. If set to True, and there are no traffic mirror targets in the
# availability zone of the source EC2 instance, the function does not create
# a mirror session.
LOCAL_ZONE_ONLY = True
def create_mirror(interface_id, targets):
    """
    Create a traffic mirror session for a network interface (ENI).

    Selects the best traffic mirror target by looking for a target within
    the current availability zone with the fewest mirror sessions. Targets
    that do not allow traffic on required ports and protocols are not selected.

    Parameters:
        interface_id (str): The ID of the ENI
        targets (list): A list of traffic mirror target IDs

    Returns:
        dict: {"name": <session name>} on success, or {"error": <message>}
              when no usable target exists. (The old docstring claimed a
              plain string was returned, which was wrong.)
    """
    targets = find_local_targets(interface_id, targets)
    if not targets:
        return {
            "error": "None of the specified traffic mirror targets are in the same availability zone as "
            + interface_id
        }
    # Mirrored traffic is VXLAN-encapsulated, i.e. UDP on port 4789.
    targets = find_allowed_targets(interface_id, targets, ["udp"], [4789])
    if not targets:
        return {
            "error": "None of the traffic mirror targets support UDP traffic on port 4789"
        }
    target_id = find_target_with_lowest(targets)
    # Create a name by combining network interface ID and target ID. For example:
    #   interface_id = eni-055061bf5573d0a3b
    #   target_id = tmt-0bce8038d06d6c745
    name = "eh-mirror-" + interface_id[4:8] + target_id[4:8]
    tags = [
        {
            "ResourceType": "traffic-mirror-session",
            "Tags": [{"Key": "Name", "Value": name}],
        }
    ]
    # Response is not needed (the old unused `response` binding is dropped);
    # boto3 raises on failure.
    EC2_CLIENT.create_traffic_mirror_session(
        NetworkInterfaceId=interface_id,
        TrafficMirrorTargetId=target_id,
        TrafficMirrorFilterId=FILTER_ID,
        SessionNumber=1,
        TagSpecifications=tags,
    )
    return {"name": name}
def find_local_targets(interface_id, targets):
    """
    Return the mirror targets in the same availability zone as the interface.

    If no target shares the zone, the result depends on LOCAL_ZONE_ONLY:
    an empty list when True, the original target list when False.

    Parameters:
        interface_id (str): The ID of the ENI
        targets (list): A list of traffic mirror target IDs

    Returns:
        list: Local target IDs, the original list, or []
    """
    def zone_of(eni_id):
        # Availability zone of a network interface.
        return EC2_CLIENT.describe_network_interfaces(
            NetworkInterfaceIds=[eni_id]
        )["NetworkInterfaces"][0]["AvailabilityZone"]

    def eni_of_target(target_id):
        # ENI backing a traffic mirror target.
        return EC2_CLIENT.describe_traffic_mirror_targets(
            Filters=[{"Name": "traffic-mirror-target-id", "Values": [target_id]}]
        )["TrafficMirrorTargets"][0]["NetworkInterfaceId"]

    source_zone = zone_of(interface_id)
    local = [t for t in targets if zone_of(eni_of_target(t)) == source_zone]
    if local:
        return local
    # No same-zone target: honor the cross-zone policy flag.
    return [] if LOCAL_ZONE_ONLY else targets
def find_allowed_targets(interface_id, targets, protocols, port_range):
    """
    Return the mirror targets that can accept traffic from the specified ENI
    on the specified port range over the specified protocols.

    Checks the security groups of each target interface and, when the source
    and target are in different VPCs, also the ACL of the target's subnet.

    Parameters:
        interface_id (str): The ID of the source ENI
        targets (list): A list of traffic mirror target IDs
        protocols (list): Protocols the target must accept (e.g. ["udp"])
        port_range (range or list): Ports the target must accept

    Returns:
        list: The IDs of the targets that allow the traffic. (The old
              docstring, copy-pasted from find_target_with_lowest, wrongly
              described a single-str return.)
    """
    source_interface = EC2_CLIENT.describe_network_interfaces(
        NetworkInterfaceIds=[interface_id]
    )["NetworkInterfaces"][0]
    # Collect every private and (if present) public IP of the source ENI.
    source_ips = []
    for addr in source_interface["PrivateIpAddresses"]:
        if addr.get("PrivateIpAddress"):
            source_ips.append(addr["PrivateIpAddress"])
        # An address with no public IP carries no "Association" key at all;
        # the old direct indexing raised KeyError in that common case.
        public_ip = addr.get("Association", {}).get("PublicIp")
        if public_ip:
            source_ips.append(public_ip)
    eligible_targets = []
    for target in targets:
        target_interface_id = EC2_CLIENT.describe_traffic_mirror_targets(
            TrafficMirrorTargetIds=[target]
        )["TrafficMirrorTargets"][0]["NetworkInterfaceId"]
        target_interface = EC2_CLIENT.describe_network_interfaces(
            NetworkInterfaceIds=[target_interface_id]
        )["NetworkInterfaces"][0]
        # Only consult the subnet ACL when crossing a VPC boundary.
        target_vpc_id = target_interface["VpcId"]
        same_vpc = source_interface["VpcId"] == target_vpc_id
        if not same_vpc and not aclAllow(
            target_interface,
            port_range,
            source_ips,
            protocols,
            target_vpc_id,
        ):
            continue
        if groupsAllow(source_ips, port_range, target_interface["Groups"], protocols):
            eligible_targets.append(target)
    return eligible_targets
def aclAllow(
    target_interface, required_range, source_ips, protocols, target_vpc_id
):
    """
    Indicate whether at least one IP address in a list can send traffic
    through the ACL of the specified target interface over the given
    protocols on at least one port in the required range.

    Parameters:
        target_interface (dict): The target interface description
        required_range (range or list): The range of required ports
        source_ips (list): The list of source IP addresses
        protocols (list): The list of protocols (ACL protocol strings)
        target_vpc_id (str): The ID of the VPC containing the target

    Returns:
        bool: Whether the ACLs allow the communication
    """
    confirmed_protocols = []
    subnet_id = target_interface["SubnetId"]
    acls = EC2_CLIENT.describe_network_acls(
        Filters=[{"Name": "vpc-id", "Values": [target_vpc_id]}]
    )["NetworkAcls"]
    acl = findSubnetAcl(subnet_id, acls)
    if acl is None:
        # No ACL associated with the subnet: deny. (The old code returned the
        # empty confirmed_protocols list instead of the documented bool.)
        return False
    for entry in acl["Entries"]:
        # Skip outbound rules; only inbound traffic matters here.
        if entry["Egress"]:
            continue
        proto = entry["Protocol"]
        # "-1" means the rule applies to every protocol.
        if proto == "-1" or proto in protocols:
            # Skip rules that do not apply to the required port range.
            if "PortRange" in entry:
                # BUG FIX: this previously read the undefined name
                # "permission" (copied from groupsAllow) and raised NameError.
                # AWS ACL port ranges are inclusive of "To", so +1 is needed;
                # this also makes the old empty-range fallback unnecessary.
                rule_range = range(
                    entry["PortRange"]["From"], entry["PortRange"]["To"] + 1
                )
                if not any(port in required_range for port in rule_range):
                    continue
            for source_ip in source_ips:
                if "CidrBlock" in entry:
                    block = entry["CidrBlock"]
                else:
                    block = entry["Ipv6CidrBlock"]
                if IPAddress(source_ip) in IPNetwork(block):
                    if entry["RuleAction"] == "allow":
                        if proto == "-1":
                            return True
                        confirmed_protocols.append(proto)
                        if set(confirmed_protocols) == set(protocols):
                            return True
                    # If there is a rule denying this particular IP,
                    # move on to the next IP address, since any allow rules
                    # after this will be ignored.
                    if entry["RuleAction"] == "deny":
                        continue
    return False
def groupsAllow(source_ips, required_range, interface_groups, protocols):
    """
    Evaluate EC2 instance security groups to determine whether at least one
    IP address in a list can send traffic to the instance over the given
    protocols on at least one port in the required range.

    Parameters:
        source_ips (list): The list of source IP addresses
        required_range (range or list): The range of required ports
        interface_groups (list): The security groups attached to the interface
        protocols (list): The list of protocols (e.g. ["udp"])

    Returns:
        bool: Whether the security groups allow the communication
    """
    confirmed_protocols = []
    for int_group in interface_groups:
        sec_groups = EC2_CLIENT.describe_security_groups(
            GroupIds=[int_group["GroupId"]]
        )["SecurityGroups"]
        for sec_group in sec_groups:
            for permission in sec_group["IpPermissions"]:
                proto = permission["IpProtocol"]
                # IpProtocol is a *string*; "-1" means all protocols.
                # BUG FIX: the old code compared against the integer -1
                # (while aclAllow correctly used "-1"), so all-protocol
                # rules never matched and were silently skipped.
                if proto == "-1" or proto in protocols:
                    # All-protocol rules carry no FromPort/ToPort keys, so
                    # only filter by port when a range is present. AWS port
                    # ranges are inclusive of ToPort, hence the +1 (the old
                    # range(From, To) dropped the last port).
                    if "FromPort" in permission:
                        rule_range = range(
                            permission["FromPort"], permission["ToPort"] + 1
                        )
                        if not any(
                            port in required_range for port in rule_range
                        ):
                            continue
                    for ip_range in permission["IpRanges"]:
                        for source_ip in source_ips:
                            if IPAddress(source_ip) in IPNetwork(
                                ip_range["CidrIp"]
                            ):
                                if proto == "-1":
                                    # Rule covers every protocol at once.
                                    return True
                                confirmed_protocols.append(proto)
                                if set(confirmed_protocols) == set(
                                    protocols
                                ):
                                    return True
    return False
def findSubnetAcl(subnet_id, acls):
    """
    Return the first ACL associated with the specified subnet, or None.

    Parameters:
        subnet_id (str): The ID of the subnet
        acls (list): The ACL descriptions of a VPC

    Returns:
        dict or None: The ACL assigned to the subnet
    """
    matches = (
        acl
        for acl in acls
        if any(assoc["SubnetId"] == subnet_id for assoc in acl["Associations"])
    )
    return next(matches, None)
def find_target_with_lowest(targets):
    """
    Return the ID of the mirror target with the fewest mirror sessions.

    Parameters:
        targets (list): A list of traffic mirror target IDs

    Returns:
        str: The ID of the mirror target with the least mirror sessions
    """
    def session_count(target_id):
        # Number of existing mirror sessions already pointed at this target.
        sessions = EC2_CLIENT.describe_traffic_mirror_sessions(
            Filters=[{"Name": "traffic-mirror-target-id", "Values": [target_id]}]
        )["TrafficMirrorSessions"]
        return len(sessions)

    counts = {target: session_count(target) for target in targets}
    return min(counts, key=counts.get)
def lambda_handler(event, context):
newId = event["detail"]["instance-id"]
response = EC2_CLIENT.describe_instances(InstanceIds=[newId])
for reservation in response["Reservations"]:
for instance in reservation["Instances"]:
for interface in instance["NetworkInterfaces"]:
interface_id = interface["NetworkInterfaceId"]
# Only create the mirror session if no mirror
# session exists for this instance
sessions | |
88
new_vm.memory[11] = 88
new_vm.memory[12] = 88
new_vm.memory[13] = 88
new_vm.memory[14] = 88
new_vm.memory[15] = 88
new_vm.memory[16] = 88
new_vm.memory[17] = 88
new_vm.memory[18] = 88
new_vm.memory[19] = 88
new_vm.memory[20] = 88
new_vm.memory[21] = 88
new_vm.memory[22] = 88
new_vm.memory[23] = 88
new_vm.memory[24] = 88
new_vm.memory[25] = 88
new_vm.memory[26] = 88
new_vm.memory[27] = 88
new_vm.memory[28] = 88
new_vm.memory[29] = 88
new_vm.memory[30] = 88
new_vm.memory[31] = 88
new_vm.memory[32] = 88
new_vm.memory[33] = 88
new_vm.memory[34] = 88
new_vm.memory[35] = 88
new_vm.memory[36] = 88
new_vm.memory[37] = 88
new_vm.memory[38] = 88
new_vm.memory[39] = 88
new_vm.memory[40] = 88
new_vm.memory[41] = 88
new_vm.memory[42] = 88
new_vm.memory[43] = 88
new_vm.memory[44] = 88
new_vm.memory[45] = 88
new_vm.memory[46] = 88
new_vm.memory[47] = 88
new_vm.memory[48] = 88
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [87315138422451183025224871972802370450373932520512056513148796263698858401046])
def test_SHA3_67(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode=' '
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, 'OOG')
def test_SHA3_68(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode=' '
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, 'OOG')
def test_SHA3_69(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode=' '
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(16)
new_vm.memory[16] = 88
new_vm.memory[17] = 88
new_vm.memory[18] = 88
new_vm.memory[19] = 88
new_vm.memory[20] = 88
new_vm.memory[21] = 88
new_vm.memory[22] = 88
new_vm.memory[23] = 88
new_vm.memory[24] = 88
new_vm.memory[25] = 88
new_vm.memory[26] = 88
new_vm.memory[27] = 88
new_vm.memory[28] = 88
new_vm.memory[29] = 88
new_vm.memory[30] = 88
new_vm.memory[31] = 88
new_vm.memory[32] = 88
new_vm.memory[33] = 88
new_vm.memory[34] = 88
new_vm.memory[35] = 88
new_vm.memory[36] = 88
new_vm.memory[37] = 88
new_vm.memory[38] = 88
new_vm.memory[39] = 88
new_vm.memory[40] = 88
new_vm.memory[41] = 88
new_vm.memory[42] = 88
new_vm.memory[43] = 88
new_vm.memory[44] = 88
new_vm.memory[45] = 88
new_vm.memory[46] = 88
new_vm.memory[47] = 88
new_vm.memory[48] = 88
new_vm.memory[49] = 88
new_vm.memory[50] = 88
new_vm.memory[51] = 88
new_vm.memory[52] = 88
new_vm.memory[53] = 88
new_vm.memory[54] = 88
new_vm.memory[55] = 88
new_vm.memory[56] = 88
new_vm.memory[57] = 88
new_vm.memory[58] = 88
new_vm.memory[59] = 88
new_vm.memory[60] = 88
new_vm.memory[61] = 88
new_vm.memory[62] = 88
new_vm.memory[63] = 88
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [87315138422451183025224871972802370450373932520512056513148796263698858401046])
def test_SHA3_70(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode=' '
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(32)
new_vm.memory[32] = 88
new_vm.memory[33] = 88
new_vm.memory[34] = 88
new_vm.memory[35] = 88
new_vm.memory[36] = 88
new_vm.memory[37] = 88
new_vm.memory[38] = 88
new_vm.memory[39] = 88
new_vm.memory[40] = 88
new_vm.memory[41] = 88
new_vm.memory[42] = 88
new_vm.memory[43] = 88
new_vm.memory[44] = 88
new_vm.memory[45] = 88
new_vm.memory[46] = 88
new_vm.memory[47] = 88
new_vm.memory[48] = 88
new_vm.memory[49] = 88
new_vm.memory[50] = 88
new_vm.memory[51] = 88
new_vm.memory[52] = 88
new_vm.memory[53] = 88
new_vm.memory[54] = 88
new_vm.memory[55] = 88
new_vm.memory[56] = 88
new_vm.memory[57] = 88
new_vm.memory[58] = 88
new_vm.memory[59] = 88
new_vm.memory[60] = 88
new_vm.memory[61] = 88
new_vm.memory[62] = 88
new_vm.memory[63] = 88
new_vm.memory[64] = 88
new_vm.memory[65] = 88
new_vm.memory[66] = 88
new_vm.memory[67] = 88
new_vm.memory[68] = 88
new_vm.memory[69] = 88
new_vm.memory[70] = 88
new_vm.memory[71] = 88
new_vm.memory[72] = 88
new_vm.memory[73] = 88
new_vm.memory[74] = 88
new_vm.memory[75] = 88
new_vm.memory[76] = 88
new_vm.memory[77] = 88
new_vm.memory[78] = 88
new_vm.memory[79] = 88
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [87315138422451183025224871972802370450373932520512056513148796263698858401046])
def test_SHA3_71(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode=' '
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(48)
new_vm.memory[48] = 88
new_vm.memory[49] = 88
new_vm.memory[50] = 88
new_vm.memory[51] = 88
new_vm.memory[52] = 88
new_vm.memory[53] = 88
new_vm.memory[54] = 88
new_vm.memory[55] = 88
new_vm.memory[56] = 88
new_vm.memory[57] = 88
new_vm.memory[58] = 88
new_vm.memory[59] = 88
new_vm.memory[60] = 88
new_vm.memory[61] = 88
new_vm.memory[62] = 88
new_vm.memory[63] = 88
new_vm.memory[64] = 88
new_vm.memory[65] = 88
new_vm.memory[66] = 88
new_vm.memory[67] = 88
new_vm.memory[68] = 88
new_vm.memory[69] = 88
new_vm.memory[70] = 88
new_vm.memory[71] = 88
new_vm.memory[72] = 88
new_vm.memory[73] = 88
new_vm.memory[74] = 88
new_vm.memory[75] = 88
new_vm.memory[76] = 88
new_vm.memory[77] = 88
new_vm.memory[78] = 88
new_vm.memory[79] = 88
new_vm.memory[80] = 88
new_vm.memory[81] = 88
new_vm.memory[82] = 88
new_vm.memory[83] = 88
new_vm.memory[84] = 88
new_vm.memory[85] = 88
new_vm.memory[86] = 88
new_vm.memory[87] = 88
new_vm.memory[88] = 88
new_vm.memory[89] = 88
new_vm.memory[90] = 88
new_vm.memory[91] = 88
new_vm.memory[92] = 88
new_vm.memory[93] = 88
new_vm.memory[94] = 88
new_vm.memory[95] = 88
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [87315138422451183025224871972802370450373932520512056513148796263698858401046])
def test_SHA3_72(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode=' '
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, 'OOG')
def test_SHA3_73(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode=' '
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, 'OOG')
def test_SHA3_74(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode=' '
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, 'OOG')
def test_SHA3_75(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode=' '
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, 'OOG')
def test_SHA3_76(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode=' '
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, 'OOG')
def test_SHA3_77(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode=' '
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, 'OOG')
def test_SHA3_78(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode=' '
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
| |
namespace_, name_='grid')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='t:', name_='grid'):
if self.distance is not None and 'distance' not in already_processed:
already_processed.append('distance')
outfile.write(' distance=%s' % (self.gds_format_string(quote_attrib(self.distance).encode(ExternalEncoding), input_name='distance'), ))
if self.style is not None and 'style' not in already_processed:
already_processed.append('style')
outfile.write(' style=%s' % (self.gds_format_string(quote_attrib(self.style).encode(ExternalEncoding), input_name='style'), ))
if self.multiple is not None and 'multiple' not in already_processed:
already_processed.append('multiple')
outfile.write(' multiple=%s' % (self.gds_format_string(quote_attrib(self.multiple).encode(ExternalEncoding), input_name='multiple'), ))
if self.altdistance is not None and 'altdistance' not in already_processed:
already_processed.append('altdistance')
outfile.write(' altdistance=%s' % (self.gds_format_string(quote_attrib(self.altdistance).encode(ExternalEncoding), input_name='altdistance'), ))
if self.altunit is not None and 'altunit' not in already_processed:
already_processed.append('altunit')
outfile.write(' altunit=%s' % (self.gds_format_string(quote_attrib(self.altunit).encode(ExternalEncoding), input_name='altunit'), ))
if self.unitdist is not None and 'unitdist' not in already_processed:
already_processed.append('unitdist')
outfile.write(' unitdist=%s' % (self.gds_format_string(quote_attrib(self.unitdist).encode(ExternalEncoding), input_name='unitdist'), ))
if self.altunitdist is not None and 'altunitdist' not in already_processed:
already_processed.append('altunitdist')
outfile.write(' altunitdist=%s' % (self.gds_format_string(quote_attrib(self.altunitdist).encode(ExternalEncoding), input_name='altunitdist'), ))
if self.display is not None and 'display' not in already_processed:
already_processed.append('display')
outfile.write(' display=%s' % (self.gds_format_string(quote_attrib(self.display).encode(ExternalEncoding), input_name='display'), ))
if self.unit is not None and 'unit' not in already_processed:
already_processed.append('unit')
outfile.write(' unit=%s' % (self.gds_format_string(quote_attrib(self.unit).encode(ExternalEncoding), input_name='unit'), ))
    def exportChildren(self, outfile, level, namespace_='t:', name_='grid', fromsubclass_=False):
        # The grid element defines no child elements, so there is nothing to emit.
        pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='grid'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.distance is not None and 'distance' not in already_processed:
already_processed.append('distance')
showIndent(outfile, level)
outfile.write('distance = "%s",\n' % (self.distance,))
if self.style is not None and 'style' not in already_processed:
already_processed.append('style')
showIndent(outfile, level)
outfile.write('style = "%s",\n' % (self.style,))
if self.multiple is not None and 'multiple' not in already_processed:
already_processed.append('multiple')
showIndent(outfile, level)
outfile.write('multiple = "%s",\n' % (self.multiple,))
if self.altdistance is not None and 'altdistance' not in already_processed:
already_processed.append('altdistance')
showIndent(outfile, level)
outfile.write('altdistance = "%s",\n' % (self.altdistance,))
if self.altunit is not None and 'altunit' not in already_processed:
already_processed.append('altunit')
showIndent(outfile, level)
outfile.write('altunit = "%s",\n' % (self.altunit,))
if self.unitdist is not None and 'unitdist' not in already_processed:
already_processed.append('unitdist')
showIndent(outfile, level)
outfile.write('unitdist = "%s",\n' % (self.unitdist,))
if self.altunitdist is not None and 'altunitdist' not in already_processed:
already_processed.append('altunitdist')
showIndent(outfile, level)
outfile.write('altunitdist = "%s",\n' % (self.altunitdist,))
if self.display is not None and 'display' not in already_processed:
already_processed.append('display')
showIndent(outfile, level)
outfile.write('display = "%s",\n' % (self.display,))
if self.unit is not None and 'unit' not in already_processed:
already_processed.append('unit')
showIndent(outfile, level)
outfile.write('unit = "%s",\n' % (self.unit,))
    def exportLiteralChildren(self, outfile, level, name_):
        # The grid element has no child elements to render as literals.
        pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('distance', node)
if value is not None and 'distance' not in already_processed:
already_processed.append('distance')
self.distance = value
value = find_attr_value_('style', node)
if value is not None and 'style' not in already_processed:
already_processed.append('style')
self.style = value
value = find_attr_value_('multiple', node)
if value is not None and 'multiple' not in already_processed:
already_processed.append('multiple')
self.multiple = value
value = find_attr_value_('altdistance', node)
if value is not None and 'altdistance' not in already_processed:
already_processed.append('altdistance')
self.altdistance = value
value = find_attr_value_('altunit', node)
if value is not None and 'altunit' not in already_processed:
already_processed.append('altunit')
self.altunit = value
value = find_attr_value_('unitdist', node)
if value is not None and 'unitdist' not in already_processed:
already_processed.append('unitdist')
self.unitdist = value
value = find_attr_value_('altunitdist', node)
if value is not None and 'altunitdist' not in already_processed:
already_processed.append('altunitdist')
self.altunitdist = value
value = find_attr_value_('display', node)
if value is not None and 'display' not in already_processed:
already_processed.append('display')
self.display = value
value = find_attr_value_('unit', node)
if value is not None and 'unit' not in already_processed:
already_processed.append('unit')
self.unit = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class grid
# NOTE(review): this looks like machine-generated XML-binding code (the
# subclass/factory/export/build scaffolding matches generateDS.py-style
# output — confirm). Hand edits would likely be overwritten on regeneration.
class layer(GeneratedsSuper):
    """Binding for a <layer> element.

    Carries only attributes (name, color, number, visible, active, fill);
    hasContent_() shows it never has child elements, so export() always
    emits a self-closing tag.
    """
    # Set `subclass` to substitute a derived class via factory().
    subclass = None
    superclass = None
    def __init__(self, name=None, color=None, number=None, visible=None, active=None, fill=None):
        # _cast(None, x) stores the attribute value without type conversion.
        self.name = _cast(None, name)
        self.color = _cast(None, color)
        self.number = _cast(None, number)
        self.visible = _cast(None, visible)
        self.active = _cast(None, active)
        self.fill = _cast(None, fill)
        pass
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass if one was provided.
        if layer.subclass:
            return layer.subclass(*args_, **kwargs_)
        else:
            return layer(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessor pairs for each XML attribute.
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_color(self): return self.color
    def set_color(self, color): self.color = color
    def get_number(self): return self.number
    def set_number(self, number): self.number = number
    def get_visible(self): return self.visible
    def set_visible(self, visible): self.visible = visible
    def get_active(self): return self.active
    def set_active(self, active): self.active = active
    def get_fill(self): return self.fill
    def set_fill(self, fill): self.fill = fill
    def export(self, outfile, level, namespace_='t:', name_='layer', namespacedef_=''):
        # Write this element as XML to `outfile`, indented by `level`.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='layer')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # No children: emit a self-closing tag (always the case here).
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='t:', name_='layer'):
        # Emit each non-None attribute exactly once, quoted/escaped.
        if self.name is not None and 'name' not in already_processed:
            already_processed.append('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
        if self.color is not None and 'color' not in already_processed:
            already_processed.append('color')
            outfile.write(' color=%s' % (self.gds_format_string(quote_attrib(self.color).encode(ExternalEncoding), input_name='color'), ))
        if self.number is not None and 'number' not in already_processed:
            already_processed.append('number')
            outfile.write(' number=%s' % (self.gds_format_string(quote_attrib(self.number).encode(ExternalEncoding), input_name='number'), ))
        if self.visible is not None and 'visible' not in already_processed:
            already_processed.append('visible')
            outfile.write(' visible=%s' % (self.gds_format_string(quote_attrib(self.visible).encode(ExternalEncoding), input_name='visible'), ))
        if self.active is not None and 'active' not in already_processed:
            already_processed.append('active')
            outfile.write(' active=%s' % (self.gds_format_string(quote_attrib(self.active).encode(ExternalEncoding), input_name='active'), ))
        if self.fill is not None and 'fill' not in already_processed:
            already_processed.append('fill')
            outfile.write(' fill=%s' % (self.gds_format_string(quote_attrib(self.fill).encode(ExternalEncoding), input_name='fill'), ))
    def exportChildren(self, outfile, level, namespace_='t:', name_='layer', fromsubclass_=False):
        # No child elements defined for <layer>.
        pass
    def hasContent_(self):
        # Generated empty condition: evaluates an empty tuple, so this is
        # always False — <layer> has no child content.
        if (
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='layer'):
        # Write the element as a Python-literal representation (debug aid).
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.name is not None and 'name' not in already_processed:
            already_processed.append('name')
            showIndent(outfile, level)
            outfile.write('name = "%s",\n' % (self.name,))
        if self.color is not None and 'color' not in already_processed:
            already_processed.append('color')
            showIndent(outfile, level)
            outfile.write('color = "%s",\n' % (self.color,))
        if self.number is not None and 'number' not in already_processed:
            already_processed.append('number')
            showIndent(outfile, level)
            outfile.write('number = "%s",\n' % (self.number,))
        if self.visible is not None and 'visible' not in already_processed:
            already_processed.append('visible')
            showIndent(outfile, level)
            outfile.write('visible = "%s",\n' % (self.visible,))
        if self.active is not None and 'active' not in already_processed:
            already_processed.append('active')
            showIndent(outfile, level)
            outfile.write('active = "%s",\n' % (self.active,))
        if self.fill is not None and 'fill' not in already_processed:
            already_processed.append('fill')
            showIndent(outfile, level)
            outfile.write('fill = "%s",\n' % (self.fill,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        # Populate this object from an ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Copy each XML attribute, if present, onto the instance.
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.append('name')
            self.name = value
        value = find_attr_value_('color', node)
        if value is not None and 'color' not in already_processed:
            already_processed.append('color')
            self.color = value
        value = find_attr_value_('number', node)
        if value is not None and 'number' not in already_processed:
            already_processed.append('number')
            self.number = value
        value = find_attr_value_('visible', node)
        if value is not None and 'visible' not in already_processed:
            already_processed.append('visible')
            self.visible = value
        value = find_attr_value_('active', node)
        if value is not None and 'active' not in already_processed:
            already_processed.append('active')
            self.active = value
        value = find_attr_value_('fill', node)
        if value is not None and 'fill' not in already_processed:
            already_processed.append('fill')
            self.fill = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No children to build for <layer>.
        pass
# end class layer
class classxx(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, width=None, number=None, drill=None, name=None, clearance=None):
self.width = _cast(None, width)
self.number = _cast(None, number)
self.drill = _cast(None, drill)
self.name = _cast(None, name)
self.clearance = clearance
def factory(*args_, **kwargs_):
if classxx.subclass:
return classxx.subclass(*args_, **kwargs_)
else:
return classxx(*args_, **kwargs_)
factory = staticmethod(factory)
def get_clearance(self): return self.clearance
def set_clearance(self, clearance): self.clearance = clearance
def get_width(self): return self.width
def set_width(self, width): self.width = width
def get_number(self): return self.number
def set_number(self, number): self.number = number
def get_drill(self): return self.drill
def set_drill(self, drill): self.drill = drill
def get_name(self): return self.name
def set_name(self, name): self.name = name
def export(self, outfile, level, namespace_='t:', name_='class', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='class')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, | |
n)
if hash in _dict_conjugate:
return _dict_conjugate[hash].copy()
cc = np.zeros(n, dtype = np.int)
if n > 0:
for j, k in enumerate(c):
if k >= n:
cc[n-1] += 1
elif k >= 1:
cc[k-1] += 1
s = cc[n-1]
for j in xrange(n-2, -1, -1):
s += cc[j]
cc[j] = s
_dict_conjugate[hash] = cc
return cc
# Eliminate extreme rows and columns recursively until all remaining
# rows and columns are non-extreme. Perform the matching pruning on
# supplied arrays. Provide a function that can undo this pruning
# (after sampling).
def _prune(r, c, *arrays):
    """Recursively strip rows/columns whose entries are forced by the margins.

    Four kinds of "extreme" lines are removed until none remain:
      * rows with margin 0 and columns with margin 0 (all entries zero);
      * rows with margin n (entire row is ones) and columns with margin m
        (entire column is ones) -- their forced one-positions are recorded
        in `unprune_ones` and the opposing margins are decremented.

    Args:
        r: row margins, length m (copied; the caller's array is untouched)
        c: column margins, length n (copied)
        *arrays: (m x n) arrays pruned in lockstep with r and c

    Returns:
        (r, c, arrays, unprune): pruned margins, pruned copies of the
        arrays, and an `unprune` closure that maps a sampled triple back
        to original row/column indices, re-appending the forced ones.
    """
    r = r.copy()
    c = c.copy()
    arrays = list([a.copy() for a in arrays])
    A = len(arrays)
    unprune_ones = []
    # Maps from current (pruned) indices back to original indices.
    r_unprune = np.arange(len(r))
    c_unprune = np.arange(len(c))
    while True:
        # Each removal can expose new extreme lines, so restart the scan
        # (via `continue`) after every successful prune.
        m, n = len(r), len(c)
        r_0 = (r == 0)
        if r_0.any():
            r = r[~r_0]
            for a in xrange(A):
                arrays[a] = arrays[a][~r_0]
            r_unprune = r_unprune[~r_0]
            continue
        r_n = (r == n)
        if r_n.any():
            # Full rows: every surviving column gets a forced one here.
            r = r[~r_n]
            unprune_ones.extend([(r_u,c_u)
                                 for r_u in r_unprune[r_n]
                                 for c_u in c_unprune])
            c -= np.sum(r_n)
            for a in xrange(A):
                arrays[a] = arrays[a][~r_n]
            r_unprune = r_unprune[~r_n]
            continue
        c_0 = (c == 0)
        if c_0.any():
            c = c[~c_0]
            for a in xrange(A):
                arrays[a] = arrays[a][:,~c_0]
            c_unprune = c_unprune[~c_0]
            continue
        c_m = (c == m)
        if c_m.any():
            # Full columns: every surviving row gets a forced one here.
            c = c[~c_m]
            unprune_ones.extend([(r_u,c_u)
                                 for r_u in r_unprune
                                 for c_u in c_unprune[c_m]])
            r -= np.sum(c_m)
            for a in xrange(A):
                arrays[a] = arrays[a][:,~c_m]
            c_unprune = c_unprune[~c_m]
            continue
        break
    unprune_ones = np.array(unprune_ones)
    def unprune(x):
        # Unpack into actual samples, logP, logQ
        # (sparse one-locations plus two log-probability scalars; the
        # exact logP/logQ order follows the sampler's convention --
        # confirm against _compute_sample).
        x_a, x_b, x_c = x
        if not x_a.shape[0] == 0:
            # Translate pruned indices back to original coordinates.
            x_a[:,0] = r_unprune[x_a[:,0]]
            x_a[:,1] = c_unprune[x_a[:,1]]
        if unprune_ones.shape[0] == 0:
            return (x_a, x_b, x_c)
        else:
            return (np.vstack([x_a, unprune_ones]), x_b, x_c)
    # Copy (views of) arrays to put them in C-contiguous form
    return r, c, [a.copy() for a in arrays], unprune
def approximate_from_margins_weights(r, c, w, T = None,
                                     sort_by_wopt_var = True):
    """Return approximate samples from row/column-conditional binary matrices.
    Return a binary matrix (or a list of binary matrices) sampled
    approximately according to the specified Bernoulli weights,
    conditioned on having the specified margins.
    Inputs:
      r: row margins, length m
      c: column margins, length n
      w: weight matrix, (m x n) matrix with values in (0, +infty)
      T: number of matrices to sample
      sort_by_wopt_var: when enabled, column ordering depends on w
    Output:
      B_sample_sparse: (T default) sparse representation of (m x n) binary matrix
        (T >= 1) list of (sparse binary matrices, logQ, logP)
    More explicitly, consider independent Bernoulli random variables
    B(i,j) arranged as an m x n matrix B given the m-vector of row sums
    r and the n-vector of column sums c of the sample, i.e., given that
    sum(B_sample, 1) = r and sum(B_sample, 0) = c.
    An error is generated if no binary matrix agrees with r and c.
    B(i,j) is Bernoulli(p(i,j)) where p(i,j) = w(i,j)/(1+w(i,j)), i.e.,
    w(i,j) = p(i,j)/(1-p(i,j)). [The case p(i,j) = 1 must be handled by
    the user in a preprocessing step, by converting to p(i,j) = 0 and
    decrementing the row and column sums appropriately.]
    The sparse representation used for output is a matrix giving the
    locations of the ones in the sample. If d = sum(r) = sum(c), then
    B_sample_sparse has dimensions (d x 2). If something goes wrong (due
    to undetected improper input), some of the rows of B_sample_sparse
    may [-1,-1], indicating no entry of B_sample.
    B_sample can be recovered from B_sample_sparse via:
    B_sample = np.zeros((m,n), dtype=np.bool)
    for i, j in B_sample_sparse:
        if i == -1: break
        B_sample[i,j] = 1
    """
    # Remove rows/columns whose entries are forced by the margins; samples
    # are drawn on the reduced problem and mapped back via `unprune`.
    r_prune, c_prune, arrays_prune, unprune = _prune(r, c, w)
    w_prune = arrays_prune[0]
    _check_margins(r_prune, c_prune)
    ### Preprocessing
    # Sizing (making copies of m and n, as they are mutated during sampling)
    r_init = r_prune.copy()
    m, n = len(r_prune), len(c_prune)
    if (m == 0) or (n == 0):
        # Pruning forced every entry; nothing left to sample.
        if T:
            return [unprune([np.empty((0,2)), 0, 0]) for t in xrange(T)]
        else:
            return np.empty((0,0))
    m_init, n_init = m, n
    assert((m,n) == w_prune.shape)
    # Sort the row margins (descending)
    rndx_init = np.argsort(-r_prune)
    rsort = r_prune[rndx_init]
    # Balance the weights
    a_scale, b_scale = canonical_scalings(w_prune, r_prune, c_prune)
    wopt = apply_scale(w_prune, a_scale, b_scale)
    # Reorder the columns
    if sort_by_wopt_var:
        # Ascending margin; ties broken by descending variance of wopt.
        cndx = np.lexsort((-wopt.var(0), c_prune))
    else:
        cndx = np.argsort(c_prune)
    csort = c_prune[cndx]
    wopt = wopt[:,cndx]
    # Precompute log weights
    logw = np.log(w_prune)
    # Compute G
    G = _compute_G(r_prune, m, n, wopt)
    # Generate the inverse index for the row orders to facilitate fast
    # sorting during the updating
    irndx_init = np.argsort(rndx_init)
    # Compute the conjugate of c
    cconj_init = conjugate(csort, m)
    # Get the running total of number of ones to assign
    count_init = np.sum(rsort)
    def do_sample():
        # Draw one sample on the pruned problem, then restore original
        # row/column indices (and the forced ones) via `unprune`.
        sample_prune = _compute_sample(logw,
                                       count_init, m_init, n_init,
                                       r_init, rndx_init, irndx_init,
                                       csort, cndx, cconj_init,
                                       G)
        return unprune(sample_prune)
    # T falsy (None or 0): return just the sparse matrix of one sample;
    # otherwise a list of T (sparse matrix, logQ, logP) triples.
    if T:
        return [do_sample() for t in xrange(T)]
    else:
        return do_sample()[0]
def approximate_conditional_nll(A, w, sort_by_wopt_var = True):
    """Return approximate row/column-conditional NLL of binary matrix.
    Return the approximate nll of an observed binary matrix given
    specified Bernoulli weights, conditioned on having the observed
    margins.
    Inputs:
      A: observed data, (m x n) binary matrix
      w: weight matrix, (m x n) matrix with values in (0, +infty)
      sort_by_wopt_var: when enabled, column ordering depends on w
    Output:
      ncll: negative conditional log-likelihood
    """
    assert(A.shape == w.shape)
    # Margins of the observed matrix. Use the builtin `int` instead of
    # `np.int`: the alias was always identical to `int` and was removed
    # in NumPy 1.24, so this keeps behavior while staying runnable on
    # modern NumPy.
    r = A.sum(1, dtype=int)
    c = A.sum(0, dtype=int)
    # Drop rows/columns fully determined by the margins; they contribute
    # nothing to the conditional likelihood.
    r, c, arrays, _ = _prune(r, c, A, w)
    A, w = arrays
    # Sizing
    m, n = len(r), len(c)
    if (m == 0) or (n == 0):
        # Everything was forced: conditional probability 1, so NLL is 0.
        return 0.0
    # Sort the row margins (descending)
    rndx = np.argsort(-r)
    rsort = r[rndx]
    # Balance the weights
    a_scale, b_scale = canonical_scalings(w, r, c)
    wopt = apply_scale(w, a_scale, b_scale)
    if np.isnan(wopt).any():
        # Scaling failed numerically; fall back to the raw weights.
        wopt = w
    # Reorder the columns (ascending margin, ties by descending variance)
    if sort_by_wopt_var:
        cndx = np.lexsort((-wopt.var(0), c))
    else:
        cndx = np.argsort(c)
    csort = c[cndx]
    wopt = wopt[:,cndx]
    # Compute G
    G = _compute_G(r, m, n, wopt)
    return _compute_cnll(A, r, rsort, rndx, csort, cndx, m, n, G)
# Cache of G workspaces keyed by shape, so repeated calls with the same
# problem size reuse one allocation (contents are overwritten each call).
_G_pool = {}
def _compute_G(r, m, n, wopt):
    """Build the G table used by the column-wise sampler.

    G has shape (r_max+1, m, n-1) and is filled by a backward recurrence
    over columns in log space, then converted in place into ratio form.
    When the compiled support module is available, the C fill_G does the
    same work; the Python branch below is the fallback.
    """
    logwopt = np.log(wopt)
    r_max = max(1, r.max())
    G_shape = (r_max+1, m, n-1)
    if G_shape in _G_pool:
        G = _G_pool[G_shape]
    else:
        G = np.empty(G_shape)
        _G_pool[G_shape] = G
    # Reset the (possibly reused) workspace before filling.
    G[:] = -np.inf
    G[0,:,:] = 0.0
    G[1,:,n-2] = logwopt[:,n-1]
    if c_support_loaded:
        fill_G(r, r_max, m, n, wopt, logwopt, G)
    else:
        for i, ri in enumerate(r):
            # Backward recurrence over columns:
            # G[k,i,j-1] = logaddexp(G[k,i,j], G[k-1,i,j] + logw[i,j]),
            # written out with the max-first trick for stability.
            for j in xrange(n-2, 0, -1):
                wij = logwopt[i,j]
                for k in xrange(1, ri+1):
                    b = G[k-1,i,j] + wij
                    a = G[k,i,j]
                    if a == -np.inf and b == -np.inf: continue
                    if a > b:
                        G[k,i,j-1] = a + np.log(1.0 + np.exp(b-a))
                    else:
                        G[k,i,j-1] = b + np.log(1.0 + np.exp(a-b))
            # Convert log counts to ratios in place; -1.0 marks an
            # infinite denominator (impossible continuation).
            for j in xrange(n-1):
                for k in xrange(r_max):
                    Gk_num = G[k,i,j]
                    Gk_den = G[k+1,i,j]
                    if np.isinf(Gk_den):
                        G[k,i,j] = -1.0
                    else:
                        G[k,i,j] = wopt[i,j] * np.exp(Gk_num-Gk_den) * \
                            ((n - j - k - 1.0) / (k + 1.0))
                # NOTE(review): uses Gk_den left over from the last k
                # iteration (i.e. the original G[r_max,i,j]) -- this looks
                # intentional (mirrors the k-loop's sentinel) but confirm
                # against the C implementation in fill_G.
                if np.isinf(Gk_den):
                    G[r_max,i,j] = -1.0
    return G
def _compute_cnll(A, r, rsort, rndx, csort, cndx, m, n, G):
# Generate the inverse index for the row orders to facilitate fast
# sorting during the updating
irndx = np.argsort(rndx)
# Compute the conjugate of c
cconj = conjugate(csort, m)
# Get the running total of number of ones to assign
count = np.sum(rsort)
# Initialize B_sample_sparse
B_sample_sparse = np.empty((count,2), dtype=np.int)
B_sample_sparse[:] = -1
# Initialize intermediate storage
#
# Index 0 corresponds to -1, index 1 corresponds to 0, index 2
# corresponds to 1, ..., index M-1 corresponds to c[0]+1
M = csort[-1] + 3
S = np.zeros((M,m))
SS = np.zeros(M)
if c_support_loaded:
return core_cnll(A,
count, m, n,
r, rndx, irndx, csort, cndx, cconj, G,
S, SS, B_sample_sparse)
else:
# Most recent assigned column in B_sample_sparse
place = -1
# Initialize nll
cnll = 0.0
# Loop over columns for column-wise sampling
#
# Warning: things that "should" be fixed are modified in this
# loop, e.g., n, the number of columns!
for c1 in xrange(n):
### Sample the next column
# Remember the starting point for this column in B_sample_sparse
placestart = place + 1
# Inspect column
clabel, colval = cndx[c1], csort[c1]
if count == 0: break
# Update the conjugate
cconj[0:colval] -= 1
# Update the | |
if we should use homosapiens dataset on gene dates
mindistance: int the minimal phylogenetic distance between in average in this homology to consider it
highly conserved
preserved: bool to true if we should find highly preserved genes or not
size: the average size of the datapoints in the pointcloud representation of this dataset
minpreserv: float minimal percentage of homologous species that have this homology
minsimi: float minimal avg similarity between genes to consider them highly preserved
showvar: bool to true, show the mean variance in CUB values accros this homology as a variation in dot sizes
eps: float the hyperparamter of the clustering algorithm applied to this dataset
homoset: PyCUB.homoset the homoset to use
reducer: str the reducer to use 'tsne' or 'PCA'
perplexity: int the perplexity hyperparam for tSNE
Raises:
UnboundLocalError: "you need to compute the averages of the all_homoset. use PyCUB.compute_averages(homoset)"
"""
if not homosapiens:
if homoset[-1].isrecent is None:
homoset.compute_ages(preserved=preserved, minpreserv=minpreserv, minsimi=minsimi)
else:
pass
# TODO: code the version for homo sapiens where we know exactly this distance with more data
# and better inference metrics
# display the differences between recent homologies and older ones
pdb.set_trace()
averagehomo_matrix = np.zeros((len(homoset), utils.CUBD))
for i, homo in enumerate(homoset.homo_namelist):
averagehomo_matrix[i] = homoset[homo].mean
if reducer == 'tsne':
red = man.TSNE(n_components=2, perplexity=perplexity).fit_transform(averagehomo_matrix)
elif reducer == 'pca':
red = PCA(n_components=2).fit_transform(averagehomo_matrix)
else:
raise AttributeError("wrong algorithm")
alg = cluster.DBSCAN(eps=eps, min_samples=7, algorithm='auto', n_jobs=-1)
clusters = alg.fit_predict(averagehomo_matrix).tolist()
n_clusters_ = len(set(clusters))
if n_clusters_ > 10:
print "ooups you have more than 10 clusters"
colormap = list(utils.colormap)
colors = [colormap[int(homoset[homo].ishighpreserved)] if not homoset[homo].isrecent else
utils.rgb2hex((126, 88, np.floor(156 * homoset[homo].isrecent))) for homo in homoset.homo_namelist]
data = dict(x=red[:, 0], y=red[:, 1],
homologies=homoset.homo_namelist,
meanentropy=["%.2f" % averagehomo_matrix[i].mean()
for i in range(len(averagehomo_matrix))],
color=colors,
recent=colors,
clusters=clusters,
size=[size + (varsize * self.all_homoset[homo].var.mean()) if self.all_homoset[homo].var is not None
else size for homo in self.all_homoset.homo_namelist] if showvar else size)
# add average of similar protein name
values = ["similarity_scores", "KaKs_Scores", "nans", "lenmat", "GCcount", "weight",
"protein_abundance", "mRNA_abundance", "decay_rate", "is_secreted", "cys_elements",
"tot_volume", "mean_hydrophobicity", "glucose_cost", "synthesis_steps", "isoelectricpoint", "meanecai", "meancai", "conservation"]
labe = ["show Recent/preserved", "showclusters", "show avg similarity_scores", "show avg KaKs_Scores", "show Nans avg",
"show avg Length", "show avg GCcount", "Show weight", "Show prot abundance", "Show mRNA abundance",
"Show half life", "Show secreted", "Show num of cys", "Show volume", "Show hydrophobicity", "show cost (glucose)",
"Show synthesis cost", "Show Pi", "Show ECAI", "Show CAI", "show amino Conservation"] # 21
templabe = labe[:2]
i = 2
for val in values[:5]:
if getattr(homoset[0], val) is not None:
data.update({val: np.array([getattr(homoset[homo], val).mean() for homo in homoset.homo_namelist])})
templabe.append(labe[i])
else:
templabe.append(" ")
i += 1
for val in values[5:]:
data.update({val: np.nan_to_num(np.array([getattr(homoset[homo], val) if getattr(homoset[homo], val) is not None else 0 for homo in homoset.homo_namelist]))})
templabe.append(labe[i])
i += 1
for k, v in data.iteritems():
if k == "conservation":
v = v - v.min()
elif k == "mRNA_abundance" or k == "protein_abundance":
v = np.log(1 + v)
source = ColumnDataSource(data=data)
output_notebook()
callback = CustomJS(args=dict(source=source), code=utils.callback_allhomo)
radio_button_group = widgets.RadioButtonGroup(
labels=templabe, callback=callback, active=0)
hover = HoverTool(tooltips=[("homologies: ", "@homologies"), ("avg nans: ", "@nans"), ("similarity scores: ", "@similarity_scores"),
("mRNA abundance: ", "@mRNA_abundance"), ("mean ecai: ", "@meanecai"), ("amino conservation: ", "@conservation"),
("mean_entr: ", "@meanentropy"), ("length: ", "@lengths"), ("GCcount: ", "@gc")])
p = figure(title="exploration of every homologies",
tools=[hover, WheelZoomTool(), PanTool(), SaveTool(), ResetTool()],
plot_width=800, plot_height=600)
p.circle(x='x', y='y', source=source, color='color',
size='size')
save(column(radio_button_group, p), "utils/templot/homology_compare.html")
show(column(radio_button_group, p))
def regress_on_genes(self, homoset, full=True, without=['meanecai', 'meancai'], perctrain=0.8, algo="lasso", eps=0.001, n_alphas=100):
"""
Will fit a regression curve on the CUB values of the different homologies according to the metadatas available for each of them.
It will try to see if there is enough information in the metadata to retrieve CUB values. and if there is,
how much for each metadata (if we constraint the number of regressors) is it better for entropy values, mean entropy
or ECAI values
or raw frequency, should we remove some data
Args:
without: list[str] of flags [similarity_scores, KaKs_Scores, nans, lenmat, GCcount, weight,
protein_abundance, mRNA_abundance, decay_rate, cys_elements, tot_volume, mean_hydrophobicity,
glucose_cost, synthesis_steps, is_recent, meanecai]
full: bool flags to true to use full CUB values or meanCUB values, as regressee
homoset: PyCUB.homoset the homoset to use
perctrain: the percentage of training set to total set ( the rest is used as test set)
algo: str flag to lasso or nn to use either Lasso with Cross Validation, or a 2 layer neural net
eps: the eps value for the Lasso
n_alphas: the number of alphas for the lasso
Returns:
scoregenes: float, the score of the regression performed
coeffgenes: the coefficient applied to each category (for each CUB value if using full)
attrlist: the corresponding list[str] of attribute used
Raises:
UnboundLocalError: "wrong params"
"""
params = []
dataset = np.nan_to_num(homoset.averagehomo_matrix) if full else np.nan_to_num(homoset.averagehomo_matrix).mean(1)
values = ["similarity_scores", "KaKs_Scores", "nans", "lenmat", "GCcount", "weight",
"protein_abundance", "mRNA_abundance", "decay_rate", "is_secreted", "cys_elements",
"tot_volume", "mean_hydrophobicity", "glucose_cost", "synthesis_steps",
"isoelectricpoint", "meanecai", "meancai", "conservation"]
attrlist = []
pdb.set_trace()
for val in values[:5]:
if val not in without:
if getattr(homoset[0], val) is not None:
arr = np.nan_to_num(np.array([getattr(homoset[homo], val).mean() for homo in homoset.homo_namelist])).astype(float)
arr = arr / arr.max()
if not full:
print val + ': ' + str(spearmanr(np.ma.masked_equal(arr, 0), np.ma.masked_equal(dataset,0), axis=None))
params.append(arr)
attrlist.append(val)
for val in values[5:]:
if val not in without:
if getattr(homoset[0], val) is not None:
arr = np.nan_to_num(np.array([getattr(homoset[homo], val) for homo in homoset.homo_namelist])).astype(float)
arr = arr / arr.max()
if not full:
print val + ': ' + str(spearmanr(np.ma.masked_equal(arr, 0), np.ma.masked_equal(dataset,0), axis=None))
params.append(arr)
attrlist.append(val)
if algo == "lasso":
# http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LassoCV.html
print "change"
model = MultiTaskLassoCV(eps=eps, n_alphas=n_alphas,
alphas=None, fit_intercept=True, normalize=True,
max_iter=1000, tol=0.0001, copy_X=False, cv=None,
verbose=False, n_jobs=1, random_state=None, selection='cyclic')\
if full else LassoCV(eps=eps, n_alphas=n_alphas,
alphas=None, fit_intercept=True, normalize=True, precompute='auto',
max_iter=1000, tol=0.0001, copy_X=False, cv=None, verbose=False, n_jobs=-1,
positive=False, random_state=None, selection='cyclic')
elif algo == "nn" and not full:
# http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Perceptron.html
model = MLPRegressor(hidden_layer_sizes=(len(attrlist), len(attrlist)), activation='relu', solver='adam', alpha=0.0001,
batch_size='auto', learning_rate='constant', learning_rate_init=0.001,
power_t=0.5, max_iter=200, shuffle=True, random_state=None, tol=0.0001,
verbose=1, warm_start=False, momentum=0.9, nesterovs_momentum=True,
early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
else:
raise UnboundLocalError("wrong params")
params = np.vstack(params).T
model.fit(params[:int(len(homoset.homo_namelist) * perctrain)], dataset[:int(len(homoset.homo_namelist) * perctrain)])
self.scoregenes = model.score(params[int(len(homoset.homo_namelist) * perctrain):],
dataset[int(len(homoset.homo_namelist) * perctrain):], sample_weight=None)
self.coeffgenes = model.coef_.tolist() if algo == "lasso" else model.coefs_
print "the R^2 score is of: " + str(self.scoregenes)
print "-------------------------------"
if model == "lasso":
for i, val in enumerate(attrlist):
print val + ": " + str(self.coeffgenes[i])
return self.scoregenes, self.coeffgenes, attrlist
def getRelation2G3DD(self, species_name='saccharomyces_cerevisiae', kingdom='fungi',
intrachromosome="utils/meta/3Dmodel/interactions_HindIII_fdr0.01_intra_cerevisiae.csv",
interchromose=["utils/meta/3Dmodel/cerevisiae_inter1.csv",
"utils/meta/3Dmodel/cerevisiae_inter2.csv",
"utils/meta/3Dmodel/cerevisiae_inter3.csv",
"utils/meta/3Dmodel/cerevisiae_inter4.csv",
"utils/meta/3Dmodel/cerevisiae_inter5.csv"], bins=2000, seq='cds', use='diament2',
euclide=False, homomean=False):
"""
https://www.nature.com/articles/ncomms6876
retrieve the data for the species sacharomyces cerevisiae and Schizosaccharomyces pombe
and find if similarity distances of CUB using entropy between genes of this species is predictive
of closeness of genes in the nucleus.
Used to confirm a work on nature and see if we can have some similar results by only looking at the
CUB
Args:
species_name: str the name of the species to look for
kingdom: str the kingdom in which to find the species
intrachromosome: str the location of the csv interaction data for intrachromosome respecting the format
of the default file
interchromose: str the location of the csv interaction data for interchromose respecting the format
of the default file
bins: int, the number of bin to use (a power of 2)
seq: the type of sequence to compare to. (to compute the CUB from)
use: str flag different types of algorithm I have made trying to understand the thing
compute: str flag to different computation available
euclidean: bool flag to true to compute euclidean instead of Endres Shcidelin metrics
"""
# get gene distance matrix from entropy value distance or Andres Schindelin metrics
# compare to see how much the distance between one can explain the distance between another by
# regression
# retrieve the data.
intra = pd.read_csv(intrachromosome, delim_whitespace=True).drop(columns=["qvalue", "freq"])
inter = pd.concat([pd.read_csv(interchro) for interchro in interchromose]).drop(columns=[
"P value", "Q value", "sequence frequency"])
# getting all the genes
torname = {"HindIII fragment": "locus1",
"HindIII fragment.1": "locus2",
"chromosome": "chr1",
"chromosome.1": "chr2"}
inter = inter.rename(torname, axis="columns")
df = pd.concat([intra, inter])
df = | |
"""Functions for builtin CherryPy tools."""
import logging
import re
from hashlib import md5
import six
from six.moves import urllib
import cherrypy
from cherrypy._cpcompat import text_or_bytes
from cherrypy.lib import httputil as _httputil
from cherrypy.lib import is_iterator
# Conditional HTTP request support #
def validate_etags(autotags=False, debug=False):
    """Validate the current ETag against If-Match, If-None-Match headers.

    If autotags is True, an ETag response-header value will be provided
    from an MD5 hash of the response body (unless some other code has
    already provided an ETag header). If False (the default), the ETag
    will not be automatic.

    WARNING: the autotags feature is not designed for URL's which allow
    methods other than GET. For example, if a POST to the same URL returns
    no content, the automatic ETag will be incorrect, breaking a fundamental
    use for entity tags in a possibly destructive fashion. Likewise, if you
    raise 304 Not Modified, the response body will be empty, the ETag hash
    will be incorrect, and your application will break.

    See :rfc:`2616` Section 14.24.
    """
    response = cherrypy.serving.response
    # Guard against being run twice.
    if hasattr(response, 'ETag'):
        return
    status, reason, msg = _httputil.valid_status(response.status)
    # May be None if no handler/tool set an ETag header.
    etag = response.headers.get('ETag')
    # Automatic ETag generation. See warning in docstring.
    # Generate only when: no ETag set yet, autotags enabled, status is 200.
    if etag:
        if debug:
            cherrypy.log('ETag already set: %s' % etag, 'TOOLS.ETAGS')
    elif not autotags:
        if debug:
            cherrypy.log('Autotags off', 'TOOLS.ETAGS')
    elif status != 200:
        if debug:
            cherrypy.log('Status not 200', 'TOOLS.ETAGS')
    else:
        # Hash the (collapsed) response body to form a strong ETag.
        etag = response.collapse_body()
        etag = '"%s"' % md5(etag).hexdigest()
        if debug:
            cherrypy.log('Setting ETag: %s' % etag, 'TOOLS.ETAGS')
        response.headers['ETag'] = etag
    # Mark as processed (also the re-entry guard above); etag may be None.
    response.ETag = etag
    # "If the request would, without the If-Match header field, result in
    # anything other than a 2xx or 412 status, then the If-Match header
    # MUST be ignored."
    if debug:
        cherrypy.log('Status: %s' % status, 'TOOLS.ETAGS')
    if status >= 200 and status <= 299:
        request = cherrypy.serving.request
        conditions = request.headers.elements('If-Match') or []
        conditions = [str(x) for x in conditions]
        if debug:
            cherrypy.log('If-Match conditions: %s' % repr(conditions),
                         'TOOLS.ETAGS')
        if conditions and not (conditions == ['*'] or etag in conditions):
            raise cherrypy.HTTPError(412, 'If-Match failed: ETag %r did '
                                     'not match %r' % (etag, conditions))
        conditions = request.headers.elements('If-None-Match') or []
        conditions = [str(x) for x in conditions]
        if debug:
            cherrypy.log('If-None-Match conditions: %s' % repr(conditions),
                         'TOOLS.ETAGS')
        if conditions == ['*'] or etag in conditions:
            if debug:
                cherrypy.log('request.method: %s' %
                             request.method, 'TOOLS.ETAGS')
            # Safe methods get 304 Not Modified; others fail with 412.
            if request.method in ('GET', 'HEAD'):
                raise cherrypy.HTTPRedirect([], 304)
            else:
                raise cherrypy.HTTPError(412, 'If-None-Match failed: ETag %r '
                                         'matched %r' % (etag, conditions))
def validate_since():
    """Validate the current Last-Modified against If-Modified-Since headers.

    If no code has set the Last-Modified response header, then no validation
    will be performed.
    """
    response = cherrypy.serving.response
    lastmod = response.headers.get('Last-Modified')
    if not lastmod:
        # Nothing to validate against.
        return
    status, reason, msg = _httputil.valid_status(response.status)
    request = cherrypy.serving.request
    in_2xx = 200 <= status <= 299
    # If-Unmodified-Since: any mismatch on a 2xx/412 response is a
    # precondition failure.
    unmodified_since = request.headers.get('If-Unmodified-Since')
    if unmodified_since and unmodified_since != lastmod:
        if in_2xx or status == 412:
            raise cherrypy.HTTPError(412)
    # If-Modified-Since: a match on a 2xx/304 response means the client's
    # copy is current -- 304 for safe methods, 412 otherwise.
    modified_since = request.headers.get('If-Modified-Since')
    if modified_since and modified_since == lastmod:
        if in_2xx or status == 304:
            if request.method in ('GET', 'HEAD'):
                raise cherrypy.HTTPRedirect([], 304)
            raise cherrypy.HTTPError(412)
# Tool code #
def allow(methods=None, debug=False):
    """Raise 405 if request.method not in methods (default ['GET', 'HEAD']).

    The given methods are case-insensitive, and may be in any order.
    If only one method is allowed, you may supply a single string;
    if more than one, supply a list of strings.

    Regardless of whether the current method is allowed or not, this
    also emits an 'Allow' response header, containing the given methods.
    """
    # Accept a single string or a list/tuple; drop falsy entries.
    candidates = methods if isinstance(methods, (tuple, list)) else [methods]
    allowed = [m.upper() for m in candidates if m]
    if not allowed:
        allowed = ['GET', 'HEAD']
    elif 'GET' in allowed and 'HEAD' not in allowed:
        # HEAD is implied whenever GET is allowed.
        allowed.append('HEAD')
    cherrypy.response.headers['Allow'] = ', '.join(allowed)
    method = cherrypy.request.method
    if method not in allowed:
        if debug:
            cherrypy.log('request.method %r not in methods %r' %
                         (method, allowed), 'TOOLS.ALLOW')
        raise cherrypy.HTTPError(405)
    if debug:
        cherrypy.log('request.method %r in methods %r' %
                     (method, allowed), 'TOOLS.ALLOW')
def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For',
          scheme='X-Forwarded-Proto', debug=False):
    """Change the base URL (scheme://host[:port][/path]).

    For running a CP server behind Apache, lighttpd, or other HTTP server.

    For Apache and lighttpd, you should leave the 'local' argument at the
    default value of 'X-Forwarded-Host'. For Squid, you probably want to set
    tools.proxy.local = 'Origin'.

    If you want the new request.base to include path info (not just the host),
    you must explicitly set base to the full base path, and ALSO set 'local'
    to '', so that the X-Forwarded-Host request header (which never includes
    path info) does not override it. Regardless, the value for 'base' MUST
    NOT end in a slash.

    cherrypy.request.remote.ip (the IP address of the client) will be
    rewritten if the header specified by the 'remote' arg is valid.
    By default, 'remote' is set to 'X-Forwarded-For'. If you do not
    want to rewrite remote.ip, set the 'remote' arg to an empty string.
    """
    request = cherrypy.serving.request
    if scheme:
        # NOTE: 'scheme' starts out as a header *name* and is rebound below
        # to the actual scheme value ('https' or the header's contents).
        s = request.headers.get(scheme, None)
        if debug:
            cherrypy.log('Testing scheme %r:%r' % (scheme, s), 'TOOLS.PROXY')
        if s == 'on' and 'ssl' in scheme.lower():
            # This handles e.g. webfaction's 'X-Forwarded-Ssl: on' header
            scheme = 'https'
        else:
            # This is for lighttpd/pound/Mongrel's 'X-Forwarded-Proto: https'
            scheme = s
    if not scheme:
        # Header absent or empty: keep the scheme of the original base URL.
        scheme = request.base[:request.base.find('://')]
    if local:
        lbase = request.headers.get(local, None)
        if debug:
            cherrypy.log('Testing local %r:%r' % (local, lbase), 'TOOLS.PROXY')
        if lbase is not None:
            # Chained proxies may append values; use the first host listed.
            base = lbase.split(',')[0]
    if not base:
        # Neither an explicit base nor a forwarded host: fall back to the
        # Host request header, then to the netloc of the original base.
        default = urllib.parse.urlparse(request.base).netloc
        base = request.headers.get('Host', default)
    if base.find('://') == -1:
        # add http:// or https:// if needed
        base = scheme + '://' + base
    request.base = base
    if remote:
        xff = request.headers.get(remote)
        if debug:
            cherrypy.log('Testing remote %r:%r' % (remote, xff), 'TOOLS.PROXY')
        if xff:
            if remote == 'X-Forwarded-For':
                # Grab the first IP in a comma-separated list. Ref #1268.
                xff = next(ip.strip() for ip in xff.split(','))
            request.remote.ip = xff
def ignore_headers(headers=('Range',), debug=False):
    """Delete request headers whose field names are included in 'headers'.

    This is a useful tool for working behind certain HTTP servers;
    for example, Apache duplicates the work that CP does for 'Range'
    headers, and will doubly-truncate the response.
    """
    request = cherrypy.serving.request
    present = [name for name in headers if name in request.headers]
    for name in present:
        if debug:
            cherrypy.log('Ignoring request header %r' % name,
                         'TOOLS.IGNORE_HEADERS')
        del request.headers[name]
def response_headers(headers=None, debug=False):
    """Set headers on the response from an iterable of (name, value) pairs."""
    if debug:
        cherrypy.log('Setting response headers: %s' % repr(headers),
                     'TOOLS.RESPONSE_HEADERS')
    response = cherrypy.serving.response
    for name, value in (headers or []):
        response.headers[name] = value
# Mark as failsafe so the tool still runs when an error response is built.
response_headers.failsafe = True
def referer(pattern, accept=True, accept_missing=False, error=403,
            message='Forbidden Referer header.', debug=False):
    """Raise HTTPError if Referer header does/does not match the given pattern.

    pattern
        A regular expression pattern to test against the Referer.

    accept
        If True, the Referer must match the pattern; if False,
        the Referer must NOT match the pattern.

    accept_missing
        If True, permit requests with no Referer header.

    error
        The HTTP error code to return to the client on failure.

    message
        A string to include in the response body on failure.
    """
    request = cherrypy.serving.request
    try:
        ref = request.headers['Referer']
    except KeyError:
        if debug:
            cherrypy.log('No Referer header', 'TOOLS.REFERER')
        if accept_missing:
            return
    else:
        match = bool(re.match(pattern, ref))
        if debug:
            cherrypy.log('Referer %r matches %r' % (ref, pattern),
                         'TOOLS.REFERER')
        # Pass when the match outcome agrees with the 'accept' policy.
        if accept == match:
            return
    raise cherrypy.HTTPError(error, message)
class SessionAuth(object):
"""Assert that the user is logged in."""
session_key = 'username'
debug = False
def check_username_and_password(self, username, password):
pass
def anonymous(self):
"""Provide a temporary user name for anonymous users."""
pass
def on_login(self, username):
pass
def on_logout(self, username):
pass
def on_check(self, username):
pass
def login_screen(self, from_page='..', username='', error_msg='',
**kwargs):
return (six.text_type("""<html><body>
Message: %(error_msg)s
<form method="post" action="do_login">
Login: <input type="text" name="username" value="%(username)s" size="10" />
<br />
Password: <input type="password" name="password" size="10" />
<br />
<input type="hidden" name="from_page" value="%(from_page)s" />
<br />
<input type="submit" />
</form>
</body></html>""") % vars()).encode('utf-8')
def do_login(self, username, password, from_page='..', **kwargs):
"""Login. May raise redirect, or return True if request handled."""
response = cherrypy.serving.response
error_msg = self.check_username_and_password(username, password)
if error_msg:
body = self.login_screen(from_page, username, error_msg)
response.body = body
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers['Content-Length']
return True
else:
cherrypy.serving.request.login = username
cherrypy.session[self.session_key] = username
self.on_login(username)
raise cherrypy.HTTPRedirect(from_page or '/')
def do_logout(self, from_page='..', **kwargs):
"""Logout. May raise redirect, or return True if request handled."""
sess = | |
"""
Author: <NAME>
BSD 3-Clause License
Copyright (c) 2019, The Regents of the University of Minnesota
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
© 2019 GitHub, Inc.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import matplotlib.path as Path
import numpy as np
import global_var as glob
class Component(object):
    """A placed cell instance parsed from a DEF COMPONENTS section."""

    def __init__(self, name='', cell='', orient=None, x=0, y=0, number=0):
        self.compName = name    # instance name
        self.type = cell        # library cell (master) name
        self.orient = orient    # placement orientation (e.g. 'N', 'FS')
        self.x = x              # placement coordinates
        self.y = y
        self.number = number    # sequential index assigned by the parser
        self.property = []      # free-form property records

    def set_location(self, x, y):
        """Set the placement coordinates."""
        self.x = x
        self.y = y

    def set_orientation(self, orient):
        """Set the placement orientation."""
        self.orient = orient

    def set_property(self, data):
        """Append a property record."""
        self.property.append(data)

    def set_direction(self, dir):
        """Store pin direction, abbreviating DEF's INPUT/OUTPUT to I/O."""
        if dir == 'INPUT':
            dir = 'I'
        elif dir == 'OUTPUT':
            dir = 'O'
        self.direction = dir

    def __repr__(self):
        # Bug fix: the attribute is 'compName'; the original returned
        # 'self.name', which raised AttributeError on every repr().
        return self.compName
class NET(object):
    """A net parsed from a DEF NETS section.

    Tracks connected cells/pins, routed wire segments, the parasitic RC
    tree and coupling data, plus regression-feature bookkeeping.
    """

    def __init__(self, name, number=0):
        self.name = name
        self.regression_data = []   # feature values accumulated for regression
        self.cell = []              # connected cells (replaced by add_cell)
        self.input = []             # endpoints the net drives into
        self.output = []            # endpoints driving the net
        self.pin = []               # top-level I/O pins on this net
        self.number = number        # sequential index assigned by the parser
        self.wire_list = []         # Wire/Via segments of the routed net
        self.rc_tree = None         # parasitic RC tree, set externally
        self.coupling_capacitance = []
        self.property = []
        self._virtual = None        # marker set when the net is virtual
        self.__duplicate_cell = []  # input cells that appear more than once

    def add_cell(self, cell):
        # NOTE: despite the name, this *replaces* the cell list rather than
        # appending; kept as-is because callers rely on that behavior.
        self.cell = cell

    def add_pin(self, l):
        self.pin.append(l)

    def set_virtual(self, l):
        self._virtual = l

    @property
    def get_duplicateCell(self):
        return self.__duplicate_cell

    @property
    def get_virtual(self):
        return self._virtual

    @property
    def isVirtual(self):
        return bool(self._virtual)

    @property
    def hasPin(self):
        return bool(self.pin)

    @property
    def hasDuplicate(self):
        return bool(self.__duplicate_cell)

    def add_regression_data(self, val):
        self.regression_data.append(val)

    def add_input(self, l):
        self.input.append(l)

    def add_output(self, l):
        self.output.append(l)

    def set_wire_list(self, l):
        self.wire_list.append(l)

    def get_wire_list(self):
        return self.wire_list

    def add_cc(self, l):
        # Replaces (does not append) the coupling-capacitance record.
        self.coupling_capacitance = l

    def set_rc_tree(self, l):
        self.rc_tree = l

    def set_property(self, data):
        self.property.append(data)

    def set_duplicate_cell(self):
        """Record input cells occurring more than once, in first-seen order.

        Rewritten from the original O(n^2) pairwise scan to an O(n)
        count-then-collect pass with identical output ordering. Assumes the
        cell identifiers in input[i][0] are hashable (parsed names) -- TODO
        confirm against the parser that populates add_input().
        """
        counts = {}
        for entry in self.input:
            counts[entry[0]] = counts.get(entry[0], 0) + 1
        repeated = []
        for entry in self.input:
            cell = entry[0]
            if counts[cell] > 1 and cell not in repeated:
                repeated.append(cell)
        if repeated:
            self.__duplicate_cell = repeated
class routingPoints(object):
    """A single routing point (or via site) on a net's wire."""

    def __init__(self, x, y, metal_layer=None, ext=None, via_name=None):
        self.x = x
        self.y = y
        self.extPoint = ext              # optional wire-extension value
        self.metal_layer = metal_layer
        self.number = None               # index assigned via set_number()
        self.NEXT = []                   # downstream routing points
        self.via_name = via_name         # set only when this point is a via
        self.connected_to_pin = False

    @property
    def is_connected_to_pin(self):
        return self.connected_to_pin

    @property
    def set_connected_to_pin(self):
        # NOTE: a property with a side effect -- merely *accessing* it marks
        # the point as pin-connected. Kept for caller compatibility.
        self.connected_to_pin = True

    def set_next(self, data):
        self.NEXT.append(data)

    def set_number(self, num):
        self.number = num

    def set_metal_layer(self, l):
        self.metal_layer = l

    @property
    def hasExt(self):
        return self.extPoint is not None

    @property
    def isVia(self):
        return self.via_name is not None

    @property
    def get_extPoint(self):
        return self.extPoint

    def __repr__(self):
        if self.hasExt:
            return '( {} {} {} )'.format(self.x, self.y, self.extPoint)
        return '( {} {} )'.format(self.x, self.y)

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def isBetween(self, other, next_other):
        """Return True if this point lies strictly between the two given
        points along a shared horizontal or vertical line."""
        sx, sy = int(self.x), int(self.y)
        ax, ay = int(other.x), int(other.y)
        bx, by = int(next_other.x), int(next_other.y)
        # Horizontal segment: same y for all three, x strictly inside.
        if sy == ay == by and min(ax, bx) < sx < max(ax, bx):
            return True
        # Vertical segment: same x for all three, y strictly inside.
        if sx == ax == bx and min(ay, by) < sy < max(ay, by):
            return True
        return False
class Wire(object):
    """A routed wire segment made of routing points.

    Also serves as the base class for Via and Node.
    """

    def __init__(self, rp=None, virtual=False, metal_layer=None):
        # Bug fix: the original used a mutable default (rp=[]), so every
        # Wire created without an explicit list shared ONE routing-point
        # list and add_rp() polluted all of them. Passing a list explicitly
        # behaves exactly as before.
        self.routing_points = [] if rp is None else rp
        self.virtual = virtual
        self.metal_layer = metal_layer
        self.adj = []        # adjacent wire segments
        self.depth = None    # depth in the net's routing tree
        self.branch_rp = []  # routing points where branches split off

    def add_adj(self, new):
        self.adj.append(new)

    def add_rp(self, rp):
        self.routing_points.append(rp)

    def set_branchRP(self, rp):
        self.branch_rp.append(rp)

    def set_depth(self, depth):
        self.depth = depth

    def set_prev(self, prev):
        self.prev = prev

    @property
    def isWire(self):
        # A segment is a plain wire exactly when it is not a via.
        return not self.isVia

    # Eliminating star and link the two routing points
    def init(self, prev_x, prev_y):
        """Resolve '*' shorthand coordinates and link the two endpoints."""
        self.eliminate_star(prev_x, prev_y)
        if not self.isVia:
            self.routing_points[0].next = self.routing_points[1]

    @property
    def isVirtual(self):
        return self.virtual

    @property
    def isVia(self):
        # Overridden to True in the Via subclass.
        return False

    def eliminate_star(self, prev_x, prev_y):
        """Replace DEF '*' shorthand coordinates with concrete values.

        For a wire, '*' in the second point copies the first point's
        coordinate; for a via, '*' copies the previous point's coordinate.
        """
        if not self.isVia:
            if self.routing_points[1].x == '*':
                self.routing_points[1].x = self.routing_points[0].x
            if self.routing_points[1].y == '*':
                self.routing_points[1].y = self.routing_points[0].y
        if self.isVia:
            if self.routing_points[0].x == '*':
                self.routing_points[0].x = prev_x
            if self.routing_points[0].y == '*':
                self.routing_points[0].y = prev_y

    def __repr__(self):
        if self.isVirtual:
            return 'Virtual --- {}, {}, {} {} {}'.format(self.prev, self.depth, self.metal_layer, self.routing_points[0], self.routing_points[1])
        return '{}, {}, {} {} {}'.format(self.prev, self.depth, self.metal_layer, self.routing_points[0], self.routing_points[1])

    # Check if the point is between two nodes
    def isPartOf(self, other):
        """Check whether this segment's endpoint(s) lie strictly inside
        'other' (a non-via wire), sharing its horizontal/vertical line.

        Returns (True, i) where i is the index of this wire's endpoint
        inside 'other' (None for a via); (False, None) otherwise.
        """
        if self.isVia and not other.isVia:
            if ((int(self.routing_points[0].y) < int(other.routing_points[0].y) and int(self.routing_points[0].y) > int(other.routing_points[1].y)) \
                    or (int(self.routing_points[0].y) > int(other.routing_points[0].y) and int(self.routing_points[0].y) < int(other.routing_points[1].y))) \
                    and (int(self.routing_points[0].x) == int(other.routing_points[0].x) and int(self.routing_points[0].x) == int(other.routing_points[1].x)):
                return True, None
            if ((int(self.routing_points[0].x) < int(other.routing_points[0].x) and int(self.routing_points[0].x) > int(other.routing_points[1].x)) \
                    or (int(self.routing_points[0].x) > int(other.routing_points[0].x) and int(self.routing_points[0].x) < int(other.routing_points[1].x))) \
                    and (int(self.routing_points[0].y) == int(other.routing_points[0].y) and int(self.routing_points[0].y) == int(other.routing_points[1].y)):
                return True, None
        elif not self.isVia and not other.isVia:
            for i in range(2):
                if (((int(self.routing_points[i].y) < int(other.routing_points[0].y) and int(self.routing_points[i].y) > int(other.routing_points[1].y)) \
                        or (int(self.routing_points[i].y) > int(other.routing_points[0].y) and int(self.routing_points[i].y) < int(other.routing_points[1].y))) \
                        and (int(self.routing_points[i].x) == int(other.routing_points[0].x) and int(self.routing_points[i].x) == int(other.routing_points[1].x))):
                    return True, i
                if (int(self.routing_points[i].y) == int(other.routing_points[0].y) and int(self.routing_points[i].y) == int(other.routing_points[1].y)) \
                        and ((int(self.routing_points[i].x) < int(other.routing_points[0].x) and int(self.routing_points[i].x) > int(other.routing_points[1].x)) \
                        or (int(self.routing_points[i].x) > int(other.routing_points[0].x) and int(self.routing_points[i].x) < int(other.routing_points[1].x))):
                    return True, i
        return False, None
class Via(Wire):
    """A via segment connecting two metal layers on a routed net."""

    def __init__(self, name, rp=None, metal_layer=None, bottom_layer=None):
        # Bug fix: the original mutable default (rp=[]) shared one list
        # across all Via instances created without an explicit argument.
        Wire.__init__(self, [] if rp is None else rp, metal_layer=metal_layer)
        self.viaName = name
        self.bottom_layer = bottom_layer
        if self.bottom_layer is None:
            # Look up the bottom layer number from the technology LEF,
            # if that dictionary has been loaded.
            if glob.TECH_LEF_DICT and self.viaName in glob.TECH_LEF_DICT['Via']:
                self.set_bottom_layer(glob.TECH_LEF_DICT['Via'][self.viaName]['bottom_layer_number'])

    @property
    def get_viaName(self):
        return self.viaName

    def set_bottom_layer(self, bl):
        self.bottom_layer = bl

    @property
    def isVia(self):
        return True

    def __repr__(self):
        return '{}, {}, {} {} {}'.format(self.prev, self.depth, self.metal_layer, self.routing_points[0], self.viaName)
class Pin(object):
    """A top-level design pin parsed from a DEF PINS section."""

    def __init__(self, number=None):
        self.name = ''
        self.net = ''
        self.direction = ''
        self.layer = ''
        self.x = '0'
        self.y = '0'
        self.orientation = ''
        self.dimension = {}     # labelled bounding box, see set_dimension()
        self.number = number

    def set_name(self, data):
        self.name = data

    def set_net(self, data):
        self.net = data

    def set_orientation(self, data):
        self.orientation = data

    def set_direction(self, data):
        self.direction = data

    def set_layer(self, data):
        self.layer = data

    def set_x(self, data):
        self.x = data

    def set_y(self, data):
        self.y = data

    def set_dimension(self, data):
        # data holds (xl, yb, xr, yt); store it as a labelled bounding box.
        self.dimension = {
            'xl': data[0],
            'xr': data[2],
            'yb': data[1],
            'yt': data[3],
        }

    def __repr__(self):
        parts = (self.net, self.direction, self.layer, self.x, self.y)
        return ' '.join(parts)
class Node(Wire):
def __init__(self, rp = None):
Wire.__init__(self, rp)
self.adj = []
self.prev = None
self.depth = None
def | |
2.2 and 3.0 compat
self.assertTrue(
'File extension “” is not allowed' in res['error']['image'][0]
or 'File extension \'\' is not allowed' in res['error']['image'][0])
@override_settings(MEDIA_ROOT=os.path.join(settings.BASE_DIR, 'media_test'))
def test_comment_image_upload_unique_bad_name(self):
    """An upload name made entirely of invalid characters is replaced by a
    generated (uuid) name, preserving the extension."""
    utils.login(self)
    # Minimal valid 1x1 GIF payload.
    img = io.BytesIO(
        b'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
        b'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
    ext = '.gif'
    image_name = '???' + ext
    file = SimpleUploadedFile(
        image_name, img.read(), content_type='image/gif')
    response = self.client.post(
        reverse('spirit:comment:image-upload-ajax'),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        data={'image': file})
    res = json.loads(response.content.decode('utf-8'))
    self.assertTrue(res['url'].endswith(ext))
    # Bug fix: this was assertTrue(len(...), msg) -- a two-argument
    # assertTrue that always passed. Assert the intended equality:
    # 32 uuid hex chars plus the extension.
    self.assertEqual(len(os.path.basename(res['url'])), len(ext) + 32)  # uuid name
@override_settings(MEDIA_ROOT=os.path.join(settings.BASE_DIR, 'media_test'))
def test_comment_image_upload_unique_dots_name(self):
    """A name of dots and invalid characters is replaced by a generated
    (uuid) name, preserving only the real extension."""
    utils.login(self)
    # Minimal valid 1x1 GIF payload.
    img = io.BytesIO(
        b'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
        b'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
    ext = '.gif'
    image_name = '?...?...?' + ext
    file = SimpleUploadedFile(
        image_name, img.read(), content_type='image/gif')
    response = self.client.post(
        reverse('spirit:comment:image-upload-ajax'),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        data={'image': file})
    res = json.loads(response.content.decode('utf-8'))
    self.assertTrue(res['url'].endswith(ext))
    # Bug fix: this was assertTrue(len(...), msg) -- a two-argument
    # assertTrue that always passed. Assert the intended equality:
    # 32 uuid hex chars plus the extension.
    self.assertEqual(len(os.path.basename(res['url'])), len(ext) + 32)  # uuid name
@override_settings(MEDIA_ROOT=os.path.join(settings.BASE_DIR, 'media_test'))
def test_comment_image_upload_unique_hidden_name(self):
    """Invalid characters are stripped from the upload name; the remaining
    valid characters ('hidden' + extension) form the stored name."""
    utils.login(self)
    # Minimal valid 1x1 GIF payload.
    img = io.BytesIO(
        b'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
        b'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
    ext = '.gif'
    image_name = '?.h?i?d?d?e?n' + ext
    file = SimpleUploadedFile(
        image_name, img.read(), content_type='image/gif')
    response = self.client.post(
        reverse('spirit:comment:image-upload-ajax'),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        data={'image': file})
    res = json.loads(response.content.decode('utf-8'))
    self.assertTrue(res['url'].endswith('/hidden.gif'))
def test_comment_image_upload_invalid(self):
    """
    comment image upload, invalid image
    """
    utils.login(self)
    # Bytes that are not a decodable image, despite the .gif name/mime.
    payload = io.BytesIO(b'BAD\x02D\x01\x00;')
    payload.name = 'image.gif'
    payload.content_type = 'image/gif'
    upload = {'image': SimpleUploadedFile(payload.name, payload.read()), }
    response = self.client.post(reverse('spirit:comment:image-upload-ajax'),
                                HTTP_X_REQUESTED_WITH='XMLHttpRequest',
                                data=upload)
    result = json.loads(response.content.decode('utf-8'))
    self.assertIn('error', result.keys())
    self.assertIn('image', result['error'].keys())
@override_settings(
    MEDIA_ROOT=os.path.join(settings.BASE_DIR, 'media_test'),
    FILE_UPLOAD_MAX_MEMORY_SIZE=2621440,
    ST_PREVENT_SOME_FILE_DUPLICATION=True)
def test_comment_file_upload(self):
    """
    Check (in-memory) upload files are checked
    """
    utils.login(self)
    # sample valid pdf - https://stackoverflow.com/a/17280876
    file = io.BytesIO(
        b'%PDF-1.0\n1 0 obj<</Type/Catalog/Pages 2 0 R>>endobj 2 0 obj<</Type/Pages/Kids[3 0 R]/Count 1'
        b'>>endobj 3 0 obj<</Type/Page/MediaBox[0 0 3 3]>>endobj\nxref\n0 4\n0000000000 65535 f\n000000'
        b'0010 00000 n\n0000000053 00000 n\n0000000102 00000 n\ntrailer<</Size 4/Root 1 0 R>>\nstartxre'
        b'f\n149\n%EOF\n')
    files = {'file': SimpleUploadedFile('file.pdf', file.read(), content_type='application/pdf'), }
    response = self.client.post(
        reverse('spirit:comment:file-upload-ajax'),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        data=files)
    res = json.loads(response.content.decode('utf-8'))
    # With ST_PREVENT_SOME_FILE_DUPLICATION on, the stored name is the
    # md5 of the file contents (golden value below).
    file_url = os.path.join(
        settings.MEDIA_URL, 'spirit', 'files', str(self.user.pk), "fadcb2389bb2b69b46bc54185de0ae91.pdf"
    ).replace("\\", "/")
    self.assertEqual(res['url'], file_url)
    file_path = os.path.join(
        settings.MEDIA_ROOT, 'spirit', 'files', str(self.user.pk), "fadcb2389bb2b69b46bc54185de0ae91.pdf"
    )
    # The stored file must be byte-identical to the upload.
    with open(file_path, 'rb') as fh:
        file.seek(0)
        self.assertEqual(fh.read(), file.read())
    shutil.rmtree(settings.MEDIA_ROOT)  # cleanup
@override_settings(
    MEDIA_ROOT=os.path.join(settings.BASE_DIR, 'media_test'),
    FILE_UPLOAD_MAX_MEMORY_SIZE=1,
    ST_PREVENT_SOME_FILE_DUPLICATION=True)
def test_comment_file_upload_tmp_file(self):
    """
    Check (tmp) upload files are checked
    """
    # FILE_UPLOAD_MAX_MEMORY_SIZE=1 forces Django onto the temporary-file
    # upload handler, exercising the on-disk code path.
    utils.login(self)
    file = io.BytesIO(
        b'%PDF-1.0\n1 0 obj<</Type/Catalog/Pages 2 0 R>>endobj 2 0 obj<</Type/Pages/Kids[3 0 R]/Count 1'
        b'>>endobj 3 0 obj<</Type/Page/MediaBox[0 0 3 3]>>endobj\nxref\n0 4\n0000000000 65535 f\n000000'
        b'0010 00000 n\n0000000053 00000 n\n0000000102 00000 n\ntrailer<</Size 4/Root 1 0 R>>\nstartxre'
        b'f\n149\n%EOF\n')
    files = {
        'file': SimpleUploadedFile(
            'file_large.pdf', file.read(), content_type='application/pdf'),}
    response = self.client.post(
        reverse('spirit:comment:file-upload-ajax'),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        data=files)
    res = json.loads(response.content.decode('utf-8'))
    # Stored name is the md5 of the contents (duplication prevention on).
    file_url = os.path.join(
        settings.MEDIA_URL, 'spirit', 'files', str(self.user.pk), "fadcb2389bb2b69b46bc54185de0ae91.pdf"
    ).replace("\\", "/")
    self.assertEqual(res['url'], file_url)
    file_path = os.path.join(
        settings.MEDIA_ROOT, 'spirit', 'files', str(self.user.pk), "fadcb2389bb2b69b46bc54185de0ae91.pdf"
    )
    # The stored file must be byte-identical to the upload.
    with open(file_path, 'rb') as fh:
        file.seek(0)
        self.assertEqual(fh.read(), file.read())
    shutil.rmtree(settings.MEDIA_ROOT)  # cleanup
@override_settings(MEDIA_ROOT=os.path.join(settings.BASE_DIR, 'media_test'))
def test_comment_file_upload_unique(self):
    """Uploads land in a per-user directory under a fresh 32-char uuid
    subfolder, keeping the original file name."""
    user_files_parts = ('spirit', 'files', str(self.user.pk))
    user_files_base = os.path.join(*user_files_parts)
    user_media = os.path.join(settings.MEDIA_ROOT, user_files_base)
    self.assertFalse(os.path.isdir(user_media))
    utils.login(self)
    # sample valid pdf - https://stackoverflow.com/a/17280876
    pdf = io.BytesIO(
        b'%PDF-1.0\n1 0 obj<</Type/Catalog/Pages 2 0 R>>endobj 2 0 obj<</Type/Pages/Kids[3 0 R]/Count 1'
        b'>>endobj 3 0 obj<</Type/Page/MediaBox[0 0 3 3]>>endobj\nxref\n0 4\n0000000000 65535 f\n000000'
        b'0010 00000 n\n0000000053 00000 n\n0000000102 00000 n\ntrailer<</Size 4/Root 1 0 R>>\nstartxre'
        b'f\n149\n%EOF\n')
    file_name = 'foo.pdf'
    file = SimpleUploadedFile(
        file_name, pdf.read(), content_type='application/pdf')
    response = self.client.post(
        reverse('spirit:comment:file-upload-ajax'),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        data={'file': file})
    res = json.loads(response.content.decode('utf-8'))
    self.assertTrue(os.path.isdir(user_media))
    url_parts = res['url'].split('/')
    self.assertEqual(
        url_parts[:-2],
        (settings.MEDIA_URL + '/'.join(user_files_parts)).split('/'))
    self.assertEqual(len(url_parts[-2]), 32)  # uuid
    self.assertEqual(url_parts[-1], file_name)
    self.assertEqual(len(os.listdir(user_media)), 1)
    # Bug fix: the original asserted os.path.join(...) -- a non-empty
    # string, so it always passed. Assert the file actually exists.
    self.assertTrue(os.path.isfile(os.path.join(
        user_media, os.listdir(user_media)[0], file_name)))
    shutil.rmtree(settings.MEDIA_ROOT)  # cleanup
@override_settings(MEDIA_ROOT=os.path.join(settings.BASE_DIR, 'media_test'))
def test_comment_file_upload_unique_no_duplication(self):
    """Without ST_PREVENT_SOME_FILE_DUPLICATION, uploading the same bytes
    twice must yield two distinct URLs (a fresh uuid directory each time)."""
    utils.login(self)
    pdf = io.BytesIO(
        b'%PDF-1.0\n1 0 obj<</Type/Catalog/Pages 2 0 R>>endobj 2 0 obj<</Type/Pages/Kids[3 0 R]/Count 1'
        b'>>endobj 3 0 obj<</Type/Page/MediaBox[0 0 3 3]>>endobj\nxref\n0 4\n0000000000 65535 f\n000000'
        b'0010 00000 n\n0000000053 00000 n\n0000000102 00000 n\ntrailer<</Size 4/Root 1 0 R>>\nstartxre'
        b'f\n149\n%EOF\n')
    file_name = 'foo.pdf'
    file = SimpleUploadedFile(
        file_name, pdf.read(), content_type='application/pdf')
    response = self.client.post(
        reverse('spirit:comment:file-upload-ajax'),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        data={'file': file})
    res = json.loads(response.content.decode('utf-8'))
    first_url = res['url']
    utils.cache_clear()
    # Re-upload the very same file object.
    file.seek(0)
    response = self.client.post(
        reverse('spirit:comment:file-upload-ajax'),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        data={'file': file})
    res = json.loads(response.content.decode('utf-8'))
    second_url = res['url']
    self.assertNotEqual(first_url, second_url)
def test_comment_file_upload_invalid_ext(self):
    """
    comment file upload, invalid file extension
    """
    utils.login(self)
    # sample valid pdf - https://stackoverflow.com/a/17280876
    file = io.BytesIO(
        b'%PDF-1.0\n1 0 obj<</Type/Catalog/Pages 2 0 R>>endobj 2 0 obj<</Type/Pages/Kids[3 0 R]/Count 1'
        b'>>endobj 3 0 obj<</Type/Page/MediaBox[0 0 3 3]>>endobj\nxref\n0 4\n0000000000 65535 f\n000000'
        b'0010 00000 n\n0000000053 00000 n\n0000000102 00000 n\ntrailer<</Size 4/Root 1 0 R>>\nstartxre'
        b'f\n149\n%EOF\n')
    # Valid PDF bytes but a .gif name: the extension check must reject it.
    files = {'file': SimpleUploadedFile('fake.gif', file.read(), content_type='application/pdf'), }
    response = self.client.post(
        reverse('spirit:comment:file-upload-ajax'),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        data=files)
    res = json.loads(response.content.decode('utf-8'))
    self.assertIn('error', res)
    self.assertIn('file', res['error'])
    self.assertEqual(
        res['error']['file'],
        ['Unsupported file extension gif. Supported extensions are doc, docx, pdf.'])
def test_comment_file_upload_invalid_mime(self):
    """
    comment file upload, invalid mime type
    """
    utils.login(self)
    # The bytes are not a real PDF, so content sniffing reports
    # application/octet-stream despite the declared content_type
    # (grounded by the expected error message below).
    file = io.BytesIO(b'BAD\x02D\x01\x00;')
    files = {
        'file': SimpleUploadedFile(
            'file.pdf', file.read(), content_type='application/pdf')}
    response = self.client.post(
        reverse('spirit:comment:file-upload-ajax'),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        data=files)
    res = json.loads(response.content.decode('utf-8'))
    self.assertIn('error', res)
    self.assertIn('file', res['error'])
    self.assertEqual(
        res['error']['file'],
        ['Unsupported file mime type application/octet-stream. '
         'Supported types are application/msword, '
         'application/pdf, '
         'application/vnd.openxmlformats-officedocument.wordprocessingml.document.'])
class CommentModelsTest(TestCase):
    """Unit tests for the Comment model helpers."""

    def setUp(self):
        # Fresh cache plus a user/category/topic fixture per test.
        utils.cache_clear()
        self.user = utils.create_user()
        self.category = utils.create_category()
        self.topic = utils.create_topic(category=self.category, user=self.user)

    def test_comment_increase_modified_count(self):
        """
        Increase modified_count
        """
        c = utils.create_comment(topic=self.topic)
        c.increase_modified_count()
        fresh = Comment.objects.get(pk=c.pk)
        self.assertEqual(fresh.modified_count, 1)

    def test_comment_increase_likes_count(self):
        """
        Increase like_count on comment like
        """
        c = utils.create_comment(topic=self.topic)
        c.increase_likes_count()
        fresh = Comment.objects.get(pk=c.pk)
        self.assertEqual(fresh.likes_count, 1)

    def test_comment_decrease_likes_count(self):
        """
        Decrease like_count on remove comment like
        """
        c = utils.create_comment(likes_count=1, topic=self.topic)
        c.decrease_likes_count()
        fresh = Comment.objects.get(pk=c.pk)
        self.assertEqual(fresh.likes_count, 0)

    def test_comment_create_moderation_action(self):
        """
        Create comment that tells what moderation action was made
        """
        Comment.create_moderation_action(user=self.user, topic=self.topic, action=1)
        found = Comment.objects.filter(
            user=self.user, topic=self.topic, action=1).count()
        self.assertEqual(found, 1)

    def test_comment_get_last_for_topic(self):
        """
        Should return last comment for a given topic
        """
        utils.create_comment(topic=self.topic)
        newest = utils.create_comment(topic=self.topic)
        self.assertEqual(Comment.get_last_for_topic(self.topic.pk), newest)
class CommentTemplateTagTests(TestCase):
    """Tests for the spirit_tags template tags related to comments."""

    def setUp(self):
        # Fresh cache plus a topic holding three comments as shared fixture.
        utils.cache_clear()
        self.user = utils.create_user()
        self.category = utils.create_category()
        self.topic = utils.create_topic(category=self.category, user=self.user)
        utils.create_comment(topic=self.topic)
        utils.create_comment(topic=self.topic)
        utils.create_comment(topic=self.topic)

    def test_render_comments_form(self):
        """
        should display simple comment form
        """
        req = RequestFactory().get('/')
        req.user = self.user
        req_context = Context({'topic': self.topic, 'request': req})
        # Render once through the template engine to ensure the tag parses.
        Template(
            "{% load spirit_tags %}"
            "{% render_comments_form topic %}"
        ).render(req_context)
        # Then call the tag function directly to inspect its context.
        context = render_comments_form(req_context, self.topic)
        self.assertEqual(context['next'], None)
        self.assertIsInstance(context['form'], CommentForm)
        self.assertEqual(context['topic_id'], self.topic.pk)

    def test_get_action_text(self):
        """
        should display action
        """
        out = Template(
            "{% load spirit_tags %}"
            "{% get_comment_action_text 1 %}"
        ).render(Context())
        self.assertNotEqual(out, "")
class CommentFormTest(TestCase):
def setUp(self):
    # Fresh cache plus a user/category/topic fixture shared by every test.
    utils.cache_clear()
    self.user = utils.create_user()
    self.category = utils.create_category()
    self.topic = utils.create_topic(category=self.category)
def test_comment_create(self):
    """A plain comment body should validate."""
    form = CommentForm(data={'comment': 'foo', })
    self.assertEqual(form.is_valid(), True)
def test_comment_markdown(self):
    """Markdown is rendered to HTML, preserving unicode text."""
    # NOTE(review): the expected html below contains a raw <script> tag;
    # upstream Spirit escapes script tags, so these literals may have been
    # entity-decoded in transit -- confirm against the repository.
    form_data = {'comment': '**Spirit unicode: áéíóú** '
                            '<script>alert();</script>', }
    form = CommentForm(data=form_data)
    self.assertEqual(form.is_valid(), True)
    form.user = self.user
    form.topic = self.topic
    comment = form.save()
    self.assertEqual(comment.comment_html, '<p><strong>Spirit unicode: áéíóú</strong> '
                                           '<script>alert();</script></p>')
def test_comment_markdown_no_follow(self):
    """Links get rel="nofollow" for regular users but not for moderators."""
    form_data = {'comment': 'http://foo.com'}
    form = CommentForm(data=form_data)
    self.assertEqual(form.is_valid(), True)
    form.user = self.user
    form.topic = self.topic
    comment = form.save()
    self.assertEqual(comment.comment_html, '<p><a rel="nofollow" href="http://foo.com">http://foo.com</a></p>')
    # Moderators are trusted: saving again without nofollow.
    self.user.st.is_moderator = True
    comment2 = form.save()
    self.assertEqual(comment2.comment_html, '<p><a href="http://foo.com">http://foo.com</a></p>')
def test_comment_get_comment_hash(self):
    """
    Should return the comment hash
    """
    comment_txt = 'foo'
    form_data = {'comment': comment_txt}
    form = CommentForm(data=form_data, topic=self.topic)
    self.assertTrue(form.is_valid())
    # The hash is the md5 of the comment text plus a per-thread suffix,
    # tying the hash to the topic it was posted on.
    comment_txt_to_hash = '{}thread-{}'.format(comment_txt, self.topic.pk)
    self.assertEqual(
        form.get_comment_hash(),
        hashlib.md5(comment_txt_to_hash.encode('utf-8')).hexdigest())
def test_comment_get_comment_hash_from_field(self):
    """
    Should return the comment hash from field
    """
    # An explicitly posted comment_hash wins over the computed one.
    comment_hash = '1' * 32
    form_data = {'comment': 'foo', 'comment_hash': comment_hash}
    form = CommentForm(data=form_data, topic=self.topic)
    self.assertTrue(form.is_valid())
    self.assertEqual(form.get_comment_hash(), comment_hash)
def test_comments_move(self):
    """Moving comments via CommentMoveForm re-parents them to the target
    topic and returns the moved comments."""
    comment = utils.create_comment(user=self.user, topic=self.topic)
    comment2 = utils.create_comment(user=self.user, topic=self.topic)
    to_topic = utils.create_topic(category=self.category)
    form_data = {'topic': to_topic.pk,
                 'comments': [comment.pk, comment2.pk], }
    form = CommentMoveForm(topic=self.topic, data=form_data)
    self.assertEqual(form.is_valid(), True)
    self.assertEqual(form.save(), list(Comment.objects.filter(topic=to_topic)))
@override_settings(ST_PREVENT_SOME_FILE_DUPLICATION=True)
def test_comment_image_upload(self):
    """
    Image upload
    """
    # Minimal valid 1x1 GIF payload.
    content = (
        b'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
        b'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
    img = io.BytesIO(content)
    files = {'image': SimpleUploadedFile('image.gif', img.read(), content_type='image/gif'), }
    form = CommentImageForm(user=self.user, data={}, files=files)
    self.assertTrue(form.is_valid())
    image = form.save()
    # With duplication prevention on, the stored name is the md5 of the
    # content (golden value).
    self.assertEqual(image.name, "bf21c3043d749d5598366c26e7e4ab44.gif")
    image_url = os.path.join(settings.MEDIA_URL, 'spirit', 'images', str(self.user.pk),
                             image.name).replace("\\", "/")
    self.assertEqual(image.url, image_url)
    image_path = os.path.join(settings.MEDIA_ROOT, 'spirit', 'images', str(self.user.pk), image.name)
    self.assertTrue(os.path.isfile(image_path))
    # Stored file must be byte-identical to the upload.
    with open(image_path, "rb") as fh:
        self.assertEqual(fh.read(), content)
    os.remove(image_path)
def test_comment_image_upload_ext_ci(self):
    """Should allow images with mixed case extension"""
    # Minimal valid 1x1 GIF payload.
    gif_bytes = (
        b'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
        b'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
    upload = SimpleUploadedFile(
        'image.GiF', gif_bytes, content_type='image/gif')
    form = CommentImageForm(user=self.user, data={}, files={'image': upload})
    self.assertTrue(form.is_valid())
def test_comment_image_upload_no_extension(self):
    """
    Image upload without extension should raise an error
    """
    # Valid GIF content, but the file name carries no extension.
    img = io.BytesIO(
        b'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
        b'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
    files = {'image': SimpleUploadedFile('image', img.read(), content_type='image/gif'), }
    form = CommentImageForm(user=self.user, data={}, files=files)
    self.assertFalse(form.is_valid())
def test_comment_image_upload_not_allowed_ext(self):
    """
    Image upload with good mime but not allowed extension should raise an error
    """
    gif_bytes = (
        b'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
        b'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
    # gif content disguised with a ".png" name and mime
    form = CommentImageForm(
        user=self.user,
        data={},
        files={
            'image': SimpleUploadedFile(
                'image.png', gif_bytes, content_type='image/png')})
    self.assertFalse(form.is_valid())
@override_settings(ST_ALLOWED_UPLOAD_IMAGE_FORMAT=['png', ])
def test_comment_image_upload_not_allowed_format(self):
    """
    Image upload without allowed mime but good extension should raise an error
    """
    img = io.BytesIO(
        b'GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00'
        b'\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
    # fake png extension
    files = {'image': SimpleUploadedFile('image.png', img.read(), content_type='image/png'), }
    # pass user= like every other upload test in this class (the form
    # accepts it as a keyword; omitting it was inconsistent)
    form = CommentImageForm(user=self.user, data={}, files=files)
    self.assertFalse(form.is_valid())
def test_comment_image_upload_invalid(self):
    """
    Image upload with bad content but good extension should raise an error
    """
    img = io.BytesIO(b'bad\x00;')
    files = {'image': SimpleUploadedFile('image.gif', img.read(), content_type='image/gif'), }
    # pass user= like every other upload test in this class (the form
    # accepts it as a keyword; omitting it was inconsistent)
    form = CommentImageForm(user=self.user, data={}, files=files)
    self.assertFalse(form.is_valid())
def test_comment_file_upload_no_libmagic(self):
"""
Magic lib | |
top genotype
top_genotype = self._to_top(i, genotype, coding, location)
# replace alleles in ped lines only if necessary
new_line[6+i*2], new_line[6+i*2+1] = top_genotype
return new_line
def _check_file_sizes(self, line):
    """Ensure a .ped line has 6 metadata columns plus two alleles per SNP.

    Args:
        line (list): a splitted .ped line.

    Raises:
        PlinkIOException: if the number of genotype columns doesn't match
            the loaded .map data.
    """

    # check genotypes size 2*mapdata (diploidy) + 6 extra columns:
    if len(line) != len(self.mapdata)*2 + 6:
        # BUGFIX: the second string was missing the 'f' prefix, so
        # '{self.pedfile}' was logged literally instead of the path
        logger.critical(
            f"SNPs sizes don't match in '{self.mapfile}' "
            f"and '{self.pedfile}'")
        logger.critical("Please check file contents")
        raise PlinkIOException(".ped line size doesn't match .map size")
def _process_relationship(self, line, sample):
    """Return a copy of *line* with the father/mother columns (indexes 2
    and 3) rewritten to smarter ids, when the relationship can be
    resolved through the database sample object."""

    # never mutate the caller's line
    new_line = line.copy()

    # father column: only touched when the original value is not '0'
    if str(line[2]) != '0':
        father = sample.father_id
        if father:
            new_line[2] = father.smarter_id
        else:
            logger.warning(
                f"Cannot resolve relationship for father {line[2]}")
            new_line[2] = '0'

    # mother column: same policy as the father column
    if str(line[3]) != '0':
        mother = sample.mother_id
        if mother:
            new_line[3] = mother.smarter_id
        else:
            logger.warning(
                f"Cannot resolve relationship for mother {line[3]}")
            new_line[3] = '0'

    return new_line
def _process_pedline(
        self,
        line: list,
        dataset: Dataset,
        coding: str,
        create_samples: bool = False,
        sample_field: str = "original_id"):
    """Convert a raw .ped line into a SMARTER-formatted line.

    Args:
        line (list): a splitted .ped line.
        dataset (Dataset): the dataset this line belongs to.
        coding (str): the source genotype coding.
        create_samples (bool): create the sample if it doesn't exist.
        sample_field (str): attribute used to search samples.

    Returns:
        list: the updated line, or None when the sample is unknown and
        the record must be skipped.
    """

    self._check_file_sizes(line)

    logger.debug(f"Processing {line[:10] + ['...']}")

    # the breed is resolved through the family id (first column)
    try:
        breed = self.get_breed(fid=line[0], dataset=dataset)
    except DoesNotExist as exc:
        logger.error(exc)
        raise SmarterDBException(
            f"Couldn't find breed_code '{line[0]}': {line[:10] + ['...']}"
        )

    # fetch (or create) the sample for this line
    sample = (
        self.get_or_create_sample(line, dataset, breed)
        if create_samples
        else self.get_sample(line, dataset, sample_field)
    )

    # unregistered samples are skipped by the caller
    if not sample:
        return None

    # rewrite breed code and sample id with smarter values
    updated = line.copy()
    updated[0] = breed.code
    updated[1] = sample.smarter_id

    # fix parent columns, then the genotype coding
    updated = self._process_relationship(updated, sample)
    updated = self._process_genotypes(updated, coding)

    # drop filtered SNPs (two allele columns each); iterate from the
    # highest index so earlier positions stay valid while deleting
    for index in sorted(self.filtered, reverse=True):
        del updated[6 + index*2 + 1]
        del updated[6 + index*2]

    return updated
def update_pedfile(
        self,
        outputfile: str,
        dataset: Dataset,
        coding: str,
        create_samples: bool = False,
        sample_field: str = "original_id",
        *args,
        **kwargs):
    """
    Write a new pedfile relying on illumina_top genotypes and coordinates
    stored in smarter database

    Args:
        outputfile (str): write ped to this path (overwrite if exists)
        dataset (Dataset): the dataset we are converting
        coding (str): the source coding (could be 'top', 'ab', 'forward')
        create_samples (bool): create samples if not exist (useful to
            create samples directly from ped file)
        sample_field (str): search samples using this attribute (def.
            'original_id')
    """

    with open(outputfile, "w") as target:
        # plink text files are space separated
        writer = csv.writer(target, delimiter=' ', lineterminator="\n")

        processed = 0

        for line in self.read_genotype_method(
                dataset=dataset, *args, **kwargs):
            # convert the ped line into the desired format
            converted = self._process_pedline(
                line, dataset, coding, create_samples, sample_field)

            if not converted:
                logger.warning(
                    f"Skipping: {line[:10] + ['...']} "
                    f"({int((len(line)-6)/2)} SNPs)"
                )
                continue

            logger.info(
                f"Writing: {converted[:10] + ['...']} "
                f"({int((len(converted)-6)/2)} SNPs)")
            writer.writerow(converted)
            processed += 1

        logger.info(f"Processed {processed} individuals")
# output file block
# input file block
class TextPlinkIO(SmarterMixin):
    """Deal with plink text files (a .map/.ped file pair)."""

    mapfile = None
    pedfile = None

    def __init__(
            self,
            prefix: str = None,
            mapfile: str = None,
            pedfile: str = None,
            species: str = None,
            chip_name: str = None):
        # need to be set in order to write a genotype
        self.read_genotype_method = self.read_pedfile

        # a prefix wins over explicit file paths
        if prefix:
            self.mapfile = prefix + ".map"
            self.pedfile = prefix + ".ped"
        elif mapfile or pedfile:
            self.mapfile = mapfile
            self.pedfile = pedfile

        if species:
            self.species = species

        if chip_name:
            self.chip_name = chip_name

    def read_mapfile(self):
        """Read map data and track informations in memory. Useful to process
        data files"""

        with open(self.mapfile) as handle:
            self.mapdata = [
                MapRecord(*record) for record in get_reader(handle)]

    def read_pedfile(self, *args, **kwargs):
        """Open pedfile for reading return iterator"""

        with open(self.pedfile) as handle:
            yield from get_reader(handle)
# a new class for affymetrix plink files, which are slightly different from
# plink text files
class AffyPlinkIO(TextPlinkIO):
    """A class for affymetrix plink-like files, which are slightly
    different from plink text files."""

    def read_mapfile(self):
        """Read map data and track informations in memory. Useful to process
        data files"""

        self.mapdata = []

        with open(self.mapfile) as handle:
            # affy files mix " " and "\t" as field separators
            for raw in handle:
                fields = re.split('[ \t]+', raw.strip())

                # affy data may have comment lines: skip them
                if fields[0].startswith("#"):
                    continue

                self.mapdata.append(MapRecord(*fields))

    def get_breed(self, fid, *args, **kwargs):
        """Override the default get_breed method"""

        breed = Breed.objects(code=fid, species=self.species).get()
        logger.debug(f"Found breed {breed}")
        return breed

    def read_pedfile(self, fid: str, *args, **kwargs):
        """Open pedfile for reading return iterator"""

        with open(self.pedfile) as handle:
            # affy files mix " " and "\t" as field separators
            for raw in handle:
                # affy data may have comment lines: skip them
                if raw.startswith("#"):
                    logger.info(f"Skipping {raw}")
                    continue

                fields = re.split('[ \t]+', raw.strip())

                # affy ped lacks the standard plink columns: add them
                fields.insert(0, fid)   # FID
                fields.insert(2, '0')   # father
                fields.insert(3, '0')   # mother
                fields.insert(4, '0')   # SEX
                fields.insert(5, -9)    # phenotype

                yield fields
class BinaryPlinkIO(SmarterMixin):
    """Deal with a plink binary fileset (.bed/.bim/.fam) through the
    ``plinkfile`` library, exposing the same interface as the text
    readers (mapdata + a ped-line generator)."""

    # handle returned by plinkfile.open(); set through the prefix setter
    plink_file = None
    # backing attribute for the `prefix` property
    _prefix = None

    def __init__(
            self,
            prefix: str = None,
            species: str = None,
            chip_name: str = None):
        """
        Args:
            prefix (str): the plink fileset prefix (opens the files).
            species (str): the sample species.
            chip_name (str): the chip name.
        """

        # need to be set in order to write a genotype
        self.read_genotype_method = self.read_pedfile

        if prefix:
            self.prefix = prefix

        if species:
            self.species = species

        if chip_name:
            self.chip_name = chip_name

    @property
    def prefix(self):
        """The plink binary fileset prefix."""
        return self._prefix

    @prefix.setter
    def prefix(self, prefix: str):
        # setting the prefix opens the fileset as a side effect
        self._prefix = prefix
        self.plink_file = plinkfile.open(self._prefix)

    def read_mapfile(self):
        """Read map data and track informations in memory. Useful to process
        data files"""

        self.mapdata = list()
        for locus in self.plink_file.get_loci():
            # rebuild a MapRecord from the plinkfile locus; note that
            # plinkfile's `position` is the genetic (cM) position while
            # `bp_position` is the physical one
            record = MapRecord(
                chrom=locus.chromosome,
                name=locus.name,
                position=locus.bp_position,
                cm=locus.position
            )

            self.mapdata.append(record)

    def read_pedfile(self, *args, **kwargs):
        """Open pedfile for reading return iterator

        Yields:
            list: a plink text-style ped line (6 metadata columns plus
            two allele columns per SNP).
        """

        sample_list = self.plink_file.get_samples()
        locus_list = self.plink_file.get_loci()
        # NOTE: materializes every SNP row in memory; rows are per-locus,
        # so genotypes are addressed as snp_arrays[locus][sample]
        snp_arrays = list(self.plink_file)

        def format_sex(value):
            # plink sex codes: 1/2 are valid, anything else is unknown
            if value in [1, 2]:
                return str(value)

            else:
                return "0"

        def convert(genotype, locus):
            # in binary format, allele2 is REF allele1 ALT
            # genotype 3 is the missing value; any other code is invalid
            if genotype == 0:
                return locus.allele1, locus.allele1

            elif genotype == 1:
                return locus.allele2, locus.allele1

            elif genotype == 2:
                return locus.allele2, locus.allele2

            elif genotype == 3:
                return "0", "0"

            else:
                raise CodingException("Genotype %s Not supported" % genotype)

        # determine genotype length
        size = 6 + 2*len(self.mapdata)

        for sample_idx, sample in enumerate(sample_list):
            # this will be the returned row
            line = ["0"] * size

            # set values. I need to set a breed code in order to get a
            # proper ped line
            line[0:6] = [
                sample.fid,
                sample.iid,
                sample.father_iid,
                sample.mother_iid,
                format_sex(sample.sex),
                int(sample.phenotype)
            ]

            # fill the two allele columns for each locus
            for idx, locus in enumerate(locus_list):
                genotype = snp_arrays[idx][sample_idx]
                line[6+idx*2], line[6+idx*2+1] = convert(genotype, locus)

            yield line
class IlluminaReportIO(SmarterMixin):
snpfile = None
report = None
def __init__(
self,
snpfile: str = None,
report: str = None,
species: str = None,
chip_name: str = None):
# need to be set in order to write a genotype
self.read_genotype_method = self.read_reportfile
if snpfile or report:
self.snpfile = snpfile
self.report = report
if species:
self.species = species
if chip_name:
self.chip_name = chip_name
def get_breed(self, fid, *args, **kwargs):
"""Override the default get_breed method"""
breed = Breed.objects(code=fid, species=self.species).get()
logger.debug(f"Found breed {breed}")
return breed
def read_snpfile(self):
"""Read snp data and track informations in memory. Useful to process
data files"""
self.mapdata = list(read_snpList(self.snpfile))
# this will be called when calling read_genotype_method()
def read_reportfile(
self, fid: str = None, dataset: Dataset = None, *args, **kwargs):
"""Open illumina report returns iterator"""
# determine genotype length
size = 6 + 2*len(self.mapdata)
# track sample
last_sample = None
# need to have snp indexes
indexes = [record.name for record in self.mapdata]
# this will be the returned row
line = list()
# this is the snp position index
idx = 0
# tray to returns something like a ped row
for row in read_illuminaRow(self.report):
if row.sample_id != last_sample:
logger.debug(f"Reading sample {row.sample_id}")
# this is not returned if I'm processing the first sample
if last_sample:
yield line
# initialize an empty array
line = ["0"] * size
logger.debug(f"Searching fid for sample '{row.sample_id}'")
# determine fid from sample, if not received as argument
if not fid:
sample = self.SampleSpecies.objects.get(
original_id=row.sample_id,
dataset=dataset
)
breed = sample.breed_code
logger.debug(f"Found breed {breed} from {row.sample_id}")
else:
breed = fid
# set values. I need to set a breed code in order to get a
# proper ped line
| |
22.5056461813379*m.x3335
+ 22.1617279110058*m.x3336 + 29.6585391439873*m.x3337 + 22.8299755509779*m.x3338
+ 20.8671488680664*m.x3339 + 10.1010293848718*m.x3340 + 23.9887690242752*m.x3341
+ 26.3362278937491*m.x3342 + 13.5190583563331*m.x3343 + 26.6306210689289*m.x3344
+ 17.350679114*m.x3345 + 27.7508454104506*m.x3346 + 25.0650799946707*m.x3347
+ 24.8444490192152*m.x3348 + 22.5040069726872*m.x3349 + 14.8109043005821*m.x3350
+ 18.4070940602271*m.x3351 + 19.515433516695*m.x3352 + 28.8035500199335*m.x3353
+ 23.528099956958*m.x3354 + 16.9591287816962*m.x3355 + 25.7325488621728*m.x3356
+ 10.8194728284224*m.x3357 + 17.5949412834287*m.x3358 + 21.4048695577266*m.x3359
+ 21.8525765124647*m.x3360 + 4.91156940862162*m.x3361 + 16.4268462434451*m.x3362
+ 18.6064374562404*m.x3363 + 19.1190313770704*m.x3364 + 20.3457030523758*m.x3365
+ 15.6514798534555*m.x3366 + 30.9156994554431*m.x3367 + 11.8311868347604*m.x3368
+ 19.2302970632256*m.x3369 + 20.624390272055*m.x3370 + 16.2136992910487*m.x3371
+ 30.6957095826052*m.x3372 + 28.9210430969865*m.x3373 + 15.4301175127009*m.x3374
+ 23.5839516949133*m.x3375 + 8.69991519162406*m.x3376 + 35.0030966184198*m.x3377
+ 15.9715164698399*m.x3378 + 23.0364765711615*m.x3379 + 12.0795324056314*m.x3380
+ 9.4121453559878*m.x3381 + 24.2473590364858*m.x3382 + 31.0457410130886*m.x3383
+ 25.3463765735948*m.x3384 + 20.2692154766496*m.x3385 + 22.2962692266436*m.x3386
+ 25.3746013519753*m.x3387 + 7.26129268801056*m.x3388 + 6.21001770347525*m.x3389
+ 14.6802967194087*m.x3390 + 19.0916351385667*m.x3391 + 29.9719897998316*m.x3392
+ 9.19756293331044*m.x3393 + 9.86753141470196*m.x3394 + 31.0841176404303*m.x3395
+ 27.5908336352887*m.x3396 + 34.6681451076726*m.x3397 + 36.5809454861915*m.x3398
+ 24.9138529518998*m.x3399 + 14.8652900760805*m.x3400 + 11.8831888763743*m.x3401
+ 19.0998340958833*m.x3402 + 30.5860785747221*m.x3403 + 15.7522801518445*m.x3404
+ 21.949913898082*m.x3405 + 7.62284633541492*m.x3406 + 37.3434751042506*m.x3407
+ 27.3725638997979*m.x3408 + 32.5907338102533*m.x3409 + 32.0460511309237*m.x3410
+ 28.6296126804323*m.x3411 + 8.71781776930555*m.x3412 + 6.34193522552392*m.x3413
+ 7.51572798635554*m.x3414 + 28.563351268364*m.x3415 + 34.9138117333246*m.x3416
+ 27.1584625442265*m.x3417 + 16.6487515517386*m.x3418 + 24.8952516090068*m.x3419
+ 17.6425930111463*m.x3420 + 16.9187025233669*m.x3421 + 12.6872705464025*m.x3422
+ 17.4275117710324*m.x3423 + 23.3346718383699*m.x3424 + 12.2794303602461*m.x3425
+ 30.4623390000167*m.x3426 + 12.5548568902996*m.x3427 + 23.8220921807891*m.x3428
+ 7.66599328785797*m.x3429 + 25.0155424242791*m.x3430 + 20.7756469883104*m.x3431
+ 24.5402584205053*m.x3432 + 27.8442460605185*m.x3433 + 25.289499220713*m.x3434
+ 14.0150583650048*m.x3435 + 25.2758626891172*m.x3436 + 17.1430490234524*m.x3437
+ 8.19913167543617*m.x3438 + 27.9363634685213*m.x3439 + 19.7190616467888*m.x3440
+ 6.8523437378341*m.x3441 + 32.9803518363597*m.x3442 + 29.8032504143226*m.x3443
+ 30.9030492690411*m.x3444 + 24.1578155453594*m.x3445 + 10.8477523930841*m.x3446
+ 25.0416779305109*m.x3447 + 16.8880417505085*m.x3448 + 6.34940850630375*m.x3449
+ 26.410350584247*m.x3450 + 28.8921522468799*m.x3451 + 38.1906595080285*m.x3452
+ 21.5501013142097*m.x3453 + 12.3649859938775*m.x3454 + 24.3609772182176*m.x3455
+ 14.2411018614193*m.x3456 + 15.4392298050887*m.x3457 + 34.4880194583339*m.x3458
+ 34.0721275232163*m.x3459 + 0.549432931496268*m.x3460 + 26.2443556181775*m.x3461
+ 27.5213255960692*m.x3462 + 22.7121218111638*m.x3463 + 13.453880084141*m.x3464
+ 13.1501458631542*m.x3465 + 18.0296358239836*m.x3466 + 24.8792981636705*m.x3467
+ 11.7578815026441*m.x3468 + 16.301677502193*m.x3469 + 16.9813207408356*m.x3470
+ 3.7042643041658*m.x3471 + 17.8785548019763*m.x3472 + 16.8951361733371*m.x3473
+ 14.7005138194311*m.x3474 + 23.23811718677*m.x3475 + 33.3842353537195*m.x3476
+ 32.2028887129766*m.x3477 + 28.4993033970582*m.x3478 + 25.9908594070708*m.x3479
+ 29.3006723671216*m.x3480 + 9.08028997796549*m.x3481 + 31.4820307604215*m.x3482
+ 13.1626420936248*m.x3483 + 28.233634600167*m.x3484 + 24.5674339941447*m.x3485
+ 26.1822073824204*m.x3486 + 28.3400602520795*m.x3487 + 30.7352585532857*m.x3488
+ 15.4108371290511*m.x3489 + 4.25653550788204*m.x3490 + 29.7815471500944*m.x3491
+ 17.7343371435022*m.x3492 + 16.267406680963*m.x3493 + 17.6934472375878*m.x3494
+ 10.7396848717272*m.x3495 + 29.2033245626832*m.x3496 + 15.9248959280462*m.x3497
+ 26.7413176995901*m.x3498 + 30.2854495637487*m.x3499 + 5.81439496467003*m.x3500
+ 11.3218868740856*m.x3501 + 17.3505559586962*m.x3502 + 21.0557519636491*m.x3503
+ 19.1639427958036*m.x3504 + 8.42465011934031*m.x3505 + 16.7865763872667*m.x3506
+ 19.9707070088884*m.x3507 + 21.1346394143163*m.x3508 + 27.7529251617661*m.x3509
+ 13.5261251704117*m.x3510 + 8.68347967382187*m.x3511 + 24.0721709162968*m.x3512
+ 27.6688085833399*m.x3513 + 18.6657907890105*m.x3514 + 22.8759347832117*m.x3515
+ 7.86508155429802*m.x3516 + 23.9959316932204*m.x3517 + 8.72641584950155*m.x3518
+ 22.6091995449525*m.x3519 + 11.6285646729234*m.x3520 + 13.1914696516261*m.x3521
+ 25.4968742335054*m.x3522 + 28.2387558517034*m.x3523 + 17.6732719386708*m.x3524
+ 31.1762428146141*m.x3525 + 12.0091646814581*m.x3526 + 33.7233500171578*m.x3527
+ 14.2037442433073*m.x3528 + 16.1070726471026*m.x3529 + 4.05430718347128*m.x3530
+ 4.79433358005691*m.x3531 + 22.7167740520233*m.x3532 + 22.8424411919431*m.x3533
+ 31.0079181329726*m.x3534 + 20.28128077279*m.x3535 + 25.1096829850538*m.x3536
+ 26.1150099645593*m.x3537 + 8.85523346434398*m.x3538 + 5.73520775254775*m.x3539
+ 20.1522650300462*m.x3540 + 17.8357911475779*m.x3541 + 21.0002355570145*m.x3542
+ 15.5725070832838*m.x3543 + 11.9039745640208*m.x3544 + 24.2568611337342*m.x3545
+ 24.3171036604083*m.x3546 + 25.8904648706625*m.x3547 + 27.476697460827*m.x3548
+ 31.9169034340419*m.x3549 + 17.8142743390946*m.x3550 + 17.7801259632221*m.x3551
+ 18.924690905095*m.x3552 + 21.4321803346852*m.x3553 + 6.64022933814346*m.x3554
+ 24.5835851056506*m.x3555 + 16.746001505209*m.x3556 + 34.8220256019384*m.x3557
+ 29.8899629508361*m.x3558 + 23.4912395224212*m.x3559 + 26.2935197485145*m.x3560
+ 27.6445663142823*m.x3561 + 17.3124278653281*m.x3562 + 15.3065053063277*m.x3563
+ 9.526217695814*m.x3564 + 19.433288681438*m.x3565 + 30.3456980027586*m.x3566
+ 18.0047048566731*m.x3567 + 24.3997759343051*m.x3568 + 25.8335565199958*m.x3569
+ 10.6639559848468*m.x3570 + 23.8018287277918*m.x3571 + 12.1274016997383*m.x3572
+ 8.36935957706628*m.x3573 + 28.6858628339174*m.x3574 + 13.6527316157525*m.x3575
+ 21.3556452174014*m.x3576 + 20.0426116639944*m.x3577 + 17.1074086280618*m.x3578
+ 16.295892573818*m.x3579 + 19.137691228924*m.x3580 + 23.442041163355*m.x3581
+ 19.2271120641258*m.x3582 + 27.0632817084064*m.x3583 + 19.1995328820521*m.x3584
+ 5.68564686610468*m.x3585 + 32.9962899282973*m.x3586 + 13.690455516746*m.x3587
+ 10.2953969263319*m.x3588 + 29.374462681311*m.x3589 + 28.5980845091457*m.x3590
+ 13.6305409562287*m.x3591 + 23.8270546882396*m.x3592 + 30.6566536691518*m.x3593
+ 28.1085817293928*m.x3594 + 16.5658387410502*m.x3595 + 19.7607436620868*m.x3596
+ 28.8958032515267*m.x3597 + 23.6063777629438*m.x3598 + 15.4027459189989*m.x3599
+ 17.2559919729644*m.x3600 + 29.2693237058077*m.x3601 + 35.6359147915263*m.x3602
+ 25.6025762935972*m.x3603 + 18.0898657326687*m.x3604 + 31.6544258111538*m.x3605
+ 19.6584701140468*m.x3606 + 9.69889600008519*m.x3607 + 25.4608131413479*m.x3608
+ 30.3656948447349*m.x3609 + 9.69969304575302*m.x3610 + 34.3741739524852*m.x3611
+ 19.5795614828115*m.x3612 + 19.2946035353923*m.x3613 + 4.35659928452086*m.x3614
+ 17.1834946918421*m.x3615 + 25.2359054569864*m.x3616 + 18.6213941172461*m.x3617
+ 7.15310319245113*m.x3618 + 9.30414905638241*m.x3619 + 9.84822507253758*m.x3620
+ 22.7494603749982*m.x3621 + 33.6852683776349*m.x3622 + 10.7206878674227*m.x3623
+ 7.47600939076093*m.x3624 + 33.3982195332002*m.x3625 + 43.0036002356241*m.x3626
+ 42.4260736915939*m.x3627 + 39.1230792688528*m.x3628 + 4.26756894852125*m.x3629
+ 48.479617430757*m.x3630 + 30.4861249397169*m.x3631 + 42.7581929880471*m.x3632
+ 8.58565951644984*m.x3633 + 44.9573886426077*m.x3634 + 42.8283167485547*m.x3635
+ 45.9589959082964*m.x3636 + 41.7656621483819*m.x3637 + 52.4283505874577*m.x3638
+ 15.2408075310033*m.x3639 + 23.6201721610966*m.x3640 + 50.5881339040157*m.x3641
+ 15.8997114999657*m.x3642 + 36.308644393856*m.x3643 + 13.0735825795562*m.x3644
+ 14.6837681243094*m.x3645 + 46.2164666339357*m.x3646 + 10.0798985546155*m.x3647
+ 44.5527848694283*m.x3648 + 51.9566589948571*m.x3649 + 17.5665557380742*m.x3650
+ 13.3084779178034*m.x3651 + 22.4602902190764*m.x3652 + 6.25547396604378*m.x3653
+ 17.8888852893371*m.x3654 + 13.4674364640721*m.x3655 + 13.0295823133506*m.x3656
+ 40.8810938051653*m.x3657 + 33.5957458732743*m.x3658 + 48.9277566621566*m.x3659
+ 8.73461643032597*m.x3660 + 30.3097233602182*m.x3661 + 45.7502943103479*m.x3662
+ 48.9814439401171*m.x3663 + 25.9777916179306*m.x3664 + 33.121456475341*m.x3665
+ 14.9547819196004*m.x3666 + 26.0305811334134*m.x3667 + 21.3025044105538*m.x3668
+ 42.2298318215336*m.x3669 + 14.0609153883086*m.x3670 + 29.0804754088057*m.x3671
+ 32.0490346146365*m.x3672 + 42.6386161352802*m.x3673 + 29.7752431659234*m.x3674
+ 52.7962583467669*m.x3675 + 28.5290786203658*m.x3676 + 46.2518837906597*m.x3677
+ 22.7029866490238*m.x3678 + 22.7291324363998*m.x3679 + 18.3449200536791*m.x3680
+ 21.4510644068783*m.x3681 + 37.1130590410124*m.x3682 + 19.7894595671626*m.x3683
+ 51.6889835750601*m.x3684 + 37.2170092078145*m.x3685 + 43.9948298809083*m.x3686
+ 31.9267477996151*m.x3687 + 29.940357705045*m.x3688 + 24.8199402366415*m.x3689
+ 35.6124200355143*m.x3690 + 33.9994755709227*m.x3691 + 3.0162126513931*m.x3692
+ 37.1471524445552*m.x3693 + 27.2489325179065*m.x3694 + 26.4993252995858*m.x3695
+ 35.4213854987766*m.x3696 + 4.28465706125627*m.x3697 + 8.48148372130015*m.x3698
+ 53.3160374104376*m.x3699 + 30.814178891957*m.x3700 + 39.1351886395665*m.x3701
+ 26.5592303757182*m.x3702 + 8.37108725327391*m.x3703 + 16.3235126957316*m.x3704
+ 43.370420825735*m.x3705 + 37.9848776545933*m.x3706 + 44.9225683477235*m.x3707
+ 47.9881037642339*m.x3708 + 5.851541459594*m.x3709 + 31.0802615061477*m.x3710
+ 41.7143518145204*m.x3711 + 38.9536183126907*m.x3712 + 36.7590087399383*m.x3713
+ 30.5837836853177*m.x3714 + 6.33114804993233*m.x3715 + 36.9803131405976*m.x3716
+ 8.02468469716644*m.x3717 + 46.0928773704544*m.x3718 + 42.7326380533529*m.x3719
+ 14.0051410800199*m.x3720 + 40.408202399332*m.x3721 + 30.9411480916703*m.x3722
+ 15.4664548013192*m.x3723 + 49.2564855583797*m.x3724 + 27.0210726287227*m.x3725
+ 5.48294117842575*m.x3726 + 41.7265993473991*m.x3727 + 23.6615462476538*m.x3728
+ 36.3279882487886*m.x3729 + 26.7428411625848*m.x3730 + 33.7015740014783*m.x3731
+ 15.324364285544*m.x3732 + 41.5440441499104*m.x3733 + 26.2203664692194*m.x3734
+ 19.621086188861*m.x3735 + 54.6408861709285*m.x3736 + 28.7647117473386*m.x3737
+ 26.89630705857*m.x3738 + 46.3495047693367*m.x3739 + 50.1375940343381*m.x3740
+ 35.3202837410005*m.x3741 + 9.18687475338085*m.x3742 + 46.7573701143097*m.x3743
+ 39.0753455509701*m.x3744 + 8.47614447349492*m.x3745 + 39.951691468588*m.x3746
+ 48.3324472797214*m.x3747 + 40.003997713904*m.x3748 + 36.1294256909455*m.x3749
+ 8.41975863751315*m.x3750 + 44.9528545965472*m.x3751 + 45.5298511696328*m.x3752
+ 37.4501291135854*m.x3753 + 39.3710167950552*m.x3754 + 53.1735380037326*m.x3755
+ 40.7301360827694*m.x3756 + 24.0486856091001*m.x3757 + 13.7720244019257*m.x3758
+ 38.9637527505718*m.x3759 + 30.9325929199138*m.x3760 + 56.0909399349371*m.x3761
+ 5.57914735649643*m.x3762 + 31.8815687247226*m.x3763 + 17.4562121924215*m.x3764
+ 31.8533254635752*m.x3765 + 46.7999409682991*m.x3766 + 25.5161740129798*m.x3767
+ 25.1046539706632*m.x3768 + 14.9946040653314*m.x3769 + 14.3258347960474*m.x3770
+ 6.57517272117975*m.x3771 + 13.5792848450451*m.x3772 + 11.2945388099441*m.x3773
+ 12.8485605731825*m.x3774 + 15.6070656369817*m.x3775 + 42.5772156105316*m.x3776
+ 41.434187364127*m.x3777 + 37.7222241926287*m.x3778 + 24.0485603260402*m.x3779
+ 37.6038952035594*m.x3780 + 16.7063103227872*m.x3781 + 40.7770320634138*m.x3782
+ 13.2992983689393*m.x3783 + 37.3098119641702*m.x3784 + 33.3539352131246*m.x3785
+ 34.2535936194114*m.x3786 + 37.6818045357024*m.x3787 + 35.839309519507*m.x3788
+ 7.7827151348928*m.x3789 + 13.5983151587161*m.x3790 + 36.885329096502*m.x3791
+ 23.1796826859922*m.x3792 + 24.6119598041144*m.x3793 + 22.0378908387482*m.x3794
+ 5.49731854289179*m.x3795 + 38.208161745865*m.x3796 + 18.9598697083716*m.x3797
+ 35.6050969654423*m.x3798 + 35.5544685741938*m.x3799 + 12.3381365880621*m.x3800
+ 6.86763247458734*m.x3801 + 8.01275671651737*m.x3802 + 16.8645985306536*m.x3803
+ 10.5505667493393*m.x3804 + 8.78715048492048*m.x3805 + 21.1916683726736*m.x3806
+ 21.9457187746669*m.x3807 + 14.5411094055133*m.x3808 + 34.4329756879065*m.x3809
+ 11.6588583738777*m.x3810 + 15.788174919918*m.x3811 + 29.5377164102035*m.x3812
+ 30.2765273075373*m.x3813 + 9.70365925831154*m.x3814 + 15.2590823087148*m.x3815
+ 6.47691515763238*m.x3816 + 31.4949329359085*m.x3817 + 1.34795204696829*m.x3818
+ 30.8971180149345*m.x3819 + 16.5667301478534*m.x3820 + 22.5311414765686*m.x3821
+ 34.0888895902526*m.x3822 + 37.5586322582897*m.x3823 + 10.616658561017*m.x3824
+ 36.6724967003994*m.x3825 + 8.4184995109031*m.x3826 + 43.0645017491814*m.x3827
+ 5.07017435675254*m.x3828 + 24.0331627461742*m.x3829 + 6.60641839394541*m.x3830
+ 4.77178989418264*m.x3831 + 32.0571948688543*m.x3832 + 28.7485443417744*m.x3833
+ 38.2107792492048*m.x3834 + 29.4665767906883*m.x3835 + 33.6589588828245*m.x3836
+ 17.2748785324446*m.x3837 + 16.9866480042968*m.x3838 + 7.01272779638755*m.x3839
+ 15.6142513683706*m.x3840 + 27.1423932347946*m.x3841 + 20.7913858050931*m.x3842
+ 22.0441562725308*m.x3843 + 7.07520176331396*m.x3844 + 31.8285771860143*m.x3845
+ 33.5211790917715*m.x3846 + 24.3998079042467*m.x3847 + 27.86463041645*m.x3848
+ 38.041111828154*m.x3849 + 11.3389612036966*m.x3850 + 24.6374929764805*m.x3851
+ 10.065258683386*m.x3852 + 23.4294454047108*m.x3853 + 12.198109926024*m.x3854
+ 33.1873338743701*m.x3855 + 19.6848732548033*m.x3856 + 44.0672237375261*m.x3857
+ 38.6021382335599*m.x3858 + 24.0678491855856*m.x3859 + 34.5741871954405*m.x3860
+ 36.9782405118457*m.x3861 + 21.6274109062513*m.x3862 + 18.995660872567*m.x3863
+ 17.6048791301599*m.x3864 + 20.7382012517937*m.x3865 + 39.1096227214245*m.x3866
+ 20.0933120956469*m.x3867 + 29.7424201156292*m.x3868 + 34.918636054136*m.x3869
+ 6.17840159648312*m.x3870 + 20.3034823652046*m.x3871 + 21.2117619913827*m.x3872
+ 13.7389547384664*m.x3873 + 36.0704080930922*m.x3874 + 7.1064184063942*m.x3875
+ 22.174983191966*m.x3876 + 25.6835366670731*m.x3877 + 25.1348777530842*m.x3878
+ 16.9129237931977*m.x3879 + 27.5954456953517*m.x3880 + 15.8669644548059*m.x3881
+ 11.4114529103801*m.x3882 + 36.3855810923929*m.x3883 + 27.5337659738113*m.x3884
+ 13.6597576129269*m.x3885 + 38.3278563482037*m.x3886 + 23.0056702585591*m.x3887
+ 6.93398165103871*m.x3888 + 38.3842610281419*m.x3889 + 31.889985313594*m.x3890
+ 19.7440704025177*m.x3891 + 25.69099178716*m.x3892 + 39.7994469427552*m.x3893
+ 37.3542006362702*m.x3894 + 12.3624224787753*m.x3895 + 20.4511973888054*m.x3896
+ 37.0794870282006*m.x3897 + 19.9240728633543*m.x3898 + 17.3018908096017*m.x3899
+ 19.5096084507167*m.x3900 + 38.4821293798511*m.x3901 + 44.8723898645978*m.x3902
+ 18.9056578203709*m.x3903 + 25.0647130284965*m.x3904 + 37.4841881306931*m.x3905
+ 26.8622112957725*m.x3906 + 18.7414096806469*m.x3907 + 28.7047648870619*m.x3908
+ 39.4078507842521*m.x3909 + 13.4691445938068*m.x3910 + 39.1233150267918*m.x3911
+ 15.9284866858513*m.x3912 + 28.5374124831488*m.x3913 + 9.47207022867339*m.x3914
+ 11.9001251421352*m.x3915 + 31.1574311780089*m.x3916 + 26.8798668380829*m.x3917
+ 16.4928507097685*m.x3918 + 5.39160715147705*m.x3919 + 5.98037971893693*m.x3920
+ 10.0071207980333*m.x3921 + 13.0282951627243*m.x3922 + 10.9927748440217*m.x3923
+ 14.1814606437418*m.x3924 + 12.9731647300938*m.x3925 + 46.1999441621018*m.x3926
+ 45.0607373774868*m.x3927 + 41.3483802004327*m.x3928 + 24.5845517737172*m.x3929
+ 40.8071540488742*m.x3930 + 19.9306547586983*m.x3931 + 44.4060936893915*m.x3932
+ 15.2905790873656*m.x3933 + 40.7876428283932*m.x3934 + 36.7347934331285*m.x3935
+ 37.395116239815*m.x3936 + 41.2941055776351*m.x3937 + 37.9942334599268*m.x3938
+ 6.58315825473924*m.x3939 + 17.2150382768716*m.x3940 + 39.6946024956007*m.x3941
+ 26.1080434099102*m.x3942 + 27.8989134838959*m.x3943 + 24.7230686297219*m.x3944
+ 6.93166632597869*m.x3945 + 41.6573145107916*m.x3946 + 21.4101984089589*m.x3947
+ 39.0072169159484*m.x3948 + 37.76850018864*m.x3949 + 15.7739425487876*m.x3950
+ 8.21741809407346*m.x3951 + 4.39269845305387*m.x3952 + 16.7721406269259*m.x3953
+ 7.941120310534*m.x3954 + 11.4704457006814*m.x3955 + 23.9163832827278*m.x3956
+ 23.3804985866274*m.x3957 + 12.6060681086767*m.x3958 + 37.1244785089003*m.x3959
+ 13.1391051301552*m.x3960 + 18.9289695517224*m.x3961 + 31.9029983877523*m.x3962
+ 31.6687378864989*m.x3963 + 6.33492619950236*m.x3964 + 12.6485738281622*m.x3965
+ 9.21489699920594*m.x3966 + 34.8010408834597*m.x3967 + 4.38017061561681*m.x3968
+ 34.1258273807682*m.x3969 + 19.5722482143959*m.x3970 + 26.1504773503779*m.x3971
+ 37.6162339995941*m.x3972 + 41.1478189702338*m.x3973 + 8.73765051321337*m.x3974
+ | |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-22 14:18
from __future__ import unicode_literals
import django.db.models.deletion
import django.utils.timezone
import djgeojson.fields
from django.conf import settings
from django.conf import settings
from django.contrib.sites.models import Site
from django.db import migrations, models
# Functions from the following migrations need manual copying.
# Move them and any dependencies into this file, then update the
# RunPython operations to refer to the local versions:
# mainapp.migrations.0012_site
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('sites', '0002_alter_domain_unique'),
]
operations = [
migrations.CreateModel(
name='AgendaItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=20)),
('position', models.IntegerField()),
('public', models.NullBooleanField()),
],
options={
'ordering': ['position'],
},
),
migrations.CreateModel(
name='Body',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=200)),
('short_name', models.CharField(max_length=50)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Committee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=200)),
('short_name', models.CharField(max_length=50)),
('body', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Body')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CommitteeMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('start', models.DateField()),
('end', models.DateField()),
('role', models.CharField(max_length=200)),
('committee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Committee')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=200)),
('short_name', models.CharField(max_length=50)),
('body', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Body')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('storage_filename', models.CharField(max_length=256)),
('displayed_filename', models.CharField(max_length=256)),
('legal_date', models.DateField()),
('filesize', models.IntegerField()),
('parsed_text', models.TextField(blank=True, null=True)),
('license', models.CharField(blank=True, max_length=200, null=True)),
('description', models.TextField(blank=True, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='LegislativeTerm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=200)),
('start_date', models.DateField()),
('end_date', models.DateField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('displayed_name', models.CharField(max_length=1000)),
('description', models.TextField(blank=True, null=True)),
('is_official', models.BooleanField()),
('osm_id', models.BigIntegerField(blank=True, null=True)),
('geometry', djgeojson.fields.GeometryField(default=None)),
('bodies', models.ManyToManyField(blank=True, to='mainapp.Body')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Meeting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=1000)),
('cancelled', models.BooleanField()),
('start', models.DateTimeField()),
('end', models.DateTimeField()),
('public', models.IntegerField(blank=True, choices=[(0, 'unknown'), (1, 'public'), (2, 'not public'), (3, 'splitted')], default=0)),
('auxiliary_files', models.ManyToManyField(blank=True, related_name='meeting_auxiliary_files', to='mainapp.File')),
('committees', models.ManyToManyField(blank=True, to='mainapp.Committee')),
('invitation', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='meeting_invitation', to='mainapp.File')),
('locations', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mainapp.Location')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MeetingSeries',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=1000)),
('description', models.TextField(blank=True, null=True)),
('is_regular', models.BooleanField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Paper',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('reference_number', models.CharField(max_length=50)),
('name', models.CharField(max_length=200)),
('short_name', models.CharField(max_length=50)),
('description', models.TextField(blank=True, null=True)),
('is_change_request_of', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mainapp.Paper')),
('submitter_committee', models.ManyToManyField(to='mainapp.Committee')),
('submitter_department', models.ManyToManyField(to='mainapp.Department')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ParliamentaryGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=200)),
('start', models.DateField()),
('end', models.DateField()),
('short_name', models.CharField(max_length=20)),
('body', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Body')),
('legislative_terms', models.ManyToManyField(blank=True, to='mainapp.LegislativeTerm')),
('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mainapp.Location')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ParliamentaryGroupMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('start', models.DateField()),
('end', models.DateField()),
('role', models.CharField(max_length=200)),
('parliamentary_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.ParliamentaryGroup')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=100)),
('given_name', models.CharField(max_length=50)),
('family_name', models.CharField(max_length=50)),
('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mainapp.Location')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SearchPoi',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('displayed_name', models.CharField(max_length=1000)),
('osm_id', models.BigIntegerField(blank=True, null=True)),
('osm_amenity', models.CharField(max_length=1000, null=True)),
('geometry', djgeojson.fields.GeometryField(null=True)),
('exclude_from_search', models.BooleanField(default=False)),
('bodies', models.ManyToManyField(blank=True, to='mainapp.Body')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SearchStreet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('displayed_name', models.CharField(max_length=1000)),
('osm_id', models.BigIntegerField(blank=True, null=True)),
('exclude_from_search', models.BooleanField(default=False)),
('bodies', models.ManyToManyField(blank=True, to='mainapp.Body')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='parliamentarygroupmembership',
name='person',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Person'),
),
migrations.AddField(
model_name='paper',
name='submitter_parliamentary_groups',
field=models.ManyToManyField(blank=True, to='mainapp.ParliamentaryGroup'),
),
migrations.AddField(
model_name='paper',
name='submitter_persons',
field=models.ManyToManyField(blank=True, to='mainapp.Person'),
),
migrations.AddField(
model_name='meeting',
name='meeting_series',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mainapp.MeetingSeries'),
),
migrations.AddField(
model_name='meeting',
name='persons',
field=models.ManyToManyField(blank=True, to='mainapp.Person'),
),
migrations.AddField(
model_name='meeting',
name='results_protocol',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='meeting_results_protocol', to='mainapp.File'),
),
migrations.AddField(
model_name='meeting',
name='verbatim_protocol',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='meeting_verbatim_protocol', to='mainapp.File'),
),
migrations.AddField(
model_name='file',
name='locations',
field=models.ManyToManyField(blank=True, to='mainapp.Location'),
),
migrations.AddField(
model_name='file',
name='paper',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mainapp.Paper'),
),
migrations.AddField(
model_name='department',
name='location',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mainapp.Location'),
),
migrations.AddField(
model_name='committeemembership',
name='person',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Person'),
),
migrations.AddField(
model_name='committee',
name='legislative_terms',
field=models.ManyToManyField(blank=True, to='mainapp.LegislativeTerm'),
),
migrations.AddField(
model_name='committee',
name='location',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mainapp.Location'),
),
migrations.AddField(
model_name='body',
name='center',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='body_center', to='mainapp.Location'),
),
migrations.AddField(
model_name='body',
name='legislative_terms',
field=models.ManyToManyField(blank=True, to='mainapp.LegislativeTerm'),
),
migrations.AddField(
model_name='body',
name='outline',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='body_outline', to='mainapp.Location'),
),
migrations.AddField(
model_name='agendaitem',
name='meeting',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Meeting'),
),
migrations.AddField(
model_name='agendaitem',
name='paper',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mainapp.Paper'),
),
migrations.AddField(
model_name='agendaitem',
name='title',
field=models.CharField(default='', max_length=1000),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='agendaitem',
unique_together=set([('meeting', 'position')]),
),
migrations.RemoveField(
model_name='location',
name='displayed_name',
),
migrations.AddField(
model_name='legislativeterm',
name='short_name',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='location',
name='name',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='location',
name='short_name',
field=models.CharField(default='', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='meeting',
name='short_name',
field=models.CharField(default='', max_length=50),
preserve_default=False,
),
migrations.AlterField(
model_name='meeting',
name='name',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='parliamentarygroup',
name='short_name',
field=models.CharField(max_length=50),
),
migrations.CreateModel(
name='DepartmentMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oparl_id', models.CharField(blank=True, max_length=2000, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('start', models.DateField()),
('end', models.DateField()),
('role', models.CharField(max_length=200)),
('department', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Department')),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Person')),
],
options={
'abstract': False,
},
),
migrations.RenameField(
model_name='paper',
old_name='submitter_committee',
new_name='submitter_committees',
),
migrations.AlterField(
model_name='paper',
name='submitter_committees',
field=models.ManyToManyField(blank=True, to='mainapp.Committee'),
),
migrations.RenameField(
model_name='paper',
old_name='submitter_department',
new_name='submitter_departments',
),
migrations.AlterField(
model_name='paper',
name='submitter_departments',
field=models.ManyToManyField(blank=True, to='mainapp.Department'),
),
migrations.AlterModelOptions(
name='agendaitem',
options={'ordering': ['meeting', 'position']},
),
migrations.RenameField(
model_name='paper',
old_name='is_change_request_of',
new_name='change_request_of',
),
migrations.RenameField(
model_name='legislativeterm',
old_name='end_date',
new_name='end',
),
migrations.RenameField(
model_name='legislativeterm',
old_name='start_date',
new_name='start',
),
migrations.AddField(
model_name='committee',
name='end',
field=models.DateField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='committee',
name='start',
field=models.DateField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.RenameField(
model_name='meeting',
old_name='locations',
new_name='location',
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone', models.PositiveIntegerField(blank=True, null=True)),
('completion_level', models.PositiveSmallIntegerField(default=0, verbose_name='Profile completion percentage')),
('email_is_verified', models.BooleanField(default=False, verbose_name='Email is verified')),
('personal_info_is_completed', models.BooleanField(default=False, verbose_name='Personal info completed')),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name_plural': 'User profiles',
'verbose_name': 'User profile',
},
),
migrations.AddField(
model_name='meetingseries',
name='short_name',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AddField(
model_name='file',
name='mime_type',
field=models.CharField(default='FIXME', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='file',
name='name',
field=models.CharField(default='FIXME', max_length=200),
preserve_default=False,
),
migrations.AlterField(
model_name='file',
name='displayed_filename',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='file',
name='storage_filename',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='committee',
name='end',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='parliamentarygroup',
name='end',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='committee',
name='start',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='parliamentarygroup',
name='start',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='meeting',
name='end',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='file',
name='legal_date',
field=models.DateField(blank=True, null=True),
),
migrations.RemoveField(
model_name='meeting',
name='meeting_series',
),
migrations.AlterField(
model_name='body',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='committee',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='committeemembership',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='department',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='departmentmembership',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='file',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='legislativeterm',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='location',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='meeting',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='paper',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.RemoveField(
model_name='parliamentarygroup',
name='body',
),
migrations.AlterField(
model_name='parliamentarygroup',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='parliamentarygroupmembership',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='person',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='searchpoi',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AlterField(
model_name='searchstreet',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.DeleteModel(
name='MeetingSeries',
),
migrations.AlterField(
model_name='departmentmembership',
name='end',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='departmentmembership',
name='start',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='parliamentarygroupmembership',
name='end',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='committeemembership',
name='end',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='paper',
name='legal_date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='paper',
name='main_file',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='paper_main_file', to='mainapp.File'),
),
migrations.AlterField(
model_name='paper',
name='name',
field=models.CharField(max_length=300),
),
migrations.RemoveField(
model_name='file',
name='paper',
),
migrations.AddField(
model_name='agendaitem',
name='oparl_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='paper',
name='files',
field=models.ManyToManyField(blank=True, to='mainapp.File'),
),
migrations.AlterField(
model_name='committeemembership',
name='start',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='parliamentarygroupmembership',
name='start',
field=models.DateField(blank=True, null=True),
),
migrations.CreateModel(
name='UserAlert',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('last_match', models.DateTimeField(null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('search_string', models.TextField(default='')),
],
),
migrations.RemoveField(
model_name='userprofile',
name='completion_level',
),
migrations.RemoveField(
model_name='userprofile',
name='personal_info_is_completed',
),
migrations.RemoveField(
model_name='userprofile',
name='phone',
),
migrations.AlterField(
model_name='agendaitem',
name='title',
field=models.CharField(max_length=2000),
),
migrations.AlterField(
model_name='agendaitem',
name='title',
field=models.TextField(),
| |
row['ctl_rules'].split(';')
else:
rs = [row['ctl_rules']]
for r in rs:
operator = False
if ('>=' in r):
operator = '>='
action = 'greater_than_or_equal_to'
elif ('<=' in r):
operator = '<='
action = 'less_than_or_equal_to'
elif ('==' in r):
operator = '=='
action = 'equal_to'
elif ('=' in r):
operator = '='
action = 'autocalculate'
elif ('!!!' in r):
operator = '!!!'
action = 'exclusive_pair'
else:
log('Syntax error: Cannot compile rule ' + urllib.parse.unquote(r) + ' as it does not have an operator (=, <=, >=, !!!)', 'warn')
if operator:
# Save the rules to process later in the script
a = r.split(operator)
left = a[0]
right = a[1].strip(' ')
if (re.search('[^A-Za-z0-9\_\-\+\%\s\.\,\:\"\/\(\)]', left)):
log('Syntax error: Rule ' + urllib.parse.unquote(r) + ' cannot be compiled as it either uses an illegal operator (=, <=, >= or !!! allowed) or the left expression has illegal characters (letters, numbers, spaces, parens, and certain symbols (".,_-:/+%) allowed)', 'warn')
elif (re.search('[^A-Za-z0-9\_\-\+\%\s\.\,\:\"\/\(\)]', right)):
log('Syntax error: Rule ' + urllib.parse.unquote(r) + ' cannot be compiled as it either uses an illegal operator (=, <=, >= or !!! allowed) or the right expression has illegal characters (letters, numbers, spaces, parens, and certain symbols (".,_-:/+%) allowed)', 'warn')
else:
rules.append([left, action, right, htab['uidsuffix'], uids, ssids, row['sub_priority'], 'ctl_rules row ' + urllib.parse.unquote(r), form['periodType']])
if row['dhis_ind'] and action == 'autocalculate' and left == 'R' and htab['uidsuffix'] != 'xta':
rules.append([left, 'indicator', right, htab['uidsuffix'], uids, ssids, row['sub_priority'], 'indicator for ctl_rules row ' + urllib.parse.unquote(r), row['dhis_ind']])
for x in range(1, 3):
j = 'degs' + str(x)
if row[j]:
d = row[j]
for uid in uids:
if d not in degs:
degs[d] = []
degs[d].append(uid)
subIndicatorsCount += 1
if(subIndicatorsCount > 0):
if(len(htabs) == 1):
outputHTML += indicatorHTML_before.format(name=indicator['name'], frequency=indicator['frequency'], title=htab['type'] + ': ' + indicator['name'])
else:
outputHTML += indicatorHTML_before.format(name=htab['label'] + ': ' + indicator['name'], frequency=indicator['frequency'], title=htab['type'] + ': ' + indicator['name'])
outputHTML += subIndicatorsHTML
outputHTML += indicatorHTML_after.format(title=htab['type'] + ' ' + indicator['name'])
outputHTML += entryAreaHTML_end
outputHTML += minorNavHTML_end
#skipping targets for now
#form['name'].count('Targets') == 0
if not(nofavorites) and form['name'].count('Narratives') == 0 and (not(specificForms) or form['uid'] in formsToOutput):
favoriteType = ''
if form['name'].count('Targets') > 0:
favoriteType = 'Targets'
elif form['name'].count('Results') > 0:
favoriteType = 'Results'
for i in range(len(form['vtabs'])):
vtab = form['vtabs'][i]
for k in range(len(vtab['indicators'])):
indicator = vtab['indicators'][k]
for row in indicator['rows']:
#check to see if the row is anything by AutoCalc
if row['sub_priority'] == 'Required' or row['sub_priority'] == 'Conditional' or row['sub_priority'] == 'Optional':
#Check to see if we should make a favorite for this indicator
#Check to see if this is actually an autocalc row that is mislabled
#print(indicator['name']+" - "+indicator['frequency'])
favoriteFirstDeShortName=getDataElement(list(findDataElementsFromRow(row))[0])['shortName']
#favoriteName="PEPFAR "+ISOQuarterToFYOctQuarter(favoritesISOQuarter)+" "+favoriteType+" "+indicator['name']+" "+getNumeratorDenominator(favoriteFirstDeShortName)+" "+getDisagg(favoriteFirstDeShortName)+" Completeness Review Precursor"
curISOQuarter="FY"+curYear()+"Q"+curQuarter()
favoriteName="PEPFAR "+favoritesISOQuarter+" "+favoriteType+" "+indicator['name']+" "+getNumeratorDenominator(favoriteFirstDeShortName)+" "+getDisagg(favoriteFirstDeShortName)+" Completeness Review Precursor"
favoriteDisplayName=favoriteName
favoriteDescription="This is an auto generated favorite made by MERTIDE, this is not intended to be deployed in its current form, but rather a precursor for PPM staff to create the completeness review pivot."
favoriteId=makeUidHash(favoriteName)
#log(favoriteName)
#no else statement, previous if that checks for a valid frequency would kick out sooner
favoriteISOPeriod=favoritesISOQuarter
favoritePeriodsPreCursor='{"periods": [{"id": ""}]}'
favoritePeriods=json.loads(favoritePeriodsPreCursor)
if favoriteType == 'Targets':
#favoriteISOPeriod=ISOQuarterToISOFYOctTARGET(favoritesISOQuarter)
#HARDCODE IS BAD
favoritePeriods['periods'][0]['id']='2019Oct'
elif indicator['frequency'] == 'Annually':
favoriteISOPeriod=ISOQuarterToISOFYOct(favoritesISOQuarter)
favoritePeriods['periods'][0]['id']=favoriteISOPeriod
elif indicator['frequency'] == 'Semiannually':
favoriteISOPeriod=ISOQuarterToISOSAApr(favoritesISOQuarter)
favoritePeriods['periods'][0]['id']=favoriteISOPeriod
elif indicator['frequency'] == 'Quarterly':
favoriteISOPeriod=favoritesISOQuarter
favoritePeriods['periods'][0]['id']=favoriteISOPeriod
favoriteDataDimensionsItems = {"dataDimensionItems": []}
for de in findDataElementsFromRow(row):
favoriteDataDimensionItemTypeFull = {"dataDimensionItemType": "DATA_ELEMENT","dataElement": {"id": ""}}
favoriteDataDimensionItemTypeFull["dataElement"]["id"] = str(de)
favoriteDataDimensionsItems["dataDimensionItems"].append(favoriteDataDimensionItemTypeFull)
favoriteNew=favoriteStub.copy()
favoriteNew['id'] = favoriteId
favoriteNew['name'] = favoriteName
favoriteNew['displayName'] = favoriteDisplayName
favoriteNew['description'] = favoriteDescription
favoriteNew['dataDimensionItems'] = favoriteDataDimensionsItems['dataDimensionItems']
favoriteNew['periods'] = favoritePeriods['periods']
if favoriteId not in favoritesCreated:
favoritesCreated.append(favoriteId)
if indicator['frequency'] == 'Annually':
favoriteAnnuallyJSON['reportTables'].append(favoriteNew)
if indicator['frequency'] == 'Semiannually':
favoriteSemiannuallyJSON['reportTables'].append(favoriteNew)
if indicator['frequency'] == 'Quarterly':
favoriteQuarterlyJSON['reportTables'].append(favoriteNew)
if not(noconnection):
for rule in rules:
# Get validation rule period
rulePeriod = rule[8]
[left, leftjs, leftnames, ignore] = processMertideExpression(rule[0], rule, False, 'left', uidCache, skipCache, dataElementCache)
[right, rightjs, rightnames, rightMissingValue] = processMertideExpression(rule[2], rule, False, 'right', uidCache, skipCache, dataElementCache)
if right or rightjs:
if rule[1] == 'autocalculate':
dynamicjs += " stella.autocalc(" + str(rightjs) + ", " + str(leftjs) + ");\n"
elif rule[1] == 'indicator':
if rule[3] == 'dsd':
temprule = rule.copy()
temprule[3] = 'xta'
[tempright, ignore1, temprightnames, ignore2] = processMertideExpression(rule[2], temprule, False, 'right', uidCache, skipCache, dataElementCache)
right.extend(tempright)
rightnames.extend(temprightnames)
n = []
for x in right:
if x['optionCombo']:
n.append('#{' + x['id'] + '.' + x['optionCombo'] + '}')
else:
n.append('#{' + x['id'] + '}')
[uid, name] = rule[8].split(';')
exportIndicators.append([name, uid, n, ' + '.join(rightnames)])
else:
if left != [{}] and right != [{}]:
j = {}
j['importance'] = 'MEDIUM'
j['ruleType'] = 'VALIDATION'
j['periodType'] = rulePeriod
j['operator'] = rule[1]
j['leftSide'] = {}
j['rightSide'] = {}
j['leftSide']['dataElements'] = set([])
j['rightSide']['dataElements'] = set([])
for l in left:
j = addExpression(j, 'leftSide', l)
if j['operator'] == 'less_than_or_equal_to' or j['operator'] == 'greater_than_or_equal_to' or j['operator'] == 'equal_to':
if j['operator'] == 'less_than_or_equal_to':
j['name'] = ' <= '
elif j['operator'] == 'greater_than_or_equal_to':
j['name'] = ' >= '
else:
j['name'] = ' == '
if rule[6] in skip:
j['leftSide']['missingValueStrategy'] = 'SKIP_IF_ALL_VALUES_MISSING'
else:
j['leftSide']['missingValueStrategy'] = 'NEVER_SKIP'
if rule[6] not in neverskip:
log('Syntax error: ' + rule[6] + ' not associated with missing value strategy for rule ' + rule[7], 'warn')
j['rightSide']['missingValueStrategy'] = 'NEVER_SKIP'
if rightMissingValue:
j['rightSide']['missingValueStrategy'] = rightMissingValue
else:
log('Error: Unable to identify missing value strategy for right side of rule ' + rule[7] + '; defaulting to NEVER_SKIP', 'warn')
j['rightSide']['missingValueStrategy'] = 'NEVER_SKIP'
elif j['operator'] == 'exclusive_pair':
j['name'] = ' :OR: '
j['leftSide']['missingValueStrategy'] = 'SKIP_IF_ALL_VALUES_MISSING'
j['rightSide']['missingValueStrategy'] = 'SKIP_IF_ALL_VALUES_MISSING'
for r in right:
j = addExpression(j, 'rightSide', r)
j['name'] = ' + '.join(leftnames) + j['name'] + ' + '.join(rightnames)
j['description'] = j['name']
j['instruction'] = j['name']
j['leftSide']['dataElements'] = reformatDataElements(j['leftSide']['dataElements'])
j['rightSide']['dataElements'] = reformatDataElements(j['rightSide']['dataElements'])
h = hashRule(j)
if h:
if h in rulesCache:
j['id'] = rulesCache[h]
else:
if j['operator'] == 'exclusive_pair':
k = copy.deepcopy(j)
k['leftSide']['expression'] = j['rightSide']['expression']
k['rightSide']['expression'] = j['leftSide']['expression']
h = hashRule(k)
if h in rulesCache:
j['id'] = rulesCache[h]
else:
j['id'] = makeUid()
else:
j['id'] = makeUid()
# Shorten the name if it's over 230 chars
j['name'] = j['name'][0:230]
# Shorten the descriptions if they are over 255 chars
j['leftSide']['description'] = j['leftSide']['description'][0:255]
j['rightSide']['description'] = j['rightSide']['description'][0:255]
rulesCache[h] = 'used' + form['uid']
# Only add each rule once to DHIS2
if not(j['id'].startswith('used')):
if h in dhisRulesCache:
modified = False
for key in dhisRulesCache[h]:
if key == 'leftSide' or key == 'rightSide':
for key2 in dhisRulesCache[h][key]:
if (dhisRulesCache[h][key][key2] != j[key][key2] and
(key2 != 'expression' or hashExpression(dhisRulesCache[h][key][key2]) != hashExpression(j[key][key2]))):
modified = True
break
else:
if dhisRulesCache[h][key] != j[key]:
modified = True
if modified:
break
if modified:
modifiedRules.append(j)
else:
oldRules.append(j)
else:
newRules.append(j)
validationRules.append(j)
if j['operator'] == 'exclusive_pair':
dynamicjs += " meany.autoexclude(" + str(leftjs) + ", " + str(rightjs) + ");\n"
else:
if left == [{}]:
log('Syntax error: Left expression appears empty after processing in ' + rule[7], 'warn')
if right == [{}]:
log('Syntax error: Right expression appears empty after processing in ' + rule[7], 'warn')
for i in degs:
try:
req = requests.get(api + 'dataElementGroups.json', cookies=jsessionid,
params = {'paging': False, 'fields': 'name,id', 'filter': 'name:eq:' + i})
groups = form['dataElementGroups'].copy()
groups.append(req.json()['dataElementGroups'][0]['id'] + '_' + i)
for uid in degs[i]:
addDataElement(form, uid, groups, indicator['frequency'])
except Exception as e:
pass
#log('Syntax error: Problem with data element group set ' + i, 'warn')
else:
log('Not connected to DHIS2, so skipping all rules and data element group sets', 'warn')
# Set special JS extras
outputHTML = outputHTML.replace("//#dataValuesLoaded#", '\n' + dynamicjs) #cannot use format here because all the curly braces {} in the javascript and css
#outputHTML = outputHTML.replace("//#formReady#","")
#outputHTML = outputHTML.replace("//#dataValueSaved#","")
outputHTML += open(setuptabsHTML).read()
outputHTML += majorNavHTML_end + '<!-- End Custom DHIS2 Form -->\n\n'
# Create the standalone form preview file
if severe:
log('Skipping form due to severe error: ' + form['name'] + ' - ' + form['uid'])
return
elif specificForms and form['uid'] not in formsToOutput:
log('Skipping form: ' + form['name'] + ' - ' + form['uid'])
else:
log('Creating form: ' + form['name'] + ' - ' + form['periodType'] + ' - ' + form['uid'])
formFile = open(outDir+formFileName+'.html', 'w')
#Creats an offline version of the form for offline specific requests.
offlineOutputHTML = open(standaloneHTMLa).read().replace('MER Results: Facility Based', form['name'])
insertArray = ""
insertArray2 = ""
for key, value in formDataElementList.items():
if value['form'] == form['name']:
insertArray += "dataElementList['"+key+"'] = '"+value['name']+"';\n"
for cocKey, cocValue in masterCategoryOptionComboList.items():
if cocValue['categoryComboID'] == value['categoryCombo']:
insertArray2 += "catOptionCombo['"+cocKey+"'] = '"+cocValue['name']+"';\n"
offlineOutputHTML += outputHTML
insertArrayCombined = insertArray+insertArray2
offlineOutputHTML = offlineOutputHTML.replace('//dataElementListHere', insertArrayCombined)
if form['categoryCombo'] == 'bjDvmb4bfuf':
offlineOutputHTML = re.sub(r'<!--attributeComboStart(.*)attributeComboEnd-->','',offlineOutputHTML, flags=re.S)
formFile.write(offlineOutputHTML)
formFile.write(open(standaloneHTMLb).read())
formFile.close()
# Format the dataset for the ouput XML files
datasetPrefix = open('codechunks/dataset_prefix.xml').read() \
.format(code=codeName(form['shortshortname']), name=form['name'], shortname=form['shortshortname'], uid=form['uid'], periodType=form['periodType'],
categoryCombo=form['categoryCombo'], version=form['version'], approveData=form['approveData'], userGroupAccesses=form['userGroupAccesses'] )
# 2.21 to 2.24
# dataElements = ' <dataElements>\n'
# for id in form['formDataElements']:
# dataElements += ' <dataElement id="' + id + '" />\n'
# dataElements += ' </dataElements>\n'
#2.25 updates
dataElements = ' <dataSetElements>\n'
for id in form['formDataElements']:
dataElements += ' <dataSetElement>\n'
# dataElements += ' <externalAccess>false</externalAccess>\n'
dataElements += ' <dataElement id="' + id + '" />\n'
dataElements += ' <dataSet id="' + form['uid'] + '" />\n'
if id in catComboCache:
dataElements += ' <categoryCombo id="' + catComboCache[id] + '" />\n'
dataElements += ' </dataSetElement>\n'
dataElements += ' </dataSetElements>\n'
# .xml export file
if not(specificForms) or (form['uid'] in formsToOutput):
exportDataEntryForms.append(
' <dataEntryForm id="' + form['formUid'] + '">\n' +
' <name>' +form['name'] + '</name>\n' +
' <externalAccess>false</externalAccess>\n' +
' <style>NORMAL</style>\n' +
' <htmlCode>\n' + escape(outputHTML) + '\n' +
' </htmlCode>\n' +
' <format>2</format>\n' +
' </dataEntryForm>\n')
# Offline forms
exportStaticHTML.append(outputHTML)
thisDatasetPrefix = datasetPrefix
if form['workflow']:
thisDatasetPrefix += ' <workflow id="' + form['workflow'] + '" />\n'
exportDatasets.append(thisDatasetPrefix +
' <dataEntryForm id="' + form['formUid'] + '" />\n' +
dataElements +
' </dataSet>\n')
# Remove white space from all keys in a row
def stripWhiteSpace(row):
    """Trim leading/trailing whitespace from every string value of *row*, in place."""
    for key, value in row.items():
        if isinstance(value, str):
            row[key] = value.strip()
# Process a control .CSV file.
# The lines of the .CSV file are assembled into a structure of dictionaries
# and lists for each form as follows:
#
# form: name, uid, vtabs
# vtab: name, indicators
# indicator: name, frequency, rows (SUB / AUTO / DESC)
def doControlFile(controlFileName):
with | |
<filename>blackswan/visualisation.py
# encoding: utf8
import os
from os.path import join
from tempfile import mkstemp
from netCDF4 import Dataset
from datetime import datetime, date
import numpy as np
import logging
from matplotlib import use
use('Agg') # use this if no xserver is available
from matplotlib import pyplot as plt
from matplotlib.colors import Normalize
from cartopy import config as cartopy_config
from cartopy.util import add_cyclic_point
import cartopy.crs as ccrs
from blackswan import utils
LOGGER = logging.getLogger("PYWPS")
class MidpointNormalize(Normalize):
    """Colour normalisation that pins *midpoint* to the centre (0.5) of the colormap.

    Values are mapped piecewise-linearly: vmin -> 0, midpoint -> 0.5, vmax -> 1
    (the classic matplotlib midpoint-normalize recipe).
    """

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # NOTE(review): *clip* is accepted for API compatibility but ignored here.
        anchor_values = [self.vmin, self.midpoint, self.vmax]
        anchor_positions = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, anchor_values, anchor_positions))
def fig2plot(fig, file_extension='png', output_dir='.', bbox_inches='tight', dpi=300, facecolor='w', edgecolor='k', figsize=(20, 10)):
    '''saving a matplotlib figure to a graphic

    :param fig: matplotlib figure object
    :param file_extension: file extension of the graphic (default='png')
    :param output_dir: directory of output plot
    :param bbox_inches: bounding-box option forwarded to ``fig.savefig``
    :param dpi: resolution forwarded to ``fig.savefig``
    :param facecolor: figure face colour forwarded to ``fig.savefig``
    :param edgecolor: figure edge colour forwarded to ``fig.savefig``
    :param figsize: kept for backward compatibility and forwarded unchanged
                    (NOTE(review): ``savefig`` does not document a figsize
                    kwarg -- the figure size is fixed at figure creation;
                    confirm whether this argument has any effect)

    :return str: path to graphic
    '''
    # mkstemp() returns an open OS-level file descriptor in addition to the
    # path; the original code discarded it, leaking one fd per call. Close
    # it explicitly -- savefig reopens the path by name.
    fd, graphic = mkstemp(dir=output_dir, suffix='.%s' % file_extension)
    os.close(fd)
    fig.savefig(graphic, bbox_inches=bbox_inches, dpi=dpi, facecolor=facecolor, edgecolor=edgecolor, figsize=figsize)
    return graphic
def plot_extend(resource, file_extension='png'):
    """
    plots the extent (domain) of the values stored in a netCDF file

    :param resource: path to netCDF file
    :param file_extension: file format of the graphic. if file_extension=None a matplotlib figure will be returned
    :return graphic: graphic in specified format (or the figure object if file_extension is None)
    """
    import matplotlib.patches as mpatches
    lats, lons = utils.get_coordinates(resource, unrotate=True)
    # box_top = 45
    # x, y = [-20, -20, 45, 45, -44], [-45, box_top, box_top, -45, -45]
    # Corner points of the data domain in (lon, lat) order:
    # lower-left, lower-right, upper-right, upper-left.
    xy = np.array([[np.min(lons), np.min(lats)],
                   [np.max(lons), np.min(lats)],
                   [np.max(lons), np.max(lats)],
                   [np.min(lons), np.max(lats)]])
    fig = plt.figure(figsize=(20, 10), dpi=600, facecolor='w', edgecolor='k')
    projection = ccrs.Robinson()
    # ccrs.Orthographic(central_longitude=np.mean(xy[:, 0]),
    #                   central_latitude=np.mean(xy[:, 1]),
    #                   globe=None)  # Robinson()
    ax = plt.axes(projection=projection)
    ax.stock_img()
    ax.coastlines()
    # Semi-transparent polygon marking the data domain on the world map.
    ax.add_patch(mpatches.Polygon(xy, closed=True, transform=ccrs.PlateCarree(), color='coral', alpha=0.6))
    # ccrs.Geodetic()
    ax.gridlines()
    # NOTE(review): the module selects the 'Agg' backend at import time, so
    # plt.show() is a no-op here; presumably left over from interactive use.
    plt.show()
    if file_extension is None:
        map_graphic = fig
    else:
        map_graphic = fig2plot(fig=fig, file_extension=file_extension)
    plt.close()
    return map_graphic
def plot_polygons(regions, file_extension='png'):
    """
    extract the polygon coordinates and plot them on a worldmap

    :param regions: list of ISO A3 abbreviations for polygons (a single
                    string is also accepted and wrapped in a list)
    :param file_extension: file format of the output graphic (default 'png')

    :return str: path to the output graphic (map_graphic.png)
    """
    from cartopy.io.shapereader import Reader
    from cartopy.feature import ShapelyFeature
    from numpy import mean, append
    from blackswan import config
    DIR_SHP = config.shapefiles_path()
    if type(regions) == str:
        regions = list([regions])
    fname = join(DIR_SHP, "countries.shp")
    geos = Reader(fname).geometries()
    records = Reader(fname).records()
    central_latitude = []
    central_longitude = []
    # First pass: collect centroids of the requested countries so the
    # orthographic projection can be centred on them.
    for r in records:
        # Fix: next(geos) instead of the Python-2-only generator method
        # geos.next(); behaviour is identical and it also works on Python 3.
        geo = next(geos)
        if r.attributes['ISO_A3'] in regions:
            x, y = geo.centroid.coords.xy
            central_longitude.append(x[0])
            central_latitude.append(y[0])
    fig = plt.figure(figsize=(20, 10))
    projection = ccrs.Orthographic(central_longitude=mean(central_longitude),
                                   central_latitude=mean(central_latitude),
                                   globe=None)  # Robinson()
    ax = plt.axes(projection=projection)
    # Second pass: the generators above are exhausted, so re-open the
    # shapefile to draw the country polygons.
    geos = Reader(fname).geometries()
    records = Reader(fname).records()
    for r in records:
        geo = next(geos)
        if r.attributes['ISO_A3'] in regions:
            shape_feature = ShapelyFeature(geo, ccrs.PlateCarree(), edgecolor='black', color='coral')
            ax.add_feature(shape_feature)
    ax.coastlines()
    ax.gridlines()
    ax.stock_img()
    # ax.set_global()
    map_graphic = fig2plot(fig=fig, file_extension=file_extension)
    plt.close()
    return map_graphic
def factsheetbrewer(png_region=None, png_spaghetti=None, png_uncertainty=None, png_robustness=None):
    """
    Put graphics into the climate fact sheet template to generate the final climate fact sheet

    :param png_region: World map graphic with countries polygons.
    :param png_uncertainty: Graphic showing a timeseries with fieldmean values and corresponding uncertainty
    :param png_spaghetti: Graphic showing each dataset as a single timeseries
    :param png_robustness: Map of the signal change including hashes and dots for robustness values

    :return str: path to a pdf with fillable text boxes for interpretation text
                 (falls back to the empty template on failure)
    """
    from PyPDF2 import PdfFileWriter, PdfFileReader
    from reportlab.pdfgen import canvas
    from blackswan.config import data_path
    try:
        # Each graphic is drawn onto its own single-page pdf at the position
        # where it belongs on the template. A failed conversion is logged and
        # the corresponding overlay is skipped during the merge below.
        try:
            _, pdf_region = mkstemp(dir='.', suffix='.pdf')
            c = canvas.Canvas(pdf_region)
            c.drawImage(png_region, 340, 490, width=130, height=130)  # , mask=None, preserveAspectRatio=False)
            c.save()
            pfr_region = PdfFileReader(open(pdf_region, 'rb'))
        except Exception:
            LOGGER.exception('failed to convert png to pdf')
        try:
            _, pdf_uncertainty = mkstemp(dir='.', suffix='.pdf')
            c = canvas.Canvas(pdf_uncertainty)
            c.drawImage(png_uncertainty, 20, 350, width=250, height=130)  # , mask=None, preserveAspectRatio=False)
            c.save()
            pfr_uncertainty = PdfFileReader(open(pdf_uncertainty, 'rb'))
        except Exception:
            LOGGER.exception('failed to convert png to pdf')
        try:
            _, pdf_spaghetti = mkstemp(dir='.', suffix='.pdf')
            c = canvas.Canvas(pdf_spaghetti)
            c.drawImage(png_spaghetti, 280, 350, width=250, height=130)  # , mask=None, preserveAspectRatio=False)
            c.save()
            pfr_spagetthi = PdfFileReader(open(pdf_spaghetti, 'rb'))
        except Exception:
            LOGGER.exception('failed to convert png to pdf')
        try:
            _, pdf_robustness = mkstemp(dir='.', suffix='.pdf')
            c = canvas.Canvas(pdf_robustness)
            c.drawImage(png_robustness, 30, 100, width=200, height=170)  # , mask=None, preserveAspectRatio=False)
            c.save()
            pfr_robustness = PdfFileReader(open(pdf_robustness, 'rb'))
        except Exception:
            LOGGER.exception('failed to convert png to pdf')
        output_file = PdfFileWriter()
        # Fix: the Python-2-only builtin file() was replaced by open(),
        # which behaves identically here and also works on Python 3.
        pfr_template = PdfFileReader(open(data_path() + '/pdf/climatefactsheettemplate.pdf', 'rb'))
        LOGGER.debug('template: %s' % pfr_template)
        page_count = pfr_template.getNumPages()
        # Merge each successfully converted overlay onto every template page;
        # missing overlays (unbound pfr_* names) are caught per merge.
        for page_number in range(page_count):
            LOGGER.debug("Plotting png to {} of {}".format(page_number, page_count))
            input_page = pfr_template.getPage(page_number)
            try:
                input_page.mergePage(pfr_region.getPage(0))
            except Exception:
                LOGGER.warn('failed to merge courtry map')
            try:
                input_page.mergePage(pfr_uncertainty.getPage(0))
            except Exception:
                LOGGER.warn('failed to merge uncertainty plot')
            try:
                input_page.mergePage(pfr_spagetthi.getPage(0))
            except Exception:
                LOGGER.warn('failed to merge spaghetti plot')
            try:
                input_page.mergePage(pfr_robustness.getPage(0))
            except Exception:
                LOGGER.warn('failed to merge robustness plot')
            try:
                output_file.addPage(input_page)
            except Exception:
                LOGGER.warn('failed to add page to output pdf')
        try:
            _, climatefactsheet = mkstemp(dir='.', suffix='.pdf')
            with open(climatefactsheet, 'wb') as outputStream:
                output_file.write(outputStream)
            LOGGER.info('sucessfully brewed the demanded factsheet')
        except Exception:
            LOGGER.exception('failed write filled template to pdf. empty template will be set as output')
            climatefactsheet = data_path() + '/pdf/climatefactsheettemplate.pdf'
    except Exception:
        LOGGER.exception("failed to brew the factsheet, empty template will be set as output")
    return climatefactsheet
def spaghetti(resouces, variable=None, title=None, file_extension='png'):
    """
    creates a png file containing the appropriate spaghetti plot as a field mean of the values.

    :param resouces: list of files containing the same variable
    :param variable: variable to be visualised. If None (default), variable will be detected
    :param title: string to be used as title
    :param file_extension: file format of the graphic (default 'png')

    :returns str: path to graphic file
    :raises Exception: if preparation or the plot itself fails
    """
    from blackswan.calculation import fieldmean
    try:
        fig = plt.figure(figsize=(20, 10), dpi=600, facecolor='w', edgecolor='k')
        LOGGER.debug('Start visualisation spaghetti plot')
        # === prepare environment
        if type(resouces) != list:
            resouces = [resouces]
        if variable is None:
            variable = utils.get_variable(resouces[0])
        if title is None:
            title = "Field mean of %s " % variable
        LOGGER.info('plot values preparation done')
    except Exception:
        msg = "plot values preparation failed"
        LOGGER.exception(msg)
        raise Exception(msg)
    try:
        for c, nc in enumerate(resouces):
            # One line per dataset; a single failing file is logged and
            # skipped rather than aborting the whole plot.
            try:
                dt = utils.get_time(nc)  # timestamps for the x-axis
                ts = fieldmean(nc)
                plt.plot(dt, ts)
            except Exception:
                msg = "spaghetti plot failed for %s " % nc
                LOGGER.exception(msg)
        plt.title(title, fontsize=20)
        plt.grid()
        output_png = fig2plot(fig=fig, file_extension=file_extension)
        plt.close()
        LOGGER.info('timeseries spaghetti plot done for %s with %s lines.' % (variable, c))
    except Exception:
        msg = 'matplotlib spaghetti plot failed'
        LOGGER.exception(msg)
        # Fail loudly: previously execution fell through to `return output_png`,
        # which surfaced as a confusing NameError on the unbound variable.
        raise Exception(msg)
    return output_png
def uncertainty(resouces, variable=None, ylim=None, title=None, file_extension='png', window=None):
"""
creates a png file containing the appropriate uncertainty plot.
:param resouces: list of files containing the same variable
:param variable: variable to be visualised. If None (default), variable will be detected
:param title: string to be used as title
:param window: windowsize of the rolling mean
:returns str: path/to/file.png
"""
LOGGER.debug('Start visualisation uncertainty plot')
import pandas as pd
import numpy as np
from os.path import basename
from blackswan.utils import get_time, sort_by_filename
from blackswan.calculation import fieldmean
from blackswan.metadata import get_frequency
# === prepare invironment
if type(resouces) == str:
resouces = list([resouces])
if variable is None:
variable = utils.get_variable(resouces[0])
if title is None:
title = "Field mean of %s " % variable
try:
fig = plt.figure(figsize=(20, 10), facecolor='w', edgecolor='k') # dpi=600,
# variable = utils.get_variable(resouces[0])
df = pd.DataFrame()
LOGGER.info('variable %s found in resources.' % variable)
datasets = sort_by_filename(resouces, historical_concatination=True)
for key in datasets.keys():
try:
data = fieldmean(datasets[key]) # get_values(f)
ts = get_time(datasets[key])
ds = pd.Series(data=data, index=ts, name=key)
# ds_yr = ds.resample('12M', ).mean() # yearly mean loffset='6M'
df[key] = ds
except Exception:
LOGGER.exception('failed to calculate timeseries for %s ' % (key))
frq = get_frequency(resouces[0])
print frq
if window is None:
if frq == 'day':
window = 10951
elif frq == 'man':
window = 359
elif frq == 'sem':
window = 119
elif frq == 'yr':
window = 30
else:
LOGGER.debug('frequency %s is not included' % frq)
window = 30
if len(df.index.values) >= window * 2:
# TODO: calculate windowsize according to timestapms (day,mon,yr ... with get_frequency)
df_smooth = df.rolling(window=window, center=True).mean()
LOGGER.info('rolling mean calculated for all input data')
else:
df_smooth = df
LOGGER.debug('timeseries too short for moving mean')
fig.text(0.95, 0.05, '!!! timeseries too short for moving mean over 30years !!!',
fontsize=20, color='red',
ha='right', va='bottom', alpha=0.5)
try:
rmean = df_smooth.quantile([0.5], axis=1,) # df_smooth.median(axis=1)
# skipna=False quantile([0.5], axis=1, numeric_only=False )
q05 = df_smooth.quantile([0.10], axis=1,) # numeric_only=False)
q33 = df_smooth.quantile([0.33], axis=1,) # numeric_only=False)
q66 = df_smooth.quantile([0.66], axis=1, ) # numeric_only=False)
q95 = df_smooth.quantile([0.90], axis=1, ) # numeric_only=False)
LOGGER.info('quantile calculated for all input data')
except Exception:
LOGGER.exception('failed to calculate quantiles')
try:
plt.fill_between(df_smooth.index.values, np.squeeze(q05.values), np.squeeze(q95.values),
alpha=0.5, color='grey')
plt.fill_between(df_smooth.index.values, np.squeeze(q33.values), np.squeeze(q66.values),
alpha=0.5, color='grey')
plt.plot(df_smooth.index.values, np.squeeze(rmean.values), c='r', | |
###########################################
# VCZ calibration (coarse landscape) FLUX dance 1
###########################################
# Regenerate the platform config with the pulse timings used for this run.
file_cfg = gc.generate_config(in_filename=input_file,
                              out_filename=config_fn,
                              mw_pulse_duration=20,
                              ro_duration=2200,
                              flux_pulse_duration=60,
                              init_duration=200000)
# set CZ parameters
# AWG channel amplitudes plus CZ dac amplitudes for the three gated pairs
# (X3-D8, D6-X2, X1-D2): one side of each pair is driven at 0.5, the other at 0.
flux_lm_X3.cfg_awg_channel_amplitude(0.28500000000000003)
flux_lm_X3.vcz_amp_dac_at_11_02_NE(.5)
flux_lm_D8.vcz_amp_dac_at_11_02_SW(0)
flux_lm_D6.cfg_awg_channel_amplitude(0.19302332066356387)
flux_lm_D6.vcz_amp_dac_at_11_02_SW(.5)
flux_lm_X2.vcz_amp_dac_at_11_02_NE(0)
flux_lm_X1.cfg_awg_channel_amplitude(0.25166666666666665)
flux_lm_X1.vcz_amp_dac_at_11_02_NE(.5)
flux_lm_D2.vcz_amp_dac_at_11_02_SW(0)
# Set park parameters
# Double-sided parking pulses at amplitude 0.5 on the four parked qubits.
flux_lm_D7.cfg_awg_channel_amplitude(.21)
flux_lm_Z4.cfg_awg_channel_amplitude(.19)
flux_lm_Z1.cfg_awg_channel_amplitude(.21)
flux_lm_D1.cfg_awg_channel_amplitude(.235)
flux_lm_D7.park_amp(.5)
flux_lm_Z4.park_amp(.5)
flux_lm_Z1.park_amp(.5)
flux_lm_D1.park_amp(.5)
flux_lm_D7.park_double_sided(True)
flux_lm_Z4.park_double_sided(True)
flux_lm_Z1.park_double_sided(True)
flux_lm_D1.park_double_sided(True)
# Readout: averaged (non-digitized) acquisition with optimal weights.
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=['D7', 'Z4', 'Z1', 'D1'])
device.prepare_for_timedomain(qubits=['X3', 'D8', 'D6', 'X2', 'X1', 'D2'])
# Pairs gated by the 'flux-dance-1' codeword below, and the parked qubits.
pairs = [['X3', 'D8'], ['D6', 'X2'], ['X1', 'D2']]
parked_qubits = ['D7', 'Z1', 'Z4', 'D1']
from pycqed.measurement import cz_cost_functions as cf
# Detector returning, per gated pair, the CZ cost function value, the
# conditional-phase error and the missing fraction.
conv_cost_det = det.Function_Detector(
    get_function=cf.conventional_CZ_cost_func2,
    msmt_kw={'device': device,
             'MC': MC,
             'pairs' : pairs,
             'parked_qbs': parked_qubits,
             'prepare_for_timedomain': False,
             # Fix: 'disable_metadata' appeared twice in this dict literal
             # (with the same value); the duplicate entry was removed.
             'disable_metadata': True,
             'extract_only': True,
             'flux_codeword': 'flux-dance-1',
             'parked_qubit_seq': 'ground',
             'include_single_qubit_phase_in_cost': False,
             'target_single_qubit_phase': 360,
             'include_leakage_in_cost': True,
             'target_phase': 180,
             'cond_phase_weight_factor': 2},
    value_names=[f'cost_function_val_{pair}' for pair in pairs ] +
                [f'delta_phi_{pair}' for pair in pairs ] +
                [f'missing_fraction_{pair}' for pair in pairs ],
    result_keys=[f'cost_function_val_{pair}' for pair in pairs ] +
                [f'delta_phi_{pair}' for pair in pairs ] +
                [f'missing_fraction_{pair}' for pair in pairs ],
    value_units=['a.u.' for pair in pairs ] +
                ['deg' for pair in pairs ] +
                ['%' for pair in pairs ])
# Inner sweep axis: square-pulse amplitude scaling for each gated pair.
Sw_functions = [ swf.FLsweep(flux_lm_X3, flux_lm_X3.vcz_amp_sq_NE, 'cz_NE'),
                 swf.FLsweep(flux_lm_D6, flux_lm_D6.vcz_amp_sq_SW, 'cz_SW'),
                 swf.FLsweep(flux_lm_X1, flux_lm_X1.vcz_amp_sq_NE, 'cz_NE') ]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios= [1.2/3, 1, 1.2/3])
# Outer sweep axis: t_middle sweep over all six gated flux lutmans, with the
# parked lutmans and a per-pair speed limit.
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [flux_lm_X3, flux_lm_D8,
                                           flux_lm_D6, flux_lm_X2,
                                           flux_lm_X1, flux_lm_D2],
                               which_gate= ['NE', 'SW',
                                            'SW', 'NE',
                                            'NE', 'SW'],
                               fl_lm_park = [flux_lm_Z1, flux_lm_D7, flux_lm_Z4, flux_lm_D1],
                               speed_limit = [2.9583333333333334e-08, 2.75e-08, 2.75e-08])
# swf2.set_parameter(5)
# plt.plot(flux_lm_D5._wave_dict['cz_SE'], label='D5')
# plt.plot(flux_lm_X3._wave_dict['cz_NW'], label='X3')
# plt.plot(flux_lm_X2._wave_dict['cz_NW'], label='X2')
# plt.plot(flux_lm_D7._wave_dict['cz_SE'], label='D7')
# plt.plot(flux_lm_Z1._wave_dict['park'], label='Z1')
# plt.plot(flux_lm_Z1._wave_dict['park'], label='Z4')
# plt.plot(flux_lm_Z1._wave_dict['park'], label='D8')
# plt.axhline(.5, color='k', ls='--', alpha=.25)
# plt.legend()
# plt.show()
# Run the 2D (amplitude x t_middle) coarse landscape and analyse it.
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.95, 1.05, 21))
nested_MC.set_sweep_points_2D(np.linspace(0, 10, 11)[::1])
nested_MC.cfg_clipping_mode(True)
label = 'VCZ_2D_{}_tm{}'.format(pairs, ' sweep')
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
    ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
    print('Failed Analysis')
###########################################
# VCZ calibration (coarse landscape) FLUX dance 2
###########################################
# Regenerate the platform config with the pulse timings used for this run.
file_cfg = gc.generate_config(in_filename=input_file,
                              out_filename=config_fn,
                              mw_pulse_duration=20,
                              ro_duration=2200,
                              flux_pulse_duration=60,
                              init_duration=200000)
# set CZ parameters
# AWG channel amplitudes plus CZ dac amplitudes for the three gated pairs
# (X3-D7, D5-X2, X1-D1): one side of each pair is driven at 0.5, the other at 0.
flux_lm_X3.cfg_awg_channel_amplitude(0.3242724012703858)
flux_lm_X3.vcz_amp_dac_at_11_02_NW(.5)
flux_lm_D7.vcz_amp_dac_at_11_02_SE(0)
flux_lm_D5.cfg_awg_channel_amplitude(0.16687470158591108)
flux_lm_D5.vcz_amp_dac_at_11_02_SE(.5)
flux_lm_X2.vcz_amp_dac_at_11_02_NW(0)
flux_lm_X1.cfg_awg_channel_amplitude(0.27975182997855896)
flux_lm_X1.vcz_amp_dac_at_11_02_NW(.5)
flux_lm_D1.vcz_amp_dac_at_11_02_SE(0)
# Set park parameters
# Double-sided parking pulses at amplitude 0.5 on the four parked qubits.
flux_lm_D8.cfg_awg_channel_amplitude(.22)
flux_lm_Z4.cfg_awg_channel_amplitude(.19)
flux_lm_Z1.cfg_awg_channel_amplitude(.21)
flux_lm_D2.cfg_awg_channel_amplitude(.225)
flux_lm_D8.park_amp(.5)
flux_lm_Z4.park_amp(.5)
flux_lm_Z1.park_amp(.5)
flux_lm_D2.park_amp(.5)
flux_lm_D8.park_double_sided(True)
flux_lm_Z4.park_double_sided(True)
flux_lm_Z1.park_double_sided(True)
flux_lm_D2.park_double_sided(True)
# Readout: averaged (non-digitized) acquisition with optimal weights.
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=['D8', 'Z4', 'Z1', 'D2'])
device.prepare_for_timedomain(qubits=['X3', 'D7', 'D5', 'X2', 'X1', 'D1'])
# Pairs gated by the 'flux-dance-2' codeword below, and the parked qubits.
pairs = [['X3', 'D7'], ['D5', 'X2'], ['X1', 'D1']]
parked_qubits = ['D8', 'Z1', 'Z4', 'D2']
from pycqed.measurement import cz_cost_functions as cf
# Detector returning, per gated pair, the CZ cost function value, the
# conditional-phase error and the missing fraction.
conv_cost_det = det.Function_Detector(
    get_function=cf.conventional_CZ_cost_func2,
    msmt_kw={'device': device,
             'MC': MC,
             'pairs' : pairs,
             'parked_qbs': parked_qubits,
             'prepare_for_timedomain': False,
             # Fix: 'disable_metadata' appeared twice in this dict literal
             # (with the same value); the duplicate entry was removed.
             'disable_metadata': True,
             'extract_only': True,
             'flux_codeword': 'flux-dance-2',
             'parked_qubit_seq': 'ground',
             'include_single_qubit_phase_in_cost': False,
             'target_single_qubit_phase': 360,
             'include_leakage_in_cost': True,
             'target_phase': 180,
             'cond_phase_weight_factor': 2},
    value_names=[f'cost_function_val_{pair}' for pair in pairs ] +
                [f'delta_phi_{pair}' for pair in pairs ] +
                [f'missing_fraction_{pair}' for pair in pairs ],
    result_keys=[f'cost_function_val_{pair}' for pair in pairs ] +
                [f'delta_phi_{pair}' for pair in pairs ] +
                [f'missing_fraction_{pair}' for pair in pairs ],
    value_units=['a.u.' for pair in pairs ] +
                ['deg' for pair in pairs ] +
                ['%' for pair in pairs ])
# Inner sweep axis: square-pulse amplitude scaling for each gated pair.
Sw_functions = [ swf.FLsweep(flux_lm_X3, flux_lm_X3.vcz_amp_sq_NW, 'cz_NW'),
                 swf.FLsweep(flux_lm_D5, flux_lm_D5.vcz_amp_sq_SE, 'cz_SE'),
                 swf.FLsweep(flux_lm_X1, flux_lm_X1.vcz_amp_sq_NW, 'cz_NW') ]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios= [1.2/3, 1, 1.2/3])
# Outer sweep axis: t_middle sweep over all six gated flux lutmans, with the
# parked lutmans and a per-pair speed limit.
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [flux_lm_X3, flux_lm_D7,
                                           flux_lm_D5, flux_lm_X2,
                                           flux_lm_X1, flux_lm_D1],
                               which_gate= ['NW', 'SE',
                                            'SE', 'NW',
                                            'NW', 'SE'],
                               fl_lm_park = [flux_lm_Z1, flux_lm_D8, flux_lm_Z4, flux_lm_D2],
                               speed_limit = [2.9583333333333334e-08, 2.4166666666666668e-08, 2.5416666666666666e-08])
# swf2.set_parameter(5)
# plt.plot(flux_lm_X4._wave_dict['cz_SE'], label='X4')
# plt.plot(flux_lm_D9._wave_dict['cz_NW'], label='D9')
# plt.plot(flux_lm_D5._wave_dict['cz_NW'], label='D5')
# plt.plot(flux_lm_X3._wave_dict['cz_SE'], label='X3')
# plt.plot(flux_lm_X2._wave_dict['cz_NW'], label='X2')
# plt.plot(flux_lm_D3._wave_dict['cz_SE'], label='D3')
# plt.plot(flux_lm_Z1._wave_dict['park'], label='Z1')
# plt.plot(flux_lm_Z1._wave_dict['park'], label='Z4')
# plt.plot(flux_lm_Z1._wave_dict['park'], label='D8')
# plt.axhline(.5, color='k', ls='--', alpha=.25)
# plt.legend()
# plt.show()
# Run the 2D (amplitude x t_middle) coarse landscape and analyse it.
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.95, 1.05, 21))
nested_MC.set_sweep_points_2D(np.linspace(0, 10, 11)[::1])
nested_MC.cfg_clipping_mode(True)
label = 'VCZ_2D_{}_tm{}'.format(pairs, ' sweep')
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
    ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
    print('Failed Analysis')
# Re-analyse a specific earlier fine-sweep dataset (note the hard-coded,
# timestamped label) for the X3-D7 pair of the multi-CZ measurement:
# extract local optima, optima hulls and the 180-degree conditional-phase
# contour.
coha = ma2.Conditional_Oscillation_Heatmap_Analysis(
    label="223142_VCZ_2D_[['X3', 'D7'], ['D5', 'X2'], ['X1', 'D1']]_fine_sweep",
    for_multi_CZ = True,
    pair = {'pair_name':['X3','D7'],'sweep_ratio':[1.2/3,1],'pair_num':0},
    close_figs=True,
    extract_only=False,
    plt_orig_pnts=True,
    plt_contour_L1=False,
    plt_contour_phase=True,
    plt_optimal_values=True,
    plt_optimal_values_max=1,
    find_local_optimals=True,
    plt_clusters=False,
    cluster_from_interp=False,
    # Colour-scale limits for the plotted quantities.
    clims={
        "Cost func": [0., 300],
        "missing fraction": [0, 30],
        "offset difference": [0, 30]
    },
    target_cond_phase=180,
    phase_thr=15,
    L1_thr=5,
    clustering_thr=0.15,
    gen_optima_hulls=True,
    hull_L1_thr=4,
    hull_phase_thr=20,
    plt_optimal_hulls=True,
    save_cond_phase_contours=[180],
)
###########################################
# VCZ calibration (coarse landscape) FLUX dance 3
###########################################
# Regenerate the platform config with the pulse timings used for this run.
file_cfg = gc.generate_config(in_filename=input_file,
                              out_filename=config_fn,
                              mw_pulse_duration=20,
                              ro_duration=2200,
                              flux_pulse_duration=60,
                              init_duration=200000)
# set CZ parameters
# AWG channel amplitudes plus CZ dac amplitudes for the three gated pairs
# (X4-D9, D5-X3, X2-D3): one side of each pair is driven at 0.5, the other at 0.
flux_lm_X4.cfg_awg_channel_amplitude(0.2658333333333333)
flux_lm_X4.vcz_amp_dac_at_11_02_SE(.5)
flux_lm_D9.vcz_amp_dac_at_11_02_NW(0)
flux_lm_D5.cfg_awg_channel_amplitude(0.2)
flux_lm_D5.vcz_amp_dac_at_11_02_NW(.5)
flux_lm_X3.vcz_amp_dac_at_11_02_SE(0)
flux_lm_X2.cfg_awg_channel_amplitude(0.316)
flux_lm_X2.vcz_amp_dac_at_11_02_SE(.5)
flux_lm_D3.vcz_amp_dac_at_11_02_NW(0)
# Set park parameters
# Double-sided parking pulses at amplitude 0.5 on the four parked qubits.
flux_lm_D8.cfg_awg_channel_amplitude(.22)
flux_lm_Z4.cfg_awg_channel_amplitude(.19)
flux_lm_Z1.cfg_awg_channel_amplitude(.21)
flux_lm_D2.cfg_awg_channel_amplitude(.225)
flux_lm_D8.park_amp(.5)
flux_lm_Z4.park_amp(.5)
flux_lm_Z1.park_amp(.5)
flux_lm_D2.park_amp(.5)
flux_lm_D8.park_double_sided(True)
flux_lm_Z4.park_double_sided(True)
flux_lm_Z1.park_double_sided(True)
flux_lm_D2.park_double_sided(True)
# flux-dance 3
## input from user besides cfg amps & speed limit & flux-dance code
pairs = [['X4', 'D9'], ['D5', 'X3'], ['X2', 'D3']]
which_gate= [['SE', 'NW'],['NW', 'SE'], ['SE', 'NW']]
parked_qubits = ['D8', 'Z1', 'Z4', 'D2']
## processed
# Resolve the lutman instruments for targets, controls and parked qubits
# from the pair/park name tables above.
flux_lms_target = [device.find_instrument("flux_lm_{}".format(pair[0]))\
                   for pair in pairs]
flux_lms_control = [device.find_instrument("flux_lm_{}".format(pair[1]))\
                    for pair in pairs]
flux_lms_park = [device.find_instrument("flux_lm_{}".format(qb))\
                 for qb in parked_qubits]
list_qubits_used = np.asarray(pairs).flatten().tolist()
which_gates = np.asarray(which_gate).flatten().tolist()
# Readout: averaged (non-digitized) acquisition with optimal weights.
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=parked_qubits)
device.prepare_for_timedomain(qubits=list_qubits_used)
from pycqed.measurement import cz_cost_functions as cf
conv_cost_det = det.Function_Detector(
get_function=cf.conventional_CZ_cost_func2,
msmt_kw={'device': device,
'MC': MC,
'pairs' : pairs,
'parked_qbs': parked_qubits,
'prepare_for_timedomain': False,
'disable_metadata': True,
'extract_only': True,
'disable_metadata': True,
'flux_codeword': 'flux-dance-3',
'parked_qubit_seq': 'ground',
'include_single_qubit_phase_in_cost': False,
'target_single_qubit_phase': 360,
'include_leakage_in_cost': True,
'target_phase': 180,
'cond_phase_weight_factor': 2},
value_names=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
result_keys=[f'cost_function_val_{pair}' for pair in pairs ] +
[f'delta_phi_{pair}' for pair in pairs ] +
[f'missing_fraction_{pair}' for pair in pairs ],
value_units=['a.u.' for pair in pairs ] +
['deg' for pair in pairs ] +
['%' for pair in pairs ])
Sw_functions = [swf.FLsweep(flux_lm_target, flux_lm_target.parameters['vcz_amp_sq_{}'.format(gate[0])],
'cz_{}'.format(gate[0])) for flux_lm_target, gate in \
zip(flux_lms_target,which_gate)]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios= [.5, 1, .2])
swf2 = swf.flux_t_middle_sweep(fl_lm_tm = [device.find_instrument("flux_lm_{}".format(qubit))\
for qubit in list_qubits_used],
which_gate= which_gates,
fl_lm_park = flux_lms_park,
speed_limit = [2.75e-08, 2.75e-08, 2.75e-8]) # input
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.95, 1.05, 31))
nested_MC.set_sweep_points_2D(np.linspace(0, 10, 11)[::1])
nested_MC.cfg_clipping_mode(True)
label = 'VCZ_2D_{}_tm{}'.format(pairs, ' sweep')
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
print('Failed Analysis')
###########################################
# VCZ calibration (coarse landscape) FLUX dance 4
###########################################
# Regenerate the hardware config with the pulse timings used for this run.
file_cfg = gc.generate_config(in_filename=input_file,
                              out_filename=config_fn,
                              mw_pulse_duration=20,
                              ro_duration=2200,
                              flux_pulse_duration=60,
                              init_duration=200000)
# set CZ parameters
flux_lm_X4.cfg_awg_channel_amplitude(0.261)
flux_lm_X4.vcz_amp_dac_at_11_02_SW(.5)
flux_lm_D8.vcz_amp_dac_at_11_02_NE(0)
flux_lm_D4.cfg_awg_channel_amplitude(0.201)
flux_lm_D4.vcz_amp_dac_at_11_02_NE(.5)
flux_lm_X3.vcz_amp_dac_at_11_02_SW(0)
flux_lm_X2.cfg_awg_channel_amplitude(0.31174999999999997)
flux_lm_X2.vcz_amp_dac_at_11_02_SW(.5)
flux_lm_D2.vcz_amp_dac_at_11_02_NE(0)
# Set park parameters
flux_lm_D9.cfg_awg_channel_amplitude(.206)
flux_lm_Z3.cfg_awg_channel_amplitude(.214)
flux_lm_Z1.cfg_awg_channel_amplitude(.21)
flux_lm_D3.cfg_awg_channel_amplitude(.223)
flux_lm_D9.park_amp(.5)
flux_lm_Z3.park_amp(.5)
flux_lm_Z1.park_amp(.5)
flux_lm_D3.park_amp(.5)
flux_lm_D9.park_double_sided(True)
flux_lm_Z3.park_double_sided(True)
flux_lm_Z1.park_double_sided(True)
flux_lm_D3.park_double_sided(True)
# flux-dance 4
## input from user besides cfg amps & speed limit & flux-dance code word
pairs = [['X4', 'D8'], ['D4', 'X3'], ['X2', 'D2']]
which_gate = [['SW', 'NE'], ['NE', 'SW'], ['SW', 'NE']]
parked_qubits = ['D9', 'Z1', 'Z3', 'D3']
## processed
flux_lms_target = [device.find_instrument("flux_lm_{}".format(pair[0]))
                   for pair in pairs]
flux_lms_control = [device.find_instrument("flux_lm_{}".format(pair[1]))
                    for pair in pairs]
flux_lms_park = [device.find_instrument("flux_lm_{}".format(qb))
                 for qb in parked_qubits]
list_qubits_used = np.asarray(pairs).flatten().tolist()
which_gates = np.asarray(which_gate).flatten().tolist()
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=parked_qubits)
device.prepare_for_timedomain(qubits=list_qubits_used)
from pycqed.measurement import cz_cost_functions as cf
conv_cost_det = det.Function_Detector(
    get_function=cf.conventional_CZ_cost_func2,
    msmt_kw={'device': device,
             'MC': MC,
             'pairs': pairs,
             'parked_qbs': parked_qubits,
             'prepare_for_timedomain': False,
             # NOTE: this key was listed twice in the original dict literal;
             # duplicate dict keys silently overwrite, so keep one entry.
             'disable_metadata': True,
             'extract_only': True,
             'flux_codeword': 'flux-dance-4',
             'parked_qubit_seq': 'ground',
             'include_single_qubit_phase_in_cost': False,
             'target_single_qubit_phase': 360,
             'include_leakage_in_cost': True,
             'target_phase': 180,
             'cond_phase_weight_factor': 2},
    value_names=[f'cost_function_val_{pair}' for pair in pairs] +
                [f'delta_phi_{pair}' for pair in pairs] +
                [f'missing_fraction_{pair}' for pair in pairs],
    result_keys=[f'cost_function_val_{pair}' for pair in pairs] +
                [f'delta_phi_{pair}' for pair in pairs] +
                [f'missing_fraction_{pair}' for pair in pairs],
    value_units=['a.u.' for pair in pairs] +
                ['deg' for pair in pairs] +
                ['%' for pair in pairs])
# Sweep the square-pulse amplitude of each target qubit's CZ waveform.
Sw_functions = [swf.FLsweep(flux_lm_target,
                            flux_lm_target.parameters['vcz_amp_sq_{}'.format(gate[0])],
                            'cz_{}'.format(gate[0]))
                for flux_lm_target, gate in zip(flux_lms_target, which_gate)]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios=[.6, 1.8, 1.2/3])
swf2 = swf.flux_t_middle_sweep(
    fl_lm_tm=[device.find_instrument("flux_lm_{}".format(qubit))
              for qubit in list_qubits_used],
    which_gate=which_gates,
    fl_lm_park=flux_lms_park,
    speed_limit=[2.75e-08, 2.78e-8, 2.75e-08])  # input
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.985, 1.005, 31))
nested_MC.set_sweep_points_2D(np.linspace(0, 10, 11)[::-1])
nested_MC.cfg_clipping_mode(True)
label = 'VCZ_2D_{}_tm{}'.format(pairs, ' sweep')
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
    ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
    # Best effort: a failed analysis must not abort the calibration run.
    print('Failed Analysis')
###########################################
# VCZ calibration (coarse landscape) FLUX dance 4
###########################################
# Single-pair (D4-X3) variant using the plain 'cz' codeword.
file_cfg = gc.generate_config(in_filename=input_file,
                              out_filename=config_fn,
                              mw_pulse_duration=20,
                              ro_duration=2200,
                              flux_pulse_duration=60,
                              init_duration=200000)
# set CZ parameters
flux_lm_D4.cfg_awg_channel_amplitude(0.201)
flux_lm_D4.vcz_amp_dac_at_11_02_NE(.5)
flux_lm_X3.vcz_amp_dac_at_11_02_SW(0)
# Set park parameters (single-sided parking here, unlike the other dances)
flux_lm_Z3.cfg_awg_channel_amplitude(.3)  # (.214)
flux_lm_Z1.cfg_awg_channel_amplitude(.3)  # (.21)
flux_lm_Z3.park_amp(.5)
flux_lm_Z1.park_amp(.5)
flux_lm_Z3.park_double_sided(False)
flux_lm_Z1.park_double_sided(False)
# Visual sanity check of the generated waveforms before measuring.
plt.plot(flux_lm_D4._wave_dict['cz_NE'], label='D4')
plt.plot(flux_lm_X3._wave_dict['cz_SW'], label='X3')
plt.plot(flux_lm_Z1._wave_dict['park'], label='Z1')
plt.plot(flux_lm_Z3._wave_dict['park'], label='Z3')
plt.axhline(.5, color='k', ls='--', alpha=.25)
plt.legend()
plt.show()
# flux-dance 4
## input from user besides cfg amps & speed limit & flux-dance code word
pairs = [['D4', 'X3']]
which_gate = [['NE', 'SW']]
parked_qubits = ['Z1', 'Z3']
## processed
flux_lms_target = [device.find_instrument("flux_lm_{}".format(pair[0]))
                   for pair in pairs]
flux_lms_control = [device.find_instrument("flux_lm_{}".format(pair[1]))
                    for pair in pairs]
flux_lms_park = [device.find_instrument("flux_lm_{}".format(qb))
                 for qb in parked_qubits]
list_qubits_used = np.asarray(pairs).flatten().tolist()
which_gates = np.asarray(which_gate).flatten().tolist()
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=parked_qubits)
device.prepare_for_timedomain(qubits=list_qubits_used)
from pycqed.measurement import cz_cost_functions as cf
conv_cost_det = det.Function_Detector(
    get_function=cf.conventional_CZ_cost_func2,
    msmt_kw={'device': device,
             'MC': MC,
             'pairs': pairs,
             'parked_qbs': parked_qubits,
             'wait_time_before_flux_ns': 60,
             'wait_time_after_flux_ns': 60,
             'prepare_for_timedomain': False,
             # NOTE: this key was listed twice in the original dict literal;
             # duplicate dict keys silently overwrite, so keep one entry.
             'disable_metadata': True,
             'extract_only': True,
             'flux_codeword': 'cz',
             'parked_qubit_seq': 'ground',
             'include_single_qubit_phase_in_cost': False,
             'target_single_qubit_phase': 360,
             'include_leakage_in_cost': True,
             'target_phase': 180,
             'cond_phase_weight_factor': 2},
    value_names=[f'cost_function_val_{pair}' for pair in pairs] +
                [f'delta_phi_{pair}' for pair in pairs] +
                [f'missing_fraction_{pair}' for pair in pairs],
    result_keys=[f'cost_function_val_{pair}' for pair in pairs] +
                [f'delta_phi_{pair}' for pair in pairs] +
                [f'missing_fraction_{pair}' for pair in pairs],
    value_units=['a.u.' for pair in pairs] +
                ['deg' for pair in pairs] +
                ['%' for pair in pairs])
# Sweep the square-pulse amplitude of the target qubit's CZ waveform.
Sw_functions = [swf.FLsweep(flux_lm_target,
                            flux_lm_target.parameters['vcz_amp_sq_{}'.format(gate[0])],
                            'cz_{}'.format(gate[0]))
                for flux_lm_target, gate in zip(flux_lms_target, which_gate)]
swf1 = swf.multi_sweep_function(Sw_functions, sweep_point_ratios=[1])
swf2 = swf.flux_t_middle_sweep(
    fl_lm_tm=[device.find_instrument("flux_lm_{}".format(qubit))
              for qubit in list_qubits_used],
    which_gate=which_gates,
    fl_lm_park=flux_lms_park,
    speed_limit=[2.78e-8])  # input
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_function_2D(swf2)
nested_MC.set_sweep_points(np.linspace(.95, 1.05, 11))
nested_MC.set_sweep_points_2D([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
nested_MC.cfg_clipping_mode(True)
label = 'VCZ_2D_{}_tm{}'.format(pairs, ' sweep')
nested_MC.set_detector_function(conv_cost_det)
result = nested_MC.run(label, mode='2D')
try:
    ma2.Conditional_Oscillation_Heatmap_Analysis(label=label)
except Exception:
    # Best effort: a failed analysis must not abort the calibration run.
    print('Failed Analysis')
###########################################
# VCZ calibration (coarse landscape) FLUX dance 4 (old)
# NOTE(review): legacy duplicate of the section above with different
# amplitudes; kept for reference — confirm before re-running.
###########################################
file_cfg = gc.generate_config(in_filename=input_file,
                              out_filename=config_fn,
                              mw_pulse_duration=20,
                              ro_duration=2200,
                              flux_pulse_duration=60,
                              init_duration=200000)
# set CZ parameters
flux_lm_X4.cfg_awg_channel_amplitude(0.261)
flux_lm_X4.vcz_amp_dac_at_11_02_SW(.5)
flux_lm_D8.vcz_amp_dac_at_11_02_NE(0)
# presumably a value read back from the instrument — hence the float residue
flux_lm_D4.cfg_awg_channel_amplitude(0.25999999046325684)
flux_lm_D4.vcz_amp_dac_at_11_02_NE(.5)
flux_lm_X3.vcz_amp_dac_at_11_02_SW(0)
flux_lm_X2.cfg_awg_channel_amplitude(0.31174999999999997)
flux_lm_X2.vcz_amp_dac_at_11_02_SW(.5)
flux_lm_D2.vcz_amp_dac_at_11_02_NE(0)
# Set park parameters
flux_lm_D9.cfg_awg_channel_amplitude(.206)
flux_lm_Z3.cfg_awg_channel_amplitude(.214)
flux_lm_Z1.cfg_awg_channel_amplitude(.21)
flux_lm_D3.cfg_awg_channel_amplitude(.223)
flux_lm_D9.park_amp(.5)
flux_lm_Z3.park_amp(.5)
flux_lm_Z1.park_amp(.5)
flux_lm_D3.park_amp(.5)
flux_lm_D9.park_double_sided(True)
flux_lm_Z3.park_double_sided(True)
flux_lm_Z1.park_double_sided(True)
flux_lm_D3.park_double_sided(True)
# flux-dance 4
## input from user besides cfg amps & speed limit & flux-dance code word
pairs = [['X4', 'D8'], ['D4', 'X3'], ['X2', 'D2']]
which_gate= [['SW', 'NE'],['NE', 'SW'], ['SW', 'NE']]
parked_qubits = ['D9', 'Z1', 'Z3', 'D3']
## processed
flux_lms_target = [device.find_instrument("flux_lm_{}".format(pair[0]))\
                   for pair in pairs]
flux_lms_control = [device.find_instrument("flux_lm_{}".format(pair[1]))\
                    for pair in pairs]
flux_lms_park = [device.find_instrument("flux_lm_{}".format(qb))\
                 for qb in parked_qubits]
list_qubits_used = np.asarray(pairs).flatten().tolist()
which_gates = np.asarray(which_gate).flatten().tolist()
device.ro_acq_averages(1024)
device.ro_acq_digitized(False)
device.ro_acq_weight_type('optimal')
device.prepare_fluxing(qubits=parked_qubits)
device.prepare_for_timedomain(qubits=list_qubits_used)
from pycqed.measurement import cz_cost_functions as cf
conv_cost_det = det.Function_Detector(
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implementation of SQLAlchemy backend.
"""
import re
import warnings
from nova import block_device
from nova import db
from nova import exception
from nova import flags
from nova import ipv6
from nova import utils
from nova import log as logging
from nova.compute import vm_states
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from nova.db.sqlalchemy.session import get_session_dodai
from sqlalchemy import or_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql import func
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import literal_column
# Module-level flag registry and logger for the sqlalchemy backend.
FLAGS = flags.FLAGS
LOG = logging.getLogger("nova.db.sqlalchemy")
def is_admin_context(context):
    """Indicates if the request context is an administrator.

    An empty (falsy) context is deprecated and rejected outright.
    """
    if not context:
        warnings.warn(_('Use of empty request context is deprecated'),
                      DeprecationWarning)
        # A missing context is a programming error; fail loudly with a
        # meaningful message (was the placeholder 'die').
        raise Exception('empty request context is not allowed')
    return context.is_admin
def is_user_context(context):
    """Indicates if the request context is a normal (non-admin) user.

    Requires a non-empty context carrying both a user and a project id.
    """
    return bool(
        context
        and not context.is_admin
        and context.user_id
        and context.project_id
    )
def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project."""
    if not is_user_context(context):
        # Admin and empty contexts are not restricted here.
        return
    if not context.project_id or context.project_id != project_id:
        raise exception.NotAuthorized()
def authorize_user_context(context, user_id):
    """Ensures a request has permission to access the given user."""
    if not is_user_context(context):
        # Admin and empty contexts are not restricted here.
        return
    if not context.user_id or context.user_id != user_id:
        raise exception.NotAuthorized()
def can_read_deleted(context):
    """Indicates if the context has access to soft-deleted objects."""
    return context.read_deleted if context else False
def require_admin_context(f):
    """Decorator to require admin request context.

    The first argument to the wrapped function must be the context.
    Raises AdminRequired for non-admin callers.
    """
    def wrapper(*args, **kwargs):
        if not is_admin_context(args[0]):
            raise exception.AdminRequired()
        return f(*args, **kwargs)
    # Preserve the wrapped function's name, matching the other decorators
    # in this module (require_instance_exists, require_volume_exists).
    wrapper.__name__ = f.__name__
    return wrapper
def require_context(f):
    """Decorator to require *any* user or admin context.

    This does no authorization for user or project access matching, see
    :py:func:`authorize_project_context` and
    :py:func:`authorize_user_context`.
    The first argument to the wrapped function must be the context.
    """
    def wrapper(*args, **kwargs):
        if not is_admin_context(args[0]) and not is_user_context(args[0]):
            raise exception.NotAuthorized()
        return f(*args, **kwargs)
    # Preserve the wrapped function's name, matching the other decorators
    # in this module (require_instance_exists, require_volume_exists).
    wrapper.__name__ = f.__name__
    return wrapper
def require_instance_exists(f):
    """Decorator to require the specified instance to exist.

    The wrapped function must take (context, instance_id, ...) as its
    first two positional arguments.
    """
    def checked(context, instance_id, *args, **kwargs):
        # Presumably raises if the instance is missing — verify in db.api.
        db.api.instance_get(context, instance_id)
        return f(context, instance_id, *args, **kwargs)
    checked.__name__ = f.__name__
    return checked
def require_volume_exists(f):
    """Decorator to require the specified volume to exist.

    The wrapped function must take (context, volume_id, ...) as its
    first two positional arguments.
    """
    def checked(context, volume_id, *args, **kwargs):
        # Presumably raises if the volume is missing — verify in db.api.
        db.api.volume_get(context, volume_id)
        return f(context, volume_id, *args, **kwargs)
    checked.__name__ = f.__name__
    return checked
###################
@require_admin_context
def service_destroy(context, service_id):
    """Delete a service record inside one transaction.

    For compute services, the attached compute_node records are deleted
    as well so no orphaned rows remain.
    """
    session = get_session()
    with session.begin():
        service_ref = service_get(context, service_id, session=session)
        service_ref.delete(session=session)
        if service_ref.topic == 'compute' and \
            len(service_ref.compute_node) != 0:
            for c in service_ref.compute_node:
                c.delete(session=session)
@require_admin_context
def service_get(context, service_id, session=None):
    """Fetch one service by id, eagerly loading its compute nodes.

    Raises ServiceNotFound when no matching row exists.
    """
    session = session or get_session()
    service = session.query(models.Service).\
        options(joinedload('compute_node')).\
        filter_by(id=service_id, deleted=can_read_deleted(context)).\
        first()
    if service is None:
        raise exception.ServiceNotFound(service_id=service_id)
    return service
@require_admin_context
def service_get_all(context, disabled=None):
    """List services, optionally restricted to (en/dis)abled ones."""
    query = get_session().query(models.Service).\
        filter_by(deleted=can_read_deleted(context))
    if disabled is None:
        return query.all()
    return query.filter_by(disabled=disabled).all()
@require_admin_context
def service_get_all_by_topic(context, topic):
    """All live, enabled services listening on *topic*."""
    return get_session().query(models.Service).\
        filter_by(deleted=False, disabled=False, topic=topic).\
        all()
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
    """First live, enabled service on *host* for *topic*, or None."""
    return get_session().query(models.Service).\
        filter_by(deleted=False, disabled=False,
                  host=host, topic=topic).\
        first()
@require_admin_context
def service_get_all_by_host(context, host):
    """All live services running on *host* (enabled or not)."""
    return get_session().query(models.Service).\
        filter_by(deleted=False, host=host).\
        all()
@require_admin_context
def service_get_all_compute_by_host(context, host):
    """All live compute services on *host*, with compute_node eagerly loaded.

    Raises ComputeHostNotFound when the host runs no compute service.
    """
    services = get_session().query(models.Service).\
        options(joinedload('compute_node')).\
        filter_by(deleted=False, host=host, topic='compute').\
        all()
    if not services:
        raise exception.ComputeHostNotFound(host=host)
    return services
@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
    """Join live *topic* services against the aggregate subquery *subq*.

    Returns (Service, aggregate) tuples sorted ascending by the aggregate
    column *label*; hosts missing from the subquery count as 0 via COALESCE,
    so idle hosts sort first.
    """
    sort_value = getattr(subq.c, label)
    return session.query(models.Service, func.coalesce(sort_value, 0)).\
        filter_by(topic=topic).\
        filter_by(deleted=False).\
        filter_by(disabled=False).\
        outerjoin((subq, models.Service.host == subq.c.host)).\
        order_by(sort_value).\
        all()
@require_admin_context
def service_get_all_compute_sorted(context):
    """Compute services with their total instance vcpus, least-loaded first."""
    session = get_session()
    with session.begin():
        # NOTE(vish): The intended query is below
        #             SELECT services.*, COALESCE(inst_cores.instance_cores,
        #                                         0)
        #             FROM services LEFT OUTER JOIN
        #             (SELECT host, SUM(instances.vcpus) AS instance_cores
        #              FROM instances GROUP BY host) AS inst_cores
        #             ON services.host = inst_cores.host
        topic = 'compute'
        label = 'instance_cores'
        subq = session.query(models.Instance.host,
                             func.sum(models.Instance.vcpus).label(label)).\
            filter_by(deleted=False).\
            group_by(models.Instance.host).\
            subquery()
        return _service_get_all_topic_subquery(context,
                                               session,
                                               topic,
                                               subq,
                                               label)
@require_admin_context
def service_get_all_network_sorted(context):
    """Network services with their hosted network count, fewest first."""
    session = get_session()
    with session.begin():
        topic = 'network'
        label = 'network_count'
        subq = session.query(models.Network.host,
                             func.count(models.Network.id).label(label)).\
            filter_by(deleted=False).\
            group_by(models.Network.host).\
            subquery()
        return _service_get_all_topic_subquery(context,
                                               session,
                                               topic,
                                               subq,
                                               label)
@require_admin_context
def service_get_all_volume_sorted(context):
    """Volume services with their total volume gigabytes, smallest first."""
    session = get_session()
    with session.begin():
        topic = 'volume'
        label = 'volume_gigabytes'
        subq = session.query(models.Volume.host,
                             func.sum(models.Volume.size).label(label)).\
            filter_by(deleted=False).\
            group_by(models.Volume.host).\
            subquery()
        return _service_get_all_topic_subquery(context,
                                               session,
                                               topic,
                                               subq,
                                               label)
@require_admin_context
def service_get_by_args(context, host, binary):
    """Look up the service running *binary* on *host*.

    Raises HostBinaryNotFound when no row matches.
    """
    service = get_session().query(models.Service).\
        filter_by(host=host, binary=binary,
                  deleted=can_read_deleted(context)).\
        first()
    if service is None:
        raise exception.HostBinaryNotFound(host=host, binary=binary)
    return service
@require_admin_context
def service_create(context, values):
    """Persist a new Service row built from *values* and return it.

    New services come up disabled unless FLAGS.enable_new_services is set.
    """
    new_service = models.Service()
    new_service.update(values)
    if not FLAGS.enable_new_services:
        new_service.disabled = True
    new_service.save()
    return new_service
@require_admin_context
def service_update(context, service_id, values):
    """Apply *values* to an existing service inside one transaction."""
    session = get_session()
    with session.begin():
        service_ref = service_get(context, service_id, session=session)
        service_ref.update(values)
        service_ref.save(session=session)
###################
@require_admin_context
def compute_node_get(context, compute_id, session=None):
    """Fetch a ComputeNode by primary key.

    Raises ComputeHostNotFound when no row matches.
    """
    session = session or get_session()
    node = session.query(models.ComputeNode).\
        filter_by(id=compute_id, deleted=can_read_deleted(context)).\
        first()
    if node is None:
        raise exception.ComputeHostNotFound(host=compute_id)
    return node
@require_admin_context
def compute_node_create(context, values):
    """Persist a new ComputeNode row built from *values* and return it."""
    new_node = models.ComputeNode()
    new_node.update(values)
    new_node.save()
    return new_node
@require_admin_context
def compute_node_update(context, compute_id, values):
    """Apply *values* to an existing compute node inside one transaction."""
    session = get_session()
    with session.begin():
        compute_ref = compute_node_get(context, compute_id, session=session)
        compute_ref.update(values)
        compute_ref.save(session=session)
###################
@require_admin_context
def certificate_get(context, certificate_id, session=None):
    """Fetch a Certificate by primary key.

    Raises CertificateNotFound when no row matches.
    """
    session = session or get_session()
    certificate = session.query(models.Certificate).\
        filter_by(id=certificate_id, deleted=can_read_deleted(context)).\
        first()
    if certificate is None:
        raise exception.CertificateNotFound(certificate_id=certificate_id)
    return certificate
@require_admin_context
def certificate_create(context, values):
    """Create a Certificate row from *values* and return it."""
    certificate_ref = models.Certificate()
    # items() instead of the Python-2-only iteritems() keeps this portable
    # across Python versions; behavior is unchanged.
    for key, value in values.items():
        certificate_ref[key] = value
    certificate_ref.save()
    return certificate_ref
@require_admin_context
def certificate_destroy(context, certificate_id):
    """Delete a certificate record inside one transaction."""
    session = get_session()
    with session.begin():
        certificate_ref = certificate_get(context,
                                          certificate_id,
                                          session=session)
        certificate_ref.delete(session=session)
@require_admin_context
def certificate_get_all_by_project(context, project_id):
    """All live certificates belonging to *project_id*."""
    return get_session().query(models.Certificate).\
        filter_by(project_id=project_id, deleted=False).\
        all()
@require_admin_context
def certificate_get_all_by_user(context, user_id):
    """All live certificates belonging to *user_id*."""
    return get_session().query(models.Certificate).\
        filter_by(user_id=user_id, deleted=False).\
        all()
@require_admin_context
def certificate_get_all_by_user_and_project(_context, user_id, project_id):
    """All live certificates matching both *user_id* and *project_id*."""
    return get_session().query(models.Certificate).\
        filter_by(user_id=user_id, project_id=project_id, deleted=False).\
        all()
@require_admin_context
def certificate_update(context, certificate_id, values):
    """Apply *values* to an existing certificate inside one transaction."""
    session = get_session()
    with session.begin():
        certificate_ref = certificate_get(context,
                                          certificate_id,
                                          session=session)
        # items() instead of the Python-2-only iteritems() keeps this
        # portable across Python versions; behavior is unchanged.
        for key, value in values.items():
            certificate_ref[key] = value
        certificate_ref.save(session=session)
###################
@require_context
def floating_ip_get(context, id):
    """Fetch a floating IP by id, scoped to the caller's project for users.

    Raises FloatingIpNotFound when no row matches. Note: a context that is
    neither admin nor user leaves result as None and therefore raises.
    """
    session = get_session()
    result = None
    if is_admin_context(context):
        result = session.query(models.FloatingIp).\
            options(joinedload('fixed_ip')).\
            options(joinedload_all('fixed_ip.instance')).\
            filter_by(id=id).\
            filter_by(deleted=can_read_deleted(context)).\
            first()
    elif is_user_context(context):
        # Users may only see floating IPs in their own project.
        result = session.query(models.FloatingIp).\
            options(joinedload('fixed_ip')).\
            options(joinedload_all('fixed_ip.instance')).\
            filter_by(project_id=context.project_id).\
            filter_by(id=id).\
            filter_by(deleted=False).\
            first()
    if not result:
        raise exception.FloatingIpNotFound(id=id)
    return result
@require_context
def floating_ip_allocate_address(context, project_id):
    """Claim a free floating IP for *project_id* and return its address.

    Raises NoMoreFloatingIps when the pool is exhausted.
    """
    authorize_project_context(context, project_id)
    session = get_session()
    with session.begin():
        # Row-level lock so concurrent allocators don't grab the same IP.
        floating_ip_ref = session.query(models.FloatingIp).\
            filter_by(fixed_ip_id=None).\
            filter_by(project_id=None).\
            filter_by(deleted=False).\
            with_lockmode('update').\
            first()
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not floating_ip_ref:
            raise exception.NoMoreFloatingIps()
        floating_ip_ref['project_id'] = project_id
        session.add(floating_ip_ref)
    return floating_ip_ref['address']
@require_context
def floating_ip_create(context, values):
    """Insert a FloatingIp row built from *values*; returns its address."""
    floating_ip = models.FloatingIp()
    floating_ip.update(values)
    floating_ip.save()
    return floating_ip['address']
@require_context
def floating_ip_count_by_project(context, project_id):
    """Count the project's live, manually-assigned floating IPs."""
    authorize_project_context(context, project_id)
    # TODO(tr3buchet): why leave auto_assigned floating IPs out?
    return get_session().query(models.FloatingIp).\
        filter_by(project_id=project_id,
                  auto_assigned=False,
                  deleted=False).\
        count()
@require_context
def floating_ip_fixed_ip_associate(context, floating_address,
                                   fixed_address, host):
    """Bind a floating address to a fixed address on *host*, transactionally."""
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     floating_address,
                                                     session=session)
        fixed_ip_ref = fixed_ip_get_by_address(context,
                                               fixed_address,
                                               session=session)
        floating_ip_ref.fixed_ip = fixed_ip_ref
        floating_ip_ref.host = host
        floating_ip_ref.save(session=session)
@require_context
def floating_ip_deallocate(context, address):
    """Return *address* to the free pool by clearing project, host and
    auto-assigned state, transactionally."""
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     address,
                                                     session=session)
        floating_ip_ref['project_id'] = None
        floating_ip_ref['host'] = None
        floating_ip_ref['auto_assigned'] = False
        floating_ip_ref.save(session=session)
@require_context
def floating_ip_destroy(context, address):
    """Delete the floating IP record for *address*, transactionally."""
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     address,
                                                     session=session)
        floating_ip_ref.delete(session=session)
@require_context
def floating_ip_disassociate(context, address):
    """Detach the fixed IP currently bound to floating *address*.

    Returns the previously associated fixed address, or None if there
    was no association.
    """
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     address,
                                                     session=session)
        fixed_ip_ref = floating_ip_ref.fixed_ip
        fixed_ip_address = fixed_ip_ref['address'] if fixed_ip_ref else None
        floating_ip_ref.fixed_ip = None
        floating_ip_ref.host = None
        floating_ip_ref.save(session=session)
    return fixed_ip_address
@require_context
def floating_ip_set_auto_assigned(context, address):
    """Mark the floating IP at *address* as auto-assigned, transactionally."""
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     address,
                                                     session=session)
        floating_ip_ref.auto_assigned = True
        floating_ip_ref.save(session=session)
@require_admin_context
def floating_ip_get_all(context):
    """Every live floating IP, with fixed_ip.instance eagerly loaded.

    Raises NoFloatingIpsDefined when none exist.
    """
    floating_ips = get_session().query(models.FloatingIp).\
        options(joinedload_all('fixed_ip.instance')).\
        filter_by(deleted=False).\
        all()
    if not floating_ips:
        raise exception.NoFloatingIpsDefined()
    return floating_ips
@require_admin_context
def floating_ip_get_all_by_host(context, host):
    """Every live floating IP hosted on *host*.

    Raises FloatingIpNotFoundForHost when the host has none.
    """
    floating_ips = get_session().query(models.FloatingIp).\
        options(joinedload_all('fixed_ip.instance')).\
        filter_by(host=host, deleted=False).\
        all()
    if not floating_ips:
        raise exception.FloatingIpNotFoundForHost(host=host)
    return floating_ips
@require_context
def floating_ip_get_all_by_project(context, project_id):
    """Every live, manually-assigned floating IP owned by *project_id*.

    Raises FloatingIpNotFoundForProject when the project has none.
    """
    authorize_project_context(context, project_id)
    # TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
    floating_ips = get_session().query(models.FloatingIp).\
        options(joinedload_all('fixed_ip.instance')).\
        filter_by(project_id=project_id,
                  auto_assigned=False,
                  deleted=False).\
        all()
    if not floating_ips:
        raise exception.FloatingIpNotFoundForProject(project_id=project_id)
    return floating_ips
@require_context
def floating_ip_get_by_address(context, address, session=None):
    """Fetch a floating IP by its address string.

    Raises FloatingIpNotFoundForAddress when no row matches; non-admin
    callers must additionally belong to the IP's project.
    """
    if not session:
        session = get_session()
    result = session.query(models.FloatingIp).\
        options(joinedload_all('fixed_ip.network')).\
        filter_by(address=address).\
        filter_by(deleted=can_read_deleted(context)).\
        first()
    if not result:
        raise exception.FloatingIpNotFoundForAddress(address=address)
    # If the floating IP has a project ID set, check to make sure
    # the non-admin user has access.
    if result.project_id and is_user_context(context):
        authorize_project_context(context, result.project_id)
    return result
@require_context
def floating_ip_update(context, address, values):
    """Apply *values* to the floating IP at *address*, transactionally."""
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context, address, session)
        # items() instead of the Python-2-only iteritems() keeps this
        # portable across Python versions; behavior is unchanged.
        for key, value in values.items():
            floating_ip_ref[key] = value
        floating_ip_ref.save(session=session)
###################
@require_admin_context
def fixed_ip_associate(context, address, instance_id, network_id=None,
reserved=False):
| |
<reponame>Excalibur95/eneticAlgorithmsWithPython
# File: ticTacToe.py
# Del capítulo 18 de _Algoritmos Genéticos con Python_
#
# Author: <NAME> <<EMAIL>>
# Copyright (c) 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import datetime
import random
import unittest
from functools import partial
import genetic
def obtener_aptitud(genes):
    """Evaluate a gene sequence by playing games; attaches the gene count."""
    # Play against a copy so game evaluation cannot mutate the caller's genes.
    aptitud = obtener_aptitud_para_juegos(genes[:])
    aptitud.ConteoDeGenes = len(genes)
    return aptitud
# Board squares are numbered 1 through 9.
índicesDeCuadrados = [1, 2, 3, 4, 5, 6, 7, 8, 9]
def jugar1en1(xGenes, oGenes):
    """Play one tic-tac-toe game between two gene sets, X (xGenes) first.

    The returned CompetitionResult is from xGenes' perspective: a player
    that cannot produce a move loses immediately.
    """
    tablero = dict((i, Cuadrado(i, TipoDeContenido.Vacío)) for i in range(1, 9 + 1))
    vacíos = [v for v in tablero.values() if v.Contenido == TipoDeContenido.Vacío]
    # Per-player tuples: (genes, piece, result-if-stuck, result-if-win),
    # with O's results inverted so everything is scored for X.
    datosDeRonda = [[xGenes, TipoDeContenido.Mia, genetic.ResultadoDeCompetición.Perdido,
                     genetic.ResultadoDeCompetición.Ganado],
                    [oGenes, TipoDeContenido.Oponente, genetic.ResultadoDeCompetición.Ganado,
                     genetic.ResultadoDeCompetición.Perdido]]
    índiceDelJugador = 0
    while len(vacíos) > 0:
        datosDelJugador = datosDeRonda[índiceDelJugador]
        índiceDelJugador = 1 - índiceDelJugador  # alternate turns
        genes, pieza, perdió, ganó = datosDelJugador
        índiceDeReglaYMovimiento = obtener_mover(genes, tablero, vacíos)
        if índiceDeReglaYMovimiento is None:  # could not find a move
            return perdió
        índice = índiceDeReglaYMovimiento[0]
        tablero[índice] = Cuadrado(índice, pieza)
        # Only the last move can complete a row/column/diagonal.
        sóloElMovimientoMásReciente = [tablero[índice]]
        if len(FiltroDeContenidoDeFila(pieza, 3).obtener_coincidencias(tablero, sóloElMovimientoMásReciente)) > 0 or \
                len(FiltroDeContenidoDeColumna(pieza, 3).obtener_coincidencias(tablero, sóloElMovimientoMásReciente)) > 0 or \
                len(DiagonalContenidoFilter(pieza, 3).obtener_coincidencias(tablero, sóloElMovimientoMásReciente)) > 0:
            return ganó
        vacíos = [v for v in tablero.values() if v.Contenido == TipoDeContenido.Vacío]
    return genetic.ResultadoDeCompetición.Empatado
def obtener_aptitud_para_juegos(genes):
    """Score a rule set by playing every game line against all opponent replies.

    The work queue is seeded with the empty board plus each of the nine
    opponent opening moves; boards are then expanded depth-first, counting
    games won, tied and lost. Returns an Aptitud built from those counts
    and the gene count.
    """
    def obtenerCadenaDelTablero(b):
        # Serialize a board: '.' empty, 'x' my piece, 'o' opponent's piece.
        return ''.join(map(lambda i:
                           '.' if b[i].Contenido == TipoDeContenido.Vacío
                           else 'x' if b[i].Contenido == TipoDeContenido.Mia
                           else 'o', índicesDeCuadrados))
    tablero = dict((i, Cuadrado(i, TipoDeContenido.Vacío)) for i in range(1, 9 + 1))
    cola = [tablero]
    # Seed the queue with every possible opponent opening move.
    for cuadrado in tablero.values():
        copiaDelCandidato = tablero.copy()
        copiaDelCandidato[cuadrado.Índice] = Cuadrado(cuadrado.Índice, TipoDeContenido.Oponente)
        cola.append(copiaDelCandidato)
    reglasGanadoras = {}
    ganados = empates = perdidos = 0
    while len(cola) > 0:
        tablero = cola.pop()
        cadenaDelTablero = obtenerCadenaDelTablero(tablero)
        vacíos = [v for v in tablero.values() if v.Contenido == TipoDeContenido.Vacío]
        if len(vacíos) == 0:
            empates += 1
            continue
        candidatoÍndiceAndReglaÍndice = obtener_mover(genes, tablero, vacíos)
        if candidatoÍndiceAndReglaÍndice is None:  # could not find a move
            # there are empty squares but the rules produced no move
            perdidos += 1
            # go to the next board
            continue
        # found at least one move
        índice = candidatoÍndiceAndReglaÍndice[0]
        tablero[índice] = Cuadrado(índice, TipoDeContenido.Mia)
        # If we now have three of my pieces in any row, column or diagonal, we win
        sóloElMovimientoMásReciente = [tablero[índice]]
        if len(tengoTresEnUnaFila.obtener_coincidencias(tablero, sóloElMovimientoMásReciente)) > 0 or \
                len(tengoTresEnUnaColumna.obtener_coincidencias(tablero, sóloElMovimientoMásReciente)) > 0 or \
                len(tengoTresEnDiagonal.obtener_coincidencias(tablero, sóloElMovimientoMásReciente)) > 0:
            reglaId = candidatoÍndiceAndReglaÍndice[1]
            # Remember which rule delivered the win on this board.
            if reglaId not in reglasGanadoras:
                reglasGanadoras[reglaId] = list()
            reglasGanadoras[reglaId].append(cadenaDelTablero)
            ganados += 1
            # go to the next board
            continue
        # we lose if the opponent now has two in a row/column/diagonal with
        # the third square still empty
        vacíos = [v for v in tablero.values() if v.Contenido == TipoDeContenido.Vacío]
        if len(oponenteTieneDosEnUnaFila.obtener_coincidencias(tablero, vacíos)) > 0:
            perdidos += 1
            # go to the next board
            continue
        # queue up every possible opponent response
        for cuadrado in vacíos:
            copiaDelCandidato = tablero.copy()
            copiaDelCandidato[cuadrado.Índice] = Cuadrado(cuadrado.Índice,
                                                          TipoDeContenido.Oponente)
            cola.append(copiaDelCandidato)
    return Aptitud(ganados, empates, perdidos, len(genes))
def obtener_mover(reglaSet, tablero, vacíos, índiceDePrimeraRegla=0):
    """Apply the rules in order, narrowing the candidate squares.

    Returns ``[square_index, rule_index]`` as soon as a rule narrows the
    candidates to exactly one square, or ``None`` if no rule ever does.
    """
    reglas = list(reglaSet)  # snapshot so the caller's list stays untouched
    for posición, regla in enumerate(reglas):
        if posición < índiceDePrimeraRegla:
            continue
        coincidencias = regla.obtener_coincidencias(tablero, vacíos)
        if not coincidencias:
            continue
        if len(coincidencias) == 1:
            único = list(coincidencias)[0]
            return [único, posición]
        # Narrow the working set to the squares this rule agrees on.
        if len(coincidencias) < len(vacíos):
            vacíos = [v for v in vacíos if v.Índice in coincidencias]
    return None
def mostrar(candidato, horaInicio):
    """Print the candidate's genes (one per line), fitness and elapsed seconds."""
    transcurrido = (datetime.datetime.now() - horaInicio).total_seconds()
    genesComoTexto = [str(gene) for gene in candidato.Genes]
    print("\t{}\n{}\n{}".format(
        '\n\t'.join(genesComoTexto),
        candidato.Aptitud,
        transcurrido))
def mudar_añadir(genes, geneSet):
    """Insert one random gene from geneSet at a random position; always succeeds."""
    if len(genes) > 0:
        posición = random.randrange(0, len(genes) + 1)
    else:
        posición = 0
    genes.insert(posición, random.choice(geneSet))
    return True
def mudar_remover(genes):
    """Delete one random gene (sometimes two); fails only on an empty list."""
    if not genes:
        return False
    genes.pop(random.randrange(0, len(genes)))
    # Half the time, when at least two genes remain, drop a second one too.
    if len(genes) > 1 and random.randint(0, 1) == 1:
        genes.pop(random.randrange(0, len(genes)))
    return True
def mudar_reemplazar(genes, geneSet):
    """Overwrite one random position with a random gene from geneSet."""
    if not genes:
        return False
    posición = random.randrange(0, len(genes))
    genes[posición] = random.choice(geneSet)
    return True
def mudar_intercambiar_adyacente(genes):
    """Swap a random pair of neighbouring genes; needs at least two genes."""
    if len(genes) < 2:
        return False
    izquierda = random.choice(range(len(genes) - 1))
    derecha = izquierda + 1
    genes[izquierda], genes[derecha] = genes[derecha], genes[izquierda]
    return True
def mudar_mover(genes):
    """Cut a random run of one or two genes and reinsert it elsewhere."""
    if len(genes) < 3:
        return False
    inicio = random.choice(range(len(genes)))
    tope = inicio + random.randint(1, 2)
    segmento = genes[inicio:tope]
    del genes[inicio:tope]
    destino = random.choice(range(len(genes)))
    genes[destino:destino] = segmento
    return True
def mudar(genes, fnObtenerAptitud, operadoresDeMutación, recuentoDeMutaciones):
    """Apply a random number of mutation operators to genes in place.

    Operators that report failure (return falsy) are dropped for the round
    and another is tried.  If any round improves on the starting fitness,
    the round number is appended to ``recuentoDeMutaciones`` (adaptively
    biasing future draws) and mutation stops early.
    """
    aptitudInicial = fnObtenerAptitud(genes)
    cuenta = random.choice(recuentoDeMutaciones)
    for ronda in range(1, cuenta + 2):
        candidatos = list(operadoresDeMutación)
        operador = random.choice(candidatos)
        # Retry with a different operator until one succeeds on this list.
        while not operador(genes):
            candidatos.remove(operador)
            operador = random.choice(candidatos)
        if fnObtenerAptitud(genes) > aptitudInicial:
            recuentoDeMutaciones.append(ronda)
            return
def crear_geneSet():
    """Expand every rule template into concrete rules and pool them.

    Each ReglaMetadatos pairs a rule factory with the option combinations
    (expected content x count) it should be instantiated for; templates
    whose lambdas ignore their arguments yield a single fixed rule.  Prints
    the total rule count as a progress hint and returns the flat list.
    """
    # Option axes: expected square content x required match count.
    opciones = [[TipoDeContenido.Oponente, [0, 1, 2]],
                [TipoDeContenido.Mia, [0, 1, 2]]]
    geneSet = [
        ReglaMetadatos(FiltroDeContenidoDeFila, opciones),
        ReglaMetadatos(lambda contenidoEsperado, cuenta: FiltroDeFilaSuperior(), opciones),
        ReglaMetadatos(lambda contenidoEsperado, cuenta: FiltroDeFilaDelMedio(),
                       opciones),
        ReglaMetadatos(lambda contenidoEsperado, cuenta: FiltroDeFilaInferior(),
                       opciones),
        ReglaMetadatos(FiltroDeContenidoDeColumna, opciones),
        ReglaMetadatos(lambda contenidoEsperado, cuenta: FiltroDeColumnaIzquierda(),
                       opciones),
        ReglaMetadatos(lambda contenidoEsperado, cuenta: FiltroDeColumnaMedia(),
                       opciones),
        ReglaMetadatos(lambda contenidoEsperado, cuenta: FiltroDeColumnaDerecha(),
                       opciones),
        ReglaMetadatos(DiagonalContenidoFilter, opciones),
        ReglaMetadatos(lambda contenidoEsperado, cuenta: FiltroDeUbicaciónDiagonal(),
                       opciones),
        ReglaMetadatos(lambda contenidoEsperado, cuenta: FiltroDeEsquina()),
        ReglaMetadatos(lambda contenidoEsperado, cuenta: FiltroDeLado()),
        ReglaMetadatos(lambda contenidoEsperado, cuenta: FiltroCentral()),
        # These filters need to know which content type they oppose.
        ReglaMetadatos(lambda contenidoEsperado, cuenta:
                       FiltroDeOpuestosDeFila(contenidoEsperado), opciones,
                       necesitaContenidoEspecífico=True),
        ReglaMetadatos(lambda contenidoEsperado, cuenta: FiltroDeOpuestosDeColumna(
            contenidoEsperado), opciones, necesitaContenidoEspecífico=True),
        ReglaMetadatos(lambda contenidoEsperado, cuenta: FiltroDeOpuestosDeDiagonal(
            contenidoEsperado), opciones, necesitaContenidoEspecífico=True),
    ]
    genes = list()
    for gene in geneSet:
        genes.extend(gene.crear_reglas())
    print("creado " + str(len(genes)) + " genes")
    return genes
class TicTacToeTests(unittest.TestCase):
    """End-to-end tests that evolve tic-tac-toe rule sets."""
    def test_conocimiento_perfecto(self):
        """Evolve a rule set that reaches the target 'perfect play' fitness."""
        mínGenes = 10
        máxGenes = 20
        geneSet = crear_geneSet()
        horaInicio = datetime.datetime.now()
        def fnMostrar(candidato):
            mostrar(candidato, horaInicio)
        def fnObtenerAptitud(genes):
            return obtener_aptitud(genes)
        recuentoDeMutaciones = [1]
        operadoresDeMutación = [
            partial(mudar_añadir, geneSet=geneSet),
            partial(mudar_reemplazar, geneSet=geneSet),
            mudar_remover,
            mudar_intercambiar_adyacente,
            mudar_mover,
        ]
        def fnMudar(genes):
            mudar(genes, fnObtenerAptitud, operadoresDeMutación, recuentoDeMutaciones)
        def fnIntercambio(padre, donante):
            # Crossover: first half of the parent + second half of the donor,
            # followed by a mutation pass on the child.
            niño = padre[0:int(len(padre) / 2)] + \
                   donante[int(len(donante) / 2):]
            fnMudar(niño)
            return niño
        def fnCrear():
            return random.sample(geneSet, random.randrange(mínGenes, máxGenes))
        # Target: 620 wins, 120 ties, no losses, using at most 11 genes.
        aptitudÓptima = Aptitud(620, 120, 0, 11)
        mejor = genetic.obtener_mejor(fnObtenerAptitud, mínGenes, aptitudÓptima, None,
                                      fnMostrar, fnMudar, fnCrear, edadMáxima=500,
                                      tamañoDePiscina=20, intercambiar=fnIntercambio)
        self.assertTrue(not aptitudÓptima > mejor.Aptitud)
    def test_tornament(self):
        """Evolve rule sets by round-robin tournament instead of a fixed optimum."""
        mínGenes = 10
        máxGenes = 20
        geneSet = crear_geneSet()
        horaInicio = datetime.datetime.now()
        def fnMostrar(genes, ganados, empates, perdidos, generación):
            print("-- generación {} --".format(generación))
            mostrar(genetic.Cromosoma(genes,
                                      Aptitud(ganados, empates, perdidos, len(genes)),
                                      None), horaInicio)
        recuentoDeMutaciones = [1]
        operadoresDeMutación = [
            partial(mudar_añadir, geneSet=geneSet),
            partial(mudar_reemplazar, geneSet=geneSet),
            mudar_remover,
            mudar_intercambiar_adyacente,
            mudar_mover,
        ]
        def fnMudar(genes):
            # Tournament mode has no standalone fitness, so mutation never
            # observes an improvement (constant 0 fitness).
            mudar(genes, lambda x: 0, operadoresDeMutación, recuentoDeMutaciones)
        def fnIntercambio(padre, donante):
            niño = padre[0:int(len(padre) / 2)] + \
                   donante[int(len(donante) / 2):]
            fnMudar(niño)
            return niño
        def fnCrear():
            return random.sample(geneSet, random.randrange(mínGenes, máxGenes))
        def fnClaveDeOrden(genes, ganados, empates, perdidos):
            # Rank: losses dominate, then ties; prefer shorter rule sets.
            return -1000 * perdidos - empates + 1 / len(genes)
        genetic.torneo(fnCrear, fnIntercambio, jugar1en1, fnMostrar,
                       fnClaveDeOrden, 13)
class TipoDeContenido:
    """String constants naming the possible contents of a board square."""
    Vacío = 'VACÍO'  # empty square
    Mia = 'MIA'  # a square holding my piece
    Oponente = 'OPONENTE'  # a square holding the opponent's piece
class Cuadrado:
def __init__(self, índice, contenido=TipoDeContenido.Vacío):
self.Contenido = contenido
self.Índice = índice
self.Diagonales = []
# diseño del tablero es
# 1 2 3
# 4 5 6
# 7 8 9
self.EsCentro = False
self.EsEsquina = False
self.EsLado = False
self.EsFilaSuperior = False
self.EsFilaDelMedio = False
self.EsFilaInferior = False
self.EsColumnaIzquierda = False
self.EsColumnaEnMedio = False
self.EsColumnaDerecha = False
self.Fila = None
self.Columna = None
self.OpuestoDeDiagonal = None
self.OpuestoDeFila = None
self.OpuestoDeColumna = None
if índice == 1 or índice == 2 or índice == 3:
self.EsFilaSuperior = True
self.Fila = [1, 2, 3]
elif índice == 4 or índice == 5 or índice == 6:
self.EsFilaDelMedio = True
self.Fila = [4, 5, 6]
elif índice == 7 or índice == 8 or índice == 9:
self.EsFilaInferior = True
self.Fila = [7, 8, 9]
if índice % 3 == 1:
self.Columna = [1, 4, 7]
self.EsColumnaIzquierda = True
elif índice % 3 == 2:
self.Columna = [2, 5, 8]
self.EsColumnaEnMedio = True
elif índice % 3 == 0:
self.Columna = [3, 6, 9]
self.EsColumnaDerecha = True
if índice == 5:
self.EsCentro = True
else:
if índice == 1 or índice == 3 or índice == 7 or índice == 9:
self.EsEsquina = True
elif índice == 2 or índice == 4 or índice == 6 or índice == 8:
self.EsLado = True
if índice == 1:
self.OpuestoDeFila = 3
self.OpuestoDeColumna = 7
self.OpuestoDeDiagonal = 9
elif índice == 2:
self.OpuestoDeColumna = 8
elif índice == 3:
self.OpuestoDeFila = 1
self.OpuestoDeColumna = 9
self.OpuestoDeDiagonal = 7
elif índice == 4:
self.OpuestoDeFila | |
a discrete intensity grid
:param xy: observed spatial locations as a two-column vector
:param w: observation window, i.e. discrete grid to be mapped to, [xmin xmax ymin ymax]
:param nt: two-element vector defining number of bins in both directions
"""
# Make grid
x = nnp.linspace(w[0], w[1], nt[0] + 1)
y = nnp.linspace(w[2], w[3], nt[1] + 1)
X, Y = nnp.meshgrid(x, y)
# Count points
N = nnp.zeros([nt[1], nt[0]])
for i in range(nt[0]):
for j in range(nt[1]):
ind = (xy[:, 0] >= x[i]) & (xy[:, 0] < x[i + 1]) & (xy[:, 1] >= y[j]) & (xy[:, 1] < y[j + 1])
N[j, i] = nnp.sum(ind)
return X[:-1, :-1].T, Y[:-1, :-1].T, N.T
def plot(model, it_num, ax=None):
    """Plot a 1D model's posterior mean with a 95% interval and save a frame.

    Writes 'output/output_%04d.png' (numbered by it_num) and closes the
    current figure.
    NOTE(review): mixes the local `ax` handle with global `plt.*` state — if
    a caller supplies its own `ax` on a different figure, the plt calls
    (plot, xlim, title, savefig) still target the *current* figure; confirm
    callers rely on the default ax=None path.
    """
    post_mean, post_var, _, nlpd = model.predict()
    if ax is None:
        fig, ax = plt.subplots(1, 1)
    # 95% credible interval: mean +/- 1.96 * posterior std.
    lb = post_mean[:, 0] - 1.96 * post_var[:, 0] ** 0.5
    ub = post_mean[:, 0] + 1.96 * post_var[:, 0] ** 0.5
    ax.plot(model.t_train, model.y, 'k.', label='training observations')
    plt.plot(model.t_all[model.test_id], model.y_all[model.test_id], 'r.', alpha=0.4, label='test observations')
    ax.plot(model.t_all, post_mean, 'b', label='posterior mean')
    ax.fill_between(model.t_all[:, 0], lb, ub, color='b', alpha=0.05, label='95% confidence')
    ax.legend(loc=1)
    plt.xlim([model.t_test[0], model.t_test[-1]])
    plt.title('Test NLPD: %1.2f' % nlpd)
    plt.xlabel('time - $t$')
    plt.savefig('output/output_%04d.png' % it_num)
    plt.close()
def plot_2d_classification(m, it_num):
    """Visualize a 2D binary classifier's posterior and save a numbered frame.

    Plots the link-transformed posterior mean as an image over the square
    [-2.8, 2.8]^2, draws the decision boundary (mu == 0) and overlays the
    training points of both classes, then writes 'output/output_%04d.png'.
    Assumes the posterior mean lies on a 100x100 grid matching Xtest/Ytest —
    TODO confirm against m.predict(return_full=True).
    """
    mu, var, _, nlpd_test, _, _ = m.predict(return_full=True)
    mu = np.squeeze(mu)
    lim = 2.8
    label0, label1 = -1., 1.  # class labels are +/-1
    # Two-point colour map (orange -> blue) interpolated in HSV space through
    # the likelihood's link function, so colour tracks class probability.
    cmap_ = [[1, 0.498039215686275, 0.0549019607843137], [0.12156862745098, 0.466666666666667, 0.705882352941177]]
    cmap = hsv_to_rgb(
        interp1d([label0, label1], rgb_to_hsv(cmap_), axis=0
                 )(m.likelihood.link_fn(nnp.linspace(-3.5, 3.5, num=64))))
    newcmp = ListedColormap(cmap)
    Xtest, Ytest = nnp.mgrid[-2.8:2.8:100j, -2.8:2.8:100j]
    plt.figure()
    im = plt.imshow(m.likelihood.link_fn(mu).T, cmap=newcmp, extent=[-lim, lim, -lim, lim], origin='lower',
                    vmin=label0, vmax=label1)
    cb = plt.colorbar(im)
    cb.set_ticks([cb.vmin, 0, cb.vmax])
    cb.set_ticklabels([-1, 0, 1])
    # Decision boundary: zero level set of the latent mean.
    plt.contour(Xtest, Ytest, mu, levels=[.0], colors='k', linewidths=1.5)
    for label in [1, 0]:
        ind = m.y[:, 0] == label
        plt.scatter(m.t_train[ind, 0], m.t_train[ind, 1], s=50, alpha=.5, edgecolor='k')
    # Hide axis ticks and labels for a clean animation frame.
    plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
    plt.tick_params(axis='y', which='both', right=False, left=False, labelleft=False)
    plt.title('Iteration: %02d' % (it_num + 1), loc='right', fontweight='bold')
    plt.savefig('output/output_%04d.png' % it_num)
    plt.close()
def plot_2d_classification_filtering(m, it_num, plot_num, mu_prev=None):
    """Save two animation frames of a 2D classifier's filtering/smoothing pass.

    Starting from the previous surface (`mu_prev`, or zeros on the first
    call), the first frame shows all columns overwritten with the filtered
    mean, the second with the smoothed mean; a vertical line marks the sweep
    front. Returns the next frame number and the final plotted surface so
    the caller can chain iterations.
    """
    mu, var, _, nlpd_test, mu_filt, var_filt = m.predict_2d(return_full=True)
    mu, mu_filt = np.squeeze(mu), np.squeeze(mu_filt)
    if mu_prev is None:
        mu_plot = nnp.zeros_like(mu)
    else:
        mu_plot = mu_prev
    lim = 2.8
    label0, label1 = -1., 1.  # class labels are +/-1
    # Orange/blue colour map interpolated through the likelihood's link fn.
    cmap_ = [[1, 0.498039215686275, 0.0549019607843137], [0.12156862745098, 0.466666666666667, 0.705882352941177]]
    cmap = hsv_to_rgb(
        interp1d([label0, label1], rgb_to_hsv(cmap_), axis=0
                 )(m.likelihood.link_fn(nnp.linspace(-3.5, 3.5, num=64))))
    newcmp = ListedColormap(cmap)
    Xtest, Ytest = nnp.mgrid[-lim:lim:100j, -lim:lim:100j]
    # Forward sweep: copy the filtered mean column by column.
    for i in range(Xtest.shape[0]):
        mu_plot[i] = mu_filt[i]
    plt.figure()
    im = plt.imshow(m.likelihood.link_fn(mu_plot).T, cmap=newcmp, extent=[-lim, lim, -lim, lim], origin='lower',
                    vmin=label0, vmax=label1)
    cb = plt.colorbar(im)
    cb.set_ticks([cb.vmin, 0, cb.vmax])
    cb.set_ticklabels([-1, 0, 1])
    for label in [1, 0]:
        ind = m.y[:, 0] == label
        plt.scatter(m.t_train[ind, 0], m.t_train[ind, 1], s=50, alpha=.5, edgecolor='k')
    # Vertical line at the last column updated by the sweep above.
    plt.plot([Xtest[i, 0], Xtest[i, 0]], [-lim, lim], 'k', alpha=0.4)
    plt.title('Iteration: %02d' % (it_num + 1), loc='right', fontweight='bold')
    plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
    plt.tick_params(axis='y', which='both', right=False, left=False, labelleft=False)
    plt.xlim(-lim, lim)
    plt.ylim(-lim, lim)
    plt.savefig('output/output_%04d.png' % plot_num)
    plt.close()
    plot_num += 1
    # Backward sweep: overwrite with the smoothed mean, right to left.
    for i in range(Xtest.shape[0] - 1, -1, -1):
        mu_plot[i] = mu[i]
    plt.figure()
    im = plt.imshow(m.likelihood.link_fn(mu_plot).T, cmap=newcmp, extent=[-lim, lim, -lim, lim], origin='lower',
                    vmin=label0, vmax=label1)
    cb = plt.colorbar(im)
    cb.set_ticks([cb.vmin, 0, cb.vmax])
    cb.set_ticklabels([-1, 0, 1])
    for label in [1, 0]:
        ind = m.y[:, 0] == label
        plt.scatter(m.t_train[ind, 0], m.t_train[ind, 1], s=50, alpha=.5, edgecolor='k')
    plt.plot([Xtest[i, 0], Xtest[i, 0]], [-lim, lim], 'k', alpha=0.4)
    plt.title('Iteration: %02d' % (it_num + 1), loc='right', fontweight='bold')
    plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
    plt.tick_params(axis='y', which='both', right=False, left=False, labelleft=False)
    plt.xlim(-lim, lim)
    plt.ylim(-lim, lim)
    plt.savefig('output/output_%04d.png' % plot_num)
    plt.close()
    plot_num += 1
    return plot_num, mu_plot
def mvhermgauss(H: int, D: int):
    """
    This function is taken from GPflow: https://github.com/GPflow/GPflow
    Copied here rather than imported so that users don't need to install gpflow to use kalman-jax
    LICENSE:
        Copyright The Contributors to the GPflow Project. All Rights Reserved.
        Licensed under the Apache License, Version 2.0 (the "License");
        you may not use this file except in compliance with the License.
        You may obtain a copy of the License at
        http://www.apache.org/licenses/LICENSE-2.0
        Unless required by applicable law or agreed to in writing, software
        distributed under the License is distributed on an "AS IS" BASIS,
        WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
        See the License for the specific language governing permissions and
        limitations under the License.
    Return the evaluation locations 'xn', and weights 'wn' for a multivariate
    Gauss-Hermite quadrature, approximating integrals of the form
        int exp(-x)*f(x) dx ~ sum_i w[i,:]*f(x[i,:])
    :param H: Number of Gauss-Hermite evaluation points.
    :param D: Number of input dimensions. Needs to be known at call-time.
    :return: eval_locations 'x' (H**DxD), weights 'w' (H**D)
    """
    nodes1d, weights1d = hermgauss(H)
    # Cartesian product of the 1D rule over D dimensions: H**D points in R^D.
    x = np.array(list(itertools.product(nodes1d, repeat=D)))
    # Each multivariate weight is the product of its D one-dimensional weights.
    w = np.array(list(itertools.product(weights1d, repeat=D))).prod(axis=1)
    return x, w
def gauss_hermite(dim=1, num_quad_pts=20):
    """Return sigma-points and weights for Gauss-Hermite cubature.

    The raw Hermite rule integrates against exp(-x^2); scaling the nodes by
    sqrt(2) and the weights by pi^(-dim/2) turns it into an expectation
    under a standard Gaussian.
    """
    raw_pts, raw_weights = mvhermgauss(num_quad_pts, dim)
    sigma_pts = np.sqrt(2) * raw_pts.T
    weights = raw_weights.T * pi ** (-0.5 * dim)  # normalize by 1/sqrt(pi)^dim
    return sigma_pts, weights
def symmetric_cubature_third_order(dim=1, kappa=None):
    """Return sigma-points and weights for the symmetric third-order cubature
    rule (the standard CKF rule when kappa=0) in `dim` dimensions.

    The dim <= 3, kappa == 0 cases are hard-coded; other combinations fall
    back to the generic centre-plus-unit-vectors construction.
    """
    if kappa is None:
        kappa = 0  # CKF convention (an alternative choice is kappa = 1 - dim)
    if (dim == 1) and (kappa == 0):
        weights = np.array([0., 0.5, 0.5])
        sigma_pts = np.array([0., 1., -1.])
    elif (dim == 2) and (kappa == 0):
        weights = np.array([0., 0.25, 0.25, 0.25, 0.25])
        sigma_pts = np.block([[0., 1.4142, 0., -1.4142, 0.],
                              [0., 0., 1.4142, 0., -1.4142]])
    elif (dim == 3) and (kappa == 0):
        weights = np.array([0., 0.1667, 0.1667, 0.1667, 0.1667, 0.1667, 0.1667])
        sigma_pts = np.block([[0., 1.7321, 0., 0., -1.7321, 0., 0.],
                              [0., 0., 1.7321, 0., 0., -1.7321, 0.],
                              [0., 0., 0., 1.7321, 0., 0., -1.7321]])
    else:
        # Generic rule: centre point plus +/- scaled unit vectors.
        weights = np.zeros([1, 2 * dim + 1])
        weights = index_add(weights, index[0, 0], kappa / (dim + kappa))
        peso_lateral = 1 / (2 * (dim + kappa))
        for j in range(1, 2 * dim + 1):
            weights = index_add(weights, index[0, j], peso_lateral)
        sigma_pts = np.block([np.zeros([dim, 1]), np.eye(dim), - np.eye(dim)])
        sigma_pts = np.sqrt(dim + kappa) * sigma_pts
    return sigma_pts, weights
def symmetric_cubature_fifth_order(dim=1):
"""
Return weights and sigma-points for the symmetric cubature rule of order 5
"""
if dim == 1:
weights = np.array([0.6667, 0.1667, 0.1667])
sigma_pts = np.array([0., 1.7321, -1.7321])
elif dim == 2:
weights = np.array([0.4444, 0.1111, 0.1111, 0.1111, 0.1111, 0.0278, 0.0278, 0.0278, 0.0278])
sigma_pts = np.block([[0., 1.7321, -1.7321, 0., 0., 1.7321, -1.7321, 1.7321, -1.7321],
[0., 0., 0., 1.7321, -1.7321, 1.7321, -1.7321, -1.7321, 1.7321]])
elif dim == 3:
weights = np.array([0.3333, 0.0556, 0.0556, 0.0556, 0.0556, 0.0556, 0.0556, 0.0278, 0.0278, 0.0278,
0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278])
sigma_pts = np.block([[0., 1.7321, -1.7321, 0., 0., 0., 0., 1.7321, -1.7321, 1.7321, -1.7321, 1.7321,
-1.7321, 1.7321, -1.7321, 0., 0., 0., 0.],
[0., 0., 0., 1.7321, -1.7321, 0., 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0.,
0., 1.7321, -1.7321, 1.7321, -1.7321],
[0., 0., 0., 0., 0., 1.7321, -1.7321, 0., 0., 0., 0., 1.7321, -1.7321, -1.7321,
1.7321, 1.7321, -1.7321, -1.7321, 1.7321]])
elif dim == 6:
| |
the encoder.
mean = np.array(transform['mean']).reshape(3, 1, 1)
std = np.array(transform['std']).reshape(3, 1, 1)
image = (image - mean) / std
image = image.astype(np.float32)
return image, gt_image, load_dict
    def resize_label_image(self, image, gt_image):
        """Resize an image / label pair to the configured patch size.

        Uses cubic interpolation for the image and nearest-neighbour for the
        labels so no new label values are invented; the assert enforces that.
        NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 — this
        requires an old pinned SciPy (see linked issue); consider migrating.
        """
        size = self.conf['transform']['patch_size']
        # https://github.com/scipy/scipy/issues/4458#issuecomment-269067103
        image_r = scipy.misc.imresize(image, size=size, interp='cubic')
        gt_image_r = scipy.misc.imresize(gt_image, size=size, interp='nearest')
        assert(np.all(np.unique(gt_image_r) == np.unique(gt_image)))
        return image_r, gt_image_r
    def color_transform(self, image, gt_image, augmentation_level=1):
        """Randomly colour-jitter the image; labels pass through unchanged.

        Jitter is applied only when the configured
        `color_augmentation_level` is positive.
        NOTE(review): the `augmentation_level` parameter is unused — confirm
        whether it was meant to scale jitter strength. The disabled
        `if False:` branch holds an old joint image/label rotation
        experiment (it references self.rotate) and is kept for reference.
        """
        f = torchvision.transforms.functional  # NOQA
        pil_img = self.to_img(image)
        # assert(np.all(to_np(pil_img) == image))  # TODO make test case
        # gt_image = gt_image.astype(np.uint32)
        if self.conf['transform']['color_augmentation_level'] > 0:
            pil_img = self.color_jitter(pil_img)
        if False:
            pil_gt = Image.fromarray(gt_image + 1)
            assert(np.all(to_np(pil_gt) == gt_image))
            # TODO make test case
            img_r, gt_img_r = self.rotate(pil_img, pil_gt)
            image = to_np(img_r)
            gt_image_r = to_np(gt_img_r)
            # Labels were shifted +1 for PIL; shift back, mapping 0 -> 255.
            gt_image_r[gt_image_r == 0] = 256
            gt_image_r = gt_image_r - 1
            assert(np.all(np.unique(gt_image_r) == np.unique(gt_image)))
            gt_image = gt_image_r
        else:
            image = to_np(pil_img)
        return image, gt_image
def to_np(img):
    """Convert a PIL image (or any array-like) into a fresh int32 numpy array."""
    return np.asarray(img).astype(np.int32, copy=True)
def roll_img(image, gt_image):
    """Shift image and labels horizontally by half the image width (wrap-around)."""
    # The split point comes from the image; the labels are assumed to share
    # its width — TODO confirm against callers.
    mid = image.shape[1] // 2
    image_rolled = np.concatenate([image[:, mid:], image[:, :mid]], axis=1)
    gt_image_rolled = np.concatenate([gt_image[:, mid:], gt_image[:, :mid]], axis=1)
    return image_rolled, gt_image_rolled
def random_equi_rotation(image, gt_image):
    """Apply a random spherical rotation to an equirectangular image/label pair.

    Yaw is uniform over the full circle; roll and pitch are limited to +/-5%
    of a full turn each way. The labels are re-extracted with the same
    rotation and rounded back to integers.
    """
    yaw = 2 * np.pi * random.random()
    roll = 2 * np.pi * (random.random() - 0.5) * 0.1
    pitch = 2 * np.pi * (random.random() - 0.5) * 0.1
    rotation_angles = np.array([yaw, roll, pitch])
    image_res = np.zeros(image.shape)
    gtimage_res = np.zeros(gt_image.shape)
    extractEquirectangular_quick(
        True, image, image_res, Algebra.rotation_matrix(rotation_angles))
    extractEquirectangular_quick(
        True, gt_image, gtimage_res, Algebra.rotation_matrix(rotation_angles))
    # Extraction returns floats; +0.1 before the cast guards against a label
    # like 4.9999 truncating down to 4.
    gtimage_res = (gtimage_res + 0.1).astype(np.int32)
    if DEBUG:
        # Sanity check: the rotation should neither create nor drop classes.
        if not np.all(np.unique(gtimage_res) == np.unique(gt_image)):
            logging.warning("np.unique(gt_image ) {}".format(
                np.unique(gt_image)))
            logging.warning("np.unique(gt_image_res) {}".format(
                np.unique(gtimage_res)))
            for i in np.unique(gtimage_res):
                if i == 255:
                    continue
                else:
                    if i not in np.unique(gt_image):
                        logging.error("Equirectangular removed classes.")
                        assert i in np.unique(gt_image)
    return image_res, gtimage_res
def random_crop_soft(image, gt_image, max_crop, crop_chance):
    """Randomly crop up to `max_crop` pixels off the borders of both arrays.

    With probability `crop_chance` rows/columns are removed from the
    top-left; otherwise from the bottom-right (one extra pixel there, since
    the offsets are incremented to keep the negative slice non-empty).
    Both arrays are cropped identically so image and labels stay aligned.

    Bug fix: the branch probability was hard-coded to 0.8 and the
    `crop_chance` parameter was silently ignored; it is now honoured.
    """
    offset_x = random.randint(0, max_crop)
    offset_y = random.randint(0, max_crop)
    if random.random() < crop_chance:
        image = image[offset_x:, offset_y:]
        gt_image = gt_image[offset_x:, offset_y:]
    else:
        offset_x += 1
        offset_y += 1
        image = image[:-offset_x, :-offset_y]
        gt_image = gt_image[:-offset_x, :-offset_y]
    return image, gt_image
def crop_to_size(image, gt_image, patch_size):
    """Randomly crop image and labels to `patch_size` (height, width).

    Dimensions already no larger than the patch are left untouched
    (offset 0, slice past the end is harmless).
    """
    height, width = patch_size[0], patch_size[1]
    src_height, src_width = image.shape[0], image.shape[1]
    # Pick an independent random corner offset for each dimension.
    off_y = random.randint(0, src_width - width) if src_width > width else 0
    off_x = random.randint(0, max(src_height - height, 0)) if src_height > height else 0
    image = image[off_x:off_x + height, off_y:off_y + width]
    gt_image = gt_image[off_x:off_x + height, off_y:off_y + width]
    return image, gt_image
def random_resize(image, gt_image, lower_size, upper_size, sig):
    """Rescale image and labels by one shared random factor.

    The factor is drawn via skewed_normal (downscaling compressed 2x); the
    image uses cubic interpolation and the labels nearest-neighbour.
    NOTE(review): relies on scipy.misc.imresize, which was removed in
    SciPy 1.3 — needs an old pinned SciPy. The `if False:` block and the
    trailing triple-quoted string are alternative implementations kept for
    reference only.
    """
    factor = skewed_normal(mean=1, std=sig, lower=lower_size, upper=upper_size)
    # zoom = [factor, factor, 1]
    # image = scipy.ndimage.interpolation.zoom(image, zoom, order=3)
    # gt_image2 = scipy.ndimage.interpolation.zoom(gt_image, factor, order=0)
    # image3 = skimage.transform.resize(image, new_shape, order=3)
    # gt_image3 = skimage.transform.resize(gt_image, gt_shape, order=0)
    if False:
        new_shape = (image.shape * np.array([factor, factor, 1])).astype(
            np.uint32)
        gt_shape = (gt_image.shape * np.array(factor)).astype(np.uint32)
        image_ones = image.astype(np.float) / np.max(image)
        image3 = skimage.transform.resize(
            image_ones, new_shape, order=3, mode='reflect', anti_aliasing=True)
        image2 = image3 * np.max(image)
        gt_ones = gt_image.astype(np.float) / np.max(gt_image)
        gt_image3 = skimage.transform.resize(
            gt_ones, gt_shape, order=0, mode='reflect', anti_aliasing=False)
        gt_image2 = (gt_image3 * np.max(gt_image) + 0.5).astype(np.int32)
    image2 = scipy.misc.imresize(image, size=factor, interp='cubic')
    gt_image2 = scipy.misc.imresize(gt_image, size=factor, interp='nearest')
    """
    new_shape = (image.shape * np.array([factor, factor, 1])).astype(np.uint32)
    gt_shape = (gt_image.shape * np.array(factor)).astype(np.uint32)
    img = scipy.misc.toimage(image, cmin=0, cmax=255)
    img = img.resize(new_shape[0:2][::-1], 3)
    image2 = np.array(img)
    gt_img = scipy.misc.toimage(gt_image, cmin=0, cmax=255, mode='I')
    gt_img = gt_img.resize(gt_shape[::-1], 0)
    gt_image2 = np.array(gt_img)
    """
    # Nearest-neighbour resizing should keep the label set unchanged.
    if DEBUG and not np.all(np.unique(gt_image2) == np.unique(gt_image)):
        logging.warning("np.unique(gt_image2) {}".format(np.unique(gt_image2)))
        logging.warning("np.unique(gt_image) {}".format(np.unique(gt_image)))
        for i in np.unique(gt_image2):
            if i == 255:
                continue
            else:
                assert i in np.unique(gt_image)
    assert(image2.shape == gt_image2.shape)
    return image2, gt_image2
def random_rotation(image, gt_image,
                    std=3.5, lower=-10, upper=10, expand=True):
    """Rotate an image / label pair by a random truncated-normal angle.

    The image is rotated with cubic interpolation (fill value 127); the
    labels with nearest-neighbour interpolation (fill value 255, which the
    rest of this module treats as the ignore label).  `expand` is currently
    unused — kept for interface compatibility.

    Bug fix: removed the stray `gt_image[10, 10] = 255` debug write, which
    mutated the caller's label array in place, together with the dead
    `if False:` verification block it supported.
    """
    assert lower < upper
    assert std > 0
    angle = truncated_normal(mean=0, std=std, lower=lower,
                             upper=upper)
    image_r = scipy.ndimage.rotate(image, angle, order=3, cval=127)
    gt_image_r = scipy.ndimage.rotate(gt_image, angle, order=0, cval=255)
    return image_r, gt_image_r
def random_shear(image, gt_image, std=3.5,
                 lower=-10, upper=10, expand=True):
    """Shear the image/label pair by a random truncated-normal angle.

    The image is warped on a 0-1 scaled copy and rescaled back (+0.4 rounds
    before the int cast); labels use order=0 so values are preserved.
    NOTE(review): `gt_image[10, 10] = 255` mutates the caller's label array
    in place; it appears to exist only to support the DEBUG uniqueness
    check below — confirm and consider removing. `expand` is unused.
    """
    assert lower < upper
    assert std > 0
    angle = truncated_normal(mean=0, std=std, lower=lower,
                             upper=upper)
    # Angle-to-radians conversion uses /360, i.e. half the usual pi/180.
    pi_angle = angle * np.pi / 360
    afine_tf = tf.AffineTransform(shear=pi_angle)
    image_r = (tf.warp(image / 255, inverse_map=afine_tf) * 255 + 0.4)\
        .astype(np.int)
    gt_image_r = tf.warp(gt_image / 255, inverse_map=afine_tf,
                         order=0)
    gt_image_r = ((255 * gt_image_r) + 0.4).astype(np.int)
    gt_image[10, 10] = 255
    if DEBUG:
        if not np.all(np.unique(gt_image_r) == np.unique(gt_image)):
            logging.info("np.unique(gt_image_r): {}".format(
                np.unique(gt_image_r)))
            logging.info("np.unique(gt_image): {}".format(np.unique(gt_image)))
            assert(False)
    return image_r, gt_image_r
def skewed_normal(mean=1, std=0, lower=0.5, upper=2):
    """Sample a factor around `mean` with the downward side compressed 2x.

    Draws a zero-mean normal offset, halves negative offsets, and rejects
    samples outside the open interval (lower, upper).
    """
    while True:
        offset = random.normalvariate(0, std)
        if offset < 0:
            offset *= 0.5
        candidate = mean + offset
        if lower < candidate < upper:
            return candidate
def truncated_normal(mean=0, std=0, lower=-0.5, upper=0.5):
    """Rejection-sample a normal variate restricted to the open (lower, upper)."""
    while True:
        sample = random.normalvariate(mean, std)
        if lower < sample < upper:
            return sample
class ColorJitter(object):
    """Randomly change the brightness, contrast and saturation of an image.

    Unlike torchvision's ColorJitter, the factors are drawn from the
    module's skewed/truncated normal samplers rather than uniformly.

    Args:
        brightness (float): std of the brightness jitter; brightness_factor
            is drawn around 1 via skewed_normal.
        contrast (float): std of the contrast jitter; contrast_factor
            is drawn around 1 via skewed_normal.
        saturation (float): std of the saturation jitter; saturation_factor
            is drawn around 1 via skewed_normal.
        hue (float): std of the hue jitter; hue_factor is drawn around 0 via
            truncated_normal. Should be >=0 and <= 0.5.
    """
    def __init__(self, brightness=0.3,
                 contrast=0.25, saturation=0.3, hue=0.02):
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue
    @staticmethod
    def get_params(brightness, contrast, saturation, hue):
        """Build a randomized jitter transform.

        Arguments are the same as those of __init__. Returns a Compose that
        adjusts brightness, contrast, saturation and hue in a random order;
        any parameter <= 0 disables the corresponding adjustment.
        """
        f = torchvision.transforms.functional
        Lambda = torchvision.transforms.Lambda  # NOQA
        Compose = torchvision.transforms.Compose  # NOQA
        transforms = []
        if brightness > 0:
            br_factor = skewed_normal(mean=1, std=brightness)
            tfm = Lambda(lambda img: f.adjust_brightness(img, br_factor))
            transforms.append(tfm)
        if contrast > 0:
            ct_factor = skewed_normal(mean=1, std=contrast)
            cfm = Lambda(lambda img: f.adjust_contrast(img, ct_factor))
            transforms.append(cfm)
        if saturation > 0:
            sat = skewed_normal(mean=1, std=saturation)
            transforms.append(
                Lambda(lambda img: f.adjust_saturation(img, sat)))
        if hue > 0:
            hue_factor = truncated_normal(mean=0, std=hue)
            transforms.append(
                Lambda(lambda img: f.adjust_hue(img, hue_factor)))
        # Shuffle so the adjustments compose in a random order each call.
        np.random.shuffle(transforms)
        transform = Compose(transforms)
        return transform
    def __call__(self, img):
        """
        Args:
            img (PIL Image): Input image.
        Returns:
            PIL Image: Color jittered image.
        """
        transform = self.get_params(self.brightness, self.contrast,
                                    self.saturation, self.hue)
        return transform(img)
class RandomRotation(object):
    """Rotate an image / label pair by the same random angle.

    Args:
        degrees (sequence or float or int): Range of degrees to select from.
            If degrees is a number instead of sequence like (min, max),
            the range of degrees will be (-degrees, +degrees).
        std (float): std of the truncated normal the angle is drawn from.
        resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC},
            optional):
            An optional resampling filter for the image.
            See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters # NOQA
            If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (2-tuple, optional): Optional center of rotation.
            Origin is the upper left corner.
            Default is the center of the image.
    """
    def __init__(self, degrees, std=3, resample=False,
                 expand=False, center=None):
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError(
                    "If degrees is a single number, it must be positive.")
            self.degrees = (-degrees, degrees)
        else:
            if len(degrees) != 2:
                raise ValueError(
                    "If degrees is a sequence, it must be of len 2.")
            self.degrees = degrees
        self.resample = resample
        self.expand = expand
        self.center = center
        self.std = std
    @staticmethod
    def get_params(degrees, std):
        """Draw a rotation angle from a truncated normal within `degrees`.

        Returns:
            float: angle to be passed to ``rotate``.
        """
        assert(degrees[0] < degrees[1])
        angle = truncated_normal(mean=0, std=std,
                                 lower=degrees[0],
                                 upper=degrees[1])
        return angle
    def __call__(self, img, gt_image):
        """Rotate `img` and `gt_image` by one shared random angle.

        The label image is always rotated with resample=False (nearest
        neighbour) so label values are preserved.

        Returns:
            (PIL Image, PIL Image): rotated image and rotated labels.
        """
        angle = self.get_params(self.degrees, self.std)
        f = torchvision.transforms.functional
        img = f.rotate(img, angle, self.resample, self.expand, self.center)
        gt_img = f.rotate(gt_image, angle, False, self.expand, self.center)
        return img, gt_img
if __name__ == '__main__': # NOQA
conf = default_conf.copy()
conf["dataset"] = "blender_mini"
loader = LocalSegmentationLoader(conf=conf)
test | |
#!/usr/bin/env python3
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility to manipulate Chrome OS disk & firmware images for manufacturing.
Run "image_tool help" for more info and a list of subcommands.
To add a subcommand, just add a new SubCommand subclass to this file.
"""
import argparse
import contextlib
import copy
from distutils import version as version_utils
import errno
import glob
import inspect
import json
import logging
import os
import pipes
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import time
import urllib.parse # pylint: disable=import-error,no-name-in-module
import yaml
# The edit_lsb command works better if readline enabled, but will still work if
# that is not available.
try:
import readline # pylint: disable=unused-import
except ImportError:
pass
# This file needs to run on various environments, for example a fresh Ubuntu
# that does not have Chromium OS source tree nor chroot. So we do want to
# prevent introducing more cros.factory dependency except very few special
# modules (pygpt, fmap, netboot_firmware_settings).
# Please don't add more cros.factory modules.
# TODO(kerker) Find a way to remove this in future
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))), 'py_pkg'))
from cros.factory.utils import fmap # pylint: disable=wrong-import-position
from cros.factory.utils import pygpt # pylint: disable=wrong-import-position
from cros.factory.tools import netboot_firmware_settings # pylint: disable=wrong-import-position
# Partition index for Chrome OS stateful partition.
PART_CROS_STATEFUL = 1
# Partition index for Chrome OS kernel A.
PART_CROS_KERNEL_A = 2
# Partition index for Chrome OS rootfs A.
PART_CROS_ROOTFS_A = 3
# Partition index for Chrome OS kernel B.
PART_CROS_KERNEL_B = 4
# Partition index for Chrome OS rootfs B.
PART_CROS_ROOTFS_B = 5
# Partition index for ChromeOS MiniOS B.
PART_CROS_MINIOS_B = 10
# Special options to mount Chrome OS rootfs partitions. (-t ext2, -o ro).
FS_TYPE_CROS_ROOTFS = 'ext2'
# Relative path of firmware updater on Chrome OS disk images.
PATH_CROS_FIRMWARE_UPDATER = '/usr/sbin/chromeos-firmwareupdate'
# Relative path of lsb-factory in factory installer.
PATH_LSB_FACTORY = os.path.join('dev_image', 'etc', 'lsb-factory')
# Preflash disk image default board name.
PREFLASH_DEFAULT_BOARD = 'preflash'
# Relative path of payload metadata in a preflash disk image.
PATH_PREFLASH_PAYLOADS_JSON = os.path.join(
'dev_image', 'etc', '%s.json' % PREFLASH_DEFAULT_BOARD)
# Relative path of RMA image metadata.
CROS_RMA_METADATA = 'rma_metadata.json'
# Mode for new created folder, 0755 = u+rwx, go+rx
MODE_NEW_DIR = 0o755
# Regular expression for parsing LSB value, which should be sh compatible.
RE_LSB = re.compile(r'^ *(.*)="?(.*[^"])"?$', re.MULTILINE)
# Key for Chrome OS board name in /etc/lsb-release.
KEY_LSB_CROS_BOARD = 'CHROMEOS_RELEASE_BOARD'
# Key for Chrome OS build version in /etc/lsb-release.
KEY_LSB_CROS_VERSION = 'CHROMEOS_RELEASE_VERSION'
# Regular expression for reading file system information from dumpe2fs.
RE_BLOCK_COUNT = re.compile(r'^Block count: *(.*)$', re.MULTILINE)
RE_BLOCK_SIZE = re.compile(r'^Block size: *(.*)$', re.MULTILINE)
# Simple constant(s)
MEGABYTE = 1048576
# The storage industry treat "mega" and "giga" differently.
GIGABYTE_STORAGE = 1000000000
# Default size of each disk block (or sector).
DEFAULT_BLOCK_SIZE = pygpt.GPT.DEFAULT_BLOCK_SIZE
# Components for preflash image.
PREFLASH_COMPONENTS = [
'release_image', 'test_image', 'toolkit', 'hwid', 'project_config']
# Components for cros_payload.
PAYLOAD_COMPONENTS = [
'release_image', 'test_image',
'toolkit', 'firmware', 'hwid', 'complete', 'toolkit_config', 'lsb_factory',
'description', 'project_config']
# Payload types
PAYLOAD_TYPE_TOOLKIT = 'toolkit'
PAYLOAD_TYPE_TOOLKIT_CONFIG = 'toolkit_config'
PAYLOAD_TYPE_LSB_FACTORY = 'lsb_factory'
# Payload subtypes.
PAYLOAD_SUBTYPE_VERSION = 'version'
# Warning message in lsb-factory file.
LSB_FACTORY_WARNING_MESSAGE = (
'# Please use image_tool to set lsb-factory config.\n'
'# Manual modifications will be overwritten at runtime!\n')
# Subconfigs in toolkit_config payload.
TOOLKIT_SUBCONFIG_ACTIVE_TEST_LIST = 'active_test_list'
TOOLKIT_SUBCONFIG_TEST_LIST_CONSTANTS = 'test_list_constants'
TOOLKIT_SUBCONFIG_CUTOFF = 'cutoff'
# Split line for separating outputs.
SPLIT_LINE = '=' * 72
# Command line namespaces.
CMD_NAMESPACE_PAYLOAD = 'payload'
CMD_NAMESPACE_RMA = 'rma'
def MakePartition(block_dev, part):
  """Helper function to build Linux device path for storage partition."""
  # Device names ending in a digit (e.g. mmcblk0, nvme0n1) need a 'p'
  # separator before the partition number; others (e.g. sda) do not.
  separator = 'p' if block_dev[-1].isdigit() else ''
  return '{}{}{}'.format(block_dev, separator, part)
class ArgTypes:
  """Helper class to collect all argument type checkers."""

  @staticmethod
  def ExistsPath(path):
    """An argument that must refer to an existing path."""
    if os.path.exists(path):
      return path
    raise argparse.ArgumentTypeError('Does not exist: %s' % path)

  @staticmethod
  def GlobPath(pattern):
    """An argument as glob pattern, resolved to a single path.

    Useful for specifying default values with wildcards. A leading '-'
    makes the pattern optional: unmatched patterns yield None instead of
    raising. Patterns may be joined with '|'; the first one that matches
    wins.
    """
    allow_none = pattern.startswith('-')
    if allow_none:
      # Special trick to allow defaults.
      pattern = pattern[1:]
    goals = pattern.split('|')
    last_index = len(goals) - 1
    for i, goal in enumerate(goals):
      matches = glob.glob(goal)
      if not matches:
        if i < last_index:
          continue
        if allow_none:
          return None
        raise argparse.ArgumentTypeError('Does not exist: %s' % pattern)
      if len(matches) > 1:
        raise argparse.ArgumentTypeError(
            'Too many files found for <%s>: %s' % (pattern, matches))
      return matches[0]
class SysUtils:
"""Collection of system utilities."""
  @staticmethod
  def Shell(commands, sudo=False, output=False, check=True, silent=False,
            log_stderr_on_error=None, **kargs):
    """Helper to execute 'sudo' command in a shell.

    A simplified implementation. To reduce dependency, we don't want to use
    process_utils.Spawn.

    Args:
      commands: a string, or a list of arguments that is quoted and joined
          into a single shell command line.
      sudo: Execute the command with sudo if needed.
      output: If it is True, returns the output from command. Otherwise, returns
          the returncode.
      check: Throws exception if returncode is not zero.
      silent: Sets stdout and stderr to DEVNULL.
      log_stderr_on_error: Logs stderr only if the command fails. If it is None,
          then it is set to 'check and silent'.

    Returns:
      The command's stdout (str) when `output` is True, otherwise the
      integer returncode.
    """
    if log_stderr_on_error is None:
      log_stderr_on_error = check and silent
    if not isinstance(commands, str):
      # Quote each argument so the joined string is safe under shell=True.
      commands = ' '.join(pipes.quote(arg) for arg in commands)
    kargs['shell'] = True
    kargs['encoding'] = 'utf-8'
    if sudo and os.geteuid() != 0:
      # -E preserves the caller's environment across sudo.
      commands = 'sudo -E ' + commands
    if silent:
      kargs['stdout'] = subprocess.DEVNULL
      kargs['stderr'] = subprocess.DEVNULL
    # NOTE: ordering matters below -- `output` and `log_stderr_on_error`
    # deliberately override the DEVNULL redirections set by `silent`.
    if output:
      kargs['stdout'] = subprocess.PIPE
    if log_stderr_on_error:
      kargs['stderr'] = subprocess.PIPE
    process = subprocess.run(commands, check=False, **kargs)
    if process.returncode != 0 and log_stderr_on_error:
      print('command: %r stderr:\n%s' % (commands, process.stderr))
    if check:
      # Raises CalledProcessError; stderr was already printed above if asked.
      process.check_returncode()
    return process.stdout if output else process.returncode
@staticmethod
def Sudo(commands, **kargs):
"""Shortcut to Shell(commands, sudo=True)."""
kargs['sudo'] = True
return Shell(commands, **kargs)
@staticmethod
def SudoOutput(commands, **kargs):
"""Shortcut to Sudo(commands, output=True)."""
kargs['output'] = True
return Sudo(commands, **kargs)
@staticmethod
def FindCommand(command):
"""Returns the right path to invoke given command."""
provided = os.path.join(
os.path.dirname(os.path.abspath(sys.argv[0])), command)
if not os.path.exists(provided):
provided = Shell(['which', command], output=True, check=False).strip()
if not provided:
raise RuntimeError('Cannot find program: %s' % command)
return provided
@classmethod
def FindCommands(cls, *commands):
"""Find any of the given commands in order."""
for cmd in commands:
try:
return cls.FindCommand(cmd)
except Exception:
pass
raise RuntimeError(
'Cannot find any of the following commands: %s' % ', '.join(commands))
@classmethod
def FindCGPT(cls):
"""Returns the best match of `cgpt` style command.
The `cgpt` is a native program that is hard to deploy. As an alternative, we
have the `pygpt` that emulates most of its functions, and that is accessible
via `image_tool gpt`.
"""
if os.path.exists(__file__) and os.access(__file__, os.X_OK):
return '%s gpt' % __file__
# Are we inside PAR?
par_path = os.environ.get('PAR_PATH')
if par_path:
if os.path.basename(par_path) == 'image_tool':
return '%s gpt' % par_path
return 'sh %s image_tool gpt' % par_path
# Nothing more - let's try to find the real programs.
return cls.FindCommands('pygpt', 'cgpt')
@classmethod
def FindBZip2(cls):
"""Returns a path to best working 'bzip2'."""
return cls.FindCommands('lbzip2', 'pbzip2', 'bzip2')
@staticmethod
@contextlib.contextmanager
def TempDirectory(prefix='imgtool_', delete=True):
"""Context manager to allocate and remove temporary folder.
Args:
prefix: a string as prefix of the created folder name.
"""
tmp_folder = None
try:
tmp_folder = tempfile.mkdtemp(prefix=prefix)
yield tmp_folder
finally:
if tmp_folder and delete:
Sudo(['rm', '-rf', tmp_folder], check=False)
  @staticmethod
  def PartialCopy(src_path, dest_path, count, src_offset=0, dest_offset=0,
                  buffer_size=32 * MEGABYTE, sync=False, verbose=None):
    """Copy partial contents from one file to another file, like 'dd'.

    Args:
      src_path: path of the source file.
      dest_path: path of the destination file; opened 'r+b', so it must
          already exist and bytes outside the copied range are preserved.
      count: number of bytes to copy.
      src_offset: byte offset in the source to start reading from.
      dest_offset: byte offset in the destination to start writing at.
      buffer_size: chunk size for each read/write iteration.
      sync: if True, flush and fdatasync after every chunk.
      verbose: print progress to stderr; None means auto-enable when the
          copy spans more than 5 buffers.
    """
    with open(src_path, 'rb') as src:
      if verbose is None:
        verbose = count // buffer_size > 5
      with open(dest_path, 'r+b') as dest:
        fd = dest.fileno()
        src.seek(src_offset)
        dest.seek(dest_offset)
        remains = count
        while remains > 0:
          data = src.read(min(remains, buffer_size))
          dest.write(data)
          remains -= len(data)
          if sync:
            # Force the chunk to disk before reading the next one.
            dest.flush()
            os.fdatasync(fd)
          if verbose:
            if sys.stderr.isatty():
              # Interactive terminal: overwrite the percentage in place
              # using backspaces.
              width = 5
              sys.stderr.write(
                  '%*.1f%%%s' % (width, (1 - remains / count) * 100,
                                 '\b' * (width + 1)))
            else:
              sys.stderr.write('.')
    if verbose:
      sys.stderr.write('\n')
@staticmethod
def GetDiskUsage(path):
return int(SudoOutput(['du', '-sk', path]).split()[0]) * 1024
@staticmethod
def GetRemainingSize(path):
return int(
SudoOutput(['df', '-k', '--output=avail', path]).splitlines()[1]) * 1024
@staticmethod
def WriteFile(f, content):
"""Clears the original content and write new content to a file object."""
f.seek(0)
f.truncate()
f.write(content)
f.flush()
@staticmethod
def WriteFileToMountedDir(mounted_dir, file_name, content):
with tempfile.NamedTemporaryFile('w') as f:
f.write(content)
f.flush()
os.chmod(f.name, 0o644)
dest = os.path.join(mounted_dir, file_name)
Sudo(['cp', '-pf', f.name, dest])
Sudo(['chown', 'root:root', dest])
@staticmethod
@contextlib.contextmanager
def SetUmask(mask):
old_umask = os.umask(mask)
try:
yield
finally:
os.umask(old_umask)
@staticmethod
def CreateDirectories(dir_name, mode=MODE_NEW_DIR):
with SysUtils.SetUmask(0o022):
try:
os.makedirs(dir_name, mode)
except OSError as exc:
# Need to catch | |
rights.
Source: https://core.telegram.org/bots/api#exportchatinvitelink
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:return: Returns exported invite link as String on success.
:rtype: :obj:`base.String`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.EXPORT_CHAT_INVITE_LINK, payload)
return result
async def set_chat_photo(self, chat_id: typing.Union[base.Integer, base.String],
photo: base.InputFile) -> base.Boolean:
"""
Use this method to set a new profile photo for the chat. Photos can't be changed for private chats.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’
setting is off in the target group.
Source: https://core.telegram.org/bots/api#setchatphoto
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param photo: New chat photo, uploaded using multipart/form-data
:type photo: :obj:`base.InputFile`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals(), exclude=['photo'])
result = await self.send_file('photo', api.Methods.SET_CHAT_PHOTO, photo, payload)
return result
async def delete_chat_photo(self, chat_id: typing.Union[base.Integer, base.String]) -> base.Boolean:
"""
Use this method to delete a chat photo. Photos can't be changed for private chats.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’
setting is off in the target group.
Source: https://core.telegram.org/bots/api#deletechatphoto
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.DELETE_CHAT_PHOTO, payload)
return result
async def set_chat_title(self, chat_id: typing.Union[base.Integer, base.String],
title: base.String) -> base.Boolean:
"""
Use this method to change the title of a chat. Titles can't be changed for private chats.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’
setting is off in the target group.
Source: https://core.telegram.org/bots/api#setchattitle
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param title: New chat title, 1-255 characters
:type title: :obj:`base.String`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.SET_CHAT_TITLE, payload)
return result
async def set_chat_description(self, chat_id: typing.Union[base.Integer, base.String],
description: typing.Union[base.String, None] = None) -> base.Boolean:
"""
Use this method to change the description of a supergroup or a channel.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Source: https://core.telegram.org/bots/api#setchatdescription
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param description: New chat description, 0-255 characters
:type description: :obj:`typing.Union[base.String, None]`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.SET_CHAT_DESCRIPTION, payload)
return result
async def pin_chat_message(self, chat_id: typing.Union[base.Integer, base.String], message_id: base.Integer,
disable_notification: typing.Union[base.Boolean, None] = None) -> base.Boolean:
"""
Use this method to pin a message in a supergroup.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Source: https://core.telegram.org/bots/api#pinchatmessage
:param chat_id: Unique identifier for the target chat or username of the target supergroup
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param message_id: Identifier of a message to pin
:type message_id: :obj:`base.Integer`
:param disable_notification: Pass True, if it is not necessary to send a notification to
all group members about the new pinned message
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.PIN_CHAT_MESSAGE, payload)
return result
async def unpin_chat_message(self, chat_id: typing.Union[base.Integer, base.String]) -> base.Boolean:
"""
Use this method to unpin a message in a supergroup chat.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Source: https://core.telegram.org/bots/api#unpinchatmessage
:param chat_id: Unique identifier for the target chat or username of the target supergroup
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.UNPIN_CHAT_MESSAGE, payload)
return result
async def leave_chat(self, chat_id: typing.Union[base.Integer, base.String]) -> base.Boolean:
"""
Use this method for your bot to leave a group, supergroup or channel.
Source: https://core.telegram.org/bots/api#leavechat
:param chat_id: Unique identifier for the target chat or username of the target supergroup or channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.LEAVE_CHAT, payload)
return result
async def get_chat(self, chat_id: typing.Union[base.Integer, base.String]) -> types.Chat:
"""
Use this method to get up to date information about the chat
(current name of the user for one-on-one conversations, current username of a user, group or channel, etc.).
Source: https://core.telegram.org/bots/api#getchat
:param chat_id: Unique identifier for the target chat or username of the target supergroup or channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:return: Returns a Chat object on success.
:rtype: :obj:`types.Chat`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.GET_CHAT, payload)
return types.Chat(**result)
async def get_chat_administrators(self, chat_id: typing.Union[base.Integer, base.String]
) -> typing.List[types.ChatMember]:
"""
Use this method to get a list of administrators in a chat.
Source: https://core.telegram.org/bots/api#getchatadministrators
:param chat_id: Unique identifier for the target chat or username of the target supergroup or channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:return: On success, returns an Array of ChatMember objects that contains information about all
chat administrators except other bots.
If the chat is a group or a supergroup and no administrators were appointed,
only the creator will be returned.
:rtype: :obj:`typing.List[types.ChatMember]`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.GET_CHAT_ADMINISTRATORS, payload)
return [types.ChatMember(**chatmember) for chatmember in result]
async def get_chat_members_count(self, chat_id: typing.Union[base.Integer, base.String]) -> base.Integer:
"""
Use this method to get the number of members in a chat.
Source: https://core.telegram.org/bots/api#getchatmemberscount
:param chat_id: Unique identifier for the target chat or username of the target supergroup or channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:return: Returns Int on success.
:rtype: :obj:`base.Integer`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.GET_CHAT_MEMBERS_COUNT, payload)
return result
async def get_chat_member(self, chat_id: typing.Union[base.Integer, base.String],
user_id: base.Integer) -> types.ChatMember:
"""
Use this method to get information about a member of a chat.
Source: https://core.telegram.org/bots/api#getchatmember
:param chat_id: Unique identifier for the target chat or username of the target supergroup or channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param user_id: Unique identifier of the target user
:type user_id: :obj:`base.Integer`
:return: Returns a ChatMember object on success.
:rtype: :obj:`types.ChatMember`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.GET_CHAT_MEMBER, payload)
return types.ChatMember(**result)
async def set_chat_sticker_set(self, chat_id: typing.Union[base.Integer, base.String],
sticker_set_name: base.String) -> base.Boolean:
"""
Use this method to set a new group sticker set for a supergroup.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Use the field can_set_sticker_set optionally returned in getChat requests to check
if the bot can use this method.
Source: https://core.telegram.org/bots/api#setchatstickerset
:param chat_id: Unique identifier for the target chat or username of the target supergroup
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param sticker_set_name: Name of the sticker set to be set as the group sticker set
:type sticker_set_name: :obj:`base.String`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.SET_CHAT_STICKER_SET, payload)
return result
async def delete_chat_sticker_set(self, chat_id: typing.Union[base.Integer, base.String]) -> base.Boolean:
"""
Use this method to delete a group sticker set from a supergroup.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Use the field can_set_sticker_set optionally returned in getChat requests
to check if the bot can use this method.
Source: https://core.telegram.org/bots/api#deletechatstickerset
:param chat_id: Unique identifier for the target chat or username of the target supergroup
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.DELETE_CHAT_STICKER_SET, payload)
return | |
<filename>thermo/equilibrium.py
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2019, 2020 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains an object designed to store the result of a flash
calculation and provide convinient access to all properties of the calculated
phases and bulks.
For reporting bugs, adding feature requests, or submitting pull requests,
please use the `GitHub issue tracker <https://github.com/CalebBell/thermo/>`_.
.. contents:: :local:
EquilibriumState
================
.. autoclass:: EquilibriumState
:members:
:undoc-members:
:exclude-members: dH_dP_V, dH_dT_V, dH_dV_P, dH_dV_T, dS_dP_V, dS_dT, dS_dT_P, dS_dT_V
'''
from __future__ import division
__all__ = ['EquilibriumState']
from fluids.constants import R, R_inv
from fluids.core import thermal_diffusivity
from chemicals.utils import log, exp, normalize, zs_to_ws, vapor_mass_quality, mixing_simple, Vm_to_rho, SG
from chemicals.virial import B_from_Z
from chemicals.elements import atom_fractions, mass_fractions, simple_formula_parser, molecular_weight, mixture_atomic_composition
from thermo.phases import gas_phases, liquid_phases, solid_phases, Phase, derivatives_thermodynamic, derivatives_thermodynamic_mass, derivatives_jacobian
from thermo.chemical_package import ChemicalConstantsPackage, PropertyCorrelationsPackage, constants_docstrings
from thermo.bulk import Bulk, BulkSettings, default_settings
all_phases = gas_phases + liquid_phases + solid_phases
CAS_H2O = '7732-18-5'
PHASE_GAS = 'gas'
PHASE_LIQUID0 = 'liquid0'
PHASE_LIQUID1 = 'liquid1'
PHASE_LIQUID2 = 'liquid2'
PHASE_LIQUID3 = 'liquid3'
PHASE_BULK_LIQUID = 'liquid_bulk'
PHASE_WATER_LIQUID = 'water_phase'
PHASE_LIGHTEST_LIQUID = 'lightest_liquid'
PHASE_HEAVIEST_LIQUID = 'heaviest_liquid'
PHASE_SOLID0 = 'solid0'
PHASE_SOLID1 = 'solid1'
PHASE_SOLID2 = 'solid2'
PHASE_SOLID3 = 'solid3'
PHASE_BULK_SOLID = 'solid_bulk'
PHASE_BULK = 'bulk'
PHASE_REFERENCES = [PHASE_GAS, PHASE_LIQUID0, PHASE_LIQUID1, PHASE_LIQUID2,
PHASE_LIQUID3, PHASE_BULK_LIQUID, PHASE_WATER_LIQUID,
PHASE_LIGHTEST_LIQUID, PHASE_HEAVIEST_LIQUID, PHASE_SOLID0,
PHASE_SOLID1, PHASE_SOLID2, PHASE_SOLID3, PHASE_BULK_SOLID,
PHASE_BULK]
__all__.extend(['PHASE_GAS', 'PHASE_LIQUID0', 'PHASE_LIQUID1', 'PHASE_LIQUID2',
'PHASE_LIQUID3', 'PHASE_BULK_LIQUID', 'PHASE_WATER_LIQUID',
'PHASE_LIGHTEST_LIQUID', 'PHASE_HEAVIEST_LIQUID', 'PHASE_SOLID0',
'PHASE_SOLID1', 'PHASE_SOLID2', 'PHASE_SOLID3', 'PHASE_BULK_SOLID',
'PHASE_BULK', 'PHASE_REFERENCES'])
class EquilibriumState(object):
r'''Class to represent a thermodynamic equilibrium state with one or more
phases in it. This object is designed to be the output of the
:obj:`thermo.flash.Flash` interface and to provide easy acess to all
properties of the mixture.
Properties like :obj:`Cp <EquilibriumState.Cp>` are calculated using the
mixing rules configured by the
:obj:`BulkSettings <thermo.bulk.BulkSettings>` object. For states with a
single phase, this will always reduce to the properties of that phase.
This interface allows calculation of thermodynamic properties,
and transport properties. Both molar and mass outputs are provided, as
separate calls (ex. :obj:`Cp <EquilibriumState.Cp>` and
:obj:`Cp_mass <EquilibriumState.Cp_mass>`).
Parameters
----------
T : float
Temperature of state, [K]
P : float
Pressure of state, [Pa]
zs : list[float]
Overall mole fractions of all species in the state, [-]
gas : :obj:`Phase <thermo.phases.Phase>`
The calcualted gas phase object, if one was found, [-]
liquids : list[:obj:`Phase <thermo.phases.Phase>`]
A list of liquid phase objects, if any were found, [-]
solids : list[:obj:`Phase <thermo.phases.Phase>`]
A list of solid phase objects, if any were found, [-]
betas : list[float]
Molar phase fractions of every phase, ordered [`gas beta`,
`liquid beta0`, `liquid beta1`, ..., `solid beta0`, `solid beta1`, ...]
flash_specs : dict[str : float], optional
A dictionary containing the specifications for the flash calculations,
[-]
flash_convergence : dict[str : float], optional
A dictionary containing the convergence results for the flash
calculations; this is to help support development of the library only
and the contents of this dictionary is subject to change, [-]
constants : :obj:`ChemicalConstantsPackage <thermo.chemical_package.ChemicalConstantsPackage>`, optional
Package of chemical constants; all cases these properties are
accessible as attributes of this object, [-]
:obj:`EquilibriumState <thermo.equilibrium.EquilibriumState>` object, [-]
correlations : :obj:`PropertyCorrelationsPackage <thermo.chemical_package.PropertyCorrelationsPackage>`, optional
Package of chemical T-dependent properties; these properties are
accessible as attributes of this object object, [-]
flasher : :obj:`Flash <thermo.flash.Flash>` object, optional
This reference can be provided to this object to allow the object to
return properties which are themselves calculated from results of flash
calculations, [-]
settings : :obj:`BulkSettings <thermo.bulk.BulkSettings>`, optional
Object containing settings for calculating bulk and transport
properties, [-]
Examples
--------
The following sample shows a flash for the CO2-n-hexane system with all
constants provided, using no data from thermo.
>>> from thermo import *
>>> constants = ChemicalConstantsPackage(names=['carbon dioxide', 'hexane'], CASs=['124-38-9', '110-54-3'], MWs=[44.0095, 86.17536], omegas=[0.2252, 0.2975], Pcs=[7376460.0, 3025000.0], Tbs=[194.67, 341.87], Tcs=[304.2, 507.6], Tms=[216.65, 178.075])
>>> correlations = PropertyCorrelationsPackage(constants=constants, skip_missing=True,
... HeatCapacityGases=[HeatCapacityGas(poly_fit=(50.0, 1000.0, [-3.1115474168865828e-21, 1.39156078498805e-17, -2.5430881416264243e-14, 2.4175307893014295e-11, -1.2437314771044867e-08, 3.1251954264658904e-06, -0.00021220221928610925, 0.000884685506352987, 29.266811602924644])),
... HeatCapacityGas(poly_fit=(200.0, 1000.0, [1.3740654453881647e-21, -8.344496203280677e-18, 2.2354782954548568e-14, -3.4659555330048226e-11, 3.410703030634579e-08, -2.1693611029230923e-05, 0.008373280796376588, -1.356180511425385, 175.67091124888998]))])
>>> eos_kwargs = {'Pcs': constants.Pcs, 'Tcs': constants.Tcs, 'omegas': constants.omegas}
>>> gas = CEOSGas(PRMIX, eos_kwargs, HeatCapacityGases=correlations.HeatCapacityGases)
>>> liq = CEOSLiquid(PRMIX, eos_kwargs, HeatCapacityGases=correlations.HeatCapacityGases)
>>> flasher = FlashVL(constants, correlations, liquid=liq, gas=gas)
>>> state = flasher.flash(P=1e5, T=196.0, zs=[0.5, 0.5])
>>> type(state) is EquilibriumState
True
>>> state.phase_count
2
>>> state.bulk.Cp()
108.3164692
>>> state.flash_specs
{'zs': [0.5, 0.5], 'T': 196.0, 'P': 100000.0}
>>> state.Tms
[216.65, 178.075]
>>> state.liquid0.H()
-34376.4853
>>> state.gas.H()
-3608.0551
Attributes
----------
gas_count : int
Number of gas phases present (0 or 1), [-]
liquid_count : int
Number of liquid phases present, [-]
solid_count : int
Number of solid phases present, [-]
phase_count : int
Number of phases present, [-]
gas_beta : float
Molar phase fraction of the gas phase; 0 if no gas phase is present,
[-]
liquids_betas : list[float]
Liquid molar phase fractions, [-]
solids_betas : list[float]
Solid molar phase fractions, [-]
liquid_zs : list[float]
Overall mole fractions of each component in the overall liquid phase,
[-]
liquid_bulk : :obj:`Bulk<thermo.bulk.Bulk>`
Liquid phase bulk, [-]
solid_zs : list[float]
Overall mole fractions of each component in the overall solid phase,
[-]
solid_bulk : :obj:`Bulk<thermo.bulk.Bulk>`
Solid phase bulk, [-]
bulk : :obj:`Bulk<thermo.bulk.Bulk>`
Overall phase bulk, [-]
'''
max_liquid_phases = 1
reacted = False
flashed = True
liquid_bulk = None
solid_bulk = None
T_REF_IG = Phase.T_REF_IG
T_REF_IG_INV = Phase.T_REF_IG_INV
P_REF_IG = Phase.P_REF_IG
P_REF_IG_INV = Phase.P_REF_IG_INV
__full_path__ = "%s.%s" %(__module__, __qualname__)
def __str__(self):
s = '<EquilibriumState, T=%.4f, P=%.4f, zs=%s, betas=%s, phases=%s>'
s = s %(self.T, self.P, self.zs, self.betas, str([str(i) for i in self.phases]).replace("'", ''))
return s
def __repr__(self):
s = '%s(T=%s, P=%s, zs=%s, betas=%s' %(self.__class__.__name__, self.T, self.P, self.zs, self.betas)
s += ', gas=%s' %(self.gas)
s += ', liquids=%s' %(self.liquids)
s += ', solids=%s' %(self.solids)
s += ')'
return s
def __init__(self, T, P, zs,
gas, liquids, solids, betas,
flash_specs=None, flash_convergence=None,
constants=None, correlations=None, flasher=None,
settings=default_settings):
# T, P are the only properties constant across phase
self.T = T
self.P = P
self.zs = zs
self.N = N = len(zs)
self.gas_count = gas_count = 1 if gas is not None else 0
self.liquid_count = liquid_count = len(liquids)
self.solid_count = solid_count = len(solids)
self.phase_count = gas_count + liquid_count + solid_count
self.gas = gas
self.liquids = liquids
self.solids = solids
if gas is not None:
self.phases = [gas] + liquids + solids
gas.assigned_phase = 'g'
else:
self.phases = liquids + solids
self.betas = betas
self.gas_beta = betas[0] if gas_count else 0.0
self.liquids_betas = betas_liquids = betas[gas_count:gas_count + liquid_count]
self.solids_betas = betas_solids = betas[gas_count + liquid_count:]
if liquid_count > 1:
# tot_inv = 1.0/sum(values)
# return [i*tot_inv for i in values]
self.liquid_zs = normalize([sum([betas_liquids[j]*liquids[j].zs[i] for j in range(liquid_count)])
for i in range(self.N)])
self.liquid_bulk = liquid_bulk = Bulk(T, P, self.liquid_zs, self.liquids, self.liquids_betas, 'l')
liquid_bulk.flasher = flasher
liquid_bulk.result = self
liquid_bulk.constants = constants
liquid_bulk.correlations = correlations
liquid_bulk.settings = settings
for i, l in enumerate(liquids):
setattr(self, 'liquid%d'%(i), l)
l.assigned_phase = 'l'
elif liquid_count:
l = liquids[0]
self.liquid_zs = l.zs
self.liquid_bulk = l
self.liquid0 = l
l.assigned_phase = 'l'
if solids:
self.solid_zs = normalize([sum([betas_solids[j]*solids[j].zs[i] for j in range(self.solid_count)])
for i in range(self.N)])
self.solid_bulk = solid_bulk = Bulk(T, P, self.solid_zs, solids, self.solids_betas, 's')
solid_bulk.result = self
solid_bulk.constants = constants
solid_bulk.correlations = correlations
solid_bulk.flasher = flasher
for i, s in enumerate(solids):
setattr(self, 'solid%d' %(i), s)
self.bulk = bulk = Bulk(T, P, | |
# Shim file to support IDA 6.x-7.3 and 7.5+
# Documentation provided by Hex-Rays:
# https://hex-rays.com/products/ida/support/ida74_idapython_no_bc695_porting_guide.html
import idc
import idaapi
try:
import ida_bytes
except ImportError:
ida_bytes = None
try:
import ida_name
except ImportError:
ida_name = None
try:
import ida_kernwin
except ImportError:
ida_kernwin = None
try:
import ida_nalt
except ImportError:
ida_nalt = None
try:
import ida_ua
except ImportError:
ida_ua = None
try:
import ida_funcs
except ImportError:
ida_funcs = None
def _get_fn_by_version(lib, curr_fn, archive_fn, archive_lib=None):
    '''
    Determine which function should be called based on the version of IDA.

    :param lib: Module that holds the 7.X version of the function.
    :param curr_fn: Name of the 7.X version of the function.
    :param archive_fn: Name of the 6.X version of the function.
    :param archive_lib: If the archive lib is different than the current lib,
                        set it here.
    :return: Function based on the version of IDA.
    :raises AttributeError: If the expected name is missing from the module.
                            (AttributeError is a subclass of Exception, so
                            existing broad handlers keep working.)
    '''
    if idaapi.IDA_SDK_VERSION >= 700:
        use_lib, name = lib, curr_fn
    else:
        use_lib = lib if archive_lib is None else archive_lib
        name = archive_fn
    try:
        return getattr(use_lib, name)
    except AttributeError:
        # Re-raise with the same message format the original shim used.
        raise AttributeError('%s is not a valid function in %s' % (name,
                                                                   use_lib))
def print_insn_mnem(ea):
    '''
    Return the mnemonic of the instruction at 'ea'.
    :param ea: Linear address of the instruction.
    :type ea: int
    :return: Instruction mnemonic, or "" when no instruction is found.
    :note: The text may differ slightly from the mnemonic IDA renders on
           screen.
    '''
    return _get_fn_by_version(idc, 'print_insn_mnem', 'GetMnem')(ea)
def print_operand(ea, n):
    '''
    Return the text of operand 'n' of the item at 'ea'.
    :param ea: Linear address of the item.
    :type ea: int
    :param n: Operand index: 0 for the first operand, 1 for the second.
    :type n: int
    :return: Current text representation of the operand, or "".
    '''
    return _get_fn_by_version(idc, 'print_operand', 'GetOpnd')(ea, n)
def define_local_var(start, end, location, name):
    '''
    Create a local variable over an address range.
    :param start: Start of the address range for the local variable.
    :type start: int
    :param end: End of the address range for the local variable.
    :type end: int
    :param location: Variable location, either in "[bp+xx]" form (where xx
                     is a number) or a register name.
    :type location: str
    :param name: Name for the local variable.
    :type name: str
    :return: 1 on success, 0 on failure.
    '''
    return _get_fn_by_version(idc, 'define_local_var', 'MakeLocal')(
        start, end, location, name)
def find_func_end(ea):
    '''
    Determine the boundaries of a new function.
    :param ea: Start address of the new function.
    :type ea: int
    :return: End address of an existing function at 'ea'; otherwise the end
             address of the new function, or BADADDR when it cannot be
             determined.
    '''
    return _get_fn_by_version(idc, 'find_func_end', 'FindFuncEnd')(ea)
def is_code(flag):
    '''
    Test whether flags mark the start of an instruction.
    :param flag: Flags for an address.
    :type flag: int
    :return: True when the flags indicate code, False otherwise.
    '''
    return _get_fn_by_version(ida_bytes, 'is_code', 'isCode', idaapi)(flag)
def get_full_flags(ea):
    '''
    Read the flags value for address 'ea'.
    :param ea: Linear address.
    :type ea: int
    :return: The flags, or 0 when no flags are present in the program.
    '''
    return _get_fn_by_version(ida_bytes, 'get_full_flags', 'getFlags',
                              idaapi)(ea)
def get_name(ea):
    '''
    Get name at the specified address.
    :param ea: Linear address
    :type ea: int
    :return: Name at 'ea', or "" when the byte has no name.
    '''
    fn = _get_fn_by_version(idc, 'get_name', 'Name')
    # Fixed: the version test used "> 700", so on IDA 7.0 exactly the 7.x
    # idc.get_name was selected but then called without the GN_VISIBLE
    # flag.  Use ">= 700" like every other shim in this module.
    if idaapi.IDA_SDK_VERSION >= 700:
        return fn(ea, ida_name.GN_VISIBLE)
    return fn(ea)
def get_func_off_str(ea):
    '''
    Convert an address to a 'funcname+offset' string.
    :param ea: Address to convert.
    :type ea: int
    :return: 'name+offset' (function name plus offset within the function)
             when 'ea' belongs to a function, otherwise a null string.
    '''
    return _get_fn_by_version(idc, 'get_func_off_str', 'GetFuncOffset')(ea)
def jumpto(ea, opnum=-1, uijmp_flags=0x0001):
    '''
    Jump to the specified address in the disassembly view.
    :param ea: Destination address.
    :type ea: int
    :param opnum: Operand number; -1 keeps the x coordinate unchanged.
    :type opnum: int
    :param uijmp_flags: Jump flags.
    :type uijmp_flags: int
    :return: success
    '''
    fn = _get_fn_by_version(ida_kernwin, 'jumpto', 'Jump', idc)
    if idaapi.IDA_SDK_VERSION < 700:
        return fn(ea)
    return fn(ea, opnum, uijmp_flags)
def ask_yn(default, format_str):
    '''
    Ask the user a question with "Yes", "No" and "Cancel" choices.
    :param default: Default choice, one of the Button IDs.
    :type default: int
    :param format_str: The question, in printf() style format.
    :type format_str: str
    :return: The selected button (one of the Button IDs).
    '''
    return _get_fn_by_version(ida_kernwin, 'ask_yn', 'AskYN', idc)(
        default, format_str)
def ask_file(for_saving, default, dialog):
    '''
    Ask the user for a file path.
    :param for_saving: Non-zero when the file is for saving.
    :type for_saving: int
    :param default: File extension.
    :type default: str
    :param dialog: Dialog box text to display to the user.
    :type dialog: str
    :return: The file path.
    '''
    return _get_fn_by_version(ida_kernwin, 'ask_file', 'AskFile', idc)(
        for_saving, default, dialog)
def get_func_attr(ea, attr):
    '''
    Read a function attribute.
    :param ea: Any address belonging to the function.
    :type ea: int
    :param attr: One of the FUNCATTR_... constants.
    :return: The attribute value, or BADADDR on error.
    '''
    return _get_fn_by_version(idc, 'get_func_attr', 'GetFunctionAttr')(
        ea, attr)
def get_name_ea_simple(name):
    '''
    Resolve a name to its linear address.
    :param name: Name of a program byte.
    :type name: str
    :return: Address of the name, or BADADDR when no such name exists.
    '''
    return _get_fn_by_version(idc, 'get_name_ea_simple', 'LocByName')(name)
def next_head(ea, maxea=4294967295):
    '''
    Find the next defined item (instruction or data) in the program.
    :param ea: Linear address to start the search from.
    :type ea: int
    :param maxea: The search stops at this address; maxea itself is not
                  included in the search range.
    :type maxea: int
    :return: Address of the next item, or BADADDR when there are no more
             defined items.
    '''
    return _get_fn_by_version(idc, 'next_head', 'NextHead')(ea, maxea)
def get_screen_ea():
    '''
    Return the linear address of the current screen location.
    :return: Address of screen focus.
    '''
    return _get_fn_by_version(idc, 'get_screen_ea', 'ScreenEA')()
def choose_func(title):
    '''
    Ask the user to select a function.
    :param title: Title of the dialog box.
    :type title: str
    :return: Function start address, or -1 when the user refuses to select
             a function.
    '''
    return _get_fn_by_version(idc, 'choose_func', 'ChooseFunction')(title)
def ask_ident(default, prompt):
    '''
    Ask the user for an identifier string.
    :param default: The default value.
    :type default: str
    :param prompt: The prompt to display.
    :type prompt: str
    :return: The entered string, or None.
    '''
    fn = _get_fn_by_version(ida_kernwin, 'ask_str', 'AskIdent', idc)
    if idaapi.IDA_SDK_VERSION < 700:
        return fn(default, prompt)
    return fn(default, ida_kernwin.HIST_IDENT, prompt)
def set_name(ea, name):
    '''
    Rename an address.
    :param ea: Linear address.
    :type ea: int
    :param name: New name for the address; an empty string deletes the
                 existing name.
    :type name: str
    :return: 1 on success, 0 on failure.
    '''
    fn = _get_fn_by_version(idc, 'set_name', 'MakeName')
    if idaapi.IDA_SDK_VERSION < 700:
        return fn(ea, name)
    return fn(ea, name, ida_name.SN_CHECK)
def get_wide_dword(ea):
    '''
    Get one wide dword (4 bytes) of the program at 'ea'.
    :param ea: linear address.
    :type ea: int
    :return: 32-bit integer value read at 'ea' (the 6.x archive function is
             Dword, which reads a double word, not a 64-bit value).
    '''
    fn = _get_fn_by_version(idc, 'get_wide_dword', 'Dword')
    return fn(ea)
def get_strlit_contents(ea):
    '''
    Read the string literal at 'ea'.
    :param ea: Linear address.
    :type ea: int
    :return: String contents or empty string.
    :note: NOTE(review): on IDA 7.x idc.get_strlit_contents appears to
           return bytes rather than str — confirm call sites handle both.
    '''
    return _get_fn_by_version(idc, 'get_strlit_contents', 'GetString')(ea)
def get_func_name(ea):
    '''
    Retrieve the name of a function.
    :param ea: Any address belonging to the function.
    :type ea: int
    :return: The function's name, or a null string when not found.
    '''
    return _get_fn_by_version(idc, 'get_func_name', 'GetFunctionName')(ea)
def get_first_seg():
    '''
    Get the first segment.
    :return: Start address of the first segment, or BADADDR when no
             segments are found.
    '''
    return _get_fn_by_version(idc, 'get_first_seg', 'FirstSeg')()
def get_segm_attr(segea, attr):
    '''
    Read a segment attribute.
    :param segea: Any address within the segment.
    :type segea: int
    :param attr: One of the SEGATTR_... constants.
    :type attr: int
    :return: Segment attributes.
    '''
    return _get_fn_by_version(idc, 'get_segm_attr', 'GetSegmentAttr')(
        segea, attr)
def get_next_seg(ea):
    '''
    Get the next segment.
    :param ea: Linear address.
    :type ea: int
    :return: Start of the next segment, or BADADDR.
    '''
    return _get_fn_by_version(idc, 'get_next_seg', 'NextSeg')(ea)
def is_strlit(flags):
    '''
    Test whether flags indicate a string literal.
    :param flags: Flags for an address.
    :type flags: int
    :return: bool
    '''
    return _get_fn_by_version(ida_bytes, 'is_strlit', 'isASCII', idc)(flags)
def create_strlit(start, lenth):
    '''
    Convert bytes at 'start' to a string literal and give it a meaningful
    name.
    :param start: Start ea.
    :type start: int
    :param lenth: Length of string, or 0 to determine dynamically.
    :type lenth: int
    :return: bool
    '''
    fn = _get_fn_by_version(ida_bytes, 'create_strlit', 'MakeStr', idc)
    if idaapi.IDA_SDK_VERSION >= 700:
        return fn(start, lenth, ida_nalt.STRTYPE_C)
    # Fixed: the 6.x path previously ignored 'lenth' and always passed
    # BADADDR (auto-detect).  MakeStr takes an end address, so honour an
    # explicit length by passing start + lenth.
    if lenth:
        return fn(start, start + lenth)
    return fn(start, idc.BADADDR)
def is_unknown(flags):
'''
Do | |
is successfully stopped: FAILED", tid)
output = runr(self, 'cat /var/log/respawner.log| grep "Child exited: 15 /usr/local/bin/unitcomm">/dev/null; echo $?', tid)
if output == '0':
log_green("Verify that unitcom process is successfully exited: PASSED", tid)
else:
log_red("Verify that unitcom process is successfully exited: FAILED", tid)
output = runr(self, 'cat /var/log/respawner.log| grep "Restarting /usr/local/bin/unitcomm">/dev/null; echo $?', tid)
if output == '0':
log_green("Verify that unitcom process is successfully restarted by Respawner: PASSED", tid)
else:
log_red("Verify that unitcom process is successfully restarted by Respawner: FAILED", tid)
    @unittest.skipIf(dmverity == 0, "The build does not have dmverity enabled")
    def test_REFP_019_dm_verity(self):
        '''Verify the design of file system structure.

        Runs `mount` on the target over ssh and checks that the root
        partition is the dm-verity mapping /dev/dm-0, ext4 and read-only.
        Results are reported via log_green/log_red; nothing is asserted.
        '''
        tid = 'REFP_019'
        print('[Test Case ID ]: %s' % tid)
        print('[Test Case Name ]: %s' % inspect.stack()[0].function)
        print(
            '[Title ]: Verify the implementation of dm-verity feature')
        print('[Product Requirement ]: EINST-018')
        print('[Development Task ]: CONLAREINS-95')
        print('[Test Automation Task ]: CONLAREINS-206')
        log_blue('[================================================================================================================]')
        ssh = self.ssh  # handle
        try:
            stdin, stdout, stderr = ssh.exec_command('mount')
            # check if there is any error in issuing the command
            result = stderr.read().decode('UTF-8').replace("\n", "")
            if len(result) > 0:
                log_red(
                    "error in executing the test command encountered" + result + ": FAILED", tid)
            file_structure = stdout.read().decode('UTF-8')
            # print(file_structure)
            # Testing "/" — the full expected mount line for the root fs.
            # if "/dev/dm-0 on / type ext4 (ro,relatime,data=ordered)" in file_structure:
            if "/dev/dm-0 on / type ext4 (ro,noatime,data=ordered)" in file_structure:
                log_green(
                    'Verify that the root partition is mounted as "/dev/dm-0" to support dm-verity: PASSED', tid)
            else:
                log_red(
                    'Verify that the root partition is mounted as "/dev/dm-0" to support dm-verity: FAILED', tid)
            # Weaker check: the dm-0 device appears anywhere in the mount list.
            if "/dev/dm-0" in file_structure:
                log_green(
                    'Verify that root partition "/" is NOT encrypted: PASSED', tid)
            else:
                log_red('Verify that root partition "/" is encrypted: FAILED', tid)
            # Filesystem type check for "/".
            if "/dev/dm-0 on / type ext4" in file_structure:
                log_green(
                    'Verify that the root partition "/" is ext4 type file structure: PASSED', tid)
            else:
                log_red(
                    'Verify that the root partition"/" is ext4 type file structure: FAILED', tid)
            # Read-only check for "/" (the "ro" mount option).
            if "/dev/dm-0 on / type ext4 (ro,noatime,data=ordered)" in file_structure:
                log_green(
                    'Verify that the root partition "/" is Read-Only type file structure: PASSED', tid)
            else:
                log_red(
                    'Verify that the root partition"/" is Read-Only type file structure: FAILED', tid)
        except Exception as e:
            log_red("Operation error:" + str(e) + ": FAILED", tid)
def test_REFP_020_OS_Linux_Hardened(self):
'''Verify if the OS(Linux) is hardened'''
tid = 'REFP_020'
print('[Test Case ID ]: %s' % tid)
print('[Test Case Name ]: %s' % inspect.stack()[0].function)
print('[Title ]: Verify if the OS(Linux) has hardened design')
print('[Product Requirement ]: EINST-015, EINST-016, EINST-017, EINST-018, EINST-005, EINST-006')
print('[Development Task ]: CONLAREINS-9, CONLAREINS-11')
print('[Test Automation Task ]: CONLAREINS-117')
log_blue('[================================================================================================================]')
ssh = self.ssh # handle
# poison positive
try:
stdin, stdout, stderr = ssh.exec_command('/autonet/tests/poison/test_page_poison >/dev/null; echo $?')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red("An error in executing the test command encountered" + result + ": FAILED", tid)
output = stdout.read().decode('UTF-8').replace("\n", "")
# print(output)
if output == '0':
log_green(
"Verify that the Poison positive test works: PASSED", tid)
else:
log_red(
"Verify that the Poison positive test works: PASSED: FAILED", tid)
except Exception as e:
log_red("Operation error:" + str(e) + ": FAILED", tid)
# fortify positive
try:
stdin, stdout, stderr = ssh.exec_command('/autonet/tests/fortify/test_fortify 1 >/dev/null 2>&1; echo $?')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red("An error in executing the test command encountered" + result + ": FAILED", tid)
output = stdout.read().decode('UTF-8').replace("\n", "")
# print(output)
if output == '0':
log_green(
"Verify that the Fortify positive test works: PASSED", tid)
else:
log_red(
"Verify that the Fortify positive test works: FAILED", tid)
except Exception as e:
log_red("Operation error:" + str(e) + ": FAILED", tid)
# fortify negative
try:
stdin, stdout, stderr = ssh.exec_command('/autonet/tests/fortify 111111 >/dev/null 2>&1; echo $?')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red("An error in executing the test command encountered" + result + ": FAILED", tid)
output = stdout.read().decode('UTF-8').replace("\n", "")
# print(output)
if output != '0':
log_green(
"Verify that the Fortify negative test works: PASSED", tid)
else:
log_red(
"Verify that the Fortify negative test works: FAILED", tid)
except Exception as e:
log_red("Operation error:" + str(e) + ": FAILED", tid)
def test_REFP_021_Legato(self):
'''Verify if Legato is installed and running successfully'''
tid = 'REFP_021'
print('[Test Case ID ]: %s' % tid)
print('[Test Case Name ]: %s' % inspect.stack()[0].function)
print('[Title ]: Verify if Legato is installed and running successfully')
print('[Product Requirement ]: EINST-008')
print('[Development Task ]: CONLAREINS-19')
print('[Test Automation Task ]: CONLAREINS-137')
log_blue('[================================================================================================================]')
ssh = self.ssh # handle
# legato running
try:
stdin, stdout, stderr = ssh.exec_command('pidof supervisor > /dev/null; echo $?')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red("An error in executing the test command encountered" + result + ": FAILED", tid)
output = stdout.read().decode('UTF-8').replace("\n", "")
# print(output)
if output == '0':
log_green(
"Verify that the Legato (supervisor) is running successfully: PASSED", tid)
else:
log_red(
"Verify that the Legato (supervisor) is running successfully: FAILED", tid)
except Exception as e:
log_red("Operation error:" + str(e) + ": FAILED", tid)
# legato file structure
try:
stdin, stdout, stderr = ssh.exec_command('mount | grep "legato" > /dev/null; echo $?')
# check if there is any error in issuing the command
result = stderr.read().decode('UTF-8').replace("\n", "")
if len(result) > 0:
log_red("An error in executing the test command encountered" + result + ": FAILED", tid)
output = stdout.read().decode('UTF-8').replace("\n", "")
# print(output)
if output == '0':
log_green(
"Verify that the Legato file structure is mounted: PASSED", tid)
else:
log_red(
"Verify that the Legato file structure is mounted: FAILED", tid)
except Exception as e:
log_red("Operation error:" + str(e) + ": FAILED", tid)
@unittest.skipIf(connected == 0, "No internet accessible")
def test_REFP_022_Sierra_Wirless_eCall(self):
'''Verify if eCall Functionality works on Sierra Wireless NAD'''
tid = 'REFP_022'
print('[Test Case ID ]: %s' % tid)
print('[Test Case Name ]: %s' % inspect.stack()[0].function)
print('[Title ]: Verify if eCall Functionality works on Sierra Wireless NAD')
print('[Product Requirement ]: EINST-055, EINST-40')
print('[Development Task ]: CONLAREINS-50, CONLAREINS-417, CONLAREINS-402')
print('[Test Automation Task ]: CONLAREINS-166, CONLAREINS-441, CONLAREINS-426')
log_blue('[================================================================================================================]')
# Start of creating new local (ssh_local) conection to kill the cell_shell process
warnings.simplefilter("ignore", category=PendingDeprecationWarning)
warnings.simplefilter("ignore", category=ResourceWarning)
ssh_local = paramiko.SSHClient() # handle
ssh_local.set_missing_host_key_policy(paramiko.AutoAddPolicy())
while True:
# writing next two lines to suppress the console error "Error reading SSH protocol banner", which is displayed at the time or board is "rebooting"
original_stderr = sys.stderr
sys.stderr = NullDevice()
try:
ssh_local.connect(host, username=user, password=pw, look_for_keys=False, allow_agent=False, banner_timeout=None, auth_timeout=None)
break
except paramiko.ssh_exception.socket.error as e:
# print('\nATTENTION: SSH transport has socket error...\n')
continue
except paramiko.ssh_exception.AuthenticationException as e:
# print('\nATTENTION: SSH transport has Authentication excepetion...\n')
continue
except paramiko.ssh_exception.BadHostKeyException as e:
# print('\nATTENTION: SSH transport has BadHostKeyException...\n')
continue
except paramiko.ssh_exception.SSHException as e:
# print('\nATTENTION: SSH transport has SSHException...\n')
continue
except Exception as e:
# print('\nATTENTION: SSH transport has undefined exception...\n')
continue
# setting the standard errors back again
original_stderr = sys.stderr
# END of creating new local (ssh_local) conection to kill the cell_shell process
ssh = self.ssh # handle
# check ping to SW
output = run(self, 'ping -c 2 192.168.127.12', tid)
if '2 packets transmitted, 2 packets received' in output:
log_green("Verify that ping works from imx6 to SW (192.168.127.12): PASSED", tid)
else:
log_red("Verify that ping works from imx6 to SW (192.168.127.12): FAILED", tid)
# check if cellular connection is up
output = run(self, 'cell_shell isconnected', tid)
if 'yes' in output:
log_green("Verify that cellular connection is UP(cell_shell isconnected): PASSED", tid)
else:
log_red("Verify that cellular connection is UP(cell_shell isconnected): FAILED", tid)
# check if cellular connection is up
output = run(self, 'cell_shell getprofile', tid)
output = output.replace("\n", "")
# print(output)
if 'sim:Ready' in output:
log_green("Verify that SIM is present and ready: PASSED", tid)
else:
log_red("Verify that SIM is present and ready: FAILED", tid)
if 'provider:AT&T' in output:
log_green("Verify that cellular service provider exist and is ATT&T: PASSED", tid)
else:
log_red("Verify | |
# report.py
# Copyright 2008 <NAME>
# Licence: See LICENCE (BSD licence)
"""Generate a report comparing results with schedule.
"""
import re
from solentware_misc.core import utilities
from solentware_misc.core.null import Null
from .gameobjects import (
MatchReport,
MatchGame,
Section,
Game,
Player,
UnfinishedGame,
code_in_name,
)
from .gameresults import resultmap, NULL_PLAYER
from .eventparser import PLAYED_ON
# Regular expressions for the report line formats.  Raw strings are used so
# that regex escapes such as \d, \s and \Z are not interpreted (or warned
# about, from Python 3.6 onward) as Python string escapes; the compiled
# patterns are identical to the previous "".join((...)) construction.
cross_table_row = re.compile(
    r"(?P<pin>\d*)\.?"
    r"(?P<row>(?:\s+[wb]?[-+=~])* *\Z)"
)
cross_table_result = re.compile(
    r"(?P<colour>[wb]?)"
    r"(?P<score>[-+=~])\Z"
)
swiss_table_row = re.compile(
    r"(?P<pin>\d*)\.?"
    r"(?P<row>"
    r"(?:\s+(?:x|--|def[-+]|bye[-=+]|[wb][123456789][0123456789]*[-=+pme]))*"
    r" *\Z)"
)
swiss_table_result = re.compile(
    r"(?P<notplayed>x|--|def[-+]|bye[-=+])\Z|"
    r"(?P<colour>[wb])"
    r"(?P<opponent>[123456789][0123456789]*)"
    r"(?P<score>[-=+pme])\Z"
)
match_name = re.compile(
    r"(?P<hometeam>.*?)(?=\s[0-9]*(\.5)?\s*-\s*[0-9]*(\.5)?\s)"
    r"(?P<score>\s[0-9]*(\.5)?\s*-\s*[0-9]*(\.5)?\s)"
    r"(?P<awayteam>.*)\Z"
)
game_result = re.compile(
    r"(?:(?P<board>\d*(?:\.\d*)?)?"
    r"(?P<colour>[w|b])?[ \t])?"
    r"(?P<date_player>.*?[ \t]|[ \t]*)?"
    r"(?P<score>dbld|def-|def[=+]|bye[=+]|draw|1-0|0-1|void|unfinished|default)"
    r"(?P<player>[ \t].*)?\Z"
)
individual_game_result = re.compile(
    r"(?P<white>.*?)(?=\s[dD][rR][aA][wW]\s|\s1-0\s|\s0-1\s)"
    r"(?P<score>\s[dD][rR][aA][wW]\s|\s1-0\s|\s0-1\s)"
    r"(?P<black>.*)\Z"
)

# Map a colour token to the colour seen from the opponent's side.
opposite_colour = {"w": "b", "b": "w", "": ""}
# Map a score token to the score seen from the opponent's side.
opposite_score = {
    "-": "+",
    "m": "+",
    "=": "=",
    "e": "=",
    "+": "-",
    "p": "-",
    "1": "0",
    "0": "1",
    "x": "x",
    "~": "~",
}
# BOARD_COLOUR defined for first-named team's players black on odd boards in
# first game (perhaps a multi-game rapidplay match ).
BOARD_COLOUR = {
    (True,): True,
    (False,): False,
    (None,): None,
    (True, True): False,
    (True, False): True,
    (True, None): None,
    (False, True): True,
    (False, False): False,
    (False, None): None,
    (None, True): None,
    (None, False): None,
    (None, None): None,
}
BOARD_COLOUR_REVERSE = {True: False, False: True, None: None}
class Report(object):
"""Results extracted from event report file containing one event."""
def __init__(self):
"""Initialise results report attributes for events."""
super().__init__()
self.textlines = None
self.error = []
self.er_source = ""
self.er_results = dict()
self.er_matchresults = []
self.er_unfinishedgames = []
self.error_repeat = False
self.er_section = dict()
self.er_report_order = []
self.er_name = None
self.er_rounds = dict()
self.er_pins = dict()
self.er_players = dict()
self.er_swiss_table = dict()
self.er_team_number = dict()
# set_match changed to populate er_matchresults in the way sl_report added
# SLArticle.er_matchresults to SLReportWeekly.er_matchresults. SLArticle
# and SLReportWeekly are subclasses of ReportLeague.
# Putting something in ReportLeague.er_results is left to collate_matches
# method of Collation.
def set_match(self, result):
"""Add match result entry to match result data structures."""
# append not extend because matches are not batched by email instance.
self.er_matchresults.append(result)
def set_match_result(self, result):
"""Add match result entry to report data structures."""
if result.competition not in self.er_results:
self.er_results[result.competition] = dict()
self.er_results[result.competition][
(
result.hometeam,
result.awayteam,
result.date,
result.round,
result.source,
)
] = result
def build_results(self, textlines):
"""Populate the event results report from textlines."""
def black_on_all(board):
return False
def black_on_odd(board):
try:
return BOARD_COLOUR[
tuple(
[
False
if b[-1] in "13579"
else True
if b[-1] in "02468"
else None
for b in board.split(".")
]
)
]
except:
return None
def colour_none(board):
return None
def white_on_all(board):
return True
def white_on_odd(board):
try:
return BOARD_COLOUR_REVERSE[black_on_odd(board)]
except:
return None
def add_player_record(name, event, team, tagger):
codes = set([s.strip() for s in code_in_name.findall(name)])
name = " ".join(
[s.strip() for s in code_in_name.split(name)]
).strip()
key = (name, event, team)
if key not in players:
players[key] = Player(
tagger=tagger,
name=name,
event=event,
club=team,
reported_codes=codes,
)
if codes - players[key].reported_codes:
players[key].add_reported_codes(codes)
return players[key]
def get_allplayall_games(text, tagger):
spc = text.split()
if spc[0] in sectiontypes:
return get_section(text, tagger)
elif spc[0] == "source":
get_source(spc)
return get_allplayall_games
ctr = cross_table_row.match(text)
if ctr is None:
tagger.append_generated_report(
self.error,
"".join(
(
'"',
text,
'" is not recognised as a cross table row.\n',
)
),
)
self.error_repeat = False
return get_allplayall_games
get_crosstablerow_results(
ctr.group("pin"), ctr.group("row"), tagger
)
return get_allplayall_games
def get_crosstablerow_results(pin, row, tagger):
cross_table = self.er_swiss_table[self._section]
if len(pin):
pin = int(pin)
else:
pin = len(cross_table) + 1
row = row.split()
if pin > len(row):
tagger.append_generated_report(
self.error,
"".join(
(
"PIN is greater than number of players implied in ",
'cross-table row "',
" ".join((str(pin), self._section, " ".join(row))),
'".\n',
)
),
)
self.error_repeat = False
return
if pin in cross_table:
tagger.append_generated_report(
self.error,
"".join(
(
'Cross-table row for PIN in "',
" ".join((str(pin), self._section, " ".join(row))),
'" already exists.\n',
)
),
)
self.error_repeat = False
return
card = []
num_rounds = len(row)
if num_rounds % 2 == 0:
num_rounds -= 1
for p in row:
opponent_pin = len(card) + 1
strm = cross_table_result.match(p)
if strm is None:
tagger.append_generated_report(
self.error,
"".join(
(
'"',
p,
'" in cross-table row "',
" ".join(
(str(pin), self._section, " ".join(row))
),
'" is not a recognised result.\n',
)
),
)
return False
colour = strm.group("colour")
score = strm.group("score")
if opponent_pin == pin:
if p != "~":
tagger.append_generated_report(
self.error,
"".join(
(
'Crosstable entry where opponent is self in "',
" ".join(
(
str(pin),
self._section,
" ".join(row),
)
),
'" must be "~".\n',
)
),
)
continue
nominal_round = None
else:
if opponent_pin > num_rounds:
if pin * 2 > opponent_pin:
nominal_round = (pin * 2) - opponent_pin
else:
nominal_round = (pin * 2) - 1
elif pin > num_rounds:
if opponent_pin * 2 > pin:
nominal_round = (opponent_pin * 2) - pin
else:
nominal_round = (opponent_pin * 2) - 1
else:
nominal_round = (pin + opponent_pin - 1) % num_rounds
if nominal_round == 0:
nominal_round = num_rounds
if opponent_pin in cross_table:
# when opponent_pin == pin it will not be there
opponent_entry = cross_table[opponent_pin][pin - 1]
error = False
if score != opposite_score[opponent_entry["score"]]:
error = True
if colour != opposite_colour[opponent_entry["colour"]]:
error = True
if nominal_round != opponent_entry["nominal_round"]:
error = True
if error:
tagger.append_generated_report(
self.error,
"".join(
(
'Cross table row "',
" ".join(
(
str(pin),
self._section,
" ".join(row),
)
),
'") is not consistent with row for opponent "',
str(opponent_pin),
'".\n',
)
),
)
card.append(
dict(
tagger=tagger,
colour=colour,
score=score,
nominal_round=nominal_round,
)
)
cross_table[pin] = card
return True
def get_date(tokens, tagger, exact=True):
datestr = " ".join(tokens[1:])
gdate = utilities.AppSysDate()
d = gdate.parse_date(datestr)
if d == len(datestr):
self._date = gdate.iso_format_date()
return d
elif d < 0:
tagger.append_generated_report(
self.error,
"".join(
('Date not recognised in "', " ".join(tokens), '".\n')
),
)
elif exact:
tagger.append_generated_report(
self.error,
"".join(
(
'Date found in "',
" ".join(tokens),
'" but extra text is present.\n',
)
),
)
else:
self._date = gdate.iso_format_date()
return d
return False
def get_event_name(text, tagger):
en = text.split()
self.er_name = " ".join(en)
if self.er_name in self.er_section:
tagger.append_generated_report(
self.error,
"".join(
(
'Event name "',
self.er_name,
'" in "',
text,
'" is a duplicate.\n',
)
),
)
self.er_section[self.er_name] = None
return get_section
def get_game(text, tagger):
gr = game_result.match(text)
if gr is None:
return False
board = gr.group("board")
if not board:
board = str(len(self._games) + 1)
colour = gr.group("colour")
if not colour:
colour = self._colourrule(board)
gamescore = gr.group("score")
if gamescore is not None:
gamescore = resultmap.get(
gamescore.lower().strip(), resultmap[None]
)
awayplayer = gr.group("player")
awayplayer = (
"" if awayplayer is None else " ".join(awayplayer.split())
)
if awayplayer:
awayplayer = add_player_record(
awayplayer, self.er_name, self._match[1], tagger
)
else:
awayplayer = NullPlayer()
date_player = gr.group("date_player")
if date_player is not None:
date_player = " ".join(date_player.split())
datetime = utilities.AppSysDate()
d = datetime.parse_date(date_player)
if d < 0:
gamedate = None
else:
gamedate = datetime.iso_format_date()
date_player = date_player[d:].strip()
else:
gamedate = None
if date_player:
homeplayer = add_player_record(
date_player, self.er_name, self._match[0], tagger
)
else:
homeplayer = NullPlayer()
if board not in self._games:
if self._played_on is PlayedOnStatus.game_report_played_on:
self.er_unfinishedgames.append(
UnfinishedGame(
tagger=tagger,
board=board,
date=gamedate,
homeplayer=homeplayer,
awayplayer=awayplayer,
result=gamescore,
homeplayerwhite=colour,
source=" ".join(
(self._match[4], self._section)
).strip(),
section=self._section,
# competition='',
hometeam=self._match[0],
awayteam=self._match[1],
)
)
elif self._playerlimit:
c = self._match_homeplayers.setdefault(homeplayer, 0)
if c >= self._playerlimit:
tagger.append_generated_report(
self.error,
"".join(
(
'Player "',
homeplayer.name,
'" in game "',
text,
'" occurs too many times in match.\n',
)
),
)
self._match_homeplayers[homeplayer] += 1
c = self._match_awayplayers.setdefault(awayplayer, 0)
if c >= self._playerlimit:
tagger.append_generated_report(
self.error,
"".join(
(
'Player "',
awayplayer.name,
'" in game "',
text,
'" occurs too many times in match.\n',
)
),
)
self._match_awayplayers[awayplayer] += 1
rsm = self.er_matchresults[-1]
rsm.games.append(
MatchGame(
tagger=tagger,
board=board,
date=gamedate,
homeplayer=homeplayer,
awayplayer=awayplayer,
result=gamescore,
homeplayerwhite=colour,
)
)
# Do the check on number of times a player appears for full
# match report only. When reporting played-on games assume
# repeated names are valid, and that problems will be seen
# because the report does not tally with a game originally
# reported unfinished. If board numbers are not used in
# the reports the technique cannot work because the derived
# board numbers are likely wrong.
self._games[board] = rsm.games[-1]
else:
rsm = | |
# Importar librerías/modulos
import arcade
import time
import webbrowser
# Game version
version = "Alpha"
# Window size (width and height)
s_width = 800
s_height = 600
# Parameter controlling the player's movement speed
MOVEMENT_SPEED = 5
# Pixel margins between the player and the screen edge (viewport scrolling)
LEFT_VIEWPORT_MARGIN = 400
RIGHT_VIEWPORT_MARGIN = 400
BOTTOM_VIEWPORT_MARGIN = 400
TOP_VIEWPORT_MARGIN = 400
# Start menu
class MainMenu(arcade.View):
    """Start-up menu view: draws the menu image and routes button clicks."""

    def __init__(self):
        super().__init__()
        print("Now in main menu.")
        self.background = None  # Background texture, loaded in setup()
        arcade.set_background_color(arcade.color.BLACK)

    # Configure the view
    def setup(self):
        self.background = arcade.load_texture("resources/images/menu_screenf.jpg")  # Load the menu image

    # Draw the background image
    def on_draw(self):
        arcade.start_render()
        # Render the image through a window-sized rectangle
        arcade.draw_lrwh_rectangle_textured(0, 0, s_width, s_height, self.background)

    # Handle mouse clicks on the menu buttons (pixel hit boxes)
    def on_mouse_press(self, x, y, button, modifiers):
        if button != arcade.MOUSE_BUTTON_LEFT:
            return
        if 334 <= x <= 482 and 315 <= y <= 371:  # Main Game
            tutorial_view = TutorialView()
            tutorial_view.setup()
            self.window.show_view(tutorial_view)
        elif 264 <= x <= 536 and 220 <= y <= 278:  # Settings Menu
            settings_view = Settings()
            settings_view.setup()
            self.window.show_view(settings_view)
        # Fixed: the exit button used "x < 536" while the settings button
        # above uses "x <= 536"; the bounds are now consistent.
        elif 264 <= x <= 536 and 133 <= y <= 189:  # Exit game
            print("User left the game through main menu.")
            exit()
# Nivel 1
class GameView1(arcade.View):
    """Level 1: free-roaming top-down level.

    The player steers an animated sprite over a tile map while the viewport
    scrolls to keep the sprite inside a margin box; walking into the exit
    region on the right edge switches to GameView2.
    """
    def __init__(self):
        super().__init__()
        print("Now in game, level 1.")
        self.window.set_mouse_visible(False)
        self.player_list = None
        self.player = None
        self.background = None
        self.ground_list = None
        self.walls_list = None
        # Screen scrolling offsets (bottom-left corner of the viewport)
        self.view_bottom = 0
        self.view_left = 0
        self.setup()
    def setup(self):
        """Load the background, player animations and the tile map."""
        self.background = arcade.load_texture('resources/images/galaxy.png')
        self.view_left = 0
        self.view_bottom = 0
        # Load the player
        self.player_list = arcade.SpriteList()
        self.player = arcade.AnimatedWalkingSprite()
        # Animations
        # Standing still, facing right
        self.player.stand_right_textures = []
        self.player.stand_right_textures.append(arcade.load_texture("resources/images/animated_characters"
                                                                    "/charlie/charlie_stand.png"))
        # Standing still, facing left
        self.player.stand_left_textures = []
        self.player.stand_left_textures.append(arcade.load_texture("resources/images/animated_characters/charlie"
                                                                   "/charlie_stand.png", mirrored=True))
        # Walking right
        self.player.walk_right_textures = []
        for i in range(0, 8):
            self.player.walk_right_textures.append(arcade.load_texture(f"resources/images/animated_characters"
                                                                       f"/charlie/charlie_walk{i}.png"))
        # Walking left
        self.player.walk_left_textures = []
        for i in range(0, 8):
            self.player.walk_left_textures.append(arcade.load_texture(f"resources/images/animated_characters"
                                                                      f"/charlie/charlie_walk{i}.png",
                                                                      mirrored=True))
        # Walking up and/or down (the "down" frames are reused for both directions)
        self.player.walk_down_textures = []
        self.player.walk_up_textures = []
        for i in range(0, 4):
            self.player.walk_down_textures.append(arcade.load_texture(f"resources/images/animated_characters/charlie"
                                                                      f"/charlie_wdown{i}.png"))
        for i in range(0, 4):
            self.player.walk_up_textures.append(arcade.load_texture(f"resources/images/animated_characters/charlie"
                                                                    f"/charlie_wdown{i}.png"))
        # Player spawn parameters
        self.player.scale = 1
        self.player.center_x = 50
        self.player.center_y = 434
        self.player_list.append(self.player)
        # Load the map and its layers
        # NOTE(review): level 1 loads "nivel 3.tmx" while level 2 loads
        # "nivel 1.tmx" — confirm the map files are intentionally swapped.
        my_map = arcade.tilemap.read_tmx("resources/mapas/nivel 3.tmx")
        self.ground_list = arcade.tilemap.process_layer(my_map, "ground", 1)
    def on_draw(self):
        """Render the scene and scroll the viewport to follow the player."""
        arcade.start_render()
        arcade.set_background_color(arcade.color.AMAZON)
        arcade.draw_lrwh_rectangle_textured(-625, -625, 6400, 6400, self.background)
        self.ground_list.draw()
        self.player.update_animation()
        self.player_list.draw()
        # Screen scrolling: only re-set the viewport when a margin was crossed
        changed = False
        # Left
        left_boundary = self.view_left + LEFT_VIEWPORT_MARGIN
        if self.player.left < left_boundary:
            self.view_left -= left_boundary - self.player.left
            changed = True
        # Right
        right_boundary = self.view_left + s_width - RIGHT_VIEWPORT_MARGIN
        if self.player.right > right_boundary:
            self.view_left += self.player.right - right_boundary
            changed = True
        # Top
        top_boundary = self.view_bottom + s_height - TOP_VIEWPORT_MARGIN
        if self.player.top > top_boundary:
            self.view_bottom += self.player.top - top_boundary
            changed = True
        # Bottom
        bottom_boundary = self.view_bottom + BOTTOM_VIEWPORT_MARGIN
        if self.player.bottom < bottom_boundary:
            self.view_bottom -= bottom_boundary - self.player.bottom
            changed = True
        if changed:
            self.view_bottom = int(self.view_bottom)
            self.view_left = int(self.view_left)
            # Scroll
            arcade.set_viewport(
                self.view_left,
                s_width + self.view_left,
                self.view_bottom,
                s_height + self.view_bottom,
            )
    def update(self, delta_time):
        """Move the player, clamp to the map edges and detect the level exit."""
        self.player.update()
        # Window boundaries
        if self.player.center_x >= 1014:
            print("Boundary!")
            self.player.center_x = 1014
        elif self.player.center_x <= 10:
            print("Boundary!")
            self.player.center_x = 10
        if self.player.center_y >= 1014:
            print("Boundary!")
            self.player.center_y = 1014
        elif self.player.center_y <= 16:
            print("Boundary!")
            self.player.center_y = 16
        # Next level: the clamp above pins center_x at exactly 1014, so pushing
        # into the right edge inside the exit's y-range triggers the transition
        if self.player.center_x == 1014 and 192 <= self.player.center_y <= 288:
            print("Next level!")
            next_view = GameView2()
            next_view.setup()
            self.window.show_view(next_view)
    def on_key_press(self, key, modifiers):
        """Start movement on WASD/arrow keys; ESC opens the pause view."""
        if key == arcade.key.ESCAPE:  # 65307
            self.window.set_mouse_visible(True)
            pause = PauseView(self)
            self.window.show_view(pause)
            print(f"Pressed {key}")
        if key == arcade.key.W or key == arcade.key.UP:  # 119 and 65362
            self.player.change_y = MOVEMENT_SPEED
            print(f"Pressed {key}")
        if key == arcade.key.A or key == arcade.key.LEFT:  # 97 and 65361
            self.player.change_x = -MOVEMENT_SPEED
            print(f"Pressed {key}")
        if key == arcade.key.S or key == arcade.key.DOWN:  # 115 and 65364
            self.player.change_y = -MOVEMENT_SPEED
            print(f"Pressed {key}")
        elif key == arcade.key.D or key == arcade.key.RIGHT:  # 100 and 65363
            self.player.change_x = MOVEMENT_SPEED
            print(f"Pressed {key}")
    def on_key_release(self, key, modifiers):
        """Stop movement along the axis of the released key."""
        if key == arcade.key.W or key == arcade.key.S or key == arcade.key.UP or key == arcade.key.DOWN:
            self.player.change_y = 0
            print(f"Released {key}")
        elif key == arcade.key.A or key == arcade.key.D or key == arcade.key.LEFT or key == arcade.key.RIGHT:
            self.player.change_x = 0
            print(f"Released {key}")
# Level 2
class GameView2(arcade.View):
    """Level 2: same controls and camera as level 1, with an extra wall layer.

    Walking into the exit region on the right edge switches to GameView3.
    """
    def __init__(self):
        super().__init__()
        print("Now in game, level 2.")
        self.window.set_mouse_visible(False)
        self.player_list = None
        self.player = None
        self.background = None
        self.ground_list = None
        self.walls_list = None
        # Screen scrolling offsets (bottom-left corner of the viewport).
        # Fix: these were initialized twice in the original; once is enough.
        self.view_bottom = 0
        self.view_left = 0
        self.setup()
    def setup(self):
        """Load the background, player animations and the tile map layers."""
        self.background = arcade.load_texture('resources/images/galaxy.png')
        self.view_left = 0
        self.view_bottom = 0
        # Load the player
        self.player_list = arcade.SpriteList()
        self.player = arcade.AnimatedWalkingSprite()
        # Animations
        # Standing still, facing right
        self.player.stand_right_textures = []
        self.player.stand_right_textures.append(arcade.load_texture("resources/images/animated_characters"
                                                                    "/charlie/charlie_stand.png"))
        # Standing still, facing left
        self.player.stand_left_textures = []
        self.player.stand_left_textures.append(arcade.load_texture("resources/images/animated_characters/charlie"
                                                                   "/charlie_stand.png", mirrored=True))
        # Walking right
        self.player.walk_right_textures = []
        for i in range(0, 8):
            self.player.walk_right_textures.append(arcade.load_texture(f"resources/images/animated_characters"
                                                                       f"/charlie/charlie_walk{i}.png"))
        # Walking left
        self.player.walk_left_textures = []
        for i in range(0, 8):
            self.player.walk_left_textures.append(arcade.load_texture(f"resources/images/animated_characters"
                                                                      f"/charlie/charlie_walk{i}.png",
                                                                      mirrored=True))
        # Walking up and/or down (the "down" frames are reused for both directions)
        self.player.walk_down_textures = []
        self.player.walk_up_textures = []
        for i in range(0, 4):
            self.player.walk_down_textures.append(arcade.load_texture(f"resources/images/animated_characters/charlie"
                                                                      f"/charlie_wdown{i}.png"))
        for i in range(0, 4):
            self.player.walk_up_textures.append(arcade.load_texture(f"resources/images/animated_characters/charlie"
                                                                    f"/charlie_wdown{i}.png"))
        # Player spawn parameters
        self.player.scale = 1
        self.player.center_x = 50
        self.player.center_y = 434
        self.player_list.append(self.player)
        # Load the map and its layers
        my_map = arcade.tilemap.read_tmx("resources/mapas/nivel 1.tmx")
        self.ground_list = arcade.tilemap.process_layer(my_map, "ground", 1)
        self.walls_list = arcade.tilemap.process_layer(my_map, "planet", 1)
    def on_draw(self):
        """Render the scene and scroll the viewport to follow the player."""
        arcade.start_render()
        arcade.set_background_color(arcade.color.AMAZON)
        arcade.draw_lrwh_rectangle_textured(-625, -625, 6400, 6400, self.background)
        self.ground_list.draw()
        self.walls_list.draw()
        self.player.update_animation()
        self.player_list.draw()
        # Screen scrolling: only re-set the viewport when a margin was crossed
        changed = False
        # Left
        left_boundary = self.view_left + LEFT_VIEWPORT_MARGIN
        if self.player.left < left_boundary:
            self.view_left -= left_boundary - self.player.left
            changed = True
        # Right
        right_boundary = self.view_left + s_width - RIGHT_VIEWPORT_MARGIN
        if self.player.right > right_boundary:
            self.view_left += self.player.right - right_boundary
            changed = True
        # Top
        top_boundary = self.view_bottom + s_height - TOP_VIEWPORT_MARGIN
        if self.player.top > top_boundary:
            self.view_bottom += self.player.top - top_boundary
            changed = True
        # Bottom
        bottom_boundary = self.view_bottom + BOTTOM_VIEWPORT_MARGIN
        if self.player.bottom < bottom_boundary:
            self.view_bottom -= bottom_boundary - self.player.bottom
            changed = True
        if changed:
            self.view_bottom = int(self.view_bottom)
            self.view_left = int(self.view_left)
            # Scroll
            arcade.set_viewport(
                self.view_left,
                s_width + self.view_left,
                self.view_bottom,
                s_height + self.view_bottom,
            )
    def update(self, delta_time):
        """Move the player, clamp to the map edges and detect the level exit."""
        self.player.update()
        # Window boundaries
        if self.player.center_x >= 1014:
            print("Boundary!")
            # NOTE(review): snaps to 1024 (level 1 snaps to 1014). The exit
            # check below relies on exactly 1024 — confirm this asymmetry is
            # intentional before changing it.
            self.player.center_x = 1024
        elif self.player.center_x <= 10:
            print("Boundary!")
            self.player.center_x = 10
        if self.player.center_y >= 1014:
            print("Boundary!")
            self.player.center_y = 1014
        elif self.player.center_y <= 16:
            print("Boundary!")
            self.player.center_y = 16
        # Next level: the clamp above pins center_x at exactly 1024, so pushing
        # into the right edge inside the exit's y-range triggers the transition
        if self.player.center_x == 1024 and 352 <= self.player.center_y <= 448:
            print("Next level")
            next_view = GameView3()
            next_view.setup()
            self.window.show_view(next_view)
    def on_key_press(self, key, modifiers):
        """Start movement on WASD/arrow keys; ESC opens the pause view."""
        if key == arcade.key.ESCAPE:  # 65307
            self.window.set_mouse_visible(True)
            pause = PauseView(self)
            self.window.show_view(pause)
            print(f"Pressed {key}")
        if key == arcade.key.W or key == arcade.key.UP:  # 119 and 65362
            self.player.change_y = MOVEMENT_SPEED
            print(f"Pressed {key}")
        if key == arcade.key.A or key == arcade.key.LEFT:  # 97 and 65361
            self.player.change_x = -MOVEMENT_SPEED
            print(f"Pressed {key}")
        if key == arcade.key.S or key == arcade.key.DOWN:  # 115 and 65364
            self.player.change_y = -MOVEMENT_SPEED
            print(f"Pressed {key}")
        elif key == arcade.key.D or key == arcade.key.RIGHT:  # 100 and 65363
            self.player.change_x = MOVEMENT_SPEED
            print(f"Pressed {key}")
    def on_key_release(self, key, modifiers):
        """Stop movement along the axis of the released key."""
        if key == arcade.key.W or key == arcade.key.S or key == arcade.key.UP or key == arcade.key.DOWN:
            self.player.change_y = 0
            print(f"Released {key}")
        elif key == arcade.key.A or key == arcade.key.D or key == arcade.key.LEFT or key == arcade.key.RIGHT:
            self.player.change_x = 0
            print(f"Released {key}")
# Level 3
class GameView3(arcade.View):
def __init__(self):
super().__init__()
print("Now in game, level 3.")
self.window.set_mouse_visible(False)
self.player_list = None
self.player = None
self.background = None
self.ground_list = None
self.walls_list = None
self.walls2_list = None
self.view_bottom = 0
self.view_left = 0
self.setup()
# Movimiento de la pantalla
self.view_bottom = 0
self.view_left = 0
def setup(self):
self.background = arcade.load_texture('resources/images/galaxy.png')
self.view_left = 0
self.view_bottom = 0
# Cargar al jugador
self.player_list = arcade.SpriteList()
self.player = arcade.AnimatedWalkingSprite()
# Animaciones
# Personaje quieto a la derecha
self.player.stand_right_textures = []
self.player.stand_right_textures.append(arcade.load_texture("resources/images/animated_characters"
"/charlie/charlie_stand.png"))
# Personaje quieto a la izquierda
self.player.stand_left_textures = []
self.player.stand_left_textures.append(arcade.load_texture("resources/images/animated_characters/charlie"
"/charlie_stand.png", mirrored=True))
# | |
# stockholm/money.py (from the kalaspuff/stockholm repository)
from __future__ import annotations
import decimal
import json
import re
from decimal import ROUND_HALF_UP, Decimal
from functools import reduce
from typing import Any, Dict, Generic, Iterable, List, Optional, Tuple, Type, TypeVar, Union, cast
from .currency import BaseCurrencyType, CurrencyValue, DefaultCurrency, DefaultCurrencyValue
from .exceptions import ConversionError, CurrencyMismatchError, InvalidOperandError
from .protobuf import GenericProtobufMessage, MoneyProtobufMessage
__all__ = ["Money"]
DEFAULT_MIN_DECIMALS = 2
DEFAULT_MAX_DECIMALS = 9
UNITS_MAX_LENGTH = 18
NANOS_LENGTH = 9
HIGHEST_SUPPORTED_AMOUNT = "999999999999999999.999999999"
LOWEST_SUPPORTED_AMOUNT = "-999999999999999999.999999999"
RoundingContext = decimal.Context(rounding=ROUND_HALF_UP)
_parse_format_specifier_regex = re.compile(
r"""\A
(?:
(?P<fill>.)?
(?P<align>[<>=^])
)?
(?P<sign>[-+ ])?
(?P<zeropad>0)?
(?P<minimumwidth>(?!0)\d+)?
(?P<thousands_sep>[,_])?
(?:\.(?P<precision>0|(?!0)\d+))?
(?P<type>[cCdfFmMs])?
\Z
""",
re.VERBOSE,
)
MoneyType = TypeVar("MoneyType", bound="MoneyModel")
ProtobufMessageType = TypeVar("ProtobufMessageType", bound=GenericProtobufMessage)
class MoneyModel(Generic[MoneyType]):
    """Shared implementation of a monetary value: a Decimal amount plus an
    optional currency (a currency object or a plain code string)."""
    __slots__ = ("_amount", "_currency")
    # Slot type declarations; values are assigned via object.__setattr__ in
    # __init__ (presumably to keep instances immutable — defined further down).
    _amount: Decimal
    _currency: Optional[Union[CurrencyValue, str]]
@classmethod
def sort(cls, iterable: Iterable, reverse: bool = False) -> Iterable:
return sorted(iterable, key=lambda x: x if isinstance(x, cls) else cls(x), reverse=reverse)
@classmethod
def sum(
cls,
iterable: Iterable,
currency: Optional[Union[DefaultCurrencyValue, CurrencyValue, str]] = DefaultCurrency,
currency_code: Optional[str] = None,
from_sub_units: Optional[bool] = None,
**kwargs: Any,
) -> MoneyType:
return cast(
MoneyType,
reduce(
lambda v, e: v + (e if isinstance(e, cls) else cls(e, from_sub_units=from_sub_units)),
iterable,
cls(0, currency=currency, currency_code=currency_code, from_sub_units=from_sub_units),
),
)
@classmethod
def _is_unknown_amount_type(cls, amount: Optional[Union[MoneyType, Decimal, int, float, str, object]]) -> bool:
return not any(map(lambda type_: isinstance(amount, type_), (Money, Decimal, int, bool, float, str)))
@classmethod
def from_sub_units(
cls: Type[MoneyType],
amount: Optional[Union[MoneyType, Decimal, int, float, str, object]],
currency: Optional[Union[DefaultCurrencyValue, CurrencyValue, str]] = DefaultCurrency,
value: Optional[Union[MoneyType, Decimal, int, float, str]] = None,
currency_code: Optional[str] = None,
**kwargs: Any,
) -> MoneyType:
return cls(amount=amount, currency=currency, from_sub_units=True, value=value, **kwargs)
@classmethod
def from_dict(cls: Type[MoneyType], input_dict: Dict) -> MoneyType:
return cls(**input_dict)
@classmethod
def from_json(cls: Type[MoneyType], input_value: Union[str, bytes]) -> MoneyType:
return cls(**json.loads(input_value))
@classmethod
def from_protobuf(
cls: Type[MoneyType],
input_value: Union[str, bytes, object],
proto_class: Type[GenericProtobufMessage] = MoneyProtobufMessage,
) -> MoneyType:
if input_value is not None and isinstance(input_value, bytes):
input_value = proto_class.FromString(input_value)
return cls(
**{
k: getattr(input_value, k)
for k in (
"value",
"units",
"nanos",
"amount",
"currency",
"currency_code",
"from_sub_units",
)
if hasattr(input_value, k)
}
)
    def __init__(
        self,
        amount: Optional[Union[MoneyType, Decimal, Dict, int, float, str, object]] = None,
        currency: Optional[Union[DefaultCurrencyValue, CurrencyValue, str]] = DefaultCurrency,
        from_sub_units: Optional[bool] = None,
        units: Optional[int] = None,
        nanos: Optional[int] = None,
        value: Optional[Union[MoneyType, Decimal, int, float, str]] = None,
        currency_code: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Normalize any supported input into a (Decimal amount, currency) pair.

        Accepted amount inputs: Money instances, Decimal/int/float, strings
        (optionally with an embedded currency, e.g. "4711.50 SEK"), dicts of
        constructor kwargs, JSON strings/bytes, serialized or live protobuf
        messages, and arbitrary objects exposing ``amount``/``currency``.
        ``units``/``nanos`` or ``value`` may be given instead of ``amount``;
        when given in addition to it they are validated against it.

        Raises ConversionError for any invalid or conflicting input.
        """
        # Amounts built from the secondary inputs (units/nanos, value). When a
        # primary 'amount' was also supplied they are checked against it at the
        # end instead of being used directly.
        validate_amounts = []
        # --- units/nanos input (protobuf-style split amount) ---
        if units is not None or nanos is not None:
            try:
                units = units or 0
                nanos = nanos or 0
                # Both parts must carry the same sign.
                if (units > 0 and nanos < 0) or (units < 0 and nanos > 0):
                    raise ValueError
                units_str = str(units).lstrip("-")
                nanos_str = str(nanos).lstrip("-").rjust(NANOS_LENGTH, "0")
                if len(units_str) > UNITS_MAX_LENGTH:
                    raise ValueError
                if len(nanos_str) != NANOS_LENGTH:
                    raise ValueError
                sign = "-" if nanos < 0 or units < 0 else ""
                new_decimal = Decimal(f"{sign}{units_str}.{nanos_str}")
                if amount is None:
                    amount = new_decimal
                else:
                    validate_amounts.append(new_decimal)
            except Exception:
                raise ConversionError("Invalid values for 'units' and 'nanos'")
        # --- 'value' input (anything the constructor itself accepts) ---
        if value is not None:
            try:
                new_amount = cast(MoneyType, self.__class__(value))
                # A currency embedded in 'value' must agree with 'currency_code'.
                if currency_code is None:
                    currency_code = new_amount.currency_code
                elif new_amount.currency_code and new_amount.currency_code != currency_code:
                    raise ConversionError("Invalid value for 'value'")
                if amount is None:
                    amount = new_amount
                else:
                    validate_amounts.append(new_amount.amount)
            except Exception:
                raise ConversionError("Invalid value for 'value'")
        # --- dict input: keys are treated as constructor keyword arguments ---
        if amount is not None and isinstance(amount, Dict):
            amount = self.__class__.from_dict(amount)
        if amount is None:
            raise ConversionError("Missing input values for monetary amount")
        # --- reconcile the 'currency' and 'currency_code' arguments ---
        if currency is DefaultCurrency and currency_code:
            if not isinstance(currency_code, str):
                raise ConversionError("Invalid 'currency_code' value, must be string")
            currency = str(currency_code)
        elif currency is not DefaultCurrency and currency_code and str(currency) != str(currency_code):
            raise ConversionError("Invalid 'currency' value, does not match 'currency_code'")
        # Fast path: copying another Money with no overriding arguments.
        if (
            isinstance(amount, Money)
            and currency is DefaultCurrency
            and from_sub_units is None
            and units is None
            and nanos is None
        ):
            # object.__setattr__ bypasses any __setattr__ guard on this class.
            object.__setattr__(self, "_amount", amount._amount)
            object.__setattr__(self, "_currency", amount._currency)
            return
        if (
            currency is not DefaultCurrency
            and not isinstance(currency, str)
            and not isinstance(currency, BaseCurrencyType)
            and currency is not None
        ):
            raise ConversionError("Invalid 'currency' value")
        output_amount = None
        output_currency: Optional[Union[CurrencyValue, str]] = None
        # Normalize an explicitly passed currency; 3-letter codes are upper-cased.
        if currency is not DefaultCurrency:
            if isinstance(currency, BaseCurrencyType):
                output_currency = currency
            else:
                output_currency = str(currency or "").strip() or None
                output_currency = (
                    output_currency.upper() if output_currency and len(output_currency) == 3 else output_currency
                )
        # --- JSON input: str/bytes starting with "{" (123 == ord("{")) ---
        if amount is not None and (
            (isinstance(amount, str) and len(amount) > 1 and amount[0] == "{")
            or (isinstance(amount, bytes) and len(amount) > 1 and amount[0] == 123)
        ):
            try:
                amount = str(self.__class__.from_dict(json.loads(amount)))
            except Exception:
                pass
        # --- remaining bytes input: try it as a serialized protobuf message ---
        if amount is not None and isinstance(amount, bytes):
            try:
                amount = MoneyProtobufMessage.FromString(amount)
            except Exception:
                pass
        # --- live protobuf message: re-enter via the fields it carries ---
        if amount is not None and isinstance(amount, GenericProtobufMessage):
            amount = str(
                self.__class__.from_dict(
                    {
                        k: getattr(amount, k)
                        for k in (
                            "value",
                            "units",
                            "nanos",
                            "amount",
                            "currency",
                            "currency_code",
                            "from_sub_units",
                        )
                        if hasattr(amount, k)
                    }
                )
            )
        # --- arbitrary object exposing amount/currency (attributes or callables) ---
        if Money._is_unknown_amount_type(amount):
            try:
                match_amount = getattr(amount, "amount")
                match_amount = (match_amount()) if match_amount and callable(match_amount) else match_amount
                if match_amount is None or Money._is_unknown_amount_type(match_amount):
                    raise AttributeError
                match_currency = None
                try:
                    match_currency = getattr(amount, "currency")
                    match_currency = (
                        (match_currency()) if match_currency and callable(match_currency) else match_currency
                    )
                    if not match_currency:
                        raise AttributeError
                except AttributeError:
                    # No usable currency attribute: try "<amount> <code>" or
                    # "<code> <amount>" from the object's string form.
                    matches = re.match(r"^(?:[-+]?[0-9.]+)[ ]+([a-zA-Z]+)$", str(amount))
                    if not matches:
                        matches = re.match(r"^([a-zA-Z]+)[ ]+(?:[-+]?[0-9.]+)$", str(amount))
                    if matches:
                        match_currency = matches.group(1)
                if match_currency is not None:
                    match_currency = str(match_currency).strip()
                    match_currency = (
                        match_currency.upper()
                        if match_currency and isinstance(match_currency, str) and len(match_currency) == 3
                        else match_currency
                    )
                    if output_currency is not None and match_currency != output_currency:
                        raise ConversionError("Mismatching currency in input value and 'currency' argument")
                    output_currency = (
                        output_currency if isinstance(output_currency, BaseCurrencyType) else match_currency
                    )
                amount = match_amount
            except AttributeError:
                # Fall back to parsing the object's string representation below.
                amount = str(amount)
        # --- convert the (now normalized) amount to a Decimal ---
        if amount is not None and isinstance(amount, int) and not isinstance(amount, bool):
            output_amount = Decimal(amount)
        elif amount is not None and isinstance(amount, float):
            # Via str() so the float's repr, not its binary expansion, is used.
            output_amount = Decimal(str(amount))
        elif amount is not None and isinstance(amount, str) and amount.strip():
            amount = amount.strip()
            match_currency = None
            # Accept "<amount> <code>" or "<code> <amount>" string forms.
            matches = re.match(r"^(?P<amount>[-+]?[0-9.]+)[ ]+(?P<currency>[a-zA-Z]+)$", amount)
            if not matches:
                matches = re.match(r"^(?P<currency>[a-zA-Z]+)[ ]+(?P<amount>[-+]?[0-9.]+)$", amount)
            if matches:
                amount = matches.group("amount").strip()
                match_currency = matches.group("currency").strip()
                match_currency = (
                    match_currency.upper()
                    if match_currency and isinstance(match_currency, str) and len(match_currency) == 3
                    else match_currency
                )
            if match_currency is not None:
                if output_currency is not None and match_currency != output_currency:
                    raise ConversionError("Mismatching currency in input value and 'currency' argument")
                output_currency = output_currency if isinstance(output_currency, BaseCurrencyType) else match_currency
            try:
                output_amount = Decimal(amount)
            except Exception:
                raise ConversionError("Input value cannot be used as monetary amount")
        elif amount is not None and isinstance(amount, Money):
            if amount.currency and not output_currency and currency is DefaultCurrency:
                output_currency = amount.currency
            output_amount = amount._amount
        elif amount is not None and isinstance(amount, Decimal):
            output_amount = amount
        # --- final validation of the converted amount ---
        if output_amount is None:
            raise ConversionError("Missing input values for monetary amount")
        if output_amount.is_infinite():
            raise ConversionError("Monetary amounts cannot be infinite")
        if output_amount.is_nan():
            raise ConversionError("Input amount is not a number")
        # Scale down a sub-unit amount by the currency's decimal digits
        # (default 100, i.e. two decimal places).
        if from_sub_units:
            if output_currency and isinstance(output_currency, BaseCurrencyType):
                if output_currency.decimal_digits != 0:
                    output_amount = output_amount / Decimal(pow(10, output_currency.decimal_digits))
            else:
                output_amount = output_amount / 100
        if output_amount > Decimal(HIGHEST_SUPPORTED_AMOUNT):
            raise ConversionError(f"Input amount is too high, max value is {HIGHEST_SUPPORTED_AMOUNT}")
        if output_amount < Decimal(LOWEST_SUPPORTED_AMOUNT):
            raise ConversionError(f"Input amount is too low, min value is {LOWEST_SUPPORTED_AMOUNT}")
        if output_currency and not re.match(r"^[A-Za-z]+$", str(output_currency)):
            raise ConversionError("Invalid 'currency' or 'currency_code'")
        # Normalize negative zero to plain zero.
        if output_amount == 0 and output_amount.is_signed():
            output_amount = Decimal(0)
        # All secondary inputs must agree with the primary amount.
        if any([output_amount != a for a in validate_amounts]):
            raise ConversionError("Values in input arguments does not match")
        object.__setattr__(self, "_amount", output_amount)
        object.__setattr__(self, "_currency", output_currency)
    @property
    def amount(self) -> Decimal:
        """The normalized monetary amount as a Decimal."""
        return self._amount
    @property
    def currency(self) -> Optional[Union[CurrencyValue, str]]:
        """The currency object or code string, or None when currency-less."""
        return self._currency
    @property
    def currency_code(self) -> Optional[str]:
        """The currency as a plain string, or None when no currency is set."""
        return str(self._currency) if self._currency else None
    @property
    def _amount_tuple(self) -> Tuple[str, str]:
        """Split the amount into ``(units, nanos)`` digit strings.

        The amount is first quantized to 9 decimal places (ties away from
        zero); the sign is applied separately to each non-zero part, matching
        the protobuf-style units/nanos representation.
        """
        amount = self._amount.quantize(Decimal(f"1e-{NANOS_LENGTH}"), ROUND_HALF_UP)
        sign, digits, exponent = amount.as_tuple()
        # After quantize the exponent is -NANOS_LENGTH, so the last 9 digits
        # are the fractional part and everything before them the integer part.
        units_str = "".join(map(str, digits))[:exponent] or "0"
        nanos_str = "".join(map(str, digits))[exponent:]
        nanos_str = nanos_str.rjust((-exponent), "0").ljust(NANOS_LENGTH, "0")[0:NANOS_LENGTH]
        if sign and int(units_str):
            units_str = f"-{units_str}"
        if sign and int(nanos_str):
            nanos_str = f"-{nanos_str}"
        return units_str, nanos_str
@property
def units(self) -> int:
units, _ = self._amount_tuple
return int(units)
@property
def nanos(self) -> int:
_, nanos = self._amount_tuple
return int(nanos)
    @property
    def value(self) -> str:
        """String form of the monetary value (delegates to ``__str__``)."""
        return str(self)
@property
def sub_units(self) -> Decimal:
if self._currency and isinstance(self._currency, BaseCurrencyType):
if self._currency.decimal_digits == 0:
output = self._amount
else:
output = self._amount * Decimal(pow(10, self._currency.decimal_digits))
else:
output = self._amount * 100
if output == output.to_integral():
return output.to_integral()
return output
def asdict(self) -> Dict[str, Optional[Union[str, int]]]:
return {"value": self.value, "units": self.units, "nanos": self.nanos, "currency_code": self.currency_code}
def as_dict(self) -> Dict[str, Optional[Union[str, | |
<path i\
d=\x22path4496\x22 d=\x22\
m1410.8-552.18-2\
.6811-0.78534v-1\
.6593-1.6593l2.6\
172-2.8863c1.439\
4-1.5875 2.6774-\
2.9468 2.7509-3.\
0206 0.1162-0.11\
664 0.1338 0.583\
79 0.1338 5.3399\
0 3.0108 0.1003\
5.5024 0.061 5.\
4976-0.039 0-1.4\
08-0.39464-2.882\
4-0.82659z\x22/>\x0a \
<path id=\x22path4\
494\x22 d=\x22m1405.3-\
557.14-1.7147-0.\
39383-0.058-2.22\
64c-0.096-3.7432\
-0.2183-5.9319-0\
.1147-5.9319 0.1\
35 0 9.4995 2.25\
19 9.7031 2.3367\
0.1425 0.0594-0\
.2973 0.59569-2.\
7252 3.3251-2.31\
19 2.599-2.9432 \
3.2574-3.1359 3.\
2692-0.1326 0.01\
04-1.0128-0.1622\
5-1.956-0.37882z\
\x22/>\x0a </g>\x0a <g \
fill=\x22none\x22>\x0a \
<g stroke=\x22#5050\
50\x22 stroke-width\
=\x221px\x22>\x0a <pat\
h id=\x22path4486\x22 \
stroke-linejoin=\
\x22round\x22 d=\x22m1402\
.9-566.33 0.2525\
12.374-9.0914 6\
.8185\x22 stroke-li\
necap=\x22round\x22/>\x0a\
<path id=\x22pa\
th4492\x22 d=\x22m1403\
.2-553.95 11.112\
3.283\x22/>\x0a <p\
ath id=\x22path4448\
\x22 d=\x22m1489.2-553\
.95 10.859 3.030\
4\x22/>\x0a <path i\
d=\x22path4442\x22 str\
oke-linejoin=\x22ro\
und\x22 d=\x22m1488.9-\
566.33 0.2525 12\
.374-9.0914 6.81\
85\x22 stroke-linec\
ap=\x22round\x22/>\x0a \
<path id=\x22path4\
430\x22 stroke-line\
join=\x22round\x22 d=\x22\
m1432.9-566.33 0\
.2525 12.374-9.0\
914 6.8185\x22 stro\
ke-linecap=\x22roun\
d\x22/>\x0a <path i\
d=\x22path4436\x22 d=\x22\
m1433.2-553.95 1\
1.112 3.283\x22/>\x0a \
<path id=\x22pat\
h4418\x22 stroke-li\
nejoin=\x22round\x22 d\
=\x22m1374.9-566.33\
0.2525 12.374-9\
.0914 6.8185\x22 st\
roke-linecap=\x22ro\
und\x22/>\x0a <path\
id=\x22path4424\x22 d\
=\x22m1375.2-553.95\
11.112 3.283\x22/>\
\x0a <path id=\x22p\
ath4375\x22 stroke-\
linejoin=\x22round\x22\
d=\x22m1346.9-566.\
33 0.2525 12.374\
-9.0914 6.8185\x22 \
stroke-linecap=\x22\
round\x22/>\x0a <pa\
th id=\x22path4381\x22\
d=\x22m1347.2-553.\
95 11.112 3.283\x22\
/>\x0a </g>\x0a <g\
stroke=\x22#000\x22>\x0a\
<g stroke-wi\
dth=\x2210\x22>\x0a <\
path id=\x22path425\
6\x22 stroke-linejo\
in=\x22round\x22 d=\x22m1\
732.1-507.64 138\
.56-80 138.56 80\
-138.56 80-138.5\
6-80v160\x22 stroke\
-linecap=\x22round\x22\
/>\x0a <g>\x0a \
<path id=\x22path\
4258\x22 stroke-lin\
ejoin=\x22round\x22 d=\
\x22m1732.1-347.64 \
138.56 80v-160\x22/\
>\x0a <path id\
=\x22path4260\x22 d=\x22m\
1870.6-267.64 13\
8.56-80v-160\x22/>\x0a\
<path id=\x22\
path4262\x22 stroke\
-linejoin=\x22round\
\x22 d=\x22m1870.6-587\
.64v160l-138.56 \
80\x22/>\x0a <pat\
h id=\x22path4264\x22 \
d=\x22m1870.6-427.6\
4 138.56 80\x22/>\x0a \
</g>\x0a <g\
stroke-linejoin\
=\x22round\x22 stroke-\
linecap=\x22round\x22>\
\x0a <path id=\
\x22path4266\x22 d=\x22m1\
620-887.64-80 80\
80 80 80-80z\x22/>\
\x0a <path id=\
\x22path4270\x22 d=\x22m1\
620-727.64v80l80\
-80v-80\x22/>\x0a \
<path id=\x22path4\
272\x22 d=\x22m1620-64\
7.64-80-80v-80\x22/\
>\x0a <path id\
=\x22path4274\x22 d=\x22m\
1620-887.64v80l-\
80 80\x22/>\x0a <\
path id=\x22path427\
6\x22 d=\x22m1620-807.\
64 80 80\x22/>\x0a \
<path id=\x22path\
4280\x22 d=\x22m2252.9\
-967.64-164.85 6\
0v160l164.85-60z\
\x22/>\x0a <path \
id=\x22path4282\x22 d=\
\x22m2088.1-907.64-\
164.85-60 164.85\
-60 164.85 60\x22/>\
\x0a <path id=\
\x22path4284\x22 d=\x22m1\
923.2-967.64v160\
l164.85 60\x22/>\x0a \
<path id=\x22pa\
th4286\x22 d=\x22m2088\
.1-1027.6v160l-1\
64.85 60\x22/>\x0a \
<path id=\x22path\
4288\x22 d=\x22m2088.1\
-867.64 164.85 6\
0\x22/>\x0a </g>\x0a \
</g>\x0a <g s\
troke-width=\x221px\
\x22>\x0a <path id\
=\x22path4320\x22 d=\x22m\
1188.1-341.67 15\
3.31-71.488 153.\
31 88.512-153.31\
71.488-153.31-8\
8.512v160l153.31\
88.512v-160\x22/>\x0a\
<path id=\x22p\
ath4322\x22 d=\x22m134\
1.4-93.16 153.31\
-71.488v-160\x22/>\x0a\
<path id=\x22p\
ath4324\x22 d=\x22m134\
1.4-413.16v160l-\
153.31 71.488\x22/>\
\x0a <path id=\x22\
path4326\x22 d=\x22m13\
41.4-253.16 153.\
31 88.512\x22/>\x0a \
</g>\x0a </g>\x0a \
</g>\x0a <image id\
=\x22image4336\x22 sty\
le=\x22image-render\
ing:optimizeSpee\
d\x22 xlink:href=\x22d\
ata:image/png;ba\
se64,iVBORw0KGgo\
AAAANSUhEUgAAAR8\
AAAApCAYAAAAMAy1\
JAAAABHNCSVQICAg\
IfAhkiAAAHihJREF\
U eJztnXd4VFX6xz\
/TazKTmUkjjYTQQi\
gK2ClW1r5Y17JiXc\
ta1rauZXUta/np6u\
4qrgXdta0gCwIW m\
oAFkCIlQAgQEkhIS\
G/T59bfHxMiJYHAD\
PCH83mePDyZO+feb\
055zznv+96D5sUXX\
lBJkCBBgmOM PiMz\
43hrSJAgwS8QfUF+\
/vHWkCBBgl8gerfb\
fbw1JEiQ4BeIPjk5\
+XhrSJAgwS8QvdVq\
Pd4aEiRI 8AtEbzA\
YYrqBJMv886tvmL1\
6PXkeN4NzMhmcnUm\
/jHRy3Ck4bfEzbk2\
NDWTn5BGJhON2z4r\
6Rn4o KyfP4yLLnU\
KG00Gy1RK3+x8uNS\
1t1DS3kuZIJt2ZjM\
1sOm5aAFZsreC7zV\
vJ87jJT/eQkeIkzZ\
GM xRhbv9mfo9G2K\
7ZW8NXaDeSneRiQm\
UGK3UZuqoskizluz\
zhcPceiLnvLko1lz\
Fu/iVy3m34ZqfRx \
p5DuSMKTlIROp43b\
c3pqW30sN21o7+Dx\
T2aiNxi5aPSJXDv+\
NEqrayitrmXBhi3s\
ampBr9WS5XIw sE8\
mRTmZFGak4U62Yzc\
fnw6wB1lR+GL1el6\
fu4QBWRnsbG6juqm\
FhvYOjFodGc5kclP\
dFGamMiAz g3RnMk\
kWM1aTEY1GE3c9ki\
zzxU8lPDZjLq06Ey\
ZFwoaMx2KhwJNCQa\
qb/DQP+Rmp5HlcXV\
pMMU4e PREWRN5d+\
D1fr9vIOSOKaQ8Lz\
FhVQl1rG80dPmwWE\
+nJyWS7nfTPTKMgP\
Y2MFAcOq+W4G8w92\
qf/ +BN1QZG+A4pp\
W7mVkK8dgypi0enI\
djkYmp1JujOJQdl9\
KEhPxWYy4kqyHzU9\
05avxm4x8+uTRx7X\
uvSHwrz2xULeXLy\
UkXnZjOxfyJqqWma\
v2UBdazthQcRtt5H\
hTCY/3UNBeir5aam\
4kqykOZLj1v81 3o\
72I8rzWVOxk8c++Z\
y7LjyX80cNZ+XW7V\
wwasQ+31EUhYZ2L9\
VNLWyp2c2mqho2Vd\
ews6GRmX+8 i2y36\
7CeGa/ZMRgReGrqb\
NpCYZ6+5jLKanYz8\
dRRQNQoNXt91La0U\
dXYzLbaOrbW1rO7t\
Q1vMEiS 2cS7d04i\
xW6LScPedASCPPHp\
LL7aUU9y0QhUgxFR\
lohIIuFQkEgwgBAK\
QCgE4SAGUcBj0JJm\
MnJy YV9evP5y9Dp\
d3PTUtXbw8IfT6Ne\
nD5eeciIfLV7KlHt\
v67ouShIN7V7q2tq\
pbmphW20dFXWNVNQ\
3 0Njezv8e/j3Z7p\
TDema82nZv7b8983\
Rufm8mv7rsRvxCGH\
8kTHvAR3NbC20drb\
S3t+JrbyXkbUcJ B\
UjXKsz78wP0TfPEp\
KEnPTedO5bZK9bwx\
NW/7rreU13uaGikv\
q3tiOryYGytrePu9\
z5lh2LE1ieX k1Qf\
Ux++q+u6qqp4Q2Ea\
2tqpa22nor6R7XUN\
lO2qZXFJKQufeojB\
OZmH9cy4rXwkWeY/\
S5Yxa3UJ 7933OwZ\
lZ7K9roF/f/P9AcZ\
Hq9WS6XKS6XJy8sB\
+hEWR370+hRVbymn\
2+g/b+MSDDVU1PPL\
RdK4/ cwy3TTiTnY\
3NfLxkaZfx0Wm1pD\
sdpDsdnNivLxBtEE\
VV+dvnX/On/0zFGw\
zFxfioqsqysu384e\
OZ mPOHMPysUUiKg\
ihLhCWRkCgQNJoIW\
mz4hTBhUUAFRFWlT\
lWpKy8jsKGMsCBit\
8RufFRVZWFJKS/N \
msdT11zOBaOG09Th\
46oxp+zzPYNeT7bH\
RbbHxej+BQAEIxGu\
eXky35aVE4xEYtYS\
D+07G5tRFBkV FUV\
RkBUFtFqMNjsWvQ7\
JZkdxpyKHAgRKS9j\
d2kxEFI+anvZAsKt\
P7aG7utwzTlaVV8a\
tLkVJ4sNv f+SvXy\
yi3+ixjMzuS3tzA0\
pzxz7f02g0OKwWHF\
YLA7IyGTd0MCFB4J\
Z/vItOp0NR45eTfF\
jGp8Xn 56lpszGbz\
Mx54gEcnf6cbLeLJ\
66+9KBlJVnmmU8/R\
6PRMPHUUSjKsU2sF\
iWZD75bzherS3j99\
hsZ UZCHRqOhj8vJ\
09ddfsjyU+Z/y+wV\
azhjyEB0cVhlBMIR\
XvtiITM2VTLirIkk\
pbiRFIXIHqMjRNBr\
tWjQoKogKTKSLCM\
qMqgqmqoKrikqYFv\
N7rjsz/2hMK/MmU9\
FYwv/e/Q+cjxuNBo\
NIUFg5dbtnHfC 0B\
7LCpLE/VM+YVGDF6\
PLg/kobQV7oiftBp\
0OURJoDwWjdSpGCI\
kiYUkkIklEOleXkc\
ptYDSiSXbE RXtPe\
lp8fj5asvSASXpv9\
h4n44cOjoueXc2tP\
PrJTMp8ChOuugWrL\
QlZUdAGA8iHGIeiJ\
PHYh5+R 5kxmzJBB\
cfVP9brXbqmp49rX\
3mFMcRGT75jUZXgA\
OoJBnp8+56Dl/zV3\
Eesqd/LGHTdiMuhR\
VOXI VR8m7YEgt//\
rA7bXNzPjsT9wQr+\
+XfvWVp+fpz/9/KD\
lpy9dyZQFS/j0j3e\
TZDGjjXHPu72ugct\
f eYvvWhUuuPwmBv\
YtJDPZidtqJ8lkxm\
owYtIbMOj06LVadF\
oNWo3m5712TTUT0l\
N4+66bURWVWHfg O\
xqa+M1r7+BxOpn68\
O/JTfV0PUtWFBrbv\
Qct/+L0L/isdAcpQ\
4Zj0GrRauPnrDwUB\
9Nu0OuQJIm2 kJ+O\
UABvOIxfCBMQIp2G\
SCCwcztSOAx5BaAo\
MWs/mJ5st4tnrrvi\
oOX3HicaNDHrmbd2\
Ixe+NBmv u4ALJ15\
PQUYO6UkOnBYrFqP\
pkCuZl2Z8SX1bBy9\
MuhpJljEa4re979X\
K59MfVvLCzK/55KH\
fc+qg wgMcTlaTiZ\
P69+u2rKqqzFi+mk\
+/W86XTz7UOXi10S\
XwMeCbklIe/vB/PH\
z5Rdx23vgDGtNmNj\
N+ 6OBuy6qqytw1J\
Tw7bRZznniA3FQ3i\
qKi0x75cH/9q0W8N\
u87Rp03kbzcAvR6A\
7KiIMkygiwiyNLP \
P5K0z++SLENdDeOS\
jEz7493YzNHOo9Uc\
eQed+sNKnpo2hzfu\
mMTEU0cd0LYZKQ6u\
HX9at2VVVeX9 hd/\
x2pKV5Jx2Jl5RQK8\
BXQx6DoepP6zkrzO\
/Zso9t3H28KIDtBt\
0OmRZoiXgR5B+3so\
GhAhBIYJ3 1w4iLY\
0wsBi0WrSoxDKvHE\
pPU4eXh97/L7OfeO\
CAst2NE1mRY6rLv0\
ydzVvfruD0868gNy\
cfWVUJ ChFkVSEkC\
kRkCXoYh4qq8u68x\
Xy/aQszH/8DFqMRW\
ZFi6mv70yvj8/7ip\
eSmenjwvU9w2q0U5\
WQx un8BwwvyyHA6\
MBkM6Hqw0Es2bOb5\
z2Yz64n7uyIJOq32\
kMu9ePHCrPnoUjL4\
5/ylvL9oGcPy+lCc\
m80pAwvJTXVhNBi\
IiFL32jeW8acPpvH\
ZI/eSn5EGRFcCPf2\
tveG/P6ykKCudHas\
Ws2PDKqwON660 Pt\
hSXGAwIABBITpA9j\
hJ/ZEIQUFAaaxjuB\
ph+p8eIdlqQZQkdD\
ot2hiM4b+XLOO0wf\
35++x5vDpr LoV90\
hmWl8PJAwvpm55KW\
BB4/rM5fPHkgweUn\
b1iDY//by5Dx1+AX\
1VQhAh6QKeLfzSwO\
95bvIy8 VA9//ng6\
f5t1YL/U6bQoskRL\
wIeoyFEHvthpgBp2\
E66pgsHF0LmNjrbq\
kWs/lB6r2cRZw4q6\
Ldvd OJFkJaa6/Gr\
tRiwpHtYu/Yb1Gg3\
JDidOTzrJ7jRMSQ6\
8vg4c3RgfVVX539K\
VfLhkKV/8+cGuyLQ\
k xzbx7k+vjI8n2c\
7kO28mI8XB1to6dj\
Y08X3pVj75djnVTS\
24k2xs211PpsvJ6P\
4F9E3zoNVq2VS1 i\
/unfMz79/2O3NSfI\
whareaY+XzsVgtDz\
7iEVJcHv99LY1M90\
yqq+McPMxB8baSZd\
TS0dWDU6xia l8Po\
Af2wmoysrdjJA1M+\
5r17bmVQ9s/efUWV\
Y9p2ZXtcvH33LXiS\
k9jR0ET57npWbatg\
0+YyNuxu wqtoMDi\
caOxJSFYbIZ2WiCS\
htDZTGGhnxlMPkup\
IAkCQojNjLHpcSTZ\
evfV6stwp1Le1U1H\
XyLrK Kt6Zv5httX\
WERYmwIPDPL+ZTnJ\
fDKQP7YTWZ+HFLOf\
f+ZzojzroI2WzG6/\
chKQp6rSYm43w4eJ\
Jt vHnXLaQ7k7vvl\
8l2mtvaCFZuQ5OUj\
GQwEJEkwm0tSJXlM\
LAI9D/7MHQQ08rnU\
HpSHUnkpXmY9sOK \
Xo0TJcaJzmw04O4/\
FIcnDVUUiPh9tLa1\
UL1pLSFfO3I4jEsH\
z02bxajCAk7ol0ea\
I5klG8t4acaX fPb\
IPfukHsQ68e5Pr4y\
P2WAkJAiYDAaG9c1\
lWN9cLjl5JGrnfrG\
qqZlPvl3Ouooqpiz\
4ljafH48j mZrmVt\
6444YDPPw6rRb5GP\
l8zAYjvlAAU8SOxm\
DEmZGFyZ1K5qBhBI\
Qw7X4v2oqt/GV5Kf\
4vlyD7 feQ77IiRC\
B/84XZG9S/YZ/ksS\
rE1gN1sxBsMkZfmo\
Tgvm+K8bH59ysiu6\
6u2VfDER9MZ0z+DF\
Vu3 U9Hcyg5/iAwd\
zHrqIfp1rsCiWiT0\
uth8LCa9gbAgotNq\
yXK7yHK7GFs8qKtt\
a1vbuPrF19FqNHy4\
6Ace/WBq9PN2P6M\
nXIHJ6aLJ70VSZAR\
ZwoJ6zIzPHu099cu\
djc2c9qfnaGyoRan\
YApIYNTaiCP0H g3\
nfZFK9RhPTNudQej\
ZV1XDr6++S7nT0ap\
zIshxTXVpNBnb5Og\
gajWi1WrQGA2paBq\
bUNHSyQtjX gat2O\
31cKUxftpK/fjYLX\
yhMY4eXuX95mH6Z6\
fvqUWLTsz+9Mz56P\
WHhwBDknkGZl+phy\
YbNfP74 /djNJjoC\
QaqbWrj7rQ8YWzz4\
gL2vTquNa8juYFgN\
OrzBAIZwEA0gKQqC\
LBESBUKiiCDLtO4s\
x3Xy WKSMPvjDIcq\
FCIW7K7siYnujKGp\
M0aUkixlfKLTPZ3s\
/w5VkpyAznSevmYi\
qqvjDYT5Y9AMN7V6\
K crL2KSfIMvoYO4\
NJr+82vLxHk9Nq5d\
RB/bn7ovMAiIgiNc\
2tjHtuMpl9cmnu3N\
IIUjRylHQMVz6H 0\
t43zYPNZkXJygWtD\
mQZwkHYWQndvFZk0\
Gpi2sIeSs/gnD68e\
/etDMvPRVXVQ44TK\
caVRpLZTHvA h89o\
7FrRqYCqgqIqKOEg\
Q+02bj53HDefOw5J\
lpm/diMzlq9iWN/c\
A+4nyfFd+fTqTmaT\
oVvjsweN RsNN54z\
FoNOi0Whw2m30y0x\
HRY3+pfs/VKNBOUY\
OZ7NBjzcUoCMUpD0\
cxBsO4QuHCESiEY+\
IImPN LSDS6dyVUc\
FoRNVoovr3Q1aUmJ\
xuTpuVNn+wx+t9XC\
ncePYYIFqvSRYLGS\
kONBoO6JyiJGHQx5\
Sk jtGgI3yQ3BaTw\
cAJ/fK6Zm+TwUBGi\
hONGt0WCLIU9aNIA\
hFRxKiNbRsYT+0aj\
QazXg+KGt1P6fVg \
skSdrN1oNMRoOA+l\
JxQRuPutD1BVtVfj\
RFbUmOrSabOiESVk\
SUaUZARJRpRlJEXu\
mvylvcahXqfD nWx\
Hs3dkdS+iwZZjvu0\
yEBKEHq+rqsqGHdW\
cUTSQvM7sUL1Oh6y\
o3dVpdOVzjHw+NqM\
RXzCANhzs 0irK0S\
1CRBLxNjfgq6qEgA\
8xyQGyBBZrdLB1I1\
FRlJg6hNtuo9nbc+\
j6m/WbuOftD1j47K\
N8uXod Zw8fgqrS7\
YwqSLGHPs2Gg08s2\
3bX8fxncxhVWMCm6\
l2MKRqIzRyNxATFa\
NQo+q+ApMgYNZq4R\
kRi 0T5j2SoQBfrW\
VFAdEVFsydEVj6pE\
B/t+7WjUamPzsRxC\
z6INm9lc30TRfU8y\
IieDkwYUMKxvDpKs\
RKOW+31fluXYJjq\
rFWNzBI2qRUJF0qh\
0DTtVgd276NAqnP3\
480y+80Y2VdUgKwq\
iJHcZyH30HI9o l9\
1kwh/uOe39rbmLWL\
ShlMtOG80zUz9nfP\
FgzhkxBFGWkVUF3X\
7VqtUeu1C7zWTE1x\
GAUNT4KKqK rMiIs\
oS/uZGwEIHcvghaL\
WrAB63NkJFFY0srP\
2zeistuIyPFSU6qG\
61GgxSj8XHZ7TR7/\
Qd8Lsky X/20nix3\
Cgue/RM5qS7SnQ7q\
2tpZV1nFV6tLmHTW\
WGat+InLTxuNx5FM\
RyCIQRub8bEajd1m\
0Sqq ii8Y4r0F3/H\
qrddhM5vYsGMX6Q4\
Hb89bjChL1NbspK6\
lkaDNRjAcRFXVaKg\
9jhGRI9EuSjJba3f\
j DYWZ9/Qj2C0map\
vbWL29khVbK/h3eR\
n27Ztp1xpQ7ElgTw\
KTCZNOG9M2tkc9ss\
yU+Ut4ZcFyTr/k O\
kSNhorWJn7cUI1v8\
Wr8NdWc/tjzjM7P5\
dRBhZzYL59Ml7Nz2\
3XkdWk1GTCqoNdqk\
VQ1+oOKJIlQ Vc69\
pwzniasuJSyIWExG\
vlq9nqYOH/PXbuC6\
V97kud9eybrKKsYU\
DSDJYolGu+IYyeyV\
8bGZTfhD BxofXyj\
Mkg2byU9P5ZvnHou\
mZdushAWReWs2sKu\
phdKqGuasWst1406\
jICMNbWfS3LFKcLa\
ajfjq W1H2WvlIik\
Kovhb/rh3oi4YhqG\
p0pWOxgjsVVBW73U\
7/zAy+3biZOSvXMn\
pAP/719TfodbHNju\
4k G+urdx/w+aKSU\
j797kfev+82rKboy\
4TXn3k6ABajgcb2D\
jJdTgoy0ghEBN6eN\
oulm7cxIDPtgHsd \
DlazkUD4wAHz1tff\
sLayinfuvqXL2O7J\
BB/VP58+dz6OX4jQ\
VFeD7E4lUroeXB6M\
el1c34g+Eu2v zZ5\
LVWMzb9wx6Wfflc3\
GkLxsJp01hp/KK5n\
x6L1U1jexalsFK8s\
rWbmjHLtRj0F/5Ma\
8Oz0RUeTx j6Yzb3\
sDZ158LWFVxRcJYU\
vx4LHaMGbl0NHSyE\
ZnJquqm5lcWoku4C\
fHpCPNYoqpLi1GIy\
aNBovO gKgoiKpCO\
BJEW1nGC5dN4J6LJ\
+wzkT448QI2Ve2if\
Hc9z99wFSaDnnUVO\
7GZjMxYvhqbyYg+x\
slu b3q38rGY8O1n\
fMKCyAvT55Cb6ub2\
X53V1ciDsvsAMDw/\
l8lfLyTL42JQdh8C\
EYFrXp5MtseFQa8/\
Zisfu8lMIBhEDAU\
AUEWRSHkZ5PRFGVh\
MpDsdGg0GvY5URxK\
3n3929N0uJZpwWJy\
VEVOH8CTbafb6 up\
a1HYEg973zEQ9ddi\
H/feiubiNXJoMBRV\
VJsdv4zdhTAchLdX\
Pxs3/jwUvOO2ItEK\
0f/14DpiMQ ZPrSl\
Zw0oJAbzxnbbdaL2\
WhEVmQUqw3bgCLqf\
R2oQ4ZDxVauOX/sM\
XM476+9xefnb59/z\
e8vPCfq l+pmhRr9\
TCUjxUleWipnDitC\
VhTun/IxBamumE4J\
6E7PTf94lyaTm3EX\
XElIFBCEMIIsISqd\
/hdZ BkA2GMHpAqc\
LWVHYubWUO8afGqM\
PSo8ZcBgMCIqCt70\
Vw+5y3rvnJiacOLT\
b+tHrdGg00Dc9FYB\
n r7+CsCDyzy8W8M\
hlF8TkkD/gWb35kt\
1kwruX8VlcUsr733\
zP5DsmkWy19NjIih\
qN7lzd+WLif+6/ n\
ZrmVm549S0uHdnzu\
0LxxGo2EhHCCJEwh\
EOooghJyWAwHDSfb\
G9flUajYVHJJupa2\
nj5+sti0uOy 22jx\
RbddtS2tNHX4OGt4\
EYOyM3sMmZv2S4SU\
FYVnps3i8lNOpHC/\
cOjhEp2to23rDQZ5\
f+F3JNss nNAv76A\
dX1EUvJ0OfEESoaO\
Ns9NdTDqz+2zoo8H\
e2ps7fHy3qYzivGw\
yUpwH166yT7R10fp\
SKnY3 8PSVF8VNz5\
Zdu7npjfcw5hUzaP\
AwfJHwXu+XCYRFgb\
AkEpYkFFVFUeSfb9\
TcyBlpDn533riY9J\
j1 BkwaSDEZqavbh\
au9jk+fuI8hudk9H\
ouh1+mQ5H0n5LfmL\
qJvqovxQwbGpGd/e\
mVW7WYz/nAYSZaZ \
vWINKvDipKtx2KwH\
PdtDVdSuKAlEO+z9\
Uz7mzgnjKM7N6rFc\
PLGbTCDLqMEA6rbN\
0YhHWsYhs8mU vbS\
3eH08N20Wz11zacx\
HV1hNJgLhaC7Fta+\
8SUQUueGsMQe9r9l\
g2Mfh/OWqdVQ3NHL\
TmWfEpAX2 +PMiLC\
4p5dqX3+TWCeO5+Z\
xxh5xxZUWhIxTCHw\
mjiiKepjpe/u1lcT\
3a41Ds0b6+soorXv\
wHpw7u z7XjTjukd\
vUote0ePd+XbuGSl\
yajLTwBd35/2kPB6\
Ptl4SDecBhfJIy/6\
/2yCKrKz+MkEsbRW\
Mvf Jl0de18zG9Gq\
Er66KvI1AeY9+QDF\
eTkHHbN6rbZrNQZQ\
sqOamctX8dhlF8ak\
pTt6Z3ysZlp9fj5e\
soxlZds4o2gA2Z5\
DH4ehqErXCiIiitz\
79keMHVTIxJNPjEn\
04WAzm6GxHoQIDBk\
e9ev0AkVVO3Mi VB\
7/aDq/OX10zKsMAJ\
NBT21LWzQv6rE/cN\
KA7t+J27fMzyufut\
Z2np36OS9cf0VM/o\
k92MxmFqzb iDvZz\
jv33ILdbO7VYVGKI\
tMRDiIqMrrqSp6+5\
Ny41M/hYDObmbumB\
FlR+OiBO8lMcfaq3\
J5+Ge+2 tZnNzFrx\
ExP//j72E05Hl+Km\
JeCnJeinJRigLRjo\
fME1iC8cNdyBSISu\
sKqqoqncxpOXTmBw\
9uGd mdMdyVYL5du\
3cKLHzOeP3kdWL84\
F0ut0XeH3sCBw7zs\
f8vTVl+Kwxf90z14\
Zn2SzmY+XLKNfZhq\
P XnkJxl7mliidjl\
xFUXj6089JthhjXk\
oeLrmpLrLNBnTNjd\
DRDoLQbU7F/qid2u\
euKaGhrY2rzxgd F\
z1JZjNnDOrPyzO/4\
tmps5i3ZgPNHb6D5\
j2ZjdHktYgocv+Uj\
3ng4nPJcvVuoB2K3\
FQX22vreGvu YtZX\
VtHk9fUqAVRVVcKi\
AK3NXJDliVv9HA57\
tL/+5QI2Vu3qtXbl\
KLVtTmoK22rrUEwW\
WluaaG5v oTXopzX\
opy3opy0UoL0z58z\
baXxEWaJr/9/UwNl\
Zqfx23CkHfU5vOW1\
gIVecciKbqqq5950\
Pmb50 JXWt7Qdsq/\
ZGr4uufBRF4fnpcx\
hX1J9RhX3joueAZ/\
XmS0U5fXj2mon8fd\
bX1LV7yU9P49wRxZ\
w5 rIjcVHePM+WeQ\
7jeXfAt22vrmHzbt\
cfMGbmH4twsVrz4O\
GsqdrKgpJQFm7ezP\
RBBTXFBigeMxm63 \
YIqq0tjh5S//ncmH\
997Sa4N7KIwGPS9P\
upKOYIi1FTuZu3oN\
L0yfjTs5iQknDuOi\
0SeQ5U7Zp05N ne8\
kfbBoKRaDjotH93w\
ezOFSnJvF0ucfZU3\
FTr5etYbnP5tDutP\
B+SOHcf6oEfRxde+\
4RVVBiJDS UMv/PX\
V/3OrncLX/2Nm2h6\
NdVVUa2jvi3rZDc7\
NZ9tfHWLm9kvnrNr\
Fw2ToCRgtGTzo6dy\
qK0YAg yURkEUmW9\
00ji4Sx1e3itWcex\
Gw0xkVPZoqDKXfdi\
C8UZt2OahZvKOO12\
V+TbLVxzohiLjnpB\
Pr3 ydinjnRaHbKi\
sLysnO82ljHzj3cd\
lWOD4QiOUZVlhU27\
alm8sYylZdsJiSIj\
CvIYXzyYkYX55KX9\
fH5J0V1/5MGJFzD\
12+X89/7bYq7UeB2\
1uW13PfPXlfLNhs2\
saWzDa7ZCihvs9mg\
aPuAqXccZ/fO5 ZG\
QxV51+UkzP6w0V9Y\
0sXF/Kgg1lKKrKKQ\
ML+dXI4YwoyEWDhg\
G3P0R+upupD9wR10\
P5u2NP/Sza WIZGq\
2XMkIGcd8JQRhTkY\
elsQ9M1d6Ba7fzri\
glxqZ94t+3BtA+84\
yGG5+cxYdigo962K\
7ZWsLBk M0s2bWFj\
q5dIkiMa1bInRd+m\
V2TYsBad2cyrl5zN\
TWfF7sc7FNVNLSwo\
KWX++lJ8oQij+udz\
wagR jCrMR6vVMua\
RZ3HarLw66arDPjK\
1O3pq2yM+wxmiDuQ\
2f5BNu2r5fnM5JTt\
3gUbDqP75jB9axBU\
v /IOi7Ew+uOdWMl\
2Oo/ZHHCmyrFDf4e\
XHLeXMKyljaWUVjR\
oDkiMFTfkWLh81lH\
funHRMnaiKotDk 9\
bG6fCcLN26mor6Jv\
umpTP1+BTMf+T1ji\
wYcMy1718+iTVupb\
m5haF4uE04cxpUvv\
c7FI4fx7l3x qZ+j\
2bb7a//N/73BhSOH\
8c6dNxyzthUlmZqW\
Vr7ZsJn5JWWsramn\
VWdAcaTA9i38avgQ\
Prnvlphf lzkcFEW\
hxRdgbeVO5q/fTFl\
tHelOJ7NW/MRLN1z\
B7eeNj8tzjorx6Y6\
a5lbW7ajm+83l7Gh\
s4rlr Ju5zJEUsHI\
3/XmVvfKEwayurWL\
SxjK21dbwy6Wpyeu\
FYP5qEIiKlu2rwhs\
KMHzLwmJ4SuD9Hs3\
5+ aW3b1OHjp+07W\
VCyibLaOt687XoKM\
mJLGI2ViCiyedduV\
pXvYNKZp2OO05Gpx\
8z4JEiQIEFvOH7T \
aIIECX7RJIxPggQJ\
jgsJ45MgQYLjQsL4\
JEiQ4LiQMD4JEiQ4\
LiSMT4IECY4LCeOT\
IEGC40LC+CRI kOC\
4kDA+CRIkOC4kjE+\
CBAmOC/8P22cGy+B\
/iX0AAAAASUVORK5\
CYII= \x22 height=\x22\
41\x22 width=\x22287\x22 \
y=\x22-655.37\x22 x=\x221\
299.7\x22 preserveA\
spectRatio=\x22none\
\x22/>\x0a <g stroke=\
\x22#000\x22 stroke-wi\
dth=\x221px\x22 fill=\x22\
none\x22>\x0a <g>\x0a \
<path id=\x22path\
4339\x22 d=\x22m1268.2\
-591.83-25.238 2\
5.238\x22/>\x0a <pa\
th id=\x22path4347\x22\
d=\x22m1243-566.59\
31.547 8.453 25\
.238-25.238\x22/>\x0a \
<path id=\x22pat\
h4349\x22 d=\x22m1268.\
2-591.83 31.547 \
8.453\x22/>\x0a <pa\
th id=\x22path4353\x22\
d=\x22m1274.5-558.\
14v32l-31.547-8.\
453v-32\x22/>\x0a <\
path id=\x22path435\
5\x22 d=\x22m1274.5-52\
6.14 25.238-25.2\
38v-32\x22/>\x0a <p\
ath id=\x22path4357\
\x22 d=\x22m1268.2-591\
.83v32l-25.238 2\
5.238\x22/>\x0a <pa\
th id=\x22path4359\x22\
d=\x22m1268.2-559.\
83 31.547 8.453\x22\
/>\x0a <path id=\
\x22path4361\x22 d=\x22m1\
236.6-656.28-12.\
619 12.619v16l18\
.928 5.0718v-16l\
-18.928-5.0718\x22/\
>\x0a <path id=\x22\
path4363\x22 d=\x22m12\
43-622.59 12.619\
-12.619\x22/>\x0a <\
path id=\x22path436\
5\x22 d=\x22m1243-638.\
59 12.619-12.619\
v16\x22/>\x0a <path\
id=\x22path4367\x22 d\
=\x22m1255.6-651.21\
-18.928-5.0718v1\
6\x22/>\x0a <path i\
d=\x22path4369\x22 d=\x22\
m1224-627.66 12.\
619-12.619 18.92\
8 5.0718\x22/>\x0a <\
/g>\x0a <g stroke\
-linejoin=\x22round\
\x22 stroke-linecap\
=\x22round\x22>\x0a <p\
ath id=\x22path4371\
\x22 d=\x22m1338.1-559\
.26 13.637 3.030\
4v12.879l-13.637\
-3.7881z\x22/>\x0a \
<path id=\x22path43\
73\x22 d=\x22m1351.7-5\
56.48 6.3134-7.0\
711-11.112-2.777\
9-8.5863 6.8186\x22\
/>\x0a <path id=\
\x22path4377\x22 d=\x22m1\
358-563.55v12.62\
7\x22/>\x0a <path i\
d=\x22path4379\x22 d=\x22\
m1351.7-543.35 6\
.3134-7.5762\x22/>\x0a\
</g>\x0a </g>\x0a \
<path id=\x22path4\
414\x22 stroke-line\
join=\x22round\x22 d=\x22\
m1366.1-559.26 1\
3.637 3.0304v12.\
879l-13.637-3.78\
81z\x22 fill-rule=\x22\
evenodd\x22 fill-op\
acity=\x22.66667\x22 s\
troke=\x22#000\x22 str\
oke-linecap=\x22rou\
nd\x22 stroke-width\
=\x221px\x22 fill=\x22#00\
f\x22/>\x0a <g stroke\
-linejoin=\x22round\
\x22 stroke=\x22#000\x22 \
stroke-linecap=\x22\
round\x22 stroke-wi\
dth=\x221px\x22 fill=\x22\
none\x22>\x0a <path \
id=\x22path4416\x22 d=\
\x22m1379.7-556.48 \
6.3134-7.0711-11\
.112-2.7779-8.58\
63 6.8186\x22/>\x0a \
<path id=\x22path44\
20\x22 d=\x22m1386-563\
.55v12.627\x22/>\x0a \
<path id=\x22path4\
422\x22 d=\x22m1379.7-\
543.35 6.3134-7.\
5762\x22/>\x0a <path\
id=\x22path4426\x22 d\
=\x22m1424.1-559.26\
13.637 3.0304v1\
2.879l-13.637-3.\
7881z\x22/>\x0a </g>\x0a\
<path id=\x22path\
4428\x22 stroke-lin\
ejoin=\x22round\x22 d=\
\x22m1437.7-556.48 \
6.3134-7.0711-11\
.112-2.7779-8.58\
63 6.8186\x22 fill-\
rule=\x22evenodd\x22 f\
ill-opacity=\x22.66\
667\x22 stroke=\x22#00\
0\x22 stroke-lineca\
p=\x22round\x22 stroke\
-width=\x221px\x22 fil\
l=\x22#00f\x22/>\x0a <g \
stroke-linejoin=\
\x22round\x22 stroke=\x22\
#000\x22 stroke-lin\
ecap=\x22round\x22 str\
oke-width=\x221px\x22 \
fill=\x22none\x22>\x0a \
<path id=\x22path44\
32\x22 d=\x22m1444-563\
.55v12.627\x22/>\x0a \
<path id=\x22path4\
434\x22 d=\x22m1437.7-\
543.35 6.3134-7.\
5762\x22/>\x0a <path\
id=\x22path4438\x22 d\
=\x22m1480.1-559.26\
13.637 3.0304v1\
2.879l-13.637-3.\
7881z\x22/>\x0a <pat\
h id=\x22path4440\x22 \
d=\x22m1493.7-556.4\
8 6.3134-7.0711-\
11.112-2.7779-8.\
5863 6.8186\x22/>\x0a \
<path id=\x22path\
4444\x22 d=\x22m1500-5\
63.55v12.627\x22/>\x0a\
</g>\x0a <path i\
d=\x22path4480\x22 fil\
l-opacity=\x22.6666\
7\x22 fill=\x22#00f\x22 d\
=\x22m1494.2-550.46\
0.001-5.8666 2.\
6521-2.9705c1.45\
87-1.6338 2.6787\
-2.9981 2.7112-3\
.0319 0.035-0.03\
68 0.059 2.1709 \
0.059 5.5859v5.6\
472l-2.7111 3.25\
14-2.7111 3.2514\
v-5.8668z\x22/>\x0a <\
g stroke-width=\x22\
1px\x22 fill=\x22none\x22\
>\x0a <g stroke-l\
inejoin=\x22round\x22 \
stroke=\x22#000\x22 st\
roke-linecap=\x22ro\
und\x22>\x0a <path \
id=\x22path4446\x22 d=\
\x22m1493.7-543.35 \
6.3134-7.5762\x22/>\
\x0a <path id=\x22p\
ath4482\x22 d=\x22m139\
4.1-559.26 13.63\
7 3.0304v12.879l\
-13.637-3.7881z\x22\
/>\x0a <path id=\
\x22path4484\x22 d=\x22m1\
407.7-556.48 6.3\
134-7.0711-11.11\
2-2.7779-8.5863 \
6.8186\x22/>\x0a <p\
ath id=\x22path4488\
\x22 d=\x22m1414-563.5\
5v12.627\x22/>\x0a \
<path id=\x22path44\
90\x22 d=\x22m1407.7-5\
43.35 6.3134-7.5\
762\x22/>\x0a </g>\x0a \
<path id=\x22path\
4508\x22 stroke-lin\
ejoin=\x22round\x22 d=\
\x22m1460.9-566.33 \
0.2525 12.374-9.\
0914 6.8185\x22 str\
oke=\x22#505050\x22 st\
roke-linecap=\x22ro\
und\x22/>\x0a <path \
id=\x22path4510\x22 d=\
\x22m1461.2-553.95 \
11.112 3.283\x22 st\
roke=\x22#505050\x22/>\
\x0a <g stroke-li\
nejoin=\x22round\x22 s\
troke=\x22#000\x22 str\
oke-linecap=\x22rou\
nd\x22>\x0a <path i\
d=\x22path4512\x22 d=\x22\
m1452.1-559.26 1\
3.637 3.0304v12.\
879l-13.637-3.78\
81z\x22/>\x0a <path\
id=\x22path4514\x22 d\
=\x22m1465.7-556.48\
6.3134-7.0711-1\
1.112-2.7779-8.5\
863 6.8186\x22/>\x0a \
<path id=\x22path\
4516\x22 d=\x22m1472-5\
63.55v12.627\x22/>\x0a\
<path id=\x22pa\
th4518\x22 d=\x22m1465\
.7-543.35 6.3134\
-7.5762\x22/>\x0a </\
g>\x0a <path id=\x22\
path4524\x22 stroke\
-linejoin=\x22round\
\x22 d=\x22m1516.9-566\
.33 0.2525 12.37\
4-9.0914 6.8185\x22\
stroke=\x22#505050\
\x22 stroke-linecap\
=\x22round\x22/>\x0a <p\
ath id=\x22path4526\
\x22 d=\x22m1517.2-553\
.95 11.112 3.283\
\x22 stroke=\x22#50505\
0\x22/>\x0a <g strok\
e-linejoin=\x22roun\
d\x22 stroke=\x22#000\x22\
stroke-linecap=\
\x22round\x22>\x0a <pa\
th id=\x22path4528\x22\
d=\x22m1508.1-559.\
26 13.637 3.0304\
v12.879l-13.637-\
3.7881z\x22/>\x0a <\
path id=\x22path453\
0\x22 d=\x22m1521.7-55\
6.48 6.3134-7.07\
11-11.112-2.7779\
-8.5863 6.8186\x22/\
>\x0a <path id=\x22\
path4532\x22 d=\x22m15\
28-563.55v12.627\
\x22/>\x0a <path id\
=\x22path4534\x22 d=\x22m\
1521.7-543.35 6.\
3134-7.5762\x22/>\x0a \
</g>\x0a </g>\x0a <\
/g>\x0a</svg>\x0a\
\x00\x00NL\
<\
?xml version=\x221.\
0\x22 encoding=\x22UTF\
-8\x22 standalone=\x22\
no\x22?>\x0a<!-- Creat\
ed with Inkscape\
(http://www.ink\
scape.org/) -->\x0a\
<svg id=\x22svg2\x22 x\
mlns:rdf=\x22http:/\
/www.w3.org/1999\
/02/22-rdf-synta\
x-ns#\x22 xmlns=\x22ht\
tp://www.w3.org/\
2000/svg\x22 height\
=\x2223.994\x22 width=\
\x2220.961\x22 version\
=\x221.1\x22 xmlns:cc=\
\x22http://creative\
commons.org/ns#\x22\
xmlns:xlink=\x22ht\
tp://www.w3.org/\
1999/xlink\x22 view\
Box=\x220 0 20.9607\
82 23.993591\x22 xm\
lns:dc=\x22http://p\
url.org/dc/eleme\
nts/1.1/\x22>\x0a <met\
adata id=\x22metada\
ta7\x22>\x0a <rdf:RDF\
>\x0a <cc:Work rd\
f:about=\x22\x22>\x0a \
<dc:format>image\
/svg+xml</dc:for\
mat>\x0a <dc:typ\
e rdf:resource=\x22\
http://purl.org/\
dc/dcmitype/Stil\
lImage\x22/>\x0a <d\
c:title/>\x0a </c\
c:Work>\x0a </rdf:\
RDF>\x0a </metadata\
>\x0a <g id=\x22layer1\
\x22 transform=\x22tra\
nslate(-1423.6 5\
66.84)\x22>\x0a <g fi\
ll-opacity=\x22.666\
67\x22 fill=\x22#00f\x22>\
\x0a <path id=\x22pa\
th4536\x22 d=\x22m1508\
.3-553.29c0-4.34\
31 0.011-5.4243 \
0.06-5.4243 0.06\
1 0 8.3419 1.813\
2 8.3559 1.8297 \
0 0 0.024 0.6204\
3 0.042 1.3673l0\
.034 1.3579-4.24\
58 3.1469-4.2459\
3.1469v-5.4243z\
\x22/>\x0a <path id=\
\x22path4538\x22 d=\x22m1\
512.8-558.64c-2.\
1794-0.46713-3.9\
952-0.85613-4.03\
5-0.86455-0.039-\
0.0107 1.7294-1.\
3829 3.9314-3.05\
46 2.2022-1.6716\
4.0119-3.0316 4\
.0216-3.0222 0.0\
21 0.0192 0.1691\
6.3787 0.1694 7\
.2411 0.0002 0.4\
1889-0.014 0.558\
47-0.062 0.55464\
-0.035 0-1.8456-\
0.38731-4.0253-0\
.85443z\x22/>\x0a <p\
ath id=\x22path4520\
\x22 d=\x22m1459.4-545\
.43c-3.3262-0.92\
323-6.1778-1.713\
3-6.3368-1.7557-\
0.2781-0.0744-0.\
1209-0.20279 4.0\
151-3.2887 2.367\
6-1.7665 4.3701-\
3.2059 4.4502-3.\
1985 0.08 0.0108\
1.0334 0.27542 \
2.1188 0.59587 1\
.0854 0.32057 1.\
9889 0.58273 2.0\
078 0.58273 0.01\
9 0 0.035 1.9699\
0.035 4.3775 0 \
3.4411-0.026 4.3\
763-0.1209 4.371\
5-0.066 0-2.8424\
-0.76127-6.1685-\
1.6846z\x22/>\x0a <p\
ath id=\x22path4522\
\x22 d=\x22m1465.9-548\
.09c0-2.1735 0.0\
2-3.9518 0.047-3\
.9518 0.1061 0 5\
.237 1.5041 5.28\
26 1.5487 0.035 \
0.035-4.495 5.44\
52-5.242 6.2593-\
0.048 0.053-0.08\
8-1.6695-0.088-3\
.8561z\x22/>\x0a <pa\
th id=\x22path4498\x22\
d=\x22m1405.3-553.\
8-1.7803-0.5329v\
-1.1787c0-1.1617\
0-1.178 0.2054-\
1.1252 0.1128 0.\
0299 0.946 0.215\
88 1.8516 0.4139\
1l1.6461 0.36029\
v1.3063c0 0.7184\
6-0.032 1.3024-0\
.072 1.2978-0.03\
9 0-0.8723-0.248\
32-1.8516-0.5415\
2z\x22/>\x0a <path i\
d=\x22path4496\x22 d=\x22\
m1410.8-552.18-2\
.6811-0.78534v-1\
.6593-1.6593l2.6\
172-2.8863c1.439\
4-1.5875 2.6774-\
2.9468 2.7509-3.\
0206 0.1162-0.11\
664 0.1338 0.583\
79 0.1338 5.3399\
0 3.0108 0.1003\
5.5024 0.061 5.\
4976-0.039 0-1.4\
08-0.39464-2.882\
4-0.82659z\x22/>\x0a \
<path id=\x22path4\
494\x22 d=\x22m1405.3-\
557.14-1.7147-0.\
39383-0.058-2.22\
64c-0.096-3.7432\
-0.2183-5.9319-0\
.1147-5.9319 0.1\
35 0 9.4995 2.25\
19 9.7031 2.3367\
0.1425 0.0594-0\
.2973 0.59569-2.\
7252 3.3251-2.31\
19 2.599-2.9432 \
3.2574-3.1359 3.\
2692-0.1326 0.01\
04-1.0128-0.1622\
5-1.956-0.37882z\
\x22/>\x0a </g>\x0a <g \
fill=\x22none\x22>\x0a \
<g stroke=\x22#5050\
50\x22 stroke-width\
=\x221px\x22>\x0a <pat\
h id=\x22path4486\x22 \
stroke-linejoin=\
\x22round\x22 d=\x22m1402\
.9-566.33 0.2525\
12.374-9.0914 6\
.8185\x22 stroke-li\
necap=\x22round\x22/>\x0a\
<path id=\x22pa\
th4492\x22 d=\x22m1403\
.2-553.95 11.112\
3.283\x22/>\x0a <p\
ath id=\x22path4448\
\x22 d=\x22m1489.2-553\
.95 10.859 3.030\
4\x22/>\x0a <path i\
d=\x22path4442\x22 str\
oke-linejoin=\x22ro\
und\x22 d=\x22m1488.9-\
566.33 0.2525 12\
.374-9.0914 6.81\
85\x22 stroke-linec\
ap=\x22round\x22/>\x0a \
<path id=\x22path4\
430\x22 stroke-line\
join=\x22round\x22 d=\x22\
m1432.9-566.33 0\
.2525 12.374-9.0\
914 6.8185\x22 stro\
ke-linecap=\x22roun\
d\x22/>\x0a <path i\
d=\x22path4436\x22 d=\x22\
m1433.2-553.95 1\
1.112 3.283\x22/>\x0a \
<path id=\x22pat\
h4418\x22 stroke-li\
nejoin=\x22round\x22 d\
=\x22m1374.9-566.33\
0.2525 12.374-9\
.0914 6.8185\x22 st\
roke-linecap=\x22ro\
und\x22/>\x0a <path\
id=\x22path4424\x22 d\
=\x22m1375.2-553.95\
11.112 3.283\x22/>\
\x0a <path id=\x22p\
ath4375\x22 stroke-\
linejoin=\x22round\x22\
d=\x22m1346.9-566.\
33 0.2525 12.374\
-9.0914 6.8185\x22 \
stroke-linecap=\x22\
round\x22/>\x0a <pa\
th id=\x22path4381\x22\
d=\x22m1347.2-553.\
95 11.112 3.283\x22\
/>\x0a </g>\x0a <g\
stroke=\x22#000\x22>\x0a\
<g stroke-wi\
dth=\x2210\x22>\x0a <\
path id=\x22path425\
6\x22 stroke-linejo\
in=\x22round\x22 d=\x22m1\
732.1-507.64 138\
.56-80 138.56 80\
-138.56 80-138.5\
6-80v160\x22 stroke\
-linecap=\x22round\x22\
/>\x0a <g>\x0a \
<path id=\x22path\
4258\x22 stroke-lin\
ejoin=\x22round\x22 d=\
\x22m1732.1-347.64 \
138.56 80v-160\x22/\
>\x0a <path id\
=\x22path4260\x22 d=\x22m\
1870.6-267.64 13\
8.56-80v-160\x22/>\x0a\
<path id=\x22\
path4262\x22 stroke\
-linejoin=\x22round\
\x22 d=\x22m1870.6-587\
.64v160l-138.56 \
80\x22/>\x0a <pat\
h id=\x22path4264\x22 \
d=\x22m1870.6-427.6\
4 138.56 80\x22/>\x0a \
</g>\x0a <g\
stroke-linejoin\
=\x22round\x22 stroke-\
linecap=\x22round\x22>\
\x0a <path id=\
\x22path4266\x22 d=\x22m1\
620-887.64-80 80\
80 80 80-80z\x22/>\
\x0a <path id=\
\x22path4270\x22 d=\x22m1\
620-727.64v80l80\
-80v-80\x22/>\x0a \
<path id=\x22path4\
272\x22 d=\x22m1620-64\
7.64-80-80v-80\x22/\
>\x0a <path id\
=\x22path4274\x22 d=\x22m\
1620-887.64v80l-\
80 80\x22/>\x0a <\
path id=\x22path427\
6\x22 d=\x22m1620-807.\
64 80 80\x22/>\x0a \
<path id=\x22path\
4280\x22 d=\x22m2252.9\
-967.64-164.85 6\
0v160l164.85-60z\
\x22/>\x0a <path \
id=\x22path4282\x22 d=\
\x22m2088.1-907.64-\
164.85-60 164.85\
-60 164.85 60\x22/>\
\x0a <path id=\
\x22path4284\x22 d=\x22m1\
923.2-967.64v160\
l164.85 60\x22/>\x0a \
<path id=\x22pa\
th4286\x22 d=\x22m2088\
.1-1027.6v160l-1\
64.85 60\x22/>\x0a \
<path id=\x22path\
4288\x22 d=\x22m2088.1\
-867.64 164.85 6\
0\x22/>\x0a </g>\x0a \
</g>\x0a <g s\
troke-width=\x221px\
\x22>\x0a <path id\
=\x22path4320\x22 d=\x22m\
1188.1-341.67 15\
3.31-71.488 153.\
31 88.512-153.31\
71.488-153.31-8\
8.512v160l153.31\
88.512v-160\x22/>\x0a\
<path id=\x22p\
ath4322\x22 d=\x22m134\
1.4-93.16 153.31\
-71.488v-160\x22/>\x0a\
<path id=\x22p\
ath4324\x22 d=\x22m134\
1.4-413.16v160l-\
153.31 71.488\x22/>\
\x0a <path id=\x22\
path4326\x22 d=\x22m13\
41.4-253.16 153.\
31 88.512\x22/>\x0a \
</g>\x0a </g>\x0a \
</g>\x0a <image id\
=\x22image4336\x22 sty\
le=\x22image-render\
ing:optimizeSpee\
d\x22 xlink:href=\x22d\
ata:image/png;ba\
se64,iVBORw0KGgo\
AAAANSUhEUgAAAR8\
AAAApCAYAAAAMAy1\
JAAAABHNCSVQICAg\
IfAhkiAAAHihJREF\
U eJztnXd4VFX6xz\
/TazKTmUkjjYTQQi\
gK2ClW1r5Y17JiXc\
ta1rauZXUta/np6u\
4qrgXdta0gCwIW m\
oAFkCIlQAgQEkhIS\
G/T59bfHxMiJYHAD\
PCH83mePDyZO+feb\
055zznv+96D5sUXX\
lBJkCBBgmOM PiMz\
43hrSJAgwS8QfUF+\
/vHWkCBBgl8gerfb\
fbw1JEiQ4BeIPjk5\
+XhrSJAgwS8QvdVq\
Pd4aEiRI 8AtEbzA\
YYrqBJMv886tvmL1\
6PXkeN4NzMhmcnUm\
/jHRy3Ck4bfEzbk2\
NDWTn5BGJhON2z4r\
6Rn4o KyfP4yLLnU\
KG00Gy1RK3+x8uNS\
1t1DS3kuZIJt2ZjM\
1sOm5aAFZsreC7zV\
vJ87jJT/eQkeIkzZ\
GM xRhbv9mfo9G2K\
7ZW8NXaDeSneRiQm\
UGK3UZuqoskizluz\
zhcPceiLnvLko1lz\
Fu/iVy3m34ZqfRx \
p5DuSMKTlIROp43b\
c3pqW30sN21o7+Dx\
T2aiNxi5aPSJXDv+\
NEqrayitrmXBhi3s\
ampBr9WS5XIw sE8\
mRTmZFGak4U62Yzc\
fnw6wB1lR+GL1el6\
fu4QBWRnsbG6juqm\
FhvYOjFodGc5kclP\
dFGamMiAz g3RnMk\
kWM1aTEY1GE3c9ki\
zzxU8lPDZjLq06Ey\
ZFwoaMx2KhwJNCQa\
qb/DQP+Rmp5HlcXV\
pMMU4e PREWRN5d+\
D1fr9vIOSOKaQ8Lz\
FhVQl1rG80dPmwWE\
+nJyWS7nfTPTKMgP\
Y2MFAcOq+W4G8w92\
qf/ +BN1QZG+A4pp\
W7mVkK8dgypi0enI\
djkYmp1JujOJQdl9\
KEhPxWYy4kqyHzU9\
05avxm4x8+uTRx7X\
uvSHwrz2xULeXLy\
UkXnZjOxfyJqqWma\
v2UBdazthQcRtt5H\
hTCY/3UNBeir5aam\
4kqykOZLj1v81 3o\
72I8rzWVOxk8c++Z\
y7LjyX80cNZ+XW7V\
wwasQ+31EUhYZ2L9\
VNLWyp2c2mqho2Vd\
ews6GRmX+8 i2y36\
7CeGa/ZMRgReGrqb\
NpCYZ6+5jLKanYz8\
dRRQNQoNXt91La0U\
dXYzLbaOrbW1rO7t\
Q1vMEiS 2cS7d04i\
xW6LScPedASCPPHp\
LL7aUU9y0QhUgxFR\
lohIIuFQkEgwgBAK\
QCgE4SAGUcBj0JJm\
MnJy YV9evP5y9Dp\
d3PTUtXbw8IfT6Ne\
nD5eeciIfLV7KlHt\
v67ouShIN7V7q2tq\
pbmphW20dFXWNVNQ\
3 0Njezv8e/j3Z7p\
TDema82nZv7b8983\
Rufm8mv7rsRvxCGH\
8kTHvAR3NbC20drb\
S3t+JrbyXkbUcJ B\
UjXKsz78wP0TfPEp\
KEnPTedO5bZK9bwx\
NW/7rreU13uaGikv\
q3tiOryYGytrePu9\
z5lh2LE1ieX k1Qf\
Ux++q+u6qqp4Q2Ea\
2tqpa22nor6R7XUN\
lO2qZXFJKQufeojB\
OZmH9cy4rXwkWeY/\
S5Yxa3UJ 7933OwZ\
lZ7K9roF/f/P9AcZ\
Hq9WS6XKS6XJy8sB\
+hEWR370+hRVbymn\
2+g/b+MSDDVU1PPL\
RdK4/ cwy3TTiTnY\
3NfLxkaZfx0Wm1pD\
sdpDsdnNivLxBtEE\
VV+dvnX/On/0zFGw\
zFxfioqsqysu384e\
OZ mPOHMPysUUiKg\
ihLhCWRkCgQNJoIW\
mz4hTBhUUAFRFWlT\
lWpKy8jsKGMsCBit\
8RufFRVZWFJKS/N \
msdT11zOBaOG09Th\
46oxp+zzPYNeT7bH\
RbbHxej+BQAEIxGu\
eXky35aVE4xEYtYS\
D+07G5tRFBkV FUV\
RkBUFtFqMNjsWvQ7\
JZkdxpyKHAgRKS9j\
d2kxEFI+anvZAsKt\
P7aG7utwzTlaVV8a\
tLkVJ4sNv f+SvXy\
yi3+ixjMzuS3tzA0\
pzxz7f02g0OKwWHF\
YLA7IyGTd0MCFB4J\
Z/vItOp0NR45eTfF\
jGp8Xn 56lpszGbz\
Mx54gEcnf6cbLeLJ\
66+9KBlJVnmmU8/R\
6PRMPHUUSjKsU2sF\
iWZD75bzherS3j99\
hsZ UZCHRqOhj8vJ\
09ddfsjyU+Z/y+wV\
azhjyEB0cVhlBMIR\
XvtiITM2VTLirIkk\
pbiRFIXIHqMjRNBr\
tWjQoKogKTKSLCM\
qMqgqmqoKrikqYFv\
N7rjsz/2hMK/MmU9\
FYwv/e/Q+cjxuNBo\
NIUFg5dbtnHfC 0B\
7LCpLE/VM+YVGDF6\
PLg/kobQV7oiftBp\
0OURJoDwWjdSpGCI\
kiYUkkIklEOleXkc\
ptYDSiSXbE RXtPe\
lp8fj5asvSASXpv9\
h4n44cOjoueXc2tP\
PrJTMp8ChOuugWrL\
QlZUdAGA8iHGIeiJ\
PHYh5+R 5kxmzJBB\
cfVP9brXbqmp49rX\
3mFMcRGT75jUZXgA\
OoJBnp8+56Dl/zV3\
Eesqd/LGHTdiMuhR\
VOXI VR8m7YEgt//\
rA7bXNzPjsT9wQr+\
+XfvWVp+fpz/9/KD\
lpy9dyZQFS/j0j3e\
TZDGjjXHPu72ugct\
f eYvvWhUuuPwmBv\
YtJDPZidtqJ8lkxm\
owYtIbMOj06LVadF\
oNWo3m5712TTUT0l\
N4+66bURWVWHfg O\
xqa+M1r7+BxOpn68\
O/JTfV0PUtWFBrbv\
Qct/+L0L/isdAcpQ\
4Zj0GrRauPnrDwUB\
9Nu0OuQJIm2 kJ+O\
UABvOIxfCBMQIp2G\
SCCwcztSOAx5BaAo\
MWs/mJ5st4tnrrvi\
oOX3HicaNDHrmbd2\
Ixe+NBmv u4ALJ15\
PQUYO6UkOnBYrFqP\
pkCuZl2Z8SX1bBy9\
MuhpJljEa4re979X\
K59MfVvLCzK/55KH\
fc+qg wgMcTlaTiZ\
P69+u2rKqqzFi+mk\
+/W86XTz7UOXi10S\
XwMeCbklIe/vB/PH\
z5Rdx23vgDGtNmNj\
N+ 6OBuy6qqytw1J\
Tw7bRZznniA3FQ3i\
qKi0x75cH/9q0W8N\
u87Rp03kbzcAvR6A\
7KiIMkygiwiyNLP \
P5K0z++SLENdDeOS\
jEz7493YzNHOo9Uc\
eQed+sNKnpo2hzfu\
mMTEU0cd0LYZKQ6u\
HX9at2VVVeX9 hd/\
x2pKV5Jx2Jl5RQK8\
BXQx6DoepP6zkrzO\
/Zso9t3H28KIDtBt\
0OmRZoiXgR5B+3so\
GhAhBIYJ3 1w4iLY\
0wsBi0WrSoxDKvHE\
pPU4eXh97/L7OfeO\
CAst2NE1mRY6rLv0\
ydzVvfruD0868gNy\
cfWVUJ ChFkVSEkC\
kRkCXoYh4qq8u68x\
Xy/aQszH/8DFqMRW\
ZFi6mv70yvj8/7ip\
eSmenjwvU9w2q0U5\
WQx un8BwwvyyHA6\
MBkM6Hqw0Es2bOb5\
z2Yz64n7uyIJOq32\
kMu9ePHCrPnoUjL4\
5/ylvL9oGcPy+lCc\
m80pAwvJTXVhNBi\
IiFL32jeW8acPpvH\
ZI/eSn5EGRFcCPf2\
tveG/P6ykKCudHas\
Ws2PDKqwON660 Pt\
hSXGAwIABBITpA9j\
hJ/ZEIQUFAaaxjuB\
ph+p8eIdlqQZQkdD\
ot2hiM4b+XLOO0wf\
35++x5vDpr LoV90\
hmWl8PJAwvpm55KW\
BB4/rM5fPHkgweUn\
b1iDY//by5Dx1+AX\
1VQhAh6QKeLfzSwO\
95bvIy8 VA9//ng6\
f5t1YL/U6bQoskRL\
wIeoyFEHvthpgBp2\
E66pgsHF0LmNjrbq\
kWs/lB6r2cRZw4q6\
Ldvd OJFkJaa6/Gr\
tRiwpHtYu/Yb1Gg3\
JDidOTzrJ7jRMSQ6\
8vg4c3RgfVVX539K\
VfLhkKV/8+cGuyLQ\
k xzbx7k+vjI8n2c\
7kO28mI8XB1to6dj\
Y08X3pVj75djnVTS\
24k2xs211PpsvJ6P\
4F9E3zoNVq2VS1 i\
/unfMz79/2O3NSfI\
whareaY+XzsVgtDz\
7iEVJcHv99LY1M90\
yqq+McPMxB8baSZd\
TS0dWDU6xia l8Po\
Af2wmoysrdjJA1M+\
5r17bmVQ9s/efUWV\
Y9p2ZXtcvH33LXiS\
k9jR0ET57npWbatg\
0+YyNuxu wqtoMDi\
caOxJSFYbIZ2WiCS\
htDZTGGhnxlMPkup\
IAkCQojNjLHpcSTZ\
evfV6stwp1Le1U1H\
XyLrK Kt6Zv5httX\
WERYmwIPDPL+ZTnJ\
fDKQP7YTWZ+HFLOf\
f+ZzojzroI2WzG6/\
chKQp6rSYm43w4eJ\
Jt vHnXLaQ7k7vvl\
8l2mtvaCFZuQ5OUj\
GQwEJEkwm0tSJXlM\
LAI9D/7MHQQ08rnU\
HpSHUnkpXmY9sOK \
Xo0TJcaJzmw04O4/\
FIcnDVUUiPh9tLa1\
UL1pLSFfO3I4jEsH\
z02bxajCAk7ol0ea\
I5klG8t4acaX fPb\
IPfukHsQ68e5Pr4y\
P2WAkJAiYDAaG9c1\
lWN9cLjl5JGrnfrG\
qqZlPvl3Ouooqpiz\
4ljafH48j mZrmVt\
6444YDPPw6rRb5GP\
l8zAYjvlAAU8SOxm\
DEmZGFyZ1K5qBhBI\
Qw7X4v2oqt/GV5Kf\
4vlyD7 feQ77IiRC\
B/84XZG9S/YZ/ksS\
rE1gN1sxBsMkZfmo\
Tgvm+K8bH59ysiu6\
6u2VfDER9MZ0z+DF\
Vu3 U9Hcyg5/iAwd\
zHrqIfp1rsCiWiT0\
uth8LCa9gbAgotNq\
yXK7yHK7GFs8qKtt\
a1vbuPrF19FqNHy4\
6Ace/WBq9PN2P6M\
nXIHJ6aLJ70VSZAR\
ZwoJ6zIzPHu099cu\
djc2c9qfnaGyoRan\
YApIYNTaiCP0H g3\
nfZFK9RhPTNudQej\
ZV1XDr6++S7nT0ap\
zIshxTXVpNBnb5Og\
gajWi1WrQGA2paBq\
bUNHSyQtjX gat2O\
31cKUxftpK/fjYLX\
yhMY4eXuX95mH6Z6\
fvqUWLTsz+9Mz56P\
WHhwBDknkGZl+phy\
YbNfP74 /djNJjoC\
QaqbWrj7rQ8YWzz4\
gL2vTquNa8juYFgN\
OrzBAIZwEA0gKQqC\
LBESBUKiiCDLtO4s\
x3Xy WKSMPvjDIcq\
FCIW7K7siYnujKGp\
M0aUkixlfKLTPZ3s\
/w5VkpyAznSevmYi\
qqvjDYT5Y9AMN7V6\
K crL2KSfIMvoYO4\
NJr+82vLxHk9Nq5d\
RB/bn7ovMAiIgiNc\
2tjHtuMpl9cmnu3N\
IIUjRylHQMVz6H 0\
t43zYPNZkXJygWtD\
mQZwkHYWQndvFZk0\
Gpi2sIeSs/gnD68e\
/etDMvPRVXVQ44TK\
caVRpLZTHvA h89o\
7FrRqYCqgqIqKOEg\
Q+02bj53HDefOw5J\
lpm/diMzlq9iWN/c\
A+4nyfFd+fTqTmaT\
oVvjsweN RsNN54z\
FoNOi0Whw2m30y0x\
HRY3+pfs/VKNBOUY\
OZ7NBjzcUoCMUpD0\
cxBsO4QuHCESiEY+\
IImPN LSDS6dyVUc\
FoRNVoovr3Q1aUmJ\
xuTpuVNn+wx+t9XC\
ncePYYIFqvSRYLGS\
kONBoO6JyiJGHQx5\
Sk jtGgI3yQ3BaTw\
cAJ/fK6Zm+TwUBGi\
hONGt0WCLIU9aNIA\
hFRxKiNbRsYT+0aj\
QazXg+KGt1P6fVg \
skSdrN1oNMRoOA+l\
JxQRuPutD1BVtVfj\
RFbUmOrSabOiESVk\
SUaUZARJRpRlJEXu\
mvylvcahXqfD nWx\
Hs3dkdS+iwZZjvu0\
yEBKEHq+rqsqGHdW\
cUTSQvM7sUL1Oh6y\
o3dVpdOVzjHw+NqM\
RXzCANhzs 0irK0S\
1CRBLxNjfgq6qEgA\
8xyQGyBBZrdLB1I1\
FRlJg6hNtuo9nbc+\
j6m/WbuOftD1j47K\
N8uXod Zw8fgqrS7\
YwqSLGHPs2Gg08s2\
3bX8fxncxhVWMCm6\
l2MKRqIzRyNxATFa\
NQo+q+ApMgYNZq4R\
kRi 0T5j2SoQBfrW\
VFAdEVFsydEVj6pE\
B/t+7WjUamPzsRxC\
z6INm9lc30TRfU8y\
IieDkwYUMKxvDpKs\
RKOW+31fluXYJjq\
rFWNzBI2qRUJF0qh\
0DTtVgd276NAqnP3\
480y+80Y2VdUgKwq\
iJHcZyH30HI9o l9\
1kwh/uOe39rbmLWL\
ShlMtOG80zUz9nfP\
FgzhkxBFGWkVUF3X\
7VqtUeu1C7zWTE1x\
GAUNT4KKqK rMiIs\
oS/uZGwEIHcvghaL\
WrAB63NkJFFY0srP\
2zeistuIyPFSU6qG\
61GgxSj8XHZ7TR7/\
Qd8Lsky X/20nix3\
Cgue/RM5qS7SnQ7q\
2tpZV1nFV6tLmHTW\
WGat+InLTxuNx5FM\
RyCIQRub8bEajd1m\
0Sqq ii8Y4r0F3/H\
qrddhM5vYsGMX6Q4\
Hb89bjChL1NbspK6\
lkaDNRjAcRFXVaKg\
9jhGRI9EuSjJba3f\
j DYWZ9/Qj2C0map\
vbWL29khVbK/h3eR\
n27Ztp1xpQ7ElgTw\
KTCZNOG9M2tkc9ss\
yU+Ut4ZcFyTr/k O\
kSNhorWJn7cUI1v8\
Wr8NdWc/tjzjM7P5\
dRBhZzYL59Ml7Nz2\
3XkdWk1GTCqoNdqk\
VQ1+oOKJIlQ Vc69\
pwzniasuJSyIWExG\
vlq9nqYOH/PXbuC6\
V97kud9eybrKKsYU\
DSDJYolGu+IYyeyV\
8bGZTfhD BxofXyj\
Mkg2byU9P5ZvnHou\
mZdushAWReWs2sKu\
phdKqGuasWst1406\
jICMNbWfS3LFKcLa\
ajfjq W1H2WvlIik\
Kovhb/rh3oi4YhqG\
p0pWOxgjsVVBW73U\
7/zAy+3biZOSvXMn\
pAP/719TfodbHNju\
4k G+urdx/w+aKSU\
j797kfev+82rKboy\
4TXn3k6ABajgcb2D\
jJdTgoy0ghEBN6eN\
oulm7cxIDPtgHsd \
DlazkUD4wAHz1tff\
sLayinfuvqXL2O7J\
BB/VP58+dz6OX4jQ\
VFeD7E4lUroeXB6M\
el1c34g+Eu2v zZ5\
LVWMzb9wx6Wfflc3\
GkLxsJp01hp/KK5n\
x6L1U1jexalsFK8s\
rWbmjHLtRj0F/5Ma\
8Oz0RUeTx j6Yzb3\
sDZ158LWFVxRcJYU\
vx4LHaMGbl0NHSyE\
ZnJquqm5lcWoku4C\
fHpCPNYoqpLi1GIy\
aNBovO gKgoiKpCO\
BJEW1nGC5dN4J6LJ\
+wzkT448QI2Ve2if\
Hc9z99wFSaDnnUVO\
7GZjMxYvhqbyYg+x\
slu b3q38rGY8O1n\
fMKCyAvT55Cb6ub2\
X53V1ciDsvsAMDw/\
l8lfLyTL42JQdh8C\
EYFrXp5MtseFQa8/\
Zisfu8lMIBhEDAU\
AUEWRSHkZ5PRFGVh\
MpDsdGg0GvY5URxK\
3n3929N0uJZpwWJy\
VEVOH8CTbafb6 up\
a1HYEg973zEQ9ddi\
H/feiubiNXJoMBRV\
VJsdv4zdhTAchLdX\
Pxs3/jwUvOO2ItEK\
0f/14DpiMQ ZPrSl\
Zw0oJAbzxnbbdaL2\
WhEVmQUqw3bgCLqf\
R2oQ4ZDxVauOX/sM\
XM476+9xefnb59/z\
e8vPCfq l+pmhRr9\
TCUjxUleWipnDitC\
VhTun/IxBamumE4J\
6E7PTf94lyaTm3EX\
XElIFBCEMIIsISqd\
/hdZ BkA2GMHpAqc\
LWVHYubWUO8afGqM\
PSo8ZcBgMCIqCt70\
Vw+5y3rvnJiacOLT\
b+tHrdGg00Dc9FYB\
n r7+CsCDyzy8W8M\
hlF8TkkD/gWb35kt\
1kwruX8VlcUsr733\
zP5DsmkWy19NjIih\
qN7lzd+WLif+6/ n\
ZrmVm549S0uHdnzu\
0LxxGo2EhHCCJEwh\
EOooghJyWAwHDSfb\
G9flUajYVHJJupa2\
nj5+sti0uOy 22jx\
RbddtS2tNHX4OGt4\
EYOyM3sMmZv2S4SU\
FYVnps3i8lNOpHC/\
cOjhEp2to23rDQZ5\
f+F3JNss nNAv76A\
dX1EUvJ0OfEESoaO\
Ns9NdTDqz+2zoo8H\
e2ps7fHy3qYzivGw\
yUpwH166yT7R10fp\
SKnY3 8PSVF8VNz5\
Zdu7npjfcw5hUzaP\
AwfJHwXu+XCYRFgb\
AkEpYkFFVFUeSfb9\
TcyBlpDn533riY9J\
j1 BkwaSDEZqavbh\
au9jk+fuI8hudk9H\
ouh1+mQ5H0n5LfmL\
qJvqovxQwbGpGd/e\
mVW7WYz/nAYSZaZ \
vWINKvDipKtx2KwH\
PdtDVdSuKAlEO+z9\
Uz7mzgnjKM7N6rFc\
PLGbTCDLqMEA6rbN\
0YhHWsYhs8mU vbS\
3eH08N20Wz11zacx\
HV1hNJgLhaC7Fta+\
8SUQUueGsMQe9r9l\
g2Mfh/OWqdVQ3NHL\
TmWfEpAX2 +PMiLC\
4p5dqX3+TWCeO5+Z\
xxh5xxZUWhIxTCHw\
mjiiKepjpe/u1lcT\
3a41Ds0b6+soorXv\
wHpw7u z7XjTjukd\
vUote0ePd+XbuGSl\
yajLTwBd35/2kPB6\
Ptl4SDecBhfJIy/6\
/2yCKrKz+MkEsbRW\
Mvf Jl0de18zG9Gq\
Er66KvI1AeY9+QDF\
eTkHHbN6rbZrNQZQ\
sqOamctX8dhlF8ak\
pTt6Z3ysZlp9fj5e\
soxlZds4o2gA2Z5\
DH4ehqErXCiIiitz\
79keMHVTIxJNPjEn\
04WAzm6GxHoQIDBk\
e9ev0AkVVO3Mi VB\
7/aDq/OX10zKsMAJ\
NBT21LWzQv6rE/cN\
KA7t+J27fMzyufut\
Z2np36OS9cf0VM/o\
k92MxmFqzb iDvZz\
jv33ILdbO7VYVGKI\
tMRDiIqMrrqSp6+5\
Ny41M/hYDObmbumB\
FlR+OiBO8lMcfaq3\
J5+Ge+2 tZnNzFrx\
ExP//j72E05Hl+Km\
JeCnJeinJRigLRjo\
fME1iC8cNdyBSISu\
sKqqoqncxpOXTmBw\
9uGd mdMdyVYL5du\
3cKLHzOeP3kdWL84\
F0ut0XeH3sCBw7zs\
f8vTVl+Kwxf90z14\
Zn2SzmY+XLKNfZhq\
P XnkJxl7mliidjl\
xFUXj6089JthhjXk\
oeLrmpLrLNBnTNjd\
DRDoLQbU7F/qid2u\
euKaGhrY2rzxgd F\
z1JZjNnDOrPyzO/4\
tmps5i3ZgPNHb6D5\
j2ZjdHktYgocv+Uj\
3ng4nPJcvVuoB2K3\
FQX22vreGvu YtZX\
VtHk9fUqAVRVVcKi\
AK3NXJDliVv9HA57\
tL/+5QI2Vu3qtXbl\
KLVtTmoK22rrUEwW\
WluaaG5v oTXopzX\
opy3opy0UoL0z58z\
baXxEWaJr/9/UwNl\
Zqfx23CkHfU5vOW1\
gIVecciKbqqq5950\
Pmb50 JXWt7Qdsq/\
ZGr4uufBRF4fnpcx\
hX1J9RhX3joueAZ/\
XmS0U5fXj2mon8fd\
bX1LV7yU9P49wRxZ\
w5 rIjcVHePM+WeQ\
7jeXfAt22vrmHzbt\
cfMGbmH4twsVrz4O\
GsqdrKgpJQFm7ezP\
RBBTXFBigeMxm63 \
YIqq0tjh5S//ncmH\
997Sa4N7KIwGPS9P\
upKOYIi1FTuZu3oN\
L0yfjTs5iQknDuOi\
0SeQ5U7Zp05N ne8\
kfbBoKRaDjotH93w\
ezOFSnJvF0ucfZU3\
FTr5etYbnP5tDutP\
B+SOHcf6oEfRxde+\
4RVVBiJDS UMv/PX\
V/3OrncLX/2Nm2h6\
NdVVUa2jvi3rZDc7\
NZ9tfHWLm9kvnrNr\
Fw2ToCRgtGTzo6dy\
qK0YAg yURkEUmW9\
00ji4Sx1e3itWcex\
Gw0xkVPZoqDKXfdi\
C8UZt2OahZvKOO12\
V+TbLVxzohiLjnpB\
Pr3 ydinjnRaHbKi\
sLysnO82ljHzj3cd\
lWOD4QiOUZVlhU27\
alm8sYylZdsJiSIj\
CvIYXzyYkYX55KX9\
fH5J0V1/5MGJFzD\
12+X89/7bYq7UeB2\
1uW13PfPXlfLNhs2\
saWzDa7ZCihvs9mg\
aPuAqXccZ/fO5 ZG\
QxV51+UkzP6w0V9Y\
0sXF/Kgg1lKKrKKQ\
ML+dXI4YwoyEWDhg\
G3P0R+upupD9wR10\
P5u2NP/Sza WIZGq\
2XMkIGcd8JQRhTkY\
elsQ9M1d6Ba7fzri\
glxqZ94t+3BtA+84\
yGG5+cxYdigo962K\
7ZWsLBk M0s2bWFj\
q5dIkiMa1bInRd+m\
V2TYsBad2cyrl5zN\
TWfF7sc7FNVNLSwo\
KWX++lJ8oQij+udz\
wagR jCrMR6vVMua\
RZ3HarLw66arDPjK\
1O3pq2yM+wxmiDuQ\
2f5BNu2r5fnM5JTt\
3gUbDqP75jB9axBU\
v /IOi7Ew+uOdWMl\
2Oo/ZHHCmyrFDf4e\
XHLeXMKyljaWUVjR\
oDkiMFTfkWLh81lH\
funHRMnaiKotDk 9\
bG6fCcLN26mor6Jv\
umpTP1+BTMf+T1ji\
wYcMy1718+iTVupb\
m5haF4uE04cxpUvv\
c7FI4fx7l3x qZ+j\
2bb7a//N/73BhSOH\
8c6dNxyzthUlmZqW\
Vr7ZsJn5JWWsramn\
VWdAcaTA9i38avgQ\
Prnvlphf lzkcFEW\
hxRdgbeVO5q/fTFl\
tHelOJ7NW/MRLN1z\
B7eeNj8tzjorx6Y6\
a5lbW7ajm+83l7Gh\
s4rlr Ju5zJEUsHI\
3/XmVvfKEwayurWL\
SxjK21dbwy6Wpyeu\
FYP5qEIiKlu2rwhs\
KMHzLwmJ4SuD9Hs3\
5+ aW3b1OHjp+07W\
VCyibLaOt687XoKM\
mJLGI2ViCiyedduV\
pXvYNKZp2OO05Gpx\
8z4JEiQIEFvOH7T \
aIIECX7RJIxPggQJ\
jgsJ45MgQYLjQsL4\
JEiQ4LiQMD4JEiQ4\
LiSMT4IECY4LCeOT\
IEGC40LC+CRI kOC\
4kDA+CRIkOC4kjE+\
CBAmOC/8P22cGy+B\
/iX0AAAAASUVORK5\
CYII= \x22 height=\x22\
41\x22 width=\x22287\x22 \
y=\x22-655.37\x22 x=\x221\
299.7\x22 preserveA\
spectRatio=\x22none\
\x22/>\x0a <g stroke=\
\x22#000\x22 stroke-wi\
dth=\x221px\x22 fill=\x22\
none\x22>\x0a <g>\x0a \
<path id=\x22path\
4339\x22 d=\x22m1268.2\
-591.83-25.238 2\
5.238\x22/>\x0a <pa\
th id=\x22path4347\x22\
d=\x22m1243-566.59\
31.547 8.453 25\
.238-25.238\x22/>\x0a \
<path id=\x22pat\
h4349\x22 d=\x22m1268.\
2-591.83 31.547 \
8.453\x22/>\x0a <pa\
th id=\x22path4353\x22\
d=\x22m1274.5-558.\
14v32l-31.547-8.\
453v-32\x22/>\x0a <\
path id=\x22path435\
5\x22 d=\x22m1274.5-52\
6.14 25.238-25.2\
38v-32\x22/>\x0a <p\
ath id=\x22path4357\
\x22 d=\x22m1268.2-591\
.83v32l-25.238 2\
5.238\x22/>\x0a <pa\
th id=\x22path4359\x22\
d=\x22m1268.2-559.\
83 31.547 8.453\x22\
/>\x0a <path id=\
\x22path4361\x22 d=\x22m1\
236.6-656.28-12.\
619 12.619v16l18\
.928 5.0718v-16l\
-18.928-5.0718\x22/\
>\x0a <path id=\x22\
path4363\x22 d=\x22m12\
43-622.59 12.619\
-12.619\x22/>\x0a <\
path id=\x22path436\
5\x22 d=\x22m1243-638.\
59 12.619-12.619\
v16\x22/>\x0a <path\
id=\x22path4367\x22 d\
=\x22m1255.6-651.21\
-18.928-5.0718v1\
6\x22/>\x0a <path i\
d=\x22path4369\x22 d=\x22\
m1224-627.66 12.\
619-12.619 18.92\
8 5.0718\x22/>\x0a <\
/g>\x0a <g stroke\
-linejoin=\x22round\
\x22 stroke-linecap\
=\x22round\x22>\x0a <p\
ath id=\x22path4371\
\x22 d=\x22m1338.1-559\
.26 13.637 3.030\
4v12.879l-13.637\
-3.7881z\x22/>\x0a \
<path id=\x22path43\
73\x22 d=\x22m1351.7-5\
56.48 6.3134-7.0\
711-11.112-2.777\
9-8.5863 6.8186\x22\
/>\x0a <path id=\
\x22path4377\x22 d=\x22m1\
358-563.55v12.62\
7\x22/>\x0a <path i\
d=\x22path4379\x22 d=\x22\
m1351.7-543.35 6\
.3134-7.5762\x22/>\x0a\
</g>\x0a </g>\x0a \
<path id=\x22path4\
414\x22 stroke-line\
join=\x22round\x22 d=\x22\
m1366.1-559.26 1\
3.637 3.0304v12.\
879l-13.637-3.78\
81z\x22 fill-rule=\x22\
evenodd\x22 fill-op\
acity=\x22.66667\x22 s\
troke=\x22#000\x22 str\
oke-linecap=\x22rou\
nd\x22 stroke-width\
=\x221px\x22 fill=\x22#00\
f\x22/>\x0a <g stroke\
-linejoin=\x22round\
\x22 stroke=\x22#000\x22 \
stroke-linecap=\x22\
round\x22 stroke-wi\
dth=\x221px\x22 fill=\x22\
none\x22>\x0a <path \
id=\x22path4416\x22 d=\
\x22m1379.7-556.48 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.