input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
#!/usr/bin/env python3
from astroquery.simbad import Simbad
from astroquery.vizier import Vizier
from cat_setup import src_localDB, src_onlineDB
from buildDB import addData, check_ldb
import sys, os
import csv
from more_itertools import locate
import argparse
import subprocess
from pathlib import Path
# for the spectrum search:
from astropy import units as u
import astropy.coordinates as coord
from getSpect import queryCASSIS, queryISO
import warnings
warnings.filterwarnings('ignore', category=UserWarning)
# Describe the script:
# Describe the script:
# Help text shown by argparse; RawDescriptionHelpFormatter (set below)
# preserves these line breaks verbatim in --help output.
description = \
"""
description:
script to pull photometry from set catalogs in VizieR
(specified in cat_setup.py) and from local database of
data tables not presently in VizieR. If optional
argument --getSpect is set equal to True (boolean),
the script will also pull flux calibrated infrared
spectra from the CASSIS low resolution Spitzer
Atlas and Gregory C Sloan's ISO/SWS Atlas.
"""
# Usage example appended to the --help output:
epilog = \
"""
examples:
queryDB.py --obj=HD_283571 --rad=10s --getSpect=True
"""
def _str2bool(value):
    """Parse an explicit 'True'/'False' style command-line value into a bool.

    BUGFIX: the original used ``type=bool``, which is an argparse trap —
    ``bool('False')`` is True because any non-empty string is truthy, so
    ``--getSpect=False`` or ``--closest=False`` silently enabled the option.
    Accepts the usual spellings; raises ArgumentTypeError otherwise so the
    user gets a clear usage message instead of surprising behaviour.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)

parser = argparse.ArgumentParser(description=description, epilog=epilog,
                                 formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--obj", dest="obj", default='', type=str,
                    help='Object name')
parser.add_argument("--rad", dest="rad", default='10s', type=str,
                    help='Search radius for VizieR catalog query')
parser.add_argument("--ldb", dest='ldb', default='', type=str,
                    help='Local database directory trunk')
parser.add_argument("--getSpect", dest="getSpect", default=False, type=_str2bool,
                    help='Choose whether to query CASSIS for IRS spectra (default False)')
parser.add_argument("--closest", dest="closest", default=False, type=_str2bool,
                    help='Retrieve closest entry from VizieR catalogs (default False)')
parser.add_argument("--queryAll", dest="query", default='True', type=str,
                    help='Choose whether to query full database ("all") or specific catalog')
argopt = parser.parse_args()

# Underscores in the identifier stand in for spaces on the command line:
obj = argopt.obj.replace('_', ' ')
searchR = argopt.rad
# Check that the local database can be found:
localDB_trunk = check_ldb(argopt.ldb)  # returns a pathlib.Path object
qu = argopt.query
# Read in the details of the VizieR catalogs to be queried:
# Each src_*DB() call returns eight parallel dicts keyed by catalog name:
# catalog names, bibliographic references, wavelengths, beam sizes,
# mag/flux column names, error columns, units and band labels.
if qu == 'True':
    catN, catR, catW, catA, catM, catE, catU, catB = src_onlineDB('simbad')
else:
    # Expect to be given one catalog to query
    try:
        # Reduce every dict down to the single requested catalog entry:
        catN,catR,catW,catA,catM,catE,catU,catB = [{qu:item[qu]} for item in src_onlineDB('simbad')]
    except KeyError:
        print('No online catalog matching keyword ',qu)
        # Empty sentinels so the later retrieval loops simply do nothing:
        catN,catR,catW,catA,catM,catE,catU,catB = [[]]*8
# Read in the details of the local catalogs to be queried:
if qu == 'True':
    try:
        ldbN, ldbR, ldbW, ldbA, ldbM, ldbE, ldbU, ldbB = src_localDB(localDB_trunk)
    except TypeError:
        # src_localDB returned None (files missing) — cannot continue.
        print('Error: local database files not found!')
        print('Please check local database directory trunk before continuing.')
        print('')
        sys.exit()
else:
    try:
        ldbN,ldbR,ldbW,ldbA,ldbM,ldbE,ldbU,ldbB = [{qu:item[qu]} for item in src_localDB(localDB_trunk)]
    except KeyError:
        print('No local catalog matching keyword ',qu)
        # If the keyword matched neither an online nor a local catalog,
        # there is nothing left to query:
        if catN == []:
            print('Exiting...')
            sys.exit()
        ldbN,ldbR,ldbW,ldbA,ldbM,ldbE,ldbU,ldbB = [[]]*8
##########
# Initialise outputs:
##########
# Parallel lists of photometry entries; index 0 carries the units/header
# sentinel row that is written out ahead of the data.
wvlen, band, mag, emag, units = ['m'], ['--'], ['--'], ['--'], ['--']
beam, odate, ref = ['arcsec'], ['--'], ['--']
##########
# Collect SIMBAD names and VizieR catalog matches
##########
# Create custom SIMBAD (cS) query to retrieve 2MASS flux
cS = Simbad()
cS.add_votable_fields('flux(J)', 'flux(H)', 'flux(K)')
cS.add_votable_fields('flux_error(J)', 'flux_error(H)', 'flux_error(K)')
cS.add_votable_fields('flux_bibcode(J)', 'flux_bibcode(H)', 'flux_bibcode(K)')
cS.remove_votable_fields('coordinates')
# Falsy result (None/empty) means SIMBAD did not resolve the identifier:
objsim = cS.query_object(obj)
if not objsim:
    # The identifier itself is unknown to SIMBAD.
    print('')
    print('Warning: object name '+obj+' not recognised by SIMBAD!')
    # Try treat it as photometry of binary component (expect e.g. A or A+B label)
    print(' - blindly assuming multiplicity: checking "'+' '.join(obj.split(' ')[:-1])+'"')
    try:
        # Strip the trailing component flag and ask SIMBAD for the parent star:
        objB = [a[0] for a in Simbad.query_objectids(' '.join(obj.split(' ')[:-1]))]
        # If we get to here, the object is a component of a multiple system
        print(' - Success! '+' '.join(obj.split(' ')[:-1])+' recognised by SIMBAD!')
        print('Info: photometry search will be limited to the local database')
        print('--------------------------------------------')
        print(' CAUTION: ')
        print(' Individual component identifiers can vary ')
        print(' according to wavelength or between studies.')
        print(' You are advised to check the collated ')
        print(' references to ensure consistent naming. ')
        print('--------------------------------------------')
        print('')
        if ' '.join(obj.split(' ')[:-1]) not in [' '.join(o.split()) for o in objB]:
            for o in objB:
                # Retrieve full name of parent star from SIMBAD (in case e.g. XZ Tau
                # parsed instead of V* XZ Tau):
                if ' '.join(obj.split(' ')[:-1]) in o:
                    obj2 = o+' '+obj.split(' ')[-1]
            # NOTE(review): if no alias contains the parsed parent name, obj2 is
            # never assigned and the line below would raise NameError — confirm.
        else:
            # Parsed name matches required format of full simbad name of parent star plus
            # component flag (e.g. A).
            print('')
            obj2 = obj
        altIDs = [obj2]
    except TypeError:
        # query_objectids returned None: the parent name is unknown as well.
        print('Error: not multiple. Object name not registered in SIMBAD!')
        print('Please provide a valid object identifier.')
        print('')
        sys.exit()
else:
    # Only get here if the object identifier is simbad-compatible
    # Retrieve data from online catalogs:
    for o in catN:
        resM, resE = [], []
        found = ''
        print('Retrieving photometry from '+o+' ('+catR[o]+') ...')
        if o == '2MASS':
            # 2MASS J/H/K photometry comes straight from the SIMBAD votable
            # fields requested above rather than from a VizieR cone search.
            for t in range(0, 3):
                if catR[o] in str(objsim[catN[o][t]][0]):
                    addData(objsim[catM[o][t]][0], objsim[catE[o][t]][0], catB[o][t],
                            catW[o][t], catA[o][t], catU[o][t], 'unknown', catR[o],
                            m=mag, em=emag, b1=band, u=units, b2=beam, d=odate, r=ref,
                            w=wvlen)
                else:
                    print('No match')
        else:
            # Cone-search the VizieR catalog around the object position:
            res = Vizier(columns=['**', '+_r'], catalog=catN[o])
            result = res.query_region(obj, radius=searchR)
            try:
                # Indexing raises TypeError when the catalog is absent from the result:
                l_tmp = result[catN[o]]
            except TypeError:
                found = 'No match'
            if result.keys() and found != 'No match':
                if len(result[catN[o]]) > 1 and argopt.closest == False:
                    # Get the user to specify the matching catalog entry for the object:
                    print('Multiple results returned by Vizier within search radius')
                    print(result[catN[o]])
                    print('')
                    obj_r = input('Enter "_r" value for required target: ')
                    # Retrieve row number:
                    for r in range(0, len(result[catN[o]])):
                        if (result[catN[o]][r]['_r'] == float(obj_r)):
                            row = r
                elif len(result[catN[o]]) > 1 and argopt.closest == True:
                    # Retrieve the entry with smallest _r
                    print('Multiple results returned by Vizier within search radius')
                    print(result[catN[o]])
                    print('')
                    q_r = min([r['_r'] for r in result[catN[o]]])
                    # Retrieve row number:
                    print('Closest entry has _r =',q_r)
                    row = None
                    for r in range(0, len(result[catN[o]])):
                        if row == None and result[catN[o]][r]['_r'] == q_r:
                            row = r
                else:
                    row = 0
                # Retrieve mag/flux and its error from the catalog, given the row number
                #for mm in catM[o]:
                for m in range(0, len(catM[o])):
                    # Retrieve each of the mag/flux measurements...
                    try:
                        # '--' is astropy's masked-value marker — treat as missing:
                        if '--' not in str(result[catN[o]][row][catM[o][m]]):
                            resM = result[catN[o]][row][catM[o][m]]
                        else:
                            resM = '--'
                    except KeyError:
                        print('Warning: potential flux column name change in VizieR!')
                        print(result[catN[o]][row])
                        print (catM[o][m])
                        raise KeyError
                    # ... and their errors...
                    if o == 'IRAS':
                        # IRAS tabulates errors as a percentage of the flux:
                        t_resM = result[catN[o]][row][catE[o][m]]
                        resE = result[catN[o]][row][catM[o][m]]*0.01*t_resM
                    elif isinstance(catE[o][m], str):
                        # String entry names the error column directly:
                        if '--' not in str(result[catN[o]][row][catE[o][m]]):
                            resE = result[catN[o]][row][catE[o][m]]
                        else:
                            resE = '--'
                    else:
                        # Numeric entry is interpreted as a fractional error:
                        resE = catE[o][m] * result[catN[o]][row][catM[o][m]]
                    # And add it to the data to be written to file:
                    addData(resM, resE, catB[o][m], catW[o][m], catA[o][m], catU[o][m],
                            'unknown', catR[o], m=mag, em=emag, b1=band,
                            u=units, b2=beam, d=odate, r=ref, w=wvlen)
            else:
                print('No match.')
##########
# Account for specific case of Vieira+2003 which provides mag + colour table
# and object ID in PDS format:
##########
# BUGFIX: only (re)build the alias list from SIMBAD when the object resolved
# earlier; for an unresolved identifier the multiplicity branch above already
# set altIDs, and Simbad.query_objectids(obj) would return None here, making
# the list comprehension raise TypeError.
if objsim:
    altIDs = [a[0] for a in Simbad.query_objectids(obj)]
if qu == 'True':
    # Hard-coded description of the Vieira et al. (2003) VizieR table:
    cmN = {'Vieira03' : 'J/AJ/126/2971/table2'}
    cmR = {'Vieira03' : '2003AJ....126.2971V'}
    # Effective wavelengths (m) of Johnson V, B, U and Cousins Rc, Ic:
    cmW = {'Vieira03' : [540e-9, 442e-9, 364e-9, 647e-9, 786.5e-9]}
    # Diffraction-limited beam (arcsec) assumed for a 0.60 m telescope:
    cmA = {'Vieira03' : [(1.22*w/0.60)*206265 for w in cmW['Vieira03']]}
    cmM = {'Vieira03' : ['Vmag', 'B-V', 'U-B', 'V-Rc', 'Rc-Ic']}
    cmE = {'Vieira03' : ['--', '--', '--', '--', '--']}
    cmU = {'Vieira03' : ['mag', 'mag', 'mag', 'mag', 'mag']}
    cmB = {'Vieira03' : ['Johnson:V','Johnson:B','Johnson:U','Cousins:Rc',
                         'Cousins:Ic']}
    print('Retrieving photometry from Vieira et al. ('+cmR['Vieira03']+') ...')
    if any('PDS' in b for b in altIDs):
        indices = [i for i, s in enumerate(altIDs) if 'PDS' in s]
        p_obj = altIDs[indices[0]]
        # The table stores numeric PDS identifiers zero-padded to 3 digits:
        p_num = p_obj.split()[1]
        if 1 <= len(p_num) <= 3:
            pds_obj = p_num.zfill(3)
        else:
            print('Format of PDS identifier not recognised: '+p_obj)
            print('Exiting...')
            sys.exit()
        result = Vizier.get_catalogs(cmN['Vieira03'])
        ind = [i for i, s in enumerate([a for a in result[0]['PDS']]) if pds_obj in s]
        # BUGFIX: the original tested len(ind) > 1, so the normal case of a
        # single (unique) matching row was reported as 'No match.'.
        if len(ind) >= 1:
            # Convert the colour columns into individual magnitudes:
            jvmag = result[0]['Vmag'][ind]
            jbmag = result[0]['B-V'][ind] + jvmag    # B = (B-V) + V
            jumag = result[0]['U-B'][ind] + jbmag    # U = (U-B) + B
            crmag = jvmag - result[0]['V-Rc'][ind]   # Rc = V - (V-Rc)
            cimag = crmag - result[0]['Rc-Ic'][ind]  # Ic = Rc - (Rc-Ic)
            vieira_m = [jvmag, jbmag, jumag, crmag, cimag]
            for m in range(0, len(vieira_m)):
                addData(vieira_m[m], cmE['Vieira03'][m], cmB['Vieira03'][m],
                        cmW['Vieira03'][m], cmA['Vieira03'][m], cmU['Vieira03'][m],
                        'unknown', cmR['Vieira03'], m=mag, em=emag, b1=band,
                        u=units, b2=beam, d=odate, r=ref, w=wvlen)
        else:
            print('No match.')
    else:
        print('No match.')
##########
# Then deal with local data base of tables not on VizieR:
##########
suggestAlt = []
for o in ldbN:
print('Retrieving photometry from '+o+' ('+ldbR[o]+') ...')
with open(ldbN[o]) as f_in:
reader = csv.DictReader(f_in, delimiter=',')
entries = [a for a in reader]
targs = [row['Target'] for row in entries]
match = list(set(targs).intersection([' '.join(a.split()) for a in altIDs]))
# check for entries where any of [a for altIDs] match local database catalog
# entry.split(' ')[:-1] (i.e. the portion of the name up to the final space)
smatch = list(set([' '.join(t.split(' ')[:-1]) for t in targs]).intersection([' '.join(a.split()) for a in altIDs]))
if len(match) == 0 and len(smatch) == 0:
print(' - no match.')
elif len(match) == 0 and len(smatch) != 0:
# Alert the user to the fact that there are entries | |
<reponame>wisematch/KDMOT<gh_stars>0
import time
from collections import OrderedDict
import torch
from torch import nn
import torch.nn.functional as F
from torchvision.ops import boxes as box_ops
from torchvision.ops import MultiScaleRoIAlign
from src.test_person_search.project.misc import util
from src.test_person_search.project.misc.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork
from src.test_person_search.project.misc.transform import GeneralizedRCNNTransform
from src.test_person_search.project.loss.getloss import CriterionReID
from src.test_person_search.project.head.embedding import EmbDet
from src.test_person_search.project.head.gethead import FastRCNNPredictor
from src.test_person_search.project.head.basehead import BaseRoIHeads
from src.test_person_search.project.network.getnet import _split_backbone
from src.test_person_search.project.detector.getdet import DetectorBackbone
class BSL(nn.Module):
    """Person-search network combining a Faster R-CNN style detector
    (backbone + RPN + R-CNN box head) with a parallel re-ID embedding head;
    both heads consume the shared C4 feature map.
    """
    def __init__(self, num_classes,
                 # re-ID
                 num_train_pids, cls_type="", in_level=["C5"],
                 # Transform
                 min_size=800, max_size=1333,
                 image_mean=None, image_std=None,
                 # RPN
                 rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
                 rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,
                 rpn_nms_thresh=0.7,
                 rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
                 rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,
                 # Box
                 box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,
                 box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
                 box_batch_size_per_image=512, box_positive_fraction=0.25,
                 bbox_reg_weights=None,
                 # Misc
                 eval_gt=False, display=False, cws=False
                 ):
        # NOTE(review): in_level=["C5"] is a mutable default argument — safe
        # only while it is never mutated in place; a None sentinel would be
        # more robust.
        super(BSL, self).__init__()
        # ------- Backbone -------
        # Split ResNet-50 into a detection trunk (through C4) and a top model
        # that the re-ID embedding uses later.
        base_model, top_model = _split_backbone(backbone_name='resnet50', conv5_stride=2)
        return_layers = {
            'conv1': "C1",
            'conv2': "C2",
            'conv3': "C3",
            'conv4_3': "C4",
        }
        self.backbone = DetectorBackbone(base_model, return_layers)
        # ------- RPN -------
        rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)
        rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)
        # Positional argument bundle forwarded to RegionProposalNetwork:
        rpn_kwargs = [rpn_fg_iou_thresh, rpn_bg_iou_thresh,
                      rpn_batch_size_per_image, rpn_positive_fraction,
                      rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_nms_thresh]
        rpn_anchor_generator = AnchorGenerator(
            sizes=((8, 16, 32),),
            aspect_ratios=((1, 2),))
        self.RPN = RegionProposalNetwork(
            rpn_anchor_generator,
            RPNHead(1024, rpn_anchor_generator.num_anchors_per_location()[0]),
            *rpn_kwargs
        )
        # ------- R-CNN -------
        roi_align = MultiScaleRoIAlign(
            featmap_names=["C4"],
            output_size=(14, 7),
            sampling_ratio=0
        )
        resolution_h, resolution_w = roi_align.output_size[0], roi_align.output_size[1]
        box_emb = EmbDet(1024, 256, resolutions=[resolution_h, resolution_w])
        box_predictor = FastRCNNPredictor(box_emb.representation_size, num_classes)
        box_kwargs = [
            # Faster R-CNN training
            box_fg_iou_thresh, box_bg_iou_thresh,
            box_batch_size_per_image, box_positive_fraction,
            bbox_reg_weights,
            # Faster R-CNN inference
            box_score_thresh, box_nms_thresh, box_detections_per_img
        ]
        self.RCNN = RCNN(
            roi_align, box_emb, box_predictor,
            *box_kwargs
        )
        self.RCNN.cws = cws
        # ------- re-ID -------
        out_channels = 256
        # Per-level input channel widths, truncated to the requested levels
        # and reversed to match in_level ordering.
        in_ch_list = [2048, 1024, 512, 256, 256][:len(in_level)][::-1]
        reid_emb = EmbedReID(
            top_model,
            roi_align,
            featmap_names=in_level,
            in_ch_list=in_ch_list,
            out_ch=out_channels
        )
        # One re-ID criterion per feature level:
        reid_crit = nn.ModuleDict()
        for name, in_ch in zip(in_level, in_ch_list):
            reid_crit[name] = CriterionReID(cls_type, in_ch, num_train_pids)
        self.reid_head = ReIDHead(
            reid_emb,
            reid_crit,
            # PK sampling
            n_roi_per_gt=4,
            fg_iou_thresh=0.5
        )
        # -------- Others -------
        if image_mean is None:
            image_mean = [0.485, 0.456, 0.406] # NOTE: RGB order is given here
        if image_std is None:
            image_std = [0.229, 0.224, 0.225]
        self.transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)
        self.eval_gt = eval_gt
        self.display = display

    def train(self, mode=True):
        # Re-implementation of nn.Module.train() with the same semantics.
        self.training = mode
        for module in self.children():
            module.train(mode)
        # NOTE(review): backbone is already covered by the children() loop
        # above, so this extra call looks redundant unless
        # DetectorBackbone.train() is overridden — confirm.
        self.backbone.train(mode)
        return self

    def extra_box_feat(self, images, targets):
        """
        Extract re-ID features for the ground-truth boxes of one image.
        images (list[Tensor]): length=1
        targets (list[Dict[Tensor]]): length=1
        """
        assert len(images) == len(targets) == 1, "Only support single image input"
        images, targets = self.transform(images, targets)
        # Backbone
        x = self.backbone(images.tensors)
        x = x['C4']
        box_coord = [targets[0]['boxes']] # should be (1 4)
        # box features
        results, _ = self.reid_head(OrderedDict([("C4", x)]), box_coord, images.image_sizes)
        box_feat = results['feats']
        return box_feat

    def forward(self, images, targets=None):
        """
        Arguments:
            images (list[Tensor]): images to be processed
            targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)
        Returns:
            result (list[BoxList] or dict[Tensor]): the output from the model.
                During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns list[BoxList] contains additional fields
                like `scores`, `labels` and `mask` (for Mask R-CNN models).
        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should not be None.")
        # ---------------------------------------------------------------------------------------------------
        # Data pre-processing
        num_images = len(images)
        original_image_sizes = [img.shape[-2:] for img in images]
        images, targets = self.transform(images, targets)
        # Backbone forward
        featmaps = self.backbone(images.tensors)
        # The same C4 map feeds RPN, detection head and re-ID head:
        x = featmaps['C4']
        c4_det = featmaps['C4']
        c4_reid = featmaps['C4']
        # ---------------------------------------------------------------------------------------------------
        # RPN
        # List[Tensor(post_nms_top_n 4)], Dict{losses}, len(proposals)=batch_size
        proposals, proposal_losses = self.RPN(images, OrderedDict([("C4", x)]), targets)
        if not self.training and self.eval_gt:
            # Evaluate with ground-truth boxes instead of RPN proposals:
            proposals = [t['boxes'] for t in targets]
        # R-CNN
        # Dict{List} "fg_cnt", "bg_cnt" in training, Dict{Tensor} 'class_logits','box_regressions' in test, Dict{losses}
        det_res, rcnn_losses = self.RCNN(OrderedDict([("C4", c4_det)]), proposals, images.image_sizes, targets)
        # ---------------------------------------------------------------------------------------------------
        # re-ID Head
        # pooling re-ID RoI feature using R-CNN detections
        # reid_props = self.RCNN.box_decoder(detections['box_regression'], proposals, images.image_sizes)
        reid_tic = time.time()
        # Dict{List} "boxes", "pids", "acc" in training, Dict{Tensor} "feats" in test. Dict{losses}
        reid_res, reid_losses = self.reid_head(
            OrderedDict([("C4", c4_reid)]), proposals, images.image_sizes, targets
        )
        # Wall-clock time of the re-ID head, kept for profiling/diagnostics:
        self.reid_time = time.time() - reid_tic
        # ---------------------------------------------------------------------------------------------------
        # Collecting detections
        detections = self.collect_detections(reid_res, det_res, num_images, images, proposals, targets)
        # mapping boxes to origin image size, only return input when training
        detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)
        # collect losses
        losses = {}
        losses.update(proposal_losses)
        losses.update(rcnn_losses)
        losses.update(reid_losses)
        # Return
        return detections, losses

    def collect_detections(self, reid_res, det_res, num_images, images, proposals, targets):
        """Merge detector and re-ID outputs into one per-image detection list."""
        detections = []
        if self.training:
            reid_res.update(det_res) # "boxes", "pids", "acc", "fg_cnt", "bg_cnt"
            if self.display:
                # Attach inputs for visualization when display is enabled:
                reid_res.update({"img": [img.detach() for img in images.tensors], "tgt": targets})
            reid_res.update({"labels": [torch.ones(t.size(0), dtype=torch.long, device=t.device)
                                        for t in reid_res['boxes']]})
            detections = util.format_detections(num_images, reid_res) # Dict{List} to List[Dict]
        else:
            class_logits, box_regression, box_feats = \
                det_res['class_logits'], det_res['box_regression'], reid_res['feats']
            assert class_logits.size(0) == box_regression.size(0) == box_feats.size(0)
            # boxes: List[Tensor(detections_per_img (num_cls-1)*4)]
            if self.eval_gt:
                # Ground-truth evaluation: unit scores/labels, features split per image.
                boxes = [t['boxes'] for t in targets]
                scores = [torch.ones(b.size(0)).to(b.device) for b in boxes]
                labels = [torch.ones(b.size(0), dtype=torch.long).to(b.device) for b in boxes]
                box_feats = box_feats.split([b.size(0) for b in boxes], dim=0)
            else:
                boxes, scores, labels, box_feats = self.RCNN._postprocess_detections(
                    class_logits, box_regression, proposals, images.image_sizes, box_feats, mode="rcnn")
            # One image one Dict
            for i in range(num_images):
                detections.append(
                    dict(
                        boxes=boxes[i], # box coordinates
                        labels=labels[i], # class index, e.g., bg or person
                        scores=scores[i], # classification confidence
                        feats=box_feats[i] # reid features of boxes
                    )
                )
        return detections
class RCNN(BaseRoIHeads):
    """Fast R-CNN box head: RoI pooling + feature embedding + classifier/
    regressor, with a post-processing step that can optionally weight re-ID
    features by classification confidence (CWS).
    """
    def __init__(self,
                 box_roi_pool,
                 box_head,
                 box_predictor,
                 # Faster R-CNN training
                 fg_iou_thresh, bg_iou_thresh,
                 batch_size_per_image, positive_fraction,
                 bbox_reg_weights,
                 # Faster R-CNN inference
                 score_thresh,
                 nms_thresh,
                 detections_per_img,
                 ):
        super(RCNN, self).__init__(box_roi_pool,
                                   box_head,
                                   box_predictor,
                                   # Faster R-CNN training
                                   fg_iou_thresh, bg_iou_thresh,
                                   batch_size_per_image, positive_fraction,
                                   bbox_reg_weights,
                                   # Faster R-CNN inference
                                   score_thresh,
                                   nms_thresh,
                                   detections_per_img, )
        # Confidence-weighted similarity flag; toggled externally by BSL.__init__.
        self.cws = False

    def forward(self,
                features,
                proposals,
                image_shapes,
                targets=None):
        """
        Arguments:
            features (Dict[Tensor])
            proposals (List[Tensor[N, 4]])
            image_shapes (List[Tuple[H, W]])
        """
        if self.training:
            # Subsample proposals into a fixed fg/bg mix and build regression targets:
            proposals, matched_idxs, labels, regression_targets, _ = \
                self.select_training_samples(proposals, targets)
        box_features = self.box_roi_pool(features, proposals, image_shapes) # (n_roi_per_img*bs c h w)
        box_features = self.box_head(box_features) # (n_roi_per_img*bs dim_feat)
        class_logits, box_regression = \
            self.box_predictor(box_features) # (n_roi_per_img*bs n_cls) (n_roi_per_img*bs 4)
        result, losses = {}, {}
        if self.training:
            loss_classifier, loss_box_reg = _fastrcnn_loss(
                class_logits, box_regression, labels, regression_targets)
            losses = dict(loss_classifier=loss_classifier, loss_box_reg=loss_box_reg)
            # return for re-ID and visualization
            fg_cnt = [torch.sum(label == 1).item() for label in labels]
            bg_cnt = [torch.sum(label == 0).item() for label in labels]
            result.update({"fg_cnt": fg_cnt, "bg_cnt": bg_cnt})
        else:
            result.update({"class_logits": class_logits, "box_regression": box_regression})
        return result, losses

    def _postprocess_detections(self,
                                class_logits,
                                box_regression,
                                proposals,
                                image_shapes,
                                box_features,
                                mode="rcnn"):
        """
        class_logits: 2D tensor(n_roi_per_img*bs C)
        box_regression: 2D tensor(n_roi_per_img*bs C*4)
        proposals: list[tensor(n_roi_per_img 4)]
        image_shapes: list[tuple[H, W]]
        box_features: 2D tensor(n_roi_per_img*bs dim_feat)]
        mode: test with RPN or RCNN detections
        """
        device = class_logits.device
        boxes_per_image = [len(boxes_in_image) for boxes_in_image in proposals]
        pred_boxes = self.box_coder.decode(box_regression, proposals) # tensor(n_roi_per_img*bs C 4)
        pred_scores = F.softmax(class_logits, -1)
        # Drop the background column; remaining columns are the foreground classes.
        pred_scores = pred_scores[:, 1:]
        if self.cws:
            box_features = box_features * pred_scores.view(-1, 1) # CWS
        box_features = box_features.split(boxes_per_image, 0)
        # split boxes and scores per image
        pred_boxes = pred_boxes.split(boxes_per_image, 0) # list[tensor(n_roi_per_img C 4)], length=bs
        pred_scores = pred_scores.split(boxes_per_image, 0) # list[tensor(n_roi_per_img 1)], length=bs
        all_boxes = []
        all_scores = []
        all_labels = []
        all_feats = []
        n_iter = 0
        # go through batch_size
        for boxes, scores, image_shape in zip(pred_boxes, pred_scores, image_shapes):
            #
            if box_features is not None:
                features = box_features[n_iter]
            boxes = box_ops.clip_boxes_to_image(boxes, image_shape) # tensor(n_roi_per_img C 4)
            # create labels for each prediction
            labels = torch.ones(scores.size(0), device=device)
            # remove predictions with the background label
            boxes = boxes[:, 1:] # tensor(n_roi_per_img C-1 4)
            labels = labels.unsqueeze(1) # tensor(n_roi_per_img 1)
            ### using rpn proposals for testing ###
            if "rpn" == mode:
                boxes = proposals[n_iter]
            #######################################
            # batch everything, by making every class prediction be a separate instance
            boxes = boxes.reshape(-1, 4) # 2D tensor(n_roi_per_img*(C-1) 4)
            scores = scores.flatten()
            labels = labels.flatten()
            # remove low scoring boxes
            inds = torch.nonzero(scores > self.score_thresh).squeeze(1)
            boxes, scores, labels = boxes[inds], scores[inds], labels[inds]
            if box_features is not None:
                features = features[inds]
            # non-maximum suppression, independently done per class
            keep = box_ops.batched_nms(boxes, scores, labels, self.nms_thresh)
            # keep only topk scoring predictions
            keep = keep[:self.detections_per_img]
            boxes, scores, labels = boxes[keep], scores[keep], labels[keep]
            if box_features is not None:
                features = features[keep]
            all_boxes.append(boxes)
            all_scores.append(scores)
            all_labels.append(labels)
            if box_features is not None:
                all_feats.append(features)
            n_iter += 1
        return all_boxes, all_scores, all_labels, all_feats
def _fastrcnn_loss(class_logits, box_regression, labels, regression_targets):
"""
Computes the loss for Faster R-CNN.
Arguments:
class_logits (Tensor): include background
box_regression (Tensor)
Returns:
classification_loss (Tensor)
box_loss (Tensor)
"""
labels = torch.cat(labels, dim=0)
regression_targets = torch.cat(regression_targets, dim=0)
classification_loss = F.cross_entropy(class_logits, labels)
# classification_loss = F.binary_cross_entropy_with_logits(class_logits.squeeze(1), labels.float())
# get indices that correspond to the regression targets for
# the corresponding ground truth labels, | |
import functools
import operator
import pickle
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from packaging.version import Version
import xarray as xr
from xarray.core.alignment import broadcast
from xarray.core.computation import (
_UFuncSignature,
apply_ufunc,
broadcast_compat_data,
collect_dict_values,
join_dict_keys,
ordered_set_intersection,
ordered_set_union,
result_name,
unified_dim_sizes,
)
from xarray.core.pycompat import dask_version
from . import has_dask, raise_if_dask_computes, requires_dask
def assert_identical(a, b):
    """A version of this function which accepts numpy arrays"""
    # Hide this helper frame from pytest tracebacks.
    __tracebackhide__ = True
    from xarray.testing import assert_identical as assert_identical_

    # xarray objects expose .identical(); anything else (plain numpy arrays)
    # falls back to element-wise comparison.
    if hasattr(a, "identical"):
        assert_identical_(a, b)
    else:
        assert_array_equal(a, b)
def test_signature_properties() -> None:
    """Exercise the derived attributes and string renderings of _UFuncSignature."""
    sig = _UFuncSignature([["x"], ["x", "y"]], [["z"]])
    # Core dims are normalised to tuples, in declaration order.
    assert sig.input_core_dims == (("x",), ("x", "y"))
    assert sig.output_core_dims == (("z",),)
    # Aggregate views over all inputs/outputs.
    assert sig.all_input_core_dims == frozenset(["x", "y"])
    assert sig.all_output_core_dims == frozenset(["z"])
    # Arity bookkeeping.
    assert sig.num_inputs == 2
    assert sig.num_outputs == 1
    # Human-readable and gufunc-compatible renderings.
    assert str(sig) == "(x),(x,y)->(z)"
    assert sig.to_gufunc_string() == "(dim0),(dim0,dim1)->(dim2)"
    excluded_form = sig.to_gufunc_string(exclude_dims=set("x"))
    assert excluded_form == "(dim0_0),(dim0_1,dim1)->(dim2)"
    # dimension names matter
    assert _UFuncSignature([["x"]]) != _UFuncSignature([["y"]])
def test_result_name() -> None:
    """result_name yields a name only when the named inputs do not conflict."""

    class Tagged:
        def __init__(self, name=None):
            self.name = name

    # Inputs without a .name attribute contribute nothing.
    assert result_name([1, 2]) is None
    # A lone unnamed object gives no name.
    assert result_name([Tagged()]) is None
    # A single named object wins over nameless inputs.
    assert result_name([Tagged("foo"), 2]) == "foo"
    # Conflicting names (including None vs "foo") cancel out.
    assert result_name([Tagged("foo"), Tagged("bar")]) is None
    assert result_name([Tagged("foo"), Tagged()]) is None
def test_ordered_set_union() -> None:
    """Union keeps first-seen order and drops duplicates."""
    cases = [
        ([[1, 2]], [1, 2]),
        ([[1, 2], [2, 1]], [1, 2]),
        ([[0], [1, 2], [1, 3]], [0, 1, 2, 3]),
    ]
    for lists, expected in cases:
        assert list(ordered_set_union(lists)) == expected
def test_ordered_set_intersection() -> None:
    """Intersection keeps the order of the first list and only shared items."""
    cases = [
        ([[1, 2]], [1, 2]),
        ([[1, 2], [2, 1]], [1, 2]),
        ([[1, 2], [1, 3]], [1]),
        ([[1, 2], [2]], [2]),
    ]
    for lists, expected in cases:
        assert list(ordered_set_intersection(lists)) == expected
def test_join_dict_keys() -> None:
    """join_dict_keys honours all four join modes and rejects invalid ones."""
    dicts = [dict.fromkeys(ks) for ks in (["x", "y"], ["y", "z"])]
    expected_by_join = {
        "left": ["x", "y"],
        "right": ["y", "z"],
        "inner": ["y"],
        "outer": ["x", "y", "z"],
    }
    for join, expected in expected_by_join.items():
        assert list(join_dict_keys(dicts, join)) == expected
    # "exact" requires identical key sets; unknown joins raise KeyError.
    with pytest.raises(ValueError):
        join_dict_keys(dicts, "exact")
    with pytest.raises(KeyError):
        join_dict_keys(dicts, "foobar")
def test_collect_dict_values() -> None:
    """Values are gathered per key, gaps filled, and scalars broadcast through."""
    inputs = [{"x": 1, "y": 2, "z": 3}, {"z": 4}, 5]
    result = collect_dict_values(inputs, ["x", "y", "z"], fill_value=0)
    assert result == [[1, 0, 5], [2, 0, 5], [3, 4, 5]]
def identity(x):
    # Trivial passthrough: the simplest possible function for apply_ufunc tests.
    return x
def test_apply_identity() -> None:
    # apply_ufunc with a no-op function must return each input unchanged,
    # whatever xarray container (or plain array / groupby) wraps the data.
    array = np.arange(10)
    variable = xr.Variable("x", array)
    data_array = xr.DataArray(variable, [("x", -array)])
    dataset = xr.Dataset({"y": variable}, {"x": -array})
    apply_identity = functools.partial(apply_ufunc, identity)
    assert_identical(array, apply_identity(array))
    assert_identical(variable, apply_identity(variable))
    assert_identical(data_array, apply_identity(data_array))
    assert_identical(data_array, apply_identity(data_array.groupby("x")))
    assert_identical(dataset, apply_identity(dataset))
    assert_identical(dataset, apply_identity(dataset.groupby("x")))
def add(a, b):
    # Element-wise addition routed through apply_ufunc, so tests exercise
    # every supported operand type (ndarray/Variable/DataArray/Dataset/GroupBy).
    return apply_ufunc(operator.add, a, b)
def test_apply_two_inputs() -> None:
    """Adding a zero-valued operand must round-trip the other input, for every
    combination of container types (and the result takes the richer type)."""
    base = np.array([1, 2, 3])
    variable = xr.Variable("x", base)
    data_array = xr.DataArray(variable, [("x", -base)])
    dataset = xr.Dataset({"y": variable}, {"x": -base})
    zeros = np.zeros_like(base)
    zero_variable = xr.Variable("x", zeros)
    zero_data_array = xr.DataArray(zero_variable, [("x", -base)])
    zero_dataset = xr.Dataset({"y": zero_variable}, {"x": -base})

    # (expected, lhs, rhs) triples covering both operand orders.
    cases = [
        (base, base, zeros),
        (base, zeros, base),
        (variable, variable, zeros),
        (variable, variable, zero_variable),
        (variable, zeros, variable),
        (variable, zero_variable, variable),
        (data_array, data_array, zeros),
        (data_array, data_array, zero_variable),
        (data_array, data_array, zero_data_array),
        (data_array, zeros, data_array),
        (data_array, zero_variable, data_array),
        (data_array, zero_data_array, data_array),
        (dataset, dataset, zeros),
        (dataset, dataset, zero_variable),
        (dataset, dataset, zero_data_array),
        (dataset, dataset, zero_dataset),
        (dataset, zeros, dataset),
        (dataset, zero_variable, dataset),
        (dataset, zero_data_array, dataset),
        (dataset, zero_dataset, dataset),
    ]
    for expected, lhs, rhs in cases:
        assert_identical(expected, add(lhs, rhs))

    # GroupBy operands are rebuilt for every call, mirroring the original test.
    assert_identical(data_array, add(data_array.groupby("x"), zero_data_array))
    assert_identical(data_array, add(zero_data_array, data_array.groupby("x")))
    assert_identical(dataset, add(data_array.groupby("x"), zero_dataset))
    assert_identical(dataset, add(zero_dataset, data_array.groupby("x")))
    assert_identical(dataset, add(dataset.groupby("x"), zero_data_array))
    assert_identical(dataset, add(dataset.groupby("x"), zero_dataset))
    assert_identical(dataset, add(zero_data_array, dataset.groupby("x")))
    assert_identical(dataset, add(zero_dataset, dataset.groupby("x")))
def test_apply_1d_and_0d() -> None:
    """Same zero-addition round-trips as test_apply_two_inputs, but with
    scalar (0-d) zero operands to check broadcasting against 1-d data."""
    base = np.array([1, 2, 3])
    variable = xr.Variable("x", base)
    data_array = xr.DataArray(variable, [("x", -base)])
    dataset = xr.Dataset({"y": variable}, {"x": -base})
    zero_scalar = 0
    zero_variable = xr.Variable((), zero_scalar)
    zero_data_array = xr.DataArray(zero_variable)
    zero_dataset = xr.Dataset({"y": zero_variable})

    # (expected, lhs, rhs) triples covering both operand orders.
    cases = [
        (base, base, zero_scalar),
        (base, zero_scalar, base),
        (variable, variable, zero_scalar),
        (variable, variable, zero_variable),
        (variable, zero_scalar, variable),
        (variable, zero_variable, variable),
        (data_array, data_array, zero_scalar),
        (data_array, data_array, zero_variable),
        (data_array, data_array, zero_data_array),
        (data_array, zero_scalar, data_array),
        (data_array, zero_variable, data_array),
        (data_array, zero_data_array, data_array),
        (dataset, dataset, zero_scalar),
        (dataset, dataset, zero_variable),
        (dataset, dataset, zero_data_array),
        (dataset, dataset, zero_dataset),
        (dataset, zero_scalar, dataset),
        (dataset, zero_variable, dataset),
        (dataset, zero_data_array, dataset),
        (dataset, zero_dataset, dataset),
    ]
    for expected, lhs, rhs in cases:
        assert_identical(expected, add(lhs, rhs))

    # GroupBy operands are rebuilt for every call, mirroring the original test.
    assert_identical(data_array, add(data_array.groupby("x"), zero_data_array))
    assert_identical(data_array, add(zero_data_array, data_array.groupby("x")))
    assert_identical(dataset, add(data_array.groupby("x"), zero_dataset))
    assert_identical(dataset, add(zero_dataset, data_array.groupby("x")))
    assert_identical(dataset, add(dataset.groupby("x"), zero_data_array))
    assert_identical(dataset, add(dataset.groupby("x"), zero_dataset))
    assert_identical(dataset, add(zero_data_array, dataset.groupby("x")))
    assert_identical(dataset, add(zero_dataset, dataset.groupby("x")))
def test_apply_two_outputs() -> None:
    """A ufunc returning a 2-tuple must produce two results mirroring the input."""
    base = np.arange(5)
    variable = xr.Variable("x", base)
    data_array = xr.DataArray(variable, [("x", -base)])
    dataset = xr.Dataset({"y": variable}, {"x": -base})

    def duplicate(obj):
        def pair(x):
            return (x, x)

        # Two scalar-core-dim outputs make apply_ufunc unpack the tuple.
        return apply_ufunc(pair, obj, output_core_dims=[[], []])

    for source in (base, variable, data_array, dataset):
        first, second = duplicate(source)
        assert_identical(first, source)
        assert_identical(second, source)

    # GroupBy inputs collapse back to their parent container type.
    first, second = duplicate(data_array.groupby("x"))
    assert_identical(first, data_array)
    assert_identical(second, data_array)
    first, second = duplicate(dataset.groupby("x"))
    assert_identical(first, dataset)
    assert_identical(second, dataset)
@requires_dask
def test_apply_dask_parallelized_two_outputs() -> None:
    da = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y"))

    def duplicate(obj):
        # Two outputs through the dask="parallelized" code path.
        return apply_ufunc(
            lambda x: (x, x), obj, output_core_dims=[[], []], dask="parallelized"
        )

    first, second = duplicate(da.chunk({"x": 1}))
    assert_identical(da, first)
    assert_identical(da, second)
def test_apply_input_core_dimension() -> None:
    def first_element(obj, dim):
        # `dim` becomes the trailing core axis; take its first entry.
        return apply_ufunc(lambda x: x[..., 0], obj, input_core_dims=[[dim]])

    values = np.array([[1, 2], [3, 4]])
    var = xr.Variable(["x", "y"], values)
    da = xr.DataArray(var, {"x": ["a", "b"], "y": [-1, -2]})
    ds = xr.Dataset({"data": da})

    expected_var_x = xr.Variable(["y"], [1, 2])
    expected_da_x = xr.DataArray(expected_var_x, {"y": [-1, -2]})
    expected_ds_x = xr.Dataset({"data": expected_da_x})

    expected_var_y = xr.Variable(["x"], [1, 3])
    expected_da_y = xr.DataArray(expected_var_y, {"x": ["a", "b"]})
    expected_ds_y = xr.Dataset({"data": expected_da_y})

    assert_identical(expected_var_x, first_element(var, "x"))
    assert_identical(expected_var_y, first_element(var, "y"))
    assert_identical(expected_da_x, first_element(da, "x"))
    assert_identical(expected_da_y, first_element(da, "y"))
    assert_identical(expected_ds_x, first_element(ds, "x"))
    assert_identical(expected_ds_y, first_element(ds, "y"))
    assert_identical(expected_da_x, first_element(da.groupby("y"), "x"))
    assert_identical(expected_ds_x, first_element(ds.groupby("y"), "x"))

    def multiply(*args):
        product = args[0]
        for factor in args[1:]:
            product = product * factor
        return product

    # regression test for GH:2341
    # one input_core_dims spec for two arguments must be rejected
    with pytest.raises(ValueError):
        apply_ufunc(
            multiply,
            da,
            da["y"].values,
            input_core_dims=[["y"]],
            output_core_dims=[["y"]],
        )

    expected = xr.DataArray(
        multiply(da, da["y"]), dims=["x", "y"], coords=da.coords
    )
    actual = apply_ufunc(
        multiply,
        da,
        da["y"].values,
        input_core_dims=[["y"], []],
        output_core_dims=[["y"]],
    )
    assert_identical(expected, actual)
def test_apply_output_core_dimension() -> None:
    def stack_negative(obj):
        # Add a new "sign" core dim holding (x, -x) along the last axis.
        result = apply_ufunc(
            lambda x: np.stack([x, -x], axis=-1), obj, output_core_dims=[["sign"]]
        )
        if isinstance(result, (xr.Dataset, xr.DataArray)):
            result.coords["sign"] = [1, -1]
        return result

    values = np.array([[1, 2], [3, 4]])
    var = xr.Variable(["x", "y"], values)
    da = xr.DataArray(var, {"x": ["a", "b"], "y": [-1, -2]})
    ds = xr.Dataset({"data": da})

    stacked_values = np.array([[[1, -1], [2, -2]], [[3, -3], [4, -4]]])
    stacked_var = xr.Variable(["x", "y", "sign"], stacked_values)
    stacked_da = xr.DataArray(
        stacked_var, {"x": ["a", "b"], "y": [-1, -2], "sign": [1, -1]}
    )
    stacked_ds = xr.Dataset({"data": stacked_da})

    single_output_cases = [
        (values, stacked_values),
        (var, stacked_var),
        (da, stacked_da),
        (ds, stacked_ds),
        (da.groupby("x"), stacked_da),
        (ds.groupby("x"), stacked_ds),
    ]
    for obj, stacked in single_output_cases:
        assert_identical(stacked, stack_negative(obj))

    def original_and_stack_negative(obj):
        # Two outputs: the input unchanged plus its signed stack.
        result = apply_ufunc(
            lambda x: (x, np.stack([x, -x], axis=-1)),
            obj,
            output_core_dims=[[], ["sign"]],
        )
        if isinstance(result[1], (xr.Dataset, xr.DataArray)):
            result[1].coords["sign"] = [1, -1]
        return result

    two_output_cases = [
        (values, values, stacked_values),
        (var, var, stacked_var),
        (da, da, stacked_da),
        (ds, ds, stacked_ds),
        (da.groupby("x"), da, stacked_da),
        (ds.groupby("x"), ds, stacked_ds),
    ]
    for obj, expected_plain, expected_stacked in two_output_cases:
        out0, out1 = original_and_stack_negative(obj)
        assert_identical(expected_plain, out0)
        assert_identical(expected_stacked, out1)
def test_apply_exclude() -> None:
    def concatenate(objects, dim="x"):
        # exclude_dims lets the inputs disagree on the size of `dim`.
        result = apply_ufunc(
            lambda *x: np.concatenate(x, axis=-1),
            *objects,
            input_core_dims=[[dim]] * len(objects),
            output_core_dims=[[dim]],
            exclude_dims={dim},
        )
        if isinstance(result, (xr.Dataset, xr.DataArray)):
            # note: this will fail if dim is not a coordinate on any input
            result.coords[dim] = np.concatenate([obj.coords[dim] for obj in objects])
        return result

    arrays = [np.array([1]), np.array([2, 3])]
    variables = [xr.Variable("x", a) for a in arrays]
    data_arrays = [
        xr.DataArray(v, {"x": c, "y": ("x", range(len(c)))})
        for v, c in zip(variables, [["a"], ["b", "c"]])
    ]
    datasets = [xr.Dataset({"data": da}) for da in data_arrays]

    expected_array = np.array([1, 2, 3])
    expected_variable = xr.Variable("x", expected_array)
    expected_data_array = xr.DataArray(expected_variable, [("x", list("abc"))])
    expected_dataset = xr.Dataset({"data": expected_data_array})

    for expected, inputs in (
        (expected_array, arrays),
        (expected_variable, variables),
        (expected_data_array, data_arrays),
        (expected_dataset, datasets),
    ):
        assert_identical(expected, concatenate(inputs))

    # an excluded dim must also be a core dimension
    with pytest.raises(ValueError):
        apply_ufunc(identity, variables[0], exclude_dims={"x"})
def test_apply_groupby_add() -> None:
    values = np.arange(5)
    var = xr.Variable("x", values)
    coords = {"x": -values, "y": ("x", [0, 0, 1, 1, 2])}
    da = xr.DataArray(var, coords, dims="x")
    ds = xr.Dataset({"z": var}, coords)

    other_var = xr.Variable("y", [0, 10])
    other_da = xr.DataArray(other_var, dims="y")
    other_ds = xr.Dataset({"z": other_var})

    # group y == 2 has no entry in `other`, hence the trailing NaN
    expected_var = xr.Variable("x", [0, 1, 12, 13, np.nan])
    expected_da = xr.DataArray(expected_var, coords, dims="x")
    expected_ds = xr.Dataset({"z": expected_var}, coords)

    assert_identical(expected_da, add(da.groupby("y"), other_da))
    assert_identical(expected_ds, add(da.groupby("y"), other_ds))
    assert_identical(expected_ds, add(ds.groupby("y"), other_da))
    assert_identical(expected_ds, add(ds.groupby("y"), other_ds))

    # cannot be performed with xarray.Variable objects that share a dimension
    with pytest.raises(ValueError):
        add(da.groupby("y"), other_var)

    # groupby arithmetic requires both operands to be grouped identically
    with pytest.raises(ValueError):
        add(da.groupby("y"), da[:4].groupby("y"))
    with pytest.raises(ValueError):
        add(da.groupby("y"), da[1:].groupby("y"))
    with pytest.raises(ValueError):
        add(da.groupby("y"), other_da.groupby("y"))
    with pytest.raises(ValueError):
        add(da.groupby("y"), da.groupby("x"))
def test_unified_dim_sizes() -> None:
assert unified_dim_sizes([xr.Variable((), 0)]) == {}
assert | |
import os
import pygal
import shutil
from pygal.style import Style
from random import randint
from datetime import timedelta
from datetime import time
from datetime import datetime
from datetime import date
from graphs import Graphs
from dbtools import DbTools
# Module-level navigation defaults for the web2py controller actions below.
# Each action re-initialises local copies of these; the module values act as
# zero ("nothing selected") defaults.
gintMachineID = 0
gintCountryNr = 0
gintPlantNr = 0
gintSubPlantNr = 0
gintDepartmentNr = 0
# Fallback machine id used when no machine has been chosen.
gintDefMachID = 3
# NOTE(review): `Service` is presumably injected by the web2py global
# environment (gluon.tools) -- confirm; it is not imported in this file.
service = Service()
@auth.requires_login()
def index():
    """Drill-down navigation over the OEE hierarchy.

    Levels: Country -> Plant -> SubPlant -> Department -> Machine.
    The deepest non-zero id found in request.vars decides which level's
    rows are listed next. Row visibility is restricted by
    auth.user.fldOeeUserRightID: a low value (wide rights) sees every row
    at that level, otherwise the query is narrowed to the user's own
    country/plant/subplant/department ids. Returns the context dict
    consumed by the index view; on any query failure it redirects back to
    a bare index().
    """
    # Local navigation state; intentionally shadows the module-level defaults.
    gintCountryNr = 0
    gintPlantNr = 0
    gintSubPlantNr = 0
    gintDepartmentNr = 0
    intScreenWidth = 0
    arrViews = dict()
    strLevel = ''
    strLevelUrl = ''
    strCountry = ''
    strPlant = ''
    strSubPlant = ''
    strDepartment = ''
    formtable = ''
    # Best-effort parse of the query string: missing or non-numeric vars
    # simply keep their zero defaults (the bare except is deliberate).
    try:
        intScreenWidth = int(request.vars['screenwidth'])
        gintCountryNr = int(request.vars['countrynr'])
        gintPlantNr = int(request.vars['plantnr'])
        gintSubPlantNr = int(request.vars['subplantnr'])
        gintDepartmentNr = int(request.vars['departmentnr'])
    except:
        pass
    blnError = False
    try:
        if gintCountryNr == 0:
            # Level 1: no country chosen yet -- list countries.
            # Right id <= 10 sees all countries, otherwise only the user's own.
            if auth.user.fldOeeUserRightID <= 10:
                arrViews = dboee(dboee.tblOee_Country).select(dboee.tblOee_Country.fldOeeCountryNr, \
                                                              dboee.tblOee_Country.fldOeeCountryDescription, \
                                                              dboee.tblOee_Country.fldOeeCountryInformation, \
                                                              orderby=dboee.tblOee_Country.fldOeeCountryDescription)
            else:
                arrViews = dboee((dboee.tblOee_Country.fldOeeCountryNr == auth.user.fldOeeCountryID)).select(dboee.tblOee_Country.fldOeeCountryNr, \
                                                                                                             dboee.tblOee_Country.fldOeeCountryDescription, \
                                                                                                             dboee.tblOee_Country.fldOeeCountryInformation, \
                                                                                                             orderby=dboee.tblOee_Country.fldOeeCountryDescription)
            strLevelUrl = 'index?screenwidth=' + str(intScreenWidth) + '&countrynr='
            strLevel = 'Country'
        else:
            if gintPlantNr == 0:
                # Level 2: country chosen -- list its plants.
                if auth.user.fldOeeUserRightID <= 20:
                    arrViews = dboee(dboee.tblOee_Plant.fldOeeCountryID == gintCountryNr).select(dboee.tblOee_Plant.fldOeePlantNr, \
                                                                                                 dboee.tblOee_Plant.fldOeePlantDescription, \
                                                                                                 dboee.tblOee_Plant.fldOeePlantInformation, \
                                                                                                 orderby=dboee.tblOee_Plant.fldOeePlantDescription)
                else:
                    arrViews = dboee((dboee.tblOee_Plant.fldOeeCountryID == gintCountryNr) & \
                                     (dboee.tblOee_Plant.fldOeePlantNr == auth.user.fldOeePlantID)).select(dboee.tblOee_Plant.fldOeePlantNr, \
                                                                                                           dboee.tblOee_Plant.fldOeePlantDescription, \
                                                                                                           dboee.tblOee_Plant.fldOeePlantInformation, \
                                                                                                           orderby=dboee.tblOee_Plant.fldOeePlantDescription)
                strCountry = dboee(dboee.tblOee_Country.fldOeeCountryNr == gintCountryNr).select()[0].get('fldOeeCountryDescription')
                strLevelUrl = 'index?screenwidth=' + str(intScreenWidth) + '&countrynr=' + str(gintCountryNr) + '&plantnr='
                strLevel = 'Plant'
            else:
                if gintSubPlantNr == 0:
                    # Level 3: plant chosen -- list its sub-plants.
                    if auth.user.fldOeeUserRightID <= 30:
                        arrViews = dboee((dboee.tblOee_SubPlant.fldOeeCountryID == gintCountryNr) & \
                                         (dboee.tblOee_SubPlant.fldOeePlantID == gintPlantNr)).select(dboee.tblOee_SubPlant.fldOeeSubPlantNr, \
                                                                                                      dboee.tblOee_SubPlant.fldOeeSubPlantDescription, \
                                                                                                      dboee.tblOee_SubPlant.fldOeeSubPlantInformation, \
                                                                                                      orderby=dboee.tblOee_SubPlant.fldOeeSubPlantDescription)
                    else:
                        arrViews = dboee((dboee.tblOee_SubPlant.fldOeeCountryID == gintCountryNr) & \
                                         (dboee.tblOee_SubPlant.fldOeePlantID == gintPlantNr) & \
                                         (dboee.tblOee_SubPlant.fldOeeSubPlantNr == auth.user.fldOeeSubPlantID)).select(dboee.tblOee_SubPlant.fldOeeSubPlantNr, \
                                                                                                                        dboee.tblOee_SubPlant.fldOeeSubPlantDescription, \
                                                                                                                        dboee.tblOee_SubPlant.fldOeeSubPlantInformation, \
                                                                                                                        orderby=dboee.tblOee_SubPlant.fldOeeSubPlantDescription)
                    strCountry = dboee(dboee.tblOee_Country.fldOeeCountryNr == gintCountryNr).select()[0].get('fldOeeCountryDescription')
                    strPlant = dboee(dboee.tblOee_Plant.fldOeePlantNr == gintPlantNr).select()[0].get('fldOeePlantDescription')
                    strLevelUrl = 'index?screenwidth=' + str(intScreenWidth) + '&countrynr=' + str(gintCountryNr) + '&plantnr=' + str(gintPlantNr) + '&subplantnr='
                    strLevel = 'SubPlant'
                else:
                    if gintDepartmentNr == 0:
                        # Level 4: sub-plant chosen -- list its departments.
                        # Next URL switches to the machselect action.
                        if auth.user.fldOeeUserRightID <= 40:
                            arrViews = dboee((dboee.tblOee_Department.fldOeeCountryID == gintCountryNr) & \
                                             (dboee.tblOee_Department.fldOeePlantID == gintPlantNr) & \
                                             (dboee.tblOee_Department.fldOeeSubPlantID == gintSubPlantNr)).select(dboee.tblOee_Department.fldOeeDepartmentNr, \
                                                                                                                  dboee.tblOee_Department.fldOeeDepartmentDescription, \
                                                                                                                  dboee.tblOee_Department.fldOeeDepartmentInformation, \
                                                                                                                  orderby=dboee.tblOee_Department.fldOeeDepartmentDescription)
                        else:
                            arrViews = dboee((dboee.tblOee_Department.fldOeeCountryID == gintCountryNr) & \
                                             (dboee.tblOee_Department.fldOeePlantID == gintPlantNr) & \
                                             (dboee.tblOee_Department.fldOeeSubPlantID == gintSubPlantNr) & \
                                             (dboee.tblOee_Department.fldOeeDepartmentNr == auth.user.fldOeeDepartmentID)).select(dboee.tblOee_Department.fldOeeDepartmentNr, \
                                                                                                                                  dboee.tblOee_Department.fldOeeDepartmentDescription, \
                                                                                                                                  dboee.tblOee_Department.fldOeeDepartmentInformation, \
                                                                                                                                  orderby=dboee.tblOee_Department.fldOeeDepartmentDescription)
                        strCountry = dboee(dboee.tblOee_Country.fldOeeCountryNr == gintCountryNr).select()[0].get('fldOeeCountryDescription')
                        strPlant = dboee(dboee.tblOee_Plant.fldOeePlantNr == gintPlantNr).select()[0].get('fldOeePlantDescription')
                        strSubPlant = dboee(dboee.tblOee_SubPlant.fldOeeSubPlantNr == gintSubPlantNr).select()[0].get('fldOeeSubPlantDescription')
                        strLevelUrl = 'machselect?screenwidth=' + str(intScreenWidth) + '&countrynr=' + str(gintCountryNr) + '&plantnr=' + str(gintPlantNr) + '&subplantnr=' + str(gintSubPlantNr) + '&departmentnr='
                        strLevel = 'Department'
                    else:
                        # Level 5: department chosen -- list its machines.
                        # NOTE(review): both right-id branches run the same
                        # query here -- confirm whether a narrower filter was
                        # intended for right id > 50.
                        if auth.user.fldOeeUserRightID <= 50:
                            arrViews = dboee((dboee.tblOee_Machine.fldOeeCountryID == gintCountryNr) & \
                                             (dboee.tblOee_Machine.fldOeePlantID == gintPlantNr) & \
                                             (dboee.tblOee_Machine.fldOeeSubPlantID == gintSubPlantNr) & \
                                             (dboee.tblOee_Machine.fldOeeDepartmentID == gintDepartmentNr)).select(dboee.tblOee_Machine.fldOeeMachineNr, \
                                                                                                                   dboee.tblOee_Machine.fldOeeMachineDescription, \
                                                                                                                   dboee.tblOee_Machine.fldOeeMachineInformation)
                        else:
                            arrViews = dboee((dboee.tblOee_Machine.fldOeeCountryID == gintCountryNr) & \
                                             (dboee.tblOee_Machine.fldOeePlantID == gintPlantNr) & \
                                             (dboee.tblOee_Machine.fldOeeSubPlantID == gintSubPlantNr) & \
                                             (dboee.tblOee_Machine.fldOeeDepartmentID == gintDepartmentNr)).select(dboee.tblOee_Machine.fldOeeMachineNr, \
                                                                                                                   dboee.tblOee_Machine.fldOeeMachineDescription, \
                                                                                                                   dboee.tblOee_Machine.fldOeeMachineInformation)
                        #define including and and and
                        strCountry = dboee(dboee.tblOee_Country.fldOeeCountryNr == gintCountryNr).select()[0].get('fldOeeCountryDescription')
                        strPlant = dboee((dboee.tblOee_Plant.fldOeeCountryID == gintCountryNr) & \
                                         (dboee.tblOee_Plant.fldOeePlantNr == gintPlantNr)).select()[0].get('fldOeePlantDescription')
                        strSubPlant = dboee((dboee.tblOee_SubPlant.fldOeeCountryID == gintCountryNr) & \
                                            (dboee.tblOee_SubPlant.fldOeePlantID == gintPlantNr) & \
                                            (dboee.tblOee_SubPlant.fldOeeSubPlantNr == gintSubPlantNr)).select()[0].get('fldOeeSubPlantDescription')
                        strDepartment = dboee((dboee.tblOee_Department.fldOeeCountryID == gintCountryNr) & \
                                              (dboee.tblOee_Department.fldOeePlantID == gintPlantNr) & \
                                              (dboee.tblOee_Department.fldOeeSubPlantID == gintSubPlantNr) & \
                                              (dboee.tblOee_Department.fldOeeDepartmentNr == gintDepartmentNr)).select()[0].get('fldOeeDepartmentDescription')
                        strLevelUrl = 'machdetails?screenwidth=' + str(intScreenWidth) + '&country='
                        strLevel = 'Machine'
    except:
        # Any DAL/lookup failure (e.g. empty select()[0]) falls through to a
        # redirect below rather than raising a ticket.
        blnError = True
    if blnError == True:
        redirect(URL('index'))
    # Context dict for the view: rows to list, navigation URLs/labels and
    # the human-readable names of the already-selected hierarchy levels.
    return dict(arrViews = arrViews, \
                strLevelUrl = strLevelUrl, \
                gintCountryNr = gintCountryNr, \
                gintPlantNr = gintPlantNr, \
                gintSubPlantNr = gintSubPlantNr, \
                gintDepartmentNr = gintDepartmentNr, \
                strLevel = strLevel, \
                strCountry = strCountry, \
                strPlant = strPlant, \
                strSubPlant = strSubPlant, \
                strDepartment = strDepartment, \
                intScreenWidth = intScreenWidth, \
                formtable = formtable)
@auth.requires_login()
def add():
gintCountryNr = 0
gintPlantNr = 0
gintSubPlantNr = 0
gintDepartmentNr = 0
form = SQLFORM.factory(Field('dummy', 'id'))
row = ()
strLevel = ''
strCountry = ''
strPlant = ''
strSubPlant = ''
try:
intLevel = int(request.vars['lvl'])
except:
intLevel = 0
try:
strLevel = str(request.vars['level'])
gintCountryNr = int(request.vars['countrynr'])
gintPlantNr = int(request.vars['plantnr'])
gintSubPlantNr = int(request.vars['subplantnr'])
gintDepartmentNr = int(request.vars['departmentnr'])
except:
pass
if strLevel == 'None':
strLevel = 'Country'
if strLevel == 'Country':
row = dboee(dboee.tblOee_Country).select(orderby=~dboee.tblOee_Country.fldOeeCountryNr, limitby=(0,1))
dboee.tblOee_Country.fldOeeCountryNr.default = row[0].fldOeeCountryNr + 1
form = SQLFORM(dboee.tblOee_Country)
if strLevel == 'Plant':
row = dboee(dboee.tblOee_Plant).select(orderby=~dboee.tblOee_Plant.fldOeePlantNr, limitby=(0,1))
dboee.tblOee_Plant.fldOeePlantNr.default = row[0].fldOeePlantNr + 1
dboee.tblOee_Plant.fldOeeCountryID.default = gintCountryNr
form = SQLFORM(dboee.tblOee_Plant)
if strLevel == 'SubPlant':
row = dboee(dboee.tblOee_SubPlant).select(orderby=~dboee.tblOee_SubPlant.fldOeeSubPlantNr, limitby=(0,1))
dboee.tblOee_SubPlant.fldOeeSubPlantNr.default = row[0].fldOeeSubPlantNr + 1
dboee.tblOee_SubPlant.fldOeeCountryID.default = gintCountryNr
dboee.tblOee_SubPlant.fldOeePlantID.default = gintPlantNr
if intLevel == 1:
dboee.tblOee_SubPlant.fldOeePlantID.requires = IS_IN_DB(dboee(dboee.tblOee_Plant.fldOeeCountryID == gintCountryNr), \
dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
form = SQLFORM(dboee.tblOee_SubPlant)
if strLevel == 'Department':
row = dboee(dboee.tblOee_Department).select(orderby=~dboee.tblOee_Department.fldOeeDepartmentNr, limitby=(0,1))
dboee.tblOee_Department.fldOeeDepartmentNr.default = row[0].fldOeeDepartmentNr + 1
dboee.tblOee_Department.fldOeeCountryID.default = gintCountryNr
dboee.tblOee_Department.fldOeePlantID.default = gintPlantNr
dboee.tblOee_Department.fldOeeSubPlantID.default = gintSubPlantNr
if intLevel == 1:
dboee.tblOee_Department.fldOeePlantID.requires = IS_IN_DB(dboee(dboee.tblOee_Plant.fldOeeCountryID == gintCountryNr), \
dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_Department.fldOeeSubPlantID.requires = IS_IN_DB(dboee((dboee.tblOee_SubPlant.fldOeeCountryID == gintCountryNr)), \
dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
elif intLevel == 2:
dboee.tblOee_Department.fldOeePlantID.requires = IS_IN_DB(dboee(dboee.tblOee_Plant.fldOeeCountryID == gintCountryNr), \
dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_Department.fldOeeSubPlantID.requires = IS_IN_DB(dboee((dboee.tblOee_SubPlant.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_SubPlant.fldOeePlantID == gintPlantNr)), \
dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
form = SQLFORM(dboee.tblOee_Department)
if strLevel == 'ActivityGroup':
row = dboee(dboee.tblOee_ActivityGroup).select(orderby=~dboee.tblOee_ActivityGroup.fldOeeActivityGroupNr, limitby=(0,1))
dboee.tblOee_ActivityGroup.fldOeeActivityGroupNr.default = row[0].fldOeeActivityGroupNr + 1
dboee.tblOee_ActivityGroup.fldOeeCountryID.default = gintCountryNr
dboee.tblOee_ActivityGroup.fldOeePlantID.default = gintPlantNr
dboee.tblOee_ActivityGroup.fldOeeSubPlantID.default = gintSubPlantNr
dboee.tblOee_ActivityGroup.fldOeeDepartmentID.default = gintDepartmentNr
dboee.tblOee_ActivityGroup.fldOeePlantID.requires = IS_IN_DB(dboee(dboee.tblOee_Plant.fldOeeCountryID == gintCountryNr), \
dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_ActivityGroup.fldOeeSubPlantID.requires = IS_IN_DB(dboee((dboee.tblOee_SubPlant.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_SubPlant.fldOeePlantID == gintPlantNr)), \
dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_ActivityGroup.fldOeeDepartmentID.requires = IS_IN_DB(dboee((dboee.tblOee_Department.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_Department.fldOeePlantID == gintPlantNr) & \
(dboee.tblOee_Department.fldOeeSubPlantID == gintSubPlantNr)), \
dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
form = SQLFORM(dboee.tblOee_ActivityGroup)
if strLevel == 'Activity':
row = dboee(dboee.tblOee_Activity).select(orderby=~dboee.tblOee_Activity.fldOeeActivityNr, limitby=(0,1))
dboee.tblOee_Activity.fldOeeActivityNr.default = row[0].fldOeeActivityNr + 1
dboee.tblOee_Activity.fldOeeCountryID.default = gintCountryNr
dboee.tblOee_Activity.fldOeePlantID.default = gintPlantNr
dboee.tblOee_Activity.fldOeeSubPlantID.default = gintSubPlantNr
dboee.tblOee_Activity.fldOeeDepartmentID.default = gintDepartmentNr
dboee.tblOee_Activity.fldOeePlantID.requires = IS_IN_DB(dboee(dboee.tblOee_Plant.fldOeeCountryID == gintCountryNr), \
dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_Activity.fldOeeSubPlantID.requires = IS_IN_DB(dboee((dboee.tblOee_SubPlant.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_SubPlant.fldOeePlantID == gintPlantNr)), \
dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_Activity.fldOeeDepartmentID.requires = IS_IN_DB(dboee((dboee.tblOee_Department.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_Department.fldOeePlantID == gintPlantNr) & \
(dboee.tblOee_Department.fldOeeSubPlantID == gintSubPlantNr)), \
dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.tblOee_Activity.fldOeeActivityGroupID.requires = IS_IN_DB(dboee((dboee.tblOee_ActivityGroup.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_ActivityGroup.fldOeePlantID == gintPlantNr) & \
(dboee.tblOee_ActivityGroup.fldOeeSubPlantID == gintSubPlantNr) & \
(dboee.tblOee_ActivityGroup.fldOeeDepartmentID == gintDepartmentNr)), \
dboee.tblOee_ActivityGroup.fldOeeActivityGroupNr, '%(fldOeeActivityGroupDescription)s')
form = SQLFORM(dboee.tblOee_Activity)
if strLevel == 'MachineActivity':
intMachineID = 0
try:
intMachineID = int(request.vars['machineid'])
except:
pass
rows = dboee((dboee.tblOee_MachineActivity.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_MachineActivity.fldOeePlantID == gintPlantNr) & \
(dboee.tblOee_MachineActivity.fldOeeSubPlantID == gintSubPlantNr) & \
(dboee.tblOee_MachineActivity.fldOeeDepartmentID == gintDepartmentNr) & \
(dboee.tblOee_MachineActivity.fldOeeMachineID == intMachineID)).select(orderby=~dboee.tblOee_MachineActivity.fldOeeMachineActivitySortOrder, limitby=(0,1))
for row in rows:
dboee.tblOee_MachineActivity.fldOeeMachineActivitySortOrder.default = rows[0].fldOeeMachineActivitySortOrder + 1
lstSortOrder = []
for intX in range(1, 101):
lstSortOrder.append(intX)
dboee.tblOee_MachineActivity.fldOeeMachineActivitySortOrder.requires = IS_IN_SET(lstSortOrder)
dboee.tblOee_MachineActivity.fldOeeCountryID.default = gintCountryNr
dboee.tblOee_MachineActivity.fldOeePlantID.default = gintPlantNr
dboee.tblOee_MachineActivity.fldOeeSubPlantID.default = gintSubPlantNr
dboee.tblOee_MachineActivity.fldOeeDepartmentID.default = gintDepartmentNr
dboee.tblOee_MachineActivity.fldOeeMachineID.default = intMachineID
dboee.tblOee_MachineActivity.fldOeePlantID.requires = IS_IN_DB(dboee(dboee.tblOee_Plant.fldOeeCountryID == gintCountryNr), \
dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_MachineActivity.fldOeeSubPlantID.requires = IS_IN_DB(dboee((dboee.tblOee_SubPlant.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_SubPlant.fldOeePlantID == gintPlantNr)), \
dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_MachineActivity.fldOeeDepartmentID.requires = IS_IN_DB(dboee((dboee.tblOee_Department.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_Department.fldOeePlantID == gintPlantNr) & \
(dboee.tblOee_Department.fldOeeSubPlantID == gintSubPlantNr)), \
dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.tblOee_MachineActivity.fldOeeMachineActivityID.requires = IS_IN_DB(dboee((dboee.tblOee_Activity.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_Activity.fldOeePlantID == gintPlantNr) & \
(dboee.tblOee_Activity.fldOeeSubPlantID == gintSubPlantNr)), \
dboee.tblOee_Activity.fldOeeActivityNr, '%(fldOeeActivityDescription)s')
dboee.tblOee_MachineActivity.fldOeeMachineID.requires = IS_IN_DB(dboee((dboee.tblOee_Machine.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_Machine.fldOeePlantID == gintPlantNr) & \
(dboee.tblOee_Machine.fldOeeSubPlantID == gintSubPlantNr) & \
(dboee.tblOee_Machine.fldOeeDepartmentID == gintDepartmentNr)), \
dboee.tblOee_Machine.fldOeeMachineNr, '%(fldOeeMachineDescription)s')
form = SQLFORM(dboee.tblOee_MachineActivity)
if strLevel == 'Machines':
row = dboee(dboee.tblOee_Machine).select(orderby=~dboee.tblOee_Machine.fldOeeMachineNr, limitby=(0,1))
dboee.tblOee_Machine.fldOeeMachineNr.default = row[0].fldOeeMachineNr + 1
dboee.tblOee_Machine.fldOeeCountryID.default = gintCountryNr
dboee.tblOee_Machine.fldOeePlantID.default = gintPlantNr
dboee.tblOee_Machine.fldOeeSubPlantID.default = gintSubPlantNr
dboee.tblOee_Machine.fldOeeDepartmentID.default = gintDepartmentNr
rows = dboee((dboee.tblOee_Machine.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_Machine.fldOeePlantID == gintPlantNr) & \
(dboee.tblOee_Machine.fldOeeSubPlantID == gintSubPlantNr) & \
(dboee.tblOee_Machine.fldOeeDepartmentID == gintDepartmentNr)).select(orderby=~dboee.tblOee_Machine.fldOeeMachineSortOrder, limitby=(0,1))
for row in rows:
dboee.tblOee_Machine.fldOeeMachineSortOrder.default = rows[0].fldOeeMachineSortOrder + 1
lstSortOrder = []
for intX in range(1, 101):
lstSortOrder.append(intX)
dboee.tblOee_Machine.fldOeeMachineSortOrder.requires = IS_IN_SET(lstSortOrder)
dboee.tblOee_Machine.fldOeePlantID.requires = IS_IN_DB(dboee(dboee.tblOee_Plant.fldOeeCountryID == gintCountryNr), \
dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_Machine.fldOeeSubPlantID.requires = IS_IN_DB(dboee((dboee.tblOee_SubPlant.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_SubPlant.fldOeePlantID == gintPlantNr)), \
dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_Machine.fldOeeDepartmentID.requires = IS_IN_DB(dboee((dboee.tblOee_Department.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_Department.fldOeePlantID == gintPlantNr) & \
(dboee.tblOee_Department.fldOeeSubPlantID == gintSubPlantNr)), \
dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.tblOee_Machine.fldOeeModuleID.requires = IS_IN_DB(dboee((dboee.tblOee_Module.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_Module.fldOeePlantID == gintPlantNr) & \
(dboee.tblOee_Module.fldOeeSubPlantID == gintSubPlantNr) & \
(dboee.tblOee_Module.fldOeeDepartmentID == gintDepartmentNr)), \
dboee.tblOee_Module.fldOeeModuleNr, '%(fldOeeModuleDescription)s')
dboee.tblOee_Machine.fldOeeMachineShortBreakID.requires = IS_IN_DB(dboee((dboee.tblOee_MachineShortbreak.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_MachineShortbreak.fldOeePlantID == gintPlantNr) & \
(dboee.tblOee_MachineShortbreak.fldOeeSubPlantID == gintSubPlantNr) & \
(dboee.tblOee_MachineShortbreak.fldOeeDepartmentID == gintDepartmentNr)), \
dboee.tblOee_MachineShortbreak.fldOeeMachineShortBreakNr, '%(fldOeeMachineShortBreakDescription)s')
dboee.tblOee_Machine.fldOeeMachineUndefinedProdID.requires = IS_IN_DB(dboee((dboee.tblOee_MachineUndefinedProduction.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_MachineUndefinedProduction.fldOeePlantID == gintPlantNr) & \
(dboee.tblOee_MachineUndefinedProduction.fldOeeSubPlantID == gintSubPlantNr) & \
(dboee.tblOee_MachineUndefinedProduction.fldOeeDepartmentID == gintDepartmentNr)), \
dboee.tblOee_MachineUndefinedProduction.fldOeeMachineUndefinedProductionNr, '%(fldOeeMachineUndefinedProductionDescription)s')
dboee.tblOee_Machine.fldOeeMachineUndefinedStandStillID.requires = IS_IN_DB(dboee((dboee.tblOee_MachineUndefinedStandstill.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_MachineUndefinedStandstill.fldOeePlantID == gintPlantNr) & \
(dboee.tblOee_MachineUndefinedStandstill.fldOeeSubPlantID == gintSubPlantNr) & \
(dboee.tblOee_MachineUndefinedStandstill.fldOeeDepartmentID == gintDepartmentNr)), \
dboee.tblOee_MachineUndefinedStandstill.fldOeeMachineUndefinedStandstillNr, '%(fldOeeMachineUndefinedStandstillDescription)s')
dboee.tblOee_Machine.fldOeeMachineUnscheduledID.requires = IS_IN_DB(dboee((dboee.tblOee_MachineUnscheduled.fldOeeCountryID == gintCountryNr) & \
(dboee.tblOee_MachineUnscheduled.fldOeePlantID == gintPlantNr) & \
(dboee.tblOee_MachineUnscheduled.fldOeeSubPlantID == gintSubPlantNr) | |
<filename>overtime/algorithms/sliding_window_temporal_vertex_cover.py
import overtime as ot
import copy
import itertools
# Give a vertex set and return all subsets
def getSubSet(vertexSet):
    """
    Return every subset of a set.

    Subsets are enumerated by bitmask, so the output follows binary
    counting order: [], [v0], [v1], [v0, v1], ...

    Parameter(s):
    -------------
    vertexSet : List
        A list with nodes

    Returns:
    --------
    subset: List
        A list containing all subsets of the original set

    Example(s):
    -----------
        vertexSet = ['A','B','C']
        subSet = getSubSet(vertexSet)

    See also:
    ---------
    """
    size = len(vertexSet)
    # Mask bit `bit` decides whether vertexSet[bit] belongs to the subset.
    return [
        [vertexSet[bit] for bit in range(size) if (mask >> bit) % 2]
        for mask in range(2 ** size)
    ]
# Find all possible combinations of A1, A2, ... ,A_delta
def delta_A_union(subset, delta):
    """
    Return all combinations of several sets.

    Builds the Cartesian product of *subset* with itself ``delta`` times;
    each combination is returned as a dict mapping the window position
    (1..delta) to the chosen set.

    Parameter(s):
    -------------
    subset : List
        A list with several sets
    delta : int
        Size of sliding window

    Returns:
    --------
    All combinations of delta set: List
        A list containing all combinations of delta sets

    Example(s):
    -----------
        subset = [[], ['a'], ['b'], ['a', 'b']]
        combination = delta_A_union(subset, delta)

    See also:
    ---------
    """
    # product(..., repeat=delta) enumerates choices in the same order as
    # taking the product of delta copies of `subset`.
    return [
        {position + 1: choice[position] for position in range(delta)}
        for choice in itertools.product(subset, repeat=delta)
    ]
# Check whether a combination of A_1,...A_delta is the vertex cover set of temporal graph
# Returns True if it is vertex cover set of temporal graph
def check_is_vertex_cover(temporalgraph, unionset):
    """
    Return whether a set is a vertex cover set for a temporal graph.

    Works on a deep copy of the graph: every vertex listed in *unionset*
    is removed, and the set is a cover exactly when no labelled edge
    remains afterwards.

    Parameter(s):
    -------------
    temporalgraph : TemporalGraph
        A temporal graph.
    unionset : Dictionary
        A dictionary containing vertex and time slot information

    Returns:
    --------
    True or False: bool
        Judge whether it is the vertex cover set of the temporal graph

    Example(s):
    -----------
        graph = Graph('test_graph', data=CsvInput('./network.csv'))
        unionset = {1: ['A', 'C'], 2: ['B'],3:[]}
        check_is_vertex_cover(temporalgraph, unionset)

    See also:
    ---------
    """
    candidate = copy.deepcopy(temporalgraph)
    # Flatten the per-slot vertex lists and delete each vertex in turn.
    for vertex in itertools.chain.from_iterable(unionset.values()):
        candidate.remove_node(vertex)
    return candidate.edges.labels() == []
# Find the minimum cardinality vertex cover set in a big set which contain all of vertex cover set
# and return this set and the minimum cardinality
def get_min_cardinality(vc_set):
    """
    Return the minimum-cardinality vertex cover from *vc_set*.

    The cardinality of an entry is the total number of vertices summed
    over all of its time slots. Ties are broken by position: the first
    entry reaching the minimal total wins.

    Parameter(s):
    -------------
    vc_set : List
        A list with several vertex cover sets (dicts mapping time slot to
        a list of vertices)

    Returns:
    --------
    (min_c, min_set) : tuple
        min_c is the minimal total cardinality, min_set the corresponding
        vertex cover dictionary.

    Example(s):
    -----------
        vc_set = [{1: ['A', 'C'], 2: ['B'],3:[]},
                  {1: ['A'], 2: ['B', 'C'],3:[]},
                  {1: [], 2: ['a','b','c'],3:[]},
                  {1: ['a','d'], 2: ['b'],3:[]},
                  {1: [], 2: ['a','b'],3:[]}]
        min_c, min_set = get_min_cardinality(vc_set)

    See also:
    ---------
    """
    # BUG FIX: the previous implementation did
    #   min(count.items(), key=lambda x: x[1])
    # which compares the per-slot length *lists lexicographically*, not by
    # total size -- e.g. [1, 3] would beat [2, 0] even though 1+3 > 2+0.
    # Compare the summed cardinality instead.
    totals = [sum(len(slot) for slot in entry.values()) for entry in vc_set]
    best_index = min(range(len(vc_set)), key=totals.__getitem__)
    return totals[best_index], vc_set[best_index]
# Vertex cover algorithm for static graph
def vertex_cover(staticGraph):
    """
    Return a minimum vertex cover of a static graph by exhaustive search.

    Parameter(s):
    -------------
    staticGraph : Graph
        A graph with nodes and edges
    Returns:
    --------
    Minimum vertex cover set: List
        A smallest list of vertices touching every edge of the graph
    Example(s):
    -----------
    graph = Graph('test_graph', data=CsvInput('./network.csv'))
    vertexCover = vertex_cover(graph)
    See also:
    ---------
    SW_TVC
    d_approximation_swtvc
    """
    # Enumerate every vertex subset and keep those that cover all edges
    covers = []
    for candidate in getSubSet(staticGraph.nodes.labels()):
        trial = copy.deepcopy(staticGraph)
        for vertex in candidate:
            trial.remove_node(vertex)
        # candidate is a cover when no labelled edge remains
        if trial.edges.labels() == []:
            covers.append(candidate)
    # Stable sort by size, then take the first (smallest) cover found
    covers.sort(key=len)
    return covers[0]
# Main algorithm
def SW_TVC(temporalGraph, delta):
    """
    A method which returns the smallest cardinality of a sliding delta-window
    temporal vertex cover in a temporal graph
    Parameter(s):
    -------------
    temporalGraph : TemporalGraph
        A temporal graph.
    delta : int
        Size of sliding window
    Returns:
    --------
    Smallest cardinality: int
        The smallest cardinality of a sliding delta-window temporal vertex
        cover in a temporal graph (-1 marks a window with no cover)
    Example(s):
    -----------
    temporalGraph = TemporalGraph('test_network', data=CsvInput('./network.csv'))
    smallestCardinality = SW_TVC(temporalGraph, 2)
    See also:
    ---------
    d_approximation_swtvc
    """
    # Initialize the swtvc set and F function of smallest cardinality swtvc
    swtvc = {}
    f_t_A = {}
    lifeTime = len(temporalGraph.edges.timespan())
    # When delta = 1, it becomes a 1-tvc problem:
    # cover each snapshot independently and accumulate the cardinality
    if delta == 1:
        for t in temporalGraph.edges.timespan():
            subGraph = temporalGraph.get_snapshot(t)
            swtvc.update({t: vertex_cover(subGraph)})
        min_ca = 0
        for t, i in swtvc.items():
            min_ca = min_ca + len(i)
            f_t_A.update({t: min_ca})
    # When 1 < delta <= lifeTime, execute main algorithm
    elif 1 < delta <= lifeTime:
        # Firstly, find all vertex subsets of temporal graph vertices
        subSet = getSubSet(temporalGraph.nodes.labels())
        # Bug fix: "[[]] * n" replicates ONE list object n times, so every
        # window shared (and polluted) the same cover list; build n
        # independent lists instead.
        vertex_cover_set = [[] for _ in range(lifeTime - delta + 2)]
        # In each sliding window
        for t in range(1, lifeTime - delta + 2):
            # Iterate over all possible combinations of A_1,...,A_delta
            for unionA in delta_A_union(subSet, delta):
                subgraph = temporalGraph.get_temporal_subgraph((t, t + delta - 1))
                # Find all vertex cover combinations in G[t,t+delta-1]
                if check_is_vertex_cover(subgraph, unionA):
                    vertex_cover_set[t].append(unionA)
            # If there is a vertex cover set
            if vertex_cover_set[t] != []:
                # First window: take the overall minimum directly
                if t == 1:
                    min_vc = get_min_cardinality(vertex_cover_set[t])
                    min_cardinality = min_vc[0]
                    min_vertex_cover = min_vc[1]
                    f_t_A.update({t: min_cardinality})
                    swtvc = min_vertex_cover
                else:
                    # Keep only combinations whose A_1...A_(delta-1) agree
                    # with A_2...A_delta of the previous window
                    temp = vertex_cover_set[t]
                    for i in range(len(temp) - 1, -1, -1):
                        for j in range(1, delta):
                            if temp[i][j] != swtvc[j + t - 1]:
                                del temp[i]
                                # Bug fix: once temp[i] is deleted, stop
                                # scanning its slots -- continuing re-reads
                                # temp[i], which now refers to a different
                                # element or is out of range.
                                break
                    min_vc = get_min_cardinality(temp)
                    min_vertex_cover = min_vc[1]
                    # Get A_delta from the minimum combination found above
                    delta_A = min_vertex_cover[delta]
                    f_t_A.update({t: f_t_A[t - 1] + len(delta_A)})
                    swtvc.update({t + delta - 1: delta_A})
            # If there is no vertex cover set, mark the window as infeasible
            else:
                f_t_A.update({t: -1})
    else:
        print("Error! delta must in [1,{}]".format(len(temporalGraph.edges.timespan())))
    # return the smallest cardinality of sw-tvc set. if want to return sw-tvc set, use 'return swtvc'
    return f_t_A[lifeTime-delta+1]
    # return swtvc
# Get all temporalgraphs with only one edge e = uv
def get_temporalgraphs_with_single_edge(temporalGraph):
    """
    Split a temporal graph into one single-edge temporal graph per
    underlying edge.

    Parameter(s):
    -------------
    temporalGraph : TemporalGraph
        A temporal graph
    Returns:
    --------
    single-edge temporal graphs: List
        One temporal graph per underlying edge, each keeping only the
        temporal occurrences of that edge
    Example(s):
    -----------
    temporalGraph = TemporalGraph('test_network', data=CsvInput('./network.csv'))
    singleEdgeTGraph = get_temporalgraphs_with_single_edge(temporalGraph)
    See also:
    ---------
    """
    singleEdgeGraphs = []
    # The underlying (static) graph enumerates each edge once
    underlying = temporalGraph.get_underlying_graph()
    for edge_label in underlying.edges.ulabels():
        # uids of every temporal occurrence of this edge
        keep_uids = temporalGraph.edges.get_edge_by_label(edge_label).uids()
        # Copy the original graph and strip every other edge occurrence
        reduced = copy.deepcopy(temporalGraph)
        for uid in reduced.edges.uids():
            if uid not in keep_uids:
                reduced.remove_edge(uid)
        singleEdgeGraphs.append(reduced)
    return singleEdgeGraphs
# SW-TVC on single-edge temporal graphs.
def single_edge_swtvc(temporalgraph, | |
<reponame>jeremyfix/gan_experiments
#!/usr/bin/env python3
# coding: utf-8
# Standard imports
from typing import Optional, Tuple
from functools import reduce
import operator
# External imports
import torch
import torch.nn as nn
def conv_bn_leakyrelu(in_channels, out_channels):
    """
    Conv(3x3, same) - BN - LeakyRelu(0.2)
    """
    kernel = 3
    # bias is redundant before BatchNorm, hence bias=False
    conv = nn.Conv2d(in_channels, out_channels,
                     kernel_size=kernel,
                     stride=1,
                     padding=kernel // 2,
                     bias=False)
    return [conv,
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(negative_slope=0.2)]
def conv_downsampling(channels):
    """
    Conv(3x3, s2) - LeakyRelu(0.2)
    """
    kernel = 3
    # stride-2 convolution halves the spatial resolution
    downsample = nn.Conv2d(channels, channels,
                           kernel_size=kernel,
                           stride=2,
                           padding=kernel // 2,
                           bias=True)
    return [downsample, nn.LeakyReLU(negative_slope=0.2)]
class Discriminator(nn.Module):
    """
    The discriminator network tells if the input image is real or not
    The output logit is supposed to be high(-ly positive) for real images
    and low (highly negative) for fake images
    """
    def __init__(self,
                 img_shape: Tuple[int, int, int],
                 dropout: float,
                 base_c: int) -> None:
        """
        Args:
            img_shape : (C, H, W) image shapes
            dropout (float): the probability of zeroing before the FC layer
            base_c (int): The base number of channels for the discriminator
        """
        super(Discriminator, self).__init__()
        self.img_shape = img_shape
        in_C = img_shape[0]
        ######################
        # START CODING HERE ##
        ######################
        # Definition of the convolutional part of the classifier
        # Hint : conv_bn_leakyrelu() and conv_downsampling() can
        # be useful
        #@TEMPL@self.cnn = None
        #@SOL
        # Note: the output receptive field size is 36 x 36
        # the output representation size is 3 x 3
        # Three stages of (conv-bn-lrelu x2, stride-2 downsample, dropout),
        # widening the channels base_c -> 2*base_c -> 3*base_c
        self.cnn = nn.Sequential(
            *conv_bn_leakyrelu(in_C, base_c),
            *conv_bn_leakyrelu(base_c, base_c),
            *conv_downsampling(base_c),
            nn.Dropout2d(dropout),
            *conv_bn_leakyrelu(base_c, base_c*2),
            *conv_bn_leakyrelu(base_c*2, base_c*2),
            *conv_downsampling(base_c*2),
            nn.Dropout2d(dropout),
            *conv_bn_leakyrelu(base_c*2, base_c*3),
            *conv_bn_leakyrelu(base_c*3, base_c*3),
            *conv_downsampling(base_c*3),
            nn.Dropout2d(dropout)
        )
        #SOL@
        ####################
        # END CODING HERE ##
        ####################
        # Compute the size of the representation by forward propagating
        # a fake tensor; This can be cpu tensor as the model is not yet
        # built and therefore not yet transfered to the GPU
        fake_input = torch.zeros((1, *img_shape))
        out_cnn = self.cnn(fake_input)
        print(f"The output shape of the convolutional part of the "
              f"discriminator is {out_cnn.shape}")
        # Flattened feature count feeding the final linear layer
        num_features = reduce(operator.mul, out_cnn.shape[1:])
        ######################
        # START CODING HERE ##
        ######################
        # The fully connected part of the classifier
        #@TEMPL@self.classif = None
        #@SOL
        # Single linear layer producing one logit per image
        self.classif = nn.Sequential(
            nn.Linear(num_features, 1)
        )
        #SOL@
        ####################
        # END CODING HERE ##
        ####################
        # Run the initialization script
        self.apply(self.init_weights)

    def init_weights(self, m):
        """
        Initialize the weights of the convolutional layers
        with N(0, 0.02) and zero biases (DCGAN-style init)
        """
        with torch.no_grad():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.normal_(m.weight, 0.0, 0.02)
                if m.bias is not None:
                    m.bias.fill_(0.)

    def forward(self,
                X: torch.Tensor) -> torch.Tensor:
        """
        Forward pass of the discriminator
        Args:
            X(torch.Tensor (B, C, H, W)) : The images to classify
        Returns:
            Logits (torch.Tensor (B, )) : The logits
        """
        ######################
        # START CODING HERE ##
        ######################
        # Step 1 - Forward pass through the CNN part
        #@TEMPL@out_cnn = None
        out_cnn = self.cnn(X) #@SOL@
        # Step 2 - "Reshape" the 4D tensor to a 2D tensor
        # Hint : Tensor.view can be of help
        #@TEMPL@input_classif = None
        input_classif = out_cnn.view((out_cnn.shape[0], -1)) #@SOL@
        # Step 3 - Forward pass through the fully connected layers
        #@TEMPL@out_classif = None
        out_classif = self.classif(input_classif) #@SOL@
        ####################
        # END CODING HERE ##
        ####################
        return out_classif.squeeze()
def up_conv_bn_relu(in_channels, out_channels):
    """
    Upsampling with Upsample - Conv
    UpSample(x2) - Conv(3x3) - BN - Relu - Conv(3x3) - BN - Relu
    """
    kernel = 3

    def same_conv(cin, cout):
        # 3x3 "same" convolution; bias dropped since BN follows
        return nn.Conv2d(cin, cout,
                         kernel_size=kernel,
                         stride=1,
                         padding=kernel // 2,
                         bias=False)

    return [
        nn.Upsample(scale_factor=2),
        same_conv(in_channels, out_channels),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(),
        same_conv(out_channels, out_channels),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(),
    ]
def tconv_bn_relu(in_channels, out_channels, ksize, stride, pad, opad):
    """
    Upsampling with transposed convolutions
    TConv2D - BN - LeakyRelu(0.2)
    """
    tconv = nn.ConvTranspose2d(in_channels, out_channels,
                               kernel_size=ksize,
                               stride=stride,
                               padding=pad,
                               output_padding=opad)
    bn = nn.BatchNorm2d(out_channels)
    act = nn.LeakyReLU(negative_slope=0.2)
    return [tconv, bn, act]
class Generator(nn.Module):
    """
    The generator network generates image from random inputs
    """
    def __init__(self,
                 img_shape: Tuple[int, int, int],
                 latent_size: int,
                 base_c: int) -> None:
        """
        Args:
            img_shape : (C, H, W) image shapes
            latent_size (int) : The dimension of the latent space
            base_c (int) : The base number of channels
        """
        super(Generator, self).__init__()
        self.img_shape = img_shape
        self.latent_size = latent_size
        self.base_c = base_c
        H, W = img_shape[1:]
        ######################
        # START CODING HERE ##
        ######################
        # Step 1 - Build the feedforward upscaling network
        #@TEMPL@self.upscale = nn.Sequential()
        #@SOL
        # Project the latent vector to a (4*base_c) x H/4 x W/4 seed map
        # (assumes H and W are divisible by 4 -- see the view() in forward)
        self.upscale = nn.Sequential(
            nn.Linear(self.latent_size, H//4*W//4*self.base_c*4, bias=False),
            nn.BatchNorm1d(H//4*W//4*self.base_c*4),
            nn.ReLU()
        )
        #SOL@
        # Step 2 - Build the convolutional upscaling network
        # Hint : up_conv_bn_relu() might be useful
        #@TEMPL@self.model = nn.Sequential()
        #@SOL
        # Two x2 upsampling stages restore H x W; the final 1x1 conv maps
        # to the image channel count and tanh squashes to [-1, 1]
        self.model = nn.Sequential(
            *up_conv_bn_relu(self.base_c*4, self.base_c*2),
            *up_conv_bn_relu(self.base_c*2, self.base_c),
            nn.Conv2d(self.base_c, self.img_shape[0],
                      kernel_size=1,stride=1, padding=0, bias=True),
            nn.Tanh()
        )
        #SOL@
        ####################
        # END CODING HERE ##
        ####################
        #@SOL
        # Note : size, stride, pad, opad
        # self.model = nn.Sequential(
        #     *tconv_bn_relu2(base_c*4, base_c*2, 5, 1, 2, 0),
        #     # nn.Dropout2d(0.3),
        #     *tconv_bn_relu2(base_c*2, base_c, 5, 2, 2, 1),
        #     # nn.Dropout2d(0.3),
        #     nn.ConvTranspose2d(base_c, 1, 5, 2, 2, 1),
        #     nn.Tanh() # as suggested by [Radford, 2016]
        # )
        #SOL@
        # Initialize the convolutional layers
        self.apply(self.init_weights)

    def init_weights(self, m):
        # DCGAN-style init: conv weights ~ N(0, 0.02), zero biases
        with torch.no_grad():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.normal_(m.weight, 0.0, 0.02)
                if m.bias is not None:
                    m.bias.fill_(0.)

    def forward(self,
                X: Optional[torch.Tensor] = None,
                batch_size: Optional[float] = None) -> torch.Tensor:
        """
        Forward pass of the generator. You can either provide a noise
        input vector or specify the batch_size to let it generate the input
        Args:
            X (torch.Tensor, optional): The input noise batch
            batch_size (int, optional): The number of samples to generate
        """
        # X is expected to be a 2D tensor (B, L)
        if X is None:
            assert(batch_size is not None)
            # sample the latent noise on the same device as the parameters
            device = next(self.parameters()).device
            X = torch.randn(batch_size, self.latent_size).to(device)
        else:
            if len(X.shape) != 2:
                raise RuntimeError("Expected a 2D tensor as input to the "
                                   f" generator got a {len(X.shape)}D tensor.")
        ######################
        # START CODING HERE ##
        ######################
        # Step 1 - Forward pass through the first linear layers
        #          to generate the seed image
        #@TEMPL@upscaled = None
        upscaled = self.upscale(X) #@SOL@
        # Step 2 - "Reshape" the upscaled image as a 4D tensor
        # Hint : use the view method
        #@TEMPL@reshaped = None
        reshaped = upscaled.view(-1, self.base_c*4, self.img_shape[1]//4, self.img_shape[2]//4) #@SOL@
        # Step 3 : Forward pass through the last convolutional part
        #          to generate the image
        #@TEMPL@out = None
        out = self.model(reshaped) #@SOL@
        ####################
        # END CODING HERE ##
        ####################
        return out
class GAN(nn.Module):
    """
    Bundles a Discriminator and a Generator into a single module.
    forward() evaluates the discriminator either on a real batch or on a
    freshly generated fake batch.
    """
    def __init__(self,
                 img_shape: Tuple[int, int, int],
                 dropout: float,
                 discriminator_base_c: int,
                 latent_size: int,
                 generator_base_c: int) -> None:
        """
        Args:
            img_shape : (C, H, W) image shapes
            dropout (float): The probability of zeroing before the FC layers
            discriminator_base_c (int) : The base number of channels for
                                         the discriminator
            latent_size (int) : The size of the latent space for the generator
            generator_base_c (int) : The base number of channels for the
                                     generator
        """
        super(GAN, self).__init__()
        self.img_shape = img_shape
        self.discriminator = Discriminator(img_shape,
                                           dropout,
                                           discriminator_base_c)
        self.generator = Generator(img_shape,
                                   latent_size,
                                   generator_base_c)

    def forward(self,
                X: Optional[torch.Tensor],
                batch_size: Optional[float]):
        """
        Given true images, returns the generated tensors
        and the logits of the discriminator for both the generated tensors
        and the true tensors
        Args:
            X (torch.Tensor) : a real image or None if we just
                               want the logits for the generated images
            batch_size (int) : the batch to consider when generating
                               fake images
        Returns:
            (logits, images) for either the real or the generated batch
        """
        # Exactly one of X / batch_size must be provided
        if X is None and batch_size is None:
            raise RuntimeError("Not both X and batch_size can be None")
        if X is not None and batch_size is not None:
            raise RuntimeError("Not both X and batch_size can be not None")
        if X is not None:
            ######################
            # START CODING HERE ##
            ######################
            # An input tensor of real images is provided
            # we compute its logits
            # 1 line
            #@TEMPL@real_logits = None
            real_logits = self.discriminator(X) #@SOL@
            ####################
            # END CODING HERE ##
            ####################
            return real_logits, X
        else:
            ######################
            # START CODING HERE ##
            ######################
            # No input tensor is provided. We generate batch_size fake images
            # and evaluate its logits
            # 2 lines
            #@TEMPL@fake_images = None
            #@TEMPL@fake_logits = None
            #@SOL
            fake_images = self.generator(X=None, batch_size=batch_size)
            fake_logits = self.discriminator(fake_images)
            #SOL@
            ####################
            # END CODING HERE ##
            ####################
            return fake_logits, fake_images
#@SOL
def test_tconv():
layers = nn.Sequential(
nn.Conv2d(20, 10, kernel_size=3, stride=1, padding=2)
)
print(layers)
inputs = torch.zeros((1, 20, 2, 2))
outputs = layers(inputs)
print(outputs.shape)
imagify = nn.Linear(100, 7*7*10)
conv1 = nn.ConvTranspose2d(10, 10,
kernel_size=5,
stride=1,
padding=2)
conv2 = nn.ConvTranspose2d(10, 10,
kernel_size=5,
stride=2,
padding=2,
output_padding=1)
conv3 = nn.ConvTranspose2d(10, 1,
kernel_size=5,
stride=2,
padding=2, output_padding=1)
X = torch.randn(64, 100)
X = imagify(X).view(-1, 10, 7, 7)
print('--')
print(X.shape)
| |
block to the committed chain, this function extends the
chain by updating the most recent committed block field
Args:
tblock (Transaction.TransactionBlock) -- block of transactions to
be committed
"""
with self._txn_lock:
logger.info('blkid: %s - commit block from %s with previous '
'blkid: %s',
tblock.Identifier[:8],
self.gossip.node_id_to_name(tblock.OriginatorID),
tblock.PreviousBlockID[:8])
assert tblock.Status == transaction_block.Status.valid
# Remove all of the newly committed transactions from the
# pending list and put them in the committed list
for txnid in tblock.TransactionIDs:
assert txnid in self.transaction_store
if txnid in self.pending_transactions:
del self.pending_transactions[txnid]
txn = self.transaction_store[txnid]
txn.Status = transaction.Status.committed
txn.InBlock = tblock.Identifier
self.transaction_store[txnid] = txn
# Update the head of the chain
self.most_recent_committed_block_id = tblock.Identifier
self.chain_store['MostRecentBlockID'] = \
self.most_recent_committed_block_id
self.JournalStats.PreviousBlockID.Value = \
self.most_recent_committed_block_id
# Update stats
self.JournalStats.CommittedTxnCount.increment(len(
tblock.TransactionIDs))
self.JournalStats.CommittedBlockCount.Value = \
self.committed_block_count + 1
# fire the event handler for block commit
self.on_commit_block.fire(self, tblock)
def _decommit_block_chain(self, forkid):
    """
    decommit blocks from the head of the chain through the forked block
    Args:
        forkid (UUID) -- identifier of the block where the fork occurred
    """
    # _decommit_block() moves most_recent_committed_block_id one step
    # back each call, so walk the head until it reaches the fork point
    while self.most_recent_committed_block_id != forkid:
        self._decommit_block()
def _decommit_block(self):
    """
    Move the head of the block chain from the committed pool to the
    orphaned pool and move all transactions in the block back into the
    pending transaction list.
    """
    with self._txn_lock:
        blockid = self.most_recent_committed_block_id
        block = self.block_store[blockid]
        assert block.Status == transaction_block.Status.valid
        # fire the event handler for block decommit
        self.on_decommit_block.fire(self, block)
        # move the head of the chain back
        self.most_recent_committed_block_id = block.PreviousBlockID
        self.chain_store['MostRecentBlockID'] = \
            self.most_recent_committed_block_id
        # this bizarre bit of code is intended to preserve the ordering of
        # transactions, where all committed transactions occur before
        # pending transactions: the decommitted block's transactions are
        # inserted first, then the previously pending ones
        pending = OrderedDict()
        for txnid in block.TransactionIDs:
            # there is a chance that this block is incomplete and some
            # of the transactions have not arrived, don't put
            # transactions into pending if we dont have the transaction
            txn = self.transaction_store.get(txnid)
            if txn:
                txn.Status = transaction.Status.pending
                # re-store the txn so the status change is persisted
                self.transaction_store[txnid] = txn
                if txn.add_to_pending():
                    pending[txnid] = True
        pending.update(self.pending_transactions)
        self.pending_transactions = pending
        # update stats
        # NOTE(review): this sets the stat to committed_block_count + 1
        # exactly as the commit path does, which looks odd right after a
        # decommit -- confirm committed_block_count is already updated
        # by the head move above
        self.JournalStats.CommittedBlockCount.Value = \
            self.committed_block_count + 1
        self.JournalStats.CommittedTxnCount.increment(-len(
            block.TransactionIDs))
def _test_and_apply_block(self, tblock):
    """Test and apply transactions to the previous block's global
    store to create a new version of the store

    Args:
        tblock (Transaction.TransactionBlock) -- block of transactions to
            apply
    Returns:
        GlobalStore -- the updated store, or None when any transaction is
        invalid or application raises
    """
    with self._txn_lock:
        assert tblock.Status == transaction_block.Status.complete
        # make a copy of the store from the previous block, the previous
        # block must be complete if this block is complete
        teststore = self.global_store_map.get_block_store(
            tblock.PreviousBlockID).clone_block()
        # apply the transactions
        # txnid is pre-initialized so the except handler below can log it
        # even when the failure happens before the first loop iteration
        txnid = None
        try:
            for txnid in tblock.TransactionIDs:
                txn = self.transaction_store[txnid]
                txnstore = teststore.get_transaction_store(
                    txn.TransactionTypeName)
                if not txn.is_valid(txnstore):
                    return None
                txn.apply(txnstore)
        # bug fix: a bare "except:" also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception only
        except Exception:
            logger.exception('txnid: %s - unexpected exception '
                             'when testing transaction block '
                             'validity.',
                             txnid[:8] if txnid else None)
            return None
        return teststore
def _find_fork(self, tblock):
    """
    Find the most recent predecessor of tblock that is in the committed
    chain.

    Args:
        tblock (Transaction.TransactionBlock) -- block whose committed
            ancestor is sought
    Returns:
        identifier of the fork block; may be common.NullIdentifier when
        the walk reaches the root of the chain
    """
    # (doc fix: the original docstring described a 'depth' parameter that
    #  the signature never had; the search always walks the full chain)
    blockids = set(self.committed_block_ids(0))
    forkid = tblock.PreviousBlockID
    # walk backwards until we hit a committed block or the chain root;
    # restructured loop removes the unreachable "return None" that
    # followed the original "while True"
    while forkid != common.NullIdentifier and forkid not in blockids:
        assert forkid in self.block_store
        forkid = self.block_store[forkid].PreviousBlockID
    return forkid
def _prepare_transaction_list(self, maxcount=0):
    """
    Prepare an ordered list of valid transactions that can be included in
    the next consensus round

    Args:
        maxcount (int) -- maximum number of transactions to gather,
            0 means unlimited
    Returns:
        list of Transaction identifiers to include in the block
    """
    with self._txn_lock:
        # generate a list of valid transactions to place in the new block
        addtxns = []
        deltxns = []
        store = self.global_store.clone_block()
        # NOTE: iterkeys() is Python 2 only -- this module predates a
        # Python 3 port
        for txnid in self.pending_transactions.iterkeys():
            txn = self.transaction_store[txnid]
            if txn:
                # fills addtxns/deltxns as a side effect
                self._prepare_transaction(addtxns, deltxns, store, txn)
            if maxcount and len(addtxns) >= maxcount:
                break
        # as part of the process, we may identify transactions that
        # are invalid so go ahead and get rid of them, since these
        # had all dependencies met we know that they will never be valid
        for txnid in deltxns:
            self.JournalStats.InvalidTxnCount.increment()
            if txnid in self.transaction_store:
                txn = self.transaction_store[txnid]
                # keep transactions that are already part of a block
                if txn.InBlock is None:
                    logger.debug("txnid: %s - deleting from transaction "
                                 "store", txnid)
                    del self.transaction_store[txnid]
            if txnid in self.pending_transactions:
                logger.debug("txnid: %s - deleting from pending "
                             "transactions", txnid)
                del self.pending_transactions[txnid]
        return addtxns
def _prepare_transaction(self, addtxns, deltxns, store, txn):
    """
    Determine if a particular transaction is valid and, when it is,
    apply it to *store* and record it in *addtxns*. Recurses into
    uncommitted dependencies.

    Args:
        addtxns (list) -- identifiers of transactions to be added to the
            current block (mutated in place)
        deltxns (list) -- identifiers of invalid transactions to drop
            (mutated in place)
        store (GlobalStore) -- current global store
        txn -- the transaction to be tested
    Returns:
        True if the transaction is valid and was added, False otherwise
    """
    with self._txn_lock:
        logger.debug('txnid: %s - add transaction %s',
                     txn.Identifier[:8],
                     str(txn))
        # Because the dependencies may reorder transactions in the block
        # in a way that is different from the arrival order, this
        # transaction might already be in the block
        if txn.Identifier in addtxns:
            return True
        # First step in adding the transaction to the list is to make
        # sure that all dependent transactions are in the list already
        ready = True
        for dependencyID in txn.Dependencies:
            logger.debug('txnid: %s - check dependency %s',
                         txn.Identifier[:8], dependencyID[:8])
            # check to see if the dependency has already been committed
            if (dependencyID in self.transaction_store and
                    (self.transaction_store[dependencyID].Status ==
                     transaction.Status.committed)):
                continue
            # check to see if the dependency is already in this block
            if dependencyID in addtxns:
                continue
            # check to see if the dependency is among the transactions to
            # be deleted, if so then this transaction will never be valid
            # and we can just get rid of it
            if dependencyID in deltxns:
                logger.info('txnid: %s - depends on deleted '
                            'transaction %s',
                            txn.Identifier[:8], dependencyID[:8])
                deltxns.append(txn.Identifier)
                ready = False
                continue
            # recurse into the dependency, note that we need to make sure
            # there are no loops in the dependencies but not doing that
            # right now
            deptxn = self.transaction_store.get(dependencyID)
            if deptxn and self._prepare_transaction(addtxns,
                                                    deltxns,
                                                    store,
                                                    deptxn):
                continue
            # at this point we cannot find the dependency so send out a
            # request for it and wait, we should set a timer on this
            # transaction so we can just throw it away if the dependencies
            # cannot be met
            ready = False
            logger.info('txnid: %s - missing %s, '
                        'calling request_missing_txn',
                        txn.Identifier[:8], dependencyID[:8])
            self.request_missing_txn(dependencyID)
            self.JournalStats.MissingTxnDepCount.increment()
        # if all of the dependencies have not been met then there isn't any
        # point in continuing on so bail out
        if not ready:
            # age tracks how many rounds the txn has waited on dependencies
            txn.increment_age()
            self.transaction_store[txn.Identifier] = txn
            logger.info('txnid: %s - not ready (age %s)',
                        txn.Identifier[:8], txn.age)
            if txn.age > self.max_txn_age:
                logger.warn('txnid: %s - too old, dropping - %s',
                            txn.Identifier[:8], str(txn))
                deltxns.append(txn.Identifier)
            return False
        # after all that work... we know the dependencies are met, so
        # see if # the transaction is valid, that is that all of the
        # preconditions encoded in the transaction itself are met
        txnstore = store.get_transaction_store(txn.TransactionTypeName)
        if txn.is_valid(txnstore):
            logger.debug('txnid: %s - is valid, adding to block',
                         txn.Identifier[:8])
            addtxns.append(txn.Identifier)
            txn.apply(txnstore)
            return True
        # because we have all of the dependencies but the transaction is
        # still invalid we know that this transaction is broken and we
        # can simply throw it away
        logger.warn(
            'txnid: %s - is not valid for this block, dropping - %s',
            txn.Identifier[:8], str(txn))
        logger.info(common.pretty_print_dict(txn.dump()))
        deltxns.append(txn.Identifier)
        return False
def _clean_transaction_blocks(self):
"""
_clean_transaction_blocks -- for blocks and transactions that are with
high probability no longer going to change, clean out the bulk of the
memory used to store the block and the corresponding transactions
"""
with self._txn_lock:
self.chain_store.sync()
self.transaction_store.sync()
self.block_store.sync()
# with the state storage, we can flatten old blocks to reduce
# memory footprint, they can always be recovered from
# persistent storage later on, however, the flattening
# process increases memory usage so we don't want to do
# it too often, the code below keeps the number of blocks
# kept in memory less than 2 * self.MaximumBlocksToKeep
if self.most_recent_committed_block.BlockNum \
% self.maximum_blocks_to_keep == 0:
logger.info('compress global state for block number %s',
self.most_recent_committed_block.BlockNum)
depth = 0
blockid = self.most_recent_committed_block_id
while (blockid != common.NullIdentifier and
depth < self.maximum_blocks_to_keep):
blockid = self.block_store[blockid].PreviousBlockID
depth += 1
if | |
import torch.nn.functional as F
import torch
import logging
import torch.nn as nn
import numpy as np
import time
from torch.autograd import Variable
__all__ = ['sigmoid_dice_loss','softmax_dice_loss','GeneralizedDiceLoss','FocalLoss','dice','CE_loss','bce_loss','IOU_loss','TverskyLoss','SSIM']
cross_entropy = F.cross_entropy
use_class_balance = False
def CE_loss(output, target):
if use_class_balance:
mask = torch.zeros([1,4])
num_total = torch.sum(target.float()).float()
num_pos1 = torch.sum((target==1).float()).float()
num_pos2 = torch.sum((target==2).float()).float()
num_pos4 = torch.sum((target==4).float()).float()
num_neg = num_total - num_pos1 - num_pos2 - num_pos4
mask[0,1] = 1-num_pos1 / num_total
mask[0,2] = 1-num_pos2 / num_total
mask[0,3] = 1-num_pos4 / num_total
mask[0,0] = 1-num_neg / num_total
if output.dim() > 2:
_output = output.contiguous().view(output.size(0), output.size(1), -1) # N,C,H,W,D => N,C,H*W*D
__output = _output.transpose(1, 2) # N,C,H*W*D => N,H*W*D,C
output = __output.reshape(-1, __output.size(2))
if target.dim() == 4:
_target = target.view(-1) # N*H*W*D
if use_class_balance:
loss = F.cross_entropy(output, _target, weight=mask.cuda()) #####
else:
loss = F.cross_entropy(output, _target)
return loss
def FocalLoss(output, target, alpha=0.25, gamma=2.0):
    """Focal loss: cross-entropy scaled by (1 - p_t) ** gamma.

    Args:
        output: logits, (N, C) or (N, C, H, W, D)
        target: class labels (N,)/(N, H, W, D) or one-hot (N, C, H, W, D)
        alpha: unused here (kept for signature compatibility)
        gamma: focusing exponent
    Returns:
        summed loss (class-balanced branch) or mean loss otherwise
    """
    if use_class_balance:
        # NOTE(review): num_total sums the label VALUES (0/1/2/4), not the
        # voxel count -- confirm this weighting is intentional. Requires CUDA.
        mask = torch.zeros([1,4])
        num_total = torch.sum(target.float()).float()
        num_pos1 = torch.sum((target==1).float()).float()
        num_pos2 = torch.sum((target==2).float()).float()
        num_pos4 = torch.sum((target==4).float()).float()
        num_neg = num_total - num_pos1 - num_pos2 - num_pos4
        mask[0,1] = 1-num_pos1 / num_total
        mask[0,2] = 1-num_pos2 / num_total
        mask[0,3] = 1-num_pos4 / num_total
        mask[0,0] = 1-num_neg / num_total
        mask = mask/mask.sum()
        ww = torch.Tensor([num_neg.item(),num_pos1.item(),num_pos2.item(),num_pos4.item()])
        mask = mask/(ww+0.00001)
    # Flatten logits to (N*H*W*D, C)
    if output.dim() > 2:
        output = output.view(output.size(0), output.size(1), -1)  # N,C,H,W,D => N,C,H*W*D
        output = output.transpose(1, 2)    # N,C,H*W*D => N,H*W*D,C
        output = output.contiguous().view(-1, output.size(2))  # => N*H*W*D,C
    # Flatten one-hot (5-D) or class-index (4-D) targets to match
    if target.dim() == 5:
        target = target.contiguous().view(target.size(0), target.size(1), -1)
        target = target.transpose(1, 2)
        target = target.contiguous().view(-1, target.size(2))
    if target.dim() == 4:
        target = target.view(-1)  # N*H*W*D
    # compute the negative likelyhood
    if use_class_balance:
        # fix: 'reduce=False' is deprecated -- use reduction='none'
        # (consistent with the unbalanced branch below)
        _logpt = -F.cross_entropy(output, target, weight=mask.cuda(),
                                  reduction='none')
        logpt = -F.cross_entropy(output, target, reduction='none')
        pt = torch.exp(logpt)
        # weighted focal term
        loss = -((1 - pt) ** gamma) * _logpt
        return loss.sum()
    else:
        loss = F.cross_entropy(output, target, reduction='none')
        # fix: implicit-dim log_softmax is deprecated; class axis is 1
        logpt = F.log_softmax(output, dim=1)
        target = target.view(-1, 1)
        logpt = logpt.gather(1, target)
        pt = Variable(logpt.data.exp()).view(-1)
        loss = ((1 - pt)**gamma) * loss
        return loss.mean()
def dice(output, target, eps=1e-5):
    """Soft Dice loss: 1 - 2|X.Y| / (|X| + |Y| + eps)."""
    target = target.float()
    intersection = (output * target).sum()
    union = output.sum() + target.sum()
    # eps only stabilises the denominator (matches the original)
    return 1.0 - (2.0 * intersection) / (union + eps)
def sigmoid_dice_loss(output, target, alpha=1e-5, datasets=None, use_class_balance=True):
    """Per-class soft Dice loss on sigmoid probabilities.

    Args:
        output: [-1,3+,H,W,T] logits (channel 0 is background)
        target: [-1,H,W,T] integer labels (BraTS label 4 remapped to 3)
        alpha: eps passed to dice()
        datasets: only 'BraTSDataset' is handled
        use_class_balance: weight the three class losses by inverse class
            frequency (NOTE: shadows the module-level flag of the same name)
    """
    if datasets == 'BraTSDataset':
        # fix: F.sigmoid is deprecated (removed in recent PyTorch);
        # torch.sigmoid is the supported equivalent
        loss1 = dice(torch.sigmoid(output[:,1,...]),(target==1).float(),eps=alpha)
        loss2 = dice(torch.sigmoid(output[:,2,...]),(target==2).float(),eps=alpha)
        loss3 = dice(torch.sigmoid(output[:,3,...]),(target == 3).float(),eps=alpha)
    logging.info('1:{:.4f} | 2:{:.4f} | 4:{:.4f}'.format(1-loss1.data, 1-loss2.data, 1-loss3.data))
    loss = (loss1+loss2+loss3)/3
    if use_class_balance:
        # NOTE(review): requires CUDA (mask.cuda-free here but tensor is
        # created on GPU) -- confirm CPU runs are not expected
        mask = torch.zeros([1,4]).cuda()
        num_total = torch.sum(target.float()).float()+1e-5
        num_pos1 = torch.sum((target==1).float()).float()
        num_pos2 = torch.sum((target==2).float()).float()
        num_pos4 = torch.sum((target==3).float()).float() ### 3/4
        mask[0,1] = 1-num_pos1 / num_total
        mask[0,2] = 1-num_pos2 / num_total
        mask[0,3] = 1-num_pos4 / num_total
        mask[0,1:4] = mask[0,1:4] / mask[0,1:4].sum()
        return (loss1*mask[0,1] + loss2*mask[0,2] + loss3*mask[0,3]) ###
    else:
        return loss
def softmax_dice_loss(output, target, eps=1e-5):
    """Edge Dice loss over pre-softmaxed probabilities (no sigmoid).

    output : [bsize, c, H, W, D] -- channels 1..3 are compared against
    target : [bsize, H, W, D]    -- labels 0..2 respectively
    """
    # channel c matches label c-1 (channel 0 is skipped)
    loss1, loss2, loss3 = [
        dice(output[:, channel, ...], (target == channel - 1).float())
        for channel in (1, 2, 3)
    ]
    logging.info('1:{:.4f} | 2:{:.4f} | 4:{:.4f}'.format(1-loss1.data, 1-loss2.data, 1-loss3.data))
    return (loss1 + loss2 + loss3) / 3
# Generalised Dice : 'Generalised dice overlap as a deep learning loss function for highly unbalanced segmentations'
def GeneralizedDiceLoss(output,target,eps=1e-5,weight_type='square'): # Generalized dice loss
    """
    Generalised Dice overlap ('Generalised dice overlap as a deep learning
    loss function for highly unbalanced segmentations', Sudre et al.) over
    the three foreground classes, with logging of per-region dice,
    sensitivity and specificity.

    output : [N,4,H,W,D] per-class probabilities (channel 0 = background).
    target : [N,H,W,D] integer labels (label 4 remapped to 3) or an already
        expanded [N,4,H,W,D] tensor.
    weight_type : 'square' | 'identity' | 'sqrt' per-class weighting.

    WARNING: when target is 4-D, the `target[target == 4] = 3` remap mutates
    the caller's tensor in place.
    """
    # target = target.float()
    if target.dim() == 4:
        target[target == 4] = 3 # label [4] -> [3]
        target = expand_target(target, n_class=output.size()[1]) # [N,H,W,D] -> [N,4,H,W,D]
    # Drop the background channel after flattening: [3, N*H*W*D] voxels.
    output = flatten(output)[1:,...] # transpose [N,4,H,W,D] -> [4,N,H,W,D] -> [3, N*H*W*D] voxels
    target = flatten(target)[1:,...] # [class, N*H*W*D]
    target_sum = target.sum(-1) # per-class voxel counts, shape [3]
    if weight_type == 'square':
        class_weights = 1. / (target_sum * target_sum + eps)
    elif weight_type == 'identity':
        class_weights = 1. / (target_sum + eps)
    elif weight_type == 'sqrt':
        class_weights = 1. / (torch.sqrt(target_sum) + eps)
    else:
        raise ValueError('Check out the weight_type :',weight_type)
    intersect = (output * target).sum(-1)
    intersect_sum = (intersect * class_weights).sum()
    denominator = (output + target).sum(-1)
    denominator_sum = (denominator * class_weights).sum() + eps
    # NOTE: despite the names, loss1-3 are per-class dice *scores*; they are
    # used only for logging, not for the returned loss.
    loss1 = 2*intersect[0] / (denominator[0] + eps) ## corresponding to class-1
    loss2 = 2*intersect[1] / (denominator[1] + eps) ## corresponding to class-2
    loss3 = 2*intersect[2] / (denominator[2] + eps) ## corresponding to class-4
    # TC: dice over the concatenated class-1 and class-4 channels.
    output_tc = torch.cat((output[0],output[2]), 0)
    target_tc = torch.cat((target[0],target[2]), 0)
    dice_TC = 2*(output_tc*target_tc).sum(-1) / ((output_tc+target_tc).sum(-1) + eps)
    # WT: dice over all three foreground channels concatenated.
    output_wt = torch.cat((output[0],output[1],output[2]), 0)
    target_wt = torch.cat((target[0],target[1],target[2]), 0)
    dice_WT = 2*(output_wt*target_wt).sum(-1) / ((output_wt+target_wt).sum(-1) + eps)
    dice_ET = loss3
    # Sensitivity = TP / P (no eps: NaN if a region is absent from target).
    Sensitivity_TC = (output_tc*target_tc).sum(-1) / target_tc.sum(-1)
    Sensitivity_WT = (output_wt*target_wt).sum(-1) / target_wt.sum(-1)
    Sensitivity_ET = intersect[2] / target[2].sum(-1)
    # Specificity = TN / N.
    Specificity_TC = ((1-output_tc)*(1-target_tc)).sum(-1) / (1-target_tc).sum(-1)
    Specificity_WT = ((1-output_wt)*(1-target_wt)).sum(-1) / (1-target_wt).sum(-1)
    Specificity_ET = ((1-output) * (1-target)).sum(-1)[2] / (1-target[2]).sum(-1)
    logging.info('1: {:.5f} | 2: {:.5f} | 4: {:.5f}'.format(loss1.data, loss2.data, loss3.data))
    logging.info('Dice_ET:{:.5f} | Dice_WT:{:.5f} | Dice_TC:{:.5f} | Sensitivity_ET:{:.5f} | Sensitivity_WT:{:.5f} | Sensitivity_TC:{:.5f}'.format(dice_ET, dice_WT, dice_TC, Sensitivity_ET, Sensitivity_WT, Sensitivity_TC))
    logging.info('Specificity_ET:{:.5f} | Specificity_WT:{:.5f} | Specificity_TC:{:.5f}'.format(Specificity_ET, Specificity_WT, Specificity_TC))
    return 1 - 2. * intersect_sum / denominator_sum
def expand_target(x, n_class, mode='softmax'):
    """One-hot expand an NxDxHxW label volume into NxCxDxHxW.

    Labels 1..3 are scattered into channels 1..3 ('softmax' layout) or into
    channels 0..2 ('sigmoid' layout); any other mode yields all-zero
    channels. The result is moved to x's device.
    """
    assert x.dim() == 4
    out_shape = list(x.size())
    out_shape.insert(1, n_class)
    expanded = torch.zeros(tuple(out_shape))
    layout = mode.lower()
    if layout == 'softmax':
        first_channel = 1
    elif layout == 'sigmoid':
        first_channel = 0
    else:
        first_channel = None
    if first_channel is not None:
        # Labels 1..3 only, matching the original hard-coded mapping.
        for label in (1, 2, 3):
            expanded[:, first_channel + label - 1, :, :, :] = (x == label)
    return expanded.to(x.device)
def flatten(tensor):
    """Move the channel axis first and collapse everything else.

    (N, C, D, H, W) -> (C, N * D * H * W)
    """
    channels = tensor.size(1)
    # Swap N and C, keep all trailing axes in order.
    channel_first_order = (1, 0) + tuple(range(2, tensor.dim()))
    return tensor.permute(channel_first_order).reshape(channels, -1)
# BCE Loss:
def bce_loss(prediction, label, smooth_label=False):
    """Class-balanced binary cross-entropy.

    Each pixel is weighted by the prevalence of the *opposite* class so the
    rarer class contributes more: positives get num_neg/total and background
    gets 1.1 * num_pos/total (the 1.1 factor slightly boosts background; kept
    from the original). In the non-smooth path, pixels labelled 2 are ignored
    (weight 0).

    prediction: probabilities in [0, 1]; label: integer/float 0-1 map
    (2 = ignore). Returns the weighted BCE sum divided by the positive count.
    """
    label = label.clone().long()
    mask = label.clone().float()
    if smooth_label:
        # Treat every non-zero label as foreground.
        num_positive = torch.sum((mask != 0).float()).float()
        num_negative = torch.sum((mask == 0).float()).float()
        mask[mask > 0] = 1.0 * num_negative / (num_positive + num_negative)
        mask[mask == 0] = 1.1 * num_positive / (num_positive + num_negative)
    else:
        num_positive = torch.sum((mask == 1).float()).float()
        num_negative = torch.sum((mask == 0).float()).float()
        mask[mask == 1] = 1.0 * num_negative / (num_positive + num_negative)
        mask[mask == 0] = 1.1 * num_positive / (num_positive + num_negative)
        mask[mask == 2] = 0
    # reduction='none' replaces the long-deprecated `reduce=False` flag;
    # behavior (element-wise losses) is identical.
    cost = torch.nn.functional.binary_cross_entropy(
        prediction.clone().float(), label.clone().float(),
        weight=mask, reduction='none')
    return torch.sum(cost) / (num_positive + 1e-6)
# IOU Loss:
def _iou(pred, target, size_average = True):
b = pred.shape[0]
target[target == 4] = 3 # label [4] -> [3]
IoU = [0.0, 0.0, 0.0]
for j in range(1,4):
for i in range(0,b):
#compute the IoU of the foreground
target[target == j] = 1
target[target != j] = 0
Iand1 = torch.sum(target.clone()[i,:,:,:]*pred.clone()[i,j,:,:,:])
Ior1 = torch.sum(target.clone()[i,:,:,:]) + torch.sum(pred.clone()[i,j,:,:,:])-Iand1
IoU1 = Iand1/Ior1
#IoU loss is (1-IoU1)
IoU[j-1] = IoU[j-1] + (1-IoU1)
IoU[j-1] = IoU[j-1]/b
return sum(IoU) #IoU/b
class IOU(torch.nn.Module):
    """nn.Module wrapper around the functional `_iou` loss."""
    def __init__(self, size_average = True):
        super(IOU, self).__init__()
        # Stored and forwarded to `_iou`; currently unused there.
        self.size_average = size_average
    def forward(self, pred, target):
        # pred: [B, C, ...] class probabilities; target: [B, ...] labels.
        return _iou(pred, target, self.size_average)
def IOU_loss(pred, label):
    """Convenience wrapper: evaluate the IOU loss for one (pred, label) pair."""
    return IOU(size_average=True)(pred, label)
# Tversky Loss
def TverskyLoss(output, targets, smooth=1, alpha=0.3, beta=0.7):
#comment out if your model contains a sigmoid or equivalent activation layer
output = F.sigmoid(output.clone())
#flatten label and prediction tensors
if output.dim() > 2:
output = output.view(output.size(0), output.size(1), -1) # N,C,H,W,D => N,C,H*W*D
output = output.transpose(1, 2) # N,C,H*W*D => N,H*W*D,C
output = output.contiguous().view(-1, output.size(2)) # N,H*W*D,C => N*H*W*D,C
if targets.dim() == 4:
targets = targets.view(-1) # N*H*W*D
targets = torch.unsqueeze(targets,1).expand(targets.size(0), 4)
#True Positives, False Positives & False Negatives
TP = (output * targets).sum() | |
# <gh_stars>0  (scraper artifact; commented out so the file stays valid Python)
# Standard
import re
# PIP
import cupy
import torch
kernel_Softsplat_updateOutput = """
extern "C" __global__ void kernel_Softsplat_updateOutput(
const int n,
const float* input,
const float* flow,
float* output
) { for (int intIndex = (blockIdx.x * blockDim.x) + threadIdx.x; intIndex < n; intIndex += blockDim.x * gridDim.x) {
const int intN = ( intIndex / SIZE_3(output) / SIZE_2(output) / SIZE_1(output) ) % SIZE_0(output);
const int intC = ( intIndex / SIZE_3(output) / SIZE_2(output) ) % SIZE_1(output);
const int intY = ( intIndex / SIZE_3(output) ) % SIZE_2(output);
const int intX = ( intIndex ) % SIZE_3(output);
float fltOutputX = (float) (intX) + VALUE_4(flow, intN, 0, intY, intX);
float fltOutputY = (float) (intY) + VALUE_4(flow, intN, 1, intY, intX);
int intNorthwestX = (int) (floor(fltOutputX));
int intNorthwestY = (int) (floor(fltOutputY));
int intNortheastX = intNorthwestX + 1;
int intNortheastY = intNorthwestY;
int intSouthwestX = intNorthwestX;
int intSouthwestY = intNorthwestY + 1;
int intSoutheastX = intNorthwestX + 1;
int intSoutheastY = intNorthwestY + 1;
float fltNorthwest = ((float) (intSoutheastX) - fltOutputX ) * ((float) (intSoutheastY) - fltOutputY );
float fltNortheast = (fltOutputX - (float) (intSouthwestX)) * ((float) (intSouthwestY) - fltOutputY );
float fltSouthwest = ((float) (intNortheastX) - fltOutputX ) * (fltOutputY - (float) (intNortheastY));
float fltSoutheast = (fltOutputX - (float) (intNorthwestX)) * (fltOutputY - (float) (intNorthwestY));
if ((intNorthwestX >= 0) & (intNorthwestX < SIZE_3(output)) & (intNorthwestY >= 0) & (intNorthwestY < SIZE_2(output))) {
atomicAdd(&output[OFFSET_4(output, intN, intC, intNorthwestY, intNorthwestX)], VALUE_4(input, intN, intC, intY, intX) * fltNorthwest);
}
if ((intNortheastX >= 0) & (intNortheastX < SIZE_3(output)) & (intNortheastY >= 0) & (intNortheastY < SIZE_2(output))) {
atomicAdd(&output[OFFSET_4(output, intN, intC, intNortheastY, intNortheastX)], VALUE_4(input, intN, intC, intY, intX) * fltNortheast);
}
if ((intSouthwestX >= 0) & (intSouthwestX < SIZE_3(output)) & (intSouthwestY >= 0) & (intSouthwestY < SIZE_2(output))) {
atomicAdd(&output[OFFSET_4(output, intN, intC, intSouthwestY, intSouthwestX)], VALUE_4(input, intN, intC, intY, intX) * fltSouthwest);
}
if ((intSoutheastX >= 0) & (intSoutheastX < SIZE_3(output)) & (intSoutheastY >= 0) & (intSoutheastY < SIZE_2(output))) {
atomicAdd(&output[OFFSET_4(output, intN, intC, intSoutheastY, intSoutheastX)], VALUE_4(input, intN, intC, intY, intX) * fltSoutheast);
}
} }
"""
kernel_Softsplat_updateGradInput = """
extern "C" __global__ void kernel_Softsplat_updateGradInput(
const int n,
const float* input,
const float* flow,
const float* grad_output,
float* grad_input,
float* grad_flow
) { for (int intIndex = (blockIdx.x * blockDim.x) + threadIdx.x; intIndex < n; intIndex += blockDim.x * gridDim.x) {
const int intN = ( intIndex / SIZE_3(grad_input) / SIZE_2(grad_input) / SIZE_1(grad_input) ) % SIZE_0(grad_input);
const int intC = ( intIndex / SIZE_3(grad_input) / SIZE_2(grad_input) ) % SIZE_1(grad_input);
const int intY = ( intIndex / SIZE_3(grad_input) ) % SIZE_2(grad_input);
const int intX = ( intIndex ) % SIZE_3(grad_input);
float fltGradInput = 0.0;
float fltOutputX = (float) (intX) + VALUE_4(flow, intN, 0, intY, intX);
float fltOutputY = (float) (intY) + VALUE_4(flow, intN, 1, intY, intX);
int intNorthwestX = (int) (floor(fltOutputX));
int intNorthwestY = (int) (floor(fltOutputY));
int intNortheastX = intNorthwestX + 1;
int intNortheastY = intNorthwestY;
int intSouthwestX = intNorthwestX;
int intSouthwestY = intNorthwestY + 1;
int intSoutheastX = intNorthwestX + 1;
int intSoutheastY = intNorthwestY + 1;
float fltNorthwest = ((float) (intSoutheastX) - fltOutputX ) * ((float) (intSoutheastY) - fltOutputY );
float fltNortheast = (fltOutputX - (float) (intSouthwestX)) * ((float) (intSouthwestY) - fltOutputY );
float fltSouthwest = ((float) (intNortheastX) - fltOutputX ) * (fltOutputY - (float) (intNortheastY));
float fltSoutheast = (fltOutputX - (float) (intNorthwestX)) * (fltOutputY - (float) (intNorthwestY));
if ((intNorthwestX >= 0) & (intNorthwestX < SIZE_3(grad_output)) & (intNorthwestY >= 0) & (intNorthwestY < SIZE_2(grad_output))) {
fltGradInput += VALUE_4(grad_output, intN, intC, intNorthwestY, intNorthwestX) * fltNorthwest;
}
if ((intNortheastX >= 0) & (intNortheastX < SIZE_3(grad_output)) & (intNortheastY >= 0) & (intNortheastY < SIZE_2(grad_output))) {
fltGradInput += VALUE_4(grad_output, intN, intC, intNortheastY, intNortheastX) * fltNortheast;
}
if ((intSouthwestX >= 0) & (intSouthwestX < SIZE_3(grad_output)) & (intSouthwestY >= 0) & (intSouthwestY < SIZE_2(grad_output))) {
fltGradInput += VALUE_4(grad_output, intN, intC, intSouthwestY, intSouthwestX) * fltSouthwest;
}
if ((intSoutheastX >= 0) & (intSoutheastX < SIZE_3(grad_output)) & (intSoutheastY >= 0) & (intSoutheastY < SIZE_2(grad_output))) {
fltGradInput += VALUE_4(grad_output, intN, intC, intSoutheastY, intSoutheastX) * fltSoutheast;
}
grad_input[intIndex] = fltGradInput;
} }
"""
kernel_Softsplat_updateGradFlow = """
extern "C" __global__ void kernel_Softsplat_updateGradFlow(
const int n,
const float* input,
const float* flow,
const float* grad_output,
float* grad_input,
float* grad_flow
) { for (int intIndex = (blockIdx.x * blockDim.x) + threadIdx.x; intIndex < n; intIndex += blockDim.x * gridDim.x) {
float fltGradFlow = 0.0;
const int intN = ( intIndex / SIZE_3(grad_flow) / SIZE_2(grad_flow) / SIZE_1(grad_flow) ) % SIZE_0(grad_flow);
const int intC = ( intIndex / SIZE_3(grad_flow) / SIZE_2(grad_flow) ) % SIZE_1(grad_flow);
const int intY = ( intIndex / SIZE_3(grad_flow) ) % SIZE_2(grad_flow);
const int intX = ( intIndex ) % SIZE_3(grad_flow);
float fltOutputX = (float) (intX) + VALUE_4(flow, intN, 0, intY, intX);
float fltOutputY = (float) (intY) + VALUE_4(flow, intN, 1, intY, intX);
int intNorthwestX = (int) (floor(fltOutputX));
int intNorthwestY = (int) (floor(fltOutputY));
int intNortheastX = intNorthwestX + 1;
int intNortheastY = intNorthwestY;
int intSouthwestX = intNorthwestX;
int intSouthwestY = intNorthwestY + 1;
int intSoutheastX = intNorthwestX + 1;
int intSoutheastY = intNorthwestY + 1;
float fltNorthwest = 0.0;
float fltNortheast = 0.0;
float fltSouthwest = 0.0;
float fltSoutheast = 0.0;
if (intC == 0) {
fltNorthwest = ((float) (-1.0)) * ((float) (intSoutheastY) - fltOutputY );
fltNortheast = ((float) (+1.0)) * ((float) (intSouthwestY) - fltOutputY );
fltSouthwest = ((float) (-1.0)) * (fltOutputY - (float) (intNortheastY));
fltSoutheast = ((float) (+1.0)) * (fltOutputY - (float) (intNorthwestY));
} else if (intC == 1) {
fltNorthwest = ((float) (intSoutheastX) - fltOutputX ) * ((float) (-1.0));
fltNortheast = (fltOutputX - (float) (intSouthwestX)) * ((float) (-1.0));
fltSouthwest = ((float) (intNortheastX) - fltOutputX ) * ((float) (+1.0));
fltSoutheast = (fltOutputX - (float) (intNorthwestX)) * ((float) (+1.0));
}
for (int intChannel = 0; intChannel < SIZE_1(grad_output); intChannel += 1) {
float fltInput = VALUE_4(input, intN, intChannel, intY, intX);
if ((intNorthwestX >= 0) & (intNorthwestX < SIZE_3(grad_output)) & (intNorthwestY >= 0) & (intNorthwestY < SIZE_2(grad_output))) {
fltGradFlow += fltInput * VALUE_4(grad_output, intN, intChannel, intNorthwestY, intNorthwestX) * fltNorthwest;
}
if ((intNortheastX >= 0) & (intNortheastX < SIZE_3(grad_output)) & (intNortheastY >= 0) & (intNortheastY < SIZE_2(grad_output))) {
fltGradFlow += fltInput * VALUE_4(grad_output, intN, intChannel, intNortheastY, intNortheastX) * fltNortheast;
}
if ((intSouthwestX >= 0) & (intSouthwestX < SIZE_3(grad_output)) & (intSouthwestY >= 0) & (intSouthwestY < SIZE_2(grad_output))) {
fltGradFlow += fltInput * VALUE_4(grad_output, intN, intChannel, intSouthwestY, intSouthwestX) * fltSouthwest;
}
if ((intSoutheastX >= 0) & (intSoutheastX < SIZE_3(grad_output)) & (intSoutheastY >= 0) & (intSoutheastY < SIZE_2(grad_output))) {
fltGradFlow += fltInput * VALUE_4(grad_output, intN, intChannel, intSoutheastY, intSoutheastX) * fltSoutheast;
}
}
grad_flow[intIndex] = fltGradFlow;
} }
"""
def cupy_kernel(func_name, var_object):
    """Specialize the CUDA template named `func_name` for concrete tensors.

    The template is looked up in this module's globals and three macro
    families are textually expanded using the tensors in `var_object`
    (mapping tensor name -> tensor):
      SIZE_i(t)            -> the literal size of dimension i of t
      OFFSET_n(t, i0, ...) -> a stride-weighted flat-index expression
      VALUE_n(t, i0, ...)  -> t[flat index], i.e. an element access
    Returns the fully expanded CUDA source string.

    Fix over the original: the regex patterns are now raw strings — the
    previous plain strings contained the invalid escape sequences \\( and
    \\), which modern Python flags as a SyntaxWarning. The local holding the
    source is also renamed (it was confusingly called `kernel_name`).
    """
    kernel_source = globals()[func_name]
    # Expand SIZE_i(tensor) into constant dimension sizes.
    while True:
        match_object = re.search(r"(SIZE_)([0-4])(\()([^\)]*)(\))", kernel_source)
        if match_object is None:
            break
        dim_index = int(match_object.group(2))
        tensor_name = match_object.group(4)
        size_list = var_object[tensor_name].size()
        kernel_source = kernel_source.replace(match_object.group(), str(size_list[dim_index]))
    # Expand OFFSET_n(tensor, i0, i1, ...) into a stride-weighted flat offset.
    while True:
        match_object = re.search(r"(OFFSET_)([0-4])(\()([^\)]+)(\))", kernel_source)
        if match_object is None:
            break
        num_args = int(match_object.group(2))
        args_list = match_object.group(4).split(",")
        tensor_name = args_list[0]
        stride_list = var_object[tensor_name].stride()
        index_list = []
        for index in range(num_args):
            # Braces are an escape for parentheses inside macro arguments.
            tmp = args_list[index + 1].replace("{", "(").replace("}", ")").strip()
            index_list.append(f"(({tmp})*{stride_list[index]})")
        kernel_source = kernel_source.replace(match_object.group(0), f'({"+".join(index_list)})')
    # Expand VALUE_n(tensor, i0, i1, ...) into an indexed element access.
    while True:
        match_object = re.search(r"(VALUE_)([0-4])(\()([^\)]+)(\))", kernel_source)
        if match_object is None:
            break
        num_args = int(match_object.group(2))
        args_list = match_object.group(4).split(",")
        tensor_name = args_list[0]
        stride_list = var_object[tensor_name].stride()
        index_list = []
        for index in range(num_args):
            tmp = args_list[index + 1].replace("{", "(").replace("}", ")").strip()
            index_list.append(f"(({tmp})*{stride_list[index]})")
        kernel_source = kernel_source.replace(match_object.group(0), tensor_name + f'[{"+".join(index_list)}]')
    return kernel_source
@cupy.memoize(for_each_device=True)
def cupy_launch(func_name, kernel_name):
    # Compile the (already expanded) CUDA source and return the named kernel
    # entry point. Memoized per device, so each source string is compiled
    # at most once per GPU. Note: `kernel_name` is the full source text, not
    # a name — naming kept for interface compatibility.
    return cupy.cuda.compile_with_cache(kernel_name).get_function(func_name)
class SoftSplatFunc(torch.autograd.Function):
@staticmethod
def forward(self, input, flow):
self.save_for_backward(input, flow)
[num_samples, input_depth, input_height, input_width] = input.shape
flow_depth, flow_height, flow_width = (
flow.shape[1],
flow.shape[2],
flow.shape[3],
)
assert flow_depth == 2
assert input_height == flow_height
assert input_width == flow_width
assert input.is_contiguous()
assert flow.is_contiguous()
output = input.new_zeros([num_samples, input_depth, input_height, input_width])
n = output.nelement()
cupy_launch("kernel_Softsplat_updateOutput", cupy_kernel("kernel_Softsplat_updateOutput", {"input": input, "flow": flow, "output": output},),)(
grid=tuple([int((n + 512 - 1) / 512), 1, 1]),
block=tuple([512, 1, 1]),
args=[n, input.data_ptr(), flow.data_ptr(), output.data_ptr()],
)
return output
@staticmethod
def backward(self, grad_output):
input, flow = self.saved_tensors
[num_samples, input_depth, input_height, input_width] = input.shape
flow_depth, flow_height, flow_width = | |
# <reponame>oi-analytics/oia-transport-archive  (scraper artifact; commented out so the file stays valid Python)
# -*- coding: utf-8 -*-
"""
Python script to create transport networks in Vietnam
Created on Wed June 27 2018
@author: <NAME>, <NAME>
"""
import pandas as pd
import os
import psycopg2
import networkx as nx
import csv
import igraph as ig
import numpy as np
import geopandas as gpd
from vtra.utils import line_length
def assign_province_road_conditions(x):
    """Classify a provincial road edge as 'paved' or 'unpaved'.

    Edges with asset code 17 or 303, or with level 0 (national) or
    1 (provincial), are treated as paved; everything else as unpaved.
    """
    is_paved = x.code in (17, 303) or x.level in (0, 1)
    return 'paved' if is_paved else 'unpaved'
def assign_assumed_width_to_province_roads_from_file(asset_width, width_range_list):
    """Map a reported road width onto a design-specification width.

    width_range_list holds (from_width, to_width, assumed_width) tuples; the
    first range containing asset_width (bounds inclusive) wins. A width
    outside every range is returned unchanged.
    """
    for lower, upper, design_width in width_range_list:
        if lower <= asset_width <= upper:
            return design_width
    return asset_width
def assign_assumed_width_to_province_roads(x):
    """Map a reported provincial road width onto a design-specification width.

    Half-open bands [lower, upper) reproduce the original if/elif ladder;
    widths outside 0-100 are returned unchanged. The 24.5-100 band mapping
    to 9.0 mirrors the original code.
    """
    bands = (
        (0.0, 4.25, 3.5),
        (4.25, 6.0, 5.0),
        (6.0, 8.0, 7.0),
        (8.0, 11.5, 9.0),
        (11.5, 17.5, 14.0),
        (17.5, 24.5, 21.0),
        (24.5, 100.0, 9.0),
    )
    for lower, upper, assumed in bands:
        if lower <= x.width < upper:
            return assumed
    return x.width
def assign_asset_type_to_province_roads_from_file(asset_code, asset_type_list):
    """Look up the asset type for an asset code.

    asset_type_list rows are tuples whose first element is the code and third
    element the type (Bridge/Dam/Culvert/Tunnel/Spillway/...). Unknown codes
    default to 'road'.
    """
    for entry in asset_type_list:
        if entry[0] == asset_code:
            return entry[2]
    return 'road'
def assign_asset_type_to_province_roads(x):
    """Map an asset code to its type; anything unrecognized is a 'Road'.

    Codes: 12/25 Bridge, 23 Dam, 24 Culvert, 26 Tunnel, 27 Spillway.
    """
    type_by_code = {
        12: 'Bridge',
        25: 'Bridge',
        23: 'Dam',
        24: 'Culvert',
        26: 'Tunnel',
        27: 'Spillway',
    }
    return type_by_code.get(x.code, 'Road')
def assign_minmax_travel_speeds_province_roads_apply(x):
    """Return (min_speed, max_speed) in km/hr for a provincial road edge.

    Flat terrain (or no terrain information): expressways (code 17) get
    100-120; residential roads and mountain passes (codes 15, 4) 40-60;
    otherwise speed falls with asset level 0/1/2; everything else 20-40.
    Mountainous terrain: 40-60 for levels below 3, else 20-40.
    """
    terrain = x.terrain
    treat_as_flat = (not terrain) or ('flat' in terrain.lower())
    if treat_as_flat:
        if x.code == 17:        # expressway
            return 100, 120
        if x.code in (15, 4):   # residential road or mountain pass
            return 40, 60
        by_level = {0: (80, 100), 1: (60, 80), 2: (40, 60)}
        return by_level.get(x.level, (20, 40))
    if x.level < 3:
        return 40, 60
    return 20, 40
def assign_minmax_time_costs_province_roads_apply(x, cost_dataframe):
    """Return (min_time_cost, max_time_cost) in USD for edge x.

    The first row of cost_dataframe matching the edge's asset code — or,
    failing that, its (level, terrain) pair — supplies time_cost_usd.
    Cost = unit time cost * travel time; the minimum cost uses the maximum
    speed and vice versa. With no matching row, (0, 0) is returned.
    """
    for row in cost_dataframe.itertuples(index=False):
        if row.code == x.code or (row.level == x.level and row.terrain == x.terrain):
            unit_cost = 1.0 * row.time_cost_usd
            return unit_cost * (x.length / x.max_speed), unit_cost * (x.length / x.min_speed)
    return 0, 0
def assign_minmax_tariff_costs_province_roads_apply(x, cost_dataframe):
    """Return (min_tariff_cost, max_tariff_cost) in USD for edge x.

    The first row of cost_dataframe matching the edge's asset code — or,
    failing that, its (level, terrain) pair — supplies tariff_min_usd and
    tariff_max_usd (USD/ton-km); each is multiplied by the edge length.
    With no matching row, (0, 0) is returned.
    """
    for row in cost_dataframe.itertuples(index=False):
        if row.code == x.code or (row.level == x.level and row.terrain == x.terrain):
            return 1.0 * row.tariff_min_usd * x.length, 1.0 * row.tariff_max_usd * x.length
    return 0, 0
def province_shapefile_to_dataframe(edges_in,road_terrain,road_properties_file):
    """Load a provincial road edge shapefile and enrich it with derived
    attributes (condition, type, length, width, speeds, time and tariff
    costs).

    Parameters
    ----------
    edges_in : str
        Path to the edges shapefile / network file.
    road_terrain : str
        Terrain label applied to every edge.
    road_properties_file : str
        Excel workbook with 'provincial', 'widths' and 'costs' sheets.

    Returns
    -------
    geopandas.GeoDataFrame with the added columns, re-ordered for conversion
    to an igraph network via Graph.TupleList.
    """
    edges = gpd.read_file(edges_in)
    edges.columns = map(str.lower, edges.columns)
    # assign asset terrain
    edges['terrain'] = road_terrain
    # assign road condition (paved/unpaved)
    edges['road_cond'] = edges.apply(assign_province_road_conditions,axis=1)
    # assign asset type from the 'provincial' lookup sheet
    asset_type_list = [tuple(x) for x in pd.read_excel(road_properties_file,sheet_name ='provincial').values]
    edges['asset_type'] = edges.code.apply(lambda x: assign_asset_type_to_province_roads_from_file(x,asset_type_list))
    # recompute edge length from the geometry (via vtra.utils.line_length;
    # presumably km — confirm units in vtra.utils)
    edges['length'] = edges.geometry.apply(line_length)
    # correct the widths of the road assets using the design-width ranges
    width_range_list = [tuple(x) for x in pd.read_excel(road_properties_file,sheet_name ='widths').values]
    edges['width'] = edges.width.apply(lambda x: assign_assumed_width_to_province_roads_from_file(x,width_range_list))
    # assign minimum and maximum speed, then split the (min, max) tuple
    edges['speed'] = edges.apply(assign_minmax_travel_speeds_province_roads_apply,axis=1)
    edges[['min_speed', 'max_speed']] = edges['speed'].apply(pd.Series)
    edges.drop('speed',axis=1,inplace=True)
    # travel time = length / speed (min time uses max speed and vice versa)
    edges['min_time'] = edges['length']/edges['max_speed']
    edges['max_time'] = edges['length']/edges['min_speed']
    cost_values_df = pd.read_excel(road_properties_file,sheet_name ='costs')
    # cost of time = (unit cost of time in USD/hr) * (travel time in hr)
    edges['time_cost'] = edges.apply(lambda x: assign_minmax_time_costs_province_roads_apply(x,cost_values_df),axis = 1)
    edges[['min_time_cost', 'max_time_cost']] = edges['time_cost'].apply(pd.Series)
    edges.drop('time_cost',axis=1,inplace=True)
    # tariff cost = (unit tariff in USD/ton-km) * (length in km)
    edges['tariff_cost'] = edges.apply(lambda x: assign_minmax_tariff_costs_province_roads_apply(x,cost_values_df),axis = 1)
    edges[['min_tariff_cost', 'max_tariff_cost']] = edges['tariff_cost'].apply(pd.Series)
    edges.drop('tariff_cost',axis=1,inplace=True)
    # make sure that From and To node are the first two columns of the dataframe
    # to make sure the conversion from dataframe to igraph network goes smooth
    # NOTE(review): this reindex rotates the *current first two* columns to
    # the back — verify the from/to node columns really end up first for
    # Graph.TupleList.
    edges = edges.reindex(list(edges.columns)[2:]+list(edges.columns)[:2],axis=1)
    return edges
def province_shapefile_to_network(edges_in,road_terrain,road_properties_file):
    """Build an igraph network from a provincial road shapefile and return
    its largest connected component. Arguments are forwarded unchanged to
    province_shapefile_to_dataframe()."""
    # create network from edge file; Graph.TupleList takes the first two
    # dataframe columns as edge endpoints, the rest become edge attributes
    edges = province_shapefile_to_dataframe(edges_in,road_terrain,road_properties_file)
    G = ig.Graph.TupleList(edges.itertuples(index=False), edge_attrs=list(edges.columns)[2:])
    # only keep connected network (giant component)
    return G.clusters().giant()
def assign_national_road_terrain(x):
    """Classify a national road edge's terrain from its `dia_hinh__` field.

    Missing values and values containing 'flat' map to 'flat'; everything
    else maps to 'mountain'.
    """
    raw_terrain = x.dia_hinh__
    if raw_terrain is None or 'flat' in raw_terrain.lower().strip():
        return 'flat'
    return 'mountain'
def assign_national_road_conditions(x):
    """Classify a national road edge's surface from its `loai_mat__` field.

    Missing values and values containing 'asphalt' map to 'paved';
    everything else maps to 'unpaved'.
    """
    raw_surface = x.loai_mat__
    if raw_surface is None or 'asphalt' in raw_surface.lower().strip():
        return 'paved'
    return 'unpaved'
def assign_national_road_class(x):
    """Derive road class 1 (best) .. 6 for a national road edge.

    The roman-numeral class field `capkth__ca` (possibly comma-separated)
    takes precedence, choosing the best class present; when it is missing or
    carries no recognized numeral, the class is inferred from the traffic
    count `vehicle_co`.
    """
    def _class_from_traffic(volume):
        # Daily-vehicle thresholds, best class first.
        for cutoff, road_class in ((6000, 1), (3000, 2), (1000, 3), (300, 4), (50, 5)):
            if volume >= cutoff:
                return road_class
        return 6

    raw_class = x.capkth__ca
    if raw_class is None:
        return _class_from_traffic(x.vehicle_co)
    tokens = raw_class.split(',') if ',' in raw_class else [raw_class]
    # Best (lowest) class wins when several are listed.
    for numeral, road_class in (('i', 1), ('ii', 2), ('iii', 3),
                                ('iv', 4), ('v', 5), ('vi', 6)):
        if numeral in tokens:
            return road_class
    return _class_from_traffic(x.vehicle_co)
def assign_assumed_width_to_national_roads_from_file(x, flat_width_range_list, mountain_width_range_list):
    """Assign a design width to a national road edge.

    The spec table is chosen by terrain ('flat' vs anything else) and the
    row matching x.road_class supplies the width: with a plausible lane
    count (1-8) it is lanes*lane_width + median_strip + 2*shoulder_width,
    otherwise the row's default road_width. With no matching row the width
    defaults to 3.5.
    """
    lane_count = 0 if x.lanenum__s is None else int(x.lanenum__s)
    spec_rows = flat_width_range_list if x.terrain == 'flat' else mountain_width_range_list
    for row in spec_rows:
        if row.road_class == x.road_class:
            if 0 < lane_count <= 8:
                return lane_count * row.lane_width + row.median_strip + 2.0 * row.shoulder_width
            return row.road_width
    return 3.5
def assign_min_max_speeds_to_national_roads_from_file(x,flat_width_range_list,mountain_width_range_list):
'''
Assign speeds to national roads in Vietnam
The speeds are assigned based on our understanding of:
1. The class of the road
2. The estimated speed from the CVTS data
3. The terrain of the road
Inputs are:
x - dataframe row
flat_width_range_list - List of tuples containing flat road properties
mountain_width_range_list - List of tuples containing mountain road properties
Outputs are:
min and max speeds - assigned speeds of the road asset based on estimated speeds and design specifications
'''
road_class = x.road_class
road_terrain = x.terrain
est_speed = x.est_speed
min_speed = est_speed
max_speed = est_speed
if road_terrain == 'flat':
for vals in flat_width_range_list:
if road_class == vals.road_class:
if est_speed | |
# -*- coding: utf-8 -*-
from framework import BasePlayer, Choice, ChoiceDetails, utils, HintDetails
import functools
from copy import deepcopy
debug = False
class BaseTrustful:
    def __init__(self):
        # Per-player record of what kind of hint (if any) each card slot has
        # received; populated by initialize_card_hint_history().
        self.card_hint_type = {}
        # Cards per hand; overwritten from round_info on initialization.
        self.hand_size = 5
def initialize_card_hint_history(self, round_info):
original_player_number = round_info.player_turn
player_number = original_player_number
just_began = True
self.hand_size = len(round_info.player_hand)
while just_began or player_number is not original_player_number:
just_began = False
self.card_hint_type[player_number] = []
for x in range(len(round_info.player_hand)):
self.card_hint_type[player_number].append(None)
player_number = utils.next_player_number(round_info, player_number)
def extrapolate_board_state(self, round_info, target_player):
player_number = utils.next_player_number(round_info, round_info.player_turn)
predicted_board_state = deepcopy(round_info.board_state)
while player_number is not target_player:
play = self.check_for_obvious_play(round_info, player_number)
if play is False:
play = self.check_for_hinted_play(round_info, player_number)
if play is not False:
player_hand = utils.get_player_hand_by_number(round_info, player_number)
suit = player_hand[play[1]].real_suit
rank = player_hand[play[1]].real_rank
self.info("{0} {1}".format(suit, rank))
if suit is None or rank is None or predicted_board_state[suit] is rank.value - 1:
predicted_board_state[suit] += 1
player_number = utils.next_player_number(round_info, player_number)
return predicted_board_state
def check_play_history(self, round_info):
    """Replay the last round of moves to keep ``card_hint_type`` current.

    Two phases over the most recent ``amount_of_players`` history entries:

    1. Rewind: walk the history backwards, undoing each move's effect on a
       copy of the board/lives/hints/discard/play piles so that
       ``current_*`` describes the game state *before* that round started.
    2. Replay: walk the same moves forwards, re-applying them and -- for
       every HINT -- reconstructing the hinter's view of the game at that
       moment to classify the hint as a "Play" hint or an "Information"
       hint for each touched card.
    """
    original_player_number = round_info.player_turn
    player_number = original_player_number
    amount_of_players = len(self.card_hint_type)
    current_board_state = deepcopy(round_info.board_state)
    current_lives = round_info.lives
    current_hints = round_info.hints
    current_discarded = round_info.discarded
    current_played = round_info.played
    # Phase 1: undo the last round of moves (newest to oldest).
    for i in range(-1, -amount_of_players - 1, -1):
        if len(round_info.history) + i >= 0:
            move = round_info.history[i]
            if move[0] is Choice.PLAY:
                if move[2].real_rank.value is current_board_state[move[2].real_suit]:
                    # Successful play: lower the pile (a completed pile had
                    # also granted a hint, take it back).
                    if current_board_state[move[2].real_suit] == 5:
                        current_hints = max(0, current_hints - 1)
                    current_board_state[move[2].real_suit] -= 1
                    current_played.pop()
                else:
                    # Misplay: the card went to the discard and cost a life.
                    current_discarded.pop()
                    current_lives = min(utils.LIVES, current_lives + 1)
            if move[0] is Choice.DISCARD:
                current_discarded.pop()
                current_hints = max(0, current_hints - 1)
            if move[0] is Choice.HINT:
                current_hints = min(current_hints + 1, utils.MAX_HINTS)
    # Phase 2: re-apply the moves oldest to newest, updating hint labels.
    for i in range(-amount_of_players, 0):
        if len(round_info.history) + i >= 0:
            move = round_info.history[i]
            if debug and round_info.log:
                self.info("{0}, {1}, {2}, {3}, {4}".format(player_number, move[0], move[1], move[2], move[3]))
            if move[0] is Choice.PLAY or move[0] is Choice.DISCARD:
                if debug and round_info.log:
                    self.info("{0}".format(self.card_hint_type[player_number][move[1]]))
                if move[0] is Choice.PLAY:
                    if move[2].real_rank.value is current_board_state[move[2].real_suit] + 1:
                        current_board_state[move[2].real_suit] += 1
                        if current_board_state[move[2].real_suit] == 5:
                            current_hints = min(current_hints + 1, utils.MAX_HINTS)
                        current_played.append(move[2])
                    else:
                        current_discarded.append(move[2])
                        current_lives = max(1, current_lives - 1)
                if move[0] is Choice.DISCARD:
                    current_hints = min(utils.MAX_HINTS, current_hints + 1)
                    current_discarded.append(move[2])
                # The played/discarded slot is refilled: clear its label and,
                # when move[3] == 0, shift the remaining labels down.
                self.card_hint_type[player_number][move[1]] = None
                if move[3] == 0:
                    for x in range(move[1], self.hand_size - 1):
                        self.card_hint_type[player_number][x] = self.card_hint_type[player_number][x + 1]
            if move[0] is Choice.HINT:
                target_player = move[1]
                hint = move[2]
                hint_type = "Play"
                # Rebuild the hinter's view of the game at the time of the hint.
                current_round_info = deepcopy(round_info)
                current_round_info.player_turn = player_number
                current_round_info.board_state = deepcopy(current_board_state)
                current_round_info.lives = current_lives
                current_round_info.hints = current_hints
                current_round_info.played = current_played
                current_round_info.discarded = current_discarded
                current_round_info.other_players_hands = []
                hands = deepcopy(round_info.hands_history[i - 1])
                for hand in hands:
                    if hand.player_number is player_number:
                        current_round_info.player_hand = hand
                    else:
                        current_round_info.other_players_hands.append(hand)
                # Number of seats between hinter and hint target.
                player_distance = target_player - player_number - 1
                if player_distance < 0:
                    player_distance += round_info.number_of_players
                if debug and round_info.log:
                    self.info("{0}".format(round_info.player_hand))
                    self.info("{0}, C: {1}, N: {2}".format(player_distance, current_round_info.player_hand,
                                                           current_round_info.other_players_hands[
                                                               0].current_knowledge()))
                if target_player is original_player_number:
                    player_hand = round_info.player_hand
                else:
                    player_hand = round_info.hands_history[i][target_player]
                if player_distance == 0:
                    # Hint to the very next player: if they already had a
                    # move lined up, this hint carries information instead.
                    answer = self.check_for_obvious_play(current_round_info, target_player)
                    if answer is False:
                        answer = self.check_for_hinted_play(current_round_info, target_player)
                    if answer is not False:
                        if debug and round_info.log:
                            self.info("{0}".format(answer))
                        position = answer[1]
                        self.card_hint_type[target_player][position] = "Information"
                        if player_hand[position].revealed_rank is not None and \
                                player_hand[position].revealed_suit is not None:
                            hint_type = "Information"
                    else:
                        answer = self.check_for_guess_discard(current_round_info, target_player)
                        if debug and round_info.log:
                            self.info("{0}".format(answer))
                        position = answer[1]
                        if isinstance(hint, utils.Rank):
                            if player_hand[position].revealed_rank is not None and \
                                    player_hand[position].revealed_rank is hint:
                                hint_type = "Information"
                        else:
                            if player_hand[position].revealed_suit is not None and \
                                    player_hand[position].revealed_suit is hint:
                                hint_type = "Information"
                # A rank hint far above every pile cannot be a play hint.
                if isinstance(hint, utils.Rank) and max(current_board_state.values()) < hint.value - 1:
                    hint_type = "Information"
                # Label every card the hint touched (unless already labelled);
                # a second, orthogonal reveal clears the label.
                for x in range(0, len(player_hand)):
                    if isinstance(hint, utils.Rank):
                        if player_hand[x].revealed_rank is not None and player_hand[x].revealed_rank is hint:
                            if self.card_hint_type[target_player][x] is None:
                                self.card_hint_type[target_player][x] = hint_type
                        elif player_hand[x].revealed_suit is not None:
                            self.card_hint_type[target_player][x] = None
                    else:
                        if player_hand[x].revealed_suit is not None and player_hand[x].revealed_suit is hint:
                            if self.card_hint_type[target_player][x] is None:
                                self.card_hint_type[target_player][x] = hint_type
                        elif player_hand[x].revealed_rank is not None:
                            self.card_hint_type[target_player][x] = None
                current_hints = max(0, current_hints - 1)
                if debug and round_info.log:
                    self.info("{0}".format(hint_type))
        player_number = utils.next_player_number(round_info, player_number)
def check_for_obvious_play(self, round_info, player_number):
    """Return a PLAY choice for a card known to be playable, else False.

    A card is obviously playable when its fully revealed suit+rank fits the
    board, or when every pile has the same height ("stars align") so the
    revealed rank alone is conclusive.  Prefers the lowest playable rank.
    """
    if player_number is round_info.player_turn:
        player_hand = round_info.player_hand
    else:
        player_hand = utils.get_player_hand_by_number(round_info, player_number)
    # Rank playable on *every* pile, or -1 when piles are uneven.
    board_state_stars_align = round_info.board_state[utils.Suit.BLUE] + 1
    prev = False
    for suit in round_info.board_state:
        # Bug fix: pile heights are ints -- compare with != rather than
        # `is not` (identity works only by accident for small ints).
        if prev is not False and round_info.board_state[suit] != prev:
            board_state_stars_align = -1
        prev = round_info.board_state[suit]
    best_card = -1
    best_card_rank = 6  # one above the highest possible rank
    for card in player_hand:
        if ((card.revealed_rank is not None and card.revealed_suit is not None and
             round_info.board_state[card.revealed_suit] + 1 == card.revealed_rank.value) or
            (card.revealed_rank is not None and card.revealed_rank.value == board_state_stars_align)) and \
                card.revealed_rank.value < best_card_rank:
            best_card = card.hand_position
            best_card_rank = card.revealed_rank.value
    if best_card >= 0:
        return ChoiceDetails(
            Choice.PLAY,
            best_card
        )
    return False
def check_for_hinted_play(self, round_info, player_number):
    """Return a PLAY choice for the best card implied by "Play" hints, else False.

    Counts partially revealed cards carrying a "Play" label (suit-only and
    rank-only separately), scores each candidate hint against how many piles
    could actually accept it, and plays the first matching card when the
    hint is unambiguous enough.  With one life left the tolerance drops to
    zero and only single-card hints are trusted.
    """
    if player_number is round_info.player_turn:
        player_hand = round_info.player_hand
    else:
        player_hand = utils.get_player_hand_by_number(round_info, player_number)
    # How much ambiguity (hint size vs. board alignment) is tolerated.
    alignment_delta = 2
    max_hint_size = 10
    if round_info.lives == 1:
        alignment_delta = 0
        max_hint_size = 1
    hinted_ranks = {}
    hinted_suits = {}
    for suit in utils.Suit:
        hinted_suits[suit] = 0
    for rank in utils.Rank:
        hinted_ranks[rank] = 0
    # Count "Play"-labelled cards with exactly one attribute revealed.
    for x in range(0, len(player_hand)):
        if player_hand[x].revealed_suit is not None and player_hand[x].revealed_rank is None and \
                self.card_hint_type[player_number][x] == "Play":
            hinted_suits[player_hand[x].revealed_suit] += 1
        if player_hand[x].revealed_rank is not None and player_hand[x].revealed_suit is None and \
                self.card_hint_type[player_number][x] == "Play":
            hinted_ranks[player_hand[x].revealed_rank] += 1
    known = utils.list_all_known_cards(round_info, player_number)[0]
    remaining = utils.list_remaining_playable_cards(round_info)
    discarded = utils.list_discarded_cards(round_info)
    best_hint = -1
    best_hint_size = max_hint_size
    best_alignment = 0
    hint_type = None
    # Suit hints: the next rank of that pile must still be obtainable.
    for suit in hinted_suits:
        if 0 < hinted_suits[suit] <= best_hint_size:
            rank = round_info.board_state[suit] + 1
            if rank <= 5:
                rank_rank = utils.Rank(rank)
                if remaining[suit][rank_rank] - known[suit][rank_rank] + discarded[suit][rank_rank] > 0:
                    best_hint = suit
                    best_hint_size = hinted_suits[suit]
                    best_alignment = 1
                    hint_type = 'suit'
    # Rank hints: count how many piles currently need that rank.
    board_alignment = {}
    for rank in utils.Rank:
        board_alignment[rank] = 0
    for suit in round_info.board_state:
        rank = round_info.board_state[suit] + 1
        if rank <= 5:
            rank_rank = utils.Rank(rank)
            if remaining[suit][rank_rank] - known[suit][rank_rank] + discarded[suit][rank_rank] > 0:
                board_alignment[rank_rank] += 1
    for rank in hinted_ranks:
        if 0 < board_alignment[rank] and ((0 < hinted_ranks[rank] < best_hint_size) or
                                          (0 < hinted_ranks[rank] <= best_hint_size and best_alignment <
                                           board_alignment[rank])):
            best_hint = rank
            best_hint_size = hinted_ranks[rank]
            best_alignment = board_alignment[rank]
            hint_type = 'rank'
    # Play the first card matching the winning hint, if it is safe enough.
    if best_hint != -1 and best_hint_size <= best_alignment + alignment_delta:
        for x in range(0, len(player_hand)):
            if hint_type == 'rank':
                if player_hand[x].revealed_rank is not None and player_hand[x].revealed_suit is None and \
                        player_hand[x].revealed_rank is best_hint and \
                        self.card_hint_type[player_number][x] == "Play":
                    return ChoiceDetails(
                        Choice.PLAY,
                        x
                    )
            else:
                if player_hand[x].revealed_suit is not None and player_hand[x].revealed_rank is None and \
                        player_hand[x].revealed_suit is best_hint and \
                        self.card_hint_type[player_number][x] == "Play":
                    return ChoiceDetails(
                        Choice.PLAY,
                        x
                    )
    return False
def check_card_usefulness(self, round_info, card):
    """Return a DISCARD choice when *card* is provably useless, else False.

    A pile's "point of uselessness" is the first rank above the current
    height of which no copy survives; that rank and everything above it
    can never be played on that pile.
    """
    remaining = utils.list_remaining_playable_cards(round_info)
    useless = False
    point_of_uselessness = {}
    for suit in utils.Suit:
        point_of_uselessness[suit] = None
        for rank in utils.Rank:
            if round_info.board_state[suit] < rank.value:
                if point_of_uselessness[suit] is None and remaining[suit][rank] == 0:
                    point_of_uselessness[suit] = rank
    if card.revealed_suit is not None:
        # Pile complete, or the very next rank is already dead.
        # Bug fix: the rank comparison uses == instead of `is` --
        # identity on ints is not a reliable equality test.
        if round_info.board_state[card.revealed_suit] == 5 or \
                (point_of_uselessness[card.revealed_suit] is not None and
                 round_info.board_state[card.revealed_suit] + 1 == point_of_uselessness[card.revealed_suit].value):
            useless = True
    if card.revealed_rank is not None:
        # Useless unless some pile still needs this rank below its dead point.
        useless = True
        for suit in utils.Suit:
            if round_info.board_state[suit] < card.revealed_rank.value and \
                    (point_of_uselessness[suit] is None or
                     point_of_uselessness[suit].value > card.revealed_rank.value):
                useless = False
    if card.revealed_suit is not None and card.revealed_rank is not None:
        # Fully known card: decide on its own pile alone.
        if round_info.board_state[card.revealed_suit] < card.revealed_rank.value and \
                (point_of_uselessness[card.revealed_suit] is None or
                 point_of_uselessness[card.revealed_suit].value > card.revealed_rank.value):
            useless = False
        else:
            useless = True
    if useless:
        return ChoiceDetails(
            Choice.DISCARD,
            card.hand_position
        )
    return False
def check_for_obvious_discard(self, round_info, player_number):
    """Discard the first card proven useless, or return False."""
    if player_number is round_info.player_turn:
        hand = round_info.player_hand
    else:
        hand = utils.get_player_hand_by_number(round_info, player_number)
    for card in hand:
        verdict = self.check_card_usefulness(round_info, card)
        if verdict is not False:
            return verdict
    return False
def check_for_guess_discard(self, round_info, player_number):
if player_number is round_info.player_turn:
player_hand = round_info.player_hand
else:
player_hand = utils.get_player_hand_by_number(round_info, player_number)
unmarked = []
for card in player_hand:
if card.revealed_rank is None and card.revealed_suit is None:
unmarked.append(card)
if len(unmarked) == 0:
known = utils.list_all_known_cards(round_info, player_number)[0]
remaining = utils.list_remaining_playable_cards(round_info)
discarded = utils.list_discarded_cards(round_info)
for card in player_hand:
if card.revealed_rank is None:
add = True
for rank in utils.Rank:
if round_info.board_state[card.revealed_suit] < rank.value and \
remaining[card.revealed_suit][rank] == 1 and \
known[card.revealed_suit][rank] - discarded[card.revealed_suit][rank] == 0:
add = False
if add:
unmarked.append(card)
elif card.revealed_suit is None:
add = True
for suit in remaining:
if round_info.board_state[suit] < card.revealed_rank.value and \
remaining[suit][card.revealed_rank] == 1 and \
known[suit][card.revealed_rank] - discarded[suit][card.revealed_rank] == 0:
add = False
if add:
unmarked.append(card)
if len(unmarked) == 0:
unmarked = | |
"""
Python Interchangeable Virtual Instrument Driver
Copyright (c) 2017 <NAME>
derived from agilent436a.py driver by:
Copyright (c) 2012-2014 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
from .. import counter
from .. import vxi11
#import time
# Parameter Values

# Channel index -> front-panel letter used in command mnemonics.
ChanNameMap = {0 : 'A', 1 : 'B', 2 : 'C'}
Units = set(['Sec', 'Hz', 'Volts'])
Operator = set(['none', 'difference', 'sum', 'quotient'])
RangeType = set(['in_range', 'under_range', 'over_range'])
OperationState = set(['complete', 'in_progress', 'unknown'])
# Measurement functions from the IVI counter class specification.
MeasurementFunction = set(['frequency',
                           'period',
                           'pulse_width',
                           'duty_cycle',
                           'edge_time',
                           'frequency_ratio',
                           'time_interval',
                           'totalize_continuous',
                           'totalize_gated',
                           'totalize_timed',
                           'invalid'])
# IVI function name -> HP5334 "FN" command mnemonic.
MeasurementFunctionMap = {'frequency' : 'FN', # fn1, fn2, fn3 is a, b, and c channel
                          'period': 'FN4',
                          'time_interval' : 'FN5',
                          'time_interval_delay' : 'FN6',
                          'frequency_ratio' : 'FN7',
                          'total_stop' : 'FN8', # non standard
                          'total_start' : 'FN9', # non standard
                          'pulse_width' : 'FN10',
                          'edge_time' : 'FN11',
                          #'dc_voltage' : 'FN12',
                          #'trigger_voltage' : 'FN13',
                          #'peak_to_peak_voltage' : 'FN14',
                          #'totalize_timed' : 'x',
                          #'totalize_gated' : 'xx',
                          'invalid' : 'inv'}
# Device "TE" error codes (scaled by 10) -> human-readable message.
ErrorMessages = { 0 : 'No error', # to accurately reflect error codes of device, divide by 10
                  10 : 'Parameter disallowed in present mode',
                  11 : 'Attenuators controlled by AUTO TRIG',
                  12 : '50-ohm B, AC B settings preset by COM A',
                  13 : 'Slope B set by Slope A in Rise/Fall mode',
                  14 : 'Parameter disallowed in High Speed mode',
                  15 : 'Calibration data unaccessible in present mode',
                  20 : 'Invalid key entry',
                  21 : 'Data outside valid range',
                  22 : 'Data exceeds maximum resolution',
                  23 : 'Mantissa digit buffer full',
                  24 : 'Decimal point previously entered',
                  30 : 'Multiple key closures',
                  40 : 'Mnemonic not recognizable',
                  41 : 'Numeric syntax error',
                  42 : 'Alpha character expected',
                  43 : 'Data exceeds valid range',
                  44 : 'Attention (ATN) asserted in Talk-Only mode',
                  50 : 'Store instrument setup operation failed', #50.X where x is the register number: 0-9
                  51 : 'Recall instrument setup operation failed', #51.X
                  52 : 'HP-IB address cannot be recalled at power up; address default to 03'}
class agilentBase5334(ivi.Driver, counter.Base):
"Agilent HP5334 Series IVI Universal Counter driver"
def __init__(self, *args, **kwargs):
    """Create the driver instance and populate identity metadata and channels."""
    # Ensure the attribute exists before the base-class initialiser runs.
    self.__dict__.setdefault('_instrument_id', '')
    super(agilentBase5334, self).__init__(*args, **kwargs)
    self._identity_description = "Agilent HP5334 Universal Counter driver"
    # Fields with no meaningful value until the instrument is queried.
    for attr in ('_identity_identifier',
                 '_identity_revision',
                 '_identity_vendor',
                 '_identity_instrument_model',
                 '_identity_instrument_firmware_revision'):
        setattr(self, attr, "")
    self._identity_instrument_manufacturer = "Agilent Technologies"
    self._identity_specification_major_version = 1
    self._identity_specification_minor_version = 0
    self._identity_supported_instrument_models = ['HP5334A','HP5334B']
    self._init_defaults()
    self._init_channels()
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
    "Opens an I/O session to the instrument."
    super(agilentBase5334, self)._initialize(resource, id_query, reset, **keywargs)
    # configure interface
    if self._interface is not None:
        self._interface.term_char = '\n'
    # interface clear
    if not self._driver_operation_simulate:
        self._clear()
    # verify instrument model matches
    if id_query and not self._driver_operation_simulate:
        inst_id = self.identity.instrument_model  # renamed: `id` shadowed the builtin
        id_check = self._instrument_id
        id_short = inst_id[:len(id_check)]
        if id_short != id_check:
            # Bug fix: the message is now actually %-formatted; previously the
            # placeholders and values were passed as separate Exception args
            # and the raised message still contained literal "%s".
            raise Exception("Instrument ID mismatch, expecting %s, got %s" % (id_check, id_short))
    # reset
    if reset:
        self.utility_reset()
def _load_id_string(self):
    """Query the instrument identification ("ID") and cache identity fields.

    In simulation mode a fixed "HP5334S" string is used.  The firmware
    revision is not readable on this instrument, so a placeholder is cached.
    """
    # Invalidate before querying so stale values cannot be served.
    self._set_cache_valid(False, 'identity_instrument_manufacturer')
    self._set_cache_valid(False, 'identity_instrument_model')
    self._set_cache_valid(False, 'identity_instrument_firmware_revision')
    idstr = "HP5334S"
    if not self._driver_operation_simulate:
        idstr = self._ask("ID")
    if idstr.find('HP') == 0:
        self._identity_instrument_manufacturer = 'Agilent'
        self._set_cache_valid(True, 'identity_instrument_manufacturer')
    self._identity_instrument_model = idstr
    self._identity_instrument_firmware_revision = 'Cannot query from instrument'
    self._set_cache_valid(True, 'identity_instrument_model')
    self._set_cache_valid(True, 'identity_instrument_firmware_revision')
def _get_identity_instrument_manufacturer(self):
    """Return the manufacturer string, querying the instrument on a cache miss."""
    if not self._get_cache_valid('identity_instrument_manufacturer'):
        self._load_id_string()
    return self._identity_instrument_manufacturer
def _get_identity_instrument_model(self):
    """Return the model string, querying the instrument on a cache miss."""
    if not self._get_cache_valid('identity_instrument_model'):
        self._load_id_string()
    return self._identity_instrument_model
def _get_identity_instrument_firmware_revision(self):
    """Return the firmware revision, loading the ID string on a cache miss."""
    # Consistency fix: pass the cache tag explicitly like the sibling getters
    # instead of relying on _get_cache_valid() inferring it from the stack.
    if self._get_cache_valid('identity_instrument_firmware_revision'):
        return self._identity_instrument_firmware_revision
    self._load_id_string()
    return self._identity_instrument_firmware_revision
def _utility_disable(self):
    """IVI 'disable' has no meaningful action on this instrument; no-op."""
    pass
def _utility_error_query(self):
    """Read and decode the instrument error status ("TE" query).

    Returns ``(code, message)``.  The device reports codes in tenths
    (e.g. "5.1") and they are scaled by 10 here; codes 50.x and 51.x carry
    the affected setup-register number in the fractional digit.
    On communication or decode failure the code is -1.
    """
    error_code = 0
    error_message = "No error"
    try:
        error_code = self._ask("TE")
        error_code = float(error_code) * 10
        if 50 <= error_code < 52:
            # Store/recall failure: fractional digit is the register (0-9).
            # Bug fix: the register number is now reported for 50.x too
            # (previously it was computed and silently dropped).
            base = int(error_code)
            regnum = int((error_code % base) * 10.01)
            error_code = base
            error_message = "Register " + str(regnum) + ' ' + ErrorMessages[base]
        else:
            # Bug fix: code 52 previously fell through every branch and was
            # reported as "No error"; it now resolves via the lookup table.
            error_message = ErrorMessages[error_code]
    except vxi11.vxi11.Vxi11Exception as err:
        error_message = err.msg
        error_code = -1
    except ValueError:
        error_message = "bad error code: " + str(error_code)
        error_code = -1
    except KeyError:
        error_message = "undefined error code: " + str(error_code)
        error_code = -1
    return (int(error_code), error_message)
def _utility_lock_object(self):
    """Instrument locking is not supported by this driver; no-op."""
    pass
def _utility_unlock_object(self):
    """Instrument unlocking is not supported by this driver; no-op."""
    pass
def _utility_reset(self):
    """Reset the instrument ("IN"), clear the interface, and restore driver defaults."""
    #if not self._driver_operation_simulate:
    self._write("IN")
    self._clear()
    # Cached attribute values are stale after a device reset.
    self.driver_operation.invalidate_all_attributes()
    self._init_defaults()
def _utility_reset_with_defaults(self):
    """Alias for _utility_reset; the plain reset already restores defaults."""
    self._utility_reset()
def _utility_self_test(self):
    """Self-test is not implemented for this instrument family."""
    raise ivi.OperationNotSupportedException()
def _init_defaults(self):
    """Restore driver-side defaults mirroring the instrument power-on state."""
    self._measurement_function = 'frequency'
    self.driver_operation.invalidate_all_attributes()
    self._frequency_aperture = 0.3
    self._period_aperture = 0.3
    # Bug fix: `==` compared and discarded the result, so the attribute was
    # never assigned; it must be set with `=`.
    self._time_interval_resolution = 1e-9
def _init_channels(self):
    """Create the three input channels (A, B, C) with power-on defaults."""
    try:
        super(agilentBase5334, self)._init_channels()
    except AttributeError:
        pass
    self._channel_count = 3
    # Identical defaults for every channel; channel C is patched below.
    self._channel_name = [ChanNameMap[i] for i in range(self._channel_count)]
    self._channel_impedance = [1e6] * self._channel_count
    self._channel_coupling = ['dc'] * self._channel_count
    self._channel_attenuation = [1] * self._channel_count
    self._channel_level = [-50] * self._channel_count
    self._channel_hysteresis = [0] * self._channel_count
    self._channel_slope = ['positive'] * self._channel_count
    self._channel_filter_enabled = [False] * self._channel_count
    self.channels._set_list(self._channel_name)
    # Chan C not settable, override defaults
    self._channel_impedance[2] = 50
    self._channel_coupling[2] = 'ac'
def _get_channel_impedance(self, index):
    """Return the cached input impedance for the given channel."""
    chan = ivi.get_index(self._channel_name, index)
    return self._channel_impedance[chan]
def _set_channel_impedance(self, index, value):
    """Select 1 Mohm (value > 99) or 50 ohm input impedance on channel A/B."""
    if index > 1:
        # Channel C impedance is fixed hardware.
        raise ivi.SelectorNameException()
    chan = ivi.get_index(self._channel_name, index)
    ohms = float(value)
    #if not self._driver_operation_simulate:
    if ohms > 99:
        self._write(ChanNameMap[chan] + "Z0")  # 1 Mohm input
        self._channel_impedance[chan] = 1e6
    else:
        self._write(ChanNameMap[chan] + "Z1")  # 50 ohm input
        self._channel_impedance[chan] = 50
def _get_channel_coupling(self, index):
    """Return the cached coupling ('ac'/'dc') for the given channel."""
    chan = ivi.get_index(self._channel_name, index)
    return self._channel_coupling[chan]
def _set_channel_coupling(self, index, value):
    """Set AC or DC coupling on channel A/B."""
    if index > 1:
        # Channel C coupling is fixed hardware.
        raise ivi.SelectorNameException()
    chan = ivi.get_index(self._channel_name, index)
    if value not in counter.Coupling:
        raise ivi.ValueNotSupportedException()
    suffix = "A1" if value == "ac" else "A0"
    self._write(ChanNameMap[chan] + suffix)
    self._channel_coupling[chan] = value
def _get_channel_attenuation(self, index):
    """Return the cached attenuation (1 or 10) for the given channel."""
    chan = ivi.get_index(self._channel_name, index)
    return self._channel_attenuation[chan]
def _set_channel_attenuation(self, index, value):
    """Set input attenuation on channel A/B; only x1 and x10 exist."""
    if index > 1:
        raise ivi.SelectorNameException()
    chan = ivi.get_index(self._channel_name, index)
    value = float(value)
    if value == 1:
        suffix = "X0"  # x1
    elif value == 10:
        suffix = "X1"  # x10
    else:
        raise ivi.ValueNotSupportedException("attenuation must be '1' or '10'")
    self._write(ChanNameMap[chan] + suffix)
    self._channel_attenuation[chan] = value
def _get_channel_level(self, index):
    """Return the cached trigger level for the given channel."""
    chan = ivi.get_index(self._channel_name, index)
    return self._channel_level[chan]
def _set_channel_level(self, index, value):
    """Set the trigger level for channel A/B.

    Values beyond roughly +/-50 * attenuation switch the instrument to
    manual (front-panel) or automatic triggering instead of programming
    the level DAC; only the HP5334A has a trigger-level DAC.
    """
    if index > 1:
        raise ivi.SelectorNameException()
    index = ivi.get_index(self._channel_name, index)
    value = float(value)
    max_atten = 10
    if value > 4.999 * max_atten:
        # set instrument to manual trigger (front panel knobs)
        self._write('AU0')
    elif value < -4.999 * max_atten:
        # set instrument to automatic trigger
        self._write('AU1')
    elif self._get_identity_instrument_model() == 'HP5334A':
        # set A instrument trigger dac values
        # Bug fix: `value` is a float; concatenating it directly raised
        # TypeError -- convert to str first.
        self._write(ChanNameMap[index] + "T" + str(value))
    else:
        # B instrument has no dac. ignore for now.
        pass
    self._channel_level[index] = value
def _get_channel_hysteresis(self, index):
    """Return the cached hysteresis setting for the given channel.

    Bug fix: previously returned ``self._channel_level`` (copy-paste
    error), so the value written by _set_channel_hysteresis was never
    read back.
    """
    index = ivi.get_index(self._channel_name, index)
    return self._channel_hysteresis[index]
def _set_channel_hysteresis(self, index, value):
    """Cache the hysteresis setting (driver-side only; nothing is sent)."""
    chan = ivi.get_index(self._channel_name, index)
    self._channel_hysteresis[chan] = float(value)
def _get_channel_slope(self, index):
    """Return the cached trigger slope for the given channel."""
    chan = ivi.get_index(self._channel_name, index)
    return self._channel_slope[chan]
def _set_channel_slope(self, index, value):
    """Set the trigger slope ('positive'/'negative') on channel A/B."""
    if index > 1:
        raise ivi.SelectorNameException()
    chan = ivi.get_index(self._channel_name, index)
    if value not in counter.Slope:
        raise ivi.ValueNotSupportedException()
    suffix = "S0" if value == "positive" else "S1"
    self._write(ChanNameMap[chan] + suffix)
    self._channel_slope[chan] = value
def _get_channel_filter_enabled(self, index):
    """Return whether the 100 kHz filter is on; only channel A has one."""
    chan = ivi.get_index(self._channel_name, index)
    if chan != 0:
        raise ivi.ValueNotSupportedException()
    return self._channel_filter_enabled[chan]
def _set_channel_filter_enabled(self, index, value):
    """Enable/disable the 100 kHz low-pass filter (channel A only)."""
    if index != 0:
        raise ivi.SelectorNameException()
    index = ivi.get_index(self._channel_name, index)
    # Idiom fix: truth-test the flag instead of comparing `== True`.
    if value:
        self._write("FI1") # 100khz filter on (a channel only)
    else:
        self._write("FI0") # filter off.
    self._channel_filter_enabled[index] = value
# totalize
def | |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 31 15:05:36 2018
@author: a001985
"""
import os
import shutil
import time
import json
import codecs
import pandas as pd
import logging
import importlib
# TODO: Move this!
#current_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
#current_path = os.path.dirname(os.path.realpath(__file__))
#print(current_path)
##sgdöljdf
#if current_path not in sys.path:
# sys.path.append(current_path)
import core
import core.exceptions as exceptions
"""
Module to handle all events linked to the Django application.
Maybe this should be in the root? Maybe not a class, only functions? Maybe __name__ == "__main__"?
MW: Started this to start logging functionality.
"""
class EventHandler(object):
def __init__(self,
             user_id=None,
             workspace_directory='',
             resource_directory='',
             log_directory='',
             test_data_directory=''):
    """Create an event handler bound to a single user.

    Created 20180219, updated 20180616.

    MW 20180530: only one user per event handler.  In terms of user_id this
    does not really matter at the moment: user_id must be given in every
    call and the corresponding uuid_mapping file is loaded in the method
    call if needed.

    All directory arguments except test_data_directory are mandatory.
    Sets up logging, the (initially empty) workspace registry, and the
    shared mapping objects loaded from the resource directory.
    """
    assert all([user_id, workspace_directory, resource_directory, log_directory]), 'Missing directory paths when creating EventHandler instance.'
    self.user_id = user_id
    self.workspace_directory = workspace_directory
    self.resource_directory = resource_directory
    self.log_directory = log_directory
    self.test_data_directory = test_data_directory
    self.log_id = 'event_handler'
    # Workspace/subset statuses visible by default vs. every known status.
    self.include_status = ['editable', 'readable']
    self.all_status = ['editable', 'readable', 'deleted', 'inactive']
    # Add logger
    core.add_log(log_id=self.log_id,
                 log_directory=self.log_directory,
                 log_level='DEBUG',
                 on_screen=True,
                 prefix='main')
    # Test main logger
    self._logger = core.get_log(self.log_id)
    self._logger.debug('Start EventHandler: {}'.format(self.log_id))
    # Loaded workspace objects, keyed by workspace uuid.
    self.workspaces = {}
    # Mapping objects shared by all workspaces of this handler.
    self.mapping_objects = {}
    self.mapping_objects['water_body'] = core.WaterBody(file_path=os.path.join(self.resource_directory, 'mappings/water_body_match.txt'))
    self.mapping_objects['quality_element'] = core.QualityElement(file_path=os.path.join(self.resource_directory, 'Quality_Elements.cfg'))
    self.mapping_objects['hypsographs'] = core.Hypsograph(file_path=os.path.join(self.resource_directory, 'mappings/hypsographs.txt'))
    self.mapping_objects['display_mapping'] = core.ParameterMapping()
    self.mapping_objects['display_mapping'].load_mapping_settings(file_path=os.path.join(self.resource_directory, 'mappings/display_mapping.txt'))
    self.mapping_objects['indicator_settings_homogeneous_parameters'] = core.IndSetHomPar(file_path=os.path.join(self.resource_directory, 'mappings/indicator_settings_homogeneous_parameters.txt'))
    self.mapping_objects['indicator_settings_matching_columns'] = core.SimpleList(file_path=os.path.join(self.resource_directory, 'mappings/indicator_settings_matching_columns.txt'))
    self.mapping_objects['indicator_settings_items_to_show_in_gui'] = core.SimpleList(file_path=os.path.join(self.resource_directory, 'mappings/indicator_settings_items_to_show_in_gui.txt'))
    self.mapping_objects['indicator_settings_items_editable_in_gui'] = core.SimpleList(file_path=os.path.join(self.resource_directory, 'mappings/indicator_settings_items_editable_in_gui.txt'))
    if self.test_data_directory:
        self.load_test_requests()
#==========================================================================
def _change_ok(self, alias):
if alias in ['default_workspace', 'default_subset']:
self._logger.warning('Not allowed to make changes to "{}"!'.format(alias))
return False
return True
#==========================================================================
def _get_active_values_in_list_with_dicts(self, dict_list):
"""
Created 20180315 by <NAME>
Updated 20180315 by <NAME>
Checks a list containing dictionaries. For each dict in list, if active,
value is put in return list.
"""
return_list = []
for item in dict_list:
if item['active']:
return_list.append(item['value'])
return return_list
#==========================================================================
def _get_mapping_for_name_in_dict(self, name, list_of_dicts):
return_mapping = {}
if not list_of_dicts:
return return_mapping
for item in list_of_dicts:
return_mapping[item[name]] = item
return return_mapping
#==========================================================================
def _get_workspace_object(self, unique_id=None):
    """Return the loaded workspace for *unique_id*, or False when not loaded.

    Updated 20180530.
    """
    # TODO: _get_workspace_object and self.get_workspace does the same thing.
    # TODO: Maybe use self.get_workspace to handle status and check against user etc
    return self.workspaces.get(unique_id, False)
#==========================================================================
def _get_uuid_mapping_object(self):
    """Return the core.UUIDmapping for this user.

    Side effect: on first use the backing mapping file is seeded from the
    template in the resource directory.
    """
    file_path = '{}/uuid_mapping_{}.txt'.format(self.workspace_directory, self.user_id)
    if not os.path.exists(file_path):
        # First call for this user: copy the template mapping file.
        shutil.copy('{}/templates/uuid_mapping.txt'.format(self.resource_directory),
                    file_path)
    uuid_mapping_object = core.UUIDmapping(file_path, self.user_id)
    return uuid_mapping_object
#==========================================================================
def apply_data_filter(self,
workspace_uuid=None,
subset_uuid=None,
step='step_1'):
"""
Updated 20180530 by <NAME>
"""
w = self._get_workspace_object(unique_id=workspace_uuid)
w.apply_data_filter(subset=subset_uuid,step=step)
#==========================================================================
def apply_indicator_data_filter(self,
workspace_uuid='',
subset_uuid='',
indicator='',
type_area='',
step='step_2'):
"""
Created 20180319 by <NAME>
Updated 20180530 by <NAME>
"""
w = self._get_workspace_object(unique_id=workspace_uuid)
all_ok = w.apply_indicator_data_filter(subset=subset_uuid,
indicator=indicator,
type_area=type_area,
step=step)
return all_ok
#==========================================================================
def change_workspace_alias(self, unique_id, new_alias):
"""
Updated 20180530 by <NAME>
"""
uuid_mapping = self._get_uuid_mapping_object()
if not unique_id:
return False
uuid_mapping.set_alias(unique_id, new_alias)
#==========================================================================
def copy_subset(self,
workspace_uuid=None,
subset_source_uuid=None,
subset_target_alias=None):
"""
Created 20180219 by <NAME>
Updated 20180601 by <NAME>
"""
workspace_object = self.workspaces.get(workspace_uuid, False)
if not workspace_object:
self._logger.warning('Workspace "{}" not loaded.'.format(subset_source_uuid))
return False
# print('!!!!!!!!!!!!', subset_source_alias)
# print('!!!!!!!!!!!!', subset_target_alias)
# print('subset_source_alias'.upper(), subset_source_alias)
self._logger.debug('Trying to copy subset "{}"'.format(subset_source_uuid))
try:
return_dict = workspace_object.copy_subset(subset_source_uuid, subset_target_alias)
except:
raise
# print('return_dict'.upper(), return_dict)
return return_dict
#==========================================================================
def copy_workspace(self, source_uuid=None, target_alias=None):
    """Copy a whole workspace directory tree and register it under a new uuid.

    Created 20180219, updated 20180530.

    Returns a dict with keys alias / workspace_uuid / status, False when
    the target alias is already taken, and None when called as the
    'default' user (copying is refused then).
    """
    if self.user_id == 'default':
        self._logger.warning('Can not copy workspace as default user. ')
        return
    uuid_mapping = self._get_uuid_mapping_object()
    # Add UUID for workspace in uuid_mapping
    target_uuid = uuid_mapping.add_new_uuid_for_alias(target_alias)
    if not target_uuid:
        self._logger.debug('Could not add workspace with alias "{}". Workspace already exists!'.format(target_alias))
        return False
    # Copy all directories and files in workspace
    source_workspace_path = '/'.join([self.workspace_directory, source_uuid])
    target_workspace_path = '/'.join([self.workspace_directory, target_uuid])
    self._logger.debug('Trying to copy workspace "{}". Copy has alias "{}"'.format(source_uuid, target_alias))
    # Copy files
    shutil.copytree(source_workspace_path, target_workspace_path)
    """
    No data is loaded yet.
    Now we need to change the uuid for the copied subsets.
    Do this by creating a UUID mapping object for the subsets and:
    1: rename in mapping file
    2: rename subset folder
    """
    target_subset_uuid_mapping_file = '{}/subsets/uuid_mapping.txt'.format(target_workspace_path)
    uuid_object = core.UUIDmapping(target_subset_uuid_mapping_file, self.user_id)
    uuid_list = uuid_object.get_uuid_list_for_user()
    for u_id in uuid_list:
        new_uuid = uuid_object.set_new_uuid(u_id)
        current_subset_path = '{}/subsets/{}'.format(target_workspace_path, u_id)
        new_subset_path = '{}/subsets/{}'.format(target_workspace_path, new_uuid)
        os.rename(current_subset_path, new_subset_path)
    status = uuid_mapping.get_status(unique_id=target_uuid) # Check in case default is changed
    return {'alias': target_alias,
            'workspace_uuid': target_uuid,
            'status': status}
#==========================================================================
def delete_subset(self, workspace_uuid=None, subset_alias=None, subset_uuid=None, permanently=False):
"""
Created 20180219 by <NAME>
Updated 20180530 by <NAME>
Deletes the given subset in the given workspace.
"""
if not self._change_ok(workspace_uuid):
return False
if not self._change_ok(subset_alias):
return False
if not workspace_uuid:
return False
workspace_object = self.workspaces.get(workspace_uuid, False)
if not workspace_object:
return False
return workspace_object.delete_subset(unique_id=subset_uuid, permanently=permanently)
#==========================================================================
    def delete_workspace(self, unique_id=None, permanently=False):
        """Delete (or soft-delete) the given workspace.

        Created 20180219 by <NAME>
        Updated 20180223 by <NAME>

        Args:
            unique_id: uuid of the workspace to delete.
            permanently: if True files are removed from disk, otherwise the
                workspace is only flagged as 'deleted' in the uuid mapping.

        Returns:
            True on success, False when the uuid is unknown or unsafe.
        """
        uuid_mapping = self._get_uuid_mapping_object()
        # print('USER_ID', user_id)
        # Only workspaces that belong to the current user may be deleted.
        if unique_id not in uuid_mapping.get_uuid_list_for_user():
            return False
        alias = uuid_mapping.get_alias(unique_id)
        # if unique_id not in self.workspaces.keys():
        #     self._logger.warning('Workspace "{}" with alias "{}" is not loaded!'.format(unique_id, alias))
        #     return False
        if permanently:
            path_to_remove = '/'.join([self.workspace_directory, unique_id])
            # Safety net: refuse to rmtree a path that does not look like a
            # workspace directory.
            if 'workspace' not in path_to_remove:
                self._logger.error('Trying to delete workspace "{}" with alias "{}" but the path to delete is not secure!'.format(unique_id, alias))
                return False
            if os.path.exists(path_to_remove) is False:
                self._logger.error('Trying to delete workspace "{}" with alias "{}" but cannot find workspace with this uuid!'.format(unique_id, alias))
                return False
            self._logger.warning('Permanently deleting workspace "{}" with alias "{}".'.format(unique_id, alias))
            # Delete files and folders:
            shutil.rmtree(path_to_remove)
            # Remove objects and links
            if unique_id in self.workspaces.keys():
                self.workspaces.pop(unique_id)
            # Remove in uuid_mapping
            uuid_mapping.permanent_delete_uuid(unique_id)
        else:
            # Soft delete: only the status changes, files stay on disk.
            self._logger.warning('Removing workspace "{}" with alias "{}".'.format(unique_id, alias))
            uuid_mapping.set_status(unique_id, 'deleted')
        return True
#==========================================================================
def dict_data_source(self,
workspace_uuid=None,
file_name=None,
request={}):
"""
Created 20180524 by <NAME>
Updated
Return dict like:
{
"filename": "chlorophyll_integrated_2015_2016_row_format.txt",
"status": True,
"loaded": True,
"datatype": "chlorophyll"
}
"""
workspace_object = self._get_workspace_object(unique_id=workspace_uuid)
if not workspace_object:
return {}
datatype_settings_object = workspace_object.datatype_settings
if not datatype_settings_object.has_info:
return {}
if request:
datatype_settings_object.set_status(file_name=file_name, status=request['status'])
info_dict = request
info_dict = datatype_settings_object.get_info_for_file(file_name)
info_dict['loaded'] = bool(info_dict['loaded'])
info_dict['status'] = bool(info_dict['status'])
# print('-'*50)
# print(info_dict)
if info_dict['filename'] == 'physicalchemical_sharkweb_data_fyskem_wb_2007-2017_20180320.txt':
self.info_dict = info_dict
return info_dict
#==========================================================================
def dict_indicator(self,
workspace_uuid=None,
subset_uuid=None,
indicator=None,
available_indicators=None,
request={},
**kwargs):
"""
Created 20180222 by <NAME>
Updated 20180321 by <NAME>
dict like:
{
"label": "Biovolume - default",
"status": "selectable",
"selected": true,
"value": "Biovolume - default",
"settings": {}
}
Update 20180608 by MW: kwargs contains what to include. Currently options are:
inidcator_settings
Usage:
if kwargs.get('<include>'):
"INCLUDE"
"""
return_dict = {"label": "",
"status": "",
"selected": False,
"value": "",
"settings": []}
# return_dict = {"label": "",
# "status": "",
# "selected": False,
# "value": ""}
workspace_object = self._get_workspace_object(unique_id=workspace_uuid)
subset_object = workspace_object.get_subset_object(subset_uuid)
if not subset_object:
self._logger.warning('Could not find subset object {}. Subset is probably not loaded.'.format(subset_uuid))
return return_dict
if subset_uuid == 'default_subset':
available_indicators = []
else:
available_indicators = workspace_object.get_available_indicators(subset=subset_uuid, step='step_1')
# Check request
selected = True
if request and 'selected' in request.keys():
selected = request['selected']
# Check if indicator is available
if indicator in available_indicators:
status = "selectable"
else:
status = "not selectable"
selected = False
return_dict["label"] = self.mapping_objects['display_mapping'].get_mapping(indicator, 'internal_name', 'display_en')
return_dict["status"] = status
return_dict["selected"] = selected
return_dict["value"] = indicator
| |
ctx.channel, [url])
except Exception:
await ctx.send(
"Oops, an error occured. If this continues please use the contact command to inform the bot owner."
)
@commands.command(aliases=["setvoice"])
async def myvoice(self, ctx, voice: str = None):
"""
Changes your TTS voice.
Type `[p]listvoices` to view all possible voices.
If no voice is provided, it will show your current voice.
"""
current_voice = await self.config.user(ctx.author).voice()
if voice is None:
await ctx.send(f"Your current voice is **{current_voice}**")
return
voice = voice.title()
if voice in voices.keys():
await self.config.user(ctx.author).voice.set(voice)
await ctx.send(f"Your new TTS voice is: **{voice}**")
else:
await ctx.send(
f"Sorry, that's not a valid voice. You can view voices with the `{ctx.clean_prefix}listvoices` command."
)
@commands.command(aliases=["setspeed"])
async def myspeed(self, ctx, speed: int = None):
"""
Changes your TTS speed.
If no speed is provided, it will show your current speed.
The speed range is 0-10 (higher is faster, 5 is normal.)
"""
author_data = await self.config.user(ctx.author).all()
current_speed = author_data["speed"]
current_voice = author_data["voice"]
support_speed = voices[current_voice]["speed"]
if speed is None:
await ctx.send(f"Your current speed is **{current_speed}**")
return
if speed < 0:
await ctx.send("Your speed must be greater than or equal to 0.")
return
if speed > 10:
await ctx.send("Your speed must be less than or equal to 10.")
return
await self.config.user(ctx.author).speed.set(speed)
if support_speed:
await ctx.send(f"Your new speed is **{speed}**.")
else:
await ctx.send(
f"Your new speed is **{speed}**. "
"Keep in mind your current voice doesn't support speed changes, "
"so you won't see a difference until you change your voice to one that supports speed."
)
@commands.command()
async def listlangs(self, ctx):
"""
List all the valid language codes for TTS voices.
"""
langs = sorted(
set([voices[voice]["languageCode"] for voice in voices.keys()] + ["all"])
)
embed = discord.Embed(
title="Valid Language Codes",
color=await ctx.embed_color(),
description=", ".join(langs),
)
await ctx.send(embed=embed)
@commands.command()
async def listvoices(self, ctx, lang="en"):
"""
Lists all the TTS voices in the selected language.
If no language is provided, it will list sthe voices in English.
Use 'all' as the language code to view all voices.
"""
langs = set([voices[voice]["languageCode"] for voice in voices.keys()])
ALL_VOICES = False
if lang not in langs:
if lang == "all":
ALL_VOICES = True
else:
await ctx.send(
f"Sorry, that's not a valid language code. You can view all valid language codes with the `{ctx.clean_prefix}listlangs` command."
)
if ALL_VOICES:
voice_data = voices
else:
voice_data = {
voice: voices[voice]
for voice in voices.keys()
if voices[voice]["languageCode"] == lang
}
qs = {"low": [], "medium": [], "high": []}
for voice in voice_data:
embed = discord.Embed(color=await ctx.embed_color(), title=voice)
embed.description = (
"```yaml\n"
f"Gender: {voice_data[voice]['gender']}\n"
f"Language: {voice_data[voice]['languageName']}\n"
f"Quality: {voice_data[voice]['quality']}\n"
f"Supports Speed: {voice_data[voice]['speed']}\n"
f"Translates: {voice_data[voice]['translates']}\n"
f"Provider: {voice_data[voice]['provider']}"
"```"
)
q = voice_data[voice]["quality"].lower()
qs[q].append(embed)
pages = qs["high"] + qs["medium"] + qs["low"]
for index, embed in enumerate(pages):
if len(pages) > 1:
embed.set_footer(text=f"Voice {index + 1}/{len(pages)} | {lang} voices")
if DPY_MENUS:
await dpymenu(ctx, pages, timeout=60)
else:
if len(pages) == 1:
await ctx.send(embed=pages[0])
else:
await menu(ctx, pages, DEFAULT_CONTROLS, timeout=60)
    @commands.group()
    @commands.guild_only()
    @commands.admin_or_permissions(manage_guild=True)
    async def ttschannel(self, ctx):
        """
        Configures automatic TTS channels.
        """
        # Parent command group only; the add/remove/clear/list subcommands
        # do the actual work.
        pass
@ttschannel.command()
async def add(self, ctx, channel: discord.TextChannel):
"""
Adds a channel for automatic TTS.
"""
channel_list = await self.config.guild(ctx.guild).channels()
if channel.id not in channel_list:
channel_list.append(channel.id)
await self.config.guild(ctx.guild).channels.set(channel_list)
self.channel_cache[ctx.guild.id] = channel_list
await ctx.send(
f"Okay, {channel.mention} will now be used as a TTS channel."
)
else:
await ctx.send(
f"{channel.mention} is already a TTS channel, did you mean use the `{ctx.clean_prefix}ttschannel remove` command?"
)
@ttschannel.command(aliases=["delete", "del"])
async def remove(self, ctx, channel: discord.TextChannel):
"""
Removes a channel for automatic TTS.
"""
channel_list = await self.config.guild(ctx.guild).channels()
if channel.id in channel_list:
channel_list.remove(channel.id)
await self.config.guild(ctx.guild).channels.set(channel_list)
self.channel_cache[ctx.guild.id] = channel_list
await ctx.send(f"Okay, {channel.mention} is no longer a TTS channel.")
else:
await ctx.send(
f"{channel.mention} isn't a TTS channel, did you mean use the `{ctx.clean_prefix}ttschannel add` command?"
)
@ttschannel.command()
async def clear(self, ctx):
"""
Removes all the channels for automatic TTS.
"""
channel_list = await self.config.guild(ctx.guild).channels()
if not channel_list:
await ctx.send("There's no channels in the config.")
else:
try:
await ctx.send(
"Are you sure you want to clear all this server's TTS channels? Respond with yes or no."
)
predictate = MessagePredicate.yes_or_no(ctx, user=ctx.author)
await ctx.bot.wait_for("message", check=predictate, timeout=30)
except asyncio.TimeoutError:
await ctx.send(
"You never responded, please use the command again to clear all of this server's TTS channels."
)
return
if predictate.result:
await self.config.guild(ctx.guild).channels.clear()
del self.channel_cache[ctx.guild.id]
await ctx.send("Okay, I've cleared all TTS channels for this server.")
else:
await ctx.send("Okay, I won't clear any TTS channels.")
@ttschannel.command()
async def list(self, ctx):
"""
Shows all the channels for automatic TTS.
"""
try:
channel_list = self.channel_cache[ctx.guild.id]
except KeyError:
channel_list = None
if not channel_list:
await ctx.send("This server doesn't have any TTS channels set up.")
else:
text = "".join(
"<#" + str(channel) + "> - " + str(channel) + "\n"
for channel in channel_list
)
pages = [p for p in pagify(text=text, delims="\n")]
embeds = []
for index, page in enumerate(pages):
embed = discord.Embed(
title="Automatic TTS Channels",
color=await ctx.embed_colour(),
description=page,
)
if len(embeds) > 1:
embed.set_footer(text=f"Page {index+1}/{len(pages)}")
embeds.append(embed)
if DPY_MENUS:
await dpymenu(ctx, embeds, timeout=60)
else:
if len(pages) == 1:
await ctx.send(embed=embeds[0])
else:
await menu(ctx, embeds, DEFAULT_CONTROLS, timeout=60)
    @commands.Cog.listener()
    async def on_message_without_command(self, message: discord.Message):
        # Auto-TTS entry point: runs for every non-command message and reads
        # it aloud when sent in a configured TTS channel.
        if not message.guild:
            return
        if message.author.bot:
            return
        # We need send permission for the error feedback below.
        if not message.channel.permissions_for(message.guild.me).send_messages:
            return
        if await self.bot.allowed_by_whitelist_blacklist(who=message.author) is False:
            return
        if await self.bot.cog_disabled_in_guild(self, message.guild):
            return
        try:
            channel_list = self.channel_cache[message.guild.id]
        except KeyError:
            # No cache entry: this guild has no TTS channels configured.
            return
        if not channel_list:
            return
        if message.channel.id not in channel_list:
            return
        # The author must be in a voice channel for playback.
        if not message.author.voice or not message.author.voice.channel:
            await message.channel.send("You are not connected to a voice channel.")
            return
        author_data = await self.config.user(message.author).all()
        author_voice = author_data["voice"]
        author_speed = author_data["speed"]
        # presumably normalises/sanitises the text and returns None when
        # nothing speakable remains — confirm against decancer_text
        text = self.decancer_text(message.clean_content)
        if text is None:
            await message.channel.send("That's not a valid message, sorry.")
            return
        char_number = len(text)
        # Hard cap to avoid abuse of the TTS host.
        if char_number > 1000:
            await message.channel.send(
                f"Sorry, I limit TTS to 1000 characters to avoid abuse. ({char_number}/1000)"
            )
            return
        urls = generate_urls(author_voice, text, author_speed)
        try:
            await self.play_sfx(message.author.voice.channel, message.channel, urls)
        except Exception:
            await message.channel.send(
                "Oops, an error occured. If this continues please use the contact command to inform the bot owner."
            )
    async def play_sfx(self, vc, channel, link):
        """Play a TTS/SFX track immediately, interrupting any current track.

        Args:
            vc: voice channel to play in.
            channel: text channel for error feedback (may be None).
            link: list of track URLs; only the first entry is used.
        """
        try:
            player = lavalink.get_player(vc.guild.id)
        except NoLavalinkNode: # Lavalink hasn't been initialised yet
            # NOTE(review): `type != "autotts"` compares the *builtin* `type`
            # to a string, so it is always True — looks like a leftover from
            # a version where `type` was a parameter; confirm intent.
            if channel and type != "autotts":
                await channel.send(
                    "Either the Audio cog is not loaded or lavalink has not been initialized yet. If this continues to happen, please contact the bot owner."
                )
            return
        except KeyError:
            # No player for this guild yet — connect one.
            player = await lavalink.connect(vc)
        link = link[0] # could be rewritten to add ALL links
        tracks = await player.load_tracks(query=link)
        if not tracks.tracks:
            await channel.send(
                "Something went wrong. Either the SFX is invalid, or the TTS host is down."
            )
            return
        track = tracks.tracks[0]
        # Remember the repeat flag and disable it so the SFX cannot loop.
        self.repeat_state[vc.guild.id] = player.repeat
        player.repeat = False
        # Case 1: nothing playing or queued — just play the SFX.
        if player.current is None and not player.queue:
            player.queue.append(track)
            self.current_sfx[vc.guild.id] = track
            await player.play()
            return
        try:
            csfx = self.current_sfx[vc.guild.id]
        except KeyError:
            csfx = None
        # Case 2: an SFX is already playing — replace it with the new one.
        if csfx is not None:
            player.queue.insert(0, track)
            await player.skip()
            self.current_sfx[player.guild.id] = track
            return
        # Case 3: normal audio is playing — remember the track and position
        # so ll_check can resume it, then queue SFX + interrupted track.
        self.last_track_info[player.guild.id] = (player.current, player.position)
        self.current_sfx[player.guild.id] = track
        player.queue.insert(0, track)
        player.queue.insert(1, player.current)
        await player.skip()
    async def queue_sfx(self, vc, channel, link):
        """Queue a TTS/SFX track behind whatever is already playing.

        Unlike play_sfx this never interrupts the current track.

        Args:
            vc: voice channel to play in.
            channel: text channel for error feedback (may be None).
            link: list of track URLs; only the first entry is used.
        """
        try:
            player = lavalink.get_player(vc.guild.id)
        except NoLavalinkNode: # Lavalink hasn't been initialised yet
            # NOTE(review): `type != "autotts"` compares the builtin `type`
            # to a string (always True) — same leftover as in play_sfx.
            if channel and type != "autotts":
                await channel.send(
                    "Either the Audio cog is not loaded or lavalink has not been initialized yet. If this continues to happen, please contact the bot owner."
                )
            return
        except KeyError:
            # No player for this guild yet — connect one.
            player = await lavalink.connect(vc)
        link = link[0] # could be rewritten to add ALL links
        tracks = await player.load_tracks(query=link)
        if not tracks.tracks:
            await channel.send(
                "Something went wrong. Either the SFX is invalid, or the TTS host is down."
            )
            return
        track = tracks.tracks[0]
        # Idle player: start playback right away and mark the track as SFX.
        if player.current is None and not player.queue:
            player.queue.append(track)
            self.current_sfx[vc.guild.id] = track
            await player.play()
            return
        # Otherwise just append to the existing queue.
        player.queue.append(track)
        return
async def ll_check(self, player, event, reason):
try:
csfx = self.current_sfx[player.guild.id]
except KeyError:
csfx = None
try:
lti = self.last_track_info[player.guild.id]
except KeyError:
lti = None
if csfx is None and lti is None:
return
if (
event == lavalink.LavalinkEvents.TRACK_EXCEPTION
and csfx is not None
or event == lavalink.LavalinkEvents.TRACK_STUCK
and csfx is not None
):
del self.current_sfx[player.guild.id]
return
if (
event == | |
# jekyll.py
# -*- coding: utf-8 -*-
import imghdr
import io
import os
import re
import sublime
import sublime_plugin
import sys
import traceback
import uuid
import shutil
from datetime import datetime
from functools import wraps
try:
import simple_json as json
except ImportError:
import json
## ********************************************************************************************** ##
# BEGIN STATIC VARIABLES
## ********************************************************************************************** ##
# True on Sublime Text 3 and later (build numbers compare lexically here).
ST3 = sublime.version() >= '3000'
# Toggled below from the 'jekyll_debug' user setting.
DEBUG = False
# Recognised file extensions per markup type.
VALID_MARKDOWN_EXT = ('markdown', 'mdown', 'mkdn', 'mkd', 'md', )
VALID_HTML_EXT = ('html', 'htm', )
VALID_TEXTILE_EXT = ('textile', )
VALID_YAML_EXT = ('yaml', 'yml', )
VALID_PLAIN_TEXT_EXT = ('txt', )
# Date prefix format for post filenames.
POST_DATE_FORMAT = '%Y-%m-%d'
# NOTE(review): load_settings at import time may return empty settings on
# ST3 before the API is ready — confirm the debug flag is picked up reliably.
settings = sublime.load_settings('Jekyll.sublime-settings')
if settings.has('jekyll_debug') and settings.get('jekyll_debug') is True:
    DEBUG = True
# ST3 packages are Python packages, so a relative import is required there.
if ST3:
    from .send2trash import send2trash
else:
    from send2trash import send2trash
## ********************************************************************************************** ##
# BEGIN GLOBAL METHODS
## ********************************************************************************************** ##
def plugin_loaded():
    """Log plugin environment details to the console when debugging is on."""
    if not DEBUG:
        return
    # Gather everything the debug banner needs in one mapping.
    info = {
        'time': datetime.utcnow(),
        'python': sys.version_info[:3],
        'plat': sublime.platform(),
        'arch': sublime.arch(),
        'ver': sublime.version(),
        'package': sublime.packages_path(),
        'install': sublime.installed_packages_path(),
    }
    message = (
        'Jekyll debugging mode enabled...\n'
        '\tUTC Time: {time}\n'
        '\tSystem Python: {python}\n'
        '\tSystem Platform: {plat}\n'
        '\tSystem Architecture: {arch}\n'
        '\tSublime Version: {ver}\n'
        '\tSublime Packages Path: {package}\n'
        '\tSublime Installed Packages Path: {install}\n'
    ).format(**info)
    sublime.status_message('Jekyll: Debugging enabled...')
    debug('Plugin successfully loaded.', prefix='\n\nJekyll', level='info')
    debug(message, prefix='Jekyll', level='info')
def plugin_unloaded():
    """Log a console notice on unload (debug mode only)."""
    if not DEBUG:
        return
    debug('Plugin successfully unloaded.\n\n', prefix='Jekyll', level='info')
def debug(message, prefix='Jekyll', level='debug'):
    """Print a formatted entry to the Sublime console when debugging is on.

    Args:
        message (string): the message to print
        prefix (string): label printed before the level tag
        level (string): one of debug, info, warning, error [Default: debug]

    Returns:
        None (the entry is written with a plain print call)
    """
    if not DEBUG:
        return
    print('{prefix}: [{level}] {message}'.format(
        prefix=prefix,
        level=level,
        message=message
    ))
def catch_errors(fn):
    """Decorator that catches and reports exceptions from Jekyll commands.

    A MissingPathException is surfaced to the user through an error dialog;
    any other exception is only logged to the console.

    Args:
        fn (func): the function to wrap

    Returns:
        func: the wrapped function
    """
    @wraps(fn)
    def _fn(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except MissingPathException as e:
            debug('Unable to resolve path information! - {error}'.format(error=e),
                  prefix='Jekyll', level='error')
            text = (
                'Jekyll: Unable to resolve path information!\n\n{error}\n\n'
                'Please double-check that you have defined absolute '
                'paths to your Jekyll directories, or that you have '
                'enabled the `jekyll_auto_find_paths` setting.\n\n'
                'If you have set your path settings correctly, please '
                'copy the console output and create a new issue.\n'
            )
            sublime.error_message(text.format(error=e))
        except Exception as e:
            # NOTE(review): all other errors are swallowed after logging, so
            # the wrapped command silently returns None — confirm intended.
            debug('Unexpected error: {error}.\n\t\t¯\_(ツ)_/¯\n'.format(error=e),
                  prefix='Jekyll', level='error')
    return _fn
def get_setting(view, key, default=None):
    """Return a setting value, preferring project settings over global ones.

    Looks for the key inside the view's project-specific 'Jekyll' settings
    dict first, then falls back to 'Jekyll.sublime-settings' (user then
    package defaults), and finally to *default*.

    Args:
        view (obj): A Sublime view object
        key (string): A settings dictionary key
        default: value returned when the key is not set anywhere

    Returns:
        The first truthy project-level value, otherwise the global setting
        (or *default*).
    """
    try:
        debug('Getting key "{key}" from settings.'.format(key=key))
        settings = view.settings()
        if settings.has('Jekyll'):
            s = settings.get('Jekyll').get(key, default)
            # Falsy project values fall through to the global lookup.
            if s and s is not None:
                return s
    except Exception:
        # Bug fix: was a bare `except:` (also swallowed KeyboardInterrupt).
        # The view may be transient or lack a settings object; fall back to
        # the global settings instead of failing.
        pass
    global_settings = sublime.load_settings('Jekyll.sublime-settings')
    return global_settings.get(key, default)
def find_dir_path(window, dir_name):
    """Search the window's folder tree for directories named *dir_name*.

    Args:
        window (obj): A Sublime window object
        dir_name (string): A directory name to look for

    Returns:
        array: absolute paths of every matching sub-directory
    """
    matches = []
    debug('Searching for directory "{name}" in folder tree.'.format(name=dir_name))
    for folder in window.folders():
        for root, dirs, files in os.walk(folder):
            # Prune hidden directories so os.walk never descends into them.
            dirs[:] = [d for d in dirs if not d.startswith('.')]
            if dir_name in dirs:
                matches.append(os.path.abspath(os.path.join(root, dir_name)))
    debug('Found these sub-folder(s) in the sidebar: {0}'.format(matches))
    return matches
def clean_title_input(title, draft=False):
    """Convert a post title into a valid Jekyll filename stem.

    Lower-cases the title, drops characters that are not word characters,
    spaces or dashes, turns spaces and underscores into dashes, and prefixes
    today's date (YYYY-MM-DD) unless the file is a draft.

    Args:
        title (string): the human-readable post title
        draft (bool): True when the file is a draft (no date prefix)

    Returns:
        string: the cleaned filename stem
    """
    slug = re.sub(r' |_', '-', re.sub(r'[^\w -]', '', title.lower()))
    if draft:
        return slug
    return datetime.today().strftime('%Y-%m-%d') + '-' + slug
def create_file(path):
    """Create an empty file at *path* unless the filename part is empty.

    Opening in append mode means an existing file is left untouched.

    Args:
        path (string): full path of the file to create

    Returns:
        none
    """
    name = os.path.basename(path)
    if name:
        with io.open(path, 'a', encoding="utf-8"):
            pass
## ********************************************************************************************** ##
# BEGIN BASE CLASSES
## ********************************************************************************************** ##
class MissingPathException(Exception):
    """Raised when a required Jekyll directory path cannot be resolved."""

    def __init__(self, message, *args, **kwargs):
        # Forward everything to Exception so str(exc) shows the message.
        super(MissingPathException, self).__init__(message, *args, **kwargs)
class JekyllWindowBase(sublime_plugin.WindowCommand):
"""Abstract base class for Jekyll window commands.
"""
markup = None
def posts_path_string(self):
p = get_setting(self.window.active_view(), 'jekyll_posts_path')
return self.determine_path(p, '_posts')
def drafts_path_string(self):
p = get_setting(self.window.active_view(), 'jekyll_drafts_path')
return self.determine_path(p, '_drafts')
def uploads_path_string(self):
p = get_setting(self.window.active_view(), 'jekyll_uploads_path')
return self.determine_path(p, 'uploads')
def templates_path_string(self):
templates_dir_name = 'Jekyll Templates'
templates_path = os.path.join(sublime.packages_path(), 'User', templates_dir_name)
if not os.path.exists(templates_path):
os.makedirs(templates_path)
sublime.status_message('Jekyll: Created "{}" directory."'.format(templates_dir_name))
# TODO: specify where every template is saved, which slows down workflow?
p = get_setting(self.window.active_view(), 'jekyll_templates_path')
return self.determine_path(p if p != '' else templates_path, '_templates')
@catch_errors
def determine_path(self, path, dir_name=None):
"""Determine a directory path.
Args:
path (string): A string directory path
dir_name (string): A string directory name
Returns:
string: a cleaned title for saving a new Jekyll post file
"""
if not self.window.views():
view = self.window.new_file()
else:
view = self.window.active_view()
auto = get_setting(view, 'jekyll_auto_find_paths', False)
if auto and dir_name:
self.dirs = find_dir_path(self.window, dir_name)
if not self.dirs:
if not path or path == '' or not os.path.exists(path):
debug('Unable to find for "{dir}" directory.'.format(
dir=dir_name), prefix='Jekyll', level='error')
raise MissingPathException('Unable to find for "{dir}" directory.'.format(
dir=dir_name))
return path
elif self.dirs and len(self.dirs) > 1:
# more than one directory was found
# so choose which one to use
def callback(self, index):
if index > -1 and type(self.dirs[index]) is list:
return self.dirs[index]
else:
self.dirs = []
return None
self.window.show_quick_panel(self.dirs, callback)
elif self.dirs and len(self.dirs) == 1:
# only one directory was found, so use it
return self.dirs[0]
else:
if not path or path == '':
debug('Path is null for "{dir}" directory.'.format(
dir=dir_name), prefix='Jekyll', level='error')
raise MissingPathException('Path is null for "{dir}" directory.'.format(
dir=dir_name))
elif not os.path.exists(path):
debug('Path "{path}" does not exist for "{dir}" directory.'.format(
path=path, dir=dir_name), prefix='Jekyll', level='error')
raise MissingPathException('Path "{path}" does not exist for "{dir}" directory.'.format(
path=path, dir=dir_name))
return path
def create_post_frontmatter(self, title, comment=None):
"""Create post frontmatter content.
Args:
title (str): A post title
comment (str): An optional comment block
Returns:
string: A Sublime snippet string
"""
if not comment or comment == '':
comment = ''
else:
comment = '# {0}\n'.format(comment)
frontmatter = (
'{comment}---\n'
'title: {title}\n'
).format(comment=str(comment), title=str(title))
frontmatter += (
'layout: ${1:post}\n'
'---\n$0'
)
return frontmatter
    @catch_errors
    def title_input(self, title, path=None):
        """Sanitize a post title, build its file path, and open the new file.

        Args:
            title (string): A post title
            path (string): A path string; when None the command's own path
                lookup is used

        Returns:
            None
        """
        # NOTE(review): self.path_string and self.IS_DRAFT are expected to be
        # provided by the concrete command subclass — confirm.
        post_dir = self.path_string() if path is None else path
        self.markup = get_setting(self.window.active_view(), 'jekyll_default_markup', 'Markdown')
        self.extension = get_setting(self.window.active_view(), 'jekyll_markdown_extension', 'markdown')
        # Fall back to '.markdown' when the configured extension is unknown.
        self.extension = '.' + self.extension if self.extension in VALID_MARKDOWN_EXT else '.markdown'
        if self.markup == 'Textile':
            file_ext = '.textile'
        elif self.markup == 'HTML':
            file_ext = '.html'
        elif self.markup == 'Plain text':
            file_ext = '.txt'
        else:
            file_ext = self.extension
        clean_title = clean_title_input(title, self.IS_DRAFT) + file_ext
        full_path = os.path.join(post_dir, clean_title)
        # Refuse to clobber an existing post/draft at the same path.
        if os.path.lexists(full_path):
            sublime.error_message('Jekyll: File already exists at "{0}"'.format(full_path))
            return
        else:
            frontmatter = self.create_post_frontmatter(title)
            self.create_and_open_file(full_path, frontmatter)
def list_files(self, path, filter_ext=True):
"""Create an array of string arrays for files
Args:
path (string): A directory path of files
filter_ext (bool): Filters files by type
Returns:
None
"""
self.item_list = []
if os.path.exists(path) and os.path.isdir(path):
for root, dirs, files in os.walk(path):
for f in files:
if filter_ext and not self.get_markup(f):
continue
fname = os.path.splitext(f)[0]
fpath = os.path.join(root, f)
self.item_list.append([fname, fpath])
self.item_list.sort(key=lambda x: os.path.getmtime(x[1]), reverse=True)
else:
self.item_list.append(['Directory does not exist!'])
if not len(self.item_list) > 0:
self.item_list.append(['No items found!'])
    def on_highlight(self, index):
        # Preview the highlighted quick-panel entry in a transient view.
        self.window.open_file(self.item_list[index][1], sublime.TRANSIENT)
def get_markup(self, file):
if file.endswith(VALID_MARKDOWN_EXT):
self.markup = 'Markdown'
elif file.endswith(VALID_HTML_EXT):
self.markup = 'HTML'
elif file.endswith(VALID_TEXTILE_EXT):
self.markup = 'Textile'
elif file.endswith(VALID_PLAIN_TEXT_EXT):
self.markup = 'Plain text'
elif | |
#!/usr/bin/env python
# -*-coding:utf-8 -*-
import numpy as np
import numpy_financial as npf
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
class Mortgage:
"""
A class to represent a mortgage loan for purchasing a house. This is a base class that builds an amortization table for an n-year fixed interest rate loan.
The base class does not include extra principal payments.
Attributes
-----------
__purchase_price (int): The full purchase price amount. Example: 200000 for a $200,000 purchase price.
__down_payment_percent (float): Percent of purchase price paid as down payment. Example: 0.2 for a 20% down payment.
__interest_rate (float): The loan interest rate. Example: 0.04125 for a 4.125% rate.
__start_date (str): The starting date the loan begins, represented as '%Y-%m-%d'. Example: '2000-5-1' for a May 1, 2000 start date.
__years (int): The length of the mortgage loan in years. Example: 30 for a 30 year loan.
        __num_yearly_payments (int, optional): The number of installment payments in a year. Typically, mortgages are paid monthly. Defaults to 12.
__down_payment (int): The down payment paid at the start of the loan
__loan_amount (int): The financed portion of the mortgage. Equals purchase_price - down_payment.
__payment (float): The monthly principal + interest payment
__payment_range (DatetimeIndex): Datetime index of payment periods from loan start date to loan payoff date.
"""
    def __init__(self, purchase_price, down_payment_percent, interest_rate, start_date, years, num_yearly_payments=12):
        """Initializes base mortgage class instance using input arguments

        Args:
            purchase_price (int): The full purchase price amount. Example: 200000 for a $200,000 purchase price.
            down_payment_percent (float): Percent of purchase price paid as down payment. Example: 0.2 for a 20% down payment.
            interest_rate (float): The loan interest rate. Example: 0.04125 for a 4.125% rate.
            start_date (str): The starting date the loan begins, represented as '%Y-%m-%d'. Example: '2000-5-1' for a May 1, 2000 start date.
            years (int): The length of the mortgage loan in years. Example: 30 for a 30 year loan.
            num_yearly_payments (int, optional): The number of installment payments in a year. Typically, mortgages are paid monthly. Defaults to 12.
        """
        # initialize instance variables
        # (double-underscore names are name-mangled; derived values such as
        # down payment and monthly payment are computed properties, so they
        # always reflect the current values of these attributes)
        self.__purchase_price = purchase_price
        self.__down_payment_percent = down_payment_percent
        self.__interest_rate = interest_rate
        self.__start_date = start_date
        self.__years = years
        self.__num_yearly_pmts = num_yearly_payments
@property
def __down_payment(self):
return self.get_purchase_price() * self.get_down_payment_percent()
@property
def __loan_amount(self):
return self.get_purchase_price() - self.get_down_payment()
@property
def __payment(self):
return round(-1 * npf.pmt(self.__interest_rate/self.__num_yearly_pmts, self.__years*self.__num_yearly_pmts, self.__loan_amount), 2)
def get_payment(self):
"""returns monthly principal + interest payment
Returns:
float: monthly payment
"""
return self.__payment
def get_purchase_price(self):
"""returns purchase price (loan amount + down payment)
Returns:
int: the purchase price
"""
return self.__purchase_price
def set_purchase_price(self, purchase_price):
"""method to set/change purchase price
Changing the purchase price will recalculate the down payment, loan amount, and monthly payment attributes.
Args:
purchase_price (int): purchase price
"""
self.__purchase_price = purchase_price
def get_down_payment_percent(self):
"""returns down payment percent
Returns:
float: down payment percent
"""
return self.__down_payment_percent
def set_down_payment_percent(self, down_payment_percent):
"""Set/change the down payment percent
Changing the down payment percent will recalculate the down payment, the loan amount, and the monthly payment attributes.
Args:
down_payment_percent (float): the down payment percent, as a float. Example: 0.0475 for a 4.75% rate.
"""
self.__down_payment_percent = down_payment_percent
def get_down_payment(self):
"""Returns down payment (purchase price * down payment percent)
Returns:
int: The down payment
"""
return self.__down_payment
def get_interest_rate(self):
"""Returns the interest rate
Returns:
float: interest rate as a float (0.04125 for 4.125%)
"""
return self.__interest_rate
def set_interest_rate(self, interest_rate):
"""Set/change the interest rate
Changing the interest rate will recalculate the monthly payment attribute
Args:
interest_rate (float): interest rate, expressed as a float (0.04125 for 4.125%)
"""
self.__interest_rate = interest_rate
def get_years(self):
"""returns the number of years over which the mortgage is amortized
Returns:
int: number of years
"""
return self.__years
    def set_years(self, years):
        """Set/change the number of years over which the loan is amortized.

        The payment and schedule are @property values computed from this
        attribute, so they reflect the new term on next access.

        Args:
            years (int): loan term in years.
        """
        self.__years = years
def get_num_yearly_pmts(self):
"""Returns the number of yearly payments
Returns:
int: number of yearly payments
"""
return self.__num_yearly_pmts
    def set_num_yearly_pmts(self, num_yearly_pmts):
        """Set/change the number of yearly payments.

        The payment and schedule are @property values computed from this
        attribute, so they reflect the new cadence on next access.

        Args:
            num_yearly_pmts (int): payments per year (e.g. 12 for monthly).
        """
        self.__num_yearly_pmts = num_yearly_pmts
def get_start_date(self):
"""Returns the mortgage start date
Returns:
datetime.date: mortgage start date
"""
return self.__start_date
    def set_start_date(self, start_date):
        """Set/change the mortgage start date.

        The payment range is a @property computed from this attribute, so the
        schedule reflects the new start on next access.

        Args:
            start_date (str): start date in '%Y-%m-%d' form, e.g. '2000-5-1'
                for May 1, 2000 (any value accepted by pd.date_range works).
        """
        self.__start_date = start_date
def get_loan_amount(self):
"""Return the loan amount (loan amount = purchase price - down payment)
Returns:
int: the loan amount (purchase price - down payment)
"""
return self.__loan_amount
    @property
    def __payment_range(self):
        """DatetimeIndex of payment dates: one per period, month-start frequency."""
        # NOTE(review): freq='MS' is fixed to month-start, so num_yearly_pmts != 12
        # would produce dates that do not match the payment cadence -- confirm intended.
        payment_range = pd.date_range(self.__start_date, periods=self.__years*self.__num_yearly_pmts, freq='MS')
        payment_range.name = 'Payment Date'
        return payment_range
def get_payment_range(self):
"""Returns a DatetimeIndex of payment dates from loan start to finish.
Returns:
DatetimeIndex: Index of payment dates in Datetime format
"""
return self.__payment_range
def get_number_of_payments(self):
"""Returns the number of payment periods. This is equal to the number of years times the number of yearly payments.
Example: A 30 year loan with monthly payments will have 360 payments.
Returns:
int: the number of payment periods
"""
return self.__years * self.__num_yearly_pmts
    @property
    def __amortization_table(self):
        """Returns amortization table containing the loan payment schedule. The columns include:
        1) Payment Date
        2) Monthly payment
        3) Principal Paid at each payment
        4) Interest Paid at each payment
        5) Beginning Balance at each payment
        6) Ending Balance at each payment
        7) Cumulative Principal Paid at each payment
        8) Cumulative Interest Paid at each payment
        Returns:
            pandas.DataFrame: DataFrame containing the mortgage amortization table.
        """
        atable = pd.DataFrame(
            index=self.get_payment_range(),
            columns=['Payment', 'Principal Paid', 'Interest Paid', 'Beginning Balance', 'Ending Balance'],
            dtype=float
        )
        # Move the dates into a column and number the periods from 1.
        atable.reset_index(inplace=True)
        atable.index += 1
        atable.index.name = 'Period'
        atable['Payment'] = self.get_payment()
        # npf.ppmt/ipmt return negative cash flows; negate to positive amounts.
        atable['Principal Paid'] = -1 * npf.ppmt(self.__interest_rate/self.__num_yearly_pmts, atable.index, self.__years*self.__num_yearly_pmts, self.__loan_amount)
        atable['Interest Paid'] = -1 * npf.ipmt(self.__interest_rate/self.__num_yearly_pmts, atable.index, self.__years*self.__num_yearly_pmts, self.__loan_amount)
        atable.loc[1, 'Beginning Balance'] = self.__loan_amount
        atable.loc[1, 'Ending Balance'] = atable.loc[1, 'Beginning Balance'] - atable.loc[1, 'Principal Paid']
        # Roll the balance forward one period at a time.
        for i in range(2, self.__years*self.__num_yearly_pmts + 1):
            atable.loc[i, 'Ending Balance'] = atable.loc[i - 1, 'Ending Balance'] - atable.loc[i, 'Principal Paid']
            atable.loc[i, 'Beginning Balance'] = atable.loc[i - 1, 'Ending Balance']
        # The final balance can come out as a tiny negative (-0.00); force it non-negative.
        atable.loc[atable.index.max(), 'Ending Balance'] = abs(atable.loc[atable.index.max(), 'Ending Balance'])
        atable['Cumulative Principal Paid'] = atable['Principal Paid'].cumsum()
        atable['Cumulative Interest Paid'] = atable['Interest Paid'].cumsum()
        return atable.round(2)
    def get_amortization_table(self):
        """Return the full amortization schedule.

        Returns:
            pandas.DataFrame: one row per period with payment, principal,
                interest, beginning/ending balances, and cumulative totals.
        """
        return self.__amortization_table
    @property
    def __payoff_date(self):
        """Date of the final payment, formatted as an 'MM-DD-YYYY' string."""
        return self.get_amortization_table().iloc[-1, 0].strftime('%m-%d-%Y')
    def get_payoff_date(self):
        """Returns mortgage payoff date. This is the date of the final loan payment.
        Returns:
            str: date of the final payment, formatted 'MM-DD-YYYY'.
        """
        return self.__payoff_date
def get_total_principal_paid(self):
"""Returns the total principal paid. This value should be equal to the loan amount.
Returns:
float: Total principal paid
"""
atable = self.get_amortization_table()
return round(atable['Cumulative Principal Paid'].iloc[-1], 2)
def get_total_interest_paid(self):
"""Returns the total interest paid over the life of the loan.
Returns:
float: Total interest paid
"""
atable = self.get_amortization_table()
return round(atable['Cumulative Interest Paid'].iloc[-1], 2)
    def get_total_cost_of_loan(self):
        """Return the total paid over the life of the loan.

        Equal to the total principal plus the total interest.

        Returns:
            float: the total paid over the life of the loan.
        """
        return round(self.get_total_principal_paid() + self.get_total_interest_paid(), 2)
def summary_plots(self, figsize=(20,20)):
"""Returns 2x2 figure of summary plots
Args:
figsize (tuple, optional): Figure size (Width, Length). Defaults to (20,20).
"""
atable = self.get_amortization_table()
fig, axes = plt.subplots(nrows=2, ncols=2, sharex='col', figsize=figsize)
# axes[0, 0]: cumulative plot
ax = axes[0, 0]
atable.plot('Payment Date', 'Cumulative Principal Paid', ax=ax)
atable.plot('Payment Date', 'Cumulative Interest Paid', ax=ax)
atable.plot('Payment Date', 'Ending Balance', ax=ax)
ax.scatter(
atable[atable['Cumulative Principal Paid'] > atable['Cumulative Interest Paid']].iloc[0, 0],
atable[atable['Cumulative Principal Paid'] > atable['Cumulative Interest Paid']].iloc[0, -2],
label=f"Cross-Over: \
{atable[atable['Cumulative Principal Paid'] > atable['Cumulative Interest Paid']].iloc[0,0]: %Y-%m-%d}",
color='black'
)
ytick = mtick.StrMethodFormatter('${x:,.0f}')
ax.yaxis.set_major_formatter(ytick)
ax.set_ylabel('Dollars')
ax.set_ylim(0, | |
<filename>ansible/modules/network/cloudengine/ce_mtu.py<gh_stars>1-10
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_mtu
version_added: "2.4"
short_description: Manages MTU settings on HUAWEI CloudEngine switches.
description:
- Manages MTU settings on HUAWEI CloudEngine switches.
author: QijunPan (@CloudEngine-Ansible)
notes:
- Either C(sysmtu) param is required or C(interface) AND C(mtu) params are req'd.
- C(state=absent) unconfigures a given MTU if that value is currently present.
options:
interface:
description:
- Full name of interface, i.e. 40GE1/0/22.
required: false
default: null
mtu:
description:
- MTU for a specific interface.
The value is an integer ranging from 46 to 9600, in bytes.
required: false
default: null
jumbo_max:
description:
- Maximum frame size. The default value is 9216.
The value is an integer and expressed in bytes. The value range is 1536 to 12224 for the CE12800
and 1536 to 12288 for ToR switches.
required: false
default: null
jumbo_min:
description:
- Non-jumbo frame size threshod. The default value is 1518.
The value is an integer that ranges from 1518 to jumbo_max, in bytes.
required: false
default: null
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Mtu test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config jumboframe on 40GE1/0/22"
ce_mtu:
interface: 40GE1/0/22
jumbo_max: 9000
jumbo_min: 8000
provider: "{{ cli }}"
- name: "Config mtu on 40GE1/0/22 (routed interface)"
ce_mtu:
interface: 40GE1/0/22
mtu: 1600
provider: "{{ cli }}"
- name: "Config mtu on 40GE1/0/23 (switched interface)"
ce_mtu:
interface: 40GE1/0/22
mtu: 9216
provider: "{{ cli }}"
- name: "Config mtu and jumboframe on 40GE1/0/22 (routed interface)"
ce_mtu:
interface: 40GE1/0/22
mtu: 1601
jumbo_max: 9001
jumbo_min: 8001
provider: "{{ cli }}"
- name: "Unconfigure mtu and jumboframe on a given interface"
ce_mtu:
state: absent
interface: 40GE1/0/22
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"mtu": "1700", "jumbo_max": "9000", jumbo_min: "8000"}
existing:
description: k/v pairs of existing mtu/sysmtu on the interface/system
returned: always
type: dict
sample: {"mtu": "1600", "jumbo_max": "9216", "jumbo_min": "1518"}
end_state:
description: k/v pairs of mtu/sysmtu values after module execution
returned: always
type: dict
sample: {"mtu": "1700", "jumbo_max": "9000", jumbo_min: "8000"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["interface 40GE1/0/23", "mtu 1700", "jumboframe enable 9000 8000"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
import copy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import ce_argument_spec, get_config, load_config, get_nc_config, set_nc_config
CE_NC_GET_INTF = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName>%s</ifName>
<isL2SwitchPort></isL2SwitchPort>
<ifMtu></ifMtu>
</interface>
</interfaces>
</ifm>
</filter>
"""
CE_NC_XML_MERGE_INTF_MTU = """
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface operation="merge">
<ifName>%s</ifName>
<ifMtu>%s</ifMtu>
</interface>
</interfaces>
</ifm>
"""
def is_interface_support_setjumboframe(interface):
    """Return True if the interface type supports jumboframe configuration.

    Only Ethernet port types (GE/10GE/25GE/4X10GE/40GE/100GE) qualify;
    None and every other interface type return False.
    """
    if interface is None:
        return False
    jumbo_capable_prefixes = ('GE', '10GE', '25GE', '4X10GE', '40GE', '100GE')
    return interface.upper().startswith(jumbo_capable_prefixes)
def get_interface_type(interface):
    """Return the lowercase type of an interface name, or None if unknown.

    Examples: '40GE1/0/22' -> '40ge', 'Eth-Trunk10' -> 'eth-trunk'.
    """
    if interface is None:
        return None
    # Ordered (prefix, type) pairs; matched against the uppercased name.
    prefix_to_type = (
        ('GE', 'ge'), ('10GE', '10ge'), ('25GE', '25ge'), ('4X10GE', '4x10ge'),
        ('40GE', '40ge'), ('100GE', '100ge'), ('VLANIF', 'vlanif'),
        ('LOOPBACK', 'loopback'), ('METH', 'meth'), ('ETH-TRUNK', 'eth-trunk'),
        ('VBDIF', 'vbdif'), ('NVE', 'nve'), ('TUNNEL', 'tunnel'),
        ('ETHERNET', 'ethernet'), ('FCOE-PORT', 'fcoe-port'),
        ('FABRIC-PORT', 'fabric-port'), ('STACK-PORT', 'stack-Port'),
        ('NULL', 'null'),
    )
    upper_name = interface.upper()
    for prefix, iftype in prefix_to_type:
        if upper_name.startswith(prefix):
            return iftype.lower()
    return None
def build_config_xml(xmlstr):
    """Wrap an XML payload in the NETCONF <config> element."""
    return ''.join(['<config> ', xmlstr, ' </config>'])
class Mtu(object):
"""set mtu"""
    def __init__(self, argument_spec):
        """Create the AnsibleModule, read params, and initialize result state."""
        self.spec = argument_spec
        self.module = None
        self.init_module()

        # interface info
        self.interface = self.module.params['interface']
        self.mtu = self.module.params['mtu']
        self.state = self.module.params['state']
        # `or None` collapses falsy values (e.g. empty string) to None
        self.jbf_max = self.module.params['jumbo_max'] or None
        self.jbf_min = self.module.params['jumbo_min'] or None
        self.jbf_config = list()   # jumboframe values as currently configured
        self.jbf_cli = ""          # jumboframe CLI command built for the change
        self.commands = list()     # CLI commands queued for the device

        # state
        self.changed = False
        self.updates_cmd = list()  # human-readable change list for the result
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()
        self.intf_info = dict()    # one interface info
        self.intf_type = None      # loopback tunnel ...
def init_module(self):
""" init_module"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
    def get_interface_dict(self, ifname):
        """ get one interface attributes dict.

        Queries the device over NETCONF; returns an empty dict when the
        interface is not found, otherwise keys ifName/isL2SwitchPort/ifMtu
        (all values are strings extracted from the reply XML).
        """
        intf_info = dict()

        conf_str = CE_NC_GET_INTF % ifname
        ret_xml = get_nc_config(self.module, conf_str)
        if "<data/>" in ret_xml:
            # empty <data/> reply: no such interface on the device
            return intf_info

        # pull ifName / isL2SwitchPort / ifMtu out of the reply XML
        intf = re.findall(
            r'.*<ifName>(.*)</ifName>.*\s*'
            r'<isL2SwitchPort>(.*)</isL2SwitchPort>.*\s*'
            r'<ifMtu>(.*)</ifMtu>.*', ret_xml)

        if intf:
            intf_info = dict(ifName=intf[0][0],
                             isL2SwitchPort=intf[0][1],
                             ifMtu=intf[0][2])

        return intf_info
    def prase_jumboframe_para(self, config_str):
        """prase_jumboframe_para

        Extract the configured jumboframe [max, min] values for
        self.interface from a running-config excerpt.
        (Method name keeps the historic "prase" typo; renaming would break callers.)
        """
        interface_cli = "interface %s" % self.interface
        if config_str.find(interface_cli) == -1:
            self.module.fail_json(msg='Error: Interface does not exist.')

        try:
            npos1 = config_str.index('jumboframe enable')
        except ValueError:
            # return default value
            # NOTE(review): defaults are ints while the configured path below
            # returns strings from re.findall -- callers compare these values;
            # confirm the mixed types are intended.
            return [9216, 1518]
        try:
            # take only the 'jumboframe enable ...' line itself
            npos2 = config_str.index('\n', npos1)
            config_str_tmp = config_str[npos1:npos2]
        except ValueError:
            # line is at the end of the config: no trailing newline
            config_str_tmp = config_str[npos1:]

        return re.findall(r'([0-9]+)', config_str_tmp)
    def cli_load_config(self, commands):
        """load config by cli

        No-op in check mode: report the change without pushing it.
        """
        if not self.module.check_mode:
            load_config(self.module, commands)
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
    def get_jumboframe_config(self):
        """ get_jumboframe_config

        Fetch this interface's config section and parse its jumboframe values.
        """
        flags = list()
        # restrict the config dump to this interface's section
        exp = " all | section inc %s$" % self.interface.upper()
        flags.append(exp)
        output = get_config(self.module, flags)
        # drop '*' characters before parsing -- presumably display
        # decoration in the device output; verify against device behavior
        output = output.replace('*', '')

        return self.prase_jumboframe_para(output)
    def set_jumboframe(self):
        """ set_jumboframe

        Reconcile the device jumboframe [max, min] with the requested values:
        state=present merges jbf_max/jbf_min into the current config;
        state=absent restores the defaults [9216, 1518]. Records CLI commands
        and pushes them when anything differs.
        """
        if self.state == "present":
            if not self.jbf_max and not self.jbf_min:
                # nothing requested, nothing to do
                return

            jbf_value = self.get_jumboframe_config()
            self.jbf_config = copy.deepcopy(jbf_value)
            if len(jbf_value) == 1:
                # device reported only the max: assume the default min
                jbf_value.append("1518")
                self.jbf_config.append("1518")
            if not self.jbf_max:
                return

            if (len(jbf_value) > 2) or (len(jbf_value) == 0):
                self.module.fail_json(
                    msg='Error: Get jubmoframe config value num error.')
            # NOTE(review): values from the device are strings while module
            # params may be ints -- these == comparisons rely on matching
            # types; confirm against module argument_spec.
            if self.jbf_min is None:
                if jbf_value[0] == self.jbf_max:
                    return
            else:
                if (jbf_value[0] == self.jbf_max) \
                        and (jbf_value[1] == self.jbf_min):
                    return
            if jbf_value[0] != self.jbf_max:
                jbf_value[0] = self.jbf_max
            if (jbf_value[1] != self.jbf_min) and (self.jbf_min is not None):
                jbf_value[1] = self.jbf_min
            else:
                # min not requested: emit the single-argument form
                jbf_value.pop(1)
        else:
            jbf_value = self.get_jumboframe_config()
            self.jbf_config = copy.deepcopy(jbf_value)
            if (jbf_value == [9216, 1518]):
                # already at defaults
                return
            jbf_value = [9216, 1518]

        # execute commands
        command = "interface %s" % self.interface
        self.cli_add_command(command)
        if len(jbf_value) == 2:
            self.jbf_cli = "jumboframe enable %s %s" % (
                jbf_value[0], jbf_value[1])
        else:
            self.jbf_cli = "jumboframe enable %s" % (jbf_value[0])
        self.cli_add_command(self.jbf_cli)

        if self.commands:
            self.cli_load_config(self.commands)
            self.changed = True
            if self.state == "present":
                if self.jbf_min:
                    self.updates_cmd.append(
                        "jumboframe enable %s %s" % (self.jbf_max, self.jbf_min))
                else:
                    self.updates_cmd.append("jumboframe enable %s" % (self.jbf_max))
            else:
                self.updates_cmd.append("undo jumboframe enable")

        return
    def merge_interface(self, ifname, mtu):
        """ Merge interface mtu.

        state=present sets the requested MTU; state=absent restores the
        default of 1500. No-op when the device already matches.
        """
        xmlstr = ''
        change = False
        self.updates_cmd.append("interface %s" % ifname)
        if self.state == "present":
            if mtu and self.intf_info["ifMtu"] != mtu:
                xmlstr += CE_NC_XML_MERGE_INTF_MTU % (ifname, mtu)
                self.updates_cmd.append("mtu %s" % mtu)
                change = True
        else:
            if self.intf_info["ifMtu"] != '1500':
                # 'absent' means: put the MTU back to the 1500 default
                xmlstr += CE_NC_XML_MERGE_INTF_MTU % (ifname, '1500')
                self.updates_cmd.append("undo mtu")
                change = True

        if not change:
            return

        conf_str = build_config_xml(xmlstr)
        ret_xml = set_nc_config(self.module, conf_str)
        self.check_response(ret_xml, "MERGE_INTF_MTU")
        self.changed = True
def check_params(self):
"""Check all input params"""
# interface type check
if self.interface:
self.intf_type = get_interface_type(self.interface)
if not self.intf_type:
self.module.fail_json(
msg='Error: Interface name of %s '
'is error.' % self.interface)
if not self.intf_type:
self.module.fail_json(
msg='Error: Interface %s is error.')
# mtu check mtu
if self.mtu:
if not | |
tabs ##
tab_def_content += '''\n<div class="tab">\n'''
## Operations tabs ##
for i, (op, heatmap_html_dir) in enumerate(zip(operations, heatmap_html_dir_l)):
viewer_name = 'op%s_%s' % (i, op)
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '%s')"''' % viewer_name
tab_def_content += '''>%d. %s</button>\n''' % (i+1, op_2_name[op])
flnms = os.listdir(heatmap_html_dir)
heatmap_html_flnm = None
for flnm in flnms:
if flnm.endswith('.html'):
heatmap_html_flnm = flnm
shutil.copy2(os.path.join(heatmap_html_dir, flnm), output_directory)
tab_content += self._generate_tab_content(heatmap_html_flnm, viewer_name)
## Transformed matrix statistics tab ##
viewer_name = 'data_summary'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
if variable_specific:
tab_def_content += '''>Transformed Selected Variables Statistics</button>\n'''
else:
tab_def_content += '''>Transformed Matrix Statistics</button>\n'''
tab_content += '''\n<div id="{}" class="tabcontent" style="overflow:auto">'''.format(
viewer_name)
if variable_specific:
tab_content += '''\n<h5>Transformed Selected Variables Size: {} x {}</h5>'''.format(
len(transformed_matrix_df.index),
len(transformed_matrix_df.columns))
else:
tab_content += '''\n<h5>Transformed Matrix Size: {} x {}</h5>'''.format(
len(transformed_matrix_df.index),
len(transformed_matrix_df.columns))
tab_content += '''\n<h5>Row Aggregating Statistics</h5>'''
html = '''\n<pre class="tab">''' + str(row_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '''\n<br>'''
tab_content += '''\n<hr style="height:2px;border-width:0;color:gray;background-color:gray">'''
tab_content += '''\n<br>'''
tab_content += '''\n<h5>Column Aggregating Statistics</h5>'''
html = '''\n<pre class="tab">''' + str(col_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '\n</div>\n'
tab_def_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_linear_plot(self, data_df, output_directory, row_name='abundance',
top_percent=100):
linear_plot_path = 'linear_plot.html'
sum_order = data_df.sum(axis=1).sort_values(ascending=False).index
data_df = data_df.reindex(sum_order)
top_index = data_df.index[:int(data_df.index.size * top_percent / 100)]
data_df = data_df.loc[top_index]
links = data_df.stack().reset_index()
col_names = links.columns
links.rename(columns={col_names[0]: row_name,
col_names[1]: 'samples',
col_names[2]: 'value'},
inplace=True)
fig = px.line(links, x=row_name, y='value', color='samples')
plot(fig, filename=os.path.join(output_directory, linear_plot_path))
return linear_plot_path
    def _create_chem_abun_heatmap(self, output_directory, data_groups):
        """Build a plotly heatmap of the chemical abundance groups.

        data_groups maps (type, units) tuples to DataFrames; the frames are
        concatenated, columns are cluster-ordered, and each group gets its
        own colored y-axis labels plus a bracket annotation. Writes an HTML
        file into output_directory and returns its file name.
        """
        data_df = pd.concat(data_groups.values())
        # order columns by clustering on the transposed matrix
        col_ordered_label = self._compute_cluster_label_order(data_df.T.values.tolist(),
                                                              data_df.T.index.tolist())

        data_df = data_df.reindex(columns=col_ordered_label)

        # row positions of each group within the concatenated frame,
        # keyed by a 'type (units)' label (units may be missing/NaN)
        data_label_groups_pos = dict()

        for group_name, data_group_df in data_groups.items():
            if pd.isna(group_name[1]):
                label_name = group_name[0]
            else:
                label_name = '{} ({})'.format(group_name[0], group_name[1])

            data_label_groups_pos[label_name] = [
                data_df.index.to_list().index(data_id) for data_id in data_group_df.index]

        heatmap_file_name = 'chem_abun_heatmap_{}.html'.format(str(uuid.uuid4()))
        heatmap_path = os.path.join(output_directory, heatmap_file_name)

        colors = px.colors.sequential.OrRd
        # log-like color scale: each decade maps to the next OrRd color
        colorscale = [[0, colors[1]],        # 0
                      [1./10000, colors[2]], # 10
                      [1./1000, colors[3]],  # 100
                      [1./100, colors[4]],   # 1000
                      [1./10, colors[5]],    # 10000
                      [1., colors[6]]]

        layout = go.Layout(xaxis={'type': 'category'},
                           yaxis={'type': 'category'})

        fig = go.Figure(data=go.Heatmap(
            z=data_df.values,
            x=data_df.columns,
            y=data_df.index,
            hoverongaps=False,
            coloraxis='coloraxis'), layout=layout)

        # scale the figure with the matrix size, with sane minimums
        width = max(15 * data_df.columns.size, 1400)
        height = max(10 * data_df.index.size, 1000)

        fig.update_layout(coloraxis=dict(colorscale=colorscale),
                          plot_bgcolor='rgba(0,0,0,0)',
                          autosize=True,
                          width=width,
                          height=height,
                          xaxis=dict(tickangle=45,
                                     automargin=True,
                                     tickfont=dict(color='black', size=8)),
                          yaxis=dict(automargin=True,
                                     tickfont=dict(color='black', size=8)))

        colors = px.colors.qualitative.Bold
        chemical_types = ['aggregate', 'exometabolite', 'specific']

        # Only annotate per-group axes when there are multiple groups.
        text_height = 0
        col_size = width / data_df.columns.size
        label_pos = 70 / col_size
        if len(data_label_groups_pos) > 1:
            for i, label_name in enumerate(data_label_groups_pos):
                data_label_idx = data_label_groups_pos[label_name]
                chemical_type = label_name.split(' ')[0]

                if i == 0:
                    # first group styles the primary y-axis
                    fig.update_layout(yaxis=dict(range=[0, data_df.index.size-1],
                                                 tickvals=data_label_idx,
                                                 automargin=True,
                                                 tickfont=dict(
                                        color=colors[chemical_types.index(chemical_type)],
                                        size=8)))
                    text_height += len(data_label_idx) - 1
                    fig.add_annotation(x=label_pos, y=0.5,
                                       ax=label_pos, ay=text_height,
                                       text=label_name,
                                       showarrow=True,
                                       xref="x", yref="y",
                                       axref="x", ayref="y",
                                       arrowside='start',
                                       # arrowwidth=1.5,
                                       font=dict(color=colors[chemical_types.index(chemical_type)],
                                                 size=8))
                else:
                    # later groups get overlaid y-axes with their own tick colors
                    fig.add_trace(dict(yaxis='y{}'.format(i + 1)))
                    fig.update_layout({'yaxis{}'.format(i + 1): dict(
                        range=[0, data_df.index.size-1],
                        tickvals=data_label_idx,
                        ticktext=[data_df.index[i] for i in data_label_idx],
                        tickfont=dict(color=colors[chemical_types.index(chemical_type)], size=8),
                        automargin=True,
                        overlaying='y')})

                    text_height += len(data_label_idx)
                    fig.add_annotation(x=label_pos, y=text_height - len(data_label_idx) + 1,
                                       ax=label_pos, ay=text_height,
                                       text=label_name,
                                       showarrow=True,
                                       xref="x", yref="y",
                                       axref="x", ayref="y",
                                       arrowside='start',
                                       # arrowwidth=1.5,
                                       font=dict(color=colors[chemical_types.index(chemical_type)],
                                                 size=8))

        plot(fig, filename=heatmap_path)

        return heatmap_file_name
    def _generate_chem_visualization_content(self, output_directory, data_groups):
        """Build the tab-bar and tab-body HTML for a chemical abundance report.

        Produces a statistics tab (per-group sizes and describe() summaries)
        plus a heatmap tab backed by _create_chem_abun_heatmap. Returns the
        concatenated tab definitions and tab contents.
        """
        tab_def_content = ''
        tab_content = ''

        viewer_name = 'data_summary'
        tab_def_content += '''\n<div class="tab">\n'''
        tab_def_content += '''\n<button class="tablinks" '''
        tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
        tab_def_content += ''' id="defaultOpen"'''
        tab_def_content += '''>Matrix Statistics</button>\n'''

        tab_content += '''\n<div id="{}" class="tabcontent" style="overflow:auto">'''.format(
            viewer_name)

        chemical_types = list(data_groups.keys())
        # group keys are (type, units) tuples; render as 'type (units)'
        chemical_types = ['{} ({})'.format(item[0], item[1]) for item in chemical_types]
        type_text = 'Chemical Type' if len(chemical_types) == 1 else 'Chemical Types'
        tab_content += '''\n<h5>{}: {}</h5>'''.format(type_text,
                                                      ', '.join(chemical_types))

        for chemical_type, data_df in data_groups.items():
            chemical_type = '{} ({})'.format(chemical_type[0], chemical_type[1])
            tab_content += '''\n<br>'''
            tab_content += '''\n<hr style="height:2px;border-width:0;color:gray;background-color:gray">'''
            tab_content += '''\n<br>'''

            # describe() across rows (via transpose) and columns
            row_data_summary = data_df.T.describe().round(2).to_string()
            col_data_summary = data_df.describe().round(2).to_string()

            # chemical_type[0].upper() + chemical_type[1:] capitalizes the label
            tab_content += '''\n<h5>{} Chemical Matrix Size: {} x {}</h5>'''.format(
                chemical_type[0].upper() + chemical_type[1:],
                len(data_df.index),
                len(data_df.columns))

            tab_content += '''\n<h5>{} Row Aggregating Statistics</h5>'''.format(
                chemical_type[0].upper() + chemical_type[1:])
            html = '''\n<pre class="tab">''' + \
                str(row_data_summary).replace("\n", "<br>") + "</pre>"
            tab_content += html

            tab_content += '''\n<h5>{} Column Aggregating Statistics</h5>'''.format(
                chemical_type[0].upper() + chemical_type[1:])
            html = '''\n<pre class="tab">''' + \
                str(col_data_summary).replace("\n", "<br>") + "</pre>"
            tab_content += html
        tab_content += '\n</div>\n'

        heatmap_index_page = self._create_chem_abun_heatmap(output_directory, data_groups)

        viewer_name = 'MatrixHeatmapViewer'
        tab_def_content += '''\n<button class="tablinks" '''
        tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
        tab_def_content += '''>Matrix Heatmap</button>\n'''

        tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
        tab_content += '\n<iframe height="1300px" width="100%" '
        tab_content += 'src="{}" '.format(heatmap_index_page)
        tab_content += 'style="border:none;"></iframe>'
        tab_content += '\n</div>\n'

        tab_def_content += '\n</div>\n'
        return tab_def_content + tab_content
    def _generate_visualization_content(self, output_directory, heatmap_dir, data_df,
                                        top_heatmap_dir, top_percent):
        """Build the tab-bar and tab-body HTML for a generic matrix report.

        Tabs: matrix statistics, optional top-N-percent heatmap (when
        top_heatmap_dir is given), and the full matrix heatmap. Heatmap HTML
        files are copied from their source dirs into output_directory.
        Returns the concatenated tab definitions and tab contents.
        """

        row_data_summary = data_df.T.describe().round(2).to_string()
        col_data_summary = data_df.describe().round(2).to_string()

        tab_def_content = ''
        tab_content = ''

        viewer_name = 'data_summary'
        tab_def_content += '''\n<div class="tab">\n'''
        tab_def_content += '''\n<button class="tablinks" '''
        tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
        tab_def_content += ''' id="defaultOpen"'''
        tab_def_content += '''>Matrix Statistics</button>\n'''

        tab_content += '''\n<div id="{}" class="tabcontent" style="overflow:auto">'''.format(
            viewer_name)
        tab_content += '''\n<h5>Matrix Size: {} x {}</h5>'''.format(len(data_df.index),
                                                                    len(data_df.columns))
        tab_content += '''\n<h5>Row Aggregating Statistics</h5>'''
        html = '''\n<pre class="tab">''' + str(row_data_summary).replace("\n", "<br>") + "</pre>"
        tab_content += html
        tab_content += '''\n<br>'''
        tab_content += '''\n<hr style="height:2px;border-width:0;color:gray;background-color:gray">'''
        tab_content += '''\n<br>'''
        tab_content += '''\n<h5>Column Aggregating Statistics</h5>'''
        html = '''\n<pre class="tab">''' + str(col_data_summary).replace("\n", "<br>") + "</pre>"
        tab_content += html
        tab_content += '\n</div>\n'

        if top_heatmap_dir:
            viewer_name = 'TopHeatmapViewer'
            tab_def_content += '''\n<button class="tablinks" '''
            tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
            tab_def_content += '''>Top {} Percent Heatmap</button>\n'''.format(top_percent)

            heatmap_report_files = os.listdir(top_heatmap_dir)

            # find the heatmap html and copy it next to the report
            heatmap_index_page = None
            for heatmap_report_file in heatmap_report_files:
                if heatmap_report_file.endswith('.html'):
                    heatmap_index_page = heatmap_report_file

                shutil.copy2(os.path.join(top_heatmap_dir, heatmap_report_file),
                             output_directory)

            if heatmap_index_page:
                tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
                msg = 'Top {} percent of matrix sorted by sum of abundance values.'.format(
                    top_percent)
                tab_content += '''<p style="color:red;" >{}</p>'''.format(msg)

                tab_content += '\n<iframe height="1300px" width="100%" '
                tab_content += 'src="{}" '.format(heatmap_index_page)
                tab_content += 'style="border:none;"></iframe>'
                tab_content += '\n</div>\n'
            else:
                tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
                tab_content += '''\n<p style="color:red;" >'''
                tab_content += '''Heatmap is too large to be displayed.</p>\n'''
                tab_content += '\n</div>\n'

        # NOTE(review): 'False and' permanently disables the linear-plot tabs
        # below -- dead code; confirm whether this was meant to be temporary.
        if False and len(data_df.columns) <= 200:
            if top_heatmap_dir:
                viewer_name = 'MatrixLinearPlotViewer'
                tab_def_content += '''\n<button class="tablinks" '''
                tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
                tab_def_content += '''>Top {} Percent Linear Plot</button>\n'''.format(top_percent)

                linear_plot_page = self._generate_linear_plot(data_df, output_directory,
                                                              top_percent=top_percent)

                tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
                msg = 'Top {} percent of matrix sorted by sum of abundance values.'.format(
                    top_percent)
                tab_content += '''<p style="color:red;" >{}</p>'''.format(msg)
                tab_content += '\n<iframe height="1300px" width="100%" '
                tab_content += 'src="{}" '.format(linear_plot_page)
                tab_content += 'style="border:none;"></iframe>'
                tab_content += '\n</div>\n'
            else:
                viewer_name = 'MatrixLinearPlotViewer'
                tab_def_content += '''\n<button class="tablinks" '''
                tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
                tab_def_content += '''>Matrix Linear Plot</button>\n'''

                linear_plot_page = self._generate_linear_plot(data_df, output_directory)

                tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
                tab_content += '\n<iframe height="1300px" width="100%" '
                tab_content += 'src="{}" '.format(linear_plot_page)
                tab_content += 'style="border:none;"></iframe>'
                tab_content += '\n</div>\n'

        viewer_name = 'MatrixHeatmapViewer'
        tab_def_content += '''\n<button class="tablinks" '''
        tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
        tab_def_content += '''>Matrix Heatmap</button>\n'''

        heatmap_report_files = os.listdir(heatmap_dir)

        # find the heatmap html and copy it next to the report
        heatmap_index_page = None
        for heatmap_report_file in heatmap_report_files:
            if heatmap_report_file.endswith('.html'):
                heatmap_index_page = heatmap_report_file

            shutil.copy2(os.path.join(heatmap_dir, heatmap_report_file),
                         output_directory)

        if heatmap_index_page:
            tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
            tab_content += '\n<iframe height="1300px" width="100%" '
            tab_content += 'src="{}" '.format(heatmap_index_page)
            tab_content += 'style="border:none;"></iframe>'
            tab_content += '\n</div>\n'
        else:
            tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
            tab_content += '''\n<p style="color:red;" >'''
            tab_content += '''Heatmap is too large to be displayed.</p>\n'''
            tab_content += '\n</div>\n'

        tab_def_content += '\n</div>\n'
        return tab_def_content + tab_content
    def _generate_mantel_test_html_report(self, pwmantel_res):
        """Render the Mantel test results into an HTML report and upload it.

        Fills the shared matrix_template.html with the visualization content
        and a table style, pushes the directory to shock as a zip, and
        returns the single-element html_report list for the KBase report.
        """
        output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
        logging.info('Start generating html report in {}'.format(output_directory))

        html_report = list()

        self._mkdir_p(output_directory)
        result_file_path = os.path.join(output_directory, 'mantel_test_viewer_report.html')

        visualization_content = self._generate_mantel_test_visualization_content(pwmantel_res)

        # extra CSS appended to the template's existing <style> block
        table_style_content = '''
                                table {
                                  font-family: arial, sans-serif;
                                  border-collapse: collapse;
                                  width: 100%;
                                }

                                td, th {
                                  border: 1px solid #dddddd;
                                  text-align: left;
                                  padding: 8px;
                                }

                                tr:nth-child(even) {
                                  background-color: #dddddd;
                                }

                                </style>'''

        with open(result_file_path, 'w') as result_file:
            with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
                      'r') as report_template_file:
                report_template = report_template_file.read()
                report_template = report_template.replace('<p>Visualization_Content</p>',
                                                          visualization_content)
                # replacing '</style>' splices the table CSS into the template's style block
                report_template = report_template.replace('</style>',
                                                          table_style_content)
                result_file.write(report_template)

        report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
                                                  'pack': 'zip'})['shock_id']

        html_report.append({'shock_id': report_shock_id,
                            'name': os.path.basename(result_file_path),
                            'label': os.path.basename(result_file_path),
                            'description': 'HTML summary report for Mantel Test App'
                            })
        return html_report
def _generate_simper_html_report(self, simper_ret, simper_sum, species_stats, grouping_names):
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'simper_viewer_report.html')
visualization_content = self._generate_simper_visualization_content(simper_ret,
simper_sum,
species_stats,
grouping_names,
output_directory)
table_style_content = '''
table {
font-family: arial, sans-serif;
border-collapse: collapse;
width: 66%;
}
td, th {
border: 1px solid #dddddd;
text-align: left;
padding: 8px;
}
tr:nth-child(even) {
background-color: #dddddd;
}
</style>'''
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
report_template = report_template.replace('</style>',
table_style_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report | |
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import json
from datetime import datetime
import re
import koji
from kobo.rpmlib import parse_nvr
import semver
from freshmaker import db, conf, log
from freshmaker.handlers import ContainerBuildHandler
from freshmaker.events import BotasErrataShippedEvent, ManualBundleRebuild
from freshmaker.lightblue import ContainerImage
from freshmaker.models import ArtifactBuild, ArtifactType, Event
from freshmaker.types import EventState, ArtifactBuildState, RebuildReason
from freshmaker.pyxis import Pyxis
from freshmaker.kojiservice import KojiService
from freshmaker.errata import Errata
class HandleBotasAdvisory(ContainerBuildHandler):
    """
    Handles event that was created by transition of an advisory filed by
    BOTAS to SHIPPED_LIVE state
    """
    # Handler identifier used in configuration and logging.
    name = "HandleBotasAdvisory"

    # This prefix should be added to event reason, when skipping the event.
    # Because Release Driver checks event's reason for certain prefixes,
    # to determine if there is an error in bundles processing.
    _no_bundle_prefix = "No bundles to rebuild: "
def __init__(self, pyxis=None):
super().__init__()
if pyxis:
self._pyxis = pyxis
else:
if not conf.pyxis_server_url:
raise ValueError("'PYXIS_SERVER_URL' parameter should be set")
self._pyxis = Pyxis(conf.pyxis_server_url)
if not conf.freshmaker_root_url or "://" not in conf.freshmaker_root_url:
raise ValueError("'FRESHMAKER_ROOT_URL' parameter should be set to "
"a valid URL")
# Currently processed event
self.event = None
def can_handle(self, event):
if (isinstance(event, BotasErrataShippedEvent) and
'docker' in event.advisory.content_types):
return True
# This handler can handle manual bundle rebuilds too
if isinstance(event, ManualBundleRebuild):
return True
return False
    def handle(self, event):
        """Process a BOTAS shipped advisory or a manual bundle rebuild.

        Resolves which bundle images need a rebuild, prepares ArtifactBuild
        records for them and starts the container builds.  Always returns an
        empty list: this handler emits no follow-up events.
        """
        if event.dry_run:
            self.force_dry_run()
        self.event = event

        db_event = Event.get_or_create_from_event(db.session, event)
        self.set_context(db_event)

        # Check if event is allowed by internal policies
        if not self.event.is_allowed(self):
            msg = ("This image rebuild is not allowed by internal policy. "
                   f"message_id: {event.msg_id}")
            db_event.transition(EventState.SKIPPED, msg)
            self.log_info(msg)
            return []

        # Release Driver-triggered manual rebuilds carry explicit
        # 'bundle_images' and take a dedicated path.
        if isinstance(event, ManualBundleRebuild) and \
                hasattr(event, 'bundle_images'):
            bundles_to_rebuild = self._handle_release_driver_rebuild(db_event)
        # automatic rebuild and manual bundle rebuild(triggered by post request)
        else:
            bundles_to_rebuild = self._handle_bundle_rebuild(db_event)

        if not bundles_to_rebuild:
            return []

        builds = self._prepare_builds(db_event, bundles_to_rebuild)

        # Reset context to db_event.
        self.set_context(db_event)
        self.start_to_build_images(builds)

        # Transition the event depending on whether anything got submitted.
        if all([b.state == ArtifactBuildState.FAILED.value for b in builds]):
            db_event.transition(EventState.FAILED, "All bundle rebuilds failed")
        else:
            msg = f"Advisory {db_event.search_key}: Rebuilding " \
                  f"{len(db_event.builds.all())} bundle images."
            db_event.transition(EventState.BUILDING, msg)
        return []
def _handle_bundle_rebuild(self, db_event):
"""
Handle auto rebuild for an advisory created by Botas
OR manually triggered rebuild
:param db_event: database event that represent rebuild event
:rtype: list
:return: list of advisories that should be rebuilt
"""
# Mapping of operators' original build nvrs to rebuilt nvrs in advisory
nvrs_mapping = self._create_original_to_rebuilt_nvrs_map()
original_nvrs = nvrs_mapping.keys()
self.log_info(
"Orignial nvrs of build in the advisory #{0} are: {1}".format(
self.event.advisory.errata_id, " ".join(original_nvrs)))
# Get image manifest_list_digest for all original images, manifest_list_digest is used
# in pullspecs in bundle's related images
original_digests_by_nvr = {}
original_nvrs_by_digest = {}
for nvr in original_nvrs:
digest = self._pyxis.get_manifest_list_digest_by_nvr(nvr)
if digest:
original_digests_by_nvr[nvr] = digest
original_nvrs_by_digest[digest] = nvr
else:
log.warning(
f"Image manifest_list_digest not found for original image {nvr} in Pyxis, "
"skip this image"
)
if not original_digests_by_nvr:
msg = f"None of the original images have digests in Pyxis: {','.join(original_nvrs)}"
log.warning(msg)
db_event.transition(EventState.SKIPPED, msg)
return []
# Get image manifest_list_digest for all rebuilt images, manifest_list_digest is used
# in pullspecs of bundle's related images
rebuilt_digests_by_nvr = {}
rebuilt_nvrs = nvrs_mapping.values()
for nvr in rebuilt_nvrs:
# Don't require that the manifest list digest be published in this case because
# there's a delay from after an advisory is shipped and when the published repositories
# entry is populated
digest = self._pyxis.get_manifest_list_digest_by_nvr(nvr, must_be_published=False)
if digest:
rebuilt_digests_by_nvr[nvr] = digest
else:
log.warning(
f"Image manifest_list_digest not found for rebuilt image {nvr} in Pyxis, "
"skip this image"
)
if not rebuilt_digests_by_nvr:
msg = f"None of the rebuilt images have digests in Pyxis: {','.join(rebuilt_nvrs)}"
log.warning(msg)
db_event.transition(EventState.SKIPPED, msg)
return []
index_images = self._pyxis.get_operator_indices()
# get latest bundle images per channel per index image filtered
# by the highest semantic version
all_bundles = self._pyxis.get_latest_bundles(index_images)
self.log_debug(
"There are %d bundles that are latest in a channel in the found index images",
len(all_bundles),
)
# A mapping of digests to bundle metadata. This metadata is used to
# for the CSV metadata updates.
bundle_mds_by_digest = {}
# get bundle digests for original images
bundle_digests_by_related_nvr = {}
for image_nvr, image_digest in original_digests_by_nvr.items():
bundles = self._pyxis.get_bundles_by_related_image_digest(
image_digest, all_bundles
)
if not bundles:
log.info(f"No latest bundle image with the related image of {image_nvr}")
continue
for bundle in bundles:
bundle_digest = bundle['bundle_path_digest']
bundle_mds_by_digest[bundle_digest] = bundle
bundle_digests_by_related_nvr.setdefault(image_nvr, []).append(bundle_digest)
if not bundle_digests_by_related_nvr:
msg = "None of the original images have related bundles, skip."
log.warning(msg)
db_event.transition(EventState.SKIPPED, msg)
return []
self.log_info(
"Found %d bundles with relevant related images", len(bundle_digests_by_related_nvr)
)
# Mapping of bundle digest to bundle data
# {
# digest: {
# "images": [image_amd64, image_aarch64],
# "nvr": NVR,
# "auto_rebuild": True/False,
# "osbs_pinning": True/False,
# "pullspecs": [...],
# }
# }
bundles_by_digest = {}
default_bundle_data = {
'images': [],
'nvr': None,
'auto_rebuild': False,
'osbs_pinning': False,
# CSV modifications for the rebuilt bundle image
'pullspec_replacements': [],
'update': {},
}
# Get images for each bundle digest, a bundle digest can have multiple images
# with different arches.
for digest in bundle_mds_by_digest:
bundles = self._pyxis.get_images_by_digest(digest)
# If no bundle image found, just skip this bundle digest
if not bundles:
self.log_warn('The bundle digest %r was not found in Pyxis. Skipping.', digest)
continue
bundle_nvr = bundles[0]['brew']['build']
# If specific container images where requested to rebuild, process only them
if (isinstance(self.event, ManualBundleRebuild)
and self.event.container_images # noqa: W503
and bundle_nvr not in self.event.container_images): # noqa: W503
self.log_debug("Ignoring '%s', because it's not in requested rebuilds"
" (container_images in request)", bundle_nvr)
continue
# Filter out builds from dependent event that were rebuilt recently
done_build = db_event.get_artifact_build_from_event_dependencies(
bundle_nvr)
if done_build:
self.log_debug("Ignoring '%s' bundle, because it was already rebuilt"
" in dependent event", bundle_nvr)
continue
bundles_by_digest.setdefault(digest, copy.deepcopy(default_bundle_data))
bundles_by_digest[digest]['nvr'] = bundle_nvr
bundles_by_digest[digest]['images'] = bundles
# Unauthenticated koji session to fetch build info of bundles
koji_api = KojiService(conf.koji_profile)
# For each bundle, check whether it should be rebuilt by comparing the
# auto_rebuild_tags of repository and bundle's tags
for digest, bundle_data in bundles_by_digest.items():
bundle_nvr = bundle_data['nvr']
# Images are for different arches, just check against the first image
image = bundle_data['images'][0]
if self.image_has_auto_rebuild_tag(image):
bundle_data['auto_rebuild'] = True
# Fetch buildinfo
buildinfo = koji_api.get_build(bundle_nvr)
related_images = (
buildinfo.get('extra', {})
.get('image', {})
.get('operator_manifests', {})
.get('related_images', {})
)
bundle_data['osbs_pinning'] = related_images.get('created_by_osbs', False)
# Save the original pullspecs
bundle_data['pullspec_replacements'] = related_images.get('pullspecs', [])
# Digests of bundles to be rebuilt
to_rebuild_digests = set()
# Now for each bundle, replace the original digest with rebuilt
# digest (override pullspecs)
for digest, bundle_data in bundles_by_digest.items():
# Override pullspecs only when auto_rebuild is enabled and OSBS-pinning
# mechanism is used.
if not (bundle_data['auto_rebuild'] and bundle_data['osbs_pinning']):
self.log_info(
'The bundle %r does not have auto-rebuild tags (%r) and/or OSBS pinning (%r)',
bundle_data['nvr'],
bundle_data['auto_rebuild'],
bundle_data['osbs_pinning'],
)
continue
csv_name = bundle_mds_by_digest[digest]['csv_name']
version = bundle_mds_by_digest[digest]['version_original']
bundle_data.update(self._get_csv_updates(csv_name, version))
for pullspec in bundle_data['pullspec_replacements']:
# A pullspec item example:
# {
# 'new': 'registry.exampe.io/repo/example-operator@sha256:<sha256-value>',
# 'original': 'registry.example.io/repo/example-operator:v2.2.0',
# 'pinned': True,
# # value used for internal purpose during manual rebuilds, it's an old pullspec that was replaced
# '_old': 'registry.exampe.io/repo/example-operator@sha256:<previous-sha256-value>,
# }
# A pullspec path is in format of "registry/repository@digest"
pullspec_elems = pullspec.get('new').split('@')
old_digest = pullspec_elems[1]
| |
not str:
raise BadRequest("Attachment content must be str")
attachment.attachment_size = len(attachment.content)
attachment_content = attachment.content
elif attachment.attachment_type == AttachmentType.ASCII:
if type(attachment.content) is not str:
raise BadRequest("Attachment content must be str")
attachment.attachment_size = len(attachment.content)
attachment_content = attachment.content
elif attachment.attachment_type == AttachmentType.OBJECT:
raise BadRequest("AttachmentType.OBJECT is not supported currently")
elif attachment.attachment_type == AttachmentType.REFERENCE:
if not isinstance(attachment.content, basestring):
raise BadRequest("Attachment content must be binary string")
attachment.attachment_size = len(attachment.content)
attachment_content = attachment.content
else:
raise BadRequest("Unknown attachment-type: %s" % attachment.attachment_type)
attachment.content = ''
content = dict(data=attachment_content, content_type=attachment.content_type)
att_id, _ = self.create(attachment, attachments={self.DEFAULT_ATTACHMENT_NAME: content}, actor_id=actor_id)
if resource_id:
self.create_association(resource_id, PRED.hasAttachment, att_id)
return att_id
def read_attachment(self, attachment_id='', include_content=False):
"""
Returns the metadata of an attachment. Unless indicated otherwise the content returned
is only a name to the actual attachment content.
"""
attachment = self.read(attachment_id)
if not isinstance(attachment, Attachment):
raise Inconsistent("Object in datastore must be Attachment, not %s" % type(attachment))
if include_content:
attachment.content = self.rr_store.read_attachment(attachment_id,
attachment_name=self.DEFAULT_ATTACHMENT_NAME)
if attachment.attachment_type == AttachmentType.BLOB:
if type(attachment.content) is not str:
raise BadRequest("Attachment content must be str")
return attachment
    def delete_attachment(self, attachment_id=''):
        # Delete the attachment resource and its associations.  The `return`
        # inside `finally` deliberately swallows any exception raised by
        # rr_store.delete_attachment(), so the resource document is removed
        # even when the stored content is missing or the content delete fails.
        # NOTE(review): confirm the exception swallow is intended best-effort.
        try:
            self.rr_store.delete_attachment(attachment_id, attachment_name=self.DEFAULT_ATTACHMENT_NAME)
        finally:
            return self.delete(attachment_id, del_associations=True)
def find_attachments(self, resource_id='', keyword=None,
limit=0, descending=False, include_content=False, id_only=True):
key = [resource_id]
att_res = self.rr_store.find_by_view("attachment", "by_resource", start_key=key,
end_key=list(key), descending=descending, limit=limit,
id_only=True)
att_ids = [att[0] for att in att_res if not keyword or keyword in att[1][2]]
if id_only:
return att_ids
else:
atts = self.rr_store.read_mult(att_ids)
if include_content:
for att in atts:
att.content = self.rr_store.read_attachment(doc=att._id, attachment_name=self.DEFAULT_ATTACHMENT_NAME)
return atts
# -------------------------------------------------------------------------
# Association operations
    def create_association(self, subject=None, predicate=None, object=None, assoc_type=None):
        """
        Create an association between two IonObjects with a given predicate.

        @param subject    Subject resource object or resource id (str)
        @param predicate  Predicate name; must be registered in Predicates
        @param object     Object resource object or resource id (str)
        @param assoc_type DEPRECATED
        @retval           Datastore create result for the new Association
        @throws BadRequest if arguments are missing, the predicate is unknown,
                           subject/object types violate the predicate's
                           domain/range, or the association already exists
        """
        if not (subject and predicate and object):
            raise BadRequest("Association must have all elements set")
        # Accept either a resource id (str) or an already-read resource object.
        if type(subject) is str:
            subject_id = subject
            subject = self.read(subject_id)
            subject_type = subject.type_
        else:
            if "_id" not in subject:
                raise BadRequest("Subject id not available")
            subject_id = subject._id
            subject_type = subject.type_

        if type(object) is str:
            object_id = object
            object = self.read(object_id)
            object_type = object.type_
        else:
            if "_id" not in object:
                raise BadRequest("Object id not available")
            object_id = object._id
            object_type = object.type_

        # Check that subject and object type are permitted by association definition
        try:
            pt = Predicates.get(predicate)
        except AttributeError:
            raise BadRequest("Predicate unknown %s" % predicate)
        # A type is acceptable if it is listed directly in the predicate's
        # domain/range, or extends one of the listed types.
        if not subject_type in pt['domain']:
            found_st = False
            for domt in pt['domain']:
                if subject_type in getextends(domt):
                    found_st = True
                    break
            if not found_st:
                raise BadRequest("Illegal subject type %s for predicate %s" % (subject_type, predicate))
        if not object_type in pt['range']:
            found_ot = False
            for rant in pt['range']:
                if object_type in getextends(rant):
                    found_ot = True
                    break
            if not found_ot:
                raise BadRequest("Illegal object type %s for predicate %s" % (object_type, predicate))

        # Finally, ensure this isn't a duplicate
        assoc_list = self.find_associations(subject_id, predicate, object_id, id_only=False)
        if len(assoc_list) != 0:
            assoc = assoc_list[0]
            raise BadRequest("Association between %s and %s with predicate %s already exists" % (subject_id, object_id, predicate))

        assoc = IonObject("Association",
                          s=subject_id, st=subject_type,
                          p=predicate,
                          o=object_id, ot=object_type,
                          ts=get_ion_ts())
        return self.rr_store.create(assoc, create_unique_association_id())
def create_association_mult(self, assoc_list=None):
"""
Create multiple associations between two IonObjects with a given predicate.
@param assoc_list A list of 3-tuples of (subject, predicate, object). Subject/object can be str or object
"""
if not assoc_list:
return []
lookup_rid = set()
for s, p, o in assoc_list:
if type(s) is str:
lookup_rid.add(s)
if type(o) is str:
lookup_rid.add(o)
lookup_rid = list(lookup_rid)
lookup_obj = self.read_mult(lookup_rid) if lookup_rid else []
res_by_id = dict(zip(lookup_rid, lookup_obj))
create_ts = get_ion_ts()
new_assoc_list = []
for s, p, o in assoc_list:
new_s = s
new_o = o
if type(s) is str:
new_s = res_by_id[s]
if not new_s:
raise NotFound("Subject %s not found" % s)
else:
if "_id" not in s:
raise BadRequest("Subject id not available")
if type(o) is str:
new_o = res_by_id[o]
if not new_o:
raise NotFound("Object %s not found" % o)
else:
if "_id" not in object:
raise BadRequest("Object id not available")
# Check that subject and object type are permitted by association definition
if p not in Predicates:
raise BadRequest("Predicate unknown %s" % p)
pt = Predicates.get(p)
if not new_s.type_ in pt['domain']:
found_st = False
for domt in pt['domain']:
if new_s.type_ in getextends(domt):
found_st = True
break
if not found_st:
raise BadRequest("Illegal subject type %s for predicate %s" % (new_s.type_, p))
if not new_o.type_ in pt['range']:
found_ot = False
for rant in pt['range']:
if new_o.type_ in getextends(rant):
found_ot = True
break
if not found_ot:
raise BadRequest("Illegal object type %s for predicate %s" % (new_o.type_, p))
# Skip duplicate check
assoc = IonObject("Association",
s=new_s._id, st=new_s.type_,
p=p,
o=new_o._id, ot=new_o.type_,
ts=create_ts)
new_assoc_list.append(assoc)
new_assoc_ids = [create_unique_association_id() for i in xrange(len(new_assoc_list))]
return self.rr_store.create_mult(new_assoc_list, new_assoc_ids)
def delete_association(self, association=''):
"""
Delete an association between two IonObjects
@param association Association object, association id or 3-list of [subject, predicate, object]
"""
if type(association) in (list, tuple) and len(association) == 3:
subject, predicate, obj = association
assoc_id_list = self.find_associations(subject=subject, predicate=predicate, object=obj, id_only=True)
success = True
for aid in assoc_id_list:
success = success and self.rr_store.delete(aid, object_type="Association")
return success
else:
return self.rr_store.delete(association, object_type="Association")
def _is_in_association(self, obj_id):
if not obj_id:
raise BadRequest("Must provide object id")
assoc_ids = self.find_associations(anyside=obj_id, id_only=True, limit=1)
if assoc_ids:
log.debug("_is_in_association(%s): Object has associations: %s", obj_id, assoc_ids)
return True
return False
def read_association(self, association_id=None):
if not association_id:
raise BadRequest("Missing association_id parameter")
return self.rr_store.read(association_id, object_type="Association")
# -------------------------------------------------------------------------
# Resource find operations
def read_object(self, subject="", predicate="", object_type="", assoc="", id_only=False):
if assoc:
if type(assoc) is str:
assoc = self.read_association(assoc)
return assoc.o if id_only else self.read(assoc.o)
else:
obj_list, assoc_list = self.find_objects(subject=subject, predicate=predicate, object_type=object_type, id_only=True)
if not obj_list:
raise NotFound("No object found for subject=%s, predicate=%s, object_type=%s" % (subject, predicate, object_type))
elif len(obj_list) > 1:
raise Inconsistent("More than one object found for subject=%s, predicate=%s, object_type=%s: count=%s" % (
subject, predicate, object_type, len(obj_list)))
return obj_list[0] if id_only else self.read(obj_list[0])
def read_subject(self, subject_type="", predicate="", object="", assoc="", id_only=False):
if assoc:
if type(assoc) is str:
assoc = self.read_association(assoc)
return assoc.s if id_only else self.read(assoc.s)
else:
sub_list, assoc_list = self.find_subjects(subject_type=subject_type, predicate=predicate, object=object, id_only=True)
if not sub_list:
raise NotFound("No subject found for subject_type=%s, predicate=%s, object=%s" % (subject_type, predicate, object))
elif len(sub_list) > 1:
raise Inconsistent("More than one subject found for subject_type=%s, predicate=%s, object=%s: count=%s" % (
subject_type, predicate, object, len(sub_list)))
return sub_list[0] if id_only else self.read(sub_list[0])
    def find_objects(self, subject="", predicate="", object_type="", id_only=False,
                     limit=None, skip=None, descending=None, access_args=None):
        # Thin pass-through to the datastore's object query.
        return self.rr_store.find_objects(subject, predicate, object_type, id_only=id_only,
                                          limit=limit, skip=skip, descending=descending, access_args=access_args)
    def find_subjects(self, subject_type="", predicate="", object="", id_only=False,
                      limit=None, skip=None, descending=None, access_args=None):
        # Thin pass-through to the datastore's subject query.
        return self.rr_store.find_subjects(subject_type, predicate, object, id_only=id_only,
                                           limit=limit, skip=skip, descending=descending, access_args=access_args)
    def find_associations(self, subject="", predicate="", object="", assoc_type=None, id_only=False, anyside=None, query=None,
                          limit=None, skip=None, descending=None, access_args=None):
        # Thin pass-through to the datastore's association query.
        # assoc_type is DEPRECATED but still forwarded for compatibility.
        return self.rr_store.find_associations(subject, predicate, object, assoc_type, id_only=id_only, anyside=anyside,
                                               query=query, limit=limit, skip=skip, descending=descending, access_args=access_args)
def find_objects_mult(self, subjects=[], id_only=False, predicate="", access_args=None):
return self.rr_store.find_objects_mult(subjects=subjects, id_only=id_only, predicate=predicate, access_args=access_args)
def find_subjects_mult(self, objects=[], id_only=False, predicate="", access_args=None):
return self.rr_store.find_subjects_mult(objects=objects, id_only=id_only, predicate=predicate, access_args=access_args)
def get_association(self, subject="", predicate="", object="", assoc_type=None, id_only=False):
assoc = self.rr_store.find_associations(subject, predicate, object, id_only=id_only)
if not assoc:
raise NotFound("Association for subject/predicate/object/type %s/%s/%s not found" % (
subject, predicate, object))
elif len(assoc) > 1:
raise Inconsistent("Duplicate associations found for subject/predicate/object/type %s/%s/%s" % (
subject, predicate, object))
return assoc[0]
    def find_resources(self, restype="", lcstate="", name="", id_only=False, access_args=None):
        # Thin pass-through to the datastore's resource query.
        return self.rr_store.find_resources(restype, lcstate, name, id_only=id_only, access_args=access_args)
    def find_resources_ext(self, restype="", lcstate="", name="",
                           keyword=None, nested_type=None,
                           attr_name=None, attr_value=None, alt_id="", alt_id_ns="",
                           limit=None, skip=None, descending=None, id_only=False,
                           query=None,
                           access_args=None):
        # Extended resource query pass-through; supports keyword, nested-type,
        # attribute and alternate-id filters in addition to find_resources().
        return self.rr_store.find_resources_ext(restype=restype, lcstate=lcstate, name=name,
                                                keyword=keyword, nested_type=nested_type,
                                                attr_name=attr_name, attr_value=attr_value, alt_id=alt_id, alt_id_ns=alt_id_ns,
                                                limit=limit, skip=skip, descending=descending,
                                                id_only=id_only, query=query, access_args=access_args)
    def get_superuser_actors(self, reset=False):
        """Returns a memoized list of system superusers, including the system actor and all actors with
        ION_MANAGER role assignment.

        @param reset  When True, recompute the memoized list.
        """
        # Memoized on self.superuser_actors; recomputed when reset is set.
        if reset or self.superuser_actors is None:
            found_actors = []
            system_actor_name = CFG.get_safe("system.system_actor", "ionsystem")
            sysactors,_ = self.find_resources(restype=RT.ActorIdentity, name=system_actor_name, id_only=True)
            found_actors.extend(sysactors)

            # Find all ION_MANAGER roles, then collect the ActorIdentity
            # subjects holding them via hasRole associations (deduplicated).
            ion_mgrs,_ = self.find_resources_ext(restype=RT.UserRole, attr_name="governance_name", attr_value="ION_MANAGER", id_only=False)
            actors, assocs = self.find_subjects_mult(ion_mgrs, id_only=False)
            super_actors = list({actor._id for actor, assoc in zip(actors, assocs) if assoc.p == PRED.hasRole and assoc.st == RT.ActorIdentity})
            found_actors.extend(super_actors)

            self.superuser_actors = found_actors
            log.info("get_superuser_actors(): system actor=%s, superuser actors=%s" % (sysactors, super_actors))
        return self.superuser_actors
# -------------------------------------------------------------------------
# Extended resource framework operations
def get_resource_extension(self, resource_id='', resource_extension='', computed_resource_type=None, ext_associations=None, ext_exclude=None, **kwargs ):
"""Returns | |
import sys
import math
import scipy
import pylab
import scipy.io.wavfile as wav
import wave
from scipy import signal
from itertools import product
import numpy
def readWav():
    """
    Reads a sound wave from the file named by sys.argv[1], prints its basic
    parameters and returns them.

    Returns a tuple (sound_wave, nframes, framerate, duration, params) where
    sound_wave is the still-open wave reader.
    """
    # Read the sound wave from the input.
    sound_wave = wave.open(sys.argv[1], "r")

    # Get parameters of the sound wave.
    nframes = sound_wave.getnframes()
    framerate = sound_wave.getframerate()
    params = sound_wave.getparams()
    duration = nframes / float(framerate)

    # Parenthesized single-argument prints behave identically under
    # Python 2 (print statement) and Python 3 (print function).
    print("frame rate: %d " % (framerate,))
    print("nframes: %d" % (nframes,))
    print("duration: %f seconds" % (duration,))
    print(scipy.array(sound_wave))

    return (sound_wave, nframes, framerate, duration, params)
def getDuration(sound_file):
    """
    Return the duration (in seconds) of the given sound file.

    The wave handle is closed before returning; the original implementation
    leaked the open file.
    """
    wr = wave.open(sound_file, 'r')
    try:
        nframes = wr.getnframes()
        framerate = wr.getframerate()
    finally:
        wr.close()
    return nframes / float(framerate)
def getFrameRate(sound_file):
    """
    Return the frame rate (samples per second) of the given sound file.

    The wave handle is closed before returning; the original implementation
    leaked the open file.
    """
    wr = wave.open(sound_file, 'r')
    try:
        return wr.getframerate()
    finally:
        wr.close()
def plotSoundWave(rate, sample):
    """
    Plots the beginning of a given sound wave.

    :param rate: sample rate in Hz
    :param sample: sequence of sample values
    """
    # Time axis covering two seconds of audio at the given rate.
    t = scipy.linspace(0, 2, 2 * rate, endpoint=False)
    pylab.figure('Sound wave')
    # Only the first 0.0001 s worth of samples is actually plotted.
    T = int(0.0001 * rate)
    pylab.plot(t[:T], sample[:T],)
    pylab.show()
def plotPartials(binFrequencies, maxFreq, magnitudes):
    """
    Plots the magnitudes of spectral partials up to maxFreq.

    :param binFrequencies: frequency (Hz) of each bin
    :param maxFreq: highest frequency index to plot
    :param magnitudes: magnitude of each bin
    """
    T = int(maxFreq)
    pylab.figure('Power spectrum')
    pylab.plot(binFrequencies[:T], magnitudes[:T],)
    pylab.xlabel('Frequency (Hz)')
    pylab.ylabel('Power spectrum (|X[k]|^2)')
    pylab.show()
def plotPowerSpectrum(FFT, binFrequencies, maxFreq):
    """
    Calculates and plots the power spectrum (|X[k]|^2) of a given sound wave.

    :param FFT: complex FFT coefficients
    :param binFrequencies: frequency (Hz) of each bin
    :param maxFreq: highest frequency index to plot
    """
    T = int(maxFreq)
    pylab.figure('Power spectrum')
    # Power = squared magnitude of the FFT coefficients.
    pylab.plot(binFrequencies[:T], scipy.absolute(FFT[:T]) * scipy.absolute(FFT[:T]),)
    pylab.xlabel('Frequency (Hz)')
    pylab.ylabel('Power spectrum (|X[k]|^2)')
    pylab.show()
def get_frequencies_axis(framerate, fft_length):
    """Return the center frequency (Hz) of each FFT bin.

    Bin k corresponds to frequency k * framerate / fft_length.
    """
    bin_resolution = float(framerate) / float(fft_length)
    return [k * bin_resolution for k in range(fft_length)]
def is_Prime(n):
    """Return True when abs(int(n)) is a prime number."""
    # Normalize to a non-negative integer.
    n = abs(int(n))

    # 0 and 1 are not prime; 2 is the only even prime.
    if n < 2:
        return False
    if n == 2:
        return True
    if not n & 1:
        return False

    # Trial division by odd candidates up to the square root of n.
    for candidate in range(3, int(n ** 0.5) + 1, 2):
        if n % candidate == 0:
            return False
    return True
def get_next_power_2(n):
    """
    Return the largest power of two strictly smaller than n, or 1 when
    n <= 2 (the original docstring's "closest number smaller than n" was
    inaccurate for those edge cases).
    """
    power = 1
    while power < n:
        power *= 2
    # Integer floor division: the original `power / 2` returned a float on
    # Python 3; `//` is identical for ints on Python 2.
    if power > 1:
        return power // 2
    return 1
class MIDI_Detector_Least_Squares_2(object):
    """
    Class for MIDI notes detection given a .wav file.
    """
    # Constants:
    #THRESHOLD = 0.005e+13 # Empirically found magnitude threshold.
    # Only frequencies whose magnitude is higher than
    # this threshold are going to be considered.
    HAN_WINDOW = 0.093  # 93 ms Hanning window size by Pertusa.
    HOP_SIZE = 0.00928  # 9.28 ms hop size by Pertusa.

    # Measured noise-power statistics (mean and standard deviation) per
    # piano/instrument set; the _2 and _10 suffixes denote alternative
    # estimates (e.g. computed from 10 samples, per the inline notes).
    AkPnBcht_noise = 4.5883573833e+15
    AkPnBcht_std = 3.60397998387e+15
    AkPnBcht_noise_2 = 3.42198774903e+15
    AkPnBcht_std_2 = 2.65280448383e+15
    AkPnBcht_std_10 = 1.03972925552e+15  # From 10 samples
    AkPnBcht_std__10 = 1.69916357929e+14  # From 10 samples
    AkPnBsdf_noise = 2.91911620721e+15
    AkPnBsdf_std = 1.69388970617e+15
    AkPnBsdf_noise_2 = 1.41729560313e+13  # From 10 samples
    AkPnBsdf_std_2 = 6.54120706376e+14  # From 10 samples
    AkPnCGdD_noise = 2.74224876671e+15
    AkPnCGdD_std = 3.28471607186e+15
    AkPnCGdD_noise_2 = 9.07842836729e+14  # From 10 samples
    AkPnCGdD_std_2 = 2.10397850449e+14  # From 10 samples
    AkPnStgb_noise_10 = 1.20237557768e+15  # From 10 samples
    AkPnStgb_std_10 = 2.3459546709e+14  # From 10 samples
    ENSTDkAm_noise_10 = 1.2158350262e+16  # From 10 samples
    ENSTDkAm_std_10 = 8.43175297998e+15  # From 10 samples
    ENSTDkCl_noise_10 = 7.01687672799e+15  # From 10 samples
    ENSTDkCl_std_10 = 3.72640836838e+15  # From 10 samples
    SptkBGAm_noise_10 = 6.6553382974e+15
    SptkBGAm_std_10 = 4.38008203527e+15
    SptkBGCl_noise_10 = 2.99160007806e+15
    SptkBGCl_std_10 = 1.38230425716e+15
    StbgTGd2_noise_10 = 1.22960679454e+15
    StbgTGd2_std_10 = 8.05049808568e+14
    def __init__(self, wav_file):
        """
        :param wav_file: path to the .wav file to analyze
        """
        self.wav_file = wav_file
        # Empirically tuned magnitude threshold (varies per input style).
        self.THRESHOLD = 0.005e+13  # before: 0.005e+13 twinkle: 0.002e+14 scale: 0.005e+16
        self.HAN_WINDOW = 0.093  # analysis window length, seconds
        self.HOP_SIZE = 0.00928  # hop between windows, seconds
        self.minFreqConsidered = 20  # Hz
        self.maxFreqConsidered = 5000  # Hz
        # Fundamental frequencies (Hz) of the lowest piano notes, from A0 up.
        self.low_f0s = [27.5, 29.135, 30.868, 32.703, 34.648, 37.708, 38.891,
                        41.203, 43.654, 46.249, 48.999, 51.913, 55.0, 58.27,
                        61.735, 65.406, 69.296, 73.416, 77.782, 82.407]
        #self.noise_threshold = 1.22960679454e+15
        #self.noise_std = 8.05049808568e+14
        # Noise-power statistics used when filtering spectral peaks.
        self.avg_noise_power = 9.47594450397e+16
        self.std_noise_power = 856838153209.0
    def detect_MIDI_notes(self):
        """
        The algorithm for calculating midi notes from a given wav file.

        Returns a list of detected MIDI note numbers; an empty list is
        returned for clips not longer than 0.18 seconds.
        """
        (framerate, sample) = wav.read(self.wav_file)
        # We need to change the 2 channels into one because STFT works only
        # for 1 channel. We could also do STFT for each channel separately.
        #monoChannel = sample.mean(axis=1)
        duration = getDuration(self.wav_file)
        midi_notes = []

        # Consider only files with a duration longer than 0.2 seconds.
        if duration > 0.18:
            # Spectral analysis: FFT and magnitude-filtered peak frequencies.
            (FFT, filteredFreqs, maxFreq, magnitudes, significant_freq) = self.calculateFFT(duration, framerate, sample)
            # Group neighbouring peak frequencies and average each cluster.
            clusters = self.clusterFrequencies(filteredFreqs)
            averagedClusters = self.getClustersMeans(clusters)
            # Derive fundamental-frequency candidates and map them to MIDI.
            f0_candidates = self.getF0Candidates(averagedClusters)
            midi_notes = self.matchWithMIDINotes(f0_candidates)

            # Include a note with a significant magnitude:
            # if its magnitude is higher than the sum of magnitudes of all other spectral peaks
            # include it in the list of detected notes and remove the note that's octave lower than this one
            # if it was also detected.
            if significant_freq > 0:
                significant_midi_notes = self.matchWithMIDINotes([significant_freq])
                significant_midi_note = significant_midi_notes[0]
                if significant_midi_note not in midi_notes:
                    midi_notes.append(significant_midi_note)
                    midi_notes = self.remove_lower_octave(significant_midi_note, midi_notes)
        # Disabled experiment: shift all detected notes up by one semitone.
        '''
        for i in range(len(midi_notes)):
            midi_notes[i] = midi_notes[i]+1
        '''
        return midi_notes
def remove_lower_octave(self, upper_octave, midi_notes):
    """Drop the note 12 semitones below *upper_octave* from *midi_notes*
    (in place), if present, and return the list."""
    try:
        midi_notes.remove(upper_octave - 12)
    except ValueError:
        # Lower octave was not detected; nothing to do.
        pass
    return midi_notes
def Pertusa_Inesta_Algorithm(self):
    # Pertusa & Inesta style multi-F0 analysis over STFT frames.
    # Work in progress: currently only prints the per-frame significant
    # bin frequencies; the note-matching stage below is commented out.
    (framerate, sample) = wav.read(self.wav_file)
    # Mix stereo down to one channel; the STFT expects mono input.
    monoChannel = sample.mean(axis=1)
    stft = self.STFT(monoChannel, framerate, self.HAN_WINDOW, self.HOP_SIZE)
    # Per-frame lists of significant bin frequencies and their powers.
    (stft_bin_freqs, stft_magnitudes) = self.get_stft_bin_freqs(stft, framerate)
    print stft_bin_freqs
    #print self.get_candidates_with_partials(stft_bin_freqs[0], stft_magnitudes[0])
    '''
    midi_notes = []
    for freqs in stftBinFrequencies:
        midi_notes.append(self.matchWithMIDINotes(sorted(freqs.keys())))
    print midi_notes[0]
    return midi_notes
    '''
''' Given STFT returns a list of dictionaries of bin frequencies and corresponding amplitudes.
Each list is a list of bin frequencies for a corresponding FFT in STFT.
The frequencies are filtered based on their magnitude. '''
# TODO: Consider zero padding to get more precise estimation for lower frequencies.
def get_stft_bin_freqs(self, stft, framerate):
    """Return (frequencies, magnitudes) of significant bins per STFT frame.

    For each frame, FFT-bin centre frequencies strictly inside
    (minFreqConsidered, maxFreqConsidered) whose power spectrum exceeds
    THRESHOLD are kept along with their power values.  Both results are
    lists with one inner list per frame.
    """
    fft_length = self.HAN_WINDOW * framerate
    binResolution = float(framerate) / float(fft_length)
    stft_binFrequencies = []
    stft_magnitudes = []
    for frame in stft:
        binFreqs = []
        magnitudes = []
        for k in range(len(frame)):
            binFreq = k * binResolution
            if self.minFreqConsidered < binFreq < self.maxFreqConsidered:
                # abs() of a (complex) FFT coefficient is its magnitude.
                # scipy.absolute was removed from the top-level SciPy
                # namespace (SciPy >= 1.12), so use the builtin instead.
                power_spectrum = abs(frame[k]) * abs(frame[k])
                if power_spectrum > self.THRESHOLD:
                    binFreqs.append(binFreq)
                    magnitudes.append(power_spectrum)
        stft_binFrequencies.append(binFreqs)
        stft_magnitudes.append(magnitudes)
    return (stft_binFrequencies, stft_magnitudes)
def get_candidates_with_partials(self, frequencies, magnitudes):
print frequencies
partial_margin = 11.0 # Hz
candidates_freq = [] # A list of frequencies of each candidate.
candidates_magnitude = [] # A list of magnitudes of frequencies of each candidate.
for i in range(len(frequencies)):
(partials, partial_magnitudes) = self.find_partials(frequencies[i:], frequencies[i], magnitudes[i:])
candidates_freq.append(partials)
candidates_magnitude.append(partial_magnitudes)
return (candidates_freq, candidates_magnitude)
def calculateFFT(self, duration, framerate, sample):
"""
Calculates FFT for a given sound wave.
Considers only frequencies with the magnitudes higher than
a given threshold.
"""
fft_length = int(duration * framerate) # 216090
#fft_length = int(2**17)
'''
# numpy.fft is very slow if fft length is a prime number.
# TODO: Fix this problem.
while is_Prime(fft_length):
print 'PRIME!!!'
fft_length -= 1
FFT = numpy.fft.fft(sample, n=fft_length)
'''
fft_length = get_next_power_2(fft_length)
FFT = numpy.fft.fft(sample, n=fft_length)
''' ADJUSTING THRESHOLD '''
threshold = 0
power_spectra = []
frequency_bin_with_max_spectrum = 0
for i in range(len(FFT) / 2):
power_spectrum = scipy.absolute(FFT[i]) * scipy.absolute(FFT[i])
if power_spectrum > threshold:
threshold = power_spectrum
frequency_bin_with_max_spectrum = i
power_spectra.append(power_spectrum)
max_power_spectrum = threshold
threshold = float(self.avg_noise_power) - 3 * float(self.std_noise_power)
binFrequencies = []
magnitudes = []
binResolution = float(framerate) / float(fft_length)
sum_of_significant_spectra = 0
# For each bin calculate the corresponding frequency.
for k in range(len(FFT)):
binFreq = k * binResolution
# Truncating the FFT so we consider only hearable frequencies.
if binFreq > self.maxFreqConsidered:
FFT = FFT[:k]
break
elif binFreq > self.minFreqConsidered:
# Consider only the frequencies with magnitudes higher than the threshold.
power_spectrum = power_spectra[k]
if power_spectrum > threshold:
magnitudes.append(power_spectrum)
binFrequencies.append(binFreq)
# Sum all significant power spectra | |
# Extracted from the jasmin project (paradiseng/jasmin).
from datetime import datetime, timedelta
import re
import json
import pickle
from twisted.internet import reactor, defer
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from smpp.pdu.constants import priority_flag_value_map
from smpp.pdu.smpp_time import parse
from smpp.pdu.pdu_types import RegisteredDeliveryReceipt, RegisteredDelivery
from jasmin.routing.Routables import RoutableSubmitSm
from jasmin.protocols.smpp.configs import SMPPClientConfig
from jasmin.protocols.smpp.operations import SMPPOperationFactory
from jasmin.protocols.http.errors import UrlArgsValidationError
from jasmin.protocols.http.validation import UrlArgsValidator, HttpAPICredentialValidator
from jasmin.protocols.http.errors import (HttpApiError, AuthenticationError, ServerError, RouteNotFoundError, ConnectorNotFoundError,
ChargingError, ThroughputExceededError, InterceptorNotSetError,
InterceptorNotConnectedError, InterceptorRunError)
from jasmin.protocols.http.endpoints import hex2bin, authenticate_user
def update_submit_sm_pdu(routable, config, config_update_params=None):
    """Copy connector configuration values onto the routable's pdu chain.

    Parameters locked on the routable through routable.lockPduParam() are
    left untouched.  `config` may be an SMPPClientConfig instance or a
    plain dict; `config_update_params` defaults to the standard set of
    submit_sm parameters taken from the connector configuration.

    Returns the (mutated) routable.
    """
    if config_update_params is None:
        # Default set of params to pull from the config object.
        config_update_params = [
            'protocol_id',
            'replace_if_present_flag',
            'dest_addr_ton',
            'source_addr_npi',
            'dest_addr_npi',
            'service_type',
            'source_addr_ton',
            'sm_default_msg_id',
        ]

    _missing = object()

    def _config_value(param):
        # Resolve `param` from either config flavour; _missing when absent.
        if isinstance(config, SMPPClientConfig) and hasattr(config, param):
            return getattr(config, param)
        if isinstance(config, dict) and param in config:
            return config[param]
        return _missing

    for param in config_update_params:
        value = _config_value(param)
        # Walk the main pdu and every sub-pdu (multipart messages).
        pdu = routable.pdu
        while True:
            if not routable.pduParamIsLocked(param) and value is not _missing:
                pdu.params[param] = value
            if not hasattr(pdu, 'nextPdu'):
                break
            pdu = pdu.nextPdu
    return routable
class Send(Resource):
    # twisted.web dispatch checks the camelCase ``isLeaf`` class attribute;
    # the previous lowercase ``isleaf`` was dead and left the resource
    # treated as a non-leaf (getChild would still be consulted).
    isLeaf = True
def __init__(self, HTTPApiConfig, RouterPB, SMPPClientManagerPB, stats, log, interceptorpb_client):
Resource.__init__(self)
self.SMPPClientManagerPB = SMPPClientManagerPB
self.RouterPB = RouterPB
self.stats = stats
self.log = log
self.interceptorpb_client = interceptorpb_client
self.config = HTTPApiConfig
# opFactory is initiated with a dummy SMPPClientConfig used for building SubmitSm only
self.opFactory = SMPPOperationFactory(long_content_max_parts=HTTPApiConfig.long_content_max_parts,
long_content_split=HTTPApiConfig.long_content_split)
@defer.inlineCallbacks
def route_routable(self, updated_request):
try:
# Do we have a hex-content ?
if b'hex-content' not in updated_request.args:
# Convert utf8 to GSM 03.38
if updated_request.args[b'coding'][0] == b'0':
if isinstance(updated_request.args[b'content'][0], bytes):
short_message = updated_request.args[b'content'][0].decode().encode('gsm0338', 'replace')
else:
short_message = updated_request.args[b'content'][0].encode('gsm0338', 'replace')
updated_request.args[b'content'][0] = short_message
else:
# Otherwise forward it as is
short_message = updated_request.args[b'content'][0]
else:
# Otherwise convert hex to bin
short_message = hex2bin(updated_request.args[b'hex-content'][0])
# Authentication
user = authenticate_user(
updated_request.args[b'username'][0],
updated_request.args[b'password'][0],
self.RouterPB,
self.stats,
self.log
)
# Update CnxStatus
user.getCnxStatus().httpapi['connects_count'] += 1
user.getCnxStatus().httpapi['submit_sm_request_count'] += 1
user.getCnxStatus().httpapi['last_activity_at'] = datetime.now()
# Build SubmitSmPDU
SubmitSmPDU = self.opFactory.SubmitSM(
source_addr=None if b'from' not in updated_request.args else updated_request.args[b'from'][0],
destination_addr=updated_request.args[b'to'][0],
short_message=short_message,
data_coding=int(updated_request.args[b'coding'][0]),
custom_tlvs=updated_request.args[b'custom_tlvs'][0])
self.log.debug("Built base SubmitSmPDU: %s", SubmitSmPDU)
# Make Credential validation
v = HttpAPICredentialValidator('Send', user, updated_request, submit_sm=SubmitSmPDU)
v.validate()
# Update SubmitSmPDU by default values from user MtMessagingCredential
SubmitSmPDU = v.updatePDUWithUserDefaults(SubmitSmPDU)
# Prepare for interception then routing
routedConnector = None # init
routable = RoutableSubmitSm(SubmitSmPDU, user)
self.log.debug("Built Routable %s for SubmitSmPDU: %s", routable, SubmitSmPDU)
# Should we tag the routable ?
tags = []
if b'tags' in updated_request.args:
tags = updated_request.args[b'tags'][0].split(b',')
for tag in tags:
if isinstance(tag, bytes):
routable.addTag(tag.decode())
else:
routable.addTag(tag)
self.log.debug('Tagged routable %s: +%s', routable, tag)
# Intercept
interceptor = self.RouterPB.getMTInterceptionTable().getInterceptorFor(routable)
if interceptor is not None:
self.log.debug("RouterPB selected %s interceptor for this SubmitSmPDU", interceptor)
if self.interceptorpb_client is None:
self.stats.inc('interceptor_error_count')
self.log.error("InterceptorPB not set !")
raise InterceptorNotSetError('InterceptorPB not set !')
if not self.interceptorpb_client.isConnected:
self.stats.inc('interceptor_error_count')
self.log.error("InterceptorPB not connected !")
raise InterceptorNotConnectedError('InterceptorPB not connected !')
script = interceptor.getScript()
self.log.debug("Interceptor script loaded: %s", script)
# Run !
r = yield self.interceptorpb_client.run_script(script, routable)
if isinstance(r, dict) and r['http_status'] != 200:
self.stats.inc('interceptor_error_count')
self.log.error('Interceptor script returned %s http_status error.', r['http_status'])
raise InterceptorRunError(
code=r['http_status'],
message='Interception specific error code %s' % r['http_status']
)
elif isinstance(r, (str, bytes)):
self.stats.inc('interceptor_count')
routable = pickle.loads(r)
else:
self.stats.inc('interceptor_error_count')
self.log.error('Failed running interception script, got the following return: %s', r)
raise InterceptorRunError(message='Failed running interception script, check log for details')
# Get the route
route = self.RouterPB.getMTRoutingTable().getRouteFor(routable)
if route is None:
self.stats.inc('route_error_count')
self.log.error("No route matched from user %s for SubmitSmPDU: %s", user, routable.pdu)
raise RouteNotFoundError("No route found")
# Get connector from selected route
self.log.debug("RouterPB selected %s route for this SubmitSmPDU", route)
routedConnector = route.getConnector()
# Is it a failover route ? then check for a bound connector, otherwise don't route
# The failover route requires at least one connector to be up, no message enqueuing will
# occur otherwise.
if repr(route) == 'FailoverMTRoute':
self.log.debug('Selected route is a failover, will ensure connector is bound:')
while True:
c = self.SMPPClientManagerPB.perspective_connector_details(routedConnector.cid)
if c:
self.log.debug('Connector [%s] is: %s', routedConnector.cid, c['session_state'])
else:
self.log.debug('Connector [%s] is not found', routedConnector.cid)
if c and c['session_state'][:6] == 'BOUND_':
# Choose this connector
break
else:
# Check next connector, None if no more connectors are available
routedConnector = route.getConnector()
if routedConnector is None:
break
if routedConnector is None:
self.stats.inc('route_error_count')
self.log.error("Failover route has no bound connector to handle SubmitSmPDU: %s", routable.pdu)
raise ConnectorNotFoundError("Failover route has no bound connectors")
# Re-update SubmitSmPDU with parameters from the route's connector
connector_config = self.SMPPClientManagerPB.perspective_connector_config(routedConnector.cid)
if connector_config:
connector_config = pickle.loads(connector_config)
routable = update_submit_sm_pdu(routable=routable, config=connector_config)
# Set a placeholder for any parameter update to be applied on the pdu(s)
param_updates = {}
# Set priority
priority = 0
if b'priority' in updated_request.args:
priority = int(updated_request.args[b'priority'][0])
param_updates['priority_flag'] = priority_flag_value_map[priority]
self.log.debug("SubmitSmPDU priority is set to %s", priority)
# Set schedule_delivery_time
if b'sdt' in updated_request.args:
param_updates['schedule_delivery_time'] = parse(updated_request.args[b'sdt'][0])
self.log.debug(
"SubmitSmPDU schedule_delivery_time is set to %s (%s)",
routable.pdu.params['schedule_delivery_time'],
updated_request.args[b'sdt'][0])
# Set validity_period
if b'validity-period' in updated_request.args:
delta = timedelta(minutes=int(updated_request.args[b'validity-period'][0]))
param_updates['validity_period'] = datetime.today() + delta
self.log.debug(
"SubmitSmPDU validity_period is set to %s (+%s minutes)",
routable.pdu.params['validity_period'],
updated_request.args[b'validity-period'][0])
# Got any updates to apply on pdu(s) ?
if len(param_updates) > 0:
routable = update_submit_sm_pdu(routable=routable, config=param_updates,
config_update_params=list(param_updates))
# Set DLR bit mask on the last pdu
_last_pdu = routable.pdu
while True:
if hasattr(_last_pdu, 'nextPdu'):
_last_pdu = _last_pdu.nextPdu
else:
break
# DLR setting is clearly described in #107
_last_pdu.params['registered_delivery'] = RegisteredDelivery(
RegisteredDeliveryReceipt.NO_SMSC_DELIVERY_RECEIPT_REQUESTED)
if updated_request.args[b'dlr'][0] == b'yes':
_last_pdu.params['registered_delivery'] = RegisteredDelivery(
RegisteredDeliveryReceipt.SMSC_DELIVERY_RECEIPT_REQUESTED)
self.log.debug(
"SubmitSmPDU registered_delivery is set to %s",
str(_last_pdu.params['registered_delivery']))
dlr_level = int(updated_request.args[b'dlr-level'][0])
if b'dlr-url' in updated_request.args:
dlr_url = updated_request.args[b'dlr-url'][0]
else:
dlr_url = None
if updated_request.args[b'dlr-level'][0] == b'1':
dlr_level_text = 'SMS-C'
elif updated_request.args[b'dlr-level'][0] == b'2':
dlr_level_text = 'Terminal'
else:
dlr_level_text = 'All'
dlr_method = updated_request.args[b'dlr-method'][0]
else:
dlr_url = None
dlr_level = 0
dlr_level_text = 'No'
dlr_method = None
# QoS throttling
if (user.mt_credential.getQuota('http_throughput') and user.mt_credential.getQuota('http_throughput') >= 0) and user.getCnxStatus().httpapi[
'qos_last_submit_sm_at'] != 0:
qos_throughput_second = 1 / float(user.mt_credential.getQuota('http_throughput'))
qos_throughput_ysecond_td = timedelta(microseconds=qos_throughput_second * 1000000)
qos_delay = datetime.now() - user.getCnxStatus().httpapi['qos_last_submit_sm_at']
if qos_delay < qos_throughput_ysecond_td:
self.stats.inc('throughput_error_count')
self.log.error(
"QoS: submit_sm_event is faster (%s) than fixed throughput (%s), user:%s, rejecting message.",
qos_delay,
qos_throughput_ysecond_td,
user)
raise ThroughputExceededError("User throughput exceeded")
user.getCnxStatus().httpapi['qos_last_submit_sm_at'] = datetime.now()
# Get number of PDUs to be sent (for billing purpose)
_pdu = routable.pdu
submit_sm_count = 1
while hasattr(_pdu, 'nextPdu'):
_pdu = _pdu.nextPdu
submit_sm_count += 1
# Pre-sending submit_sm: Billing processing
bill = route.getBillFor(user)
self.log.debug("SubmitSmBill [bid:%s] [ttlamounts:%s] generated for this SubmitSmPDU (x%s)",
bill.bid, bill.getTotalAmounts(), submit_sm_count)
charging_requirements = []
u_balance = user.mt_credential.getQuota('balance')
u_subsm_count = user.mt_credential.getQuota('submit_sm_count')
if u_balance is not None and bill.getTotalAmounts() > 0:
# Ensure user have enough balance to pay submit_sm and submit_sm_resp
charging_requirements.append({
'condition': bill.getTotalAmounts() * submit_sm_count <= u_balance,
'error_message': 'Not enough balance (%s) for charging: %s' % (
u_balance, bill.getTotalAmounts())})
if u_subsm_count is not None:
# Ensure user have enough submit_sm_count to to cover
# the bill action (decrement_submit_sm_count)
charging_requirements.append({
'condition': bill.getAction('decrement_submit_sm_count') * submit_sm_count <= u_subsm_count,
'error_message': 'Not enough submit_sm_count (%s) for charging: %s' % (
u_subsm_count, bill.getAction('decrement_submit_sm_count'))})
if self.RouterPB.chargeUserForSubmitSms(user, bill, submit_sm_count, charging_requirements) is None:
self.stats.inc('charging_error_count')
self.log.error('Charging user %s failed, [bid:%s] [ttlamounts:%s] SubmitSmPDU (x%s)',
user, bill.bid, bill.getTotalAmounts(), submit_sm_count)
raise ChargingError('Cannot charge submit_sm, check RouterPB log file for details')
########################################################
# Send SubmitSmPDU through smpp client manager PB server
self.log.debug("Connector '%s' is set to be a route for this SubmitSmPDU", routedConnector.cid)
c = self.SMPPClientManagerPB.perspective_submit_sm(
uid=user.uid,
cid=routedConnector.cid,
SubmitSmPDU=routable.pdu,
submit_sm_bill=bill,
priority=priority,
pickled=False,
dlr_url=dlr_url,
dlr_level=dlr_level,
dlr_method=dlr_method,
dlr_connector=routedConnector.cid)
# Build final response
if not c.result:
self.stats.inc('server_error_count')
self.log.error('Failed to send SubmitSmPDU to [cid:%s]', routedConnector.cid)
raise ServerError('Cannot send submit_sm, check SMPPClientManagerPB log file for details')
else:
self.stats.inc('success_count')
self.stats.set('last_success_at', datetime.now())
self.log.debug('SubmitSmPDU sent to [cid:%s], result = %s', routedConnector.cid, c.result)
response = {'return': c.result, 'status': 200}
except HttpApiError as e:
self.log.error("Error: %s", e)
response = {'return': e.message, 'status': e.code}
except Exception as e:
self.log.error("Error: %s", e)
response = {'return': "Unknown error: %s" % e, 'status': 500}
raise
finally:
self.log.debug("Returning %s to %s.", response, updated_request.getClientIP())
updated_request.setResponseCode(response['status'])
# Default return
_return | |
help='mining scenario either mp or mf')
parser.add_argument('--intrusion_time', type=int, default= 100, help='intrusion time for nuts release model')
# Spacer
opt = parser.parse_args()
print(opt)
#Nuts release data in units of [ci]
nuts_folder = opt.dir_NUTS
assert os.path.exists(nuts_folder)
print('Success Directory = {}\n'.format(nuts_folder) )
#Panel output in unit of decimal
panel_folder = opt.dir_PANEL
assert os.path.exists(panel_folder)
print('Success Found Directory = {}\n'.format(panel_folder) )
#SECOT2D output in unit of fraction of 1kg
st2d_folder = opt.dir_SECOT2D
assert os.path.exists(st2d_folder)
print('Success Found Directory = {}\n'.format(st2d_folder) )
#list of filenames directories
fn_nut = list_filenames(nuts_folder, prefix=opt.nuts_pre,
suffix=opt.nuts_ext)
fn_stfr = list_filenames(panel_folder, prefix=opt.panel_pre,
suffix=opt.panel_ext)
fn_st2d = list_filenames(st2d_folder, prefix=opt.st2d_pre,
suffix=opt.st2d_ext)
#II. dataframe of static mole fractions that don't transport in Culebra from Panel
colnames = ['vector','time',
'AMFRCMIC','AMFRCINT','AMFRCMIN',
'PUFRCMIC','PUFRCINT','PUFRCMIN',
'UFRCMIC','UFRCINT','UFRCMIN',
'THFRCMIC','THFRCINT','THFRCMIN']
df_stfr = read_concentration_tbl(fn=fn_stfr[0], colnames=colnames)
for fn in fn_stfr[1:]:
df_stfr = pd.concat([df_stfr,read_concentration_tbl(fn=fn,colnames=colnames) ])
#Create Vector Names r1v1 etc
df_stfr['Replicate']=df_stfr['fn'].apply(lambda x: x.split('_')[-2])
df_stfr['Scenario']=df_stfr['fn'].apply(lambda x: x.split('_')[-1])
df_stfr['VectorName']=df_stfr.apply(lambda x: x.Replicate + 'v'+str(x.vector), axis=1)
#filter df to scenario and replicates from input args
df_stfr=df_stfr[df_stfr['Scenario'].isin([opt.scen])]
df_stfr=df_stfr[df_stfr['Replicate'].isin(opt.reps)]
#Static Mole Lumped of nuclide fraction not transportable in Culebra
df_stfr['AM241_SML']=df_stfr[['AMFRCMIC','AMFRCINT','AMFRCMIN']].sum(axis=1)
df_stfr['PU239_SML']=df_stfr[['PUFRCMIC','PUFRCINT','PUFRCMIN']].sum(axis=1)
df_stfr['U234_SML']=df_stfr[['UFRCMIC','UFRCINT','UFRCMIN']].sum(axis=1)
df_stfr['TH230_SML']=df_stfr[['THFRCMIC','THFRCINT','THFRCMIN']].sum(axis=1)
#Static Mole Fraction of Lumped Radionuclide not transportable in Culebra
list_sml=['AM241_SML','PU239_SML','U234_SML','TH230_SML']
#Dataframe with fraction transportable in Culebra
dict_Mobile={}
for col in list_sml:
df_stfr[col]=df_stfr[col].apply(lambda x: 1-x)
key = col
dict_Mobile[key] = {vector:val for vector, val in zip(df_stfr['VectorName'].values,df_stfr[col].values)}
#III. datafame of releases in curries to Culebra from Nuts
colnames = ['vector','time','A00AM241','A00PU239','A00U234','A00TH230','EPALWMBT']
df_nut = read_nut_tbl(fn=fn_nut[0],colnames=colnames)
for fn in fn_nut[1:]:
df_nut = pd.concat([df_nut, read_nut_tbl(fn=fn, colnames=colnames)])
#Create Vector Names r1v1 etc
df_nut['Replicate']=df_nut['Intrusion_Event'].apply(lambda x: x.split('_')[0])
df_nut['Scenario']=df_nut['Intrusion_Event'].apply(lambda x: x.split('_')[1])
df_nut['VectorName']=df_nut.apply(lambda x: x.Replicate + 'v'+str(x.vector), axis=1)
#filter df to scenario and replicates from input args
df_nut=df_nut[df_nut['Scenario'].isin([opt.scen])]
df_nut=df_nut[df_nut['Replicate'].isin(opt.reps)]
df_nut=df_nut[df_nut['Event_Time'].isin([opt.intrusion_time])]
#Sometimes the nuts output has values of E-100, the thrid exponent digit truncates the "E"
#and values look like 1.123456-100 instead of 1.123456E-100 if regex not matched, overwrite 0
reg_exp = '[+\-]?[^\w]?(?:0|[1-9]\d*)(?:\.\d*)?(?:[eE][+\-]?\d+)'
list_filt = ['A00AM241','A00PU239','A00U234','A00TH230']
for col in list_filt:
df_nut[col] = df_nut[col].apply(lambda x: x if re.match(reg_exp, str(x)) else 0)
df_nut[col] = df_nut[col].apply(lambda x: float(x))
# Verify common set of vectors to match panel and nuts models
assert set(df_nut['VectorName']) == set(df_stfr['VectorName'])
# Scale to transportable fraction
list_trans = ['AM241_Trans', 'PU239_Trans','U234_Trans','TH230_Trans']
def scale(fraction,total):
return (fraction*total)
for nut, mob, trans in zip(list_filt,list_sml, list_trans):
lookup=dict_Mobile[mob]
df_nut[trans]=df_nut.apply(lambda x: lookup[x.VectorName]*x[nut],axis=1)
# fractional release from Culebra
colnames = ['vector','time','MT2AM241','MT2PU239','MT2U234','MT2TH230','MT2TH23A']
df_st2d = read_st2d_tbl(fn=fn_st2d[0],colnames=colnames)
for fn in fn_st2d[1:]:
df_st2d = pd.concat([df_st2d, read_st2d_tbl(fn=fn, colnames=colnames)])
df_st2d['VectorName']=df_st2d.apply(lambda x: x.Replicate + 'v'+str(x.vector), axis=1)
# Verify common set of vectors to match panel, nuts, secot2d models
assert set(df_nut['VectorName']) == set(df_st2d['VectorName'])
#IV. Mass transport from Secot2d
list_mt=['MT2AM241','MT2PU239','MT2U234','MT2TH230','MT2TH23A']
#Sometimes the output has values of E-100, the thrid exponent digit truncates the "E"
#and values look like 1.123456-100 instead of 1.123456E-100 if regex not matched, overwrite 0
reg_exp = '[+\-]?[^\w]?(?:0|[1-9]\d*)(?:\.\d*)?(?:[eE][+\-]?\d+)'
for col in list_mt:
df_st2d[col] = df_st2d[col].apply(lambda x: x if re.match(reg_exp, str(x)) else 0)
df_st2d[col] = df_st2d[col].apply(lambda x: float(x))
df_st2d['MT2TH_Tot']=df_st2d.apply(lambda x: x.MT2TH230 + x.MT2TH23A, axis=1)
#V. Flag negative Culebra mass transport flows, replace with zero, and output 50yr flow steps
dict_st2d_dQ = {}
st2d_keys=['MT2AM241','MT2PU239','MT2U234','MT2TH_Tot']
for key in st2d_keys:
table_temp = df_st2d.pivot_table(index=['time'], columns=['VectorName'],values=[key])
table_temp.columns = [col[1] for col in table_temp.columns]
table_temp2 = table_temp.copy()
dict_table = {}
#Convert from cumulative output to incremental output
for vector in table_temp2.columns:
v0 = table_temp2[vector].values[1:]
v1 = table_temp2[vector].values[:-1]
dv = v0 - v1
dv = np.where(dv>=0, dv, 0)
dv = np.insert(dv,0,v1[0],axis=0)
dict_table[vector]=dv
dict_st2d_dQ[key]=pd.DataFrame(dict_table).set_index(table_temp.index)
#VI. Flag negative nuts flows and replace with zero, output 50 yr flow steps
dict_Nuts_dQ={}
nut_keys = ['<KEY>','A00PU239','A00U234','A00TH230']
for key in nut_keys:
table_temp = df_nut.pivot_table(index=['time'], columns=['VectorName'],values=[key])
table_temp.columns = [col[1] for col in table_temp.columns]
table_temp2 = table_temp.copy()
dict_table = {}
#Convert from cumulative to incremental output
for vector in table_temp2.columns:
v0 = table_temp2[vector].values[1:]
v1 = table_temp2[vector].values[:-1]
dv = v0 - v1
dv = np.insert(dv,0,0,axis=0)
dv = np.where(dv>0, dv, 0)
dv = list(dv)
dict_table[vector]=dv
dict_Nuts_dQ[key] = pd.DataFrame(dict_table).set_index(table_temp.index)
#VII. Convolve to Culebra with from Culebra to calculate radionuclides to the lwb
lwb_keys = ['AM241','PU239','U234','TH230A']
dict_lwb = {}
for nut, st2d, lwb in zip(nut_keys, st2d_keys, lwb_keys):
dict_temp = {}
for vector in dict_Nuts_dQ[nut].columns:
qnuts = dict_Nuts_dQ[nut][vector].values
qst2d = dict_st2d_dQ[st2d][vector].values
######## convolution step here #############
qconv = np.convolve(qnuts,qst2d)[0:len(qnuts)]
dict_temp[vector]=qconv
dict_lwb[lwb]=pd.DataFrame(dict_temp).set_index(dict_Nuts_dQ[nut].index)
#VIII. Aggregate Stats and write ouput
df_lwb_lumped = dict_lwb[lwb_keys[0]]*0
for lwb in lwb_keys:
df_lwb_lumped = df_lwb_lumped + dict_lwb[lwb]
time_max_mean = df_lwb_lumped.mean(axis=1).idxmax()
list_mean=[]
list_median=[]
list_count=[]
for lwb in lwb_keys:
list_mean.append(dict_lwb[lwb].mean(axis=1)[time_max_mean])
list_median.append(dict_lwb[lwb].median(axis=1)[time_max_mean])
list_count.append(dict_lwb[lwb].shape[1])
df_out = pd.DataFrame({'Mean [Ci]':list_mean, 'Median [Ci]':list_median,'Count':list_count,
'Period End':time_max_mean,'Duration':'50 Yrs'}, index=lwb_keys)
df_out.to_csv(opt.fn_output)
#IX. Generate Plot
for nut, st2d, lwb in zip(nut_keys, st2d_keys, lwb_keys):
df_Convolved = dict_lwb[lwb]
df_NutsdQ = dict_Nuts_dQ[nut]
df_st2d = dict_st2d_dQ[st2d]
fig, ax = plt.subplots(ncols=3,nrows=1, figsize=(12,4), dpi=125)
ax[0].set_title('{} Release to Culebra'.format(nut))
ax[1].set_title('{} Release to LWB'.format(st2d))
ax[2].set_title('{} Release to LWB'.format(lwb))
for vector in df_NutsdQ.columns:
horsetail = df_NutsdQ[vector]
ax[0].plot(horsetail.index, horsetail, color='k', alpha=.5)
ax[0].set_ylabel('50 Yr Time Step Release to Culebra [Ci]')
ax[0].set_xlabel('Time Post Closure [Yr]')
plt.setp(ax[0].get_xticklabels(),rotation=90)
plot_count(ax_in=ax[0], df_in=df_Convolved)
for vector in df_st2d.columns:
horsetail = df_st2d[vector]
ax[1].plot(horsetail.index, horsetail, color='k', alpha=.5)
ax[1].set_ylabel('50 Yr Time Step Fraction of Unit Kg')
ax[1].set_xlabel('Time Post Closure [Yr]')
plt.setp(ax[1].get_xticklabels(),rotation=90)
plot_count(ax_in=ax[1], df_in=df_Convolved)
vector_mean = df_Convolved.mean(axis=1)
for vector in df_Convolved.columns:
horsetail = df_Convolved[vector]
ax[2].plot(horsetail.index, horsetail, color='k', alpha=.5)
ax[2].plot(vector_mean, color='r')
ax[2].set_ylabel('50 Yr Time Step Release to LWB [Ci]')
ax[2].set_xlabel('Time Post Closure [Yr]')
plt.setp(ax[2].get_xticklabels(),rotation=90)
plot_count(ax_in=ax[2], df_in=df_Convolved)
plot_annotation(ax_in=ax[2], text_str='Mean = Red')
fig.tight_layout()
plt.savefig('ConvolutionPlot{}'.format(lwb))
# Script entry point: run the full release-convolution workflow.
if __name__ == '__main__':
    main()
# %%
import numpy as np
from scipy import interpolate
from math import *
import matplotlib.pyplot as plt
### Make polar grid ###
rvec = np.arange(1.0, 11.0, 1.0)
tvec = np.arange(pi/10.0, pi, pi/10.0)
Nr = len(rvec)
Nt = len(tvec)
X = np.empty([Nr, Nt])
Y = np.empty([Nr, Nt])
Z = np.empty([Nr, Nt])
for i in range(Nr):
    for j in range(Nt):
        r = rvec[i]
        t = tvec[j]
        X[i, j] = r*sin(t)
        Y[i, j] = r*cos(t)
        Z[i, j] = cos(t)/pow(r, 3)  # cos(theta)/r^3: Br of dipole

### Do the interpolation ###
# interpolate.interp2d was deprecated in SciPy 1.10 and removed in 1.14;
# griddata performs the equivalent piecewise-linear interpolation over the
# scattered Cartesian samples of the polar grid.
sample_points = np.column_stack((X.ravel(), Y.ravel()))
sample_values = Z.ravel()
#tck = interpolate.bisplrep(X,Y,Z, kx=3, ky=3)

### interpolate onto new grid ###
rnew = np.arange(1.0, 11.1, 0.1)
tnew = np.array([pi/10, pi/5, pi/4, pi/3, pi/2, .75*pi, pi])
#tnew = np.arange(pi/100.0, pi, pi/100.0)
Nr2 = len(rnew)
Nt2 = len(tnew)
X2 = np.empty([Nr2, Nt2])
Y2 = np.empty([Nr2, Nt2])
for i in range(Nr2):
    for j in range(Nt2):
        X2[i, j] = rnew[i]*sin(tnew[j])
        Y2[i, j] = rnew[i]*cos(tnew[j])
# Points outside the convex hull of the samples come back as NaN,
# which pcolor simply leaves blank.
Z2 = interpolate.griddata(sample_points, sample_values, (X2, Y2), method='linear')

### Pseudocolour plot ###
fig = plt.figure()
fig.add_subplot(111, aspect='equal')
plt.pcolor(X2, Y2, Z2)
plt.plot(X, Y, marker='+')
plt.plot(X2, Y2, marker='.')
plt.colorbar()
plt.show()
# %%
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 12:59:29 2020
@author: annaklara
"""
import re
import numpy as np
import pandas as pd
import math
# %% Part A
import re
def names():
    """Return all capitalised names found in the sample sentence.

    The pattern matches any capitalised word, which in this fixed string
    yields exactly the four names.  (The unreachable
    ``raise NotImplementedError()`` after the return was removed.)
    """
    simple_string = """Amy is 5 years old, and her sister Mary is 2 years old.
    Ruth and Peter, their parents, have 3 kids."""
    pattern = '[A-Z][a-z]*'
    result = re.findall(pattern, simple_string)
    return(result)
# Sanity check: the pattern should capture Amy, Mary, Ruth and Peter.
assert len(names()) == 4, "There are four names in the simple_string"
# %% part b
def grades(path="/Users/annaklara/Downloads/grades.txt"):
    """Return the names of students who received a B grade.

    `path` points at a text file with one "Name: Grade" entry per line;
    it defaults to the original hard-coded location so existing callers
    are unaffected.  (Unreachable code after the return was removed.)
    """
    with open(path, "r") as file:
        content = file.read()

    gradeslist = content.splitlines()
    # A line qualifies when it ends in "B", optionally followed by one
    # trailing whitespace character.
    patternGrade = r'[B]$|[B][\s]$'
    # Everything before the last colon is the student's name.
    patternName = r'[\S\s]+(?=[:])'
    bstudents = []
    for line in gradeslist:
        if re.findall(patternGrade, line):
            bstudents.append(re.findall(patternName, line)[0])
    return(bstudents)
# Sanity check against the course's grades.txt fixture (16 B students).
assert len(grades() ) == 16
print(grades())
# %% part c
def logs(path="assets/logdata.txt"):
    """Parse an Apache-style access log into a list of dicts.

    Each entry carries the host IP, user name, timestamp and request
    line of one log line.  `path` defaults to the original course-assets
    location so existing callers are unaffected.  (Unreachable code
    after the return was removed.)
    """
    with open(path, "r") as file:
        logdata = file.read()

    patternHost = r'[\d]+[\.][\d]+[\.][\d]+[.][\d]+'
    patternUser = r'(?<=[-][\s])[\S]+'
    patternTime = r'(?<=[\[])[\S]+[\s][\S]+(?=[\]])'
    # Greedy: spans from just after the first quote to just before the
    # last quote on the line.
    patternRequest = r'(?<=["])[\S\s]+(?=["])'

    loglist = []
    for line in logdata.splitlines():
        loglist.append({
            'host': re.findall(patternHost, line)[0],
            'user_name': re.findall(patternUser, line)[0],
            'time': re.findall(patternTime, line)[0],
            'request': re.findall(patternRequest, line)[0],
        })
    return(loglist)
assert len(logs()) == 979
one_item={'host': '192.168.127.12',
'user_name': 'feest6811',
'time': '21/Jun/2019:15:45:24 -0700',
'request': 'POST /incentivize HTTP/1.1'}
assert one_item in logs(), "Sorry, this item should be in the log results, check your formating"
# %% quiz 1 workspace
# question 1
sdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000}
obj1 = pd.Series(sdata)
states = ['California', 'Ohio', 'Oregon', 'Texas']
# Reindexing against `states` introduces NaN for California (missing
# from sdata) and drops Utah.
obj2 = pd.Series(sdata, index=states)
obj3 = pd.isnull(obj2)
x = obj2['California']
# NaN compares unequal to everything, including itself.
obj2['California'] != x #nan not a value can't compare
obj3['California']
# NaN is not None, so this is False; use isnull/isnan to test for it.
obj2['California'] == None
math.isnan(obj2['California'])
# %%Question 2
d = {
    '1': 'Alice',
    '2': 'Bob',
    '3': 'Rita',
    '4': 'Molly',
    '5': 'Ryan'
}
S = pd.Series(d)
# iloc slicing is positional and end-exclusive: first three entries.
check = S.iloc[0:3]
# %% question 3 recast column headers to capitals
ss = pd.Series( ['a', 'b', 'c'] , name = 'vals')
# to_frame() returns a new DataFrame; `ss` itself stays a Series.
ss.to_frame() # problem, this was still a series
ss = pd.DataFrame({"aaaa": [1, 2, | |
stride of the sliding window for each dimension of the input tensor.
padding : string
'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
data_format : string
'NDHWC' and 'NCDHW' are supported.
name : string
Optional name for the operation.
Returns
-------
A Tensor with the same type as value. The average pooled output tensor.
"""
avg_pool_obj = AvgPool(ksize, strides, padding, data_format)
return avg_pool_obj(input)
def pool(input, window_shape, pooling_type, strides=None, padding='VALID', data_format=None, dilations=None, name=None):
    """
    Performs an N-D pooling operation.

    Parameters
    ----------
    input : tensor
        Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels]
        if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape
        if data_format starts with "NC". Pooling happens over the spatial dimensions only.
    window_shape : int
        Sequence of N ints >= 1.
    pooling_type : string
        Specifies pooling operation, must be "AVG" or "MAX" (case-insensitive).
    strides : ints
        Sequence of N ints >= 1. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1.
    padding : string
        The padding algorithm, must be "SAME" or "VALID". Defaults to "SAME".
        See the "returns" section of tf.ops.convolution for details.
    data_format : string
        Specifies whether the channel dimension of the input and output is the last dimension (default, or if data_format does not start with "NC"),
        or the second dimension (if data_format starts with "NC").
        For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
        For N=3, the valid values are "NDHWC" (default) and "NCDHW".
    dilations : list of ints
        Dilation rate. List of N ints >= 1. Defaults to [1]*N. If any value of dilation_rate is > 1, then all values of strides must be 1.
    name : string
        Optional. Name of the op.

    Returns
    -------
    Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels]

    Raises
    ------
    ValueError
        If ``pooling_type`` is neither "MAX" nor "AVG".
    """
    # Normalize once so "MAX"/"max"/"Max" are all accepted — a backward
    # compatible generalization of the original exact "MAX"/"max"/"AVG"/"avg"
    # matching.
    mode = str(pooling_type).upper()
    if mode == "MAX":
        pool_obj = MaxPool(window_shape, strides, padding, data_format)
    elif mode == "AVG":
        pool_obj = AvgPool(window_shape, strides, padding, data_format)
    else:
        # BUG FIX: the message previously referred to a non-existent
        # 'pool_mode' argument; name the actual parameter.
        raise ValueError('Unsupported pooling_type: ' + str(pooling_type))
    return pool_obj(input)
class DepthwiseConv2d(object):
    """Callable that applies a depthwise convolution followed by a pointwise
    convolution via torch.nn.functional.conv2d.

    NOTE(review): only 'NHWC' and 'NCHW' assign self._stride — any other
    normalized format would leave it undefined; confirm preprocess_2d_format
    guarantees one of the two.
    """
    def __init__(self, strides, padding, data_format=None, dilations=None, ksize=None, channel_multiplier=1):
        # preprocess_2d_format normalizes the layout string and padding value.
        self.data_format, self.padding = preprocess_2d_format(data_format, padding)
        if self.data_format == 'NHWC':
            self._stride = (strides[1], strides[2])
        if self.data_format == 'NCHW':
            self._stride = (strides[2], strides[3])
        self.dilations = dilations
    def __call__(self, input, filter, point_filter=None):
        if self.data_format == 'NHWC':
            # Compute in channels-first order; converted back below.
            input = nhwc_to_nchw(input)
        channel = input.shape[1]
        # groups == in-channels makes conv2d depthwise (one filter per channel).
        depthwise_conv = F.conv2d(input, filter, bias=None, stride=self._stride, padding=self.padding,
                                  dilation=self.dilations, groups=channel)
        # NOTE(review): the pointwise step reuses self.padding and requires a
        # point_filter; called with point_filter=None this raises — confirm
        # all callers supply it.
        pointwise_conv = F.conv2d(depthwise_conv, point_filter, padding=self.padding)
        if self.data_format == 'NHWC':
            pointwise_conv = nchw_to_nhwc(pointwise_conv)
        return pointwise_conv
def depthwise_conv2d(input, filter, strides, padding, data_format=None, dilations=None, name=None):
    """
    Depthwise 2-D convolution.

    Parameters
    ----------
    input : tensor
        4-D with shape according to data_format.
    filter : tensor
        4-D with shape [filter_height, filter_width, in_channels, channel_multiplier].
    strides : list
        1-D of size 4. The stride of the sliding window for each dimension of input.
    padding : string
        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
    data_format : string
        The data format for input. Either "NHWC" (default) or "NCHW".
    dilations : list
        1-D of size 2. The dilation rate in which we sample input values across the height and width dimensions in atrous convolution.
        If it is greater than 1, then all values of strides must be 1.
    name : string
        A name for this operation (optional).

    Returns
    -------
    A 4-D Tensor with shape according to data_format.
    E.g., for "NHWC" format, shape is [batch, out_height, out_width, in_channels * channel_multiplier].
    """
    # Delegate to the stateful callable that wraps torch's conv2d.
    op = DepthwiseConv2d(
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilations=dilations,
    )
    return op(input, filter)
def same_padding_deconvolution(input, weight, strides, dilations):
    """Compute the 'SAME' padding amounts for a transposed convolution.

    For each spatial dimension the target output size is
    ``in * stride - stride + 1`` and the required padding is
    ``max(0, (in - 1) * stride + (k - 1) * dilation + 1 - out)``.

    Parameters
    ----------
    input : torch.Tensor
        3-D (N, C, L), 4-D (N, C, H, W) or 5-D (N, C, D, H, W) input.
    weight : torch.Tensor or sequence of ints
        Either the kernel tensor (spatial sizes read from dims 2..4) or a
        plain sequence of the spatial kernel sizes.
    strides : int or sequence of ints
        Stride per spatial dimension (a single int for the 3-D case).
    dilations : int or sequence of ints
        Dilation per spatial dimension (a single int for the 3-D case).

    Returns
    -------
    tuple
        (rows_odd, padding_rows) for 3-D input,
        (rows_odd, cols_odd, padding_rows, padding_cols) for 4-D input,
        (rows_odd, cols_odd, depth_odd, padding_rows, padding_cols,
        padding_depth) for 5-D input. The *_odd flags tell the caller an
        extra one-sided pad is needed before the symmetric padding.
    """
    # H(out) = floor((H(in) - 1)*stride[0] - 2*padding[0] + dilation[0]*(ksize[0]-1) + 1)
    if isinstance(weight, torch.Tensor):
        # Kernel spatial extents come from the weight tensor's trailing dims.
        if len(input.shape) == 3:
            filter_rows = weight.size(2)
        elif len(input.shape) == 4:
            filter_rows = weight.size(2)
            filter_cols = weight.size(3)
        elif len(input.shape) == 5:
            filter_rows = weight.size(2)
            filter_cols = weight.size(3)
            filter_depth = weight.size(4)
    else:
        # `weight` is a plain sequence of spatial kernel sizes.
        if len(input.shape) == 3:
            filter_rows = weight[0]
        elif len(input.shape) == 4:
            filter_rows = weight[0]
            filter_cols = weight[1]
        elif len(input.shape) == 5:
            filter_rows = weight[0]
            filter_cols = weight[1]
            filter_depth = weight[2]

    if len(input.shape) == 3:
        input_rows = input.size(2)
        out_rows = input_rows * strides - strides + 1
        padding_rows = max(0, (input_rows - 1) * strides + (filter_rows - 1) * dilations + 1 - out_rows)
        rows_odd = (padding_rows % 2 != 0)
        return rows_odd, padding_rows

    if len(input.shape) == 4:
        input_rows = input.size(2)
        input_cols = input.size(3)
        out_rows = input_rows * strides[0] - strides[0] + 1
        # BUG FIX: the column target previously used input_rows instead of
        # input_cols, mis-sizing the padding for non-square inputs.
        out_cols = input_cols * strides[1] - strides[1] + 1
        padding_rows = max(0, (input_rows - 1) * strides[0] + (filter_rows - 1) * dilations[0] + 1 - out_rows)
        padding_cols = max(0, (input_cols - 1) * strides[1] + (filter_cols - 1) * dilations[1] + 1 - out_cols)
        rows_odd = (padding_rows % 2 != 0)
        cols_odd = (padding_cols % 2 != 0)
        return rows_odd, cols_odd, padding_rows, padding_cols

    if len(input.shape) == 5:
        input_rows = input.size(2)
        input_cols = input.size(3)
        input_depth = input.size(4)
        out_rows = input_rows * strides[0] - strides[0] + 1
        # BUG FIX: cols/depth targets previously used input_rows as well.
        out_cols = input_cols * strides[1] - strides[1] + 1
        out_depth = input_depth * strides[2] - strides[2] + 1
        padding_rows = max(0, (input_rows - 1) * strides[0] + (filter_rows - 1) * dilations[0] + 1 - out_rows)
        padding_cols = max(0, (input_cols - 1) * strides[1] + (filter_cols - 1) * dilations[1] + 1 - out_cols)
        padding_depth = max(0, (input_depth - 1) * strides[2] + (filter_depth - 1) * dilations[2] + 1 - out_depth)
        rows_odd = (padding_rows % 2 != 0)
        cols_odd = (padding_cols % 2 != 0)
        depth_odd = (padding_depth % 2 != 0)
        return rows_odd, cols_odd, depth_odd, padding_rows, padding_cols, padding_depth
class Conv1d_transpose(object):
    """Callable implementing 1-D transposed convolution with TF-style
    'SAME'/'VALID' padding on top of torch.nn.functional.conv_transpose1d."""
    def __init__(
        self, stride, padding, data_format='NWC', dilations=None, out_channel=None, k_size=None, in_channels=None
    ):
        self.stride = stride
        self.dilations = dilations
        # preprocess_1d_format normalizes the layout string and padding value.
        self.data_format, self.padding = preprocess_1d_format(data_format, padding)
    def __call__(self, input, filters):
        if self.data_format == 'NLC':
            # Work in channels-first; converted back before returning.
            input = nhwc_to_nchw(input)
        if self.padding == 'same':
            out = self.conv1d_transpose_same_padding(input, filters)
        else:
            out = F.conv_transpose1d(
                input,
                weight=filters,
                padding=(0 if isinstance(self.padding, str) else self.padding),
                stride=self.stride,
                dilation=self.dilations
            )
        if self.data_format == 'NLC':
            out = nchw_to_nhwc(out)
        return out
    def conv1d_transpose_same_padding(self, input, filters):
        # same_padding_deconvolution returns (odd-flag, total padding) for the
        # TF 'SAME' transposed-conv geometry.
        rows_odd, padding_rows = same_padding_deconvolution(input, filters, self.stride, 1)
        if rows_odd:
            input = F.pad(input, [0, int(rows_odd)])
            out_padding = 0
        else:
            out_padding = 1
        # NOTE(review): output_padding=1 on the even branch (and 0 on the odd
        # one) looks inverted relative to the usual SAME-deconv recipe —
        # confirm against the reference implementation.
        return F.conv_transpose1d(input, weight=filters, padding=(padding_rows // 2), stride=self.stride,
                                  dilation=self.dilations, output_padding=out_padding)
def conv1d_transpose(
    input, filters, output_shape, strides, padding='SAME', data_format='NWC', dilations=None, name=None
):
    """
    The transpose of conv1d.

    Parameters
    ----------
    input : tensor
        A 3-D Tensor of type float and shape [batch, in_width, in_channels]
        for NWC data format or [batch, in_channels, in_width] for NCW data format.
    filters : tensor
        A 3-D Tensor with the same type as value and shape [filter_width, output_channels, in_channels].
        filter's in_channels dimension must match that of value.
    output_shape : tensor
        A 1-D Tensor, containing three elements, representing the output shape of the deconvolution op.
    strides : list
        An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step.
    padding : string
        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
    data_format : string
        'NWC' and 'NCW' are supported.
    dilations : list
        An int or list of ints that has length 1 or 3 which defaults to 1.
        The dilation factor for each dimension of input. If set to k > 1,
        there will be k-1 skipped cells between each filter element on that dimension.
        Dilations in the batch and depth dimensions must be 1.
    name : string
        Optional name for the returned tensor.

    Returns
    -------
    A Tensor with the same type as value.
    """
    # Delegate to the stateful callable; output_shape/name are accepted for
    # API compatibility and handled the same way as before.
    op = Conv1d_transpose(
        stride=strides,
        padding=padding,
        data_format=data_format,
        dilations=dilations,
    )
    return op(input, filters)
class Conv2d_transpose(object):
def __init__(
self, strides, padding, data_format='NHWC', dilations=None, name=None, out_channel=None, k_size=None,
in_channels=None
):
self.strides = strides
self.dilations = dilations
self.name = name
self.data_format, self.padding = preprocess_2d_format(data_format, padding)
def __call__(self, input, filters):
if self.data_format == 'NHWC':
input = nhwc_to_nchw(input)
if self.padding == 'same':
out = self.conv2d_transpore_same(input, filters)
else:
out = F.conv_transpose2d(
input,
weight=filters,
padding=(0 | |
<filename>pixel_link.py
import tensorflow as tf
import numpy as np
import cv2
import os
import util
PIXEL_CLS_WEIGHT_all_ones = 'PIXEL_CLS_WEIGHT_all_ones'
PIXEL_CLS_WEIGHT_bbox_balanced = 'PIXEL_CLS_WEIGHT_bbox_balanced'
PIXEL_NEIGHBOUR_TYPE_4 = 'PIXEL_NEIGHBOUR_TYPE_4'
PIXEL_NEIGHBOUR_TYPE_8 = 'PIXEL_NEIGHBOUR_TYPE_8'
DECODE_METHOD_join = 'DECODE_METHOD_join'
def get_neighbours_8(x, y):
    """
    Get 8 neighbours of point(x, y), row by row, left to right.
    """
    offsets = [(-1, -1), (0, -1), (1, -1),
               (-1, 0), (1, 0),
               (-1, 1), (0, 1), (1, 1)]
    return [(x + dx, y + dy) for dx, dy in offsets]
def get_neighbours_4(x, y):
    """Return the 4-connected neighbours of (x, y): left, right, down, up."""
    return [(x + dx, y + dy) for dx, dy in ((-1, 0), (1, 0), (0, 1), (0, -1))]
def get_neighbours(x, y):
    """Dispatch to the 4- or 8-neighbour helper per config.pixel_neighbour_type."""
    import config
    if config.pixel_neighbour_type == PIXEL_NEIGHBOUR_TYPE_4:
        return get_neighbours_4(x, y)
    return get_neighbours_8(x, y)
def get_neighbours_fn():
    """Return (neighbour_fn, neighbour_count) per config.pixel_neighbour_type."""
    import config
    if config.pixel_neighbour_type == PIXEL_NEIGHBOUR_TYPE_4:
        return get_neighbours_4, 4
    return get_neighbours_8, 8
def is_valid_cord(x, y, w, h):
    """
    Tell whether the 2D coordinate (x, y) is valid or not.
    If valid, it should be on an h x w image
    """
    return 0 <= x < w and 0 <= y < h
#=====================Ground Truth Calculation Begin==================
def tf_cal_gt_for_single_image(xs, ys, labels):
    """Graph-mode wrapper: run cal_gt_for_single_image via tf.py_func and pin
    the static shapes of its four outputs (labels/weights for pixel cls and
    pixel links)."""
    pixel_cls_label, pixel_cls_weight, \
    pixel_link_label, pixel_link_weight = \
        tf.py_func(
            cal_gt_for_single_image,
            [xs, ys, labels],
            [tf.int32, tf.float32, tf.int32, tf.float32]
        )
    import config
    score_map_shape = config.score_map_shape
    num_neighbours = config.num_neighbours
    h, w = score_map_shape
    # py_func outputs have unknown static shape; set it so downstream graph
    # construction can rely on it.
    pixel_cls_label.set_shape(score_map_shape)
    pixel_cls_weight.set_shape(score_map_shape)
    pixel_link_label.set_shape([h, w, num_neighbours])
    pixel_link_weight.set_shape([h, w, num_neighbours])
    return pixel_cls_label, pixel_cls_weight, \
        pixel_link_label, pixel_link_weight
def cal_gt_for_single_image(normed_xs, normed_ys, labels):
    """
    Args:
        xs, ys: both in shape of (N, 4),
            and N is the number of bboxes,
            their values are normalized to [0,1]
        labels: shape = (N,), only two values are allowed:
            -1: ignored
            1: text
    Return:
        pixel_cls_label
        pixel_cls_weight
        pixel_link_label
        pixel_link_weight
    """
    # NOTE: this module targets Python 2 (old-style `raise E, msg` below,
    # `zip` used as a list); do not run under Python 3 without porting.
    import config
    score_map_shape = config.score_map_shape
    pixel_cls_weight_method = config.pixel_cls_weight_method
    h, w = score_map_shape
    text_label = config.text_label
    ignore_label = config.ignore_label
    background_label = config.background_label
    num_neighbours = config.num_neighbours
    bbox_border_width = config.bbox_border_width
    pixel_cls_border_weight_lambda = config.pixel_cls_border_weight_lambda
    # validate the args
    assert np.ndim(normed_xs) == 2
    assert np.shape(normed_xs)[-1] == 4
    assert np.shape(normed_xs) == np.shape(normed_ys)
    assert len(normed_xs) == len(labels)
    # assert set(labels).issubset(set([text_label, ignore_label, background_label]))
    num_positive_bboxes = np.sum(np.asarray(labels) == text_label)
    # rescale normalized xys to absolute values
    xs = normed_xs * w
    ys = normed_ys * h
    # initialize ground truth values
    mask = np.zeros(score_map_shape, dtype = np.int32)
    pixel_cls_label = np.ones(score_map_shape, dtype = np.int32) * background_label
    pixel_cls_weight = np.zeros(score_map_shape, dtype = np.float32)
    pixel_link_label = np.zeros((h, w, num_neighbours), dtype = np.int32)
    pixel_link_weight = np.ones((h, w, num_neighbours), dtype = np.float32)
    # find overlapped pixels, and consider them as ignored in pixel_cls_weight
    # and pixels in ignored bboxes are ignored as well
    # That is to say, only the weights of not ignored pixels are set to 1
    ## get the masks of all bboxes
    bbox_masks = []
    pos_mask = mask.copy()
    for bbox_idx, (bbox_xs, bbox_ys) in enumerate(zip(xs, ys)):
        if labels[bbox_idx] == background_label:
            continue
        bbox_mask = mask.copy()
        # rasterize the quadrilateral into a filled binary mask
        bbox_points = zip(bbox_xs, bbox_ys)
        bbox_contours = util.img.points_to_contours(bbox_points)
        util.img.draw_contours(bbox_mask, bbox_contours, idx = -1,
                               color = 1, border_width = -1)
        bbox_masks.append(bbox_mask)
        if labels[bbox_idx] == text_label:
            pos_mask += bbox_mask
    # treat overlapped in-bbox pixels as negative,
    # and non-overlapped ones as positive
    pos_mask = np.asarray(pos_mask == 1, dtype = np.int32)
    num_positive_pixels = np.sum(pos_mask)
    ## add all bbox_maskes, find non-overlapping pixels
    sum_mask = np.sum(bbox_masks, axis = 0)
    not_overlapped_mask = sum_mask == 1
    ## gt and weight calculation
    for bbox_idx, bbox_mask in enumerate(bbox_masks):
        bbox_label = labels[bbox_idx]
        if bbox_label == ignore_label:
            # for ignored bboxes, only non-overlapped pixels are encoded as ignored
            bbox_ignore_pixel_mask = bbox_mask * not_overlapped_mask
            pixel_cls_label += bbox_ignore_pixel_mask * ignore_label
            continue
        if labels[bbox_idx] == background_label:
            continue
        # from here on, only text boxes left.
        # for positive bboxes, all pixels within it and pos_mask are positive
        bbox_positive_pixel_mask = bbox_mask * pos_mask
        # background or text is encoded into cls gt
        pixel_cls_label += bbox_positive_pixel_mask * bbox_label
        # for the pixel cls weights, only positive pixels are set to ones
        if pixel_cls_weight_method == PIXEL_CLS_WEIGHT_all_ones:
            pixel_cls_weight += bbox_positive_pixel_mask
        elif pixel_cls_weight_method == PIXEL_CLS_WEIGHT_bbox_balanced:
            # let N denote num_positive_pixels
            # weight per pixel = N /num_positive_bboxes / n_pixels_in_bbox
            # so all pixel weights in this bbox sum to N/num_positive_bboxes
            # and all pixels weights in this image sum to N, the same
            # as setting all weights to 1
            num_bbox_pixels = np.sum(bbox_positive_pixel_mask)
            if num_bbox_pixels > 0:
                per_bbox_weight = num_positive_pixels * 1.0 / num_positive_bboxes
                per_pixel_weight = per_bbox_weight / num_bbox_pixels
                pixel_cls_weight += bbox_positive_pixel_mask * per_pixel_weight
        else:
            # Python 2 raise syntax — kept intentionally (module targets py2).
            raise ValueError, 'pixel_cls_weight_method not supported:%s'\
                %(pixel_cls_weight_method)
        ## calculate the labels and weights of links
        ### for all pixels in bboxes, all links are positive at first
        bbox_point_cords = np.where(bbox_positive_pixel_mask)
        pixel_link_label[bbox_point_cords] = 1
        ## the border of bboxes might be distored because of overlapping
        ## so recalculate it, and find the border mask
        new_bbox_contours = util.img.find_contours(bbox_positive_pixel_mask)
        bbox_border_mask = mask.copy()
        util.img.draw_contours(bbox_border_mask, new_bbox_contours, -1,
                               color = 1, border_width = bbox_border_width * 2 + 1)
        bbox_border_mask *= bbox_positive_pixel_mask
        bbox_border_cords = np.where(bbox_border_mask)
        ## give more weight to the border pixels if configured
        pixel_cls_weight[bbox_border_cords] *= pixel_cls_border_weight_lambda
        ### change link labels according to their neighbour status
        border_points = zip(*bbox_border_cords)
        def in_bbox(nx, ny):
            return bbox_positive_pixel_mask[ny, nx]
        for y, x in border_points:
            neighbours = get_neighbours(x, y)
            for n_idx, (nx, ny) in enumerate(neighbours):
                # links leaving the image or the bbox are negative
                if not is_valid_cord(nx, ny, w, h) or not in_bbox(nx, ny):
                    pixel_link_label[y, x, n_idx] = 0
    # link weights inherit the (possibly border-boosted) pixel cls weights
    pixel_cls_weight = np.asarray(pixel_cls_weight, dtype = np.float32)
    pixel_link_weight *= np.expand_dims(pixel_cls_weight, axis = -1)
    # try:
    #     np.testing.assert_almost_equal(np.sum(pixel_cls_weight), num_positive_pixels, decimal = 1)
    # except:
    #     print num_positive_pixels, np.sum(pixel_cls_label), np.sum(pixel_cls_weight)
    #     import pdb
    #     pdb.set_trace()
    return pixel_cls_label, pixel_cls_weight, pixel_link_label, pixel_link_weight
#=====================Ground Truth Calculation End====================
#============================Decode Begin=============================
def tf_decode_score_map_to_mask_in_batch(pixel_cls_scores, pixel_link_scores):
    """Graph-mode wrapper around decode_batch; pins the (b, h, w) mask shape."""
    masks = tf.py_func(decode_batch,
                       [pixel_cls_scores, pixel_link_scores], tf.int32)
    b, h, w = pixel_cls_scores.shape.as_list()
    masks.set_shape([b, h, w])
    return masks
def decode_batch(pixel_cls_scores, pixel_link_scores,
                 pixel_conf_threshold = None, link_conf_threshold = None):
    """Decode a batch of pixel/link score maps into per-image instance masks.

    Thresholds default to the values in config. Returns an int32 ndarray of
    shape (batch, h, w).
    """
    import config
    if pixel_conf_threshold is None:
        pixel_conf_threshold = config.pixel_conf_threshold
    if link_conf_threshold is None:
        link_conf_threshold = config.link_conf_threshold
    batch_size = pixel_cls_scores.shape[0]
    batch_mask = []
    # NOTE: xrange — this module targets Python 2.
    for image_idx in xrange(batch_size):
        image_pos_pixel_scores = pixel_cls_scores[image_idx, :, :]
        image_pos_link_scores = pixel_link_scores[image_idx, :, :, :]
        mask = decode_image(
            image_pos_pixel_scores, image_pos_link_scores,
            pixel_conf_threshold, link_conf_threshold
        )
        batch_mask.append(mask)
    return np.asarray(batch_mask, np.int32)
# @util.dec.print_calling_in_short
# @util.dec.timeit
def decode_image(pixel_scores, link_scores,
                 pixel_conf_threshold, link_conf_threshold):
    """Decode one image's pixel/link score maps into an instance mask using
    the method selected by config.decode_method."""
    import config
    if config.decode_method == DECODE_METHOD_join:
        mask = decode_image_by_join(pixel_scores, link_scores,
                                    pixel_conf_threshold, link_conf_threshold)
        return mask
    elif config.decode_method == DECODE_METHOD_border_split:
        # NOTE(review): DECODE_METHOD_border_split and decode_image_by_border
        # are not defined in this module's visible scope — confirm they exist
        # elsewhere, otherwise this branch raises NameError at runtime.
        return decode_image_by_border(pixel_scores, link_scores,
                                      pixel_conf_threshold, link_conf_threshold)
    else:
        raise ValueError('Unknow decode method:%s'%(config.decode_method))
import pyximport; pyximport.install()
from pixel_link_decode import decode_image_by_join
def min_area_rect(cnt):
    """
    Args:
        cnt: an OpenCV contour (point array) to wrap.
    Return:
        ([cx, cy, w, h, theta], area): the minimum-area rotated rect
        surrounding the contour plus its area w * h.
    """
    (cx, cy), (w, h), theta = cv2.minAreaRect(cnt)
    return [cx, cy, w, h, theta], w * h
def rect_to_xys(rect, image_shape):
    """Convert rect to xys, i.e., eight points

    The `image_shape` is used to to make sure all points return are valid,
    i.e., within image area: each coordinate is clamped into
    [0, w-1] x [0, h-1].

    Args:
        rect: [cx, cy, w, h, theta] as produced by min_area_rect().
        image_shape: (h, w, ...) of the image the points must lie in.
    Return:
        A flat ndarray of 8 ints: x1, y1, x2, y2, x3, y3, x4, y4.
    """
    h, w = image_shape[0:2]

    def get_valid_x(x):
        # clamp into [0, w - 1]
        return min(max(x, 0), w - 1)

    def get_valid_y(y):
        # clamp into [0, h - 1]
        return min(max(y, 0), h - 1)

    rect = ((rect[0], rect[1]), (rect[2], rect[3]), rect[4])
    # BUG FIX: cv2.cv.BoxPoints only exists in OpenCV 2; OpenCV 3+ exposes
    # the same operation as cv2.boxPoints. Prefer the modern name and fall
    # back for old installs.
    if hasattr(cv2, 'boxPoints'):
        points = cv2.boxPoints(rect)
    else:
        points = cv2.cv.BoxPoints(rect)
    # np.int0 (an alias of np.intp) was removed in NumPy 2.0; cast explicitly.
    points = np.asarray(points).astype(np.intp)
    for i_xy, (x, y) in enumerate(points):
        points[i_xy, :] = [get_valid_x(x), get_valid_y(y)]
    points = np.reshape(points, -1)
    return points
# @util.dec.print_calling_in_short
# @util.dec.timeit
def mask_to_bboxes(mask, image_name, image_data, opath, i, image_shape = None, min_area = None,
min_height = None, min_aspect_ratio = None):
import config
feed_shape = config.train_image_shape
if image_shape is None:
image_shape = feed_shape
image_h, image_w = image_shape[0:2]
if min_area is None:
min_area = config.min_area
if min_height is None:
min_height = config.min_height
bboxes = []
max_bbox_idx = mask.max()
mask = util.img.resize(img = mask, size = (image_w, image_h), | |
<filename>easyscan_app/tests.py<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime, pprint
# from easyscan_app.models import LasDataMaker, ScanRequest, StatsBuilder
from django.http import QueryDict
from django.test import TestCase
from easyscan_app.lib.data_prepper import LasDataMaker
from easyscan_app.lib.magic_bus import Prepper
from easyscan_app.lib.spacer import Spacer
from easyscan_app.models import RequestViewGetHelper, ScanRequest, StatsBuilder
# maker = LasDataMaker()
prepper = Prepper()
statsbuilder = StatsBuilder()
class LasDataMakerTest( TestCase ):
    """ Tests models.LasDataMaker() """
    # NOTE: Python 2 test module (uses `unicode`, u''/b'' literal mix).
    def setUp(self):
        self.maker = LasDataMaker()
        self.maxDiff = None
    def test__utf8list_to_utf8csv__str( self ):
        """ Tests good utf8 strings (required by csv module). """
        utf8_list = [ b'foo', b'bar', b'“iñtërnâtiônàlĭzætiøn”' ]
        result = self.maker.utf8list_to_utf8csv( utf8_list )
        self.assertEqual(
            b'"foo","bar","\xe2\x80\x9ci\xc3\xb1t\xc3\xabrn\xc3\xa2ti\xc3\xb4n\xc3\xa0l\xc4\xadz\xc3\xa6ti\xc3\xb8n\xe2\x80\x9d"\r\n',
            result )
        self.assertEqual(
            str,
            type(result) )
    def test__utf8list_to_utf8csv__unicode( self ):
        """ Tests bad unicode strings. """
        # unicode input must be rejected with a descriptive exception
        unicode_list = [ u'foo', u'bar', u'“iñtërnâtiônàlĭzætiøn”' ]
        result = u'init'
        try:
            self.maker.utf8list_to_utf8csv( unicode_list )
        except Exception as e:
            result = unicode(e)
        self.assertEqual(
            u"entry `u'foo'` not of type str",
            result )
    def test__make_date_string( self ):
        """ Tests conversion of datetime object to string required by LAS. """
        dt = datetime.datetime( 2014, 12, 8, 12, 40, 59 )
        self.assertEqual(
            u'Mon Dec 08 2014',
            self.maker.make_date_string( dt )
            )
    def test__strip_stuff( self ):
        """ Tests removal of double-quotes and new-lines. """
        self.assertEqual(
            u"The title was 'Zen', I think.",
            self.maker.strip_stuff(u'The title was "Zen", I think.') )
        self.assertEqual(
            u'first line - second line',
            self.maker.strip_stuff(u'first line\nsecond line') )
        self.assertEqual(
            u'first line - second line',
            self.maker.strip_stuff(u'first line\rsecond line') )
        self.assertEqual(
            u"The title was 'Zen', I think.",
            self.maker.strip_stuff(u'The title was `Zen`, I think.') )
    def test__add_email( self ):
        """ Checks for space before and after actual email line. """
        email = '<EMAIL>'
        # each entry is a fixed-width 50-character LAS "line"
        expected_lst = [
            '------------------------------------------------  ', # 50 characters
            '                                                  ',
            'email: <EMAIL>                                    ',
            '                                                  ',
            'EMAIL: <EMAIL>                                    ',
            '                                                  '
            ]
        result = self.maker.add_email( email )
        # print( 'expected, ```%s```' % ''.join( expected_lst ) )
        # print( 'resulttt, ```%s```' % result )
        self.assertEqual(
            ''.join( expected_lst ),
            result
            )
    # def test__add_email( self ):
    #     """ Checks for space before and after actual email line. """
    #     email = '<EMAIL>'
    #     expected_lst = [
    #         '                                                  ',  # 50 characters
    #         'email: <EMAIL>                                    ',
    #         '                                                  ',
    #         'EMAIL: <EMAIL>.EDU                                ',
    #         '                                                  '
    #         ]
    #     self.assertEqual(
    #         ''.join( expected_lst ),
    #         self.maker.add_email( email )
    #         )
    def test__add_article_chapter_title( self ):
        """ Checks for space before and after article-chapter-title line. """
        initial_data = 'foo bar                                           ' # 50 characters
        article_chapter_title = 'Ultrastructural analysis of the effect of ethane dimethanesulphonate on the testis of the rat, guinea pig, hamster and mouse.'
        # long titles wrap into successive 50-character lines
        expected_lst = [
            'foo bar                                           ', # 50 characters
            'ARTICLE-CHAPTER-TITLE...                          ',
            '                                                  ',
            'Ultrastructural analysis of the effect of ethane  ',
            'dimethanesulphonate on the testis of the rat,     ',
            'guinea pig, hamster and mouse.                    ',
            '                                                  '
            ]
        self.assertEqual(
            ''.join( expected_lst ),
            self.maker.add_article_chapter_title( initial_data, article_chapter_title )
            )
    def test__make_notes_field( self ):
        """ Checks for proper spacing. """
        patron_email = '<EMAIL>'
        item_chap_vol_title = 'test-article-title'
        item_page_range_other = 'test-range'
        item_other = 'test-other'
        expected_lst = [
            '------------------------------------------------  ', # 50 characters
            '                                                  ',
            'email: <EMAIL>                                    ',
            '                                                  ',
            'EMAIL: <EMAIL>                                    ',
            '                                                  ',
            'ARTICLE-CHAPTER-TITLE...                          ',
            '                                                  ',
            'test-article-title                                ',
            '                                                  ',
            'PAGE-RANGE: test-range                            ',
            'PAGE-OTHER: test-other                            ',
            ]
        self.assertEqual(
            ''.join( expected_lst ),
            self.maker.make_notes_field( patron_email, item_chap_vol_title, item_page_range_other, item_other )
            )
    # end class LasDataMakerTest
class SpacerTest( TestCase ):
    """ Checks spacer.py Spacer() """
    def setUp(self):
        self.spcr = Spacer()
    ## convert_string_to_lines() ##
    def test__convert_string_to_lines__short_blank(self):
        self.spcr.notes_line_length = 4
        self.assertEqual(
            [ '' ],
            self.spcr.convert_string_to_lines( ' ' )
            )
    def test__convert_string_to_lines__short(self):
        self.spcr.notes_line_length = 10
        self.assertEqual(
            [ 'abc' ],
            self.spcr.convert_string_to_lines( 'abc' )
            )
    def test__convert_string_to_lines__full(self):
        self.spcr.notes_line_length = 10
        self.assertEqual(
            ['1234567890'],
            self.spcr.convert_string_to_lines( '1234567890' )
            )
    def test__convert_string_to_lines__single_bigger(self):
        # a single unbreakable word longer than the limit is kept whole
        self.spcr.notes_line_length = 10
        self.assertEqual(
            [u'123456789012'],
            self.spcr.convert_string_to_lines( '123456789012' )
            )
    def test__convert_string_to_lines__single_bigger_plus_more_words(self):
        self.spcr.notes_line_length = 10
        self.assertEqual(
            ['123456789012', 'aaa'],
            self.spcr.convert_string_to_lines( '123456789012 aaa' )
            )
    def test__convert_string_to_lines__bigger_even_break(self):
        self.spcr.notes_line_length = 10
        self.assertEqual(
            ['1234567890', '123'],
            self.spcr.convert_string_to_lines( '1234567890 123' )
            )
    def test__convert_string_to_lines__bigger_uneven_break(self):
        self.spcr.notes_line_length = 10
        self.assertEqual(
            ['12345678', '012'],
            self.spcr.convert_string_to_lines( '12345678 012' )
            )
    def test__convert_string_to_lines__bigger_miscellaneous(self):
        self.spcr.notes_line_length = 50
        self.assertEqual( [
            'A surprisingly long TEST article-title, because', # 47, would be 50
            'of the repetition of the surprisingly long', # 42, would be 50
            'article title.'
            ],
            self.spcr.convert_string_to_lines( 'A surprisingly long TEST article-title, because of the repetition of the surprisingly long article title.' )
            )
    ## add_spacer() ##
    def test__add_spacer_small_string( self ):
        """ Tests filler in no-wrap situation. """
        self.spcr.notes_line_length = 10
        self.spcr.spacer_character = '|'
        self.assertEqual(
            'abc |||||  ',
            self.spcr.add_spacer( 'abc' )
            )
    def test__add_spacer_full_length_string( self ):
        """ Checks that full-string gets a full extra string added (ending in a space). """
        self.spcr.notes_line_length = 10
        self.spcr.spacer_character = '|'
        ten_characters = 'x' * 10
        expected_lst = [
            'xxxxxxxxxx',
            ' |||||||| '
            ]
        self.assertEqual(
            ''.join( expected_lst ),
            self.spcr.add_spacer( ten_characters )
            )
    def test__add_spacer_full_length_string_using_spaces( self ):
        """ Checks that full-string gets a full extra string added (ending in a space) when using expected spacer character of ' '. """
        self.spcr.notes_line_length = 10
        self.spcr.spacer_character = ' '
        ten_characters = 'x' * 10
        expected_lst = [
            'xxxxxxxxxx',
            '          '
            ]
        self.assertEqual(
            ''.join( expected_lst ),
            self.spcr.add_spacer( ten_characters )
            )
    def test__add_spacer_big_string( self ):
        """ Tests filler when wrapping. """
        self.spcr.notes_line_length = 10
        self.spcr.spacer_character = '|'
        fifteen_characters = 'x' * 15
        expected_lst = [
            'xxxxxxxxxx',
            'xxxxx ||| '
            ]
        self.assertEqual(
            ''.join( expected_lst ),
            self.spcr.add_spacer( fifteen_characters )
            )
    def test__add_spacer_big_string2( self ):
        """ Tests filler when wrapping will hit on a break. """
        self.spcr.notes_line_length = 50
        self.spcr.spacer_character = '|'
        long_text = '''A really long article title. A really long article title. A really long article title.'''
        expected_lst = [
            'A really long article title. A really long |||||| ',
            'article title. A really long article title. ||||| '
            ]
        self.assertEqual(
            ''.join( expected_lst ),
            self.spcr.add_spacer( long_text )
            )
    def test__add_spacer_big_string3( self ):
        """ Tests filler when wrapping will hit on a break2. """
        self.spcr.notes_line_length = 50
        self.spcr.spacer_character = '|'
        long_text = '''First line will break after (46th letter) THIS aanndd then continue.'''
        expected_lst = [
            'First line will break after (46th letter) THIS || ',
            'aanndd then continue. ||||||||||||||||||||||||||| '
            ]
        self.assertEqual(
            ''.join( expected_lst ),
            self.spcr.add_spacer( long_text )
            )
    def test__add_spacer_big_string4( self ):
        """ Tests filler when wrapping will hit on end-of-word.
            (Should breaking on the previous word.) """
        self.spcr.notes_line_length = 50
        self.spcr.spacer_character = '|'
        long_text = '''First line will break afterrrrr (50th letter) THIS and then continue.'''
        expected_lst = [
            'First line will break afterrrrr (50th letter) ||| ',
            'THIS and then continue. ||||||||||||||||||||||||| '
            ]
        self.assertEqual(
            ''.join( expected_lst ),
            self.spcr.add_spacer( long_text )
            )
    def test__add_spacer_big_string5( self ):
        """ Tests filler when wrapping will hit on end-of-word2.
            (Should breaking on the previous word.)
            For some reason the above passed and this initially didn't """
        self.spcr.notes_line_length = 50
        self.spcr.spacer_character = '|'
        long_text = 'A surprisingly long TEST article-title, because of the repetition of the surprisingly long article title.'
        # NOTE(review): missing comma — the last two strings below are
        # implicitly concatenated into one list entry. Harmless here because
        # the list is ''.join()ed, but likely unintended; confirm.
        expected_lst = [
            'A surprisingly long TEST article-title, because | ',
            'of the repetition of the surprisingly long |||||| '
            'article title. |||||||||||||||||||||||||||||||||| '
            ]
        self.assertEqual(
            ''.join( expected_lst ),
            self.spcr.add_spacer( long_text )
            )
    # end class SpacerTest()
class MagicBusPrepperTest( TestCase ):
    """ Tests magic_bus.py Prepper() """
    def test__make_filename_datestring( self ):
        """ Tests conversion of datetime object to string for filename. """
        sample_dt = datetime.datetime( 2014, 12, 8, 15, 40, 59 )
        expected = u'2014-12-08T15:40:59'
        self.assertEqual(
            expected,
            prepper.make_filename_datestring( sample_dt )
            )
    # end class MagicBusPrepperTest
class StatsBuilderTest( TestCase ):
    """ Tests models.py StatsBuilder() """
    # NOTE(review): `statsbuilder` is a module-level shared instance, so state
    # set by check_params() is read by run_query() below — confirm intended.
    def test__check_params( self ):
        """ Tests keys. """
        ## bad params
        qdict = QueryDict( u'', mutable=True ); qdict.update( {u'start': u'a', u'end': u'b'} )
        self.assertEqual( False, statsbuilder.check_params(qdict, u'server_name') )
        ## good params
        qdict = QueryDict( u'', mutable=True ); qdict.update( {u'start_date': 'a', u'end_date': 'b'} )
        self.assertEqual( True, statsbuilder.check_params(qdict, u'server_name') )
    def test__run_query( self ):
        """ Tests that scanrequest is found and returned. """
        sr = ScanRequest( item_title=u'foo' )
        sr.save()
        qdict = QueryDict( u'', mutable=True ); qdict.update( {u'start_date': datetime.date.today(), u'end_date': datetime.date.today()} )
        statsbuilder.check_params( qdict, u'server_name' )
        results = statsbuilder.run_query()
        self.assertEqual( 1, len(results) )
class RequestViewGetHelperTest( TestCase):
""" Tests models.py RequestViewGetHelper() """
def setUp(self):
self.helper = RequestViewGetHelper()
def test_hit_availability_api( self ):
""" Checks | |
<gh_stars>1-10
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import codecs
import contextlib
import io
import os
import sys
import unittest
import warnings
import six
from six.moves.urllib.error import HTTPError
import zExceptions
import Zope2
from AccessControl.Permissions import change_proxy_roles
from AccessControl.SecurityManagement import newSecurityManager
from AccessControl.SecurityManagement import noSecurityManager
from OFS.Folder import Folder
from Testing.makerequest import makerequest
from Testing.testbrowser import Browser
from Testing.ZopeTestCase import FunctionalTestCase
from ..PythonScript import PythonScript
HERE = os.path.dirname(__file__)
@contextlib.contextmanager
def warning_interceptor():
    """Temporarily replace sys.stderr with an in-memory stream and yield it."""
    stream = six.StringIO()
    saved_stderr, sys.stderr = sys.stderr, stream
    try:
        yield stream
    finally:
        sys.stderr = saved_stderr
# Test Classes
def readf(name):
    """Return the source text of the fixture script ``tscripts/<name>.ps``."""
    script_path = os.path.join(HERE, 'tscripts', '%s.ps' % name)
    with open(script_path, 'r') as handle:
        return handle.read()
class DummyFolder(Folder):
    """ Stitch in an implementation for getPhysicalPath """

    def getPhysicalPath(self):
        # Pretend to sit at the application root so code that builds
        # physical paths works without a full Zope object tree.
        return ()
class PythonScriptTestBase(unittest.TestCase):
    """Shared fixture for PythonScript tests.

    Declares the module-security permissions the test scripts rely on and
    restores the global AccessControl security registries after each test.
    """

    def setUp(self):
        from AccessControl import ModuleSecurityInfo
        from AccessControl.SecurityInfo import _appliedModuleSecurity
        from AccessControl.SecurityInfo import _moduleSecurity
        # Snapshot the module-global registries so tearDown can restore
        # them exactly as they were before this test ran.
        self._ms_before = _moduleSecurity.copy()
        self._ams_before = _appliedModuleSecurity.copy()
        ModuleSecurityInfo('string').declarePublic('split')  # noqa: D001
        ModuleSecurityInfo('sets').declarePublic('Set')  # noqa: D001
        newSecurityManager(None, None)

    def tearDown(self):
        from AccessControl.SecurityInfo import _appliedModuleSecurity
        from AccessControl.SecurityInfo import _moduleSecurity
        noSecurityManager()
        # Restore the pre-test registry contents (clear + update keeps the
        # original dict objects in place).
        _moduleSecurity.clear()
        _moduleSecurity.update(self._ms_before)
        _appliedModuleSecurity.clear()
        _appliedModuleSecurity.update(self._ams_before)

    def _newPS(self, txt, bind=None):
        # Build a PythonScript from inline source text; surface compile
        # errors eagerly as SyntaxError so tests can assert on them.
        ps = PythonScript('ps')
        ps.ZBindings_edit(bind or {})
        ps.write(txt)
        ps._makeFunction()
        if ps.errors:
            raise SyntaxError(ps.errors[0])
        return ps

    def _filePS(self, fname, bind=None):
        # Like _newPS, but the body comes from tscripts/<fname>.ps.
        ps = PythonScript(fname)
        ps.ZBindings_edit(bind or {})
        ps.write(readf(fname))
        ps._makeFunction()
        if ps.errors:
            raise SyntaxError(ps.errors[0])
        return ps
class TestPythonScriptNoAq(PythonScriptTestBase):
    """Behavioral tests for PythonScript executed without acquisition.

    Scripts are built either inline (``_newPS``) or from fixture files in
    ``tscripts/`` (``_filePS``); calling the script object runs it.
    """

    def testEmpty(self):
        empty = self._newPS('')()
        self.assertIsNone(empty)

    def testIndented(self):
        # This failed to compile in Zope 2.4.0b2.
        res = self._newPS('if 1:\n return 2')()
        self.assertEqual(res, 2)

    def testReturn(self):
        res = self._newPS('return 1')()
        self.assertEqual(res, 1)

    def testReturnNone(self):
        res = self._newPS('return')()
        self.assertIsNone(res)

    def testParam1(self):
        res = self._newPS('##parameters=x\nreturn x')('txt')
        self.assertEqual(res, 'txt')

    def testParam2(self):
        eq = self.assertEqual
        one, two = self._newPS('##parameters=x,y\nreturn x,y')('one', 'two')
        eq(one, 'one')
        eq(two, 'two')

    def testParam26(self):
        # 26 single-letter parameters, passed positionally.
        import string
        params = string.ascii_letters[:26]
        sparams = ','.join(params)
        ps = self._newPS('##parameters=%s\nreturn %s' % (sparams, sparams))
        res = ps(*params)
        self.assertEqual(res, tuple(params))

    def testArithmetic(self):
        res = self._newPS('return 1 * 5 + 4 / 2 - 6')()
        self.assertEqual(res, 1)

    def testReduce(self):
        res = self._newPS('return reduce(lambda x, y: x + y, [1,3,5,7])')()
        self.assertEqual(res, 16)
        res = self._newPS('return reduce(lambda x, y: x + y, [1,3,5,7], 1)')()
        self.assertEqual(res, 17)

    def testImport(self):
        res = self._newPS('import string; return "7" in string.digits')()
        self.assertTrue(res)

    def testWhileLoop(self):
        res = self._filePS('while_loop')()
        self.assertEqual(res, 1)

    def testForLoop(self):
        res = self._filePS('for_loop')()
        self.assertEqual(res, 10)

    def testMutateLiterals(self):
        eq = self.assertEqual
        l, d = self._filePS('mutate_literals')()
        eq(l, [2])
        eq(d, {'b': 2})

    def testTupleUnpackAssignment(self):
        eq = self.assertEqual
        d, x = self._filePS('tuple_unpack_assignment')()
        eq(d, {'a': 0, 'b': 1, 'c': 2})
        eq(x, 3)

    def testDoubleNegation(self):
        res = self._newPS('return not not "this"')()
        self.assertEqual(res, 1)

    def testTryExcept(self):
        eq = self.assertEqual
        a, b = self._filePS('try_except')()
        eq(a, 1)
        eq(b, 1)

    def testBigBoolean(self):
        res = self._filePS('big_boolean')()
        self.assertTrue(res)

    def testFibonacci(self):
        res = self._filePS('fibonacci')()
        self.assertEqual(
            res, [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377,
                  610, 987, 1597, 2584, 4181, 6765, 10946, 17711, 28657,
                  46368, 75025, 121393, 196418, 317811, 514229, 832040,
                  1346269, 2178309, 3524578, 5702887, 9227465, 14930352,
                  24157817, 39088169, 63245986])

    def testSimplePrint(self):
        res = self._filePS('simple_print')()
        self.assertEqual(res, 'a\n')

    def testComplexPrint(self):
        # print semantics differ between py2/py3, so a per-major-version
        # fixture script is used.
        script = 'complex_print_py%s' % sys.version_info.major
        res = self._filePS(script)()
        self.assertEqual(res, 'double\ndouble\nx: 1\ny: 0 1 2\n\n')

    def testNSBind(self):
        f = self._filePS('ns_bind', bind={'name_ns': '_'})
        bound = f.__render_with_namespace__({'yes': 1, 'no': self.fail})
        self.assertEqual(bound, 1)

    def testNSBindInvalidHeader(self):
        self.assertRaises(SyntaxError, self._filePS, 'ns_bind_invalid')

    def testBooleanMap(self):
        res = self._filePS('boolean_map')()
        self.assertTrue(res)

    def testGetSize(self):
        script = 'complex_print_py%s' % sys.version_info.major
        f = self._filePS(script)
        self.assertEqual(f.get_size(), len(f.read()))

    def testBuiltinSet(self):
        res = self._newPS('return len(set([1, 2, 3, 1]))')()
        self.assertEqual(res, 3)

    @unittest.skipIf(six.PY3, 'sets module does not exist in python3')
    def testSetModule(self):
        res = self._newPS('from sets import Set; return len(Set([1,2,3]))')()
        self.assertEqual(res, 3)

    def testDateTime(self):
        res = self._newPS(
            "return DateTime('2007/12/10').strftime('%d.%m.%Y')")()
        self.assertEqual(res, '10.12.2007')

    def testRaiseSystemExitLaunchpad257269(self):
        # Restricted code must not be able to terminate the process.
        ps = self._newPS('raise SystemExit')
        self.assertRaises(ValueError, ps)

    def testEncodingTestDotTestAllLaunchpad257276(self):
        ps = self._newPS("return 'foo'.encode('test.testall')")
        self.assertRaises(LookupError, ps)

    def test_manage_DAVget(self):
        ps = makerequest(self._filePS('complete'))
        self.assertEqual(ps.read(), ps.manage_DAVget())

    def test_PUT_native_string(self):
        # Uploading a new body via WebDAV PUT (text payload) must replace
        # title, parameters and body.
        ps = makerequest(self._filePS('complete'))
        self.assertEqual(ps.title, 'This is a title')
        self.assertEqual(ps.body(), 'print(foo+bar+baz)\nreturn printed\n')
        self.assertEqual(ps.params(), 'foo, bar, baz=1')
        new_body = u"""\
## Script (Python) "complete"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=oops
##title=New Title
##
return \xe4\xe9\xee\xf6\xfc
"""
        ps.REQUEST['BODY'] = new_body
        ps._filepath = 'fake'
        ps.PUT(ps.REQUEST, ps.REQUEST.RESPONSE)
        self.assertEqual(ps.title, 'New Title')
        # py2 stores the body UTF-8-encoded, py3 as native text.
        if six.PY3:
            self.assertEqual(ps.body(), 'return \xe4\xe9\xee\xf6\xfc\n')
        else:
            self.assertEqual(
                ps.body(),
                'return \xc3\xa4\xc3\xa9\xc3\xae\xc3\xb6\xc3\xbc\n')
        self.assertEqual(ps.params(), 'oops')

    def test_PUT_bytes(self):
        # Same as test_PUT_native_string but with a UTF-8 bytes payload.
        ps = makerequest(self._filePS('complete'))
        self.assertEqual(ps.title, 'This is a title')
        self.assertEqual(ps.body(), 'print(foo+bar+baz)\nreturn printed\n')
        self.assertEqual(ps.params(), 'foo, bar, baz=1')
        new_body = b"""\
## Script (Python) "complete"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=oops
##title=New Title
##
return \xc3\xa4\xc3\xa9\xc3\xae\xc3\xb6\xc3\xbc
"""
        ps.REQUEST['BODY'] = new_body
        ps._filepath = 'fake'
        ps.PUT(ps.REQUEST, ps.REQUEST.RESPONSE)
        self.assertEqual(ps.title, 'New Title')
        if six.PY3:
            self.assertEqual(ps.body(), 'return \xe4\xe9\xee\xf6\xfc\n')
        else:
            self.assertEqual(
                ps.body(),
                'return \xc3\xa4\xc3\xa9\xc3\xae\xc3\xb6\xc3\xbc\n')
        self.assertEqual(ps.params(), 'oops')

    def test_write(self):
        # write() accepts both bytes and text.
        ps = self._newPS('')
        ps.write(b'return 1')
        self.assertEqual(ps.body(), 'return 1\n')
        ps.write(u'return 1')
        self.assertEqual(ps.body(), 'return 1\n')

    def test_factory(self):
        from Products.PythonScripts.PythonScript import manage_addPythonScript
        # Only passing the id
        container = DummyFolder('container')
        manage_addPythonScript(container, 'testing')
        self.assertEqual(container.testing.getId(), 'testing')
        self.assertEqual(container.testing.title, '')
        self.assertIn('# Example code:', container.testing.body())
        self.assertEqual(container.testing.params(), '')
        # Passing id and a title
        container = DummyFolder('container')
        manage_addPythonScript(container, 'testing', title='This is a title')
        self.assertEqual(container.testing.getId(), 'testing')
        self.assertEqual(container.testing.title, 'This is a title')
        self.assertIn('# Example code:', container.testing.body())
        self.assertEqual(container.testing.params(), '')
        # Passing id, title and a request that has no file
        container = makerequest(DummyFolder('container'))
        container.REQUEST.form = {}
        manage_addPythonScript(container, 'testing', title='This is a title',
                               REQUEST=container.REQUEST)
        self.assertEqual(container.testing.getId(), 'testing')
        self.assertEqual(container.testing.title, 'This is a title')
        self.assertIn('# Example code:', container.testing.body())
        self.assertEqual(container.testing.params(), '')
        # Passing id, title and a request with a file string
        container = makerequest(DummyFolder('container'))
        container.REQUEST.form = {'file': 'return 1'}
        manage_addPythonScript(container, 'testing', title='This is a title',
                               REQUEST=container.REQUEST)
        self.assertEqual(container.testing.getId(), 'testing')
        self.assertEqual(container.testing.title, 'This is a title')
        self.assertEqual(container.testing.body(), 'return 1\n')
        self.assertEqual(container.testing.params(), '')
        # Passing id, title and a request with a file object
        container = makerequest(DummyFolder('container'))
        container.REQUEST.form = {'file': io.BytesIO(b'return 1')}
        manage_addPythonScript(container, 'testing', title='This is a title',
                               REQUEST=container.REQUEST)
        self.assertEqual(container.testing.getId(), 'testing')
        self.assertEqual(container.testing.title, 'This is a title')
        self.assertEqual(container.testing.body(), 'return 1\n')
        self.assertEqual(container.testing.params(), '')
        # Passing id, title and a file string
        container = makerequest(DummyFolder('container'))
        manage_addPythonScript(container, 'testing', title='This is a title',
                               file=b'return 1')
        self.assertEqual(container.testing.getId(), 'testing')
        self.assertEqual(container.testing.title, 'This is a title')
        self.assertEqual(container.testing.body(), 'return 1\n')
        self.assertEqual(container.testing.params(), '')
        # Passing id, title and a file object
        container = makerequest(DummyFolder('container'))
        manage_addPythonScript(container, 'testing', title='This is a title',
                               file=io.BytesIO(b'return 1'))
        self.assertEqual(container.testing.getId(), 'testing')
        self.assertEqual(container.testing.title, 'This is a title')
        self.assertEqual(container.testing.body(), 'return 1\n')
        self.assertEqual(container.testing.params(), '')
class TestPythonScriptErrors(PythonScriptTestBase):
    """Scripts that must be rejected at compile time or fail at run time."""

    def assertPSRaises(self, error, path=None, body=None):
        # Exactly one of ``path`` (a tscripts/ fixture) or ``body``
        # (inline source) must be supplied.
        assert not (path and body) and (path or body)
        if body is None:
            body = readf(path)
        if error is SyntaxError:
            # Compile errors surface from _newPS itself.
            self.assertRaises(SyntaxError, self._newPS, body)
        else:
            ps = self._newPS(body)
            self.assertRaises(error, ps)

    def testSubversiveExcept(self):
        self.assertPSRaises(SyntaxError, path='subversive_except')

    def testBadImports(self):
        from zExceptions import Unauthorized
        self.assertPSRaises(SyntaxError, body='from string import *')
        self.assertPSRaises(Unauthorized, body='from datetime import datetime')
        self.assertPSRaises(Unauthorized, body='import mmap')

    def testAttributeAssignment(self):
        # It's illegal to assign to attributes of anything that
        # doesn't have enabling security declared.
        # Classes (and their instances) defined by restricted code
        # are an exception -- they are fully readable and writable.
        cases = [('import string', 'string'),
                 ('def f(): pass', 'f'),
                 ]
        assigns = ["%s.splat = 'spam'",
                   "setattr(%s, '_getattr_', lambda x, y: True)",
                   'del %s.splat',
                   ]
        for defn, name in cases:
            for asn in assigns:
                func = self._newPS(defn + '\n' + asn % name)
                self.assertRaises(TypeError, func)

    def testBadIdentifiers(self):
        """Some identifiers have to be avoided.

        Background:
        https://github.com/zopefoundation/Zope/issues/669
        """
        bad_identifiers = [
            'context', 'container', 'script', 'traverse_subpath',
        ]
        for identifier in bad_identifiers:
            with self.assertRaises(ValueError):
                PythonScript(identifier)
class TestPythonScriptGlobals(PythonScriptTestBase):
    """Tests around global/bound-name handling inside scripts."""

    def setUp(self):
        PythonScriptTestBase.setUp(self)

    def tearDown(self):
        PythonScriptTestBase.tearDown(self)

    def _exec(self, script, bound_names=None, args=None, kws=None):
        # Run tscripts/<script>.ps through the low-level _exec entry
        # point with a fixed 'container' name binding.
        if args is None:
            args = ()
        if kws is None:
            kws = {}
        bindings = {'name_container': 'container'}
        f = self._filePS(script, bindings)
        return f._exec(bound_names, args, kws)

    def testGlobalIsDeclaration(self):
        bindings = {'container': 7}
        results = self._exec('global_is_declaration', bindings)
        self.assertEqual(results, 8)

    def test__name__(self):
        f = self._filePS('class.__name__')
        # Class qualnames differ between py2 and py3 for nested classes.
        if six.PY3:
            class_name = "'script.class.__name__.<locals>.foo'>"
        else:
            class_name = "'script.foo'>"
        self.assertEqual(f(), (class_name, "'string'"))

    def test_filepath(self):
        # This test is meant to raise a deprecation warning.
        # It used to fail mysteriously instead.
        def warnMe(message):
            warnings.warn(message, stacklevel=2)

        try:
            f = self._filePS('filepath')
            with warning_interceptor() as stream:
                f._exec({'container': warnMe}, (), {})
            self.assertIn('UserWarning: foo', stream.getvalue())
        except TypeError as e:
            self.fail(e)
class PythonScriptInterfaceConformanceTests(unittest.TestCase):
    """Zope interface conformance checks for PythonScript."""

    def test_class_conforms_to_IWriteLock(self):
        from zope.interface.verify import verifyClass
        # IWriteLock is imported from OFS when available, falling back to
        # the older webdav package location.
        try:
            from OFS.interfaces import IWriteLock
        except ImportError:
            from webdav.interfaces import IWriteLock
        verifyClass(IWriteLock, PythonScript)
class PythonScriptBrowserTests(FunctionalTestCase):
"""Browser testing Python Scripts"""
def setUp(self):
from Products.PythonScripts.PythonScript import manage_addPythonScript
super(PythonScriptBrowserTests, self).setUp()
Zope2.App.zcml.load_site(force=True)
uf = | |
# -*- coding: utf-8 -*-
#
# Copyright © 2014, Emutex Ltd.
# All rights reserved.
# http://www.emutex.com
#
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
#
# See license in LICENSE.txt file.
#
# Wiring-x86 is a Python module that lets you use Arduino like functionality
# on
# Intel® Gaileo
# Intel® Gaileo Gen2
# Intel® Edison
import datetime
import os
# Pin-mode tokens accepted by GPIOBase.pinMode().
INPUT = 'in'
INPUT_PULLUP = 'in_pullup'
INPUT_PULLDOWN = 'in_pulldown'
OUTPUT = 'out'
ANALOG_INPUT = 'analog_input'
PWM = 'pwm'
# Logic levels; these exact strings are written to the sysfs
# direction/value files.
LOW = 'low'
HIGH = 'high'
# NOTE(review): NONE shares INPUT's value ('in'); the muxing tables use it
# for pins that should be left tri-stated (see pinMode's DRIVE_HIZ branch).
NONE = 'in'
# Drive strengths written to the sysfs 'drive' attribute (boards without a
# pinmux only — see _set_drive()).
DRIVE_STRONG = 'strong'
DRIVE_HIZ = 'hiz'
# Pinmux function selectors written via _muxmode().
MODE_0 = 'mode0'
MODE_1 = 'mode1'
MODE_2 = 'mode2'
MODE_3 = 'mode3'
MODE_4 = 'mode4'
MODE_5 = 'mode5'
ALL_MODES = (MODE_0, MODE_1, MODE_2, MODE_3, MODE_4, MODE_5)
class GPIOBase(object):
    """Arduino-style GPIO access through the Linux sysfs interface.

    Subclasses (e.g. GPIOGalileo) supply the board-specific mapping
    tables (``GPIO_MAPPING``, ``ADC_MAPPING``, ``PWM_MAPPING`` and the
    ``GPIO_MUX_*`` tables) plus, where applicable, a ``pinmux`` pin and
    the PWM helpers referenced below.
    """

    def __init__(self, debug=False):
        """Constructor

        Args:
            debug: enables the debug mode showing the interaction with sysfs
        """
        self.debug = debug
        self.pins_in_use = []       # exported sysfs GPIO numbers (for cleanup)
        self.gpio_handlers = {}     # linux_pin -> open value-file handler
        self.exported_pwm = []      # exported PWM channels (for cleanup)
        self.enabled_pwm = {}       # pwm channel -> True once enabled
        # Boards with a global pinmux pin (subclass attribute) need it
        # driven high for normal operation.
        if self.has_pinmux():
            self._export_pin(self.pinmux)
            self._set_direction(self.pinmux, self.HIGH)

    def has_pinmux(self):
        # True when the concrete board class defines a ``pinmux`` pin.
        return hasattr(self, 'pinmux')

    def pinMode(self, pin, mode):
        """Set mode to GPIO pin`.

        This function must be called before doing any other operation on the
        pin. It also sets up the muxing needed in the board for the pin to
        behave as the user wants to.

        Args:
            pin: Arduino pin number (0-19)
            mode: pin mode must be:
                OUTPUT: Pin used as output. Use to write into it.
                INPUT: Pin used as input (high impedance). Use to read
                    from it.
                INPUT_PULLUP: Pin used as input (pullup resistor). Use to read
                    from it.
                INPUT_PULLDOWN: Pin used as input (pulldown resistor). Use to
                    read from it.
                ANALOG_INPUT: Pin used as analog input (ADC).
                PWM: Pin used as analog output (PWM).

        Returns:
            True on success, False for an unknown pin or unsupported mode.
        """
        if pin not in self.GPIO_MAPPING:
            return False
        if self.has_pinmux():
            # Disable the global pinmux while reconfiguring.
            self._set_direction(self.pinmux, self.LOW)
        mux = self._select_muxing(mode, pin)
        if mux is None:
            return False
        linux_pin = self.GPIO_MAPPING[pin]
        self._export_pin(linux_pin)
        # In these two cases we open file handlers to write directly into them.
        # That makes it faster than going through sysfs.
        # No bother with PWM.
        if mode == ANALOG_INPUT:
            adc = self.ADC_MAPPING[pin]
            self._open_analog_handler(linux_pin, adc)
        elif mode in (OUTPUT, INPUT, INPUT_PULLUP, INPUT_PULLDOWN):
            self._open_digital_handler(linux_pin)
        # Walk through the muxing table and set the pins to their values. This
        # is the actual muxing.
        for vpin, value in mux:
            self._export_pin(vpin)
            self._set_direction(vpin, value)
            if value == NONE:
                self._set_drive(vpin, DRIVE_HIZ)
            elif value in (HIGH, LOW):
                self._set_drive(vpin, DRIVE_STRONG)
                self._write_value(vpin, value)
            elif value in ALL_MODES:
                self._muxmode(vpin, value)
        if mode == OUTPUT:
            self._set_direction(linux_pin, OUTPUT)
            self._set_drive(linux_pin, DRIVE_STRONG)
            self._write_value(linux_pin, LOW)
        elif mode in (INPUT, INPUT_PULLUP, INPUT_PULLDOWN):
            self._set_direction(linux_pin, INPUT)
        elif mode == PWM:
            # _init_pwm is provided by the concrete board class.
            self._init_pwm(pin)
        if self.has_pinmux():
            self._set_direction(self.pinmux, self.HIGH)
        return True

    def digitalWrite(self, pin, state):
        """Write a value to a GPIO pin.

        The GPIO pin is assumed to be configured as OUTPUT

        Args:
            pin: Arduino pin number (0-19)
            state: pin state to be written (LOW-HIGH)
        """
        if pin not in self.GPIO_MAPPING:
            return
        self._write_value_to_handler(self.GPIO_MAPPING[pin], state)

    def digitalRead(self, pin):
        """Read GPIO pin's state.

        The GPIO pin is assumed to be configured as INPUT

        Args:
            pin: Arduino pin number (0-19)

        Returns:
            Current value of the GPIO pin as an Integer
        """
        if pin not in self.GPIO_MAPPING:
            return
        handler = self.gpio_handlers[self.GPIO_MAPPING[pin]]
        state = handler.read()
        handler.seek(0)  # rewind so the next read sees fresh data
        return int(state.strip())

    def analogWrite(self, pin, value):
        """Write analog output (PWM)

        The GPIO pin is assumed to be configured as PWM. Generates a PWM
        signal with the desired duty cycle. The value must be in range 0-255.

        Args:
            pin: Arduino PWM pin number (3, 5, 6, 9, 10, 11)
            value: the duty cycle: between 0 (always off) and 255 (always on)
        """
        if pin not in self.PWM_MAPPING:
            return
        # Clamp to the Arduino 8-bit duty-cycle range.
        if value < 0:
            value = 0
        elif value > 255:
            value = 255
        pwm = self.PWM_MAPPING[pin]
        if not self.enabled_pwm.get(pwm, False):
            self._enable_pwm(pwm)
        # _get_pwm_period is provided by the concrete board class.
        self._set_pwm_duty_cycle(pwm, self._get_pwm_period(pin) * value / 255)

    def analogRead(self, pin):
        """Read analog input from the pin

        The GPIO pin is assumed to be configured as ANALOG_INPUT.
        Returns values in range 0-1023

        Args:
            pin: Arduino analog pin number (14-19)

        Returns:
            Digital representation with 10 bits resolution (range 0-1023) of
            voltage on the pin.
        """
        if pin not in self.ADC_MAPPING:
            return
        handler = self.gpio_handlers[self.GPIO_MAPPING[pin]]
        voltage = handler.read()
        handler.seek(0)
        # ADC chip on the board reports voltages with 12 bits resolution.
        # To convert it to 10 bits just shift right 2 bits.
        return int(voltage.strip()) >> 2

    def setPWMPeriod(self, pin, period):
        """Set the PWM period

        Check if the period is valid for the current system and proceed to
        set the new period.

        Args:
            pin: Arduino PWM pin number (3, 5, 6, 9, 10, 11)
            period: period in nanoseconds
        """
        if period < self.PWM_MIN_PERIOD or period > self.PWM_MAX_PERIOD:
            return
        # _set_pwm_period is provided by the concrete board class.
        self._set_pwm_period(pin, period)

    def cleanup(self):
        """Do a general cleanup.

        Close all open handlers for reading and writing.
        Unexport all exported GPIO pins.
        Unexport all exported PWM channels.

        Calling this function is not mandatory but it's recommended once you
        are done using the library if it's being used with a larger
        application that runs for a long period of time.
        """
        for pin in self.pins_in_use:
            self._unexport_pin(pin)
        del self.pins_in_use[:]
        for handler in self.gpio_handlers.values():
            handler.close()
        self.gpio_handlers.clear()
        for pwm in self.exported_pwm:
            self._unexport_pwm(pwm)
        del self.exported_pwm[:]
        self.enabled_pwm.clear()

    def _select_muxing(self, mode, pin):
        # Pick the board-specific muxing table for the requested mode;
        # None means the pin does not support that mode.
        if mode == OUTPUT:
            return self.GPIO_MUX_OUTPUT[pin]
        elif mode == INPUT:
            return self.GPIO_MUX_INPUT[pin]
        elif mode == INPUT_PULLUP:
            return self.GPIO_MUX_INPUT_PULLUP[pin]
        elif mode == INPUT_PULLDOWN:
            return self.GPIO_MUX_INPUT_PULLDOWN[pin]
        elif mode == ANALOG_INPUT and pin in self.ADC_MAPPING:
            return self.GPIO_MUX_ANALOG_INPUT[pin]
        elif mode == PWM and pin in self.PWM_MAPPING:
            return self.GPIO_MUX_PWM[pin]
        return None

    def _open_digital_handler(self, linux_pin):
        # Keep the value file open for fast repeated reads/writes.
        try:
            f = open('/sys/class/gpio/gpio%d/value' % linux_pin, 'r+')
            self.gpio_handlers[linux_pin] = f
        except IOError:
            # Was a bare ``except:`` — narrowed so real programming errors
            # (and SystemExit/KeyboardInterrupt) are no longer swallowed.
            print("Failed opening digital value file for pin %d" % linux_pin)

    def _open_analog_handler(self, linux_pin, adc):
        # Keep the raw-voltage IIO file open for fast repeated reads.
        try:
            f = open('/sys/bus/iio/devices/iio:device%d/in_voltage%d_raw' % (self.adc_iio_device, adc), 'r+')
            self.gpio_handlers[linux_pin] = f
        except IOError:
            # Was a bare ``except:`` — see _open_digital_handler.
            print("Failed opening analog value file for pin %d" % linux_pin)

    def _write_value(self, linux_pin, state):
        value = 1
        if state == LOW:
            value = 0
        cmd = 'echo %d > /sys/class/gpio/gpio%d/value' % (value, linux_pin)
        self._exec_cmd(self._write_value.__name__, cmd)

    def _write_value_to_handler(self, linux_pin, state):
        handler = self.gpio_handlers[linux_pin]
        value = '0' if state == LOW else '1'
        handler.write(value)
        handler.seek(0)

    def _set_direction(self, linux_pin, direction):
        dirfile = '/sys/class/gpio/gpio%d/direction' % linux_pin
        # ``test -f`` keeps this a no-op for pins without a direction file.
        cmd = 'test -f %s && echo %s > %s 2>&1' % (dirfile, direction, dirfile)
        self._exec_cmd(self._set_direction.__name__, cmd)

    def _export_pin(self, linux_pin):
        self.pins_in_use.append(linux_pin)
        cmd = 'echo %d > /sys/class/gpio/export 2>&1' % linux_pin
        self._exec_cmd(self._export_pin.__name__, cmd)

    def _unexport_pin(self, linux_pin):
        cmd = 'echo %d > /sys/class/gpio/unexport 2>&1' % linux_pin
        self._exec_cmd(self._unexport_pin.__name__, cmd)

    def _muxmode(self, linux_pin, mode):
        cmd = 'echo %s > /sys/kernel/debug/gpio_debug/gpio%d/current_pinmux' % (mode, linux_pin)
        self._exec_cmd(self._muxmode.__name__, cmd)

    def _set_drive(self, linux_pin, drive):
        # Only boards without a pinmux expose a sysfs 'drive' attribute.
        if not self.has_pinmux():
            cmd = 'echo %s > /sys/class/gpio/gpio%d/drive > /dev/null' % (drive, linux_pin)
            self._exec_cmd(self._set_drive.__name__, cmd)

    def _export_pwm(self, channel):
        self.exported_pwm.append(channel)
        cmd = 'echo %d > /sys/class/pwm/pwmchip0/export 2>&1' % channel
        self._exec_cmd(self._export_pwm.__name__, cmd)

    def _unexport_pwm(self, channel):
        cmd = 'echo %d > /sys/class/pwm/pwmchip0/unexport 2>&1' % channel
        self._exec_cmd(self._unexport_pwm.__name__, cmd)

    def _set_pwm_duty_cycle(self, channel, duty_cycle):
        cmd = 'echo %d > /sys/class/pwm/pwmchip0/pwm%d/duty_cycle' % (duty_cycle, channel)
        self._exec_cmd(self._set_pwm_duty_cycle.__name__, cmd)

    def _enable_pwm(self, pwm):
        self.enabled_pwm[pwm] = True
        cmd = 'echo 1 > /sys/class/pwm/pwmchip0/pwm%d/enable' % pwm
        self._exec_cmd(self._enable_pwm.__name__, cmd)

    def __debug(self, func_name, cmd):
        # Trace every shell command when debug mode is on.
        if self.debug:
            now = datetime.datetime.now().strftime("%B %d %I:%M:%S")
            print('{0} {1: <20}{2}'.format(now, func_name + ':', cmd))

    def _exec_cmd(self, caller, command):
        # All sysfs interaction funnels through here (via os.system).
        self.__debug(caller, command)
        os.system(command)
# Mirror the module-level mode/state constants onto GPIOBase so board
# objects can be used Arduino-style (e.g. ``gpio.OUTPUT``, ``gpio.HIGH``).
_aliases = ('INPUT', 'INPUT_PULLUP', 'INPUT_PULLDOWN', 'OUTPUT',
            'ANALOG_INPUT', 'PWM', 'LOW', 'HIGH')
for _alias in _aliases:
    setattr(GPIOBase, _alias, globals()[_alias])
del _aliases, _alias  # leave the module namespace exactly as before
class GPIOGalileo(GPIOBase):
"""Class for managing GPIO pinout on Intel® Galileo board
See docs/ directory for more information.
"""
GPIO_MAPPING = {
0: 50,
1: 51,
2: 32,
3: 18,
4: 28,
5: 17,
6: 24,
7: 27,
8: 26,
9: 19,
10: 16,
11: 25,
12: 38,
13: 39,
14: 44,
15: 45,
16: 46,
17: 47,
18: 48,
19: 49,
}
ADC_MAPPING = {
14: 0,
15: 1,
16: 2,
17: 3,
18: 4,
19: 5,
}
PWM_MAPPING = {
3: 3,
5: 5,
6: 6,
9: 1,
10: 7,
11: 4,
}
GPIO_MUX_OUTPUT = {
0: ((40, HIGH), ),
1: ((41, HIGH), ),
2: ((31, HIGH), ),
3: ((30, HIGH), ),
4: (),
5: (),
6: (),
7: (),
8: (),
9: (),
10: ((41, HIGH), ),
11: ((43, HIGH), ),
12: ((54, HIGH), ),
13: ((55, HIGH), ),
14: ((37, HIGH), ),
15: ((36, HIGH), ),
16: ((23, HIGH), ),
| |
<filename>main.py
#######################################################################
# Copyright (C) 2017 <NAME>(<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import logging
from agent import *
from component import *
from utils import *
from model import *
## cart pole
def dqn_cart_pole():
    """Train DQN on CartPole-v0 with an episode-based loop."""
    game = 'CartPole-v0'
    config = Config()
    config.task_fn = lambda: ClassicalControl(game, max_steps=200)
    config.evaluation_env = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    config.network_fn = lambda state_dim, action_dim: VanillaNet(action_dim, FCBody(state_dim))
    # config.network_fn = lambda state_dim, action_dim: DuelingNet(action_dim, FCBody(state_dim))
    config.policy_fn = lambda: GreedyPolicy(epsilon=1.0, final_step=10000, min_epsilon=0.1)
    config.replay_fn = lambda: Replay(memory_size=10000, batch_size=10)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 1000
    config.logger = Logger('./log', logger)
    config.double_q = True
    # config.double_q = False
    run_episodes(DQNAgent(config))
def a2c_cart_pole():
    """Train A2C on CartPole-v0 with 5 parallel workers."""
    config = Config()
    name = 'CartPole-v0'
    # name = 'MountainCar-v0'
    task_fn = lambda log_dir: ClassicalControl(name, max_steps=200, log_dir=log_dir)
    config.evaluation_env = task_fn(None)
    config.num_workers = 5
    config.task_fn = lambda: ParallelizedTask(task_fn, config.num_workers,
                                             log_dir=get_default_log_dir(a2c_cart_pole.__name__))
    config.optimizer_fn = lambda params: torch.optim.Adam(params, 0.001)
    config.network_fn = lambda state_dim, action_dim: ActorCriticNet(action_dim, FCBody(state_dim))
    config.policy_fn = SamplePolicy
    config.discount = 0.99
    config.logger = Logger('./log', logger)
    config.gae_tau = 1.0
    config.entropy_weight = 0.01
    config.rollout_length = 5
    run_iterations(A2CAgent(config))
def categorical_dqn_cart_pole():
    """Train categorical (C51-style) DQN on CartPole-v0."""
    game = 'CartPole-v0'
    config = Config()
    config.task_fn = lambda: ClassicalControl(game, max_steps=200)
    config.evaluation_env = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    # Lazy lambda: categorical_n_atoms is read at call time, so setting it
    # further down (before the agent builds the network) is fine.
    config.network_fn = lambda state_dim, action_dim: \
        CategoricalNet(action_dim, config.categorical_n_atoms, FCBody(state_dim))
    config.policy_fn = lambda: GreedyPolicy(epsilon=0.1, final_step=10000, min_epsilon=0.1)
    config.replay_fn = lambda: Replay(memory_size=10000, batch_size=10)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 100
    config.logger = Logger('./log', logger, skip=True)
    # Support of the value distribution: 50 atoms over [-100, 100].
    config.categorical_v_max = 100
    config.categorical_v_min = -100
    config.categorical_n_atoms = 50
    run_episodes(CategoricalDQNAgent(config))
def quantile_regression_dqn_cart_pole():
    """Train quantile-regression DQN on CartPole-v0."""
    config = Config()
    config.task_fn = lambda: ClassicalControl('CartPole-v0', max_steps=200)
    config.evaluation_env = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    # Lazy lambda: num_quantiles is read at call time (set below).
    config.network_fn = lambda state_dim, action_dim: \
        QuantileNet(action_dim, config.num_quantiles, FCBody(state_dim))
    config.policy_fn = lambda: GreedyPolicy(epsilon=0.1, final_step=10000, min_epsilon=0.1)
    config.replay_fn = lambda: Replay(memory_size=10000, batch_size=10)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 100
    config.logger = Logger('./log', logger, skip=True)
    config.num_quantiles = 20
    run_episodes(QuantileRegressionDQNAgent(config))
def n_step_dqn_cart_pole():
    """Train n-step DQN on CartPole-v0 with 5 parallel workers."""
    config = Config()
    task_fn = lambda log_dir: ClassicalControl('CartPole-v0', max_steps=200, log_dir=log_dir)
    config.evaluation_env = task_fn(None)
    config.num_workers = 5
    config.task_fn = lambda: ParallelizedTask(task_fn, config.num_workers)
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    config.network_fn = lambda state_dim, action_dim: VanillaNet(action_dim, FCBody(state_dim))
    config.policy_fn = lambda: GreedyPolicy(epsilon=1.0, final_step=10000, min_epsilon=0.1)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.rollout_length = 5
    config.logger = Logger('./log', logger)
    run_iterations(NStepDQNAgent(config))
def ppo_cart_pole():
    """Train PPO on CartPole-v0 with 5 parallel workers."""
    config = Config()
    task_fn = lambda log_dir: ClassicalControl('CartPole-v0', max_steps=200, log_dir=log_dir)
    config.num_workers = 5
    config.task_fn = lambda: ParallelizedTask(task_fn, config.num_workers)
    # Optimizer and network are bundled inside the actor-critic wrapper
    # rather than set on config directly.
    optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    network_fn = lambda state_dim, action_dim: ActorCriticNet(action_dim, FCBody(state_dim))
    config.network_fn = lambda state_dim, action_dim: \
        CategoricalActorCriticWrapper(state_dim, action_dim, network_fn, optimizer_fn)
    config.discount = 0.99
    config.logger = Logger('./log', logger)
    config.use_gae = True
    config.gae_tau = 0.95
    config.entropy_weight = 0.01
    config.gradient_clip = 0.5
    config.rollout_length = 128
    config.optimization_epochs = 10
    config.num_mini_batches = 4
    config.ppo_ratio_clip = 0.2
    config.iteration_log_interval = 1
    run_iterations(PPOAgent(config))
## Atari games
def dqn_pixel_atari(name):
    """Train DQN from pixels on the Atari game ``name``.

    Args:
        name: Atari environment id (e.g. 'BreakoutNoFrameskip-v4').
    """
    config = Config()
    config.history_length = 4
    config.task_fn = lambda: PixelAtari(name, frame_skip=4, history_length=config.history_length,
                                        log_dir=get_default_log_dir(dqn_pixel_atari.__name__))
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=0.00025, alpha=0.95, eps=0.01)
    config.network_fn = lambda state_dim, action_dim: VanillaNet(action_dim, NatureConvBody(), gpu=0)
    # config.network_fn = lambda state_dim, action_dim: DuelingNet(action_dim, NatureConvBody(), gpu=0)
    config.policy_fn = lambda: GreedyPolicy(epsilon=1.0, final_step=1000000, min_epsilon=0.1)
    config.replay_fn = lambda: Replay(memory_size=100000, batch_size=32)
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.discount = 0.99
    config.target_network_update_freq = 10000
    config.exploration_steps = 50000
    config.logger = Logger('./log', logger)
    # config.double_q = True
    config.double_q = False
    run_episodes(DQNAgent(config))
def a2c_pixel_atari(name):
    """Train A2C from pixels on the Atari game ``name`` (16 workers)."""
    config = Config()
    config.history_length = 4
    config.num_workers = 16
    task_fn = lambda log_dir: PixelAtari(name, frame_skip=4, history_length=config.history_length, log_dir=log_dir)
    config.task_fn = lambda: ParallelizedTask(task_fn, config.num_workers, log_dir=get_default_log_dir(a2c_pixel_atari.__name__))
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=0.0007)
    config.network_fn = lambda state_dim, action_dim: \
        ActorCriticNet(action_dim, NatureConvBody(), gpu=0)
    config.policy_fn = SamplePolicy
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.discount = 0.99
    config.use_gae = False
    config.gae_tau = 0.97
    config.entropy_weight = 0.01
    config.rollout_length = 5
    config.gradient_clip = 0.5
    config.logger = Logger('./log', logger, skip=True)
    run_iterations(A2CAgent(config))
def a2c_lstm_pixel_atari(name):
    """Train A2C with an LSTM head from pixels on the Atari game ``name``."""
    # NOTE(review): this is the only launcher using MultiGPUConfig and
    # ``A2C`` (the others use Config/A2CAgent) — confirm both exist.
    config = MultiGPUConfig()
    config.history_length = 4
    config.num_workers = 16
    task_fn = lambda log_dir, worker_id: PixelAtari(name, frame_skip=4, history_length=config.history_length,
                                                    log_dir=log_dir, worker_id=worker_id)
    config.task_fn = lambda: ParallelizedTask(task_fn, config.num_workers,
                                             log_dir=get_default_log_dir(a2c_lstm_pixel_atari.__name__),
                                             worker_ids=range(config.num_workers))
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=0.0007)
    config.network_fn = lambda state_dim, action_dim: \
        ActorCriticLSTM(action_dim, NatureConvBody(), use_internal_state=False, gpu=config.gpus[0])
    config.policy_fn = SamplePolicy
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.discount = 0.99
    config.use_gae = False
    config.gae_tau = 0.97
    config.entropy_weight = 0.01
    config.rollout_length = 5
    config.gradient_clip = 0.5
    config.logger = Logger('./log', logger, skip=True)
    run_iterations(A2C(config))
def categorical_dqn_pixel_atari(name):
    """Train categorical (C51) DQN from pixels on the Atari game ``name``."""
    config = Config()
    config.history_length = 4
    config.task_fn = lambda: PixelAtari(name, frame_skip=4, history_length=config.history_length,
                                        log_dir=get_default_log_dir(categorical_dqn_pixel_atari.__name__))
    config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=0.00025, eps=0.01 / 32)
    # Lazy lambda: categorical_n_atoms is read at call time (set below).
    config.network_fn = lambda state_dim, action_dim: \
        CategoricalNet(action_dim, config.categorical_n_atoms, NatureConvBody(), gpu=0)
    config.policy_fn = lambda: GreedyPolicy(epsilon=1.0, final_step=1000000, min_epsilon=0.1)
    config.replay_fn = lambda: Replay(memory_size=100000, batch_size=32)
    config.discount = 0.99
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.target_network_update_freq = 10000
    config.exploration_steps = 50000
    config.logger = Logger('./log', logger)
    config.double_q = False
    # C51 defaults: 51 atoms over [-10, 10].
    config.categorical_v_max = 10
    config.categorical_v_min = -10
    config.categorical_n_atoms = 51
    run_episodes(CategoricalDQNAgent(config))
def quantile_regression_dqn_pixel_atari(name):
    """Train QR-DQN (quantile regression DQN) on the pixel Atari game `name`."""
    config = Config()
    config.history_length = 4
    config.task_fn = lambda: PixelAtari(name, frame_skip=4, history_length=config.history_length,
                                        log_dir=get_default_log_dir(quantile_regression_dqn_pixel_atari.__name__))
    config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=0.00005, eps=0.01 / 32)
    # NOTE: reads config.num_quantiles, assigned at the bottom -- safe since
    # the lambda is only invoked after this function finishes configuring.
    config.network_fn = lambda state_dim, action_dim: \
        QuantileNet(action_dim, config.num_quantiles, NatureConvBody(), gpu=0)
    config.policy_fn = lambda: GreedyPolicy(epsilon=1.0, final_step=1000000, min_epsilon=0.01)
    config.replay_fn = lambda: Replay(memory_size=100000, batch_size=32)
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.discount = 0.99
    config.target_network_update_freq = 10000
    config.exploration_steps= 50000
    config.logger = Logger('./log', logger)
    config.double_q = False
    config.num_quantiles = 200
    run_episodes(QuantileRegressionDQNAgent(config))
def n_step_dqn_pixel_atari(name):
    """Train synchronous n-step DQN (16 workers) on the pixel Atari game `name`."""
    config = Config()
    config.history_length = 4
    task_fn = lambda log_dir: PixelAtari(name, frame_skip=4, history_length=config.history_length, log_dir=log_dir)
    config.num_workers = 16
    config.task_fn = lambda: ParallelizedTask(task_fn, config.num_workers,
                                              log_dir=get_default_log_dir(n_step_dqn_pixel_atari.__name__))
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=1e-4, alpha=0.99, eps=1e-5)
    config.network_fn = lambda state_dim, action_dim: VanillaNet(action_dim, NatureConvBody(), gpu=0)
    config.policy_fn = lambda: GreedyPolicy(epsilon=1.0, final_step=1000000, min_epsilon=0.05)
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.discount = 0.99
    config.target_network_update_freq = 10000
    # n-step return horizon.
    config.rollout_length = 5
    config.gradient_clip = 5
    config.logger = Logger('./log', logger)
    run_iterations(NStepDQNAgent(config))
def ppo_pixel_atari(name):
    """Train PPO with 16 parallel workers on the pixel Atari game `name`."""
    config = Config()
    config.history_length = 4
    task_fn = lambda log_dir: PixelAtari(name, frame_skip=4, history_length=config.history_length, log_dir=log_dir)
    config.num_workers = 16
    config.task_fn = lambda: ParallelizedTask(task_fn, config.num_workers,
                                              log_dir=get_default_log_dir(ppo_pixel_atari.__name__))
    # The wrapper builds network and optimizer together for the
    # discrete-action (categorical) policy head.
    optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=0.00025)
    network_fn = lambda state_dim, action_dim: ActorCriticNet(action_dim, NatureConvBody(), gpu=0)
    config.network_fn = lambda state_dim, action_dim: \
        CategoricalActorCriticWrapper(state_dim, action_dim, network_fn, optimizer_fn)
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.discount = 0.99
    config.logger = Logger('./log', logger)
    config.use_gae = True
    config.gae_tau = 0.95
    config.entropy_weight = 0.01
    config.gradient_clip = 0.5
    config.rollout_length = 128
    config.optimization_epochs = 4
    config.num_mini_batches = 4
    config.ppo_ratio_clip = 0.1
    config.iteration_log_interval = 1
    run_iterations(PPOAgent(config))
def dqn_ram_atari(name):
    """Train DQN on the RAM-state (non-pixel) Atari game `name`."""
    config = Config()
    config.task_fn = lambda: RamAtari(name, no_op=30, frame_skip=4,
                                      log_dir=get_default_log_dir(dqn_ram_atari.__name__))
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=0.00025, alpha=0.95, eps=0.01)
    config.network_fn = lambda state_dim, action_dim: VanillaNet(action_dim, FCBody(state_dim), gpu=0)
    config.policy_fn = lambda: GreedyPolicy(epsilon=0.1, final_step=1000000, min_epsilon=0.1)
    config.replay_fn = lambda: Replay(memory_size=100000, batch_size=32)
    # RAM bytes rescaled by 1/128.
    config.state_normalizer = RescaleNormalizer(1.0 / 128)
    config.reward_normalizer = SignNormalizer()
    config.discount = 0.99
    config.target_network_update_freq = 10000
    config.max_episode_length = 0
    # NOTE(review): 100 exploration steps is far below the 50000 used by the
    # pixel variants in this file -- looks like a debugging leftover; confirm.
    config.exploration_steps= 100
    config.logger = Logger('./log', logger)
    config.double_q = True
    # config.double_q = False
    run_episodes(DQNAgent(config))
## continuous control
def ppo_continuous():
    """Train PPO on a continuous-control task (RoboschoolAnt by default).

    Swap the active task_fn line to train on a different environment.
    """
    config = Config()
    config.num_workers = 1
    # task_fn = lambda log_dir: Pendulum(log_dir=log_dir)
    # task_fn = lambda log_dir: Roboschool('RoboschoolInvertedPendulum-v1', log_dir=log_dir)
    task_fn = lambda log_dir: Roboschool('RoboschoolAnt-v1', log_dir=log_dir)
    # task_fn = lambda log_dir: Roboschool('RoboschoolReacher-v1', log_dir=log_dir)
    # task_fn = lambda log_dir: Roboschool('RoboschoolHopper-v1', log_dir=log_dir)
    # task_fn = lambda log_dir: DMControl('cartpole', 'balance', log_dir=log_dir)
    # task_fn = lambda log_dir: DMControl('hopper', 'hop', log_dir=log_dir)
    config.task_fn = lambda: ParallelizedTask(task_fn, config.num_workers, log_dir=get_default_log_dir(ppo_continuous.__name__))
    # Separate Gaussian actor and critic, each with its own Adam optimizer.
    actor_network_fn = lambda state_dim, action_dim: GaussianActorNet(
        action_dim, FCBody(state_dim))
    critic_network_fn = lambda state_dim: GaussianCriticNet(FCBody(state_dim))
    actor_optimizer_fn = lambda params: torch.optim.Adam(params, 3e-4, eps=1e-5)
    critic_optimizer_fn = lambda params: torch.optim.Adam(params, 3e-4, eps=1e-5)
    config.network_fn = lambda state_dim, action_dim: \
        GaussianActorCriticWrapper(state_dim, action_dim, actor_network_fn,
                                   critic_network_fn, actor_optimizer_fn,
                                   critic_optimizer_fn)
    # config.state_normalizer = RunningStatsNormalizer()
    config.discount = 0.99
    config.use_gae = True
    config.gae_tau = 0.95
    config.gradient_clip = 0.5
    config.rollout_length = 2048
    config.optimization_epochs = 10
    config.num_mini_batches = 32
    config.ppo_ratio_clip = 0.2
    config.iteration_log_interval = 1
    config.logger = Logger('./log', logger)
    run_iterations(PPOAgent(config))
def ddpg_continuous():
    """Train DDPG on a continuous-control task (RoboschoolHopper by default)."""
    config = Config()
    log_dir = get_default_log_dir(ddpg_continuous.__name__)
    # config.task_fn = lambda: Pendulum(log_dir=log_dir)
    # config.task_fn = lambda: Roboschool('RoboschoolInvertedPendulum-v1', log_dir=log_dir)
    # config.task_fn = lambda: Roboschool('RoboschoolReacher-v1', log_dir=log_dir)
    # NOTE(review): unlike the commented alternatives, no log_dir is passed to
    # the training env below (only the evaluation env logs) -- confirm intended.
    config.task_fn = lambda: Roboschool('RoboschoolHopper-v1')
    # config.task_fn = lambda: Roboschool('RoboschoolAnt-v1', log_dir=log_dir)
    # config.task_fn = lambda: Roboschool('RoboschoolWalker2d-v1', log_dir=log_dir)
    # config.task_fn = lambda: DMControl('cartpole', 'balance', log_dir=log_dir)
    # config.task_fn = lambda: DMControl('finger', 'spin', log_dir=log_dir)
    config.evaluation_env = Roboschool('RoboschoolHopper-v1', log_dir=log_dir)
    config.actor_network_fn = lambda state_dim, action_dim: DeterministicActorNet(
        action_dim, FCBody(state_dim, (300, 200)))
    config.critic_network_fn = lambda state_dim, action_dim: DeterministicCriticNet(
        TwoLayerFCBodyWithAction(state_dim, action_dim, [400, 300]))
    config.actor_optimizer_fn = lambda params: torch.optim.Adam(params, lr=1e-4)
    config.critic_optimizer_fn = lambda params: torch.optim.Adam(params, lr=1e-3, weight_decay=0.01)
    config.replay_fn = lambda: Replay(memory_size=1000000, batch_size=64)
    config.discount = 0.99
    config.state_normalizer = RunningStatsNormalizer()
    # Gaussian exploration noise annealed from 0.3 to 0 over 1e6 steps.
    config.random_process_fn = lambda action_dim: GaussianProcess(action_dim, LinearSchedule(0.3, 0, 1e6))
    config.min_memory_size = 64
    config.target_network_mix = 1e-3
    config.logger = Logger('./log', logger)
    run_episodes(DDPGAgent(config))
def | |
from sympy import (
Basic,
Symbol,
sin,
cos,
atan,
exp,
sqrt,
Rational,
Float,
re,
pi,
sympify,
Add,
Mul,
Pow,
Mod,
I,
log,
S,
Max,
symbols,
oo,
zoo,
Integer,
sign,
im,
nan,
Dummy,
factorial,
comp,
floor,
)
from sympy.core.parameters import distribute
from sympy.core.expr import unchanged
from sympy.utilities.iterables import cartes
from sympy.testing.pytest import XFAIL, raises
from sympy.testing.randtest import verify_numerically
# Shared test symbols; b is assumed positive so powers of b combine freely.
a, c, x, y, z = symbols("a,c,x,y,z")
b = Symbol("b", positive=True)
def same_and_same_prec(a, b):
    """Stricter Float comparison: equal value AND identical binary precision."""
    if a != b:
        return False
    return a._prec == b._prec
def test_bug1():
    # Regression: expanding x in a series must not change the result of
    # re(x) -- the inequality holds both before and after the series call.
    assert re(x) != x
    x.series(x, 0, 1)
    assert re(x) != x
def test_Symbol():
    """Symbol arithmetic plus complex/real/imaginary assumption handling."""
    prod = a * b
    assert prod == a * b
    assert a * b * b == a * b ** 2
    assert a * b * b + c == c + a * b ** 2
    assert a * b * b - c == -c + a * b ** 2
    s = Symbol("x", complex=True, real=False)
    assert s.is_imaginary is None  # could be I or 1 + I
    s = Symbol("x", complex=True, imaginary=False)
    assert s.is_real is None  # could be 1 or 1 + I
    s = Symbol("x", real=True)
    assert s.is_complex
    s = Symbol("x", imaginary=True)
    assert s.is_complex
    s = Symbol("x", real=False, imaginary=False)
    assert s.is_complex is None  # might be a non-number
def test_arit0():
    """Basic Add/Mul arithmetic: like terms collect and cancel automatically."""
    five = Rational(5)
    expr = a * b
    assert expr == a * b
    expr = a * b + b * a
    assert expr == 2 * a * b
    expr = a * b + b * a + a * b + five * b * a
    assert expr == 8 * a * b
    expr = a * b + b * a + a * b + five * b * a + a
    assert expr == a + 8 * a * b
    expr = a + a
    assert expr == 2 * a
    expr = a + b + a
    assert expr == b + 2 * a
    expr = a + b * b + a + b * b
    assert expr == 2 * a + 2 * b ** 2
    expr = a + Rational(2) + b * b + a + b * b + five
    assert expr == 7 + 2 * a + 2 * b ** 2
    expr = (a + b * b + a + b * b) * five
    assert expr == 5 * (2 * a + 2 * b ** 2)
    expr = (a * b * c + c * b * a + b * a * c) * five
    assert expr == 15 * a * b * c
    expr = (a * b * c + c * b * a + b * a * c) * five - Rational(15) * a * b * c
    assert expr == Rational(0)
    expr = Rational(50) * (a - a)
    assert expr == Rational(0)
    expr = b * a - b - a * b + b
    assert expr == Rational(0)
    expr = a * b + c ** five
    assert expr == a * b + c ** 5
    expr = a / b
    assert expr == a * b ** (-1)
    expr = a * 2 * 2
    assert expr == 4 * a
    expr = 2 + a * 2 / 2
    assert expr == 2 + a
    expr = 2 - a - 2
    assert expr == -a
    expr = 2 * a * 2
    assert expr == 4 * a
    expr = 2 / a / 2
    assert expr == a ** (-1)
    expr = 2 ** a ** 2
    assert expr == 2 ** (a ** 2)
    expr = -(1 + a)
    assert expr == -1 - a
    expr = S.Half * (1 + a)
    assert expr == S.Half + a / 2
def test_div():
    """Division is represented as multiplication by a power of -1."""
    quotient = a / b
    assert quotient == a * b ** (-1)
    quotient = a / b + c / 2
    assert quotient == a * b ** (-1) + Rational(1) / 2 * c
    quotient = (1 - b) / (b - 1)
    assert quotient == (1 + -b) * ((-1) + b) ** (-1)
def test_pow():
    """Power construction, automatic simplification, and expansion rules."""
    n1 = Rational(1)
    n2 = Rational(2)
    n5 = Rational(5)
    e = a * a
    assert e == a ** 2
    e = a * a * a
    assert e == a ** 3
    e = a * a * a * a ** Rational(6)
    assert e == a ** 9
    e = a * a * a * a ** Rational(6) - a ** Rational(9)
    assert e == Rational(0)
    e = a ** (b - b)
    assert e == Rational(1)
    e = (a + Rational(1) - a) ** b
    assert e == Rational(1)
    e = (a + b + c) ** n2
    assert e == (a + b + c) ** 2
    assert e.expand() == 2 * b * c + 2 * a * c + 2 * a * b + a ** 2 + c ** 2 + b ** 2
    e = (a + b) ** n2
    assert e == (a + b) ** 2
    assert e.expand() == 2 * a * b + a ** 2 + b ** 2
    e = (a + b) ** (n1 / n2)
    assert e == sqrt(a + b)
    assert e.expand() == sqrt(a + b)
    n = n5 ** (n1 / n2)
    assert n == sqrt(5)
    e = n * a * b - n * b * a
    assert e == Rational(0)
    e = n * a * b + n * b * a
    assert e == 2 * a * b * sqrt(5)
    # The original repeated this assertion verbatim; the duplicate is removed.
    assert e.diff(a) == 2 * b * sqrt(5)
    e = a / b ** 2
    assert e == a * b ** (-2)
    assert sqrt(2 * (1 + sqrt(2))) == (2 * (1 + 2 ** S.Half)) ** S.Half
    x = Symbol("x")
    y = Symbol("y")
    assert ((x * y) ** 3).expand() == y ** 3 * x ** 3
    assert ((x * y) ** -3).expand() == y ** -3 * x ** -3
    assert (x ** 5 * (3 * x) ** (3)).expand() == 27 * x ** 8
    assert (x ** 5 * (-3 * x) ** (3)).expand() == -27 * x ** 8
    assert (x ** 5 * (3 * x) ** (-3)).expand() == x ** 2 * Rational(1, 27)
    assert (x ** 5 * (-3 * x) ** (-3)).expand() == x ** 2 * Rational(-1, 27)
    # expand_power_exp
    assert (x ** (y ** (x + exp(x + y)) + z)).expand(deep=False) == x ** z * x ** (
        y ** (x + exp(x + y))
    )
    assert (x ** (y ** (x + exp(x + y)) + z)).expand() == x ** z * x ** (
        y ** x * y ** (exp(x) * exp(y))
    )
    n = Symbol("n", even=False)
    k = Symbol("k", even=True)
    o = Symbol("o", odd=True)
    assert unchanged(Pow, -1, x)
    assert unchanged(Pow, -1, n)
    assert (-2) ** k == 2 ** k
    assert (-1) ** k == 1
    assert (-1) ** o == -1
def test_pow2():
    """Negative bases must not be simplified unsoundly under fractional powers."""
    # x**(2*y) is always (x**y)**2 but is only (x**2)**y if
    # x.is_positive or y.is_integer
    # let x = 1 to see why the following are not true.
    assert (-x) ** Rational(2, 3) != x ** Rational(2, 3)
    assert (-x) ** Rational(5, 7) != -(x ** Rational(5, 7))
    assert ((-x) ** 2) ** Rational(1, 3) != ((-x) ** Rational(1, 3)) ** 2
    assert sqrt(x ** 2) != x
def test_pow3():
    """2**(3/2) can be written either as 2*sqrt(2) or as sqrt(8)."""
    cube = sqrt(2) ** 3
    assert cube == 2 * sqrt(2)
    assert cube == sqrt(8)
def test_mod_pow():
for s, t, u, v in [
(4, 13, | |
number of grid points"""
return self.params.gridPointsX*self.params.gridPointsY*\
self.params.gridPointsZ
def autocenterCoarseGrid(self):
"""Autocenters coarse grid"""
coords = self.getCoords()
center=(Numeric.maximum.reduce(coords)+Numeric.minimum.reduce(coords))*0.5
center = center.tolist()
self.params.coarseCenterX = round(center[0],4)
self.params.coarseCenterY = round(center[1],4)
self.params.coarseCenterZ = round(center[2],4)
self.refreshGridPage()
def autosizeCoarseGrid(self):
"""Autosizes coarse grid"""
coords = self.getCoords()
length = Numeric.maximum.reduce(coords) - Numeric.minimum.reduce(coords)
self.params.coarseLengthX = self.params.CFAC*(length.tolist())[0] + 10.
self.params.coarseLengthY = self.params.CFAC*(length.tolist())[1] + 10.
self.params.coarseLengthZ = self.params.CFAC*(length.tolist())[2] + 10.
self.refreshGridPage()
def autocenterFineGrid(self):
"""Autocenters fine grid"""
coords = self.getCoords()
center=(Numeric.maximum.reduce(coords)+Numeric.minimum.reduce(coords))*0.5
center = center.tolist()
self.params.fineCenterX = round(center[0],4)
self.params.fineCenterY = round(center[1],4)
self.params.fineCenterZ = round(center[2],4)
self.refreshGridPage()
def autosizeFineGrid(self):
"""Autosizes fine grid"""
coords = self.getCoords()
length=Numeric.maximum.reduce(coords)-Numeric.minimum.reduce(coords)
self.params.fineLengthX = (length.tolist())[0] + 10.0
self.params.fineLengthY = (length.tolist())[1] + 10.0
self.params.fineLengthZ = (length.tolist())[2] + 10.0
self.refreshGridPage()
def getCoords(self):
"""Returns coordinates of atoms included in calculation"""
if not hasattr(self, 'mol1Name'): return [[0,0,0]]
mol = self.vf.getMolFromName(self.mol1Name)
coords = mol.findType(Atom).coords
if self.params.calculationType == 'Binding energy':
if hasattr(self, 'mol2Name'):
mol = self.vf.getMolFromName(self.mol2Name)
if mol:
coords += mol.findType(Atom).coords
if hasattr(self, 'complexName'):
mol = self.vf.getMolFromName(self.complexName)
if mol:
coords += mol.findType(Atom).coords
return coords
# Callbacks
def refreshCalculationPage(self):
"""Refreshes calculation page"""
if self.cmdForms.has_key('default'):
descr = self.cmdForms['default'].descr
if(self.params.calculationType=='Binding energy'):
apply(descr.entryByName['molecule2Select']['widget'].grid,
(), descr.entryByName['molecule2Select']['gridcfg'])
apply(descr.entryByName['molecule2']['widget'].grid, (),
descr.entryByName['molecule2']['gridcfg'])
apply(descr.entryByName['complexSelect']['widget'].grid, (),
descr.entryByName['complexSelect']['gridcfg'])
apply(descr.entryByName['complex']['widget'].grid, (),
descr.entryByName['complex']['gridcfg'])
#self.params.energyOutput = 'Total'
elif(self.params.calculationType=='Solvation energy'):
descr.entryByName['molecule2Select']['widget'].grid_forget()
descr.entryByName['molecule2']['widget'].grid_forget()
descr.entryByName['complexSelect']['widget'].grid_forget()
descr.entryByName['complex']['widget'].grid_forget()
#self.params.energyOutput = 'Total'
elif(self.params.calculationType=='Electrostatic potential'):
descr.entryByName['molecule2Select']['widget'].grid_forget()
descr.entryByName['molecule2']['widget'].grid_forget()
descr.entryByName['complexSelect']['widget'].grid_forget()
descr.entryByName['complex']['widget'].grid_forget()
descr.entryByName['calculationType']['widget'].\
selectitem(self.params.calculationType)
descr.entryByName['pbeType']['widget'].\
selectitem(self.params.pbeType)
descr.entryByName['boundaryConditions']['widget'].\
selectitem(self.params.boundaryConditions)
descr.entryByName['chargeDiscretization']['widget'].\
selectitem(self.params.chargeDiscretization)
descr.entryByName['surfaceCalculation']['widget'].\
selectitem(self.params.surfaceCalculation)
descr.entryByName['sdens']['widget'].setentry(self.params.sdens)
descr.entryByName['splineWindow']['widget'].\
setentry(self.params.splineWindow)
if self.params.surfaceCalculation == 'Cubic B-spline' or \
self.params.surfaceCalculation == '7th Order Polynomial':
apply(descr.entryByName['splineWindowLabel']['widget'].grid,
(), descr.entryByName['splineWindowLabel']['gridcfg'])
apply(descr.entryByName['splineWindow']['widget'].grid, (),
descr.entryByName['splineWindow']['gridcfg'])
descr.entryByName['sdensLabel']['widget'].grid_forget()
descr.entryByName['sdens']['widget'].grid_forget()
else:
apply(descr.entryByName['sdensLabel']['widget'].grid,
(), descr.entryByName['sdensLabel']['gridcfg'])
apply(descr.entryByName['sdens']['widget'].grid, (),
descr.entryByName['sdens']['gridcfg'])
descr.entryByName['splineWindowLabel']['widget'].grid_forget()
descr.entryByName['splineWindow']['widget'].grid_forget()
descr.entryByName['molecule1']['widget'].\
setentry(self.params.molecule1Path)
descr.entryByName['molecule2']['widget'].\
setentry(self.params.molecule2Path)
descr.entryByName['complex']['widget'].\
setentry(self.params.complexPath)
descr.entryByName['energyOutput']['widget'].\
selectitem(self.params.energyOutput)
descr.entryByName['forceOutput']['widget'].\
selectitem(self.params.forceOutput)
descr.entryByName['forceOutput']['widget'].\
selectitem(self.params.forceOutput)
descr.entryByName['Profiles']['widget'].\
selectitem(self.params.name)
def testCalculationWidgets(self):
"""Tests calculation widgets"""
if self.cmdForms.has_key('default'):
descr = self.cmdForms['default'].descr
if(descr.entryByName['splineWindow']['widget'].get() == ''):
self.errorMsg = 'You must enter a spline window value.'
errorform = self.showForm('error',modal=1,blocking=1,force = 1)
return 1
return 0
    def calculationParamUpdate(self, selectItem=0):
        """Updates calculation parameters from the 'default' form widgets.

        selectItem is either 0 (plain refresh) or a calculation-type string
        chosen in the GUI.  Returns "ERROR" when widget validation fails.
        """
        if self.cmdForms.has_key('default'):
            if selectItem == 'Binding energy':
                # Direct selection of binding energy only switches the type
                # (and its extra widgets); no other widget values are read.
                self.params.calculationType = 'Binding energy'
                self.refreshCalculationPage()
                return
            descr = self.cmdForms['default'].descr
            # Prevent forcing a particular calculation type on the user
            self.params.calculationType = descr.entryByName\
                ['calculationType']['widget'].get()
            if self.testCalculationWidgets()==0:
                # Validation passed: copy every widget value into params.
                self.params.calculationType = descr.entryByName\
                    ['calculationType']['widget'].get()
                self.params.pbeType = descr.entryByName['pbeType']['widget'].\
                    get()
                self.params.boundaryConditions = descr.entryByName\
                    ['boundaryConditions']['widget'].get()
                self.params.chargeDiscretization = descr.entryByName\
                    ['chargeDiscretization']['widget'].get()
                self.params.surfaceCalculation = descr.entryByName\
                    ['surfaceCalculation']['widget'].get()
                self.params.sdens = float(descr.entryByName['sdens']['widget'].\
                    get())
                self.params.splineWindow = float(descr.entryByName\
                    ['splineWindow']['widget'].get())
                self.params.molecule1Path = descr.entryByName['molecule1']\
                    ['widget'].get()
                self.params.molecule2Path = descr.entryByName['molecule2']\
                    ['widget'].get()
                self.params.complexPath = descr.entryByName['complex']\
                    ['widget'].get()
                self.params.energyOutput = descr.entryByName['energyOutput']\
                    ['widget'].get()
                self.params.forceOutput = descr.entryByName['forceOutput']\
                    ['widget'].get()
                self.params.name = descr.entryByName['Profiles']['widget'].\
                    get()
            else:
                return "ERROR"
        self.refreshCalculationPage()
    def refreshGridPage(self):
        """Refreshes grid page.

        Pushes the stored grid parameters into the form widgets, updates the
        derived resolution / memory labels, and redraws the coarse and fine
        grid boxes in the 3D viewer.
        """
        if self.cmdForms.has_key('default'):
            descr = self.cmdForms['default'].descr
            # Grid point counts snap to the nearest legal value (GRID_VALUES).
            descr.entryByName['gridPointsX']['widget'].set(closestMatch(self.
                params.gridPointsX, self.params.GRID_VALUES), update = 0)
            descr.entryByName['gridPointsY']['widget'].set(closestMatch(self.
                params.gridPointsY, self.params.GRID_VALUES), update = 0)
            descr.entryByName['gridPointsZ']['widget'].set(closestMatch(self.
                params.gridPointsZ, self.params.GRID_VALUES), update = 0)
            descr.entryByName['coarseLengthX']['widget'].set(self.params.
                coarseLengthX, update = 0)
            descr.entryByName['coarseLengthY']['widget'].set(self.params.
                coarseLengthY, update = 0)
            descr.entryByName['coarseLengthZ']['widget'].set(self.params.
                coarseLengthZ, update = 0)
            descr.entryByName['coarseCenterX']['widget'].set(self.params.
                coarseCenterX, update = 0)
            descr.entryByName['coarseCenterY']['widget'].set(self.params.
                coarseCenterY, update = 0)
            descr.entryByName['coarseCenterZ']['widget'].set(self.params.
                coarseCenterZ, update = 0)
            # Derived, read-only resolution labels.
            descr.entryByName['coarseResolutionX']['widget'].configure(text =
                "%5.3f"%self.coarseResolutionX())
            descr.entryByName['coarseResolutionY']['widget'].configure(text =
                "%5.3f"%self.coarseResolutionY())
            descr.entryByName['coarseResolutionZ']['widget'].configure(text =
                "%5.3f"%self.coarseResolutionZ())
            descr.entryByName['fineLengthX']['widget'].set(self.params.
                fineLengthX, update = 0)
            descr.entryByName['fineLengthY']['widget'].set(self.params.
                fineLengthY, update = 0)
            descr.entryByName['fineLengthZ']['widget'].set(self.params.
                fineLengthZ, update = 0)
            descr.entryByName['fineCenterX']['widget'].set(self.params.
                fineCenterX, update = 0)
            descr.entryByName['fineCenterY']['widget'].set(self.
                params.fineCenterY, update = 0)
            descr.entryByName['fineCenterZ']['widget'].set(self.params.
                fineCenterZ, update = 0)
            descr.entryByName['fineResolutionX']['widget'].configure(text =
                "%5.3f"%self.fineResolutionX())
            descr.entryByName['fineResolutionY']['widget'].configure(text =
                "%5.3f"%self.fineResolutionY())
            descr.entryByName['fineResolutionZ']['widget'].configure(text =
                "%5.3f"%self.fineResolutionZ())
            descr.entryByName['gridPointsNumberLabel']['widget'].\
                configure(text = "%d"%self.totalGridPoints())
            descr.entryByName['mallocSizeLabel']['widget'].configure(text =
                "%5.3f"%self.memoryToBeAllocated())
            # Sync the viewer boxes with the (possibly hidden) grid extents.
            self.coarseBox.Set(visible = descr.\
                entryByName['showCoarseGrid']['wcfg']['variable'].get(),
                xside = self.params.coarseLengthX,
                yside = self.params.coarseLengthY,
                zside = self.params.coarseLengthZ,
                center = [self.params.coarseCenterX, self.params.coarseCenterY,
                self.params.coarseCenterZ], tagModified=False)
            self.fineBox.Set(visible = descr.\
                entryByName['showFineGrid']['wcfg']['variable'].get(),
                xside = self.params.fineLengthX,yside = self.params.fineLengthY,
                zside = self.params.fineLengthZ,
                center = [self.params.fineCenterX, self.params.fineCenterY,
                self.params.fineCenterZ], tagModified=False)
            self.vf.GUI.VIEWER.Redraw()
def testGridWidgets(self):
"""Tests grid widget"""
if self.cmdForms.has_key('default'):
descr = self.cmdForms['default'].descr
#Boundary check: make sure coarse grid encloses fine grid
ccx = descr.entryByName['coarseCenterX']['widget'].value
ccy = descr.entryByName['coarseCenterY']['widget'].value
ccz = descr.entryByName['coarseCenterZ']['widget'].value
clx = descr.entryByName['coarseLengthX']['widget'].value/2
cly = descr.entryByName['coarseLengthY']['widget'].value/2
clz = descr.entryByName['coarseLengthZ']['widget'].value/2
fcx = descr.entryByName['fineCenterX']['widget'].value
fcy = descr.entryByName['fineCenterY']['widget'].value
fcz = descr.entryByName['fineCenterZ']['widget'].value
flx = descr.entryByName['fineLengthX']['widget'].value/2
fly = descr.entryByName['fineLengthY']['widget'].value/2
flz = descr.entryByName['fineLengthZ']['widget'].value/2
if (fcx+flx>ccx+clx) or (fcx-flx<ccx-clx) or (fcy+fly>ccy+cly) or \
(fcy-fly<ccy-cly) or (fcz+flz>ccz+clz) or (fcz-flz<ccz-clz):
self.errorMsg = 'The coarse grid must enclose the fine grid.'
errorform = self.showForm('error',modal=1,blocking=1,force=1)
return 1
return 0
else :
#Boundary check: make sure coarse grid encloses fine grid
ccx = self.params.coarseCenterX
ccy = self.params.coarseCenterY
ccz = self.params.coarseCenterZ
clx = self.params.coarseLengthX
cly = self.params.coarseLengthY
clz = self.params.coarseLengthZ
fcx = self.params.fineCenterX
fcy = self.params.fineCenterY
fcz = self.params.fineCenterZ
flx = self.params.fineLengthX
fly = self.params.fineLengthY
flz = self.params.fineLengthZ
if (fcx+flx>ccx+clx) or (fcx-flx<ccx-clx) or (fcy+fly>ccy+cly) or \
(fcy-fly<ccy-cly) or (fcz+flz>ccz+clz) or (fcz-flz<ccz-clz):
self.errorMsg = 'The coarse grid must enclose the fine grid.'
errorform = self.showForm('error',modal=1,blocking=1,force=1)
return 1
return 0
    def gridParamUpdate(self, selectItem=0):
        """Updates grid parameters from the form widgets.

        Returns "ERROR" if the enclosure check or form lookup failed.
        """
        if self.testGridWidgets() == 0:
            if self.cmdForms.has_key('default'):
                descr = self.cmdForms['default'].descr
                # Point counts snap to the nearest legal grid size.
                self.params.gridPointsX = closestMatch(descr.entryByName
                    ['gridPointsX']['widget'].get(), self.params.GRID_VALUES)
                self.params.gridPointsY = closestMatch(descr.entryByName
                    ['gridPointsY']['widget'].get(), self.params.GRID_VALUES)
                self.params.gridPointsZ = closestMatch(descr.entryByName
                    ['gridPointsZ']['widget'].get(), self.params.GRID_VALUES)
                self.params.coarseLengthX = descr.entryByName['coarseLengthX']\
                    ['widget'].value
                self.params.coarseLengthY = descr.entryByName['coarseLengthY']\
                    ['widget'].value
                self.params.coarseLengthZ = descr.entryByName['coarseLengthZ']\
                    ['widget'].value
                self.params.coarseCenterX = descr.entryByName['coarseCenterX']\
                    ['widget'].value
                self.params.coarseCenterY = descr.entryByName['coarseCenterY']\
                    ['widget'].value
                self.params.coarseCenterZ = descr.entryByName['coarseCenterZ']\
                    ['widget'].value
                self.params.fineLengthX = descr.entryByName['fineLengthX']\
                    ['widget'].value
                self.params.fineLengthY = descr.entryByName['fineLengthY']\
                    ['widget'].value
                self.params.fineLengthZ = descr.entryByName['fineLengthZ']\
                    ['widget'].value
                self.params.fineCenterX = descr.entryByName['fineCenterX']\
                    ['widget'].value
                self.params.fineCenterY = descr.entryByName['fineCenterY']\
                    ['widget'].value
                self.params.fineCenterZ = descr.entryByName['fineCenterZ']\
                    ['widget'].value
                # Record that geometry changed so dependent state refreshes.
                self.flag_grid_changed = True
            else:
                return "ERROR"
        self.refreshGridPage()
    def refreshPhysicsPage(self):
        """Refreshes physics page.

        Pushes stored physics parameters into the form widgets and rebuilds
        the displayed ion list.
        """
        if self.cmdForms.has_key('default'):
            descr = self.cmdForms['default'].descr
            descr.entryByName['proteinDielectric']['widget'].\
                setentry(self.params.proteinDielectric)
            descr.entryByName['solventDielectric']['widget'].\
                setentry(self.params.solventDielectric)
            descr.entryByName['solventRadius']['widget'].\
                setentry(self.params.solventRadius)
            descr.entryByName['systemTemperature']['widget'].\
                setentry(self.params.systemTemperature)
            descr.entryByName['ionsList']['widget'].clear()
            for i in range(len(self.params.ions)):
                descr.entryByName['ionsList']['widget'].\
                    insert('end', self.params.ions[i].toString())
            # The salt checkbutton mirrors whether a concentration is set.
            if self.params.saltConcentration:
                self.salt_var.set(1)
            else:
                self.salt_var.set(0)
    def testPhysicsWidgets(self):
        """Tests physics widgets.

        Returns 1 (after popping an error form) if any required numeric
        entry is empty, otherwise 0.  NOTE: the backslash-continued string
        literals concatenate without a separating space in the first two
        messages; kept as-is because message text is runtime behavior.
        """
        if self.cmdForms.has_key('default'):
            descr = self.cmdForms['default'].descr
            if(descr.entryByName['proteinDielectric']['widget'].get() == ''):
                self.errorMsg = 'You must enter a protein dielectric\
value.'
                errorform = self.showForm('error', modal=1, blocking=1,
                                          force = 1)
                return 1
            if(descr.entryByName['solventDielectric']['widget'].get() == ''):
                self.errorMsg = 'You must enter a solvent dielectric\
value.'
                errorform = self.showForm('error', modal=1, blocking=1,
                                          force = 1)
                return 1
            if(descr.entryByName['solventRadius']['widget'].get() == ''):
                self.errorMsg = 'You must enter a solvent radius value.'
                errorform = self.showForm('error', modal=1, blocking=1,
                                          force = 1)
                return 1
            if(descr.entryByName['systemTemperature']['widget'].get() == ''):
                self.errorMsg = 'You must enter a system temperature \
value.'
                errorform = self.showForm('error', modal=1, blocking=1,
                                          force = 1)
                return 1
        return 0
    def physicsParamUpdate(self):
        """Updates physics parameters from the form widgets.

        Returns "ERROR" if the form lookup failed; widget validation
        failures (testPhysicsWidgets() == 1) fall through to the refresh.
        """
        if self.testPhysicsWidgets() != 1:
            if self.cmdForms.has_key('default'):
                descr = self.cmdForms['default'].descr
                self.params.proteinDielectric = float(descr.entryByName\
                    ['proteinDielectric']['widget'].get())
                self.params.solventDielectric = float(descr.entryByName\
                    ['solventDielectric']['widget'].get())
                self.params.solventRadius = float(descr.entryByName\
                    ['solventRadius']['widget'].get())
                self.params.systemTemperature = float(descr.entryByName\
                    ['systemTemperature']['widget'].get())
                salt = self.salt_var.get()
                if salt:
                    self.params.saltConcentration = float(descr.entryByName\
                        ['saltConcentration']['widget'].get())
                else:
                    # Checkbox off: zero concentration disables the salt term.
                    self.params.saltConcentration = 0
            else:
                return "ERROR"
        self.refreshPhysicsPage()
def refreshAll(self,cmdForm = None):
"""Refreshes calculation, grid and physics pages"""
if cmdForm:
self.cmdForms['default'] = cmdForm
descr = cmdForm.descr
if APBSservicesFound:
ResourceFolder = getResourceFolderWithVersion()
if os.path.isdir(ResourceFolder):
pass
else:
os.mkdir(ResourceFolder)
self.rc_apbs = ResourceFolder + os.sep + "ws"
if os.path.isdir(self.rc_apbs):
pass
else:
os.mkdir(self.rc_apbs)
self.rc_apbs += os.sep + "rc_apbs"
if not os.path.exists(self.rc_apbs):
open(self.rc_apbs,'w')
else:
file = open(self.rc_apbs)
text = file.read()
text = text.split()
for line in text:
tmp_line = line.split('User:')
if len(tmp_line) > 1:
descr.entryByName['UserName_Entry']['wcfg']\
['textvariable'].set(tmp_line[1])
tmp_line = line.split('Password:')
if len(tmp_line) > 1:
descr.entryByName['Password_Entry']['wcfg']\
['textvariable'].set(tmp_line[1])
file.close()
# descr.entryByName['ParallelGroup']['widget'].toggle()
if not descr.entryByName['web service address']['widget'].get():
descr.entryByName['web service address']['widget']\
.selectitem(0)
url = descr.entryByName['web service address']['widget'].get()
url = url.strip()
if url.find('https://') != 0:
descr.entryByName['UserName_Label']['widget'].grid_forget()
descr.entryByName['UserName_Entry']['widget'].grid_forget()
descr.entryByName['Password_Label']['widget'].grid_forget()
descr.entryByName['Password_Entry']['widget'].grid_forget()
descr.entryByName['Remember_Label']['widget'].grid_forget()
descr.entryByName['Remember_Checkbutton']['widget']\
.grid_forget()
self.progressBar = ProgressBar(
descr.entryByName['WS_ProgressBar']['widget']
, labelside=None,
width=200, height=20, mode='percent')
self.progressBar.setLabelText('Progress...')
self.progressBar.set(0)
descr.entryByName['WS_ProgressBar']['widget'].grid_forget()
else:
descr.entryByName['WS_http']['widget'].bind(
sequence = "<Button-1>", func = self.WS_http)
descr.entryByName['calculationType']['widget']._entryWidget.\
config(state = 'readonly')
descr.entryByName['pbeType']['widget']._entryWidget.\
config(state = 'readonly')
descr.entryByName['boundaryConditions']['widget']._entryWidget.\
config(state = 'readonly')
descr.entryByName['chargeDiscretization']['widget'].\
_entryWidget.config(state = 'readonly')
descr.entryByName['surfaceCalculation']['widget']._entryWidget.\
config(state = 'readonly')
descr.entryByName['energyOutput']['widget']._entryWidget.\
config(state = 'readonly')
descr.entryByName['forceOutput']['widget']._entryWidget.\
config(state = 'readonly')
self.refreshCalculationPage()
self.refreshGridPage()
self.refreshPhysicsPage()
def paramUpdateAll(self):
"""Updates all parameters. Returns "ERROR" if failed """
if self.calculationParamUpdate() == "ERROR":
return "ERROR"
if self.gridParamUpdate() == "ERROR":
return "ERROR"
if self.physicsParamUpdate() == "ERROR":
return "ERROR"
def setOutputFiles(self):
"""Sets output files using outputFilesForm GUI"""
outputFilesForm = self.showForm('outputFilesForm', \
modal = 1, blocking = 1,force=1,master=self.cmdForms['default'].f)
descr = self.cmdForms['outputFilesForm'].descr
self.params.chargeDistributionFile = descr.entryByName\
['chargeDistributionFile']['widget'].get()
self.params.potentialFile = descr.entryByName['potentialFile']\
['widget'].get()
self.params.solventAccessibilityFile = descr.entryByName\
['solventAccessibilityFile']['widget'].get()
self.params.splineBasedAccessibilityFile = descr.entryByName\
['splineBasedAccessibilityFile']['widget'].get()
self.params.VDWAccessibilityFile = descr.entryByName\
['VDWAccessibilityFile']['widget'].get()
self.params.ionAccessibilityFile = descr.entryByName\
['ionAccessibilityFile']['widget'].get()
self.params.laplacianOfPotentialFile = descr.entryByName\
['laplacianOfPotentialFile']['widget'].get()
self.params.energyDensityFile = descr.entryByName\
['energyDensityFile']['widget'].get()
self.params.ionNumberFile = descr.entryByName\
['ionNumberFile']['widget'].get()
self.params.ionChargeDensityFile = descr.entryByName\
['ionChargeDensityFile']['widget'].get()
self.params.xShiftedDielectricFile = descr.entryByName\
['xShiftedDielectricFile']['widget'].get()
self.params.yShiftedDielectricFile = descr.entryByName\
['yShiftedDielectricFile']['widget'].get()
self.params.zShiftedDielectricFile = descr.entryByName\
['zShiftedDielectricFile']['widget'].get()
self.params.kappaFunctionFile = descr.entryByName\
['kappaFunctionFile']['widget'].get()
def addIon(self):
"""Adds an Ion"""
ionForm | |
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.engine.input_layer import InputLayer
from keras.layers import Lambda, BatchNormalization
from keras.models import Model
from keras import backend as K
from sklearn import metrics
from fully_connected_opt_weight_generation import *
import time
import warnings
"""
This lambda layer take the output variables (vectors) from last layer,
clip range to clip_range
quantize to the bits.
"""
def quant_layer(x, clip_range, bits):
    """Fake-quantize tensor `x` to `bits` bits within [clip_range[0], clip_range[1]]."""
    import tensorflow as tf
    lo, hi = clip_range
    return tf.fake_quant_with_min_max_vars(x, min=lo, max=hi, num_bits=bits)
def quant_shape(input_shape):
    """Output-shape callback for the quantization Lambda: shape is unchanged."""
    return input_shape
def fake_clip(frac_bit=0, bit=8):
    '''
    Build a Lambda layer that fake-quantizes its input to a signed
    fixed-point range.

    :param frac_bit: fractional bit count, e.g. Q3.4 = shift 4
    :param bit: total word width in bits
    :return: keras Lambda layer performing the fake quantization
    '''
    # Representable range of a signed fixed-point number with `frac_bit`
    # fractional bits:
    #   hi =  2**(bit - frac_bit)/2 - 2**-frac_bit
    #   lo = -2**(bit - frac_bit)/2
    # Use local names that do not shadow the builtins min()/max()
    # (the original bound `min`/`max` and broke any later use of them).
    hi = 2**(bit - frac_bit) / 2 - (1/(2**frac_bit))
    lo = -2**(bit - frac_bit) / 2
    return Lambda(quant_layer, output_shape=quant_shape,
                  arguments={'clip_range': [lo, hi], 'bits': bit})
def fake_clip_min_max(min=0, max=1, bit=8):
    """Lambda layer that fake-quantizes to an explicit [min, max] range with `bit` bits."""
    clip = [min, max]
    return Lambda(quant_layer, output_shape=quant_shape,
                  arguments={'clip_range': clip, 'bits': bit})
"""
this is the generate the test set data to a bin file
bin file can be used to validate the implementation in MCU
"""
def generate_test_bin(x, y, name='test_data_with_label.bin'):
    '''
    Quantize x to int8 (Q-format chosen from its dynamic range) and write
    it, interleaved with its labels, to a binary file consumable on the
    MCU side.  Layout: repeated frames of (128 labels)(128 data blocks);
    the final partial frame has its labels zero-padded to 128 (the data
    blocks of the last frame are NOT padded).

    :param x: input data array, one sample per row
    :param y: input label (one hot label)
    :return:
    '''
    # quantize input x: pick dec_bits so the extreme value fits in 8 bits
    min_value = np.min(x)
    max_value = np.max(x)
    int_bits = int(np.ceil(np.log2(max(abs(min_value), abs(max_value)))))
    dec_bits = 7 - int_bits
    x = np.round(x*2**dec_bits).astype(np.int8)
    # get label: column index of the '1' in each one-hot row
    test_label = np.argwhere(y == 1).astype(np.int8)  # test data
    test_label = test_label[:, 1]
    # get data
    dat = x.astype(dtype="byte")  # test data
    batch_size = dat.shape[0]  # total pices of data
    dat = dat.flatten()  # flatten to get the total size.
    block_size = int(dat.size / batch_size)  # this must be integer but... just to confirm
    # write (label x 128) (data_block x 128)
    label_batch = 128  # the Y-modem example uses 128 batch
    with open(name, 'wb') as f:
        start = 0
        # full frames: 128 labels followed by the matching 128 data blocks
        while start <= (test_label.size - label_batch):
            test_label[start: start + label_batch].tofile(f)
            dat[block_size * start: block_size * (start + label_batch)].tofile(f)
            start += label_batch
        # the rest data: labels zero-padded up to a full 128-label frame
        if (start < test_label.size):
            rest_len = test_label.size - start
            new_labls = test_label[start:]
            new_labls = np.pad(new_labls, (0, label_batch - rest_len), mode='constant')
            new_labls.tofile(f)
            dat[block_size * start:].tofile(f)
    print("binary test file generated:", name)
    print("test data length:", test_label.size)
    return
def is_shift_layer(layer):
    """Return True for layers which can change the output fixed-point encoding.

    Matching is purely by layer name (plus softmax activations);
    zero-padding layers are excluded explicitly because their name
    contains the substring 'add'.
    """
    #FIXME: add more which will change the output shift
    name = layer.name
    simple_keywords = ('input', 'conv2d', 'conv1d', 'dense', 'softmax',
                       'subtract', 'multiply')
    if any(key in name for key in simple_keywords):
        return True
    if 'add' in name and 'zero' not in name:  # zero_padding contains 'add'
        return True
    if 'activation' in name and layer.get_config()['activation'] == 'softmax':
        return True
    return False
def fuse_bn_to_conv(layer):
    """Fold the BatchNormalization layer that follows `layer` into its weights.

    The conv kernel is rescaled by gamma/sqrt(variance + epsilon) and the
    bias is rewritten so that conv+BN collapse into a single conv.
    NOTE: this mutates the layer's weights in place; after fusing, the
    model should only be used for inference/quantization, not training.
    """
    # try to fuse BN layer to convolutional
    if ('conv' in layer.name) and \
            ('batch_normalization' in layer._outbound_nodes[0].outbound_layer.name):
        print("fusing batch normalization to", layer.name)
        bn_layer = layer._outbound_nodes[0].outbound_layer
        c_w = layer.get_weights()[0]
        c_b = layer.get_weights()[1]
        print('original weight max', c_w.max(), 'min', c_w.min())
        print('original bias max', c_b.max(), 'min', c_b.min())
        bn_gamma = bn_layer.get_weights()[0]
        bn_beta = bn_layer.get_weights()[1]
        bn_mean = bn_layer.get_weights()[2]
        bn_variance = bn_layer.get_weights()[3]
        if ('conv2d' in layer.name):
            epsilon = 1e-3  # default epsilon for tf.slim.batch_norm
            for l in range(c_w.shape[3]):
                for k in range(c_w.shape[2]):
                    for j in range(c_w.shape[1]):
                        for i in range(c_w.shape[0]):
                            if "depthwise" in layer.name:  # depthwise batchnorm params are ordered differently
                                c_w[i][j][k][l] *= bn_gamma[k] / np.sqrt(bn_variance[k] + epsilon)
                            else:
                                c_w[i][j][k][l] *= bn_gamma[l] / np.sqrt(bn_variance[l] + epsilon)
            # the output-channel axis differs for depthwise kernels
            if "depthwise" in layer.name:
                depth_dim = c_w.shape[2]
            else:
                depth_dim = c_w.shape[3]
            for l in range(depth_dim):
                c_b[l] = (bn_gamma[l] * (c_b[l] - bn_mean[l]) / np.sqrt(bn_variance[l] + epsilon)) + bn_beta[l]
        # conv1d
        else:
            epsilon = 1e-3  # default epsilon for tf.slim.batch_norm
            for k in range(c_w.shape[2]):
                for j in range(c_w.shape[1]):
                    for i in range(c_w.shape[0]):
                        if "depthwise" in layer.name:  # depthwise batchnorm params are ordered differently
                            c_w[i][j][k] *= bn_gamma[j] / np.sqrt(bn_variance[j] + epsilon)
                        else:
                            c_w[i][j][k] *= bn_gamma[k] / np.sqrt(bn_variance[k] + epsilon)
            if "depthwise" in layer.name:
                depth_dim = c_w.shape[1]
            else:
                depth_dim = c_w.shape[2]
            for l in range(depth_dim):
                c_b[l] = (bn_gamma[l] * (c_b[l] - bn_mean[l]) / np.sqrt(bn_variance[l] + epsilon)) + bn_beta[l]
        print('fused weight max', c_w.max(), 'min', c_w.min())
        print('fused bias max', c_b.max(), 'min', c_b.min())
        # write the weights back to the layer
        # after that, the model will be destroyed.. need a better way to pass the new weight
        layer.set_weights([c_w, c_b])
def generate_weights(model, name='weights.h', shift_list=None):
    # Quantize weights to 8-bits using (min,max) and write to file
    #
    # For every weighted layer: optionally fuse a following BN layer,
    # pick a per-tensor Q7 decimal shift from the weight range, reorder
    # the weights into the CMSIS-NN memory layout, and emit them as
    # #define'd C arrays (plus *_SHIFT macros) into `name`.
    # `shift_list`, if given, maps layer-input names to output encodings
    # and is used to keep the bias shift compatible with input*kernel.
    f = open(name, 'w')  # truncate the output header; appended to below
    f.close()
    for curr_idx, layer in enumerate(model.layers):
        if (not layer.weights):
            continue
        # before merging bn layer, check if the bn is "legally" after Conv
        if('batch_normalization' in layer.name) and \
            ('conv' not in layer._inbound_nodes[0].inbound_layers[0].name):
            raise Exception('Currently only support batch_normalization after conv', layer.name,
                            layer._inbound_nodes[0].inbound_layers[0].name)
        # try to fuse BN layer to convolutional
        if ('conv' in layer.name) and \
            ('batch_normalization' in layer._outbound_nodes[0].outbound_layer.name):
            fuse_bn_to_conv(layer)
        # generate weights and bias now
        weight_dec_shift = 0
        print('weights for layer', layer.name)
        for var in layer.weights:
            var_name = str(var.name)
            if("kernel" in var_name ):
                var_values = layer.get_weights()[0] # weight
                print(" weight:", var_name)
            elif("bias" in var_name):
                var_values = layer.get_weights()[1] # bias
                print(" bias: ",var_name)
            else:
                continue
            print(" original shape: ", var_values.shape)
            # choose dec_bits so the largest |value| fits into int8
            min_value = np.min(var_values)
            max_value = np.max(var_values)
            int_bits = int(np.ceil(np.log2(max(abs(min_value), abs(max_value)))))
            dec_bits = 7 - int_bits
            print(" dec bit", dec_bits)
            bSameAsKernel = False
            if(is_shift_layer(layer)):
                bSameAsKernel = False
                # layer input tensor name without the ':0' suffix
                inp = layer.input.name.replace(':','/').split('/')[0]
                input_encoding = shift_list[inp]
                if ("kernel" in var_name):
                    weight_dec_shift = dec_bits
                else:
                    # bias must be representable as input_shift + kernel_shift
                    shift = input_encoding+weight_dec_shift-dec_bits
                    if(shift < 0):
                        bSameAsKernel = True
            if(shift_list is None or bSameAsKernel):
                # check if bias shift > weight shift, then reduce bias shift to weight shift
                if ("kernel" in var_name):
                    weight_dec_shift = dec_bits
                else:
                    if(dec_bits > weight_dec_shift):
                        dec_bits = weight_dec_shift
                print(" new dec bit", dec_bits)
            # convert to [-128,128) or int8
            var_values = np.round(var_values * 2 ** dec_bits)
            var_name = var_name.replace('/', '_')
            var_name = var_name.replace(':', '_')
            with open(name, 'a') as f:
                f.write('#define ' + var_name.upper() + ' {')
            # reorder into the memory layout expected by CMSIS-NN kernels
            if (len(var_values.shape) == 3):  # 1D convolution layer weights
                transposed_wts = np.transpose(var_values, (2, 0, 1))
                #transposed_wts = var_values
            elif (len(var_values.shape) > 2):  # 2D convolution layer weights
                transposed_wts = np.transpose(var_values, (3, 0, 1, 2))
            else:  # fully connected layer weights or biases of any layer
                # test, use opt weight reorder
                if "dense" in var_name and "kernel" in var_name:
                    transposed_wts = np.transpose(var_values)
                    transposed_wts = convert_to_x4_q7_weights(np.reshape(transposed_wts ,(transposed_wts.shape[0], transposed_wts.shape[1], 1, 1)))
                else:
                    transposed_wts = np.transpose(var_values)
            print(" reshape to:",transposed_wts.shape)
            with open(name, 'a') as f:
                transposed_wts.tofile(f, sep=", ", format="%d")
                f.write('}\n\n')
                if ("bias" in var_name):
                    f.write('#define ' + var_name.upper() + '_SHIFT ' + '(' + str(dec_bits) + ')\n\n\n')
                if ("kernel" in var_name ):
                    f.write('#define ' + var_name.upper() + '_SHIFT ' + '(' + str(dec_bits) + ')\n\n')
            """
            # for checking the quantised and dequantised range.
            with K.tf.Session() as session:
                # convert back original range but quantized to 8-bits or 256 levels
                var_values = var_values / (2 ** dec_bits)
                var_values = session.run(K.tf.assign(var, var_values))
                print(' '+var_name + ' number of wts/bias: ' + str(var_values.shape) + \
                      ' dec bits: ' + str(dec_bits) + \
                      ' max: (' + str(np.max(var_values)) + ',' + str(max_value) + ')' + \
                      ' min: (' + str(np.min(var_values)) + ',' + str(min_value) + ')')
            """
def layers_output_ranges(model, x_test):
# test, show the output ranges
shift_list = {}
# FIXME: only support one input
if(type(model.layers[0]) != InputLayer):
L = [model.input] + model.layers
else:
L = model.layers
last_layer = None
for layer in L:
if("input" in layer.name):
features = x_test
else:
# batch_normalization will need to be handle differently, since we are fusing the weight to its predecessor.
# sigmoid and tanh are different, their shift is fixed to 7
if(is_shift_layer(layer) or
('batch_normalization' in layer.name) or
('activation' in layer.name and layer.get_config()['activation'] == 'sigmoid') or
('activation' in layer.name and layer.get_config()['activation'] == 'tanh')):
layer_model = Model(inputs=model.input, outputs=layer.output)
# FIXME, when the test data is too large, it might return memory error. need to slice data into small pices
features = layer_model.predict(x_test)
else:
# leave the features not changed, so this layer shift will be the same
# as its inputs
pass
max_val = features.max()
min_val = features.min()
int_bits = int(np.ceil(np.log2(max(abs(max_val), abs(min_val)))))
dec_bits = 7 - int_bits
print( layer.name, "max value:", max_val, "min value:", min_val,"dec bit", dec_bits)
# record the shift
if(model.input == layer and type(model.layers[0]) | |
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if there is no newline char at the end of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # lines was built by appending two newlines to the file text and then
  # splitting on '\n', so a file that ends with a newline produces an
  # empty element at index -2.  Anything else means the final line was
  # missing its trailing newline.
  ends_with_newline = len(lines) >= 3 and not lines[-2]
  if not ends_with_newline:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
# Table of thread-unsafe POSIX functions, as
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
#   _rand();               // false positive due to substring match.
#   ->rand();              // some member function rand().
#   ACMRandom rand(seed);  // some variable named rand.
#   ISAACRandom rand();    // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name.  This eliminates constructors and
# member function calls.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
    ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
    ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
    ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
    ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
    ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
    ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
    ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
    ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
    ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
    ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
    ('strtok(', 'strtok_r(',
     _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
    ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
    )
def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  Much code predates threading, and engineers learned the classic POSIX
  functions before the reentrant extensions were added.  This check
  steers them toward the *_r thread-safe variants when using posix
  directly.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  for unsafe_func, safe_func, pattern in _THREADING_LIST:
    # The validation pattern weeds out member functions and variables
    # that merely share the name (see _THREADING_LIST above).
    if not Search(pattern, line):
      continue
    error(filename, linenum, 'runtime/threadsafe_fn', 2,
          'Consider using ' + safe_func +
          '...) instead of ' + unsafe_func +
          '...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
  """Checks that VLOG() is only used for defining a logging level.

  For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
  VLOG(FATAL) are not.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  symbolic_vlog = r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)'
  if Search(symbolic_vlog, line):
    error(filename, linenum, 'runtime/vlog', 5,
          'VLOG() should be used with numeric verbosity level.  '
          'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
    r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  For example the function:
    void increment_counter(int* count) {
      *count++;
    }
  is invalid: it effectively does count++, moving the pointer.  It should
  be written as ++*count, (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  if _RE_PATTERN_INVALID_INCREMENT.match(clean_lines.elided[linenum]):
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
  """True if this line starts a #define, or continues one from the line above."""
  if Search(r'^#define', clean_lines[linenum]):
    return True
  # A backslash at the end of the previous line marks a macro continuation.
  return bool(linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]))
def IsForwardClassDeclaration(clean_lines, linenum):
  """Matches forward declarations such as 'class Foo;' (possibly templated).

  Returns the match object (not a bool) so callers can use it in a
  truth test, as the original did.
  """
  forward_decl = r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$'
  return Match(forward_decl, clean_lines[linenum])
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, linenum, seen_open_brace):
self.starting_linenum = linenum
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
self.check_namespace_indentation = False
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def IsBlockInfo(self):
"""Returns true if this block is a _BlockInfo.
This is convenient for verifying that an object is an instance of
a _BlockInfo, but not an instance of any of the derived classes.
Returns:
True for this class, False for derived classes.
"""
return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
  """Stores information about an 'extern "C"' block."""

  def __init__(self, linenum):
    # An extern "C" block behaves like a generic block whose opening
    # brace has already been seen.
    super(_ExternCInfo, self).__init__(linenum, True)
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, linenum, False)
self.name = name
self.is_derived = False
self.check_namespace_indentation = True
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
# Try to find the end of the class. This will be confused by things like:
| |
x)*r[-1]/wr, x)*i/r[order]
negoneterm *= -1
if r.get('simplify', True):
psol = simplify(psol)
psol = trigsimp(psol, deep=True)
return Eq(f(x), gsol.rhs + psol)
def ode_separable(eq, func, order, match):
    r"""
    Solves separable 1st order differential equations.

    This is any differential equation that can be written as `P(y)
    \tfrac{dy}{dx} = Q(x)`.  The solution can then just be found by
    rearranging terms and integrating: `\int P(y) \,dy = \int Q(x) \,dx`.
    This hint uses :py:meth:`sympy.simplify.simplify.separatevars` as its back
    end, so if a separable equation is not caught by this solver, it is most
    likely the fault of that function.
    :py:meth:`~sympy.simplify.simplify.separatevars` is
    smart enough to do most expansion and factoring necessary to convert a
    separable equation `F(x, y)` into the proper form `P(x)\cdot{}Q(y)`.  The
    general solution is::

        >>> from sympy import Function, dsolve, Eq, pprint
        >>> from sympy.abc import x
        >>> a, b, c, d, f = map(Function, ['a', 'b', 'c', 'd', 'f'])
        >>> genform = Eq(a(x)*b(f(x))*f(x).diff(x), c(x)*d(f(x)))
        >>> pprint(genform)
                     d
        a(x)*b(f(x))*--(f(x)) = c(x)*d(f(x))
                     dx
        >>> pprint(dsolve(genform, f(x), hint='separable_Integral'))
             f(x)
           /                  /
          |                  |
          |  b(y)            | c(x)
          |  ---- dy = C1 +  | ---- dx
          |  d(y)            | a(x)
          |                  |
         /                  /

    Examples
    ========

    >>> from sympy import Function, dsolve, Eq
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(Eq(f(x)*f(x).diff(x) + x, 3*x*f(x)**2), f(x),
    ... hint='separable', simplify=False))
       /   2       \         2
    log\3*f (x) - 1/        x
    ---------------- = C1 + --
           6                2

    References
    ==========

    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
      Dover 1963, pp. 52

    # indirect doctest

    """
    x = func.args[0]
    f = func.func
    C1 = get_numbered_constants(eq, num=1)
    r = match  # {'m1':m1, 'm2':m2, 'y':y}
    u = r.get('hint', f(x))  # get u from separable_reduced else get f(x)
    # Unevaluated integral form: int m2_y/m1_y dy = C1 - int m1_x/m2_x dx
    return Eq(C.Integral(r['m2']['coeff']*r['m2'][r['y']]/r['m1'][r['y']],
        (r['y'], None, u)), C.Integral(-r['m1']['coeff']*r['m1'][x]/
        r['m2'][x], x) + C1)
def checkinfsol(eq, infinitesimals, func=None, order=None):
    r"""
    This function is used to check if the given infinitesimals are the
    actual infinitesimals of the given first order differential equation.
    This method is specific to the Lie Group Solver of ODEs.

    As of now, it simply checks, by substituting the infinitesimals in the
    partial differential equation.

    .. math:: \frac{\partial \eta}{\partial x} + \left(\frac{\partial \eta}{\partial y}
                - \frac{\partial \xi}{\partial x}\right)*h
                - \frac{\partial \xi}{\partial y}*h^{2}
                - \xi\frac{\partial h}{\partial x} - \eta\frac{\partial h}{\partial y} = 0

    where `\eta`, and `\xi` are the infinitesimals and `h(x,y) = \frac{dy}{dx}`

    The infinitesimals should be given in the form of a list of dicts
    ``[{xi(x, y): inf, eta(x, y): inf}]``, corresponding to the
    output of the function infinitesimals. It returns a list
    of values of the form ``[(True/False, sol)]`` where ``sol`` is the value
    obtained after substituting the infinitesimals in the PDE. If it
    is ``True``, then ``sol`` would be 0.

    """
    if isinstance(eq, Equality):
        eq = eq.lhs - eq.rhs
    if not func:
        eq, func = _preprocess(eq)
    variables = func.args
    if len(variables) != 1:
        raise ValueError("ODE's have only one independent variable")
    else:
        x = variables[0]
    if not order:
        order = ode_order(eq, func)
    if order != 1:
        raise NotImplementedError("Lie groups solver has been implemented "
        "only for first order differential equations")
    else:
        df = func.diff(x)
        a = Wild('a', exclude = [df])
        b = Wild('b', exclude = [df])
        # Write the ODE as a*f' + b = 0 when possible, so h = dy/dx = -b/a;
        # otherwise fall back to solving for f' directly.
        match = collect(expand(eq), df).match(a*df + b)
        if match:
            h = -simplify(match[b]/match[a])
        else:
            try:
                sol = solve(eq, df)
            except NotImplementedError:
                raise NotImplementedError("Infinitesimals for the "
                    "first order ODE could not be found")
            else:
                h = sol[0]  # Find infinitesimals for one solution
        y = Dummy('y')
        h = h.subs(func, y)
        xi = Function('xi')(x, y)
        eta = Function('eta')(x, y)
        dxi = Function('xi')(x, func)
        deta = Function('eta')(x, func)
        # Lie symmetry (linearized determining) equation the infinitesimals
        # must satisfy; see the docstring for the math.
        pde = (eta.diff(x) + (eta.diff(y) - xi.diff(x))*h -
            (xi.diff(y))*h**2 - xi*(h.diff(x)) - eta*(h.diff(y)))
        soltup = []
        for sol in infinitesimals:
            tsol = {xi: S(sol[dxi]).subs(func, y),
                eta: S(sol[deta]).subs(func, y)}
            sol = simplify(pde.subs(tsol).doit())
            if sol:
                soltup.append((False, sol.subs(y, func)))
            else:
                soltup.append((True, 0))
        return soltup
def ode_lie_group(eq, func, order, match):
r"""
This hint implements the Lie group method of solving first order differential
equations. The aim is to convert the given differential equation from the
given coordinate given system into another coordinate system where it becomes
invariant under the one-parameter Lie group of translations. The converted ODE is
quadrature and can be solved easily. It makes use of the
:py:meth:`sympy.solvers.ode.infinitesimals` function which returns the
infinitesimals of the transformation.
The coordinates `r` and `s` can be found by solving the following Partial
Differential Equations.
.. math :: \xi\frac{\partial r}{\partial x} + \eta\frac{\partial r}{\partial y}
= 0
.. math :: \xi\frac{\partial s}{\partial x} + \eta\frac{\partial s}{\partial y}
= 1
The differential equation becomes separable in the new coordinate system
.. math :: \frac{ds}{dr} = \frac{\frac{\partial s}{\partial x} +
h(x, y)\frac{\partial s}{\partial y}}{
\frac{\partial r}{\partial x} + h(x, y)\frac{\partial r}{\partial y}}
After finding the solution by integration, it is then converted back to the original
coordinate system by subsituting `r` and `s` in terms of `x` and `y` again.
Examples
========
>>> from sympy import Function, dsolve, Eq, exp, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(f(x).diff(x) + 2*x*f(x) - x*exp(-x**2), f(x),
... hint='lie_group'))
/ 2\ 2
| x | -x
f(x) = |C1 + --|*e
\ 2 /
References
==========
- Solving differential equations by Symmetry Groups,
<NAME>, pp. 1 - pp. 14
"""
from sympy.integrals.integrals import integrate
from sympy.solvers.pde import pdsolve
heuristics = lie_heuristics
inf = {}
f = func.func
x = func.args[0]
df = func.diff(x)
xi = Function("xi")
eta = Function("eta")
a = Wild('a', exclude = [df])
b = Wild('b', exclude = [df])
xis = match.pop('xi')
etas = match.pop('eta')
if match:
h = -simplify(match[match['d']]/match[match['e']])
y = match['y']
else:
try:
sol = solve(eq, df)
except NotImplementedError:
raise NotImplementedError("Unable to solve the differential equation " +
str(eq) + " by the lie group method")
else:
y = Dummy("y")
h = sol[0].subs(func, y)
if xis is not None and etas is not None:
inf = [{xi(x, f(x)): S(xis), eta(x, f(x)): S(etas)}]
if not checkinfsol(eq, inf, func=f(x), order=1)[0][0]:
raise ValueError("The given infinitesimals xi and eta"
" are not the infinitesimals to the given equation")
else:
heuristics = ["user_defined"]
match = {'h': h, 'y': y}
# This is done so that if:
# a] solve raises a NotImplementedError.
# b] any heuristic raises a ValueError
# another heuristic can be used.
tempsol = [] # Used by solve below
for heuristic in heuristics:
try:
if not inf:
inf = infinitesimals(eq, hint=heuristic, func=func, order=1, match=match)
except ValueError:
continue
else:
for infsim in inf:
xiinf = (infsim[xi(x, func)]).subs(func, y)
etainf = (infsim[eta(x, func)]).subs(func, y)
# This condition creates recursion while using pdsolve.
# Since the first step while solving a PDE of form
# a*(f(x, y).diff(x)) + b*(f(x, y).diff(y)) + c = 0
# is to solve the ODE dy/dx = b/a
if simplify(etainf/xiinf) == h:
continue
rpde = f(x, y).diff(x)*xiinf + f(x, y).diff(y)*etainf
r = pdsolve(rpde, func=f(x, y)).rhs
s = pdsolve(rpde - 1, func=f(x, y)).rhs
newcoord = [_lie_group_remove(coord) for coord in [r, s]]
r = Dummy("r")
s = Dummy("s")
C1 = Symbol("C1")
rcoord = newcoord[0]
scoord = newcoord[-1]
try:
sol = solve([r - rcoord, s - scoord], x, y, dict=True)
except NotImplementedError:
continue
else:
sol = sol[0]
xsub = sol[x]
ysub = sol[y]
num = simplify(scoord.diff(x) + scoord.diff(y)*h)
denom = simplify(rcoord.diff(x) + rcoord.diff(y)*h)
if num and denom:
diffeq = simplify((num/denom).subs([(x, xsub), (y, ysub)]))
sep = separatevars(diffeq, symbols=[r, s], dict=True)
if sep:
# Trying to separate, r and s coordinates
deq = integrate((1/sep[s]), s) + C1 - integrate(sep['coeff']*sep[r], r)
# Substituting and reverting back to original coordinates
deq = deq.subs([(r, rcoord), (s, scoord)])
try:
sdeq = solve(deq, y)
except NotImplementedError:
tempsol.append(deq)
else:
if len(sdeq) == 1:
return Eq(f(x), sdeq.pop())
else:
return [Eq(f(x), sol) for sol in sdeq]
elif denom: # (ds/dr) is zero which means s is constant
return Eq(f(x), solve(scoord - C1, y)[0])
elif num: # (dr/ds) is zero which means r is constant
return Eq(f(x), solve(rcoord - C1, y)[0])
# If | |
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import glance_store
import mock
from oslo_config import cfg
import oslo_messaging
import webob
import glance.async_
from glance.common import exception
from glance.common import timeutils
import glance.context
from glance import notifier
import glance.tests.unit.utils as unit_test_utils
from glance.tests import utils
# Fixed fixture values so notification payloads are deterministic in tests.
DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355)
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'  # image id used by fixtures
USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf'  # request-context user
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'  # image owner tenant
TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81'  # request-context tenant
class ImageStub(glance.domain.Image):
    """In-memory Image double that serves a fixed two-chunk payload."""

    def get_data(self, offset=0, chunk_size=None):
        # Fixed chunks; offset/chunk_size are accepted but ignored.
        return ['01234', '56789']

    def set_data(self, data, size, backend=None, set_active=True):
        # Drain the iterable (generators must be consumed) and discard it.
        for _ in data:
            pass
class ImageRepoStub(object):
    """Image repo double returning canned sentinel strings."""

    def add(self, *args, **kwargs):
        return 'image_from_add'

    def get(self, *args, **kwargs):
        return 'image_from_get'

    def list(self, *args, **kwargs):
        return ['images_from_list']

    def remove(self, *args, **kwargs):
        # NOTE: returns the 'get' sentinel, mirroring the original stub.
        return 'image_from_get'

    def save(self, *args, **kwargs):
        return 'image_from_save'
class ImageMemberRepoStub(object):
    """Image-member repo double returning canned sentinel strings."""

    def add(self, *args, **kwargs):
        return 'image_member_from_add'

    def get(self, *args, **kwargs):
        return 'image_member_from_get'

    def list(self, *args, **kwargs):
        return ['image_members_from_list']

    def remove(self, *args, **kwargs):
        return 'image_member_from_remove'

    def save(self, *args, **kwargs):
        return 'image_member_from_save'
class TaskStub(glance.domain.TaskStub):
    """Task-stub double whose run() does nothing."""

    def run(self, executor):
        """No-op: the stub never executes anything."""
class Task(glance.domain.Task):
    """Task double with inert success/failure hooks."""

    def succeed(self, result):
        """No-op success hook for tests."""

    def fail(self, message):
        """No-op failure hook for tests."""
class TaskRepoStub(object):
    """Task repo double returning canned sentinel strings."""

    def add(self, *args, **kwargs):
        return 'task_from_add'

    def get_task(self, *args, **kwargs):
        return 'task_from_get'

    def list(self, *args, **kwargs):
        return ['tasks_from_list']

    def remove(self, *args, **kwargs):
        return 'task_from_remove'

    def save(self, *args, **kwargs):
        return 'task_from_save'
class TestNotifier(utils.BaseTestCase):
    """Tests that Notifier wires up its oslo.messaging transport/notifier."""
    # NOTE: mock.patch decorators apply bottom-up, so the *lowest* decorator
    # (get_notification_transport) maps to the *first* mock argument.
    @mock.patch.object(oslo_messaging, 'Notifier')
    @mock.patch.object(oslo_messaging, 'get_notification_transport')
    def _test_load_strategy(self,
                            mock_get_transport, mock_notifier,
                            url, driver):
        # Constructing a Notifier should create a transport from cfg.CONF
        # and a messaging Notifier bound to that transport.
        nfier = notifier.Notifier()
        mock_get_transport.assert_called_with(cfg.CONF)
        self.assertIsNotNone(nfier._transport)
        mock_notifier.assert_called_with(nfier._transport,
                                         publisher_id='image.localhost')
        self.assertIsNotNone(nfier._notifier)
    def test_notifier_load(self):
        # Default construction: no explicit url or driver.
        self._test_load_strategy(url=None, driver=None)
    @mock.patch.object(oslo_messaging, 'set_transport_defaults')
    def test_set_defaults(self, mock_set_trans_defaults):
        # An explicit control_exchange is forwarded; the default is 'glance'.
        notifier.set_defaults(control_exchange='foo')
        mock_set_trans_defaults.assert_called_with('foo')
        notifier.set_defaults()
        mock_set_trans_defaults.assert_called_with('glance')
class TestImageNotifications(utils.BaseTestCase):
    """Verify that image repo/proxy operations emit the expected
    notifications (and stay silent when a notification is disabled)."""
    def setUp(self):
        # Wrap a stub image and repo in the notifier proxies under test;
        # FakeNotifier records every emitted notification for inspection.
        super(TestImageNotifications, self).setUp()
        self.image = ImageStub(
            image_id=UUID1, name='image-1', status='active', size=1024,
            created_at=DATETIME, updated_at=DATETIME, owner=TENANT1,
            visibility='public', container_format='ami', virtual_size=2048,
            tags=['one', 'two'], disk_format='ami', min_ram=128,
            min_disk=10, checksum='ca425b88f047ce8ec45ee90e813ada91',
            locations=['http://127.0.0.1'])
        self.context = glance.context.RequestContext(tenant=TENANT2,
                                                     user=USER1)
        self.image_repo_stub = ImageRepoStub()
        self.notifier = unit_test_utils.FakeNotifier()
        self.image_repo_proxy = glance.notifier.ImageRepoProxy(
            self.image_repo_stub, self.context, self.notifier)
        self.image_proxy = glance.notifier.ImageProxy(
            self.image, self.context, self.notifier)
    def test_image_save_notification(self):
        self.image_repo_proxy.save(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.update', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
        # Location data must never leak into notification payloads.
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')
    def test_image_save_notification_disabled(self):
        self.config(disabled_notifications=["image.update"])
        self.image_repo_proxy.save(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
    def test_image_add_notification(self):
        self.image_repo_proxy.add(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.create', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')
    def test_image_add_notification_disabled(self):
        self.config(disabled_notifications=["image.create"])
        self.image_repo_proxy.add(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
    def test_image_delete_notification(self):
        self.image_repo_proxy.remove(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.delete', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
        self.assertTrue(output_log['payload']['deleted'])
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')
    def test_image_delete_notification_disabled(self):
        self.config(disabled_notifications=['image.delete'])
        self.image_repo_proxy.remove(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
    def test_image_get(self):
        # get() results must come back wrapped in an ImageProxy.
        image = self.image_repo_proxy.get(UUID1)
        self.assertIsInstance(image, glance.notifier.ImageProxy)
        self.assertEqual('image_from_get', image.repo)
    def test_image_list(self):
        images = self.image_repo_proxy.list()
        self.assertIsInstance(images[0], glance.notifier.ImageProxy)
        self.assertEqual('images_from_list', images[0].repo)
    def test_image_get_data_should_call_next_image_get_data(self):
        with mock.patch.object(self.image, 'get_data') as get_data_mock:
            self.image_proxy.get_data()
            self.assertTrue(get_data_mock.called)
    def test_image_get_data_notification(self):
        # When the byte count matches image.size, an INFO image.send
        # notification is emitted with sender/receiver details.
        self.image_proxy.size = 10
        data = ''.join(self.image_proxy.get_data())
        self.assertEqual('0123456789', data)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.send', output_log['event_type'])
        self.assertEqual(self.image.image_id,
                         output_log['payload']['image_id'])
        self.assertEqual(TENANT2, output_log['payload']['receiver_tenant_id'])
        self.assertEqual(USER1, output_log['payload']['receiver_user_id'])
        self.assertEqual(10, output_log['payload']['bytes_sent'])
        self.assertEqual(TENANT1, output_log['payload']['owner_id'])
    def test_image_get_data_notification_disabled(self):
        self.config(disabled_notifications=['image.send'])
        self.image_proxy.size = 10
        data = ''.join(self.image_proxy.get_data())
        self.assertEqual('0123456789', data)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
    def test_image_get_data_size_mismatch(self):
        # Sending fewer bytes than image.size claims must produce an
        # ERROR-level image.send notification.
        self.image_proxy.size = 11
        list(self.image_proxy.get_data())
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.send', output_log['event_type'])
        self.assertEqual(self.image.image_id,
                         output_log['payload']['image_id'])
    def test_image_set_data_prepare_notification(self):
        # The generator asserts mid-stream that image.prepare was emitted
        # BEFORE any data was consumed; 'insurance' proves the generator
        # actually ran to completion.
        insurance = {'called': False}
        def data_iterator():
            output_logs = self.notifier.get_logs()
            self.assertEqual(1, len(output_logs))
            output_log = output_logs[0]
            self.assertEqual('INFO', output_log['notification_type'])
            self.assertEqual('image.prepare', output_log['event_type'])
            self.assertEqual(self.image.image_id, output_log['payload']['id'])
            yield 'abcd'
            yield 'efgh'
            insurance['called'] = True
        self.image_proxy.set_data(data_iterator(), 8)
        self.assertTrue(insurance['called'])
    def test_image_set_data_prepare_notification_disabled(self):
        insurance = {'called': False}
        def data_iterator():
            output_logs = self.notifier.get_logs()
            self.assertEqual(0, len(output_logs))
            yield 'abcd'
            yield 'efgh'
            insurance['called'] = True
        self.config(disabled_notifications=['image.prepare'])
        self.image_proxy.set_data(data_iterator(), 8)
        self.assertTrue(insurance['called'])
    def test_image_set_data_upload_and_activate_notification(self):
        # Clearing the log inside the generator discards the prepare
        # event so only upload + activate remain to be checked.
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            yield 'fghij'
        self.image_proxy.set_data(data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(2, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
        output_log = output_logs[1]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.activate', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
    def test_image_set_data_upload_and_activate_notification_disabled(self):
        insurance = {'called': False}
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            yield 'fghij'
            insurance['called'] = True
        self.config(disabled_notifications=['image.activate', 'image.upload'])
        self.image_proxy.set_data(data_iterator(), 10)
        self.assertTrue(insurance['called'])
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
    # The remaining tests raise a specific exception mid-upload and check
    # both the translated HTTP error and the ERROR image.upload event.
    def test_image_set_data_storage_full(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise glance_store.StorageFull(message='Modern Major General')
        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Modern Major General', output_log['payload'])
    def test_image_set_data_value_error(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise ValueError('value wrong')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('value wrong', output_log['payload'])
    def test_image_set_data_duplicate(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise exception.Duplicate('Cant have duplicates')
        self.assertRaises(webob.exc.HTTPConflict,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Cant have duplicates', output_log['payload'])
    def test_image_set_data_storage_write_denied(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise glance_store.StorageWriteDenied(message='The Very Model')
        self.assertRaises(webob.exc.HTTPServiceUnavailable,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('The Very Model', output_log['payload'])
    def test_image_set_data_forbidden(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise exception.Forbidden('Not allowed')
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Not allowed', output_log['payload'])
    def test_image_set_data_not_found(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise exception.NotFound('Not found')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Not found', output_log['payload'])
    def test_image_set_data_HTTP_error(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise webob.exc.HTTPError('Http issue')
        self.assertRaises(webob.exc.HTTPError,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Http issue', output_log['payload'])
    def test_image_set_data_error(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise exception.GlanceException('Failed')
        self.assertRaises(exception.GlanceException,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Failed', output_log['payload'])
class TestImageMemberNotifications(utils.BaseTestCase):
    """Verify image-member repo operations emit the expected
    image.member.* notifications with a complete payload."""
    def setUp(self):
        super(TestImageMemberNotifications, self).setUp()
        self.context = glance.context.RequestContext(tenant=TENANT2,
                                                     user=USER1)
        self.notifier = unit_test_utils.FakeNotifier()
        self.image = ImageStub(
            image_id=UUID1, name='image-1', status='active', size=1024,
            created_at=DATETIME, updated_at=DATETIME, owner=TENANT1,
            visibility='public', container_format='ami',
            tags=['one', 'two'], disk_format='ami', min_ram=128,
            min_disk=10, checksum='ca425b88f047ce8ec45ee90e813ada91',
            locations=['http://127.0.0.1'])
        self.image_member = glance.domain.ImageMembership(
            id=1, image_id=UUID1, member_id=TENANT1, created_at=DATETIME,
            updated_at=DATETIME, status='accepted')
        self.image_member_repo_stub = ImageMemberRepoStub()
        self.image_member_repo_proxy = glance.notifier.ImageMemberRepoProxy(
            self.image_member_repo_stub, self.image,
            self.context, self.notifier)
        self.image_member_proxy = glance.notifier.ImageMemberProxy(
            self.image_member, self.context, self.notifier)
    def _assert_image_member_with_notifier(self, output_log, deleted=False):
        # Shared payload check: membership fields plus ISO-formatted
        # timestamps; the deleted flag/date only appear for removals.
        self.assertEqual(self.image_member.member_id,
                         output_log['payload']['member_id'])
        self.assertEqual(self.image_member.image_id,
                         output_log['payload']['image_id'])
        self.assertEqual(self.image_member.status,
                         output_log['payload']['status'])
        self.assertEqual(timeutils.isotime(self.image_member.created_at),
                         output_log['payload']['created_at'])
        self.assertEqual(timeutils.isotime(self.image_member.updated_at),
                         output_log['payload']['updated_at'])
        if deleted:
            self.assertTrue(output_log['payload']['deleted'])
            self.assertIsNotNone(output_log['payload']['deleted_at'])
        else:
            self.assertFalse(output_log['payload']['deleted'])
            self.assertIsNone(output_log['payload']['deleted_at'])
    def test_image_member_add_notification(self):
        self.image_member_repo_proxy.add(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.member.create', output_log['event_type'])
        self._assert_image_member_with_notifier(output_log)
    def test_image_member_add_notification_disabled(self):
        self.config(disabled_notifications=['image.member.create'])
        self.image_member_repo_proxy.add(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
    def test_image_member_save_notification(self):
        self.image_member_repo_proxy.save(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.member.update', output_log['event_type'])
        self._assert_image_member_with_notifier(output_log)
    def test_image_member_save_notification_disabled(self):
        self.config(disabled_notifications=['image.member.update'])
        self.image_member_repo_proxy.save(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
    def test_image_member_delete_notification(self):
        self.image_member_repo_proxy.remove(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.member.delete', output_log['event_type'])
        self._assert_image_member_with_notifier(output_log, deleted=True)
    def test_image_member_delete_notification_disabled(self):
        self.config(disabled_notifications=['image.member.delete'])
        self.image_member_repo_proxy.remove(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
    def test_image_member_get(self):
        image_member = self.image_member_repo_proxy.get(TENANT1)
        self.assertIsInstance(image_member, glance.notifier.ImageMemberProxy)
        self.assertEqual('image_member_from_get', image_member.repo)
    def test_image_member_list(self):
        image_members = self.image_member_repo_proxy.list()
        self.assertIsInstance(image_members[0],
                              glance.notifier.ImageMemberProxy)
        self.assertEqual('image_members_from_list', image_members[0].repo)
class TestTaskNotifications(utils.BaseTestCase):
"""Test Task Notifications work"""
    def setUp(self):
        """Build task fixtures, wrap them in notifier proxies, and freeze
        timeutils.utcnow for deterministic timestamps."""
        super(TestTaskNotifications, self).setUp()
        task_input = {"loc": "fake"}
        self.task_stub = TaskStub(
            task_id='aaa',
            task_type='import',
            status='pending',
            owner=TENANT2,
            expires_at=None,
            created_at=DATETIME,
            updated_at=DATETIME,
        )
        self.task = Task(
            task_id='aaa',
            task_type='import',
            status='pending',
            owner=TENANT2,
            expires_at=None,
            created_at=DATETIME,
            updated_at=DATETIME,
            task_input=task_input,
            result='res',
            message='blah'
        )
        self.context = glance.context.RequestContext(
            tenant=TENANT2,
            user=USER1
        )
        self.task_repo_stub = TaskRepoStub()
        self.notifier = unit_test_utils.FakeNotifier()
        self.task_repo_proxy = glance.notifier.TaskRepoProxy(
            self.task_repo_stub,
            self.context,
            self.notifier
        )
        self.task_proxy = glance.notifier.TaskProxy(
            self.task,
            self.context,
            self.notifier
        )
        self.task_stub_proxy = glance.notifier.TaskStubProxy(
            self.task_stub,
            self.context,
            self.notifier
        )
        # Pin utcnow so deleted_at comparisons in the tests are stable;
        # the patch is undone in tearDown.
        self.patcher = mock.patch.object(timeutils, 'utcnow')
        mock_utcnow = self.patcher.start()
        mock_utcnow.return_value = datetime.datetime.utcnow()
    def tearDown(self):
        """Undo the utcnow patch started in setUp."""
        super(TestTaskNotifications, self).tearDown()
        self.patcher.stop()
    def test_task_create_notification(self):
        # Adding a task stub must emit exactly one INFO task.create event
        # carrying the task id and ISO-formatted timestamps.
        self.task_repo_proxy.add(self.task_stub_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('task.create', output_log['event_type'])
        self.assertEqual(self.task.task_id, output_log['payload']['id'])
        self.assertEqual(
            timeutils.isotime(self.task.updated_at),
            output_log['payload']['updated_at']
        )
        self.assertEqual(
            timeutils.isotime(self.task.created_at),
            output_log['payload']['created_at']
        )
        # Location data must never leak into notification payloads.
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')
    def test_task_create_notification_disabled(self):
        # With task.create disabled, adding a task emits nothing.
        self.config(disabled_notifications=['task.create'])
        self.task_repo_proxy.add(self.task_stub_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
    def test_task_delete_notification(self):
        # utcnow is frozen in setUp, so isotime() here equals the
        # deleted_at value stamped during remove().
        now = timeutils.isotime()
        self.task_repo_proxy.remove(self.task_stub_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('task.delete', output_log['event_type'])
        self.assertEqual(self.task.task_id, output_log['payload']['id'])
        self.assertEqual(
            timeutils.isotime(self.task.updated_at),
            output_log['payload']['updated_at']
        )
        self.assertEqual(
            timeutils.isotime(self.task.created_at),
            output_log['payload']['created_at']
        )
        self.assertEqual(
            now,
            output_log['payload']['deleted_at']
        )
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')
    def test_task_delete_notification_disabled(self):
        # With task.delete disabled, removing a task emits nothing.
        self.config(disabled_notifications=['task.delete'])
        self.task_repo_proxy.remove(self.task_stub_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
def test_task_run_notification(self):
with mock.patch('glance.async_.TaskExecutor') as mock_executor:
executor = mock_executor.return_value
executor._run.return_value = mock.Mock()
self.task_proxy.run(executor=mock_executor)
output_logs = self.notifier.get_logs()
self.assertEqual(1, | |
"""This module contains unit tests for :mod:`~prody.select`."""
import os
import os.path
import inspect
import numpy as np
from numpy.testing import *
from prody import *
from prody import LOGGER
from prody.tests import unittest
from prody.tests.datafiles import *
from prody.atomic.atommap import DUMMY
# Python 2/3 compatibility: alias the lazy xrange to range where it exists.
try:
    range = xrange
except NameError:
    pass
# Silence prody's selection debugging and logging for the test run.
prody.atomic.select.DEBUG = False
LOGGER.verbosity = 'none'
# Absolute directory containing this test module.
TESTS_PATH = os.path.abspath(os.path.split(inspect.getfile(
    inspect.currentframe()))[0])
# If a selection string is paired with None, SelectionError is expected.
# If two selection strings are paired, they must select exactly the same
# set of atoms.  Otherwise the number is the count of atoms the string is
# expected to select.
pdb3mht = prody.parsePDB(pathDatafile('pdb3mht.pdb'), secondary=True)
SELECTION_TESTS = {'pdb3mht':
{'n_atoms': len(pdb3mht),
'ag': pdb3mht,
'all': pdb3mht.all,
'atommap': AtomMap(pdb3mht, [DUMMY] + list(range(1500)) + [DUMMY] +
list(range(1500, len(pdb3mht))) + [DUMMY]),
'test_flags': [('none', 0),
('all', 3211),
('acidic', 334),
('acyclic', 2040),
('aliphatic', 821),
('aromatic', 475),
('at', 0),
('basic', 450),
('buried', 944),
('cg', 0),
('charged', 784),
('cyclic', 566),
('heme', 0),
('hydrophobic', 999),
('ion', 0),
('large', 1629),
('lipid', 0),
('medium', 689),
('neutral', 1822),
('nucleic', 509),
('nucleotide', 509),
('nucleobase', 0),
('nucleoside', 0),
('polar', 1607),
('protein', 2606, 'aminoacid'),
('stdaa', 2606),
('nonstdaa', 0),
('purine', 0),
('pyrimidine', 0),
('small', 288),
('sugar', 0),
('surface', 1662),
('water', 70),
('hetero', 96),
('hetatm', 96),
('calpha', 327, 'ca'),
('backbone', 1308, 'bb'),
('backbonefull', 1309, 'bbfull'),
('sidechain', 1297, 'sc'),
('carbon', 1920),
('hydrogen', 0),
('noh', 3211),
('nitrogen', 542),
('oxygen', 711),
('sulfur', 14),
('extended', 503),
('helix', 763),
('helix310', 118),
('turn', 0),
('bridge', 0),
('bend', 0),
('coil', 1222),],
'test_without_and': [
('coil protein', 1222),
('sidechain sc protein', 1297),
('bbfull bb', 1308),
('(charged basic)', 450),
('(protein nucleic)', 0),
('noh hetero water', 70, 'water hetero noh'),
('ca occupancy > 0', 327, 'occupancy > 0 ca'),
('ca occupancy - 0 > 0', 327, 'occupancy - 0 > 0 ca'),
('ca occupancy - 0 > 0 + 0', 327,
'occupancy - 0 > 0 + 0 ca'),
('occupancy > ca 0', None),
('noh hetero (water)', 70),
('noh hetero not (water)', 26),
('(water) hetero', 70),
('ca abs(beta) = beta + abs(0)', 327,
'abs(beta) = beta + abs(0) ca'),],
'test_unintended': [
('abs beta = beta', 3211, 'abs (beta) = beta')],
'test_string': [
('name P', 24),
('name P CA', 352),
('name `A 1`', 0),
('chain C', 248),
('chain x', 0),
('chain x y', 0),
('chain x y z', 0),
('chain x y z C', 248),
('chain C D', 521),
('chain CD', 0),
('resname DG', 132),
('resname DG ALA', 212),
('altloc A', 0),
('altloc _', 3211),
('secondary H', 763, 'helix'),
('secondary H E', 1266),
('secondary _', 605),
('segment _', 3211),],
'test_integer': [
('index 10 20 10000', 2),
('serial 0', 0),
('serial 1 2', 2),
('resnum 0', 0),
('resnum 100 105', 13),
('resid 0', 0),
('resid 100 105', 13),
('resid 100 A 105', 13),
('fragindex 0', None),
('fragment 0', None),],
'test_range': [
('index 0:10', 10),
('index 0to10', 11, 'index 0 to 10'),
('serial 0:10:2', 4),
('serial 0:10:10', 0),
('resnum 10to15', 49),
('resnum 10:16:1', 49),
('resnum `-3:16:1`', 125),
('resid 10to15', 49),
('resid 10:16:1', 49),
('x `-10:20`', 673, 'x `-10 to 20`'),
('x 0:20:1', 0),
('beta 13.02:13.01', None)],
'test_float': [
('beta 5.0 41.15 11.85', 2),
('occupancy 1.0', 3211),
('x 6.665', 1),
('y 69.99 13.314', 2),
('z 115.246 45.784', 2),
('charge 0', 0),
('mass 1', 0),
('radius 0', None),
('beta "1."', 0),
('beta = "1."', None),],
'test_comparisons': [
('x = -51.659', 1),
('x != -51.659', 3210),
('z >= 82.813', 1670),
('z < 82.813', 1541),
('beta > 10', 2874),
('beta < 10', 336),
('occupancy > 0.999999', 3211),
('-10 <= x < 0', 557, '-10 <= x and x < 0'),
('11 > 10', None),
('radius > 10', None),
('chain = A', None),
('x x < 1', None),
('name < 1', None),],
'test_operation': [
('x ** 2 < 10', 238),
('x ** 2 ** 2 ** 2 < 10', 87),
('x ** (+2 ** (+2 ** +2)) < 10', 87),
('occupancy % 2 == 1', 3211),
('x**2 + y**2 + z**2 < 10000', 1975),],
'test_function': [
('sqrt(x**2 + y**2 + z**2) < 100', 1975,
'x**2 + y**2 + z**2 < 10000'),
('sqrt(x**2 + y**2 + z**2) == '
'(x**2 + y**2 + z**2) ** 0.5', 3211),
('beta % 3 < 1', 1070),
('beta % 4 % 3 < 1', 1530),
('ceil(beta) == 10', 60),
('floor(beta) == 10', 58),
('abs(x) == sqrt(sq(x))', 3211),
('sq(x-5)+sq(y+4)+sq(z) > sq(100)', 1444),
('1 > sq(occ)', None),
('sq(x x) > 1', None),],
'test_composite': [
('chain x y z C and x 10', 0),
('resnum `1` `2`', 16, 'resnum 1 2'),
('same residue as within 4 of resname SAH', 177),
('name CA and same residue as within 4 of resname SAH', 20),
('water and within 5 of not protein', 70),
('backbone and sqrt((x - 25)**2 + (y - 74)**2 + '
'(z - 13)**2) <= 500', 1308),
('(not resname SAH) and (protein and name CA) or '
'(nucleic and name P)', 351,
'(protein and name CA) or (nucleic and name P)'),
('protein and (backbone or name H)', 1308),
('same residue as within 4 of and resname SAH', None),
('protein and name CA CB and same residue as '
'((x+21.2)**2 + (y-35.9)**2 + (z-80.0)**2)**0.5 < 10',
78, 'protein and name CA CB and same residue as '
'within 10 of center', {'center': np.array([21.2, 35.9, 80.0])})],
'test_within': [
('within 10 of index 0', 72),
('exwithin 100 of index 0', 3210),
('exwithin 4 of resname SAH', 61),
('(within 4 of water) and not water', 534, 'exwithin 4 of water'),
('within 5 of within 5 of within 5 of index 0', 135),
('exwithin 5 of exwithin 5 of exwithin 5 of index 0', 99),
('within 1 of pdb', 3211, None, {'pdb': pdb3mht}),
('exwithin 1 of pdb', 0, None, {'pdb': pdb3mht}),
('exwithin 1 of ag', None, None, {'ag': AtomGroup()}),
('within 100 of index 10000', 0),],
'test_sameas': [
('same residue as index 0', 22),
('same chain as index 0', 248),
('same segment as index 0', 3211),
('same residue as resname DG ALA', 212),
('same chain as chain C', 248),
('same residue as chain X', 0),
('same none as chain C', None),
('same residue as same residue as same residue as index 0', 22,
'resindex 0')],
'test_regexp': [
('resname "S.."', 122),
('name "C.*"', 1920),
('name ".*\'"', 208),
('name "C(A|B)"', 628),
('name "C((A|B)"', None),],
'test_specialchar': [
('altloc ` `', 3211),
('name A` `CA`', 328),
('name `A``', 0),
('z `+100.291`', 1),],
'test_logical': [
('name or name', None),
('name and name', None),
('name CA and name CA', 328),
('name CA or name CA', 328),
('index 0 or index 1 ', 2),
('not not not not index 1', 1),
('index 0 or index 1 or index 2', 3, 'index 0 1 2'),
('index 0 or index 1 or index 2 or index 4', 4, 'index 0 1 2 4'),
('index 0 and index 1 ', 0),
('index < 50 and index < 5', 5, 'index < 5'),
('index < 50 and index < 25 and index < 5', 5),
('index < 5 and index < 25 and index < 50', 5),
('index 0 to 5 and index 0 to 25 and index 0 to 50', 6),
('index < 5 and index < 25 and index < 50 or index < 50 or index < 5',
50),],
'test_kwargs': [
('within 100 of origin', 1975, None, {'origin': np.zeros(3)}),
('within 100 of origin', 1975, None, {'origin': np.zeros((1, 3))}),
('within 100 of origin', 1975, None, {'origin': np.zeros((10, 3))}),
('within 100 of origin', 1975, None, {'origin': np.zeros((50, 3))}),
('within 100 of none', None, None, {'none': np.zeros((50, 3))}),],
'test_equivalent':[
('chain C', 248, 'not not chain C'),
('chain C', 248, 'not not not not chain C'),
('nucleic', 509, 'nucleoside or nucleotide or nucleobase'),],
'test_invalid':[
('chain C and and chain C', None),
('chain C or or chain D', None),
('chain C or not or chain D', None),
('chain C + 3', None),
('sqr(x-5)+sqr(y+4)+sqr(z) > sqr(100)', None),
('x > sq(calpha)', None),
('x > sq(name CA and resname ALA)', None),
('resname ALA and +1', None)],
'test_userdata':[
('temp < 10', 336, 'beta < 10'),
('temp < 10 and chain | |
# repo: jay51/pyscript -- file: test/test_all.py (0 GitHub stars)
import pytest
from pjscript import *
from io import StringIO
import sys
example1 = """
var x = 2;
log("x: ", x);
var y = x++ * 2;
log("y: ", y);
log("x: ", x);
var x = 2;
log("x: ", x);
var y = ++x * 2;
log("y: ", y);
log("x: ", x);
"""
example2 = """
var x = 0 + add(2, 3);
var x = x + add(2, 3);
var x = (x * 2) / 2;
log(x); /* should log 10*/
"""
example3 = """
var x = 2;
var y = x >= 2;
log("y: ", y);
log("x: ", x);
"""
example4 = """
function printme(st1, st2, st3){
log(st1);
log(st2);
log(st3);
return 1;
};
var result = printme(1, 2, 3);
log(result);
"""
example5 = """
function loop(){
var i = 0;
for(; i < 5; i++){
log(i);
};
return null;
};
log(loop());
"""
example6 = """
var i = 0;
if(i == 1){
log("if");
} else if (i == 0) {
log("else if");
} else {
log("else");
};
var y = 0;
if (y == 0){
log("y is:", y);
};
"""
example7 = """
var numbers = [1, 2, 3];
log(numbers[1]);
"""
class TestLexer:
    """Check that each example program tokenizes into the exact expected
    sequence of token types (terminated by EOF)."""
    def test_expression(self):
        # Helpers lack the test_ prefix, so pytest does not collect them
        # individually; run them from this aggregate test.
        self.math_operators()
        self.post_pre_inc_dec()
        self.comparison_operators()
    def post_pre_inc_dec(self):
        # fmt: off
        expected_result = [
            "var", "IDENTIFIER", "=", "NUMBER", ";", "IDENTIFIER", "(", "STRING", ",", "IDENTIFIER", ")",
            ";", "var", "IDENTIFIER", "=", "IDENTIFIER", "++", "*", "NUMBER", ";", "IDENTIFIER", "(", "STRING",
            ",", "IDENTIFIER", ")", ";", "IDENTIFIER", "(", "STRING", ",", "IDENTIFIER", ")", ";", "var", "IDENTIFIER",
            "=", "NUMBER", ";", "IDENTIFIER", "(", "STRING", ",", "IDENTIFIER", ")", ";", "var", "IDENTIFIER", "=", "++",
            "IDENTIFIER", "*", "NUMBER", ";", "IDENTIFIER", "(", "STRING", ",", "IDENTIFIER", ")", ";", "IDENTIFIER", "(", "STRING", ",", "IDENTIFIER", ")", ";", "EOF"
        ]
        # fmt: on
        # Lexer is iterable: compare each emitted token type in order.
        lexer = Lexer(example1)
        for idx, tok in enumerate(lexer):
            assert tok.type == expected_result[idx]
    def math_operators(self):
        # fmt: off
        expected_result = [
            "var", "IDENTIFIER", "=", "NUMBER", "+",
            "IDENTIFIER", "(", "NUMBER", ",", "NUMBER", ")", ";",
            "var", "IDENTIFIER", "=", "IDENTIFIER", "+", "IDENTIFIER",
            "(", "NUMBER", ",", "NUMBER", ")", ";", "var", "IDENTIFIER",
            "=", "(", "IDENTIFIER", "*", "NUMBER", ")", "/", "NUMBER", ";",
            "IDENTIFIER", "(", "IDENTIFIER", ")", ";", "EOF"
        ]
        # fmt: on
        lexer = Lexer(example2)
        for idx, tok in enumerate(lexer):
            assert tok.type == expected_result[idx]
    def comparison_operators(self):
        # fmt: off
        expected_result = [
            "var", "IDENTIFIER", "=", "NUMBER", ";", "var", "IDENTIFIER", "=", "IDENTIFIER", ">=",
            "NUMBER", ";", "IDENTIFIER", "(", "STRING", ",", "IDENTIFIER", ")", ";", "IDENTIFIER", "(",
            "STRING", ",", "IDENTIFIER", ")", ";", "EOF"
        ]
        # fmt: on
        lexer = Lexer(example3)
        for idx, tok in enumerate(lexer):
            assert tok.type == expected_result[idx]
    def test_function(self):
        # fmt: off
        expected_result = [
            "function", "IDENTIFIER", "(", "IDENTIFIER", ",", "IDENTIFIER", ",", "IDENTIFIER", ")",
            "{", "IDENTIFIER", "(", "IDENTIFIER", ")", ";", "IDENTIFIER", "(", "IDENTIFIER", ")", ";",
            "IDENTIFIER", "(", "IDENTIFIER", ")", ";", "return", "NUMBER", ";", "}", ";", "var", "IDENTIFIER",
            "=", "IDENTIFIER", "(", "NUMBER", ",", "NUMBER", ",", "NUMBER", ")", ";", "IDENTIFIER", "(", "IDENTIFIER", ")", ";", "EOF"
        ]
        # fmt: on
        lexer = Lexer(example4)
        for idx, tok in enumerate(lexer):
            assert tok.type == expected_result[idx]
    def test_forloop(self):
        # fmt: off
        expected_result = [
            "function", "IDENTIFIER", "(", ")", "{", "var", "IDENTIFIER", "=", "NUMBER", ";", "for", "(", ";", "IDENTIFIER", "<", "NUMBER",
            ";", "IDENTIFIER", "++", ")", "{", "IDENTIFIER", "(", "IDENTIFIER", ")", ";", "}", ";", "return", "null", ";", "}", ";",
            "IDENTIFIER", "(", "IDENTIFIER", "(",")", ")", ";", "EOF"
        ]
        # fmt: on
        lexer = Lexer(example5)
        for idx, tok in enumerate(lexer):
            assert tok.type == expected_result[idx]
    def test_if_else(self):
        # fmt: off
        expected_result = [
            "var", "IDENTIFIER", "=", "NUMBER", ";", "if", "(", "IDENTIFIER", "==", "NUMBER", ")", "{", "IDENTIFIER",
            "(", "STRING", ")", ";", "}", "else", "if", "(", "IDENTIFIER", "==", "NUMBER", ")", "{", "IDENTIFIER", "(",
            "STRING", ")", ";", "}", "else", "{", "IDENTIFIER", "(", "STRING", ")", ";", "}", ";", "var", "IDENTIFIER", "=",
            "NUMBER", ";", "if", "(", "IDENTIFIER", "==", "NUMBER", ")", "{", "IDENTIFIER", "(", "STRING", ",", "IDENTIFIER", ")", ";", "}", ";", "EOF"
        ]
        # fmt: on
        lexer = Lexer(example6)
        for idx, tok in enumerate(lexer):
            assert tok.type == expected_result[idx]
    def test_array(self):
        # fmt: off
        expected_result = [
            "var", "IDENTIFIER", "=", "[", "NUMBER", ",", "NUMBER", ",", "NUMBER", "]", ";",
            "IDENTIFIER", "(", "IDENTIFIER", "[", "NUMBER", "]", ")", ";", "EOF"
        ]
        # fmt: on
        lexer = Lexer(example7)
        for idx, tok in enumerate(lexer):
            assert tok.type == expected_result[idx]
class TestParser:
    """Check that each example program parses into the expected sequence
    of top-level AST node types."""

    def _assert_node_types(self, source, expected_result):
        # Shared helper: parse ``source`` and compare each top-level AST
        # node's type against ``expected_result`` position by position.
        lexer = Lexer(source)
        parser = Parser(lexer)
        result = list(parser.parse().body)
        for idx, node in enumerate(result):
            assert isinstance(node, expected_result[idx])

    def test_expression(self):
        # The helpers lack the test_ prefix, so pytest does not collect
        # them individually; run each once here.  (The original called
        # comparison_operators() twice — the duplicate was redundant.)
        self.post_pre_inc_dec()
        self.math_operators()
        self.comparison_operators()

    def post_pre_inc_dec(self):
        # example1: post/pre increment inside variable initializers.
        self._assert_node_types(example1, [
            VarDeclaration,
            CallExpression,
            VarDeclaration,
            CallExpression,
            CallExpression,
            VarDeclaration,
            CallExpression,
            VarDeclaration,
            CallExpression,
            CallExpression,
            NoOp,
        ])

    def math_operators(self):
        # example2: arithmetic with nested call expressions.
        self._assert_node_types(example2, [
            VarDeclaration,
            VarDeclaration,
            VarDeclaration,
            CallExpression,
            NoOp,
        ])

    def comparison_operators(self):
        # example3: comparison operator in an initializer.
        self._assert_node_types(example3, [
            VarDeclaration,
            VarDeclaration,
            CallExpression,
            CallExpression,
            NoOp,
        ])

    def test_function(self):
        self._assert_node_types(
            example4, [FuncDeclaration, VarDeclaration, CallExpression, NoOp])

    def test_forloop(self):
        self._assert_node_types(
            example5, [FuncDeclaration, CallExpression, NoOp])

    def test_if_else(self):
        self._assert_node_types(
            example6, [VarDeclaration, IfStmt, VarDeclaration, IfStmt, NoOp])

    def test_array(self):
        # NOTE(review): unlike the other cases this list has no trailing
        # NoOp — presumably example7's parse ends without one; confirm.
        self._assert_node_types(example7, [VarDeclaration, CallExpression])
class TestInterpreter:
def test_expression(self):
    """Run the three expression sub-checks; each one asserts interpreter output."""
    self.post_pre_inc_dec()
    self.math_operators()
    self.comparison_operators()
def post_pre_inc_dec(self):
    """Interpret example1 and verify the increment/decrement output on stdout."""
    expected_result = StringIO()
    print("x: 2", file=expected_result, end=" \n")
    print("y: 4", file=expected_result, end=" \n")
    print("x: 3", file=expected_result, end=" \n")
    print("x: 2", file=expected_result, end=" \n")
    print("y: 6", file=expected_result, end=" \n")
    print("x: 3", file=expected_result, end=" \n")
    tmp_stdout = StringIO()
    sys.stdout = tmp_stdout
    try:
        Interpreter(Parser(Lexer(example1)).parse()).interpret()
    finally:
        # Fix: restore stdout even when interpretation raises, otherwise a
        # failure here silences the output of every subsequent test.
        sys.stdout = sys.__stdout__
    assert tmp_stdout.getvalue() == expected_result.getvalue()
def math_operators(self):
    """Interpret example2 and verify the arithmetic result printed to stdout."""
    expected_result = StringIO()
    print("10", file=expected_result, end=" \n")
    tmp_stdout = StringIO()
    sys.stdout = tmp_stdout
    try:
        Interpreter(Parser(Lexer(example2)).parse()).interpret()
    finally:
        # Fix: restore stdout even when interpretation raises.
        sys.stdout = sys.__stdout__
    assert tmp_stdout.getvalue() == expected_result.getvalue()
def comparison_operators(self):
    """Interpret example3 and verify the comparison results printed to stdout."""
    expected_result = StringIO()
    print("y: True", file=expected_result, end=" \n")
    print("x: 2", file=expected_result, end=" \n")
    tmp_stdout = StringIO()
    sys.stdout = tmp_stdout
    try:
        Interpreter(Parser(Lexer(example3)).parse()).interpret()
    finally:
        # Fix: restore stdout even when interpretation raises.
        sys.stdout = sys.__stdout__
    assert tmp_stdout.getvalue() == expected_result.getvalue()
def test_function(self):
    """Interpret example4 and verify the function-call output on stdout."""
    expected_result = StringIO()
    print("1", file=expected_result, end=" \n")
    print("2", file=expected_result, end=" \n")
    print("3", file=expected_result, end=" \n")
    print("1", file=expected_result, end=" \n")
    tmp_stdout = StringIO()
    sys.stdout = tmp_stdout
    try:
        Interpreter(Parser(Lexer(example4)).parse()).interpret()
    finally:
        # Fix: restore stdout even when interpretation raises.
        sys.stdout = sys.__stdout__
    assert tmp_stdout.getvalue() == expected_result.getvalue()
def test_forloop(self):
    """Interpret example5 and verify the loop output (0..4 then null)."""
    expected_result = StringIO()
    print("0", file=expected_result, end=" \n")
    print("1", file=expected_result, end=" \n")
    print("2", file=expected_result, end=" \n")
    print("3", file=expected_result, end=" \n")
    print("4", file=expected_result, end=" \n")
    print("null", file=expected_result, end=" \n")
    tmp_stdout = StringIO()
    sys.stdout = tmp_stdout
    try:
        Interpreter(Parser(Lexer(example5)).parse()).interpret()
    finally:
        # Fix: restore stdout even when interpretation raises.
        sys.stdout = sys.__stdout__
    assert tmp_stdout.getvalue() == expected_result.getvalue()
def test_if_else(self):
    """Interpret example6 and verify the branch taken is printed."""
    expected_result = StringIO()
    print("else if", file=expected_result, end=" \n")
    print("y is: 0", file=expected_result, end=" \n")
    tmp_stdout = StringIO()
    sys.stdout = tmp_stdout
    try:
        Interpreter(Parser(Lexer(example6)).parse()).interpret()
    finally:
        # Fix: restore stdout even when interpretation raises.
        sys.stdout = sys.__stdout__
    assert tmp_stdout.getvalue() == expected_result.getvalue()
def test_operators_order(self):
    """Verify operator precedence (||, &&, !, + vs >) inside if-conditions."""
    example = """
    if( 1 == 6 || 3 == 3 ){
        log("yes");
    };
    if( 1 + 6 > 3 + 3 ){
        log("yes");
    };
    if( 1 + 6 > 3 + 3 && 1 == 1 ){
        log("yes");
    };
    if(!(1 != 1)) {
        log("yes");
    };
    """
    expected_result = StringIO()
    print("yes", file=expected_result, end=" \n")
    print("yes", file=expected_result, end=" \n")
    print("yes", file=expected_result, end=" \n")
    print("yes", file=expected_result, end=" \n")
    tmp_stdout = StringIO()
    sys.stdout = tmp_stdout
    try:
        Interpreter(Parser(Lexer(example)).parse()).interpret()
    finally:
        # Fix: restore stdout even when interpretation raises.
        sys.stdout = sys.__stdout__
    assert tmp_stdout.getvalue() == expected_result.getvalue()
def test_array(self):
expected_result = StringIO()
print("2", file=expected_result, end=" \n")
tmp_stdout = StringIO()
sys.stdout = tmp_stdout
lexer = Lexer(example7)
tree = Parser(lexer).parse()
interpreter = Interpreter(tree)
interpreter.interpret()
sys.stdout = sys.__stdout__
assert | |
"""Main Flask code handling REST API"""
import base64
import json
import os
import re
import rq
import saml2
import threading
import time
import uuid
import crackq
from crackq.db import db
from crackq.logger import logger
from crackq.models import User, Templates, Tasks
from crackq import crackqueue, hash_modes, auth
from crackq.validator import FileValidation as valid
from crackq.conf import hc_conf
from datetime import datetime
from flask import (
abort,
Flask,
jsonify,
redirect,
request,
session)
from flask.views import MethodView
from flask_bcrypt import Bcrypt
from flask_seasurf import SeaSurf
from flask_login import (
LoginManager,
login_required,
login_user,
logout_user,
current_user)
from functools import wraps
from marshmallow import Schema, fields, validate, ValidationError
from marshmallow.validate import Length, Range
from operator import itemgetter
from pathlib import Path
from pypal import pypal
from redis import Redis
from rq import Queue
from rq.serializers import JSONSerializer
from saml2 import BINDING_HTTP_POST
from saml2 import sigver
from sqlalchemy.orm import exc
# set perms
# Restrictive umask so every file this process creates is owner-only (0600/0700).
os.umask(0o077)
# Setup Flask App
login_manager = LoginManager()
app = Flask(__name__)
csrf = SeaSurf()  # CSRF protection middleware
bcrypt = Bcrypt(app)  # password hashing helper
CRACK_CONF = hc_conf()  # parsed CrackQ configuration (rules, masks, wordlists, ...)
# Define HTTP messages
ERR_INVAL_JID = {'msg': 'Invalid Job ID'}
ERR_METH_NOT = {'msg': 'Method not supported'}
ERR_BAD_CREDS = {"msg": "Bad username or password"}
class StringContains(validate.Regexp):
    """
    Inverted marshmallow regex validator: reject values the pattern matches.

    Unlike validate.Regexp (which requires a match), this raises
    ValidationError whenever the supplied regex matches anywhere in the
    value, and returns the value unchanged otherwise.
    """
    default_message = 'Invalid input for this field.'

    def __call__(self, value):
        # findall() is non-empty exactly when the banned pattern occurs.
        matches = self.regex.findall(value)
        if matches:
            raise ValidationError(self._format_error(value))
        return value
class batch_schema(Schema):
    """
    Child schema for parse_json_schema to handle nested dict fields
    """
    # UUID of the queued job the entry refers to
    job_id = fields.UUID()
    # Integer position — presumably the job's place in the batch/queue; confirm
    place = fields.Int()
class job_schema(Schema):
    """
    Child schema for parse_json_schema to handle jobs nested dict fields

    Note: every StringContains validator here is a *blocklist* — it raises
    when its character-class pattern matches, i.e. when the value contains
    a character outside the allowed set.
    """
    # Custom error text keyed by field name
    error_messages = {
        "name": "Invalid input characters",
        "username": "Invalid input characters",
    }
    # NOTE(review): allow_none=None (rather than True/False) looks like a typo;
    # marshmallow treats it as falsy — confirm intended behaviour.
    job_id = fields.UUID(allow_none=None)
    task_id = fields.UUID(allow_none=None)
    # Hashes to crack; reject characters outside the allowed set
    hash_list = fields.List(fields.String(validate=StringContains(
        r'[^A-Za-z0-9\*\$\@\/\\\.\:\-\_\+\.\+\~]')),
        allow_none=False, error_messages=error_messages)
    # Wordlist keys (as configured in CRACK_CONF), not file paths
    wordlist = fields.Str(allow_none=True, validate=[StringContains(r'[^A-Za-z0-9\_\-]'),
                                                     Length(min=1, max=60)])
    wordlist2 = fields.Str(allow_none=True, validate=[StringContains(r'[^A-Za-z0-9\_\-]'),
                                                      Length(min=1, max=60)])
    # hashcat attack mode (0-9)
    attack_mode = fields.Int(allow_none=True, validate=Range(min=0, max=9))
    rules = fields.List(fields.String(validate=[StringContains(r'[^A-Za-z0-9\_\-]'),
                                                Length(min=1, max=60)]),
                        allow_none=True)
    username = fields.Bool(allow_none=True)
    notify = fields.Bool(allow_none=True)
    increment = fields.Bool(allow_none=True)
    disable_brain = fields.Bool(allow_none=True)
    potcheck = fields.Bool(allow_none=True)
    # Increment mode bounds (mask length range)
    increment_min = fields.Int(allow_none=True, validate=Range(min=0, max=20))
    increment_max = fields.Int(allow_none=True, validate=Range(min=0, max=20))
    # hashcat mask string, e.g. ?l?d — only mask placeholder chars allowed
    mask = fields.Str(allow_none=True,
                      validate=StringContains(r'[^aldsu\?0-9a-zA-Z]'))
    mask_file = fields.List(fields.String(validate=[StringContains(r'[^A-Za-z0-9\_\-]'),
                                                    Length(min=1, max=60)]),
                            allow_none=True)
    name = fields.Str(allow_none=True,
                      validate=StringContains(r'[^A-Za-z0-9\_\-\ ]'),
                      error_messages=error_messages)
    # hashcat hash type number
    hash_mode = fields.Int(allow_none=False, validate=Range(min=0, max=65535))
    restore = fields.Int(validate=Range(min=0, max=1000000000000))
    benchmark_all = fields.Bool(allow_none=True)
    # Job timeout in seconds
    timeout = fields.Int(validate=Range(min=1, max=28800000), allow_none=True)
class parse_json_schema(job_schema):
    """
    Top-level schema for validating JSON arguments received by the API.

    Extends job_schema with user/account management fields and the nested
    batch_job / jobs lists. (Fix: the previous docstring was copied from
    get_jobdetails and described an unrelated function.)
    """
    error_messages = {
        "name": "Invalid input characters",
        "username": "Invalid input characters",
    }
    user = fields.Str(allow_none=False, validate=StringContains(r'[^A-Za-z0-9\_\-]'))
    password = fields.Str(allow_none=False,
                          validate=StringContains(r'[^\w\!\@\#\$\%\^\&\*\(\)\-\+\.\,\\\/\]\[\=]'))
    confirm_password = fields.Str(allow_none=True,
                                  validate=[StringContains(r'[^\w\!\@\#\$\%\^\&\*\(\)\-\+\.\,\\\/\]\[\=]'),
                                            Length(min=10, max=60)])
    new_password = fields.Str(allow_none=True,
                              validate=[StringContains(r'[^\w\!\@\#\$\%\^\&\*\(\)\-\+\.\,\\\/\]\[\=]'),
                                        Length(min=10, max=60)])
    email = fields.Str(allow_none=False,
                       validate=StringContains(r'[^\w\@\^\-\+\./]'))
    admin = fields.Bool(allow_none=True)
    policy_check = fields.Bool(allow_none=True)
    complexity_length = fields.Int(validate=Range(min=1, max=48),
                                   allow_none=True)
    admin_list = fields.List(fields.String(validate=StringContains(
        r'[^A-Za-z0-9\*\$\@\/\.\-\_\+]')),
        allow_none=True, error_messages=error_messages)
    batch_job = fields.List(fields.Nested(batch_schema))
    jobs = fields.List(fields.Nested(job_schema))
    # Fix: removed a dangling bare expression `fields.Nested(job_schema)`
    # which created an unassigned field instance and had no effect.
def get_jobdetails(job_details):
    """
    Function to help pull only required information from a specified redis job
    description string.

    job_details: str
        string returned from rq.job.description

    Returns
    ------
    deets_dict: dictionary
        only the specified job details are returned
    """
    deets_dict = {}
    # Benchmark jobs carry a much smaller set of fields than crack jobs
    if 'Benchmark' in job_details:
        deet_match_list = [
            'name',
            'benchmark',
            'benchmark_all'
            ]
    else:
        deet_match_list = [
            'hash_mode',
            'attack_mode',
            'mask',
            'wordlist',
            'wordlist2',
            'rules',
            'name',
            'username',
            'increment',
            'increment_min',
            'increment_max',
            'disable_brain',
            'brain_check',
            'restore']
    ###***make this less ugly
    ###***review stripping here for improvement
    logger.debug('Parsing job details:\n{}'.format(job_details))
    # Process rules list separately as workaround for splitting on comma
    if '[' in job_details:
        ###***add mask_file here when updating to allow list of files
        # Everything between the last '[' and last ']' is the rules list repr
        rules_split = job_details[job_details.rfind('[')+1:job_details.rfind(']')].strip()
        rules_list = [rule.strip().rstrip("'").lstrip("'") for rule in rules_split.split(',')]
    else:
        rules_list = None
    # The kwargs live between the outermost parentheses of the description repr
    deets_split = job_details[job_details.rfind('(')+1:job_details.rfind(')')].split(',')
    for deets in deets_split:
        deet = deets.split('=')[0].strip(' ')
        if deet in deet_match_list:
            # Strip surrounding whitespace and quote characters from the value
            deets_dict[deet] = deets.strip().split('=')[1].strip().rstrip("'").lstrip("'")
    if 'Benchmark' in job_details:
        return deets_dict
    # Map rule file paths back to their configured short names
    if rules_list:
        rule_names = []
        for key, rule in dict(CRACK_CONF['rules']).items():
            if rule in rules_list:
                rule_names.append(key)
        deets_dict['rules'] = rule_names
    else:
        deets_dict['rules'] = None
    # Map the mask file path back to its configured key, if one matches
    if 'mask' in deets_dict:
        if deets_dict['mask']:
            mask = deets_dict['mask']
            for key, mask_file in dict(CRACK_CONF['masks']).items():
                if mask in mask_file:
                    deets_dict['mask'] = key
    # Map wordlist paths back to their configured keys
    if 'wordlist' in deets_dict:
        if deets_dict['wordlist']:
            wordlist = deets_dict['wordlist']
            for key, word in dict(CRACK_CONF['wordlists']).items():
                if wordlist in word:
                    deets_dict['wordlist'] = key
                    break
        else:
            deets_dict['wordlist'] = None
    if 'wordlist2' in deets_dict:
        wordlist = deets_dict['wordlist2']
        for key, word in dict(CRACK_CONF['wordlists']).items():
            if wordlist in word:
                deets_dict['wordlist2'] = key
                break
        else:
            # NOTE(review): reconstructed as a for/else (runs when no configured
            # wordlist matched) — confirm indentation against upstream source.
            deets_dict['wordlist2'] = None
    return deets_dict
def add_jobid(job_id):
    """Add job_id to job_ids column in user table"""
    user = User.query.filter_by(username=current_user.username).first()
    # job_ids is stored as a JSON-encoded list in a text column
    if user.job_ids:
        logger.debug('Current registered job_ids: {}'.format(user.job_ids))
        registered = json.loads(user.job_ids)
    else:
        logger.debug('No job_ids registered with current user')
        registered = None
    logger.debug('Registering new job_id to current user: {}'.format(job_id))
    if not isinstance(registered, list):
        # Nothing (or malformed data) stored yet — start a fresh list
        registered = [job_id]
    elif job_id in registered:
        logger.warning('job_id already registered to user: {}'.format(job_id))
    else:
        registered.append(job_id)
    user.job_ids = json.dumps(registered)
    db.session.commit()
    logger.debug('user.job_ids: {}'.format(user.job_ids))
def del_jobid(job_id):
    """Delete job_id from job_ids column in user table"""
    with crackq.app.app_context():
        # Scan every user: jobs are not indexed by owner
        for user in User.query.all():
            # Cheap substring test on the JSON-encoded column before decoding
            if user.job_ids and job_id in user.job_ids:
                jobs = json.loads(user.job_ids)
                logger.debug('Registered jobs: {}'.format(jobs))
                if isinstance(jobs, list):
                    logger.debug('Unregistering job_id: {}'.format(job_id))
                    if job_id in jobs:
                        jobs.remove(job_id)
                        user.job_ids = json.dumps(jobs)
                        db.session.commit()
                        logger.debug('user.job_ids: {}'.format((user.job_ids)))
                        return True
                    else:
                        # Substring matched but exact ID absent from the list
                        logger.error('Error removing job_id')
                else:
                    logger.debug('Job ID not registered with user')
        # No user owned this job_id
        return False
def check_jobid(job_id):
    """Check user owns the job_id"""
    logger.debug('Checking job_id: {} belongs to user: {}'.format(
        job_id, current_user.username))
    user = User.query.filter_by(username=current_user.username).first()
    # Substring test against the JSON-encoded job_ids column; empty/None
    # column means the user owns no jobs at all.
    return bool(user.job_ids and job_id in user.job_ids)
def check_rules(orig_rules):
    """
    Check provided rules list is sane

    Arguments
    ---------
    orig_rules: list
        List of rules to check

    Returns
    -------
    rules: list, None or False
        List of configured rule file paths, None when no rules were
        supplied, or False if any rule is invalid
    """
    logger.debug('Checking rules valid')
    try:
        if orig_rules is None:
            rules = None
        elif isinstance(orig_rules, list):
            rules = []
            for rule in orig_rules:
                if rule in CRACK_CONF['rules']:
                    logger.debug('Using rules file: {}'.format(CRACK_CONF['rules'][rule]))
                    rules.append(CRACK_CONF['rules'][rule])
                else:
                    logger.debug('Invalid rules provided')
                    rules = False
                    # Fix: stop on the first invalid rule. Previously the loop
                    # continued and a later valid rule crashed with
                    # AttributeError (False.append).
                    break
        else:
            # Fix: a non-list, non-None argument previously fell through to
            # the return with `rules` unbound (UnboundLocalError); treat it
            # as invalid input instead.
            logger.debug('Invalid rules provided')
            rules = False
    except KeyError:
        logger.debug('Invalid rules provided')
        rules = False
    return rules
def check_mask(orig_masks):
    """
    Check provided mask file list is sane

    Arguments
    ---------
    orig_masks: list
        List of mask files to check

    Returns
    -------
    mask_files: list
        List of mask files or False if any are invalid
    """
    logger.debug('Checking mask files are valid')
    try:
        if orig_masks is None:
            mask_file = None
        elif isinstance(orig_masks, list):
            mask_file = []
            for mask in orig_masks:
                if mask in CRACK_CONF['masks']:
                    #mask_name = CRACK_CONF['masks'][mask]
                    logger.debug('Using mask file: {}'.format(mask))
                    mask_file.append(CRACK_CONF['masks'][mask])
                    # NOTE(review): this return sits inside the loop, so only
                    # the FIRST mask in the list is ever validated/returned.
                    # The module comment below suggests this is deliberate for
                    # now — confirm before relying on multi-mask input.
                    return mask_file if len(mask_file) > 0 else None
                else:
                    logger.debug('Invalid mask file provided')
                    return False
        # NOTE(review): a non-list, non-None argument (and the None case)
        # falls through and returns None implicitly.
    except KeyError:
        logger.debug('Invalid mask file provided')
        return False

# this is just set to use the first mask file in the list for now
#mask = mask_file[0] if mask_file else mask
def admin_required(func):
    """Decorator: allow the wrapped view only for authenticated admin users."""
    @wraps(func)
    def wrap(*args, **kwargs):
        """Decorator to check user is admin"""
        try:
            logger.debug('User authenticating {}'.format(current_user.username))
            # Anonymous users lack username/is_admin, raising AttributeError
            # (presumably flask-login's anonymous user object — confirm)
            if current_user.is_admin:
                return func(*args, **kwargs)
        except AttributeError as err:
            logger.debug(err)
            logger.debug('Anonymous user cant be admin')
        # Both non-admin and anonymous callers end up here
        return abort(401)
    return wrap
def create_user(username, email=None, password=None):
    """
    Adds a new user to the SQLAlchemy datastore

    Fix: the password keyword default had been mangled to the invalid
    placeholder ``<PASSWORD>`` (a syntax error); restored to ``None``.

    Arguments
    ---------
    username: string
        Username to create
    email: string, optional
        Email address for the new account
    password: string, optional
        Password value to store — presumably a bcrypt hash; confirm callers

    Returns
    -------
    result: boolean
        True if the user was created, False if the username already exists
    """
    if User.query.filter_by(username=username).first():
        logger.debug('User already exists')
        return False
    else:
        user = User(username=username, email=email,
                    password=password, is_admin=False)
        db.session.add(user)
        db.session.commit()
        logger.debug('New user added')
        return True
def del_user(user_id):
    """
    Delete a user from the SQLAlchemy datastore

    Arguments
    ---------
    user_id: uuid
        User ID number for the user to delete

    Returns
    -------
    result: boolean
        True/False indicating status of delete operation
    """
    try:
        # first() yields None for an unknown id; deleting None raises
        # UnmappedInstanceError, which we translate to a False result.
        target = User.query.filter_by(id=user_id).first()
        db.session.delete(target)
        db.session.commit()
    except (AttributeError, exc.UnmappedInstanceError):
        return False
    return True
def email_check(email):
    """
    Simple regex check string is an email address

    Arguments
    --------
    email: str
        email address string to check

    Returns
    -------
    match: boolean
        true/false for valid email match
    """
    # Fix: the TLD group previously allowed only 2-3 characters ({2,3}),
    # rejecting valid addresses such as user@example.info; allow 2 or more.
    regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,})+$'
    if re.search(regex, email):
        logger.debug('Email address found')
        return True
    else:
        return False
def del_job(job):
    """
    Function to delete a job. Used to spawn a thread
    and wait for jobs to cleanup hashcat proc
    """
    # Grace period before deletion — presumably gives the hashcat process
    # time to terminate cleanly; the 22s value looks empirical, confirm.
    time.sleep(22)
    logger.debug('Thread: deleting job')
    job.delete()
    # Unregister the job from whichever user owned it
    del_jobid(job.id)
def write_template(template_dict, temp_file):
    """
    Write a CrackQ json state file
    This could be a job template or a current
    job state file.

    Arguments
    ---------
    template_dict: dict
        JSON job details in dict format
    temp_file: Path object
        File path location to store the file

    Returns
    -------
    bool
        True on success, False when the file already exists
    """
    logger.debug('Writing template/status file')
    try:
        # 'x' mode creates exclusively, raising if the file already exists
        with open(temp_file, 'x') as fh_temp:
            fh_temp.write(json.dumps(template_dict))
    except FileExistsError as err:
        logger.debug('Status/Template file already exists {}'.format(err))
        return False
    return True
@login_manager.user_loader
def load_user(user_id):
    """user_loader callback required by Flask login-manager"""
    # NOTE(review): the session key is looked up against username rather than
    # the numeric id — presumably the login flow stores the username; confirm.
    return User.query.filter_by(username=user_id).first()
class Sso(MethodView):
"""
SAML2 Single Sign On Class
Login class handles saml | |
info_poi = info_poi[:int(num_poi)]
return jsonify(result = info_poi, status = query_success, timestamp=time.time(), log_time=task_id)
#-----------------------------------------------------------------
#Get points that are a certain travel time away from a point selected by ID, can filter by category and/or concelho and choose a
#profile (driving (default option) or walking)
#Get points that are a certain travel time away from a point selected by ID, can filter by category and/or concelho and choose a
#profile (driving (default option) or walking)
def get_poi_trip_time_b(poi_id, time2, task_id, category=None, concelho=None, profile=None, num_poi=None, order=None):
    """Return POIs reachable from POI *poi_id* within *time2*, using precomputed
    pairwise distances from the pois_distances table.

    SECURITY NOTE(review): the SQL below is assembled by string concatenation
    from request-supplied values (poi_id, time2, category, concelho, num_poi);
    this is injectable — switch to bound query parameters.
    """
    #Query to get the distances of trips that start from the request point
    query_ini = 'SELECT pois.id, pois.poi_name, pois.poi_lat, pois.poi_lon, category.categ_name_pt, concelhos.conc_name, pois.poi_descri_pt_short, pois.poi_source'
    query_from = ' FROM pois, category, concelhos, pois_distances'
    query_where = ' WHERE pois.category_id = category.categ_id AND pois.concelho_id = concelhos.conc_id AND pois.id = pois_distances.end_poi_id AND pois_distances.start_poi_id = ' +str(poi_id)
    #set the order of the query results, based on the user input
    if order == 'score':
        query_orderby = ' ORDER BY pois.poi_score DESC'
    elif order == 'time':
        query_orderby = ' ORDER BY pois_distances.trip_duration'
    elif order == 'dist':
        query_orderby = ' ORDER BY pois_distances.trip_distance'
    else:
        query_orderby = ''
    #checks existance of category and concelho/municipality parameters in order to filter based on them
    if category:
        query_where = query_where + ' AND pois.category_id = '+str(category)
    if concelho:
        query_where = query_where + ' AND pois.concelho_id = '+str(concelho)
    #if check for the profile the user wants and add conditions to the query, defaults to "driving"
    #also add the condition to query the database about the trip duration, based on profile
    if profile == 'walking':
        query_ini = query_ini + ', pois_distances.trip_duration_walk AS trip_duration, pois_distances.trip_distance_walk AS trip_distance'
        query_where = query_where + ' AND pois_distances.trip_duration_walk <= ' +str(time2)
        # ORDER BY column gets the _walk suffix to match the selected profile
        if order and order != 'score': query_orderby = query_orderby + '_walk'
    else:
        query_ini = query_ini + ', pois_distances.trip_duration, pois_distances.trip_distance'
        query_where = query_where + ' AND pois_distances.trip_duration <= ' +str(time2)
        if order and order != 'score': query_orderby = query_orderby + ' ASC'
    #set the limit of results from the query, based on user inputs
    if num_poi: query_limit = ' LIMIT '+str(num_poi)
    else: query_limit = ''
    query = query_ini+query_from+query_where+query_orderby+query_limit
    result = db.engine.execute(query).fetchall()
    if not result:
        raise InvalidUsage(nothing_error, status_code=404, task_id=task_id)
    column = ["id", "poi_name", "poi_lat", "poi_lon", "poi_categ", "poi_conc", "poi_descript", "trip_duration","trip_distance","images", "source"]
    info_poi =[]
    # Build one result dict per POI, attaching image URLs when present
    for row in result:
        query_image = 'SELECT original_img FROM images WHERE poi_id = ' +str(row['id'])
        result_image = list(db.engine.execute(query_image).fetchall())
        images = []
        if result_image:
            for i in result_image:
                images.append( request.host_url + 'static/uploads/512px_'+ str(i[0]))
        else:
            images = ""
        info_poi.append(dict(zip(column, [row['id'], row['poi_name'],
                                          row['poi_lat'], row['poi_lon'],
                                          row['categ_name_pt'],
                                          row['conc_name'],
                                          row['poi_descri_pt_short'],
                                          row['trip_duration'],
                                          row['trip_distance'],
                                          images,
                                          row['poi_source']
                                          ])))
    return jsonify(result = info_poi, status = query_success, timestamp=time.time(), log_time=task_id)
#-----------------------------------------------------------------
#Get points that are a certain travel time away from a set of coordinates, can filter by category and/or concelho and choose a
#profile (driving (default option) or walking)
def get_poi_trip_time2_b(lat, lon, time2, task_id, num_poi=None, order=None, category=None, concelho=None, profile=None):
    """Return POIs within *time2* travel time of arbitrary coordinates.

    Travel durations are computed on the fly (routing table helpers) rather
    than read from pois_distances. Raises InvalidUsage when the point is
    outside Portugal or no POI qualifies.

    SECURITY NOTE(review): category/concelho are concatenated into SQL —
    injectable; switch to bound parameters.
    """
    # load shapefile containing Portugal's shape
    poly = gpd.read_file('static/shapefiles/portugal.shp')
    # construct point based on lon/lat returned by geocoder
    point = Point(lon, lat)
    # check each polygon to see if it contains the point
    if poly.contains(point).bool():
        query_ini = 'SELECT pois.id, pois.poi_name, pois.poi_lat, pois.poi_lon, category.categ_name_pt, concelhos.conc_name, pois.poi_descri_pt_short, pois.poi_source FROM pois, category, concelhos'
        query_where = ' WHERE pois.category_id = category.categ_id AND pois.concelho_id = concelhos.conc_id'
        if category:
            query_where = query_where + ' AND pois.category_id = '+str(category)
        if concelho:
            query_where = query_where + ' AND pois.concelho_id = '+str(concelho)
        if order == 'score': query_orderby = ' ORDER BY pois.poi_score DESC'
        else: query_orderby = ''
        query = query_ini+query_where+query_orderby
        result = db.engine.execute(query).fetchall()
        if not result:
            raise InvalidUsage(nothing_error, status_code=404, task_id=task_id)
        column = ["id", "poi_name", "poi_lat", "poi_lon", "poi_categ", "poi_conc", "poi_descript", "trip_duration","images", "source"]
        info_poi=[]
        #iterates over the results of the query and depending on profile calculates the travel distance and time between the two
        # Fix: the walking branch previously bound the result to ``durations``
        # while the loop below reads ``duration`` — a NameError (or silently
        # reusing a stale driving result). Bind the same name in both branches.
        if profile == "walking":
            duration = get_trip_distance_duration_table_walk([(lon, lat)],[(float(row['poi_lon']),float(row['poi_lat'])) for row in result])
        else:
            duration = get_trip_distance_duration_table([(lon, lat)], [(float(row['poi_lon']),float(row['poi_lat'])) for row in result])
        for k,row in enumerate(result):
            #'if' check that validates whether the duration of the trip is below the requested one and if so adds that point's information to the result list
            if duration[k] >= time2: continue
            query_image = 'SELECT original_img FROM images WHERE poi_id = ' +str(row['id'])
            result_image = list(db.engine.execute(query_image).fetchall())
            images = []
            if result_image:
                for i in result_image:
                    images.append( request.host_url + 'static/uploads/512px_'+ str(i[0]))
            else:
                images = ""
            info_poi.append(dict(zip(column, [row['id'], row['poi_name'],
                                              row['poi_lat'], row['poi_lon'],
                                              row['categ_name_pt'],
                                              row['conc_name'],
                                              row['poi_descri_pt_short'],
                                              duration[k],
                                              images,
                                              row['poi_source']
                                              ])))
        if not info_poi:
            raise InvalidUsage(nothing_error, status_code=404, task_id=task_id)
        #sort the result list by time
        if order == "time":
            info_poi = sorted(info_poi, key=lambda k: k['trip_duration'])
        #select num_poi results from the result list
        if num_poi:
            info_poi = info_poi[:int(num_poi)]
        return jsonify(result = info_poi, status = query_success, timestamp=time.time(), log_time=task_id)
    raise InvalidUsage("Coordenates are outside the country's borders, please try another pair", status_code=400, task_id=task_id)
#-----------------------------------------------------------------
#Function that calculates the best route when starting from a point defined by ID based on score and time spent on the point, number of days and max time for the trip. Also able to filter based on category and/or concelho
def route_calculator_id(m, poi_id, start_time, duration, task_id): #m = number of days, Tmax = max time
    """Build a multi-day tour starting/ending at POI *poi_id*.

    Feeds POI opening hours, visit durations, scores and the pairwise
    travel-time matrix into the ILS heuristic, then serialises the best
    tour found as one POI list per day.

    Arguments: m = number of days; start_time/duration define the daily
    window (Tmax = start_time + duration); task_id tags the JSON response.
    """
    #Initialize all the data types that will be filled later
    O=[] #list that stores POI opening hours
    C=[] #list that stores POI closing hours
    T=[] #list that stores POI average visit duration time
    Score=[] #list that stores POI scores
    D=[] #Distance matrix
    Tmax = start_time+duration
    #query to get data relevant to the first point (the point the user requested the trip start from)
    try:
        poi2 = POIS.query.get_or_404(poi_id)
    # Fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrow to Exception (covers the 404 HTTPException).
    except Exception:
        raise InvalidUsage(id_missing, status_code=404, task_id=task_id)
    if poi2:
        # Slot 0 is the start point: "open" for the whole daily window,
        # zero visit time, zero score.
        O.append(start_time)
        C.append(Tmax)
        T.append(0)
        Score.append(0)
    poi_list = db.engine.execute('SELECT id, poi_score FROM pois WHERE poi_review=1 AND category_id IN (1, 2, 3, 6, 10) AND id !='+ str(poi2.id)+' ORDER BY id ASC')
    ids_tuple = []
    dic_score = {}
    for row in poi_list:
        dic_score[row[0]] = row[1] #tuple that stores POI scores
        ids_tuple.append(row[0]) #tuple that stores POI IDs
    ids_tuple = tuple(ids_tuple)
    res2 = db.engine.execute('SELECT poi_id,poi_open_h, poi_close_h, poi_vdura FROM pois_schedule WHERE poi_id IN'+str(ids_tuple)+' GROUP BY poi_id ORDER BY poi_id').fetchall()
    for row2 in res2:
        O.append(row2[1])
        C.append(row2[2])
        T.append(row2[3])
        # POIs with a zero score get a minimal score of 1 so the heuristic
        # can still consider them.
        if dic_score[row2[0]] == 0:
            Score.append(1)
        else:
            Score.append(dic_score[row2[0]])
    size = len(T)
    ids_tuple = (poi2.id,) + ids_tuple
    #queries the distance between the points in order to build the distance matrix
    pois2 = []
    for row2 in db.engine.execute('SELECT trip_duration FROM pois_distances WHERE Start_POI_id IN'+str(ids_tuple)+' AND End_POI_id IN'+str(ids_tuple)+''):
        pois2.append(row2[0])
    # Reshape the flat duration list into a size x size matrix
    D = [pois2[i:i+size] for i in range(0, len(pois2), size)]
    for i in range(0, m):
        O.append(O[0]) #adds the arrival point
        C.append(C[0]) #adds the arrival point
        T.append(T[0]) #adds the arrival point
        Score.append(0) #adds the arrival point
    #Creates matrix for each day
    for i in range(0, size):
        for j in range(0, m):
            # Append m "return to start" columns to each row
            D[i].append(D[0][i])
    for i in range(0, m):
        D.append(D[0][:])
    #executes the function defined above to calculate the best round based on score and time spent
    besttour, bestfound = ILS.ILS_heuristic(m, Tmax, T, Score, O, C, D)
    ids_tuple = ids_tuple + (poi2.id,)
    day_list = {}
    # Loop-invariant: same response columns for every POI
    column = ["id", "poi_name", "poi_lat", "poi_lon", "poi_categ", "poi_conc", "poi_descript", "images", "source"]
    #iterates over the results of the previous function in order to build a result list with the data for all the points in the route
    for d, tour in enumerate(besttour):
        info_poi = []
        newlist = []
        for item in tour:
            newlist.append(ids_tuple[item])
            poi = POIS.query.get_or_404(ids_tuple[item])
            category = Category.query.get(int(poi.category_id))
            if category:
                categoryname = category.categ_name_pt
            else:
                categoryname = ""
            if poi.concelho_id:
                concelho = Concelho.query.get(int(poi.concelho_id))
                if concelho:
                    concelhoname = concelho.conc_name
                else:
                    concelhoname = ""
            else:
                concelhoname = ""
            images_db = db.session.query(Images.original_img).filter(Images.poi_id == poi.id).all()
            images = []
            if images_db:
                for item2 in images_db:
                    images.append( request.host_url + 'static/uploads/512px_'+ item2.original_img)
            else:
                images = ""
            info_poi.append(dict(zip(column, [poi.id, poi.poi_name,
                                              poi.poi_lat, poi.poi_lon,
                                              categoryname,
                                              concelhoname,
                                              poi.poi_descri_pt_short,
                                              images,
                                              poi.poi_source])))
        day_list["day_"+str(d+1)] = info_poi
    # NOTE: best2/newlist reflects only the LAST day's tour (pre-existing behaviour)
    return jsonify(result = day_list, status = query_success, timestamp=time.time(), log_time=task_id,best=besttour[0],best2=newlist)
#-----------------------------------------------------------------
#Function that calculates the best route when starting from a point defined by ID based on score and time spent on the point, number of days and max time for the trip. Also able to filter based on category and/or concelho
def route_calculator_id2(m, poi_id, start_time, duration, task_id, category=None, concelho=None): #m = number of days, Tmax = max time
#Initialize all the data types that will be | |
"""
Core functions of the forward models.
.. seealso::
:mod:`arim.models`
:mod:`arim.scat`
:mod:`arim.ut`
"""
# This module is imported on demand. It should be imported only for modelling.
# Function that are not modelling-specific should go to arim.ut, which is always imported.
import warnings
import abc
import logging
from collections import namedtuple
import math
import numpy as np
import numba
from numpy.core.umath import sin, cos
from . import core as c, _scat, helpers, signal
logger = logging.getLogger(__name__)
def make_toneburst(
    num_cycles, centre_freq, dt, num_samples=None, wrap=False, analytical=False
):
    """
    Build a Hann-windowed toneburst of ``num_cycles`` cycles at ``centre_freq``.

    The pulse length is forced odd so the peak (value 1.0) sits exactly on the
    centre sample. With ``wrap=False`` the output is the pulse followed by
    zero-padding up to ``num_samples``; with ``wrap=True`` the pulse is rotated
    so its peak lands on sample 0 and its leading half wraps to the end.

    Parameters
    ----------
    num_cycles : int
        Number of cycles of the toneburst (must be positive).
    centre_freq : float
        Centre frequency (must be positive).
    dt : float
        Time step (must be positive).
    num_samples : int or None
        Output length; None means exactly the pulse length.
    wrap : bool, optional
        Rotate the pulse so its maximum is at n=0. Default: False.
    analytical : bool, optional
        Return the analytic signal (complex exponential carrier). Default: False.

    Returns
    -------
    toneburst : ndarray
        Array of length ``num_samples``.

    See Also
    --------
    :func:`make_toneburst2`
    """
    # Input validation
    if dt <= 0.0:
        raise ValueError("negative time step")
    if centre_freq <= 0.0:
        raise ValueError("negative centre frequency")
    if num_cycles <= 0:
        raise ValueError("negative number of cycles")
    if num_samples is not None and num_samples <= 0:
        raise ValueError("negative number of time samples")

    # Odd pulse length keeps the burst symmetric about its centre sample.
    len_pulse = int(np.ceil(num_cycles / centre_freq / dt))
    if len_pulse % 2 == 0:
        len_pulse += 1
    half_len_window = len_pulse // 2

    if num_samples is None:
        num_samples = len_pulse
    if len_pulse > num_samples:
        raise ValueError("time vector is too short for this pulse")

    # Carrier phase, centred so the peak is at the middle sample.
    phase = 2.0 * np.pi * dt * centre_freq * (np.arange(len_pulse) - half_len_window)
    carrier = np.exp(1j * phase) if analytical else np.cos(phase)
    windowed = np.hanning(len_pulse) * carrier

    out = np.zeros(num_samples, windowed.dtype)
    out[:len_pulse] = windowed
    if wrap:
        # Move the peak to n=0; the first half of the pulse wraps to the end.
        out = np.concatenate([out[half_len_window:], out[:half_len_window]])
    return out
def _rotate_array(arr, n):
"""
>>> _rotate_array([1, 2, 3, 4, 5, 6, 7], 2)
array([3, 4, 5, 6, 7, 1, 2])
>>> _rotate_array([1, 2, 3, 4, 5, 6, 7], -2)
array([6, 7, 1, 2, 3, 4, 5])
"""
return np.concatenate([arr[n:], arr[:n]])
def make_toneburst2(
    num_cycles,
    centre_freq,
    dt,
    num_before=2,
    num_after=1,
    analytical=False,
    use_fast_len=True,
):
    """
    Return a zero-padded toneburst together with its time vector.

    The output layout is: ``num_before`` toneburst-lengths of zeros, then the
    toneburst itself, then at least ``num_after`` toneburst-lengths of zeros
    (more if the total length is grown to an FFT-friendly size).

    Parameters
    ----------
    num_cycles : int
        Number of cycles of the toneburst.
    centre_freq : float
        Centre frequency
    dt : float
        Time step
    num_before : int, optional
        Amount of zeros before the toneburst (in toneburst length).
    num_after : int, optional
        Amount of zeros after the toneburst (in toneburst length).
    analytical : bool, optional
        If True, return the corresponding analytical (complex) signal.
    use_fast_len : bool, optional
        Use a FFT-friendly length (the default is True).

    Returns
    -------
    toneburst_time : arim.core.Time
    toneburst : ndarray
    t0_idx : int
        Index of the time sample ``t=0``.

    See Also
    --------
    :func:`make_toneburst`
    """
    burst = make_toneburst(
        num_cycles, centre_freq, dt, num_samples=None, wrap=False, analytical=analytical
    )
    burst_len = len(burst)
    pad_before = num_before * burst_len
    pad_after = num_after * burst_len
    total_len = pad_before + burst_len + pad_after
    if use_fast_len:
        import scipy.fftpack

        # Growing to the next fast FFT length only appends zeros at the end,
        # which does not change the signal content.
        total_len = scipy.fftpack.next_fast_len(total_len)
    out = np.zeros(total_len, dtype=burst.dtype)
    out[pad_before : pad_before + burst_len] = burst
    # t=0 is aligned with the centre (maximum) of the toneburst.
    t0_idx = pad_before + burst_len // 2
    toneburst_time = c.Time(-t0_idx * dt, dt, len(out))
    return toneburst_time, out, t0_idx
def directivity_2d_rectangular_in_fluid(theta, element_width, wavelength):
    """
    Directivity of a finite-width strip element radiating in a fluid (2D model).

    The element is modelled as a 'rectangle' of finite width and infinite
    out-of-plane length, made of uniformly radiating sources along a straight
    line. The directivity therefore depends only on the element width.

    ``theta`` is the inclination angle, i.e. the angle in the plane Oxz for a
    point on the axis Ox of the cartesian basis (O, x, y, z). Cf. Wooh's paper.

    The directivity is normalised by its maximum value, obtained for theta=0°.

    Returns::

        sinc(pi * a * sin(theta) / lambda)   where sinc(x) = sin(x)/x

    Parameters
    ----------
    theta : ndarray
        Angles in radians.
    element_width : float
        In meter.
    wavelength : float
        In meter.

    Returns
    -------
    directivity : ndarray
        Signed directivity for each angle.

    References
    ----------
    Wooh, Shi-Chang, and <NAME>. 1999. ‘Three-Dimensional Beam Directivity of Phase-Steered Ultrasound’.
    The Journal of the Acoustical Society of America 105 (6): 3275–82. doi:10.1121/1.424655.

    See Also
    --------
    :func:`transmission_2d_rectangular_in_fluid`
    """
    if element_width < 0:
        raise ValueError("Negative width")
    if wavelength < 0:
        raise ValueError("Negative wavelength")
    # numpy.sinc is the *normalised* sinc: sinc(x) = sin(pi*x)/(pi*x),
    # so no explicit factor pi is needed in the argument.
    normalised_arg = (element_width / wavelength) * np.sin(theta)
    return np.sinc(normalised_arg)
def directivity_2d_rectangular_in_fluid_for_path(
    ray_geometry, element_width, wavelength
):
    """
    Convenience wrapper around :func:`directivity_2d_rectangular_in_fluid`
    taking the angles from a :class:`RayGeometry` object.

    Parameters
    ----------
    ray_geometry : arim.ray.RayGeometry
    element_width : float
    wavelength : float

    Returns
    -------
    directivity : ndarray
        Signed directivity for each angle.
    """
    # Angles of the rays leaving the first interface (the probe elements).
    out_angles = ray_geometry.conventional_out_angle(0)
    return directivity_2d_rectangular_in_fluid(out_angles, element_width, wavelength)
def _f0(x, k2):
# Miller and Pursey 1954 eq (74)
x2 = x * x
# Warning: sqrt(a) * sqrt(b) != sqrt(a * b) because of negative values
return (2 * x2 - k2) ** 2 - 4 * x2 * np.sqrt((x2 - 1)) * np.sqrt((x2 - k2))
def directivity_2d_rectangular_on_solid_l(
    theta, element_width, wavelength_l, wavelength_t
):
    """
    L-wave directivity of rectangular element on solid

    The element is modelled by an infinitely long strip of finite width
    vibrating in a direction normal to the surface of the solid medium.

    Parameters
    ----------
    theta : ndarray
        Angles in radians.
    element_width : float
    wavelength_l : float
    wavelength_t : float

    Returns
    -------
    directivity_l : ndarray
        Complex

    Notes
    -----
    Equations MP (93) and DW (2), (3), (6)

    The sinc results of the integration of MP (90) with far field
    approximation.

    Normalisation coefficients are ignored, but the values are consistent with
    :func:`directivity_2d_rectangular_on_solid_t`.

    References
    ----------
    <NAME>., and <NAME>. 1954. ‘The Field and Radiation Impedance of
    Mechanical Radiators on the Free Surface of a Semi-Infinite Isotropic
    Solid’. Proceedings of the Royal Society of London A: Mathematical,
    Physical and Engineering Sciences 223 (1155): 521–41.
    https://doi.org/10.1098/rspa.1954.0134.

    Drinkwater, <NAME>., and <NAME>. 2006. ‘Ultrasonic Arrays for
    Non-Destructive Evaluation: A Review’. NDT & E International 39 (7):
    525–41. https://doi.org/10.1016/j.ndteint.2006.03.006.

    See Also
    --------
    :func:`directivity_2d_rectangular_on_solid_t`
    """
    # k: ratio of L to T wavelength (appears as k^2 in MP's equations).
    k = wavelength_l / wavelength_t
    k2 = k * k
    # Cast to complex so the square roots inside _f0 stay well-defined beyond
    # the critical angles. Fix: the `np.complex_` alias was deprecated and
    # removed in NumPy 2.0; use the explicit np.complex128 dtype instead
    # (identical dtype, forward-compatible).
    theta = np.asarray(theta).astype(np.complex128)
    S = sin(theta)
    C = cos(theta)
    return (
        ((k2 - 2 * S ** 2) * C)
        / _f0(S, k2)
        * np.sinc((element_width / wavelength_l) * S)
    )
def directivity_2d_rectangular_on_solid_t(
    theta, element_width, wavelength_l, wavelength_t
):
    """
    T-wave directivity of rectangular element on solid

    See :func:`directivity_2d_rectangular_on_solid_l` for further information.

    Parameters
    ----------
    theta : ndarray
        Angles in radians.
    element_width : float
    wavelength_l : float
    wavelength_t : float

    Returns
    -------
    directivity_t : ndarray
        Complex

    Notes
    -----
    Equations MP (94) and DW (2), (4), (6)

    See Also
    --------
    :func:`directivity_2d_rectangular_on_solid_l`
    """
    # k: ratio of L to T wavelength (appears as k^2 in MP's equations).
    k = wavelength_l / wavelength_t
    k2 = k * k
    # Cast to complex so the square roots stay well-defined beyond the
    # critical angles. Fix: the `np.complex_` alias was deprecated and removed
    # in NumPy 2.0; use the explicit np.complex128 dtype instead.
    # (Also fixed the "See Also" above, which referenced this function itself
    # instead of its L-wave counterpart.)
    theta = np.asarray(theta).astype(np.complex128)
    S = sin(theta)
    C = cos(theta)
    return (
        k ** 2.5
        * (np.sqrt(k2 * S * S - 1) * sin(2 * theta))
        / _f0(k * S, k2)
        * np.sinc((element_width / wavelength_t) * S)
    )
def snell_angles(incidents_angles, c_incident, c_refracted):
"""
Returns the angles of the refracted rays according to Snell–Descartes law:
c1/c2 = sin(alpha1)/sin(alpha2)
In case of total internal reflection (incident angles above the critical angles), the output depends
on the datatype of the incident angle.
If the incident angle is real, the refracted angle is | |
axis=[1,2,3], keepdims=True)
lname = self._write_caffe(name)
return res , lname
class activation(KLayer):
    """
    Basic activation layer

    Applies one of several activation functions, selected by the integer
    ``param`` code, and records an equivalent layer in the module-global
    Caffe prototxt string (``caffe_string``) for model export.
    """
    def __init__(self, param, **kwargs):
        """
        Possible values:
        - model3.PARAM_RELU
        - model3.PARAM_LRELU
        - model3.PARAM_ELU
        - model3.PARAM_TANH
        - model3.PARAM_MFM
        - model3.PARAM_MFM_FC
        - model3.PARAM_SIGMOID
        - model3.PARAM_SWISH
        """
        super(activation, self).__init__()
        self.param = param  # integer activation code (see docstring above)
        self.kwargs = kwargs  # extra options, e.g. 'leaky' slope for LReLU
    def _write_caffe(self, btm):
        # Append the Caffe description of this activation to the global
        # prototxt string. The activation is applied in place (bottom == top),
        # so the incoming layer name is returned unchanged.
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        layer_name = 'actv%d_%d'%(layer_counter, self.param)
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        if self.param == 0:
            caffe_string += ' type: "ReLU"\n'
        elif self.param == 1:
            # Leaky ReLU exported as PReLU with a fixed slope of 0.2,
            # matching the default 'leaky' value used in call().
            caffe_string += ' type: "PReLU"\n'
            params_dict[layer_name] = {}
            params_dict[layer_name]['gamma'] = 0.2
        elif self.param == 6:
            caffe_string += ' type: "Sigmoid"\n'
        # NOTE(review): params 2-5 and 7 (ELU/tanh/MFM/swish) fall through
        # without emitting a 'type:' line, producing an incomplete Caffe
        # layer — confirm whether export for these activations is
        # intentionally unsupported.
        caffe_string += ' bottom: "%s"\n'%btm()
        caffe_string += ' top: "%s"\n'%btm()
        caffe_string += '}\n'
        layer_counter += 1
        return btm
    def call(self, x):
        """
        :param x: Input tensor or numpy array. The object will be automatically converted to tensor if the input is np.array. Note that other arrays in args or kwargs will not be auto-converted.
        """
        # x is a pair (tensor, LayerName) threaded through the network.
        name = x[1]
        x = x[0]
        if self.param == 0:
            res = tf.nn.relu(x)
        elif self.param == 1:
            if 'leaky' in self.kwargs:
                leaky = self.kwargs['leaky']
            else:
                leaky = 0.2
            res = tf.maximum(x,x*leaky)
        elif self.param == 2:
            res = tf.nn.elu(x)
        elif self.param == 3:
            res = tf.tanh(x)
        elif self.param == 4:
            # MFM (max-feature-map): split the channel axis into two halves
            # and take the element-wise max (for 4D conv feature maps).
            shape = x.get_shape().as_list()
            res = tf.reshape(x,[-1,shape[1],shape[2],2,shape[-1]//2]) # potential bug in conv_net
            res = tf.reduce_max(res,axis=[3])
        elif self.param == 5:
            # MFM variant for 2D fully-connected features.
            shape = x.get_shape().as_list()
            res = tf.reduce_max(tf.reshape(x,[-1,2,shape[-1]//2]),axis=[1])
        elif self.param == 6:
            res = tf.sigmoid(x)
        elif self.param == 7:
            # res = tf.nn.swish(x)
            # res = tf.sigmoid(x) * x
            res = swish(x)
        else:
            # Unknown code: identity (no activation applied).
            res = x
        lname = self._write_caffe(name)
        return res, lname
class fcLayer(KLayer):
    """
    Basic fully connected layer

    Computes ``x @ kernel (+ bias)`` and records an equivalent Caffe
    "InnerProduct" layer (with its weights) for export.
    """
    def __init__(self, outsize, usebias=True, values=None, norm=False, map_shape=None):
        """
        :type outsize: int
        :param outsize: Number of output channels
        :type usebias: bool
        :param usebias: Whether to add bias term in this layer.
        :type values: list[np.array]
        :param values: If the param 'values' is set, the layer will be initialized with the list of numpy array.
        :type norm: bool (default=False)
        :param norm: Whether to normalize the kernel (along axis 0) before matrix multiplication
        :type map_shape: list (default=None)
        :param map_shape: If shape is set, weight will be re-shaped to fit NCHW format
        """
        super(fcLayer, self).__init__()
        self.outsize = outsize
        self.usebias = usebias
        self.values = values
        self.norm = norm
        self.map_shape = map_shape
    def _parse_args(self, input_shape):
        # set size
        # input_shape describes the (tensor, LayerName) pair; the tensor's
        # last dimension gives the input feature count.
        insize = input_shape[0][-1]
        self.size = [insize, self.outsize]
    def build(self, input_shape):
        # Create kernel/bias, either from supplied numpy values
        # (values[0] = kernel, values[1] = bias) or from default
        # initializers (Glorot for the kernel, zeros for the bias).
        values = self.values
        self._parse_args(input_shape)
        if self.values is not None:
            self.kernel = self.add_variable('kernel', shape=self.size, initializer=tf.initializers.constant(values[0]))
        else:
            self.kernel = self.add_variable('kernel', shape=self.size, initializer=tf.initializers.GlorotUniform())
        if self.usebias:
            if self.values is not None:
                self.bias = self.add_variable('bias', shape=[self.outsize], initializer=tf.initializers.constant(values[1]))
            else:
                self.bias = self.add_variable('bias', shape=[self.outsize], initializer=tf.initializers.constant(0.0))
    def _write_caffe(self, name):
        # Emit the Caffe InnerProduct layer and store its weights in the
        # global params_dict for later serialization.
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        layer_name = 'fc%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "InnerProduct"\n'
        caffe_string += ' bottom: "%s"\n'%name()
        caffe_string += ' top: "%s"\n'%layer_name
        caffe_string += ' inner_product_param{\n'
        caffe_string += ' num_output: %d\n'%self.outsize
        caffe_string += ' bias_term: %s\n'%('true' if self.usebias else 'false')
        caffe_string += ' }\n}\n'
        params_dict[layer_name] = {}
        if self.map_shape is None:
            params_dict[layer_name]['fckernel'] = self.kernel.numpy()
        else:
            # When this FC layer follows a flattened conv feature map, the
            # weight rows are reordered to match Caffe's channel-first
            # flattening. map_shape is presumably the [H, W, C] of the
            # pre-flatten feature map — TODO confirm the axis order
            # [2,1,0,3] against the consumer of params_dict.
            transpose_w = self.kernel.numpy()
            transpose_w = np.reshape(transpose_w, [self.map_shape[0], self.map_shape[1], self.map_shape[2], self.outsize])
            transpose_w = np.transpose(transpose_w, [2,1,0,3])
            transpose_w = np.reshape(transpose_w, [-1, self.outsize])
            params_dict[layer_name]['fckernel'] = transpose_w
        if self.usebias:
            params_dict[layer_name]['bias'] = self.bias.numpy()
        layer_counter += 1
        return helper.LayerName(layer_name)
    def call(self, x):
        """
        :param x: Input tensor or numpy array. The object will be automatically converted to tensor if the input is np.array. Note that other arrays in args or kwargs will not be auto-converted.
        """
        # x is a pair (tensor, LayerName) threaded through the network.
        name = x[1]
        x = x[0]
        if self.norm:
            # Normalize each kernel column to unit L2 norm (e.g. for
            # cosine-similarity style classifier heads).
            k = tf.nn.l2_normalize(self.kernel, axis=0)
        else:
            k = self.kernel
        res = tf.matmul(x, k)
        if self.usebias:
            res = tf.nn.bias_add(res, self.bias)
        lname = self._write_caffe(name)
        return res, lname
class batch_norm(KLayer):
    """
    Basic batch normalization layer

    In training mode, normalizes with the batch statistics and updates the
    moving averages; in inference mode, uses the stored moving statistics.
    Also emits the equivalent Caffe "BatchNorm" + "Scale" layer pair.
    """
    def __init__(self, decay=1e-2, epsilon=1e-5, is_training=None, values=None):
        """
        :type decay: float
        :param decay: Decay rate.
        :type epsilon: float
        :param epsilon: Epsilon value to avoid 0 division.
        :type is_training: bool
        :param is_training: Define whether this layer is in training mode
        :type values: list[np.array]
        :param values: If the param 'values' is set, the layer will be initialized with the list of numpy array. Expected order: [moving_average, variance, gamma, beta].
        """
        super(batch_norm, self).__init__()
        self.decay = decay
        self.epsilon = epsilon
        self.is_training = is_training
        self.values = values
    def build(self, input_shape):
        # NOTE(review): unlike fcLayer/NNUpSample2D this indexes
        # input_shape[-1] rather than input_shape[0][-1]; confirm what Keras
        # actually passes here for the (tensor, LayerName) input structure.
        values = self.values
        shape = input_shape[-1]
        if self.values is None:
            self.moving_average = self.add_variable('moving_average',[shape],initializer=tf.initializers.constant(0.0),trainable=False)
            self.variance = self.add_variable('variance',[shape],initializer=tf.initializers.constant(1.0),trainable=False)
            self.gamma = self.add_variable('gamma',[shape],initializer=tf.initializers.constant(1.0),trainable=True)
            self.beta = self.add_variable('beta',[shape],initializer=tf.initializers.constant(0.0),trainable=True)
        else:
            self.moving_average = self.add_variable('moving_average',[shape],initializer=tf.initializers.constant(self.values[0]),trainable=False)
            self.variance = self.add_variable('variance',[shape],initializer=tf.initializers.constant(values[1]),trainable=False)
            self.gamma = self.add_variable('gamma',[shape],initializer=tf.initializers.constant(values[2]),trainable=True)
            self.beta = self.add_variable('beta',[shape],initializer=tf.initializers.constant(values[3]),trainable=True)
    def update(self,variable,value):
        # Exponential moving average update: variable -= (variable - value) * decay.
        delta = (variable - value) * self.decay
        variable.assign_sub(delta)
    def _write_caffe(self, btm):
        # Emit a Caffe BatchNorm layer (mean/var) followed by a Scale layer
        # (gamma/beta), both applied in place (bottom == top).
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        layer_name = 'bn%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "BatchNorm"\n'
        caffe_string += ' bottom: "%s"\n'%btm()
        caffe_string += ' top: "%s"\n'%btm()
        caffe_string += ' batch_norm_param{\n  use_global_stats:true\n  eps:1e-5\n }\n'
        caffe_string += '}\n'
        params_dict[layer_name] = {}
        params_dict[layer_name]['mean'] = self.moving_average.numpy()
        params_dict[layer_name]['var'] = self.variance.numpy()
        params_dict[layer_name]['scale'] = 1.
        layer_name = 'scale%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "Scale"\n'
        caffe_string += ' bottom: "%s"\n'%btm()
        caffe_string += ' top: "%s"\n'%btm()
        caffe_string += ' scale_param{\n  bias_term:true\n }\n'
        caffe_string += '}\n'
        params_dict[layer_name] = {}
        params_dict[layer_name]['scale'] = self.gamma.numpy()
        params_dict[layer_name]['bias'] = self.beta.numpy()
        # Bug fix: advance the shared layer counter like every other layer
        # type does. Without this, two batch_norm layers with no other
        # counter-incrementing layer in between would emit duplicate
        # 'bn%d'/'scale%d' names in the prototxt and overwrite each other's
        # params_dict entries.
        layer_counter += 1
        return btm
    def call(self, x):
        """
        :param x: Input tensor or numpy array. The object will be automatically converted to tensor if the input is np.array. Note that other arrays in args or kwargs will not be auto-converted.
        """
        # x is a pair (tensor, LayerName) threaded through the network.
        name = x[1]
        x = x[0]
        if self.is_training is None:
            is_training = bool(tf.keras.backend.learning_phase())
        else:
            is_training = self.is_training
        # is_training = True
        # print(is_training, time.time())
        # fused_batch_norm only accepts 4D input: temporarily expand 2D/3D
        # inputs to 4D and fold a 5D input's middle axes together.
        inp_shape = x.get_shape().as_list()
        inp_dim_num = len(inp_shape)
        if inp_dim_num==3:
            x = tf.expand_dims(x, axis=1)
        elif inp_dim_num==2:
            x = tf.expand_dims(x, axis=1)
            x = tf.expand_dims(x, axis=1)
        elif inp_dim_num==5:
            x = tf.reshape(x, [inp_shape[0], inp_shape[1], inp_shape[2]*inp_shape[3], inp_shape[4]])
        if is_training:
            # Training: normalize with batch statistics, then update the
            # moving averages with them.
            res, mean, var = tf.compat.v1.nn.fused_batch_norm(x, self.gamma, self.beta, None, None, self.epsilon, is_training=is_training)
            self.update(self.moving_average, mean)
            self.update(self.variance, var)
        else:
            # Inference: use the stored moving statistics.
            res, mean, var = tf.compat.v1.nn.fused_batch_norm(x, self.gamma, self.beta, self.moving_average, self.variance, self.epsilon, is_training=is_training)
        # Undo the temporary reshaping so the output matches the input rank.
        if inp_dim_num==3:
            res = tf.squeeze(res , axis=1)
        elif inp_dim_num==2:
            res = tf.squeeze(res, axis=[1,2])
        elif inp_dim_num==5:
            res = tf.reshape(res, inp_shape)
        lname = self._write_caffe(name)
        return res, lname
class flatten(KLayer):
    """
    Basic flatten layer

    Reshapes the input to [batch, -1] and records a Caffe "Flatten" layer.
    """
    def __init__(self):
        super(flatten, self).__init__()
    def build(self, input_shape):
        # Cached shape; call() refreshes it from the actual tensor anyway.
        self.shape = input_shape
    def _write_caffe(self, name):
        # Emit the Caffe Flatten layer into the global prototxt string.
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        layer_name = 'flatten%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "Flatten"\n'
        caffe_string += ' bottom: "%s"\n'%name()
        caffe_string += ' top: "%s"\n'%layer_name
        # caffe_string += ' crop_param{\n  offset:%d\n  offset:%d\n }\n}\n'%(1,1)
        caffe_string += '}\n'
        layer_counter += 1
        return helper.LayerName(layer_name)
    def call(self, x):
        """
        :param x: Input tensor or numpy array. The object will be automatically converted to tensor if the input is np.array. Note that other arrays in args or kwargs will not be auto-converted.
        """
        # x is a pair (tensor, LayerName) threaded through the network.
        name = x[1]
        x = x[0]
        # Collapse all non-batch dimensions into a single one.
        self.shape = x.get_shape().as_list()
        num = 1
        for k in self.shape[1:]:
            num *= k
        res = tf.reshape(x, [-1, num])
        lname = self._write_caffe(name)
        return res , lname
class NNUpSample2D(KLayer):
    """Nearest-neighbour 2D upsampling by an integer factor.

    Implemented as a transposed convolution whose kernel is all-ones on the
    per-channel diagonal (w[:, :, i, i] = 1), so each input pixel is copied
    into a factor x factor block. Exported to Caffe as a fixed-weight
    Deconvolution layer.
    """
    def __init__(self, factor):
        super(NNUpSample2D, self).__init__()
        self.factor = factor  # integer upsampling factor per spatial dim
    def _get_weights(self):
        # Identity-per-channel deconv kernel: channel i maps only to
        # channel i; cross-channel entries stay zero.
        w = np.zeros([self.factor, self.factor, self.chn, self.chn])
        w = np.float32(w)
        for i in range(self.chn):
            w[:,:,i,i] = 1
        return w
    def build(self, input_shape):
        # Channel count from the tensor part of the (tensor, LayerName) pair.
        self.chn = input_shape[0][-1]
    def _write_caffe(self, name):
        # Emit the equivalent fixed-weight Deconvolution layer.
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        layer_name = 'nnup%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "Deconvolution"\n'
        caffe_string += ' bottom: "%s"\n'%name()
        caffe_string += ' top: "%s"\n'%layer_name
        caffe_string += ' convolution_param{\n'
        caffe_string += ' num_output: %d\n'%self.chn
        caffe_string += ' bias_term: %s\n'%('false')
        caffe_string += ' stride: %d\n'%self.factor
        caffe_string += ' kernel_h: %d\n'%(self.factor)
        caffe_string += ' kernel_w: %d\n'%(self.factor)
        caffe_string += ' }\n}\n'
        params_dict[layer_name] = {}
        params_dict[layer_name]['kernel'] = self._get_weights()
        layer_counter += 1
        return helper.LayerName(layer_name)
    def call(self, x):
        # NOTE(review): shape[0] (batch) from get_shape() may be None for a
        # dynamic batch size, which would break conv2d_transpose's
        # output_shape — confirm callers always use a static batch size.
        name = x[1]
        x = x[0]
        shape = x.get_shape().as_list()
        w = self._get_weights()
        outshape = [shape[0], shape[1]*self.factor, shape[2]*self.factor, self.chn]
        stride = [1, self.factor, self.factor, 1]
        x = tf.nn.conv2d_transpose(x, w, outshape, stride)
        lname = self._write_caffe(name)
        return x, lname
class BroadcastMUL(KLayer):
    """Element-wise product of two tensors with spatial broadcasting.

    In TF the multiplication relies on normal broadcasting (x[0] is
    presumably a spatially-1x1 attention/scale tensor — e.g. a
    squeeze-and-excitation gate — broadcast over x[1]; confirm against
    callers). Caffe has no broadcasting, so the export tiles x[0] up to
    x[1]'s spatial size with an all-ones depthwise Deconvolution, then
    multiplies with an Eltwise PROD layer.
    """
    def __init__(self):
        super(BroadcastMUL, self).__init__()
    def _write_caffe(self, names, tiles, outchn):
        # tiles: target spatial size (assumes x[1] is square, H == W —
        # TODO confirm); outchn: channel count shared by both inputs.
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        # manual tiling layers to match the size
        # (older Tile-based implementation kept for reference)
        # layer_name = 'tile_0_%d'%layer_counter
        # caffe_string += 'layer{\n'
        # caffe_string += ' name: "%s"\n'%layer_name
        # caffe_string += ' type: "Tile"\n'
        # caffe_string += ' bottom:"%s"\n'%names[0]()
        # caffe_string += ' top: "%s"\n'%layer_name
        # caffe_string += ' tile_param{\n  axis:2\n  tiles:%d\n }\n'%tiles
        # caffe_string += '}\n'
        # layer_name = 'tile_1_%d'%layer_counter
        # caffe_string += 'layer{\n'
        # caffe_string += ' name: "%s"\n'%layer_name
        # caffe_string += ' type: "Tile"\n'
        # caffe_string += ' bottom:"tile_0_%d"\n'%layer_counter
        # caffe_string += ' top: "%s"\n'%layer_name
        # caffe_string += ' tile_param{\n  axis:3\n  tiles:%d\n }\n'%tiles
        # caffe_string += '}\n'
        # Tile via a grouped (depthwise) deconvolution with all-ones weights:
        # each 1x1 input value is spread into a tiles x tiles block.
        layer_name = 'tile_0_%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "Deconvolution"\n'
        caffe_string += ' bottom: "%s"\n'%names[0]()
        caffe_string += ' top: "%s"\n'%layer_name
        caffe_string += ' convolution_param{\n'
        caffe_string += ' num_output: %d\n'%outchn
        caffe_string += ' bias_term: %s\n'%('false')
        caffe_string += ' group: %d\n'%outchn
        caffe_string += ' stride: 1\n'
        caffe_string += ' pad_h: 0\n'
        caffe_string += ' pad_w: 0\n'
        caffe_string += ' kernel_h: %d\n'%tiles
        caffe_string += ' kernel_w: %d\n'%tiles
        caffe_string += ' }\n}\n'
        params_dict[layer_name] = {}
        params_dict[layer_name]['dwkernel'] = np.ones([tiles, tiles, outchn, 1]).astype(np.float32)
        # do multiplication
        layer_name = 'mul%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "Eltwise"\n'
        caffe_string += ' bottom:"tile_0_%d"\n'%layer_counter
        caffe_string += ' bottom:"%s"\n'%names[1]()
        caffe_string += ' top: "%s"\n'%layer_name
        caffe_string += ' eltwise_param{\n  operation:PROD\n }\n'
        caffe_string += '}\n'
        layer_counter += 1
        return helper.LayerName(layer_name)
    def call(self, x):
        # x is a list of two (tensor, LayerName) pairs.
        names = [i[1] for i in x]
        xs = [i[0] for i in x]
        out = xs[0]*xs[1]
        lname = self._write_caffe(names, xs[1].shape[1], xs[1].shape[-1])
        return out, lname
class SUM(KLayer):
def __init__(self):
super(SUM, self).__init__()
def _write_caffe(self, names):
global caffe_string, layer_counter
if not 'caffe_string' in globals():
caffe_string = ''
if not 'layer_counter' in globals():
layer_counter = 0
layer_name = 'add%d'%layer_counter
caffe_string += 'layer{\n'
caffe_string += ' name: "%s"\n'%layer_name
caffe_string += ' type: "Eltwise"\n'
for n in names:
caffe_string += ' bottom:"%s"\n'%n()
caffe_string += ' top: "%s"\n'%layer_name
caffe_string += ' eltwise_param{\n operation:SUM\n }\n'
caffe_string += '}\n'
layer_counter += 1
return helper.LayerName(layer_name)
def call(self, x):
names = [i[1] for i | |
# -*- coding: utf-8 -*-
from sst_unittest import *
from sst_unittest_support import *
import os
import shutil
import fnmatch
import csv
################################################################################
# Code to support a single instance module initialize, must be called setUp method
# module_init: 0 = not yet run, 1 = one-time module setup has completed.
module_init = 0
# Guards the one-time initialization across concurrently starting tests.
module_sema = threading.Semaphore()
# Absolute path of the PIN tool executable; filled in by is_PIN_Compiled().
pin_exec_path = ""
def initializeTestModule_SingleInstance(class_inst):
    """Run the module-wide one-time setup exactly once.

    Called from every test's setUp(); the module_sema/module_init pair
    ensures the sieve test files are built only by the first caller.
    """
    global module_init
    global module_sema

    # 'with' guarantees the semaphore is released even if something
    # uncatchable-by-Exception (e.g. KeyboardInterrupt) escapes below;
    # the original manual acquire/release could leave it held.
    with module_sema:
        if module_init != 1:
            try:
                # Put your single instance Init Code Here
                class_inst._setup_sieve_test_files()
            except Exception:
                # Best-effort setup: failures surface later as test failures.
                # Narrowed from a bare 'except:' which also swallowed
                # SystemExit/KeyboardInterrupt.
                pass
            module_init = 1
###
def is_PIN_loaded():
    """Return True if INTEL_PIN_DIRECTORY is set and points at a real directory."""
    pin_path = os.environ.get('INTEL_PIN_DIRECTORY')
    pindir_found = pin_path is not None and os.path.isdir(pin_path)
    log_debug("memHSieve Test - Intel_PIN_Path = {0}; Valid Dir = {1}".format(pin_path, pindir_found))
    return pindir_found
def is_PIN_Compiled():
    """Return True if elements was configured with a PINTOOL executable.

    Side effect: caches the executable path in the module-global
    ``pin_exec_path`` for the is_Pin2_used()/is_Pin3_used() helpers.
    """
    global pin_exec_path
    pin_crt = sst_elements_config_include_file_get_value_int("HAVE_PINCRT", 0, True)
    pin_exec = sst_elements_config_include_file_get_value_str("PINTOOL_EXECUTABLE", "", True)
    log_debug("memHSieve Test - Detected PIN_CRT = {0}".format(pin_crt))
    log_debug("memHSieve Test - Detected PIN_EXEC = {0}".format(pin_exec))
    pin_exec_path = pin_exec
    return bool(pin_exec)
def is_Pin2_used():
    """Return True for a PIN2 toolchain (invoked via a 'pin.sh' wrapper)."""
    global pin_exec_path
    return is_PIN_Compiled() and "/pin.sh" in pin_exec_path
def is_Pin3_used():
    """Return True for a PIN3 toolchain (executable path ends in '/pin')."""
    global pin_exec_path
    if not is_PIN_Compiled():
        return False
    if is_Pin2_used():
        return False
    # Equivalent to the original rfind() index arithmetic: the path must end
    # exactly with "/pin".
    return pin_exec_path.endswith("/pin")
################################################################################
################################################################################
################################################################################
class testcase_memHierarchy_memHSieve(SSTTestCase):
    """memHierarchy Sieve element test.

    Requires a compiled Intel PIN tool (PIN2 or PIN3); the class-level
    skipIf guards below disable the test otherwise. The one-time module
    setup compiles the ompsievetest workload into a scratch directory.
    """
    def initializeClass(self, testName):
        super(type(self), self).initializeClass(testName)
        # Put test based setup code here. it is called before testing starts
        # NOTE: This method is called once for every test
    def setUp(self):
        super(type(self), self).setUp()
        initializeTestModule_SingleInstance(self)
        # Put test based setup code here. it is called once before every test
    def tearDown(self):
        # Put test based teardown code here. it is called once after every test
        super(type(self), self).tearDown()
    #####
    # Class-level probes evaluated once at class-definition time; consumed by
    # the skipIf decorators below. Note '|' is a bitwise-or of bools here
    # (works because bool subclasses int, but 'or' would be clearer).
    pin_compiled = is_PIN_Compiled()
    pin_version_valid = is_Pin2_used() | is_Pin3_used()
    pin_loaded = is_PIN_loaded()
    @unittest.skipIf(not pin_compiled, "memHSieve: Requires PIN, but PinTool is not compiled with Elements. In sst_element_config.h PINTOOL_EXECUTABLE={0}".format(pin_exec_path))
    @unittest.skipIf(not pin_version_valid, "memHSieve: Requires PIN, but PinTool does not seem to be a valid version. PINTOOL_EXECUTABLE={0}".format(pin_exec_path))
    @unittest.skipIf(not pin_loaded, "memHSieve: Requires PIN, but Env Var 'INTEL_PIN_DIR' is not found or path does not exist.")
    def test_memHSieve(self):
        self.memHSieve_Template("memHSieve")
    #####
    def memHSieve_Template(self, testcase):
        """Run the sieve SDL under SST and verify backtrace files, the
        mallocRank output, the CSV statistics and the reference output."""
        pin2defined = is_Pin2_used()
        pin3defined = is_Pin3_used()
        # Get the path to the test files
        test_path = self.get_testsuite_dir()
        outdir = self.get_test_output_run_dir()
        tmpdir = self.get_test_output_tmp_dir()
        # NOTE(review): the local MemHElementDir computed here is unused —
        # the next line reads self.MemHElementDir, which was set by
        # _setup_sieve_test_files() during setUp(). Confirm and drop one.
        MemHElementDir = os.path.abspath("{0}/../".format(test_path))
        MemHElementSieveTestsDir = "{0}/Sieve/tests".format(self.MemHElementDir)
        testMemHSieveDir = "{0}/testmemhsieve".format(tmpdir)
        # Set the various file paths
        testDataFileName=("test_{0}".format(testcase))
        # NOTE(review): the second format() argument is unused here
        # ('sieve-test.py' is hard-coded); harmless but confusing.
        sdlfile = "{0}/sieve-test.py".format(MemHElementSieveTestsDir, testDataFileName)
        reffile = "{0}/refFiles/{1}.out".format(MemHElementSieveTestsDir, testDataFileName)
        outfile = "{0}/{1}.out".format(outdir, testDataFileName)
        errfile = "{0}/{1}.err".format(outdir, testDataFileName)
        mpioutfiles = "{0}/{1}.testfile".format(outdir, testDataFileName)
        grep_outfile = "{0}/{1}_grep_lines_23_43.out".format(tmpdir, testDataFileName)
        grep_reffile = "{0}/{1}_grep_lines_23_43.ref".format(tmpdir, testDataFileName)
        log_debug("testcase = {0}".format(testcase))
        log_debug("sdl file = {0}".format(sdlfile))
        log_debug("ref file = {0}".format(reffile))
        log_debug("out file = {0}".format(outfile))
        log_debug("err file = {0}".format(errfile))
        # Run SST in the tests directory
        self.run_sst(sdlfile, outfile, errfile, set_cwd=testMemHSieveDir, mpi_out_files=mpioutfiles)
        # NOTE: THE PASS / FAIL EVALUATIONS ARE PORTED FROM THE SQE BAMBOO
        # BASED testSuite_XXX.sh THESE SHOULD BE RE-EVALUATED BY THE
        # DEVELOPER AGAINST THE LATEST VERSION OF SST TO SEE IF THE
        # TESTS & RESULT FILES ARE STILL VALID
        # Each check below ANDs its result into TestPassed and appends an
        # explanation to TestFailureMsg for the single final assertion.
        TestPassed = True
        TestFailureMsg = ""
        ######
        backtrace_good = False
        if pin3defined:
            # FOR PIN3
            # Make sure ANY of the backtrace*.txt files exist
            pattern = "backtrace_*.txt"
            file_found = False
            for filename in os.listdir(testMemHSieveDir):
                if fnmatch.fnmatch(filename, pattern):
                    file_found = True
                    break
            if file_found == True:
                backtrace_good = True
            else:
                TestFailureMsg += "Did not find any {0} files in directory {1}; ".format(pattern, testMemHSieveDir)
        else:
            # FOR PIN2
            # Make sure ANY of the backtrace*.txt.gz files exist
            pattern = "backtrace_*.txt.gz"
            file_found = False
            for filename in os.listdir(testMemHSieveDir):
                if fnmatch.fnmatch(filename, pattern):
                    file_found = True
                    break
            if file_found == True:
                backtrace_good = True
            else:
                TestFailureMsg += "Did not find any {0} files in directory {1}; ".format(pattern, testMemHSieveDir)
            # Unzip all expanded backtrace*.txt.gz files
            for filename in os.listdir(testMemHSieveDir):
                if fnmatch.fnmatch(filename, pattern):
                    filepath = "{0}/{1}".format(testMemHSieveDir, filename)
                    cmd = 'gzip -d {0}'.format(filepath)
                    os.system(cmd)
            # Make sure all the unzipped backtrace*.txt.gz files have data
            pattern = "backtrace_*.txt"
            file_found = False
            for filename in os.listdir(testMemHSieveDir):
                if fnmatch.fnmatch(filename, pattern):
                    file_found = True
                    filepath = "{0}/{1}".format(testMemHSieveDir, filename)
                    # os_test_file '-s' = file exists and has size > 0
                    if os_test_file(filepath, expression='-s') == False:
                        backtrace_good = False
                        TestFailureMsg += "Unzipped Backtrace File {0} does not contain data; ".format(filename)
            if file_found == False:
                TestFailureMsg += "Did not find any unzipped {0} files in directory {1}; ".format(pattern, testMemHSieveDir)
                backtrace_good = False
        TestPassed &= backtrace_good
        ######
        # Test if ANY mallocRank files to contain words
        pattern = "mallocRank.txt*"
        file_found = False
        mallocrank_good = False
        for filename in os.listdir(testMemHSieveDir):
            if fnmatch.fnmatch(filename, pattern):
                file_found = True
                break
        if file_found == True:
            # 'filename' still holds the first match from the loop above.
            filepath = "{0}/{1}".format(testMemHSieveDir, filename)
            # NOTE(review): file handle is never closed — consider 'with'.
            file = open(filepath, "rt")
            data = file.read()
            words = data.split()
            if len(words) > 0:
                mallocrank_good = True
            else:
                TestFailureMsg += "File {0} does not contain any words; ".format(filepath)
        else:
            TestFailureMsg += "Did not find any {0} files in directory {1}; ".format(pattern, testMemHSieveDir)
        TestPassed &= mallocrank_good
        ######
        # Test Statistics
        stats_good = True
        pattern = "StatisticOutput*.csv"
        # Leading spaces match the CSV fields as written by SST.
        stats_list = [" ReadHits", " ReadMisses", " WriteHits", " WriteMisses", \
                      " UnassociatedReadMisses", " UnassociatedWriteMisses"]
        # Look at each of the statnames in the list and if the statname exists in
        # at least one of files; also, if it exists is the data correct?
        for statname in stats_list:
            statnamefoundinfile = False
            for filename in os.listdir(testMemHSieveDir):
                if fnmatch.fnmatch(filename, pattern):
                    statfilepath = "{0}/{1}".format(testMemHSieveDir, filename)
                    # See if stat exists in the file
                    stat_check_result = self._sieve_check_stat_exists(statfilepath, statname)
                    if stat_check_result == True:
                        statnamefoundinfile = True
                        # Its found, now check its data fields
                        stat_check_result = self._sieve_check_stat_data(statfilepath, statname)
                        if stat_check_result == False:
                            stats_good = False
                            TestFailureMsg += "Statistic '{0}' in file {1} contains a 0 in one of last 3 fields; ".format(statname, statfilepath)
            # Check to see if stat name found in file
            if statnamefoundinfile == False:
                stats_good = False
                # NOTE(review): the format string only uses {0}; also, if no
                # StatisticOutput*.csv matched at all, 'statfilepath' is
                # unbound here and this line raises NameError instead of
                # failing the test cleanly — confirm and fix.
                TestFailureMsg += "Statistic '{0}' Not found in any statistics file; ".format(statname, statfilepath)
        TestPassed &= stats_good
        ######
        # Test Reference File
        # This will do a grep of both the output file and ref file looking at lines 23- 43
        # (keeps only lines that are exactly 1 or 2 characters wide)
        cmd = 'grep -w -e \"^.$\" -e \"^..$\" {0} > {1}'.format(outfile, grep_outfile)
        os.system(cmd)
        cmd = 'grep -w -e \"^.$\" -e \"^..$\" {0} > {1}'.format(reffile, grep_reffile)
        os.system(cmd)
        cmp_result = testing_compare_diff(testDataFileName, grep_outfile, grep_reffile)
        if cmp_result == False:
            TestFailureMsg += "Diffed grepped (lines 23-43) compared Output file {0} does not match Reference File {1}".format(grep_outfile, grep_reffile)
            diffdata = testing_get_diff_data(testDataFileName)
            log_failure(diffdata)
        TestPassed &= cmp_result
        ######
        # The final Test Assertion
        self.assertTrue(TestPassed, TestFailureMsg)
    #######################
    def _setup_sieve_test_files(self):
        # NOTE: This routine is called a single time at module startup, so it
        # may have some redundant code
        log_debug("_setup_sieve_test_files() Running")
        test_path = self.get_testsuite_dir()
        outdir = self.get_test_output_run_dir()
        tmpdir = self.get_test_output_tmp_dir()
        self.MemHElementDir = os.path.abspath("{0}/../".format(test_path))
        self.MemHElementSieveTestsDir = "{0}/Sieve/tests".format(self.MemHElementDir)
        self.testMemHSieveDir = "{0}/testmemhsieve".format(tmpdir)
        # Create a clean version of the scratch test directory
        # (original comment said "testCramSim" — looks copy-pasted from
        # another suite)
        if os.path.isdir(self.testMemHSieveDir):
            shutil.rmtree(self.testMemHSieveDir, True)
        os.makedirs(self.testMemHSieveDir)
        # Copy the Makefile to the test directory
        shutil.copy("{0}/Makefile".format(self.MemHElementSieveTestsDir), self.testMemHSieveDir)
        # Create a simlink of the ompsievetest.c file
        os_symlink_file(self.MemHElementSieveTestsDir, self.testMemHSieveDir, "ompsievetest.c")
        # Now run the make on it
        cmd = "make"
        rtn = OSCommand(cmd, set_cwd=self.testMemHSieveDir).run()
        log_debug("Make result = {0}; output =\n{1}".format(rtn.result(), rtn.output()))
        self.assertTrue(rtn.result() == 0, "ompsievetest.c failed to compile")
    ###
    def _sieve_check_stat_exists(self, statfile, stattocheck):
        # Verify that a stat exists in the file
        rtn_result = True
        found_row = False
        with open(statfile, 'rt') as csvfile:
            statreader = csv.reader(csvfile, delimiter=',')
            for row in statreader:
                if stattocheck in row:
                    found_row = True
                    break
        # Final eval to ensure we found the row
        rtn_result &= found_row
        return rtn_result
    def _sieve_check_stat_data(self, statfile, stattocheck):
        # Verify that the last 3 fields of the stat data is != 0
        rtn_result = True
        with open(statfile, 'rt') as csvfile:
            statreader = csv.reader(csvfile, delimiter=',')
            for row in statreader:
                if stattocheck in row:
                    log_debug("*** Found Stat {0} in {1}; Last 3 Data Fields = {2}, {3}, {4}".format(stattocheck, statfile, int(row[-3]), int(row[-2]), int(row[-1])))
                    rtn_result &= int(row[-3]) != 0
                    rtn_result &= int(row[-2]) != 0
                    rtn_result &= int(row[-1]) != 0
                    break
        return rtn_result
# NOTE: This is the bash code from the bamboo test system that possibly cleans up
# the old ompsievetest runs. We are not doing this unless necessary
# for the new frameworks
#Remove_old_ompsievetest_task() {
#memHS_PID=$$
# echo " Begin Remover -------------------"
#ps -ef | | |
# Repo: luxunxiansheng/DRLGP (gh_stars: 0)
# Lint as: python3
from __future__ import absolute_import, division, print_function
import collections
import enum
import math
import threading
import typing
from typing import Dict, List, Optional
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
"""Pseudocode description of the MuZero algorithm."""
# pylint: disable=unused-argument
# pylint: disable=missing-docstring
# pylint: disable=assignment-from-no-return
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
##########################
####### Helpers ##########
MAXIMUM_FLOAT_VALUE = float('inf')
KnownBounds = collections.namedtuple('KnownBounds', ['min', 'max'])
# noinspection PyArgumentList
Winner = enum.Enum("Winner", "black white draw")
# noinspection PyArgumentList
Player = enum.Enum("Player", "black white")
num_filters = 2
num_blocks = 8
class MinMaxStats(object):
    """Tracks the running minimum and maximum value seen in the search tree."""
    def __init__(self, known_bounds: Optional[KnownBounds]):
        # Without known bounds, start inverted (-inf max, +inf min) so the
        # first update() always wins.
        if known_bounds:
            self.maximum = known_bounds.max
            self.minimum = known_bounds.min
        else:
            self.maximum = -MAXIMUM_FLOAT_VALUE
            self.minimum = MAXIMUM_FLOAT_VALUE
    def update(self, value: float):
        """Fold *value* into the running bounds."""
        if value > self.maximum:
            self.maximum = value
        if value < self.minimum:
            self.minimum = value
    def normalize(self, value: float) -> float:
        """Rescale *value* into [0, 1] once real bounds have been observed."""
        spread = self.maximum - self.minimum
        if spread > 0:
            return (value - self.minimum) / spread
        # Bounds not yet established: pass the value through unchanged.
        return value
class MuZeroConfig(object):
    """Bundle of all self-play and training hyperparameters for MuZero."""
    def __init__(self,
                 action_space_size: int,
                 max_moves: int,
                 discount: float,
                 dirichlet_alpha: float,
                 num_simulations: int,
                 batch_size: int,
                 td_steps: int,
                 num_actors: int,
                 lr_init: float,
                 lr_decay_steps: float,
                 visit_softmax_temperature_fn,
                 known_bounds: Optional[KnownBounds] = None):
        ### Self-play settings ###
        self.action_space_size = action_space_size
        self.max_moves = max_moves
        self.discount = discount
        self.num_simulations = num_simulations
        self.num_actors = num_actors
        self.visit_softmax_temperature_fn = visit_softmax_temperature_fn
        # Dirichlet noise mixed into the root prior to encourage exploration.
        self.root_dirichlet_alpha = dirichlet_alpha
        self.root_exploration_fraction = 0.25
        # Constants of the pUCT child-selection formula.
        self.pb_c_base = 19652
        self.pb_c_init = 1.25
        # Optional a-priori value bounds; not strictly necessary, but gives
        # behaviour identical to AlphaZero on board games where values are
        # known to lie in [-1, 1].
        self.known_bounds = known_bounds
        ### Training settings ###
        self.training_steps = int(1e6)
        self.checkpoint_interval = int(100)
        self.window_size = int(1e6)
        self.batch_size = batch_size
        self.num_unroll_steps = 4
        self.td_steps = td_steps
        self.weight_decay = 1e-4
        self.momentum = 0.9
        # Exponential learning-rate decay schedule.
        self.lr_init = lr_init
        self.lr_decay_rate = 0.1
        self.lr_decay_steps = lr_decay_steps
    def new_game(self):
        """Create a fresh Game episode bound to this config."""
        return Game(self.action_space_size, self.discount)
def make_board_game_config(action_space_size: int, max_moves: int,
                           dirichlet_alpha: float,
                           lr_init: float) -> MuZeroConfig:
    """Build a MuZeroConfig with the standard two-player board-game settings."""
    def visit_softmax_temperature(num_moves, training_steps):
        # Sample proportionally to visit counts early on, then play greedily.
        return 1.0 if num_moves < 30 else 0.0
    return MuZeroConfig(
        action_space_size=action_space_size,
        max_moves=max_moves,
        discount=1.0,
        dirichlet_alpha=dirichlet_alpha,
        num_simulations=10,
        batch_size=64,
        td_steps=max_moves,  # Always use the full Monte Carlo return.
        num_actors=1,
        lr_init=lr_init,
        lr_decay_steps=400e3,
        visit_softmax_temperature_fn=visit_softmax_temperature,
        known_bounds=KnownBounds(-1, 1))
def make_connect4_config() -> MuZeroConfig:
    """Return the board-game config specialized for Connect-4 (7 columns)."""
    return make_board_game_config(action_space_size=7,
                                  max_moves=20,
                                  dirichlet_alpha=0.03,
                                  lr_init=0.01)
class Action(object):
    """A single move, identified by its integer index in the action space.

    Instances hash and compare by ``index`` so they can be used
    interchangeably with plain ints as dict keys (e.g. ``Node.children``).
    """
    def __init__(self, index: int):
        self.index = index
    def __hash__(self):
        return self.index
    def __eq__(self, other):
        # Compare by index against both Action instances and raw ints.
        # (The original relied on int's reflected __eq__ for Action-vs-Action;
        # this spells the same result out explicitly.)
        if isinstance(other, Action):
            return self.index == other.index
        return self.index == other
    def __gt__(self, other):
        # Fix: the original `self.index > other` raised TypeError for
        # Action-vs-Action ordering (no reflected __lt__ on Action).
        if isinstance(other, Action):
            return self.index > other.index
        return self.index > other
    def __repr__(self):
        return "Action({0})".format(self.index)
class Node(object):
    """A node of the MCTS search tree."""
    def __init__(self, prior: float):
        self.prior = prior          # prior probability from the network
        self.visit_count = 0
        self.value_sum = 0
        self.to_play = -1
        self.children = {}          # action -> child Node
        self.hidden_state = None
        self.reward = 0
    def expanded(self) -> bool:
        """A node counts as expanded once it has at least one child."""
        return bool(self.children)
    def value(self) -> float:
        """Mean backed-up value over all visits; 0 for an unvisited node."""
        if not self.visit_count:
            return 0
        return self.value_sum / self.visit_count
class ActionHistory(object):
    """Append-only record of the actions executed during one search.

    Only used to keep track of the actions executed.
    """
    def __init__(self, history: List[Action], action_space_size: int):
        # Copy so mutations on this history never leak back to the caller.
        self.history = list(history)
        self.action_space_size = action_space_size
    def clone(self):
        """Return an independent copy of this history."""
        return ActionHistory(self.history, self.action_space_size)
    def add_action(self, action: Action):
        self.history.append(action)
    def last_action(self) -> Action:
        return self.history[-1]
    def action_space(self) -> List[Action]:
        # NOTE(review): returns plain ints despite the annotation, as the
        # original did.
        return list(range(self.action_space_size))
    def to_play(self) -> Player:
        # White moves on even plies, black on odd ones.
        return Player.white if len(self.history) % 2 == 0 else Player.black
class Environment(object):
    """The environment MuZero is interacting with: a 6x7 Connect-4 board.

    The board is row-major with row 0 at the bottom; cells hold ' ', 'X'
    (white) or 'O' (black).
    """
    def __init__(self):
        self.board = None
        self.turn = 0
        self.done = False
        self.winner = None  # type: Winner
        self.resigned = False
    def reset(self):
        """Clear the board and all game state; returns self for chaining."""
        self.board = []
        for i in range(6):
            self.board.append([])
            for j in range(7):  # pylint: disable=unused-variable
                self.board[i].append(' ')
        self.turn = 0
        self.done = False
        self.winner = None
        self.resigned = False
        return self
    def update(self, board):
        """Adopt an external board position; the move count is recomputed."""
        self.board = numpy.copy(board)
        self.turn = self.turn_n()
        self.done = False
        self.winner = None
        self.resigned = False
        return self
    def turn_n(self):
        """Number of moves played so far (count of non-empty cells)."""
        turn = 0
        for i in range(6):
            for j in range(7):
                if self.board[i][j] != ' ':
                    turn += 1
        return turn
    def player_turn(self):
        """Side to move: white on even move counts, black on odd."""
        if self.turn % 2 == 0:
            return Player.white
        else:
            return Player.black
    def step(self, action):
        """Drop the mover's piece into column *action*; return the end reward.

        Returns +1/-1 once the game ends (0 for a draw or while the game is
        still in progress).
        """
        # The piece falls to the lowest empty row of the chosen column.
        for i in range(6):
            if self.board[i][action] == ' ':
                self.board[i][action] = (
                    'X' if self.player_turn() == Player.white else 'O')
                break
        self.turn += 1
        self.check_for_fours()
        if self.turn >= 42:
            self.done = True
            if self.winner is None:
                self.winner = Winner.draw
        r = 0
        if self.done:
            # Fix: the original tested the enum members themselves
            # (`if Winner.white:`), which are always truthy, so every finished
            # game scored +1 regardless of who actually won. Compare against
            # self.winner instead; drawn games now correctly score 0.
            if self.turn % 2 == 0:
                if self.winner == Winner.white:
                    r = 1
                elif self.winner == Winner.black:
                    r = -1
            else:
                if self.winner == Winner.black:
                    r = 1
                elif self.winner == Winner.white:
                    r = -1
        return r
    def legal_moves(self):
        """Per-column 0/1 mask of columns that can still accept a piece."""
        legal = [0, 0, 0, 0, 0, 0, 0]
        for j in range(7):
            for i in range(6):
                if self.board[i][j] == ' ':
                    legal[j] = 1
                    break
        return legal
    def legal_actions(self):
        """List of column indices that can still accept a piece."""
        legal = []
        for j in range(7):
            for i in range(6):
                if self.board[i][j] == ' ':
                    legal.append(j)
                    break
        return legal
    def check_for_fours(self):
        """Scan the board; set done (and winner) if any four-in-a-row exists."""
        for i in range(6):
            for j in range(7):
                if self.board[i][j] != ' ':
                    # check if a vertical four-in-a-row starts at (i, j)
                    if self.vertical_check(i, j):
                        self.done = True
                        return
                    # check if a horizontal four-in-a-row starts at (i, j)
                    if self.horizontal_check(i, j):
                        self.done = True
                        return
                    # check if a diagonal (either way) four-in-a-row starts at (i, j)
                    diag_fours = self.diagonal_check(i, j)
                    if diag_fours:
                        self.done = True
                        return
    def vertical_check(self, row, col):
        """True if a vertical run of >= 4 equal pieces starts at (row, col)."""
        four_in_a_row = False
        consecutive_count = 0
        for i in range(row, 6):
            if self.board[i][col].lower() == self.board[row][col].lower():
                consecutive_count += 1
            else:
                break
        if consecutive_count >= 4:
            four_in_a_row = True
            # 'x'/'X' marks white's pieces.
            if 'x' == self.board[row][col].lower():
                self.winner = Winner.white
            else:
                self.winner = Winner.black
        return four_in_a_row
    def horizontal_check(self, row, col):
        """True if a horizontal run of >= 4 equal pieces starts at (row, col)."""
        four_in_a_row = False
        consecutive_count = 0
        for j in range(col, 7):
            if self.board[row][j].lower() == self.board[row][col].lower():
                consecutive_count += 1
            else:
                break
        if consecutive_count >= 4:
            four_in_a_row = True
            if 'x' == self.board[row][col].lower():
                self.winner = Winner.white
            else:
                self.winner = Winner.black
        return four_in_a_row
    def diagonal_check(self, row, col):
        """True if a diagonal run of >= 4 equal pieces starts at (row, col).

        Checks the up-right diagonal and the down-right diagonal separately.
        """
        four_in_a_row = False
        count = 0
        # Up-right diagonal from (row, col).
        consecutive_count = 0
        j = col
        for i in range(row, 6):
            if j > 6:
                break
            elif self.board[i][j].lower() == self.board[row][col].lower():
                consecutive_count += 1
            else:
                break
            j += 1
        if consecutive_count >= 4:
            count += 1
            if 'x' == self.board[row][col].lower():
                self.winner = Winner.white
            else:
                self.winner = Winner.black
        # Down-right diagonal from (row, col).
        consecutive_count = 0
        j = col
        for i in range(row, -1, -1):
            if j > 6:
                break
            elif self.board[i][j].lower() == self.board[row][col].lower():
                consecutive_count += 1
            else:
                break
            j += 1
        if consecutive_count >= 4:
            count += 1
            if 'x' == self.board[row][col].lower():
                self.winner = Winner.white
            else:
                self.winner = Winner.black
        if count > 0:
            four_in_a_row = True
        return four_in_a_row
    def black_and_white_plane(self):
        """Two 6x7 binary occupancy planes: (white 'X' plane, black plane)."""
        board_white = numpy.copy(self.board)
        board_black = numpy.copy(self.board)
        for i in range(6):
            for j in range(7):
                if self.board[i][j] == ' ':
                    board_white[i][j] = 0
                    board_black[i][j] = 0
                elif self.board[i][j] == 'X':
                    board_white[i][j] = 1
                    board_black[i][j] = 0
                else:
                    board_white[i][j] = 0
                    board_black[i][j] = 1
        return numpy.array(board_white), numpy.array(board_black)
    def render(self):
        """Pretty-print the board and, once finished, the game result."""
        print("\nRound: " + str(self.turn))
        for i in range(5, -1, -1):
            print("\t", end="")
            for j in range(7):
                print("| " + str(self.board[i][j]), end=" ")
            print("|")
        print("\t _ _ _ _ _ _ _ ")
        print("\t 1 2 3 4 5 6 7 ")
        if self.done:
            print("Game Over!")
            if self.winner == Winner.white:
                print("X is the winner")
            elif self.winner == Winner.black:
                print("O is the winner")
            else:
                print("Game was a draw")
    @property
    def observation(self):
        """Flat 42-character string encoding of the board."""
        return ''.join(''.join(x for x in y) for y in self.board)
class Game(object):
"""A single episode of interaction with the environment."""
def __init__(self, action_space_size: int, discount: float):
self.environment = Environment().reset() # Game specific environment.
self.history = []
self.rewards = []
self.child_visits = []
self.root_values = []
self.action_space_size = action_space_size
self.discount = discount
    def terminal(self) -> bool:
        """True once the underlying environment reports the game is over."""
        # Game specific termination rules.
        return self.environment.done
    def legal_actions(self) -> List[Action]:
        """Playable columns (plain ints from the environment, despite the annotation)."""
        # Game specific calculation of legal actions.
        return self.environment.legal_actions()
def apply(self, action: Action):
reward = self.environment.step(action)
reward = reward if self.environment.turn % 2 != 0 and reward == 1 else -reward
self.rewards.append(reward)
self.history.append(action)
def store_search_statistics(self, root: Node):
sum_visits = sum(child.visit_count for child in root.children.values())
action_space = (Action(index)
for index in range(self.action_space_size))
self.child_visits.append([
root.children[a].visit_count /
sum_visits if a in root.children else 0
for a in action_space
])
self.root_values.append(root.value())
def make_image(self, state_index: int):
# Game specific feature planes.
| |
(mantissa, int(exp))
def __write_images(image_outputs, display_image_num, file_name):
    """Save a grid of output images.

    Arguments:
        image_outputs {Tensor list} -- list of output images
        display_image_num {int} -- number of images to be displayed
        file_name {str} -- name of the file where to save the images
    """
    # Expand gray-scale images to 3 channels so they stack with RGB outputs.
    expanded = [images.expand(-1, 3, -1, -1) for images in image_outputs]
    stacked = torch.cat([images[:display_image_num] for images in expanded], 0)
    grid = vutils.make_grid(
        stacked.data, nrow=display_image_num, padding=0, normalize=True
    )
    vutils.save_image(grid, file_name, nrow=1)
def write_2images(image_outputs, display_image_num, image_directory, postfix, comet_exp=None):
    """Write images from both worlds a and b of the cycle A-B-A as jpg.

    Arguments:
        image_outputs {Tensor list} -- list of images; first half are outputs
            in B, second half are outputs in A
        display_image_num {int} -- number of images to be displayed
        image_directory {str} -- destination directory
        postfix {str} -- postfix to filename

    Keyword Arguments:
        comet_exp {Comet experience} -- optional experiment to log jpgs to (default: {None})
    """
    half = len(image_outputs) // 2
    a2b_path = "%s/gen_a2b_%s.jpg" % (image_directory, postfix)
    b2a_path = "%s/gen_b2a_%s.jpg" % (image_directory, postfix)
    __write_images(image_outputs[:half], display_image_num, a2b_path)
    __write_images(image_outputs[half:], display_image_num, b2a_path)
    if comet_exp is not None:
        comet_exp.log_image(a2b_path)
        comet_exp.log_image(b2a_path)
def prepare_sub_folder(output_directory):
    """Create "images" and "checkpoints" subfolders in the output directory.

    Arguments:
        output_directory {str} -- output directory

    Returns:
        checkpoint_directory, image_directory -- checkpoints and images directories
    """
    image_directory = os.path.join(output_directory, "images")
    checkpoint_directory = os.path.join(output_directory, "checkpoints")
    for directory in (image_directory, checkpoint_directory):
        if not os.path.exists(directory):
            print("Creating directory: {}".format(directory))
        # exist_ok avoids the check-then-create race of the original version.
        os.makedirs(directory, exist_ok=True)
    return checkpoint_directory, image_directory
def write_loss(iterations, trainer, train_writer):
    """Log every loss/grad/nwd attribute of *trainer* to the summary writer."""
    def _is_logged(attr):
        # Non-callable, non-dunder attrs whose name marks them as a metric.
        return (not callable(getattr(trainer, attr))
                and not attr.startswith("__")
                and ("loss" in attr or "grad" in attr or "nwd" in attr))
    for name in filter(_is_logged, dir(trainer)):
        train_writer.add_scalar(name, getattr(trainer, name), iterations + 1)
def slerp(val, low, high):
    """Spherical linear interpolation (slerp) between *low* and *high*.

    original: Animating Rotation with Quaternion Curves
    https://arxiv.org/abs/1609.04468
    Code: https://github.com/soumith/dcgan.torch/issues/14

    Arguments:
        val {float} -- interpolation coefficient
        low {ndarray} -- start point of the interpolation
        high {ndarray} -- end point of the interpolation

    Returns:
        slerp value
    """
    # Angle between the two (normalized) endpoints.
    unit_low = low / np.linalg.norm(low)
    unit_high = high / np.linalg.norm(high)
    omega = np.arccos(np.dot(unit_low, unit_high))
    sin_omega = np.sin(omega)
    weight_low = np.sin((1.0 - val) * omega) / sin_omega
    weight_high = np.sin(val * omega) / sin_omega
    return weight_low * low + weight_high * high
def get_slerp_interp(nb_latents, nb_interp, z_dim):
    """Build a stack of spherically-interpolated random latent vectors.

    modified from: PyTorch inference for "Progressive Growing of GANs"
    https://github.com/ptrblck/prog_gans_pytorch_inference
    """
    steps = np.linspace(0, 1, num=nb_interp)
    rows = []
    for _ in range(nb_latents):
        start = np.random.randn(z_dim)
        end = np.random.randn(z_dim)
        rows.append(np.array([slerp(v, start, end) for v in steps],
                             dtype=np.float32))
    if rows:
        stacked = np.vstack(rows)
    else:
        stacked = np.empty(shape=(0, z_dim), dtype=np.float32)
    # Add trailing singleton spatial dims: (N, z_dim) -> (N, z_dim, 1, 1).
    return stacked[:, :, np.newaxis, np.newaxis]
# Get model list for resume
def get_model_list(dirname, key):
    """Return the lexicographically last ".pt" model in *dirname* containing *key*.

    Arguments:
        dirname {str} -- directory name
        key {str} -- substring that must appear in the model file name

    Returns:
        last_model_name {str} -- path of the last matching model, or None when
        the directory does not exist or no file matches
    """
    if os.path.exists(dirname) is False:
        return None
    gen_models = [
        os.path.join(dirname, f)
        for f in os.listdir(dirname)
        if os.path.isfile(os.path.join(dirname, f)) and key in f and ".pt" in f
    ]
    # Fix: the original tested `gen_models is None` (never true for a list
    # comprehension), so an empty match list crashed on gen_models[-1].
    if not gen_models:
        return None
    gen_models.sort()
    return gen_models[-1]
def load_vgg16(model_dir):
    # Intentionally disabled: the Lua-weight loader this depended on
    # (torch.utils.serialization.load_lua) was removed from modern PyTorch.
    raise NotImplementedError(
        "This function relies on torch.utils.serialization.load_lua which is deprecated"
    )
def load_flood_classifier(ckpt_path):
    """Load a flood classifier based on a pretrained resnet18 network.

    Arguments:
        ckpt_path {str} -- path to checkpoint

    Returns:
        model -- flood classifier model
    """
    classifier = models.resnet18(pretrained=True)
    # Replace the ImageNet head with a binary (flood / no-flood) head.
    classifier.fc = nn.Linear(classifier.fc.in_features, 2)
    classifier.load_state_dict(torch.load(ckpt_path))
    return classifier
class Resnet34_8s(nn.Module):
    """Fully convolutional Resnet34 with output stride 8 for dense prediction."""
    def __init__(self, num_classes=1000):
        super(Resnet34_8s, self).__init__()
        # Load the pretrained weights, remove avg pool
        # layer and get the output stride of 8
        resnet34_8s = resnet34(
            fully_conv=True, pretrained=True, output_stride=8, remove_avg_pool_layer=True
        )
        # Randomly initialize the 1x1 Conv scoring layer
        resnet34_8s.fc = nn.Conv2d(resnet34_8s.inplanes, num_classes, 1)
        self.resnet34_8s = resnet34_8s
        self._normal_initialization(self.resnet34_8s.fc)
    def _normal_initialization(self, layer):
        """Initialize *layer* with N(0, 0.01) weights and zero bias."""
        layer.weight.data.normal_(0, 0.01)
        layer.bias.data.zero_()
    def forward(self, x, feature_alignment=False):
        """Return per-class score maps upsampled to the input's spatial size.

        Keyword Arguments:
            feature_alignment {bool} -- adjust the input size so features
                align with the output stride before scoring
        """
        input_spatial_dim = x.size()[2:]
        if feature_alignment:
            x = adjust_input_image_size_for_proper_feature_alignment(x, output_stride=8)
        x = self.resnet34_8s(x)
        # interpolate(..., align_corners=True) is the documented non-deprecated
        # equivalent of the former upsample_bilinear call (identical values).
        x = nn.functional.interpolate(
            input=x, size=input_spatial_dim, mode="bilinear", align_corners=True
        )
        # Fix: removed a stray debug print() left in the forward path.
        return x
def load_segmentation_model(ckpt_path, classes, device="cuda"):
    """Load a Resnet34 segmentation model with output stride 8 from checkpoint.

    Arguments:
        ckpt_path {str} -- checkpoint path
        classes {int} -- number of segmentation classes

    Keyword Arguments:
        device {str} -- device to map the model and weights onto
            (default "cuda", matching the original behaviour)

    Returns:
        model -- segmentation model
    """
    model = Resnet34_8s(num_classes=classes).to(device)
    # map_location lets the checkpoint load even when it was saved from a
    # different device than the one requested.
    model.load_state_dict(torch.load(ckpt_path, map_location=device))
    return model
# Define the helper function
def decode_segmap(image, nc=19):
    """Colorize a segmentation map using the CITYSCAPES label colormap.

    Arguments:
        image {array} -- segmented image
            (array of image size containing a class id at each pixel)

    Returns:
        H x W x 3 uint8 array -- RGB visualization of the segmentation
    """
    # CITYSCAPES train-id palette, one RGB triple per class id 0..18.
    colormap = np.array(
        [
            [128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
            [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
            [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
            [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100],
            [0, 80, 100], [0, 0, 230], [119, 11, 32],
        ],
        dtype=np.uint8,
    )
    channels = [np.zeros_like(image).astype(np.uint8) for _ in range(3)]
    for label in range(nc):
        mask = image == label
        for c in range(3):
            channels[c][mask] = colormap[label, c]
    return np.stack(channels, axis=2)
def load_inception(model_path):
    """Load an Inception-v3 classifier from a checkpoint.

    Arguments:
        model_path {str} -- model path

    Returns:
        model -- Inception model with all parameters frozen
    """
    state_dict = torch.load(model_path)
    net = inception_v3(pretrained=False, transform_input=True)
    net.aux_logits = False
    # Resize the final layer to match the checkpoint's class count.
    net.fc = nn.Linear(net.fc.in_features, state_dict["fc.weight"].size(0))
    net.load_state_dict(state_dict)
    # Inference only: freeze every parameter.
    for param in net.parameters():
        param.requires_grad = False
    return net
def vgg_preprocess(batch):
    """Preprocess batch to use VGG model.

    Converts an RGB batch in [-1, 1] into the BGR, mean-subtracted,
    [0, 255]-range layout expected by Caffe-style VGG weights.
    NOTE(review): hard-codes .cuda() and the legacy Variable wrapper, so this
    only runs on a CUDA machine under an old torch API — confirm before reuse.
    """
    tensortype = type(batch.data)
    (r, g, b) = torch.chunk(batch, 3, dim=1)
    batch = torch.cat((b, g, r), dim=1)  # convert RGB to BGR
    batch = (batch + 1) * 255 * 0.5  # [-1, 1] -> [0, 255]
    # Per-channel means in BGR order (Caffe/ImageNet convention).
    mean = tensortype(batch.data.size()).cuda()
    mean[:, 0, :, :] = 103.939
    mean[:, 1, :, :] = 116.779
    mean[:, 2, :, :] = 123.680
    batch = batch.sub(Variable(mean))  # subtract mean
    return batch
def get_scheduler(optimizer, hyperparameters, iterations=-1):
    """Return a learning-rate scheduler for *optimizer* per the config.

    Each parameter group's lr decays by ``gamma`` every ``step_size`` epochs
    when ``lr_policy`` is "step"; a missing or "constant" policy yields no
    scheduler. When iterations=-1, sets initial lr as lr.

    Arguments:
        optimizer {Optimizer} -- Wrapped optimizer
        hyperparameters {dict} -- Hyperparameters parsed from config yaml file

    Keyword Arguments:
        iterations {int} -- index of the last epoch (default: {-1})

    Raises:
        NotImplementedError -- for any unrecognized lr_policy
    """
    if "lr_policy" not in hyperparameters or hyperparameters["lr_policy"] == "constant":
        return None  # constant learning rate: no scheduler needed
    if hyperparameters["lr_policy"] == "step":
        return lr_scheduler.StepLR(
            optimizer,
            step_size=hyperparameters["step_size"],
            gamma=hyperparameters["gamma"],
            last_epoch=iterations,
        )
    # Fix: the original *returned* the exception object instead of raising it,
    # and passed the policy name as a stray constructor arg instead of
    # formatting it into the message.
    raise NotImplementedError(
        "learning rate policy [%s] is not implemented" % hyperparameters["lr_policy"]
    )
def weights_init(init_type="gaussian"):
    """Return an initializer suitable for ``module.apply`` on Conv/Linear layers.

    Keyword Arguments:
        init_type {str} -- one of "gaussian", "xavier", "kaiming",
            "orthogonal" or "default" (leave weights untouched)
    """
    def init_fun(m):
        classname = m.__class__.__name__
        is_weighted_layer = (
            classname.find("Conv") == 0 or classname.find("Linear") == 0
        ) and hasattr(m, "weight")
        if not is_weighted_layer:
            return
        if init_type == "gaussian":
            init.normal_(m.weight.data, 0.0, 0.02)
        elif init_type == "xavier":
            init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
        elif init_type == "kaiming":
            init.kaiming_normal_(m.weight.data, a=0, mode="fan_in")
        elif init_type == "orthogonal":
            init.orthogonal_(m.weight.data, gain=math.sqrt(2))
        elif init_type == "default":
            pass  # keep the module's own initialization
        else:
            assert 0, "Unsupported initialization: {}".format(init_type)
        # Bias is always zeroed, regardless of the weight scheme.
        if hasattr(m, "bias") and m.bias is not None:
            init.constant_(m.bias.data, 0.0)
    return init_fun
class Timer:
    """Context manager that prints ``msg % elapsed_seconds`` on exit."""
    def __init__(self, msg):
        self.msg = msg
        self.start_time = None
    def __enter__(self):
        self.start_time = time.time()
    def __exit__(self, exc_type, exc_value, exc_tb):
        elapsed = time.time() - self.start_time
        print(self.msg % elapsed)
def pytorch03_to_pytorch04(state_dict_base, trainer_name):
    """Convert a PyTorch 0.3 checkpoint dict to the 0.4 layout.

    Drops the InstanceNorm running-stat buffers (running_mean / running_var)
    that PyTorch 0.4 no longer expects, for both the "a" and "b" halves of
    the checkpoint.

    Arguments:
        state_dict_base {dict} -- checkpoint with "a" and "b" state dicts
        trainer_name {str} -- "MUNIT" or another trainer (e.g. "UNIT")

    Returns:
        dict -- converted checkpoint with the stale keys removed
    """
    # Keys to strip for the MUNIT trainer (content-encoder buffers).
    munit_suffixes = (
        "enc_content.model.0.norm.running_mean",
        "enc_content.model.0.norm.running_var",
        "enc_content.model.1.norm.running_mean",
        "enc_content.model.1.norm.running_var",
        "enc_content.model.2.norm.running_mean",
        "enc_content.model.2.norm.running_var",
        "enc_content.model.3.model.0.model.1.norm.running_mean",
        "enc_content.model.3.model.0.model.1.norm.running_var",
        "enc_content.model.3.model.0.model.0.norm.running_mean",
        "enc_content.model.3.model.0.model.0.norm.running_var",
        "enc_content.model.3.model.1.model.1.norm.running_mean",
        "enc_content.model.3.model.1.model.1.norm.running_var",
        "enc_content.model.3.model.1.model.0.norm.running_mean",
        "enc_content.model.3.model.1.model.0.norm.running_var",
        "enc_content.model.3.model.2.model.1.norm.running_mean",
        "enc_content.model.3.model.2.model.1.norm.running_var",
        "enc_content.model.3.model.2.model.0.norm.running_mean",
        "enc_content.model.3.model.2.model.0.norm.running_var",
        "enc_content.model.3.model.3.model.1.norm.running_mean",
        "enc_content.model.3.model.3.model.1.norm.running_var",
        "enc_content.model.3.model.3.model.0.norm.running_mean",
        "enc_content.model.3.model.3.model.0.norm.running_var",
    )
    # Keys to strip for other trainers (encoder and decoder buffers).
    unit_suffixes = (
        "enc.model.0.norm.running_mean",
        "enc.model.0.norm.running_var",
        "enc.model.1.norm.running_mean",
        "enc.model.1.norm.running_var",
        "enc.model.2.norm.running_mean",
        "enc.model.2.norm.running_var",
        "enc.model.3.model.0.model.1.norm.running_mean",
        "enc.model.3.model.0.model.1.norm.running_var",
        "enc.model.3.model.0.model.0.norm.running_mean",
        "enc.model.3.model.0.model.0.norm.running_var",
        "enc.model.3.model.1.model.1.norm.running_mean",
        "enc.model.3.model.1.model.1.norm.running_var",
        "enc.model.3.model.1.model.0.norm.running_mean",
        "enc.model.3.model.1.model.0.norm.running_var",
        "enc.model.3.model.2.model.1.norm.running_mean",
        "enc.model.3.model.2.model.1.norm.running_var",
        "enc.model.3.model.2.model.0.norm.running_mean",
        "enc.model.3.model.2.model.0.norm.running_var",
        "enc.model.3.model.3.model.1.norm.running_mean",
        "enc.model.3.model.3.model.1.norm.running_var",
        "enc.model.3.model.3.model.0.norm.running_mean",
        "enc.model.3.model.3.model.0.norm.running_var",
        "dec.model.0.model.0.model.1.norm.running_mean",
        "dec.model.0.model.0.model.1.norm.running_var",
        "dec.model.0.model.0.model.0.norm.running_mean",
        "dec.model.0.model.0.model.0.norm.running_var",
        "dec.model.0.model.1.model.1.norm.running_mean",
        "dec.model.0.model.1.model.1.norm.running_var",
        "dec.model.0.model.1.model.0.norm.running_mean",
        "dec.model.0.model.1.model.0.norm.running_var",
        "dec.model.0.model.2.model.1.norm.running_mean",
        "dec.model.0.model.2.model.1.norm.running_var",
        "dec.model.0.model.2.model.0.norm.running_mean",
        "dec.model.0.model.2.model.0.norm.running_var",
        "dec.model.0.model.3.model.1.norm.running_mean",
        "dec.model.0.model.3.model.1.norm.running_var",
        "dec.model.0.model.3.model.0.norm.running_mean",
        "dec.model.0.model.3.model.0.norm.running_var",
    )
    def __conversion_core(sub_state_dict, trainer_name):
        # One half of the checkpoint: copy it without the stale buffer keys.
        suffixes = munit_suffixes if trainer_name == "MUNIT" else unit_suffixes
        return {
            key: value
            for key, value in sub_state_dict.items()
            if not key.endswith(suffixes)
        }
    # Fix: the original nested a *redefinition* of __conversion_core inside the
    # non-MUNIT branch (so non-MUNIT checkpoints were never converted and the
    # later 2-arg call would have raised TypeError), and the MUNIT branch fell
    # off the end without returning, yielding None. Restored the upstream
    # MUNIT structure: one helper handling both trainer types.
    state_dict = dict()
    state_dict["a"] = __conversion_core(state_dict_base["a"], trainer_name)
    state_dict["b"] = __conversion_core(state_dict_base["b"], trainer_name)
    return state_dict
# Domain adversarial | |
| wx.EXPAND,
)
self.text_content = wx.TextCtrl(self.panel,
wx.ID_ANY,
style=wx.TE_MULTILINE
)
self.text_content.SetMinSize((400, 200))
self.vbox1.Add(self.text_content)
# Button definitions and bindings
self.button_add = wx.Button(self.panel, wx.ID_ANY, label=_(u'新增'))
self.button_add.Bind(wx.EVT_BUTTON, self.add_btn_handler)
self.button_close = wx.Button(self.panel, wx.ID_ANY, label=_(u'关闭'))
self.button_close.Bind(wx.EVT_BUTTON, self.close_btn_handler)
# Add widgets to the panel sizer component
self.panel_sizer.Add(self.vbox1, flag=wx.TOP | wx.EXPAND, border=15)
self.panel_sizer.Add(self.vbox2, flag=wx.TOP | wx.EXPAND, border=15)
self.panel_sizer.Add(self.button_add, flag=wx.TOP, border=15)
self.panel_sizer.Add(self.button_close, flag=wx.TOP, border=5)
self.panel.SetSizer(self.panel_sizer)
self.main_sizer.Add(self.panel,
proportion=1,
flag=wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND,
border=15
)
# Apply sizer and display dialog panel
self.SetSizerAndFit(self.main_sizer)
self.Layout()
self.Center()
# Button bindings
    def close_btn_handler(self, event):
        """Close the dialog when the close button is pressed."""
        self.Close()
def add_btn_handler(self, event):
config = configparser.ConfigParser(interpolation=None)
cfg_path = PROJECT_ABSOLUTE_PATH + "\\config.ini"
config.read(cfg_path,encoding='utf-8')
q_cmd_name = self.text_name.GetValue()
q_cmd = self.text_content.GetValue()
curr_cmd_count = int(config.get('QuickCMD', 'cmdcnt'))
if curr_cmd_count == 16:
wx.MessageBox(_(u'内置命令的最大限制是16!'),
_(u'提示'),
wx.YES_DEFAULT | wx.ICON_WARNING
)
return
if len(q_cmd_name) > 15:
wx.MessageBox(_(u'命令长度不可超过15个字符!'),
_(u'提示'),
wx.YES_DEFAULT | wx.ICON_WARNING
)
return
if q_cmd_name != '' and q_cmd != '':
new_cmd_count = str(int(config.get('QuickCMD', 'cmdcnt')) + 1)
config.set("QuickCMD", 'cmdcnt', new_cmd_count)
new_cmd_name = 'cmd' + new_cmd_count
modified_cmd = q_cmd.replace('\n', '\\r\\n')
modified_cmd = modified_cmd + '\\r\\n'
new_cmd = q_cmd_name + '|' + modified_cmd
config.set("QuickCMD", new_cmd_name, new_cmd)
with open(cfg_path, "w+", encoding='utf-8') as f:
config.write(f)
else:
return
class FileDrop(wx.FileDropTarget): # Drop target that forwards dropped files via pubsub
    """wx drop target: publishes the dropped file list on the "dragAndDrop" topic."""
    def __init__(self, window):
        wx.FileDropTarget.__init__(self) # File Drop targets are subsets of windows
        self.window = window  # NOTE(review): stored but unused here — confirm before removing
    def OnDropFiles(self, x, y, filenames): # Called by wx with drop position and paths
        # Broadcast the file list; the subscribed window performs the import.
        pub.sendMessage("dragAndDrop", arg1=filenames)
        return True
class redirect_err:
    """File-like object that timestamps writes into the error log.

    Bound to sys.stderr so every stderr write lands in
    logs/software/err/<open_time>_run_err.log.
    """
    #----------------------------------------------------------------------
    def __init__(self, obj):
        """Open (append mode) the error log, creating its directory if needed."""
        if ifExist(PROJECT_ABSOLUTE_PATH + "\\logs\\software\\err\\")==False:
            makeDir(PROJECT_ABSOLUTE_PATH + "\\logs\\software\\err\\")
        file_path = PROJECT_ABSOLUTE_PATH + "\\logs\\software\\err\\" + open_time + "_run_err.log"
        # NOTE(review): the handle is intentionally kept open for the program's
        # lifetime; "filename" actually holds a file object, not a path.
        self.filename = open(file_path, "a",encoding = 'utf-8')
    #----------------------------------------------------------------------
    def write(self, text):
        """Prefix *text* with a timestamp and append it to the log (no-op if closed)."""
        if self.filename.closed:
            pass
        else:
            curr_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            self.filename.write('['+ str(curr_time) + '] ' + text)
            self.filename.flush()
class redirect_std:
    """File-like object that timestamps writes into the stdout log.

    Bound to sys.stdout so every stdout write lands in
    logs/software/std/<open_time>_run_std.log.
    """
    #----------------------------------------------------------------------
    def __init__(self, obj):
        """Open (append mode) the stdout log, creating its directory if needed."""
        if ifExist(PROJECT_ABSOLUTE_PATH + "\\logs\\software\\std\\")==False:
            makeDir(PROJECT_ABSOLUTE_PATH + "\\logs\\software\\std\\")
        file_path = PROJECT_ABSOLUTE_PATH + "\\logs\\software\\std\\" + open_time + "_run_std.log"
        # NOTE(review): handle kept open for the program's lifetime;
        # "filename" actually holds a file object, not a path.
        self.filename = open(file_path, "a",encoding = 'utf-8')
    #----------------------------------------------------------------------
    def write(self, text):
        """Prefix *text* with a timestamp and append it to the log (no-op if closed)."""
        if self.filename.closed:
            pass
        else:
            curr_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            self.filename.write('['+ str(curr_time) + '] ' + text)
            self.filename.flush()
'''
class StandardOutWrite:
def write(self, x):
old_std.write(x.replace("\n", " [[%s]]\n" % str(datetime.datetime.now())))
'''
class qpyTools(wx.Frame):
fileExtIcon = {}
logVar = ""
#for path str
localFilePathRoot = False #Root for list drivers; "" for nothing
def __init__(self, *args, **kwds):
# begin wxGlade: qpyTools.__init__
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
w = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_X)
h = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y)
self.SetSize((int(0.6 * w), int(0.7 * h)))
# check resolution before starting
if (wx.GetDisplaySize()[0]*wx.GetDisplaySize()[1]) <= 1366*768:
self.Maximize(True)
self.Center()
# Error timestamp addition
sys.stderr = redirect_err(self)
sys.stdout = redirect_std(self)
# Menu Bar
self.menuBar = wx.MenuBar()
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(2001, _(u"保存"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2001)
wxglade_tmp_menu.AppendSeparator()
wxglade_tmp_menu.Append(2002, _(u"退出"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2002)
self.menuBar.Append(wxglade_tmp_menu, _(u"文件 (&F)"))
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(2011, _(u"交互命令行"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2011)
wxglade_tmp_menu.Append(2012, _(u"文件浏览"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2012)
wxglade_tmp_menu.Append(2013, _(u"下载固件/脚本"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2013)
wxglade_tmp_menu.Append(2014, _(u"软件设置"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2014)
wxglade_tmp_menu.AppendSeparator()
wxglade_tmp_menu_sub = wx.Menu()
wxglade_tmp_menu_sub.Append(2021, _(u"简体中文 (zh_CN)"), "", wx.ITEM_RADIO)
self.Bind(wx.EVT_MENU, self.menuHandler, id=2021)
wxglade_tmp_menu_sub.Append(2022, "English (en)", "", wx.ITEM_RADIO)
self.Bind(wx.EVT_MENU, self.menuHandler, id=2022)
if languageTab == 'en':
wxglade_tmp_menu_sub.Check(2022,True)
elif languageTab == 'zh_CN':
wxglade_tmp_menu_sub.Check(2021,True)
# Language
# wxglade_tmp_menu.Append(wx.ID_ANY, _(u"语言"), wxglade_tmp_menu_sub, "")
wxglade_tmp_menu.AppendSubMenu(wxglade_tmp_menu_sub, _(u"语言"))
wxglade_tmp_menu.AppendSeparator()
wxglade_tmp_menu_sub = wx.Menu()
wxglade_tmp_menu_sub.Append(2031, _(u"切换到..."), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2031)
wxglade_tmp_menu_sub.Append(2032, _(u"日志另存为"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2032)
# Module Log
# wxglade_tmp_menu.Append(wx.ID_ANY, _(u"模块日志"), wxglade_tmp_menu_sub, "")
wxglade_tmp_menu.AppendSubMenu(wxglade_tmp_menu_sub, _(u"模块日志"))
wxglade_tmp_menu_sub = wx.Menu()
wxglade_tmp_menu_sub.Append(2041, _(u"查看"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2041)
wxglade_tmp_menu_sub.Append(2042, _(u"日志另存为"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2042)
# Download Log
# wxglade_tmp_menu.Append(wx.ID_ANY, _(u"下载日志"), wxglade_tmp_menu_sub, "")
wxglade_tmp_menu.AppendSubMenu(wxglade_tmp_menu_sub, _(u"下载日志"))
self.menuBar.Append(wxglade_tmp_menu, _(u"查看 (&V)"))
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(2051, _(u"官方网站"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2051)
wxglade_tmp_menu.AppendSeparator()
wxglade_tmp_menu.Append(2052, _(u"在线Wiki"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2052)
wxglade_tmp_menu.Append(2053, _(u"在线教程"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2053)
wxglade_tmp_menu.Append(2054, _(u"QQ开发交流群"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2054)
wxglade_tmp_menu.AppendSeparator()
wxglade_tmp_menu.Append(2055, _(u"资料下载"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2055)
self.menuBar.Append(wxglade_tmp_menu, _(u"教程 (&E)"))
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(2061, _(u"检查升级"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2061)
wxglade_tmp_menu.AppendSeparator()
wxglade_tmp_menu.Append(2065, _(u"版本信息"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2065)
wxglade_tmp_menu.Append(2066, _(u"使用指导"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2066)
wxglade_tmp_menu.AppendSeparator()
wxglade_tmp_menu.Append(2062, _(u"关于 Quectel 移远"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2062)
wxglade_tmp_menu.Append(2063, _(u"关于 QuecPython"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2063)
wxglade_tmp_menu.Append(2064, _(u"版本"), "")
self.Bind(wx.EVT_MENU, self.menuHandler, id=2064)
self.menuBar.Append(wxglade_tmp_menu, _(u"帮助 (&H)"))
self.SetMenuBar(self.menuBar)
# Menu Bar end
self.statusBar = self.CreateStatusBar(3)
# Tool Bar
self.toolBar = wx.ToolBar(self, -1)
self.SetToolBar(self.toolBar)
self.toolBar.AddSeparator()
self.toolBar.AddTool(3011, _(u"开始"), wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\play-disable.ico", wx.BITMAP_TYPE_ICO), wx.NullBitmap, wx.ITEM_NORMAL, "", _(u"开启模块命令行交互"))
self.toolBar.AddSeparator()
self.toolBar.AddTool(3012, _(u"暂停"), wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\pause-disable.ico", wx.BITMAP_TYPE_ICO), wx.NullBitmap, wx.ITEM_NORMAL, "", _(u"暂停模块命令行交互"))
self.toolBar.AddSeparator()
self.toolBar.AddTool(3013, _(u"停止"), wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\stop.ico", wx.BITMAP_TYPE_ICO), wx.NullBitmap, wx.ITEM_NORMAL, "", _(u"停止模块命令行交互"))
self.toolBar.AddSeparator()
self.toolBar.AddTool(3014, _(u"清除"), wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\clean.ico", wx.BITMAP_TYPE_ICO), wx.NullBitmap, wx.ITEM_NORMAL, "", _(u"屏幕打印清除"))
self.toolBar.AddSeparator()
self.toolBar.AddSeparator()
self.toolBar.AddSeparator()
self.toolBar.AddTool(3001, _(u"时间戳"), wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\timerstamp.ico", wx.BITMAP_TYPE_ICO), wx.NullBitmap, wx.ITEM_CHECK, "", _(u"显示打印时间"))
self.toolBar.AddSeparator()
self.toolBar.AddTool(3002, _(u"显示行号"), wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\linenumber.ico", wx.BITMAP_TYPE_ICO), wx.NullBitmap, wx.ITEM_CHECK, "", _(u"行号显示"))
self.toolBar.AddSeparator()
self.toolBar.AddTool(3003, _(u"格式"), wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\format.ico", wx.BITMAP_TYPE_ICO), wx.NullBitmap, wx.ITEM_CHECK, "", _(u"交互页面主题切换"))
self.toolBar.AddSeparator()
self.toolBar.AddSeparator()
self.toolBar.AddSeparator()
self.toolBar.AddTool(3021, _(u"保存"), wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\save.ico", wx.BITMAP_TYPE_ICO), wx.NullBitmap, wx.ITEM_NORMAL, "", _(u"日志保存"))
self.toolBar.AddSeparator()
self.toolBar.AddTool(3022, _(u"搜索"), wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\search.ico", wx.BITMAP_TYPE_ICO), wx.NullBitmap, wx.ITEM_NORMAL, "", _(u"日志关键字搜索"))
self.toolBar.AddSeparator()
self.toolBar.AddSeparator()
self.toolBar.AddSeparator()
self.toolBar.AddTool(3031, _(u"设置"), wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\settings.ico", wx.BITMAP_TYPE_ICO), wx.NullBitmap, wx.ITEM_NORMAL, "", _(u"设置"))
self.toolBar.AddSeparator()
self.toolBar.AddTool(3032, _(u"工具箱"), wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\box.ico", wx.BITMAP_TYPE_ICO), wx.NullBitmap, wx.ITEM_NORMAL, "", _(u"工具箱"))
self.toolBar.AddSeparator()
self.toolBar.AddTool(3033, _(u"工具箱配置"), wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\add-box.ico", wx.BITMAP_TYPE_ICO), wx.NullBitmap, wx.ITEM_NORMAL, "", _(u"配置工具箱命令"))
self.toolBar.AddSeparator()
self.toolBar.AddTool(3034, _(u"置顶"), wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\ontop-disable.ico", wx.BITMAP_TYPE_ICO), wx.NullBitmap, wx.ITEM_NORMAL, "", _(u"窗口置顶"))
# Tool Bar end
self.label_4 = wx.StaticText(self, wx.ID_ANY, _(u"选择串口"))
self.comSelector = wx.Choice(self, wx.ID_ANY, choices=[])
self.label_3 = wx.StaticText(self, wx.ID_ANY, _(u"波特率"))
self.comBaud = wx.ComboBox(self, wx.ID_ANY, choices=["1200", "2400", "4800", "9600", "14400", "19200", "38400", "57600", "115200", "230400", "256000", "460800", "921600"], style=wx.CB_DROPDOWN | wx.CB_READONLY)
self.comOpen = wx.Button(self, 10006, _(u"打开串口"))
self.comSettingMore = wx.Button(self, wx.ID_ANY, "...", style=wx.BU_BOTTOM)
self.notebook_1 = wx.Notebook(self, wx.ID_ANY)
self.notebook_1_pane_1 = wx.Panel(self.notebook_1, wx.ID_ANY)
self.stc = wx.stc.StyledTextCtrl(self.notebook_1_pane_1, wx.ID_ANY)
self.stc.SetLexer(stc.STC_LEX_PYTHON)
self.stc.SetKeyWords(0, " ".join(keyword.kwlist))
self.stc.Bind(wx.EVT_KEY_DOWN, self.DoKeyPress)
self.stc.Bind(wx.EVT_KEY_UP, self.forbidChar) # [F9] Ignore Chinese letters
# self.stc.Bind(wx.EVT_MOTION, self.forbidChar) # [F9] Ignore Chinese letters
self.stc.Bind(wx.EVT_CONTEXT_MENU, self.OnSTCContextMenu)
self.CONTENT = None # [F9] Ignore Chinese letters
self.stc.SetTabWidth(4)
self.stc.SetIndent(4)
#self.stcColorized(True)
self.stcLineNumberdisplayMode = 1
self.stc.SetValue(">>> ")
self.stc.SetWrapMode(1)
self.notebook_1_pane_2 = wx.Panel(self.notebook_1, wx.ID_ANY)
self.fileSplitter = wx.SplitterWindow(self.notebook_1_pane_2, wx.ID_ANY)
self.fileSplitter.SetSashGravity(0.5)
self.window_1_pane_1 = wx.Panel(self.fileSplitter, wx.ID_ANY)
self.localFilePathGoUp = wx.BitmapButton(self.window_1_pane_1, wx.ID_ANY, wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\goesup.ico", wx.BITMAP_TYPE_ICO), style=wx.BU_BOTTOM)
self.localFilePathReFresh = wx.BitmapButton(self.window_1_pane_1, wx.ID_ANY, wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\refresh.ico", wx.BITMAP_TYPE_ICO), style=wx.BU_BOTTOM)
self.localFilePath = wx.TextCtrl(self.window_1_pane_1, wx.ID_ANY, "")
self.localFilePathGo = wx.BitmapButton(self.window_1_pane_1, wx.ID_ANY, wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\enter.ico", wx.BITMAP_TYPE_ICO), style=wx.BU_BOTTOM)
self.localFileList = wx.ListCtrl(self.window_1_pane_1, wx.ID_ANY, style=wx.LC_HRULES | wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_VRULES)
self.window_1_pane_2 = wx.Panel(self.fileSplitter, wx.ID_ANY)
self.modFileRefresh = wx.BitmapButton(self.window_1_pane_2, wx.ID_ANY, wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\refresh.ico", wx.BITMAP_TYPE_ICO), style=wx.BU_BOTTOM)
self.modFileRefresh.SetToolTip("Refresh")
self.modFileExec = wx.BitmapButton(self.window_1_pane_2, wx.ID_ANY, wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\exec.ico", wx.BITMAP_TYPE_ICO), style=wx.BU_BOTTOM)
self.modFileExec.SetToolTip("Run")
self.modFileAdd = wx.BitmapButton(self.window_1_pane_2, wx.ID_ANY, wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\add.ico", wx.BITMAP_TYPE_ICO), style=wx.BU_BOTTOM)
self.modFileAdd.SetToolTip("Add")
self.modFileRm = wx.BitmapButton(self.window_1_pane_2, wx.ID_ANY, wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\delete.ico", wx.BITMAP_TYPE_ICO), style=wx.BU_BOTTOM)
self.modFileRm.SetToolTip("Remove")
self.modFileClean = wx.BitmapButton(self.window_1_pane_2, wx.ID_ANY, wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\deleteall.ico", wx.BITMAP_TYPE_ICO), style=wx.BU_BOTTOM)
self.modFileClean.SetToolTip("Clear")
self.modFileTree = wx.TreeCtrl(self.window_1_pane_2, wx.ID_ANY)
TreeDropVar = treeDrop(self.modFileTree)
self.modFileTree.SetDropTarget(TreeDropVar)
self.notebook_1_pane_3 = wx.Panel(self.notebook_1, wx.ID_ANY)
self.projectList = wx.ListBox(self.notebook_1_pane_3, wx.ID_ANY, choices=[])
self.projectCreat = wx.Button(self.notebook_1_pane_3, wx.ID_ANY, _(u"创建"))
self.projectDelete = wx.Button(self.notebook_1_pane_3, wx.ID_ANY, _(u"删除"))
self.firmwarePath = wx.TextCtrl(self.notebook_1_pane_3, wx.ID_ANY, "")
self.firmwareBrowser = wx.Button(self.notebook_1_pane_3, wx.ID_ANY, _(u"选择固件"))
self.howGetFirmware = wx.BitmapButton(self.notebook_1_pane_3, wx.ID_ANY, wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\how.ico", wx.BITMAP_TYPE_ICO), style=wx.BU_BOTTOM)
self.howGetFirmware.SetToolTip("how to get firmware")
self.downloadFileList = wx.ListCtrl(self.notebook_1_pane_3, wx.ID_ANY, style=wx.LC_HRULES | wx.LC_REPORT | wx.LC_VRULES)
self.downloadFileAdd = wx.BitmapButton(self.notebook_1_pane_3, wx.ID_ANY, wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\add.ico", wx.BITMAP_TYPE_ICO), style=wx.BU_BOTTOM)
self.downloadFileAdd.SetToolTip("Add")
self.downloadFileRm = wx.BitmapButton(self.notebook_1_pane_3, wx.ID_ANY, wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\delete.ico", wx.BITMAP_TYPE_ICO), style=wx.BU_BOTTOM)
self.downloadFileRm.SetToolTip("Delete")
self.downloadFileClean = wx.BitmapButton(self.notebook_1_pane_3, wx.ID_ANY, wx.Bitmap(PROJECT_ABSOLUTE_PATH + "\\images\\deleteall.ico", wx.BITMAP_TYPE_ICO), style=wx.BU_BOTTOM)
self.downloadFileClean.SetToolTip("Clear")
self.downloadFileEncrypt = wx.CheckBox(self.notebook_1_pane_3, wx.ID_ANY, _(u"加密"))
# self.downloadFileCompress = wx.CheckBox(self.notebook_1_pane_3, wx.ID_ANY, _(u"压缩"))
self.downloadFileBackup = wx.CheckBox(self.notebook_1_pane_3, wx.ID_ANY, _(u"备份"))
self.downloadProgress = wx.Gauge(self.notebook_1_pane_3, wx.ID_ANY, 100)
self.downloadFileStart = wx.Button(self.notebook_1_pane_3, wx.ID_ANY, _(u"下载脚本"))
self.downloadFileStart.SetLabel(_(u"下载脚本"))
self.downloadFileMore = wx.Button(self.notebook_1_pane_3, 5001, u"▼")
self.downloadFileCombine = wx.Button(self.notebook_1_pane_3, wx.ID_ANY, _(u"合并"))
self.notebook_1_pane_4 = wx.Panel(self.notebook_1, wx.ID_ANY)
self.autoSaveLogChk = wx.CheckBox(self.notebook_1_pane_4, wx.ID_ANY, _(u"自动保存模块日志"))
self.label_7 = wx.StaticText(self.notebook_1_pane_4, wx.ID_ANY, _(u"最大保存数目"))
self.autoSaveLogMaxItem = wx.TextCtrl(self.notebook_1_pane_4, wx.ID_ANY, "10")
self.label_8 = wx.StaticText(self.notebook_1_pane_4, wx.ID_ANY, _(u"最大占用上限(MB)"))
self.autoSaveLogMaxSize = wx.TextCtrl(self.notebook_1_pane_4, wx.ID_ANY, "100")
self.openAutoSaveLogDir = wx.Button(self.notebook_1_pane_4, wx.ID_ANY, _(u"打开日志文件夹"))
self.codeEditorChk = wx.CheckBox(self.notebook_1_pane_4, wx.ID_ANY, _(u"源码编辑器"))
self.codeEditorPathText = wx.TextCtrl(self.notebook_1_pane_4, wx.ID_ANY, "", style=wx.TE_READONLY)
self.srcEditorBrowseBtn = wx.Button(self.notebook_1_pane_4, wx.ID_ANY, "...", style=wx.BU_BOTTOM)
self.label_13 = wx.StaticText(self.notebook_1_pane_4, wx.ID_ANY, _(u"启动参数"))
self.codeEditorParam = wx.TextCtrl(self.notebook_1_pane_4, wx.ID_ANY, "%f")
self.comSettingChk = wx.CheckBox(self.notebook_1_pane_4, wx.ID_ANY, _(u"串口参数配置"))
self.label_14 = wx.StaticText(self.notebook_1_pane_4, wx.ID_ANY, _(u"校验位"))
self.comParity = wx.ComboBox(self.notebook_1_pane_4, wx.ID_ANY, choices=["NONE", "ODD", "EVEN", "MARK", "SPACE"], style=wx.CB_DROPDOWN | wx.CB_READONLY)
self.label_15 = wx.StaticText(self.notebook_1_pane_4, wx.ID_ANY, _(u"数据位"))
self.comDatabit = wx.ComboBox(self.notebook_1_pane_4, wx.ID_ANY, choices=["5", "6", "7", "8"], style=wx.CB_DROPDOWN | wx.CB_READONLY)
self.label_16 = wx.StaticText(self.notebook_1_pane_4, wx.ID_ANY, _(u"停止位"))
self.comStopbits = wx.ComboBox(self.notebook_1_pane_4, wx.ID_ANY, choices=["1", "1.5", "2"], style=wx.CB_DROPDOWN | wx.CB_READONLY)
self.label_17 = wx.StaticText(self.notebook_1_pane_4, wx.ID_ANY, _(u"流控制"))
self.comFlowControl = wx.ComboBox(self.notebook_1_pane_4, wx.ID_ANY, choices=["NONE", "XON/XOFF", "RTS/CTS", "DTR/DSR", "RTS/CTS/XON/XOFF", "DTR/DSR/XON/XOFF"], style=wx.CB_DROPDOWN | wx.CB_READONLY)
self.mpycChk = wx.CheckBox(self.notebook_1_pane_4, wx.ID_ANY, _(u"mpy-cross 路径"))
self.mpycPathText = wx.TextCtrl(self.notebook_1_pane_4, wx.ID_ANY, "", style=wx.TE_READONLY)
self.mpycBrowser = wx.Button(self.notebook_1_pane_4, wx.ID_ANY, "...", style=wx.BU_BOTTOM)
self.manualSettingChk = wx.CheckBox(self.notebook_1_pane_4, wx.ID_ANY, _(u"手动修改配置文件"))
self.openConfigFile = wx.Button(self.notebook_1_pane_4, wx.ID_ANY, _(u"关闭工具并打开配置文件"))
self.fontCheck = wx.CheckBox(self.notebook_1_pane_4, wx.ID_ANY, _(u"字体设置"))
self.fontButton = wx.Button(self.notebook_1_pane_4, wx.ID_ANY, _(u"交互页面字体设置"))
self.configConfirm = wx.Button(self.notebook_1_pane_4, wx.ID_ANY, _(u"确定"))
self.configConfirm.SetToolTip("确定-保存修改并生效")
self.configReset = wx.Button(self.notebook_1_pane_4, wx.ID_ANY, _(u"重置"))
self.configReset.SetToolTip("重置-恢复默认选项")
self.configCancel = wx.Button(self.notebook_1_pane_4, wx.ID_ANY, _(u"取消"))
self.configCancel.SetToolTip("取消-放弃本次修改")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_TOOL, self.toolBarHandler, id=3011)
self.Bind(wx.EVT_TOOL, self.toolBarHandler, id=3012)
self.Bind(wx.EVT_TOOL, self.toolBarHandler, id=3013)
self.Bind(wx.EVT_TOOL, self.toolBarHandler, id=3014)
self.Bind(wx.EVT_TOOL, self.toolBarHandler, id=3001)
self.Bind(wx.EVT_TOOL, self.toolBarHandler, id=3002)
self.Bind(wx.EVT_TOOL, self.toolBarHandler, id=3003)
self.Bind(wx.EVT_TOOL, self.toolBarHandler, id=3021)
self.Bind(wx.EVT_TOOL, self.toolBarHandler, id=3022)
self.Bind(wx.EVT_TOOL, self.toolBarHandler, id=3031)
self.Bind(wx.EVT_TOOL, self.toolBarHandler, id=3033)
self.Bind(wx.EVT_TOOL, self.toolBarHandler, id=3034)
self.Bind(wx.EVT_TOOL, self.CMDHandler, id=3032)
self.Bind(wx.EVT_CHOICE, self.comSelectorChange, self.comSelector)
self.Bind(wx.EVT_BUTTON, self.comOpenBtn, self.comOpen)
self.Bind(wx.EVT_BUTTON, self.comSettingBtn, self.comSettingMore)
self.Bind(wx.EVT_BUTTON, self.fileExploreGoUpHandler, self.localFilePathGoUp)
self.Bind(wx.EVT_BUTTON, self.fileExploreReFreshHandler, self.localFilePathReFresh)
self.Bind(wx.EVT_BUTTON, self.fileExploreGoHandler, self.localFilePathGo)
self.Bind(wx.EVT_LIST_BEGIN_DRAG, self.fileListDragInit, self.localFileList)
self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.openEditor, self.localFileList)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.localFileListDoubleClickHandler, self.localFileList)
#self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.openEditorFromDownload, self.downloadFileList)
self.Bind(wx.EVT_BUTTON, self.modFileRefreshFunc, self.modFileRefresh)
self.Bind(wx.EVT_BUTTON, self.modFileRunFunc, self.modFileExec)
self.Bind(wx.EVT_BUTTON, self.modFileAddFunc, self.modFileAdd)
self.Bind(wx.EVT_BUTTON, self.modFileRmFunc, self.modFileRm)
self.Bind(wx.EVT_BUTTON, self.modFileCleanFunc, self.modFileClean)
self.Bind(wx.EVT_TREE_ITEM_EXPANDED, self.modFileTreeExpand, self.modFileTree)
self.Bind(wx.EVT_CHECKBOX, self.autoSaveLogChkToggle, self.autoSaveLogChk)
self.Bind(wx.EVT_BUTTON, self.openLogDir, self.openAutoSaveLogDir)
self.Bind(wx.EVT_CHECKBOX, self.codeEditorToggle, self.codeEditorChk)
self.Bind(wx.EVT_BUTTON, self.srcEditorBrowse, self.srcEditorBrowseBtn)
self.Bind(wx.EVT_CHECKBOX, self.changeSerialSettingChk, self.comSettingChk)
self.Bind(wx.EVT_CHECKBOX, self.mpycPathChk, self.mpycChk)
self.Bind(wx.EVT_BUTTON, self.mpycPathBrowse, self.mpycBrowser)
self.Bind(wx.EVT_CHECKBOX, self.manualEditConfigure, self.manualSettingChk)
self.Bind(wx.EVT_BUTTON, self.openConfigureFile, self.openConfigFile)
self.Bind(wx.EVT_CHECKBOX, self.fontCheckConfigure, self.fontCheck)
self.Bind(wx.EVT_BUTTON, self.onFontConfigure, self.fontButton)
self.Bind(wx.EVT_BUTTON, self.saveSettings, self.configConfirm)
self.Bind(wx.EVT_BUTTON, self.resetSettigns, self.configReset)
self.Bind(wx.EVT_BUTTON, self.restoreSettings, self.configCancel)
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.winTabChanged, self.notebook_1)
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
self.Bind(wx.EVT_BUTTON, self.howGetFirmwareFunc, self.howGetFirmware)
# end wxGlade
#user code
self.mpycPathText.Bind(wx.EVT_SET_FOCUS, self.mpycPathBrowse)
self.codeEditorPathText.Bind(wx.EVT_SET_FOCUS, self.srcEditorBrowse)
#self.codeEditorParam.Bind(wx.EVT_TEXT, self.srcEditorParamSave)
self.autoSaveLogMaxItem.Bind(wx.EVT_CHAR_HOOK, self.txtFilterNum)
self.autoSaveLogMaxSize.Bind(wx.EVT_CHAR_HOOK, self.txtFilterNum)
self.localFilePath.Bind(wx.EVT_CHAR_HOOK, self.filePathHandler)
self.projectList.Bind(wx.EVT_KEY_UP, self.delete)
#self.Bind(wx.EVT_SIZE, self.OnReSize, self)
#statusBar data timer
self.statusBarTimer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.statusBarTimerFresh, self.statusBarTimer)
self.statusBarTimer.Start(100)
#lineWidthTimerFix data timer
self.lineWidthTimer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.lineWidthTimerFix, self.lineWidthTimer)
self.lineWidthTimer.Start(100)
#serial rcv data timer
self.serRcvTimer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.serRcvHandler, self.serRcvTimer)
#auto save log timer
self.autoSaveLogTimer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.autoSaveLogHandler, self.autoSaveLogTimer)
self.autoSaveLogTimer.Start(5000)
#msg for update ui form
pub.subscribe(self.updateDisplay, "uiUpdate")
pub.subscribe(self.statusBarMessages, "statusBarUpdate")
#toolbar toggle
self.toolBar.ToggleTool(3001, False) #timestamp
self.toolBar.ToggleTool(3002, True) #line number
self.toolBar.ToggleTool(3003, True) #format render
#for serial
self.serSendList = ['\r\n']
#for log pause boolean
self.logPrtPause = False
self.logPrtPauseCache = True
self.logCache = []
# For solving flickering frame issues
self.notebook_1_pane_4.SetDoubleBuffered(True)
#for listview icon
self.FileIconList = wx.ImageList(24,24)
self.localFileList.SetImageList(self.FileIconList, wx.IMAGE_LIST_SMALL)
#self.treeIconList = wx.ImageList(16,16)
self.treeIconList = wx.ImageList(24,24)
self.modFileTree.AssignImageList(self.treeIconList)
self.fldridx = self.treeIconList.Add(wx.ArtProvider.GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, (24,24)))
self.fldropenidx = self.treeIconList.Add(wx.ArtProvider.GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, (24,24)))
self.fileidx = self.treeIconList.Add(wx.ArtProvider.GetBitmap(wx.ART_NORMAL_FILE, wx.ART_OTHER, (24,24)))
self.filepy = self.treeIconList.Add(wx.Icon(PROJECT_ABSOLUTE_PATH + "\\images\\pyfile.ico", wx.BITMAP_TYPE_ICO))
#for file tree, add root item
self.addTree(self.modFileTree, "", "", True, True)
#for treectrl tooltip
self.modFileTree.Bind(wx.EVT_MOTION, self.modFileTreeMouseMotion)
# for RIGHT_CLICK event modFileTree
self.modFileTree.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.modFileTreeRightClick)
# self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.modFileTreeRightClick, self.modFileTree)
#for ser log
self.serProCache = ""
#for init config file
self.conf = configparser.ConfigParser(interpolation=None)
if not ifExist(PROJECT_ABSOLUTE_PATH+"\\config.ini"):
self.initConfigFile(True)
self.conf.read(PROJECT_ABSOLUTE_PATH+"\\config.ini", encoding='utf-8')
self.getConfigValue()
self.stc_colour = tuple()
colour = self.conf.get('font', 'colour')
self.stc_colour = tuple(int(s) for s in colour[1:-1].split(','))
self.stcColorized(True,
self.conf.getboolean('font', 'default'),
self.stc_colour
)
# self.stc_colour = tuple()
# # init stc font
# if self.conf.getboolean('font', 'default'):
# self.font = wx.Font(12,
# wx.MODERN,
# wx.NORMAL,
# wx.NORMAL,
# False,
# 'Lucida Sans Typewriter'
# )
# else:
# curr_data = {
# 'pointSize': self.conf.getint('font', 'pointSize'),
# 'family': self.conf.getint('font', 'family'),
# 'style': self.conf.getint('font', 'style'),
# 'weight': self.conf.getint('font', 'weight'),
# 'underline': self.conf.getboolean('font', 'underline'),
# 'face': self.conf.get('font', 'face'),
# 'encoding': self.conf.getint('font', 'encoding')
# }
# self.font = wx.Font(curr_data['pointSize'],
# wx.FontFamily(curr_data['family']),
# wx.FontStyle(curr_data['style']),
# wx.FontWeight(curr_data['weight']),
# curr_data['underline'],
# curr_data['face'],
# wx.FontEncoding(curr_data['encoding'])
# )
# colour = self.conf.get('font', 'colour')
# self.stc_colour = tuple(int(s) for s in colour[1:-1].split(','))
# for | |
#!/usr/bin/python
import os
import sys
import codecs
import re
import locale
sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
class Node(object):
    """A leaf token node of an SSF parse tree.

    Parses one token line (``<posn>\\t<token>\\t<tag>\\t<fs ...>``) and stores
    the surface token, its POS tag and its feature-structure attributes.
    """

    # Positional sub-fields packed into the composite 'af' attribute.
    _AF_KEYS = [u"root", u"lcat", u"gen", u"num", u"per", u"case", u"cm/tam", u"suffix"]

    def __init__(self, text):
        self.text = text
        self.lex = None              # surface token (second column)
        self.type = None             # POS tag (third column)
        self.__attributes = {}       # feature name -> value, parsed from <fs ...>
        self.errors = []
        self.name = None             # value of the 'name' feature, if present
        self.parent = None
        self.parentRelation = None
        self.alignedTo = None
        self.fsList = None           # raw '<fs ...>' strings from the line
        self.namedict = {}
        self.analyzeNode(self.text)

    def analyzeNode(self, text):
        """Tokenize *text*, populate lex/type/attributes and register the name."""
        [token, tokenType, fsDict, fsList] = getTokenFeats(text.strip().split())
        attributeUpdateStatus = self.updateAttributes(token, tokenType, fsDict, fsList)
        # Fix: was `is 0` — identity comparison with an int literal.
        if attributeUpdateStatus == 0:
            self.errors.append("Can't update attributes for node")
            self.probSent = True
        if self.name is not None:
            self.namedict[self.name] = self

    def updateAttributes(self, token, tokenType, fsDict, fsList):
        """Store the parsed fields on the node.

        Returns 1 on success (the original returned None, which made the
        caller's failure check dead code).
        """
        self.fsList = fsList
        self.lex = token
        self.type = tokenType
        for attribute in fsDict.keys():
            self.__attributes[attribute] = fsDict[attribute]
        self.assignName()
        return 1

    def assignName(self):
        """Copy the 'name' feature into ``self.name``; record an error if absent."""
        if 'name' in self.__attributes:
            self.name = self.getAttribute('name')
        else:
            self.errors.append('No name for this token Node')

    def printValue(self):
        """Return the surface token."""
        return self.lex

    def printSSFValue(self, prefix, allFeat):
        """Render this token as one SSF line at address *prefix*.

        When *allFeat* is False, rebuild a single <fs ...> from the attribute
        dict (with 'af' first); otherwise emit the raw fs strings joined by '|'.
        """
        returnValue = [prefix, self.printValue(), self.type]
        if allFeat is False:
            fs = ['<fs']
            if "af" in self.__attributes.keys():
                fs.append("af" + "='" + self.getAttribute("af") + "'")
            for key in (k for k in self.__attributes.keys() if k != "af"):
                fs.append(key + "='" + self.getAttribute(key) + "'")
            delim = ' '
            fs[-1] = fs[-1] + '>'
        else:
            fs = self.fsList
            delim = '|'
        return ('\t'.join(x for x in returnValue) + '\t' + delim.join(x for x in fs))

    def getAttribute(self, key):
        """Return the feature value for *key*, or None if absent."""
        if key in self.__attributes:
            return self.__attributes[key]
        else:
            return None

    def addAttribute(self, key, value):
        """Set feature *key* to *value*."""
        self.__attributes[key] = value

    def deleteAttribute(self, key):
        """Remove feature *key* (KeyError if absent)."""
        del self.__attributes[key]

    def get_attribute_keys(self):
        """Return the feature names."""
        return self.__attributes.keys()

    def get_attribute_items(self):
        """Return (feature, value) pairs."""
        return self.__attributes.items()

    def expand_af(self):
        """Split the composite 'af' feature into individual positional attributes."""
        af_string = self.__attributes.pop(u"af", None)
        if af_string is not None:
            self.__attributes.update({key: value for key, value in zip(self._AF_KEYS, af_string.split(u","))})

    def abbreviate_af(self):
        """Collapse positional attributes back into a single 'af' feature.

        Fix: missing sub-fields become empty slots; previously a partial set
        of af keys made ``",".join`` raise TypeError on the None entries.
        """
        af_values = [self.__attributes.pop(key, None) for key in self._AF_KEYS]
        if any(af_values):
            self.__attributes["af"] = ",".join(value if value is not None else u"" for value in af_values)
class ChunkNode(object):
    """An internal chunk node of an SSF parse tree: a '((' ... '))' group
    holding child Nodes/ChunkNodes plus its own feature structure."""

    # Positional sub-fields packed into the composite 'af' attribute.
    _AF_KEYS = [u"root", u"lcat", u"gen", u"num", u"per", u"case", u"cm/tam", u"suffix"]

    def __init__(self, header):
        self.text = []               # raw child lines; joined into a str by analyzeChunk()
        self.header = header         # the '((' line that opened this chunk
        self.footer = None           # the '))' line that closes it
        self.nodeList = []           # child Node/ChunkNode objects
        self.parent = '0'            # dependency parent name ('0' = root)
        self.__attributes = {}
        self.parentRelation = 'root'
        self.name = None
        self.type = None             # chunk tag, e.g. NP/VGF (set by analyzeChunk)
        self.head = None
        self.isParent = False
        self.errors = []
        self.upper = None            # enclosing ChunkNode or Sentence
        self.updateDrel()
        self.fsList = None
        self.namedict = {}

    def analyzeChunk(self):
        """Parse the header into type/attributes and collect child name mappings."""
        [chunkType, chunkFeatDict, chunkFSList] = getChunkFeats(self.header)
        self.fsList = chunkFSList
        self.type = chunkType
        self.updateAttributes(chunkFeatDict)
        self.text = '\n'.join([line for line in self.text])
        for chunk_or_node in self.nodeList:
            self.namedict.update(chunk_or_node.namedict)

    def updateAttributes(self, fsDict):
        """Merge *fsDict* into the attributes, then refresh name and drel."""
        for attribute in fsDict.keys():
            self.__attributes[attribute] = fsDict[attribute]
        self.assignName()
        self.updateDrel()

    def assignName(self):
        """Copy the 'name' feature into ``self.name``; record an error if absent."""
        if 'name' in self.__attributes:
            self.name = self.getAttribute('name')
        else:
            self.errors.append('No name for this chunk Node')

    def updateDrel(self):
        """Derive parent/parentRelation from a 'drel' (or 'dmrel') "rel:parent" feature."""
        if 'drel' in self.__attributes:
            drelList = self.getAttribute('drel').split(':')
            if len(drelList) == 2:
                self.parent = drelList[1]
                self.parentRelation = self.getAttribute('drel').split(':')[0]
        elif 'dmrel' in self.__attributes:
            drelList = self.getAttribute('dmrel').split(':')
            if len(drelList) == 2:
                self.parent = drelList[1]
                self.parentRelation = self.getAttribute('dmrel').split(':')[0]

    def printValue(self):
        """Return the chunk's surface text: child values joined by spaces."""
        returnString = []
        for node in self.nodeList:
            returnString.append(node.printValue())
        return ' '.join(x for x in returnString)

    def printSSFValue(self, prefix, allFeat):
        """Render this chunk (header, children, footer) as a list of SSF lines."""
        returnStringList = []
        returnValue = [prefix, '((', self.type]
        if allFeat is False:
            fs = ['<fs']
            if "af" in self.__attributes.keys():
                fs.append("af" + "='" + self.getAttribute("af") + "'")
            for key in (k for k in self.__attributes.keys() if k != "af"):
                fs.append(key + "='" + self.getAttribute(key) + "'")
            delim = ' '
            fs[-1] = fs[-1] + '>'
        else:
            fs = self.fsList
            delim = '|'
        # fixes SSF <fs> issue: the synthetic top-level SSF chunk carries no fs
        if self.type == "SSF":
            returnStringList.append('\t'.join(x for x in returnValue))
        else:
            returnStringList.append('\t'.join(x for x in returnValue) + '\t' + delim.join(x for x in fs))
        nodePosn = 0
        for node in self.nodeList:
            nodePosn += 1
            if isinstance(node, ChunkNode):
                returnStringList.extend(node.printSSFValue(prefix + '.' + str(nodePosn), allFeat))
            else:
                returnStringList.append(node.printSSFValue(prefix + '.' + str(nodePosn), allFeat))
        returnStringList.append('\t' + '))')
        return returnStringList

    def getAttribute(self, key):
        """Return the feature value for *key*, or None if absent."""
        if key in self.__attributes:
            return self.__attributes[key]
        else:
            return None

    def addAttribute(self, key, value):
        """Set feature *key* to *value*."""
        self.__attributes[key] = value

    def deleteAttribute(self, key):
        """Remove feature *key* (KeyError if absent)."""
        del self.__attributes[key]

    def get_attribute_keys(self):
        """Return the feature names."""
        return self.__attributes.keys()

    def get_attribute_items(self):
        """Return (feature, value) pairs."""
        return self.__attributes.items()

    def expand_af(self):
        """Split the composite 'af' feature into individual positional attributes."""
        af_string = self.__attributes.pop(u"af", None)
        if af_string is not None:
            self.__attributes.update(
                {key: value for key, value in zip(self._AF_KEYS, af_string.split(u","))})

    def abbreviate_af(self):
        """Collapse positional attributes back into a single 'af' feature.

        Fix: missing sub-fields become empty slots; previously a partial set
        of af keys made ``",".join`` raise TypeError on the None entries.
        """
        af_values = [self.__attributes.pop(key, None) for key in self._AF_KEYS]
        if any(af_values):
            self.__attributes["af"] = ",".join(value if value is not None else u"" for value in af_values)
class Sentence(object):
    """One <Sentence> element of an SSF document: the chunk/token tree plus
    dependency edges derived from the chunks' drel features.

    Fix: ``dict.iterkeys()`` (removed in Python 3) replaced with direct
    membership tests, which behave identically on Python 2.
    """

    def __init__(self, sentence, ignoreErrors=True, nesting=True, dummySentence=False):
        self.ignoreErrors = ignoreErrors
        self.nesting = nesting
        self.sentence = None          # regenerated surface string (see generateSentence)
        self.sentenceID = None
        self.sentenceType = None
        self.length = 0
        self.tree = None
        self.nodeList = []            # top-level ChunkNodes
        self.edges = {}               # parent name -> list of child names
        self.nodes = {}               # node name -> ChunkNode
        self.tokenNodes = {}
        self.rootNode = None
        self.fileName = None
        self.comment = None
        self.probSent = False
        self.errors = []
        self.namedict = {}
        self.dummySentence = dummySentence
        if self.dummySentence is False:
            # *sentence* is a regex match with named groups from getSentenceIter
            self.header = sentence.group('header')
            self.footer = sentence.group('footer')
            self.name = sentence.group('sentenceID')
            self.text = sentence.group('text')
            self.analyzeSentence()

    def analyzeSentence(self, ignoreErrors=False, nesting=True):
        """Parse self.text line by line into a nested ChunkNode/Node tree."""
        lastContext = self
        for line in self.text.split('\n'):
            stripLine = line.strip()
            if stripLine == "":
                continue
            elif stripLine[0] == "<" and ignoreErrors is False:
                self.errors.append('Encountered a line starting with "<"')
                self.probSent = True
            else:
                splitLine = stripLine.split()
                if len(splitLine) > 0 and splitLine[0] == '))':
                    # chunk close: finalize and pop back to the enclosing context
                    currentChunkNode.footer = line + '\n'
                    currentChunkNode.analyzeChunk()
                    lastContext = currentChunkNode.upper
                    if type(lastContext) != Sentence:
                        currentChunkNode = lastContext
                elif len(splitLine) > 1 and splitLine[1] == '((':
                    # chunk open: push a new ChunkNode under the current context
                    currentChunkNode = ChunkNode(line + '\n')
                    currentChunkNode.upper = lastContext
                    currentChunkNode.upper.nodeList.append(currentChunkNode)
                    if type(currentChunkNode.upper) != Sentence:
                        currentChunkNode.upper.text.append(line)
                    lastContext = currentChunkNode
                else:
                    # plain token line
                    currentNode = Node(line + '\n')
                    lastContext.nodeList.append(currentNode)
                    currentNode.upper = lastContext
        for chunkNode in self.nodeList:
            self.namedict.update(chunkNode.namedict)

    def addEdge(self, parent, child):
        """Record a parent -> child dependency edge, skipping duplicates."""
        # Fix: was self.edges.iterkeys() — AttributeError on Python 3.
        if parent in self.edges:
            if child not in self.edges[parent]:
                self.edges[parent].append(child)
        else:
            self.edges[parent] = [child]

    def updateAttributes(self):
        """Rebuild the name index, the edge map and the surface sentence.

        Returns 1 on success, 0 if either population step failed.
        """
        populateNodesStatus = self.populateNodes()
        populateEdgesStatus = self.populateEdges()
        self.sentence = self.generateSentence()
        if populateEdgesStatus == 0 or populateNodesStatus == 0:
            return 0
        return 1

    def printSSFValue(self, allFeat=True):
        """Render the whole sentence back to SSF text."""
        returnStringList = []
        returnStringList.append("<Sentence id=\"" + str(self.name) + "\">")
        if self.nodeList != []:
            nodeList = self.nodeList
            nodePosn = 0
            for node in nodeList:
                nodePosn += 1
                returnStringList.extend(node.printSSFValue(str(nodePosn), allFeat))
        returnStringList.append('</Sentence>\n')
        return '\n'.join(x for x in returnStringList)

    def populateNodes(self, naming='strict'):
        """Index every top-level node by its name. Returns 1 on success."""
        if naming == 'strict':
            for nodeElement in self.nodeList:
                assert nodeElement.name is not None
                self.nodes[nodeElement.name] = nodeElement
        return 1

    def populateEdges(self):
        """Build the edge map from each node's parent link.

        Returns 0 (failure) if a node references an unknown parent.
        """
        for node in self.nodeList:
            nodeName = node.name
            if node.parent == '0' or node == self.rootNode:
                self.rootNode = node
                continue
            # Fix: was self.nodes.iterkeys() — AttributeError on Python 3.
            elif node.parent not in self.nodes:
                return 0
            assert node.parent in self.nodes
            self.addEdge(node.parent, node.name)
        return 1

    def generateSentence(self):
        """Return the surface sentence: node values joined by spaces."""
        sentence = []
        for nodeName in self.nodeList:
            sentence.append(nodeName.printValue())
        return ' '.join(x for x in sentence)
class Document(object):
    """Top-level SSF container: parses raw text into a list of Sentence trees
    and aggregates their name -> node mappings."""

    def __init__(self, data):
        self.header = None
        self.footer = None
        self.text = None
        self.nodeList = []      # one Sentence per <Sentence> element in the data
        self.data = data
        self.namedict = {}      # merged name index across all sentences
        self.analyzeDocument()
        self.upper = None       # a Document has no enclosing context

    def analyzeDocument(self):
        """Build a Sentence tree for every sentence match found in the data."""
        for match in getSentenceIter(self.data):
            sentenceTree = Sentence(match, ignoreErrors=True, nesting=True)
            sentenceTree.upper = self
            self.nodeList.append(sentenceTree)
        for sentenceTree in self.nodeList:
            self.namedict.update(sentenceTree.namedict)
def getAddressNode(address, node, level='ChunkNode'):
    ''' Returns the node referenced in the address string relative to the node in the second argument.
    There are levels for setting the starting address-base. These are "ChunkNode", "Node" , "Sentence" , "Document" , "Relative".
    The hierarchy of levels for interpretation is :
    "Document" -> "Sentence" -> "ChunkNode" -> "Node"
    "Relative" value starts the base address from the node which contains the address. This is also the default option.
    '''
    currentContext = node
    if level != 'Relative':
        # Climb until we reach the requested level, then one step further so
        # the address is resolved against that level's container.
        while currentContext.__class__.__name__ != level:
            currentContext = currentContext.upper
        currentContext = currentContext.upper
    for step in address.split('%'):
        if step == '..':
            currentContext = currentContext.upper
        else:
            # Descend into the first child whose name matches this component.
            currentContext = [child for child in currentContext.nodeList
                              if child.name == step][0]
    # Bug fix: the original returned ``refNode``, which is unbound (or stale)
    # when the address ends with '..'; the resolved context is the answer.
    return currentContext
def getChunkFeats(line):
    """Parse one SSF chunk line into [chunk type, feature dict, raw <fs ...> strings]."""
    tokens = line.strip().split()
    chunkType = None
    if len(tokens) >= 3:
        # Column 3 of an SSF chunk line is the chunk label.
        chunkType = tokens[2]
    returnFeats = {}
    multipleFeatRE = r'<fs.*?>'
    featRE = r'(?:\W*)(\S+)=([\'|\"])?([^ \t\n\r\f\v\'\"]*)[\'|\"](?:.*)'
    fsList = re.findall(multipleFeatRE, ' '.join(tokens))
    for token in tokens:
        matches = re.findall(featRE, token)
        if not matches:
            continue
        if len(matches) > 1:
            returnErrors.append('Feature with more than one value')
            continue
        featName, _quote, featValue = matches[0]
        returnFeats[featName] = featValue
    return [chunkType, returnFeats, fsList]
def getTokenFeats(lineList):
    """Parse a split SSF token line into [token, POS tag, feature dict, raw <fs ...> strings]."""
    tokenType, token = None, None
    returnFeats = {}
    multipleFeatRE = r'<fs.*?>'
    featRE = r'(?:\W*)(\S+)=([\'|\"])?([^ \t\n\r\f\v\'\"]*)[\'|\"](?:.*)'
    if len(lineList) >= 3:
        # Columns 2 and 3 of an SSF token line hold the token and its tag.
        token = lineList[1]
        tokenType = lineList[2]
    fsList = re.findall(multipleFeatRE, ' '.join(lineList))
    for field in lineList:
        matches = re.findall(featRE, field)
        if not matches:
            continue
        if len(matches) > 1:
            returnErrors.append('Feature with more than one value')
            continue
        featName, _quote, featValue = matches[0]
        returnFeats[featName] = featValue
    return [token, tokenType, returnFeats, fsList]
def getSentenceIter(text):
sentenceRE = r'''(?P<complete>(?P<header><Sentence id=[\'\"]?(?P<sentenceID>\d+)[\'\"]?>)(?P<text>.*?)(?P<footer></Sentence>))'''
return re.finditer(sentenceRE, text, | |
# modules/pytket-qiskit/pytket/extensions/qiskit/backends/aer.py
# Copyright 2019-2021 Cambridge Quantum Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from collections import defaultdict
from logging import warning
from typing import Dict, Iterable, List, Optional, Tuple, cast, TYPE_CHECKING, Set
import numpy as np
from pytket.backends import Backend, CircuitNotRunError, CircuitStatus, ResultHandle
from pytket.backends.backendresult import BackendResult
from pytket.backends.resulthandle import _ResultIdTuple
from pytket.circuit import BasisOrder, Circuit, Node, OpType, Qubit # type: ignore
from pytket.device import Device, QubitErrorContainer # type: ignore
from pytket.passes import ( # type: ignore
BasePass,
CliffordSimp,
CXMappingPass,
DecomposeBoxes,
FullPeepholeOptimise,
RebaseCustom,
RebaseIBM,
SequencePass,
SynthesiseIBM,
)
from pytket.pauli import QubitPauliString # type: ignore
from pytket.predicates import ( # type: ignore
ConnectivityPredicate,
GateSetPredicate,
NoClassicalControlPredicate,
NoFastFeedforwardPredicate,
NoSymbolsPredicate,
Predicate,
)
from pytket.extensions.qiskit.qiskit_convert import (
tk_to_qiskit,
_qiskit_gates_1q,
_qiskit_gates_2q,
_gate_str_2_optype,
)
from pytket.extensions.qiskit.result_convert import qiskit_result_to_backendresult
from pytket.routing import Architecture, NoiseAwarePlacement # type: ignore
from pytket.utils.operators import QubitPauliOperator
from pytket.utils.results import KwargTypes, permute_basis_indexing
from qiskit import Aer # type: ignore
from qiskit.opflow.primitive_ops import PauliSumOp # type: ignore
from qiskit.providers.aer.library import ( # type: ignore # pylint: disable=unused-import
save_expectation_value,
)
from qiskit.providers.aer.noise import NoiseModel # type: ignore
from .ibm_utils import _STATUS_MAP
if TYPE_CHECKING:
from qiskit.providers.aer import AerJob # type: ignore
from qiskit.providers.aer.backends.aerbackend import AerBackend as QiskitAerBackend # type: ignore
def _default_q_index(q: Qubit) -> int:
    """Return the integer index of *q* within the default register "q".

    :raises ValueError: if the qubit lives in another register or carries a
        multi-dimensional index.
    """
    in_default_register = q.reg_name == "q" and len(q.index) == 1
    if not in_default_register:
        raise ValueError("Non-default qubit register")
    return int(q.index[0])
# Gate-set bookkeeping for the Aer backends: every backend must support at
# least CX plus the IBM U1/U2/U3 family; the 1q/2q sets mirror the
# qiskit<->tket gate translation tables.
_required_gates: Set[OpType] = {OpType.CX, OpType.U1, OpType.U2, OpType.U3}
_1q_gates: Set[OpType] = set(_qiskit_gates_1q.values())
_2q_gates: Set[OpType] = set(_qiskit_gates_2q.values())
def _tk1_to_u(a: float, b: float, c: float) -> Circuit:
    """Rewrite a TK1(a, b, c) gate as a single U3 plus a global-phase shift.

    Angles are in half-turns: the outer TK1 angles are offset by +-0.5 and
    the global phase is corrected by -(a + c)/2.
    """
    circ = Circuit(1)
    circ.add_gate(OpType.U3, [b, a - 0.5, c + 0.5], [0])
    circ.add_phase(-0.5 * (a + c))
    return circ
class _AerBaseBackend(Backend):
    """Common base class for all Aer simulator backends"""

    # Aer job ids are process-local, so handles cannot outlive the session.
    _persistent_handles = False

    def __init__(self, backend_name: str):
        """Wrap the named qiskit Aer backend.

        :param backend_name: name understood by ``Aer.get_backend``
        :raises NotImplementedError: if the backend's basis gates do not
            cover the required CX/U1/U2/U3 set
        """
        super().__init__()
        self._backend_name = backend_name
        self._backend: "QiskitAerBackend" = Aer.get_backend(backend_name)
        # Translate the backend's basis-gate names into tket OpTypes,
        # silently dropping gates with no tket equivalent.
        self._gate_set: Set[OpType] = {
            _gate_str_2_optype[gate_str]
            for gate_str in self._backend.configuration().basis_gates
            if gate_str in _gate_str_2_optype
        }
        if not self._gate_set >= _required_gates:
            raise NotImplementedError(
                f"Gate set {self._gate_set} missing at least one of {_required_gates}"
            )
        self._noise_model: Optional[NoiseModel] = None
        self._characterisation: Optional[dict] = None
        self._device: Optional[Device] = None
        self._memory = False
        # Rebase towards the backend's supported gates: CX as the 2q
        # replacement, _tk1_to_u for 1q gates.
        self._rebase_pass = RebaseCustom(
            self._gate_set & _2q_gates,
            Circuit(2).CX(0, 1),
            self._gate_set & _1q_gates,
            _tk1_to_u,
        )

    @property
    def _result_id_type(self) -> _ResultIdTuple:
        # (job id, index of the circuit within that job)
        return (str, int)

    @property
    def characterisation(self) -> Optional[dict]:
        """Noise characterisation derived from the noise model, if any."""
        return self._characterisation

    @property
    def device(self) -> Optional[Device]:
        """Device built from the noise characterisation, if any."""
        return self._device

    def process_circuits(
        self,
        circuits: Iterable[Circuit],
        n_shots: Optional[int] = None,
        valid_check: bool = True,
        **kwargs: KwargTypes,
    ) -> List[ResultHandle]:
        """Convert circuits to qiskit and submit them as one Aer job.

        :param circuits: circuits to run
        :param n_shots: number of shots (None for state/unitary simulation)
        :param valid_check: check backend predicates before submitting
        :param kwargs: ``seed`` is forwarded as the simulator seed
        :return: one handle per submitted circuit
        """
        circuit_list = list(circuits)
        if valid_check:
            self._check_all_circuits(circuit_list)
        qcs = [tk_to_qiskit(tkc) for tkc in circuit_list]
        # The new-style Aer simulators require an explicit save instruction
        # appended to each circuit to emit the state/unitary.
        if self._backend_name == "aer_simulator_statevector":
            for qc in qcs:
                qc.save_state()
        elif self._backend_name == "aer_simulator_unitary":
            for qc in qcs:
                qc.save_unitary()
        seed = cast(Optional[int], kwargs.get("seed"))
        job = self._backend.run(
            qcs,
            shots=n_shots,
            memory=self._memory,
            seed_simulator=seed,
            noise_model=self._noise_model,
        )
        jobid = job.job_id()
        # All circuits share one job id; handles differ only by index.
        handle_list = [ResultHandle(jobid, i) for i in range(len(circuit_list))]
        for handle in handle_list:
            self._cache[handle] = {"job": job}
        return handle_list

    def cancel(self, handle: ResultHandle) -> None:
        """Attempt to cancel the job behind *handle*; warn on failure."""
        job: "AerJob" = self._cache[handle]["job"]
        cancelled = job.cancel()
        if not cancelled:
            warning(f"Unable to cancel job {cast(str, handle[0])}")

    def circuit_status(self, handle: ResultHandle) -> CircuitStatus:
        """Map the qiskit job status onto a tket CircuitStatus."""
        self._check_handle_type(handle)
        job: "AerJob" = self._cache[handle]["job"]
        ibmstatus = job.status()
        return CircuitStatus(_STATUS_MAP[ibmstatus], ibmstatus.value)

    def get_result(self, handle: ResultHandle, **kwargs: KwargTypes) -> BackendResult:
        """Return (and cache) the BackendResult for *handle*, blocking on the job."""
        try:
            return super().get_result(handle)
        except CircuitNotRunError:
            jobid, _ = handle
            try:
                job: "AerJob" = self._cache[handle]["job"]
            except KeyError:
                raise CircuitNotRunError(handle)
            res = job.result()
            backresults = qiskit_result_to_backendresult(res)
            # One qiskit Result covers every circuit in the job: cache them
            # all so later handles are served without waiting again.
            for circ_index, backres in enumerate(backresults):
                self._cache[ResultHandle(jobid, circ_index)]["result"] = backres
            return cast(BackendResult, self._cache[handle]["result"])

    def _snapshot_expectation_value(
        self,
        circuit: Circuit,
        hamiltonian: PauliSumOp,
        valid_check: bool = True,
    ) -> complex:
        """Run *circuit* and read <psi|hamiltonian|psi> via Aer's
        save_expectation_value instruction.

        The circuit must act on the default register, qubits 0..n-1 in order.
        """
        if valid_check:
            self._check_all_circuits([circuit], nomeasure_warn=False)
        circ_qbs = circuit.qubits
        q_indices = (_default_q_index(q) for q in circ_qbs)
        if not all(q_ind == i for q_ind, i in zip(q_indices, range(len(circ_qbs)))):
            raise ValueError(
                "Circuit must act on default register Qubits, contiguously from 0"
                + f" onwards. Circuit qubits were: {circ_qbs}"
            )
        qc = tk_to_qiskit(circuit)
        # "snap" labels the saved expectation value in the result data.
        qc.save_expectation_value(hamiltonian, qc.qubits, "snap")
        job = self._backend.run(qc)
        return cast(
            complex,
            job.result().data(qc)["snap"],
        )

    def get_pauli_expectation_value(
        self,
        state_circuit: Circuit,
        pauli: QubitPauliString,
        valid_check: bool = True,
    ) -> complex:
        """Calculates the expectation value of the given circuit using the built-in Aer
        snapshot functionality
        Requires a simple circuit with default register qubits.
        :param state_circuit: Circuit that generates the desired state
            :math:`\\left|\\psi\\right>`.
        :type state_circuit: Circuit
        :param pauli: Pauli operator
        :type pauli: QubitPauliString
        :param valid_check: Explicitly check that the circuit satisfies all required
            predicates to run on the backend. Defaults to True
        :type valid_check: bool, optional
        :return: :math:`\\left<\\psi | P | \\psi \\right>`
        :rtype: complex
        """
        if not self._supports_expectation:
            raise NotImplementedError("Cannot get expectation value from this backend")
        # Single Pauli term with unit coefficient.
        operator = PauliSumOp.from_list(
            [(_qiskit_label(pauli, state_circuit.n_qubits), 1)]
        )
        return self._snapshot_expectation_value(state_circuit, operator, valid_check)

    def get_operator_expectation_value(
        self,
        state_circuit: Circuit,
        operator: QubitPauliOperator,
        valid_check: bool = True,
    ) -> complex:
        """Calculates the expectation value of the given circuit with respect to the
        operator using the built-in Aer snapshot functionality
        Requires a simple circuit with default register qubits.
        :param state_circuit: Circuit that generates the desired state
            :math:`\\left|\\psi\\right>`.
        :type state_circuit: Circuit
        :param operator: Operator :math:`H`.
        :type operator: QubitPauliOperator
        :param valid_check: Explicitly check that the circuit satisfies all required
            predicates to run on the backend. Defaults to True
        :type valid_check: bool, optional
        :return: :math:`\\left<\\psi | H | \\psi \\right>`
        :rtype: complex
        """
        if not self._supports_expectation:
            raise NotImplementedError("Cannot get expectation value from this backend")
        n_qubits = state_circuit.n_qubits
        # Convert every (Pauli string, coefficient) term of the operator.
        q_operator = PauliSumOp.from_list(
            [
                (_qiskit_label(pauli, n_qubits), coeff)
                for pauli, coeff in operator._dict.items()
            ]
        )
        return self._snapshot_expectation_value(state_circuit, q_operator, valid_check)
class _AerStateBaseBackend(_AerBaseBackend):
    """Base for the shot-free (statevector/unitary) Aer backends."""

    def __init__(self, *args: str, **kwargs: KwargTypes):
        # NOTE(review): never written in this class — presumably reserved
        # for subclasses; verify before removing.
        self._qlists: Dict[ResultHandle, Tuple[int, ...]] = {}
        super().__init__(*args)

    @property
    def required_predicates(self) -> List[Predicate]:
        """Predicates a circuit must satisfy for exact simulation."""
        return [
            NoClassicalControlPredicate(),
            NoFastFeedforwardPredicate(),
            GateSetPredicate(
                self._gate_set.union(
                    {
                        OpType.noop,
                        OpType.Unitary1qBox,
                    }
                )
            ),
        ]

    def default_compilation_pass(self, optimisation_level: int = 1) -> BasePass:
        """Compilation pass for exact simulation at the given level (0-2)."""
        assert optimisation_level in range(3)
        if optimisation_level == 0:
            return SequencePass([DecomposeBoxes(), RebaseIBM()])
        elif optimisation_level == 1:
            return SequencePass([DecomposeBoxes(), SynthesiseIBM()])
        else:
            return SequencePass([DecomposeBoxes(), FullPeepholeOptimise()])

    def process_circuits(
        self,
        circuits: Iterable[Circuit],
        n_shots: Optional[int] = None,
        valid_check: bool = True,
        **kwargs: KwargTypes,
    ) -> List[ResultHandle]:
        """Submit circuits for state/unitary simulation.

        ``n_shots`` is ignored (forced to None).  Each circuit's non-trivial
        implicit qubit permutation is remembered so :meth:`get_result` can
        relabel the output.
        """
        # Bug fix: materialise the iterable once.  The original passed
        # ``circuits`` to super() (which consumed it) and then zipped the
        # exhausted iterator against the handles, so for generator input the
        # implicit-permutation cache was silently never populated.
        circuit_list = list(circuits)
        handles = super().process_circuits(
            circuit_list, n_shots=None, valid_check=valid_check, **kwargs
        )
        for handle, circ in zip(handles, circuit_list):
            perm: Dict[Qubit, Qubit] = circ.implicit_qubit_permutation()
            if not all(key == val for key, val in perm.items()):
                self._cache[handle]["implicit_perm_qubits"] = perm
        return handles

    def get_result(self, handle: ResultHandle, **kwargs: KwargTypes) -> BackendResult:
        """Return the result for *handle*, undoing implicit qubit permutations."""
        if handle in self._cache:
            if "result" in self._cache[handle]:
                return cast(BackendResult, self._cache[handle]["result"])
        self._check_handle_type(handle)
        try:
            job: "AerJob" = self._cache[handle]["job"]
        except KeyError:
            raise CircuitNotRunError(handle)
        res = job.result()
        backresults = qiskit_result_to_backendresult(res)
        for circ_index, backres in enumerate(backresults):
            newhandle = ResultHandle(handle[0], circ_index)
            if "implicit_perm_qubits" in self._cache[newhandle]:
                permed_qbit_map: Dict[Qubit, Qubit] = self._cache[newhandle][
                    "implicit_perm_qubits"
                ]
                original_indexmap = backres.q_bits.copy()
                assert original_indexmap
                # Simultaneous permutation of inputs and outputs of process
                # Handles implicit permutation of outputs for statevector
                backres.q_bits = {
                    permed_qbit_map[qb]: index
                    for qb, index in original_indexmap.items()
                }
                if backres._unitary is not None:
                    # For unitaries, the implicit permutation
                    # should only be applied to inputs
                    # The above relabelling will permute both inputs and outputs
                    # Correct by applying the inverse
                    # permutation on the inputs (i.e. a column permutation)
                    permutation = [0] * len(original_indexmap)
                    for qb, index in original_indexmap.items():
                        permutation[index] = original_indexmap[permed_qbit_map[qb]]
                    backres._unitary = permute_basis_indexing(
                        backres._unitary.T, tuple(permutation)
                    ).T
            self._cache[newhandle]["result"] = backres
        return cast(BackendResult, self._cache[handle]["result"])
class AerBackend(_AerBaseBackend):
    """Shot-based simulation on the Aer QASM simulator, with optional noise."""

    # Capabilities advertised through the tket Backend API.
    _supports_shots = True
    _supports_counts = True
    _supports_expectation = True
    _expectation_allows_nonhermitian = False
    def __init__(
        self,
        noise_model: Optional[NoiseModel] = None,
        simulation_method: str = "automatic",
    ):
        """Backend for running simulations on the Qiskit Aer QASM simulator.
        :param noise_model: Noise model to apply during simulation. Defaults to None.
        :type noise_model: Optional[NoiseModel], optional
        :param simulation_method: Simulation method, see
            https://qiskit.org/documentation/stubs/qiskit.providers.aer.AerSimulator.html
            for available values. Defaults to "automatic".
        :type simulation_method: str
        """
        super().__init__("aer_simulator")
        # Treat a noise model whose fields are all empty the same as None.
        if not noise_model or all(
            value == [] for value in noise_model.to_dict().values()
        ):
            self._noise_model = None
        else:
            self._noise_model = noise_model
            # Derive error rates and connectivity from the noise model so
            # placement/routing can be noise-aware.
            self._characterisation = _process_model(noise_model, self._gate_set)
            self._device = Device(
                self._characterisation.get("NodeErrors", {}),
                self._characterisation.get("EdgeErrors", {}),
                self._characterisation.get("Architecture", Architecture([])),
            )
        # Shot memory is needed to reconstruct per-shot results.
        self._memory = True
        self._backend.set_options(method=simulation_method)
@property
def required_predicates(self) -> List[Predicate]:
pred_list = [
NoSymbolsPredicate(),
GateSetPredicate(
self._gate_set.union(
{
OpType.Measure,
OpType.Reset,
OpType.Barrier,
OpType.noop,
OpType.Unitary1qBox,
OpType.RangePredicate,
}
)
),
]
if self._noise_model and self._device:
pred_list.append(ConnectivityPredicate(self._device))
return pred_list
def default_compilation_pass(self, | |
import numpy as np
import scipy as sp
from scipy.sparse import linalg
import copy
import moments.Spectrum_mod
from . import Numerics
import Jackknife as jk
import LinearSystem_1D as ls1
import LinearSystem_2D as ls2
from . import Reversible
#------------------------------------------------------------------------------
# Functions for the computation of the Phi-moments for multidimensional models:
# we integrate the ode system on the Phi_n(i) to compute their evolution
# we write it (and solve it) as an approximated linear system:
# Phi_n' = Bn(N) + (1/(4N)Dn + S1n + S2n)Phi_n
# where :
# N is the total population size
# Bn(N) is the mutation source term
# 1/(4N)Dn is the drift effect matrix
# S1n is the selection matrix for h = 0.5
# S2n is the effect of h != 0.5
#------------------------------------------------------------------------------
#-----------------------------------
# functions to compute the matrices-
#-----------------------------------
# Mutations
def _calcB(dims, u):
# u is a list of mutation rates in each population
# allows for different mutation rates in different pops
B = np.zeros(dims)
for k in range(len(dims)):
ind = np.zeros(len(dims), dtype='int')
ind[k] = int(1)
tp = tuple(ind)
B[tp] = (dims[k] - 1) * u[k]
return B
# Finite genome mutation model
def _calcB_FB(dims, theta_fd, theta_bd):
    """
    Mutation matrix (or matrices) for the finite-genome (reversible) model.

    dims : List containing the pop sizes
    theta_fd : scalar forward mutation rate
    theta_bd : scalar backward mutation rate
    Returns a single matrix for one population, or a list of per-axis
    mutation matrices for 2-5 populations.
    Raises ValueError for more than five populations (the original fell
    through and silently returned None).
    """
    npops = len(dims)
    if npops == 1:
        return ls1.calcB_FB(dims[0], theta_fd, theta_bd)
    elif npops == 2:  # return list of mutation matrices
        return [ls2.calcB_FB1(dims, theta_fd, theta_bd),
                ls2.calcB_FB2(dims, theta_fd, theta_bd)]
    elif npops == 3:
        return Reversible.calc_FB_3pop(dims, theta_fd, theta_bd)
    elif npops == 4:
        return Reversible.calc_FB_4pop(dims, theta_fd, theta_bd)
    elif npops == 5:
        return Reversible.calc_FB_5pop(dims, theta_fd, theta_bd)
    raise ValueError("finite genome mutation model supports at most 5 populations")
# Drift
def _calcD(dims):
    """
    dims : List containing the pop sizes
    Returns a list of drift matrices for each pair of pops
    """
    pairs = [(i, j) for i in range(len(dims)) for j in range(i + 1, len(dims))]
    return [[ls2.calcD1(np.array([dims[i], dims[j]])),
             ls2.calcD2(np.array([dims[i], dims[j]]))]
            for i, j in pairs]
def _buildD(vd, dims, N):
"""
Builds the effective drift matrices by multiplying by the 1/4N coeff
vd : List containing the drift matrices
dims : List containing the pop sizes
N : List containing the effective pop sizes for each pop
Returns a list of effective drift matrices for each pair of pops
"""
if (len(dims) == 1): return [1.0 / 4 / N[0] * vd[0][0]]
res = []
ctr = 0
for i in range(len(dims)):
for j in range(i + 1, len(dims)):
res.append(1.0/(4*N[i])*vd[ctr][0] + 1.0/(4*N[j])*vd[ctr][1])
ctr += 1
return res
# Selection 1
def _calcS(dims, ljk):
    """
    dims : List containing the pop sizes
    ljk : List containing the 1 jump jackknife matrices for each pair of pop
    Returns a list of selection matrices for each pair of pops
    """
    pairs = [(i, j) for i in range(len(dims)) for j in range(i + 1, len(dims))]
    return [[ls2.calcS_1(np.array([dims[i], dims[j]]), ljk[i]),
             ls2.calcS_2(np.array([dims[i], dims[j]]), ljk[j])]
            for i, j in pairs]
def _buildS(vs, dims, s, h):
"""
Builds the effective selection matrices by multiplying by the correct coeff
vs : List containing the selection matrices
dims : List containing the pop sizes
s : List containing the selection coefficients
h : List containing the dominance coefficients
Returns a list of effective selection matrices for each pair of pops
"""
if (len(dims) == 1): return [vs[0][0]]
res = []
ctr = 0
for i in range(len(dims)):
for j in range(i + 1, len(dims)):
res.append(s[i]*h[i]*vs[ctr][0] + s[j]*h[j]*vs[ctr][1])
ctr += 1
return res
# Selection 2
def _calcS2(dims, ljk):
    """
    dims : List containing the pop sizes
    ljk : List containing the 2 jumps jackknife matrices for each pair of pop
    Returns a list of selection matrices for each pair of pops
    """
    pairs = [(i, j) for i in range(len(dims)) for j in range(i + 1, len(dims))]
    return [[ls2.calcS2_1(np.array([dims[i], dims[j]]), ljk[i]),
             ls2.calcS2_2(np.array([dims[i], dims[j]]), ljk[j])]
            for i, j in pairs]
def _buildS2(vs, dims, s, h):
"""
Builds the effective selection matrices (part due to dominance)
by multiplying by the correct coeff
vs : List containing the selection matrices
dims : List containing the pop sizes
s : List containing the selection coefficients
h : List containing the dominance coefficients
Returns a list of effective selection matrices for each pair of pops
"""
if (len(dims) == 1): return [vs[0][0]]
res = []
ctr = 0
for i in range(len(dims)):
for j in range(i + 1, len(dims)):
res.append(s[i]*(1-2.0*h[i])*vs[ctr][0] + s[j]*(1-2.0*h[j])*vs[ctr][1])
ctr += 1
return res
# Migrations
def _calcM(dims, ljk):
    """
    dims : List containing the pop sizes
    ljk : List containing the 1 jump jackknife matrices for each pair of pop
    Returns a list of migration matrices for each pair of pops
    """
    # NOTE(review): the axis-1 matrix pairs with ljk[j] and the axis-2 matrix
    # with ljk[i] — the opposite order to _calcS.  Preserved as written.
    pairs = [(i, j) for i in range(len(dims)) for j in range(i + 1, len(dims))]
    return [[ls2.calcM_1(np.array([dims[i], dims[j]]), ljk[j]),
             ls2.calcM_2(np.array([dims[i], dims[j]]), ljk[i])]
            for i, j in pairs]
def _buildM(vm, dims, m):
"""
Builds the effective migration matrices by multiplying by the migration coeff
vm : List containing the migration matrices
dims : List containing the pop sizes
m : matrix containing the migration coefficients
Returns a list of effective migration matrices for each pair of pops
"""
res = []
ctr = 0
for i in range(len(dims)):
for j in range(i + 1, len(dims)):
res.append(m[i, j]*vm[ctr][0] + m[j, i]*vm[ctr][1])
ctr += 1
return res
#----------------------------------
# updates for the time integration-
#----------------------------------
# we solve a system like PX = QY
# step 1 functions correspond to the QY computation
# and step 2 to the resolution of PX = Y'
# 2D
#step 1
def _ud1_2pop_1(sfs, Q, dims):
sfs = Q[0].dot(sfs.reshape(dims[0] * dims[1])).reshape(dims)
return sfs
# step 2
def _ud2_2pop_1(sfs, slv, dims):
sfs = (slv[0](sfs.reshape(dims[0] * dims[1]))).reshape(dims)
return sfs
# for 3D, 4D and 5D cases, each couple of directions are coded separately to simplify the permutations...
#------------------------------
# 3D
# step 1
def _ud1_3pop_1(sfs, Q, dims):
for i in range(int(dims[2])):
sfs[:, :, i] = Q[0].dot(sfs[:, :, i].reshape(dims[0] * dims[1])).reshape(dims[0], dims[1])
return sfs
def _ud1_3pop_2(sfs, Q, dims):
for i in range(int(dims[1])):
sfs[:, i, :] = Q[1].dot(sfs[:, i, :].reshape(dims[0] * dims[2])).reshape(dims[0], dims[2])
return sfs
def _ud1_3pop_3(sfs, Q, dims):
for i in range(int(dims[0])):
sfs[i, :, :] = Q[2].dot(sfs[i, :, :].reshape(dims[1] * dims[2])).reshape(dims[1], dims[2])
return sfs
# step 2
def _ud2_3pop_1(sfs, slv, dims):
for i in range(int(dims[2])):
sfs[:, :, i] = slv[0](sfs[:, :, i].reshape(dims[0] * dims[1])).reshape(dims[0], dims[1])
return sfs
def _ud2_3pop_2(sfs, slv, dims):
for i in range(int(dims[1])):
sfs[:, i, :] = slv[1](sfs[:, i, :].reshape(dims[0] * dims[2])).reshape(dims[0], dims[2])
return sfs
def _ud2_3pop_3(sfs, slv, dims):
for i in range(int(dims[0])):
sfs[i, :, :] = slv[2](sfs[i, :, :].reshape(dims[1] * dims[2])).reshape(dims[1], dims[2])
return sfs
#------------------------------
# 4D
# step 1
def _ud1_4pop_1(sfs, Q, dims):
for i in range(int(dims[2])):
for j in range(int(dims[3])):
sfs[:, :, i, j] = Q[0].dot(sfs[:, :, i, j].reshape(dims[0] * dims[1])).reshape(dims[0], dims[1])
return sfs
def _ud1_4pop_2(sfs, Q, dims):
for i in range(int(dims[1])):
for j in range(int(dims[3])):
sfs[:, i, :, j] = Q[1].dot(sfs[:, i, :, j].reshape(dims[0] * dims[2])).reshape(dims[0], dims[2])
return sfs
def _ud1_4pop_3(sfs, Q, dims):
for i in range(int(dims[1])):
for j in range(int(dims[2])):
sfs[:, i, j, :] = Q[2].dot(sfs[:, i, j, :].reshape(dims[0] * dims[3])).reshape(dims[0], dims[3])
return sfs
def _ud1_4pop_4(sfs, Q, dims):
for i in range(int(dims[0])):
for j in range(int(dims[3])):
sfs[i, :, :, j] = Q[3].dot(sfs[i, :, :, j].reshape(dims[1] * dims[2])).reshape(dims[1], dims[2])
return sfs
def _ud1_4pop_5(sfs, Q, dims):
for i in range(int(dims[0])):
for j in range(int(dims[2])):
sfs[i, :, j, :] = Q[4].dot(sfs[i, :, j, :].reshape(dims[1] * dims[3])).reshape(dims[1], dims[3])
return sfs
def _ud1_4pop_6(sfs, Q, dims):
for i in range(int(dims[0])):
for j in range(int(dims[1])):
sfs[i, j, :, :] = Q[5].dot(sfs[i, j, :, :].reshape(dims[2] * dims[3])).reshape(dims[2], dims[3])
return sfs
# step 2
def _ud2_4pop_1(sfs, slv, dims):
for i in range(int(dims[2])):
for j in range(int(dims[3])):
sfs[:, :, i, j] = slv[0](sfs[:, :, i, j].reshape(dims[0] * dims[1])).reshape(dims[0], dims[1])
return sfs
def _ud2_4pop_2(sfs, slv, dims):
for i in range(int(dims[1])):
for j in range(int(dims[3])):
sfs[:, i, :, j] = slv[1](sfs[:, i, :, j].reshape(dims[0] * dims[2])).reshape(dims[0], dims[2])
return sfs
def _ud2_4pop_3(sfs, slv, dims):
for i in range(int(dims[1])):
for j in range(int(dims[2])):
sfs[:, i, j, :] = slv[2](sfs[:, i, j, :].reshape(dims[0] * dims[3])).reshape(dims[0], dims[3])
return sfs
def _ud2_4pop_4(sfs, slv, dims):
for i in range(int(dims[0])):
for j in range(int(dims[3])):
sfs[i, :, :, j] = slv[3](sfs[i, :, :, j].reshape(dims[1] * dims[2])).reshape(dims[1], dims[2])
return sfs
def _ud2_4pop_5(sfs, slv, dims):
for | |
'Nangus'},
'61269448':{'en': 'Burra'},
'6126945':{'en': 'Coolac'},
'61269456':{'en': 'Tooma'},
'61269457':{'en': 'Tumbarumba'},
'61269458':{'en': 'Tumorrama'},
'61269459':{'en': 'Tumut'},
'612694600':{'en': 'Wallendbeen'},
'612694601':{'en': 'Wallendbeen'},
'612694602':{'en': 'Wallendbeen'},
'612694603':{'en': 'Wallendbeen'},
'612694606':{'en': 'Wallendbeen'},
'61269461':{'en': 'Adelong'},
'61269462':{'en': 'Adelong'},
'612694630':{'en': 'Yaven Creek'},
'612694631':{'en': 'Yaven Creek'},
'612694632':{'en': 'Yaven Creek'},
'612694633':{'en': 'Yaven Creek'},
'612694636':{'en': 'Yaven Creek'},
'61269464':{'en': 'Adelong'},
'61269465':{'en': 'Yaven Creek'},
'61269466':{'en': 'Tumorrama'},
'61269467':{'en': 'Adelong'},
'61269468':{'en': 'Adelong'},
'61269469':{'en': 'Adelong'},
'6126947':{'en': 'Tumut'},
'6126948':{'en': 'Tumbarumba'},
'61269484':{'en': 'Tooma'},
'61269485':{'en': 'Mannus'},
'61269486':{'en': 'Carabost'},
'61269490':{'en': 'Batlow'},
'61269491':{'en': 'Batlow'},
'61269492':{'en': 'Batlow'},
'61269493':{'en': 'Batlow'},
'61269494':{'en': 'Talbingo'},
'61269495':{'en': 'Talbingo'},
'61269496':{'en': 'Barellan'},
'61269497':{'en': 'Black Stump'},
'61269498':{'en': 'Bunda'},
'61269499':{'en': 'Darlington Point'},
'6126950':{'en': 'Narrandera'},
'61269501':{'en': 'Egansford'},
'61269502':{'en': 'Coleambally'},
'61269503':{'en': '<NAME>'},
'6126951':{'en': 'Narrandera'},
'61269511':{'en': 'Leeton'},
'61269512':{'en': 'Leeton'},
'61269513':{'en': 'Leeton'},
'61269514':{'en': 'Stanbridge'},
'61269520':{'en': 'Goolgowi'},
'61269521':{'en': 'Griffith'},
'61269522':{'en': 'Gunbar'},
'61269523':{'en': 'Hillston'},
'61269524':{'en': 'Melbergen'},
'61269525':{'en': 'Merriwagga'},
'61269526':{'en': '<NAME>'},
'61269527':{'en': 'Wallanthery'},
'61269528':{'en': 'Warrawidgee'},
'61269529':{'en': '<NAME>'},
'6126953':{'en': 'Leeton'},
'61269540':{'en': 'Egansford'},
'61269541':{'en': 'Egansford'},
'61269542':{'en': 'Egansford'},
'61269543':{'en': 'Egansford'},
'61269544':{'en': 'Coleambally'},
'61269545':{'en': 'Egansford'},
'61269546':{'en': 'Coleambally'},
'61269547':{'en': 'Coleambally'},
'61269548':{'en': 'Gala Vale'},
'61269549':{'en': 'Coleambally'},
'6126955':{'en': 'Leeton'},
'61269550':{'en': 'Stanbridge'},
'61269551':{'en': 'Stanbridge'},
'61269552':{'en': 'Stanbridge'},
'61269553':{'en': 'Landervale'},
'6126956':{'en': 'Landervale'},
'61269560':{'en': 'Bundure'},
'61269561':{'en': 'Bundure'},
'61269562':{'en': 'Grong Grong'},
'61269570':{'en': 'The Rock'},
'61269571':{'en': 'Urana'},
'61269572':{'en': 'Wagga Wagga'},
'61269573':{'en': 'Wantabadgery'},
'61269574':{'en': 'Winchendon Vale'},
'61269575':{'en': 'Alleena'},
'61269576':{'en': 'Burcher'},
'61269577':{'en': 'Kikoira'},
'61269578':{'en': 'Marsden'},
'61269579':{'en': 'Tallimba'},
'61269580':{'en': 'Narrandera'},
'61269581':{'en': 'Narrandera'},
'61269582':{'en': 'Narrandera'},
'61269583':{'en': 'Narrandera'},
'61269584':{'en': 'Bundure'},
'61269585':{'en': '<NAME>'},
'61269586':{'en': 'Landervale'},
'61269587':{'en': 'Morundah'},
'61269588':{'en': 'Sandigo'},
'61269589':{'en': 'Narrandera'},
'6126959':{'en': 'Narrandera'},
'61269597':{'en': 'Morundah'},
'61269598':{'en': 'Sandigo'},
'61269600':{'en': '<NAME>'},
'61269601':{'en': 'Griffith'},
'61269602':{'en': 'Griffith'},
'61269603':{'en': 'Griffith'},
'61269604':{'en': 'Yenda'},
'61269605':{'en': '<NAME>'},
'61269606':{'en': 'Goolgowi'},
'61269607':{'en': 'Gunbar'},
'61269608':{'en': '<NAME>'},
'61269609':{'en': 'Merriwagga'},
'61269610':{'en': 'Griffith'},
'61269611':{'en': 'Bunda'},
'61269612':{'en': 'Warrawidgee'},
'61269613':{'en': 'Yenda'},
'61269614':{'en': 'Melbergen'},
'61269615':{'en': '<NAME>'},
'61269616':{'en': 'Wallanthery'},
'61269617':{'en': 'Hillston'},
'61269618':{'en': 'Griffith'},
'61269619':{'en': 'Griffith'},
'6126962':{'en': 'Griffith'},
'6126963':{'en': 'Griffith'},
'61269639':{'en': 'Barellan'},
'6126964':{'en': 'Griffith'},
'6126965':{'en': 'Goolgowi'},
'61269652':{'en': 'Gunbar'},
'61269653':{'en': 'Melbergen'},
'61269654':{'en': 'Merriwagga'},
'61269660':{'en': 'Griffith'},
'61269661':{'en': 'Rankins Springs'},
'61269662':{'en': 'Griffith'},
'61269663':{'en': 'Rankins Springs'},
'61269664':{'en': 'Rankins Springs'},
'61269665':{'en': 'Rankins Springs'},
'61269666':{'en': 'Rankins Springs'},
'61269667':{'en': 'Griffith'},
'61269668':{'en': 'Griffith'},
'61269669':{'en': 'Griffith'},
'61269670':{'en': 'Hillston'},
'61269671':{'en': 'Hillston'},
'61269672':{'en': 'Hillston'},
'61269673':{'en': 'Hillston'},
'61269674':{'en': 'Wallanthery'},
'61269675':{'en': 'Bunda'},
'61269676':{'en': 'Black Stump'},
'61269677':{'en': 'Black Stump'},
'61269678':{'en': 'Wee Elwah'},
'61269679':{'en': 'Wee Elwah'},
'61269680':{'en': 'Yenda'},
'61269681':{'en': 'Yenda'},
'61269682':{'en': 'Yenda'},
'61269683':{'en': 'Yenda'},
'61269684':{'en': '<NAME>'},
'61269685':{'en': '<NAME>'},
'61269686':{'en': 'Warrawidgee'},
'61269687':{'en': 'Warrawidgee'},
'61269688':{'en': 'Goolgowi'},
'61269689':{'en': 'Goolgowi'},
'6126969':{'en': 'Griffith'},
'61269700':{'en': 'Warralonga'},
'61269701':{'en': 'West Wyalong'},
'61269702':{'en': 'West Wyalong'},
'61269703':{'en': 'Kikoira'},
'61269704':{'en': 'West Wyalong'},
'61269705':{'en': 'Burcher'},
'61269706':{'en': 'Marsden'},
'61269707':{'en': 'Tallimba'},
'61269708':{'en': 'Tullibigeal'},
'61269709':{'en': 'Ungarie'},
'6126971':{'en': 'Wagga Wagga'},
'61269720':{'en': 'West Wyalong'},
'61269721':{'en': 'West Wyalong'},
'61269722':{'en': 'West Wyalong'},
'61269723':{'en': 'West Wyalong'},
'61269724':{'en': 'West Wyalong'},
'61269725':{'en': 'Burcher'},
'61269726':{'en': 'Kikoira'},
'61269727':{'en': 'Marsden'},
'61269728':{'en': 'Marsden'},
'61269729':{'en': 'Tullibigeal'},
'61269730':{'en': 'Temora'},
'61269731':{'en': 'Temora'},
'61269732':{'en': 'Ariah Park'},
'61269733':{'en': 'Bambilla'},
'61269734':{'en': 'Booroorban'},
'61269735':{'en': 'Carrathool'},
'61269736':{'en': 'Hay'},
'61269737':{'en': 'Temora'},
'61269738':{'en': 'Springdale'},
'61269739':{'en': 'Narraburra'},
'61269740':{'en': 'Ariah Park'},
'61269741':{'en': 'Ariah Park'},
'61269742':{'en': 'Ivanhoe'},
'61269743':{'en': 'Ariah Park'},
'61269744':{'en': 'Lachlan'},
'61269745':{'en': 'Maude'},
'61269746':{'en': 'Bundure'},
'61269747':{'en': 'Coleambally'},
'61269748':{'en': 'Egansford'},
'61269749':{'en': '<NAME>'},
'61269750':{'en': 'Grong Grong'},
'61269751':{'en': 'Landervale'},
'61269752':{'en': 'Alleena'},
'61269753':{'en': 'Burcher'},
'61269754':{'en': 'Burcher'},
'61269755':{'en': 'Warralonga'},
'61269756':{'en': 'Weethalle'},
'61269757':{'en': 'Tallimba'},
'61269758':{'en': 'Ungarie'},
'61269759':{'en': 'Ungarie'},
'61269760':{'en': 'Leeton'},
'61269761':{'en': 'Morundah'},
'61269762':{'en': 'Barmedman'},
'61269763':{'en': 'Barmedman'},
'61269764':{'en': 'Barmedman East'},
'61269765':{'en': 'Narrandera'},
'61269766':{'en': 'Sandigo'},
'61269767':{'en': 'Stanbridge'},
'61269768':{'en': 'Ardlethan'},
'61269769':{'en': 'Ariah Park'},
'6126977':{'en': 'Temora'},
'61269780':{'en': 'Temora'},
'61269781':{'en': 'Temora'},
'61269782':{'en': 'Ardlethan'},
'61269783':{'en': 'Ardlethan'},
'61269784':{'en': 'Barmedman'},
'61269785':{'en': 'Barmedman East'},
'61269786':{'en': 'Narraburra'},
'61269787':{'en': 'Springdale'},
'61269788':{'en': 'Temora'},
'61269789':{'en': 'Bidgeemia'},
'6126979':{'en': 'West Wyalong'},
'61269791':{'en': 'Weethalle'},
'61269800':{'en': 'Temora'},
'61269801':{'en': 'Temora'},
'61269802':{'en': 'Temora'},
'61269803':{'en': 'Ardlethan'},
'61269804':{'en': 'Boree Creek'},
'61269805':{'en': 'Coolamon'},
'61269806':{'en': 'Cowabbie'},
'61269807':{'en': 'Narraburra'},
'61269808':{'en': 'Springdale'},
'61269809':{'en': 'Temora'},
'61269810':{'en': 'Talbingo'},
'61269811':{'en': 'Tumut'},
'61269812':{'en': 'Tumut'},
'61269813':{'en': 'Tumut'},
'61269814':{'en': 'Tumut'},
'61269815':{'en': 'Gundagai'},
'61269816':{'en': 'Gundagai'},
'61269817':{'en': 'Gundagai'},
'61269818':{'en': 'Cootamundra'},
'61269819':{'en': 'Cootamundra'},
'61269820':{'en': 'Currawarna'},
'61269821':{'en': 'Bethungra'},
'61269822':{'en': 'Mannus'},
'61269823':{'en': 'Nangus'},
'61269824':{'en': 'Nangus'},
'61269825':{'en': 'Stockinbingal'},
'61269826':{'en': 'Tooma'},
'61269827':{'en': 'Tumbarumba'},
'61269828':{'en': 'Tumut'},
'61269829':{'en': 'Wallendbeen'},
'61269830':{'en': 'Wantabadgery'},
'61269831':{'en': 'Griffith'},
'61269832':{'en': 'Griffith'},
'61269833':{'en': 'Griffith'},
'61269834':{'en': 'Wagga Wagga'},
'61269835':{'en': 'Tullibigeal'},
'61269836':{'en': 'Ungarie'},
'61269837':{'en': 'Warralonga'},
'61269838':{'en': 'Weethalle'},
'61269839':{'en': 'West Wyalong'},
'61269840':{'en': 'Adelong'},
'61269841':{'en': 'Batlow'},
'61269842':{'en': 'Bethungra'},
'61269843':{'en': 'Burra'},
'61269844':{'en': 'Carabost'},
'61269845':{'en': 'Coolac'},
'61269846':{'en': 'Cootamundra'},
'61269847':{'en': 'Gundagai'},
'61269848':{'en': 'Mannus'},
'61269849':{'en': 'Nangus'},
'61269850':{'en': 'Burcher'},
'61269851':{'en': 'Kikoira'},
'61269852':{'en': 'Marsden'},
'61269853':{'en': 'Tallimba'},
'61269854':{'en': 'Tullibigeal'},
'61269855':{'en': 'Ungarie'},
'61269856':{'en': 'Warralonga'},
'61269857':{'en': 'Weethalle'},
'61269858':{'en': 'West Wyalong'},
'61269859':{'en': 'Stockinbingal'},
'61269860':{'en': 'Mangoplah'},
'61269861':{'en': 'Milbrulong'},
'61269862':{'en': 'Rannock'},
'61269863':{'en': 'Tarcutta'},
'61269864':{'en': 'The Rock'},
'61269865':{'en': 'Urana'},
'61269866':{'en': 'Wagga Wagga'},
'61269867':{'en': 'Wantabadgery'},
'61269868':{'en': '<NAME>'},
'61269869':{'en': 'Alleena'},
'61269870':{'en': 'Cowabbie'},
'61269871':{'en': 'Currawarna'},
'61269872':{'en': 'Galore'},
'61269873':{'en': 'Ganmain'},
'61269874':{'en': 'Henty'},
'61269875':{'en': 'Humula'},
'61269876':{'en': 'Junee'},
'61269877':{'en': '<NAME>'},
'61269878':{'en': 'Kyeamba'},
'61269879':{'en': 'Lockhart'},
'61269880':{'en': 'Ardlethan'},
'61269881':{'en': '<NAME>'},
'61269882':{'en': 'Barmedman'},
'61269883':{'en': 'Barmedman East'},
'61269884':{'en': 'Narraburra'},
'61269885':{'en': 'Springdale'},
'61269886':{'en': 'Temora'},
'61269887':{'en': 'Bidgeemia'},
'61269888':{'en': 'Boree Creek'},
'61269889':{'en': 'Coolamon'},
'61269890':{'en': 'Burcher'},
'61269891':{'en': 'Ungarie'},
'61269892':{'en': 'Galore'},
'61269893':{'en': 'Ganmain'},
'61269894':{'en': 'Henty'},
'61269895':{'en': 'Leeton'},
'61269896':{'en': 'Morundah'},
'61269897':{'en': 'Narrandera'},
'61269898':{'en': 'Sandigo'},
'61269899':{'en': 'Stanbridge'},
'61269900':{'en': 'Hay'},
'61269901':{'en': 'Hay'},
'61269902':{'en': 'Ivanhoe'},
'61269903':{'en': 'Booroorban'},
'61269904':{'en': 'Humula'},
'61269905':{'en': 'Carrathool'},
'61269906':{'en': 'Maude'},
'61269907':{'en': 'Lachlan'},
'61269908':{'en': 'Hay'},
'61269909':{'en': 'Hay'},
'61269910':{'en': 'Junee'},
'61269911':{'en': 'Bunda'},
'61269912':{'en': '<NAME>'},
'61269913':{'en': 'Griffith'},
'61269914':{'en': 'Griffith'},
'61269915':{'en': 'Melbergen'},
'61269916':{'en': 'Merriwagga'},
'61269917':{'en': '<NAME>'},
'61269918':{'en': 'Wallanthery'},
'61269919':{'en': 'Yenda'},
'61269920':{'en': 'Hay'},
'61269921':{'en': 'Ivanhoe'},
'61269922':{'en': 'Lachlan'},
'61269923':{'en': 'Maude'},
'61269924':{'en': 'Bundure'},
'61269925':{'en': 'Coleambally'},
'61269926':{'en': 'Egansford'},
'61269927':{'en': '<NAME>'},
'61269928':{'en': 'Grong Grong'},
'61269929':{'en': 'Landervale'},
'61269930':{'en': 'Booroorban'},
'61269931':{'en': 'Hay'},
'61269932':{'en': 'Hay'},
'61269933':{'en': 'Hay'},
'61269934':{'en': 'Hay'},
'61269935':{'en': 'Carrathool'},
'61269936':{'en': 'Maude'},
'61269937':{'en': 'Lachlan'},
'61269938':{'en': 'Lachlan'},
'61269939':{'en': 'Lachlan'},
'61269940':{'en': 'Melbergen'},
'61269941':{'en': 'Merriwagga'},
'61269942':{'en': '<NAME>'},
'61269943':{'en': 'Wallanthery'},
'61269944':{'en': 'Warrawidgee'},
'61269945':{'en': '<NAME>'},
'61269946':{'en': 'Yenda'},
'61269947':{'en': 'Bambilla'},
'61269948':{'en': 'Booroorban'},
'61269949':{'en': 'Carrathool'},
'61269950':{'en': 'Ivanhoe'},
'61269951':{'en': 'Ivanhoe'},
'61269952':{'en': 'Ivanhoe'},
'61269953':{'en': 'Bambilla'},
'61269954':{'en': 'Bambilla'},
'61269955':{'en': '<NAME>'},
'61269956':{'en': 'Kyeamba'},
'61269957':{'en': 'Lockhart'},
'61269958':{'en': 'Mangoplah'},
'61269959':{'en': 'Milbrulong'},
'6126996':{'en': '<NAME>'},
'61269970':{'en': 'Carrathool'},
'61269971':{'en': 'Maude'},
'61269972':{'en': 'Hay'},
'61269973':{'en': 'Hay'},
'61269974':{'en': 'Bunda'},
'61269975':{'en': '<NAME>'},
'61269976':{'en': 'Goolgowi'},
'61269977':{'en': 'Griffith'},
'61269978':{'en': 'Gunbar'},
'61269979':{'en': 'Hillston'},
'61269980':{'en': 'Stockinbingal'},
'61269981':{'en': 'Talbingo'},
'61269982':{'en': 'Tooma'},
'61269983':{'en': 'Tumbarumba'},
'61269984':{'en': 'Tumorrama'},
'61269985':{'en': 'Tumut'},
'61269986':{'en': 'Wallendbeen'},
'61269987':{'en': 'Yaven Creek'},
'61269988':{'en': 'Barellan'},
'61269989':{'en': 'Black Stump'},
'61269990':{'en': 'Adelong'},
'61269991':{'en': 'Batlow'},
'61269992':{'en': 'Bethungra'},
'61269993':{'en': 'Burra'},
'61269994':{'en': 'Carabost'},
'61269995':{'en': 'Coolac'},
'61269996':{'en': 'Cootamundra'},
'61269997':{'en': 'Gundagai'},
'61269998':{'en': 'Mannus'},
'61269999':{'en': 'Nangus'},
'6127200':{'en': 'Sydney'},
'6127201':{'en': 'Sydney'},
'61275000':{'en': 'Engadine'},
'61275001':{'en': 'Sutherland'},
'61275002':{'en': 'Engadine'},
'61275003':{'en': 'Sutherland'},
'61275004':{'en': 'Engadine'},
'61275005':{'en': 'Sutherland'},
'61275006':{'en': 'Engadine'},
'61275007':{'en': 'Sutherland'},
'61275008':{'en': 'Engadine'},
'61275009':{'en': 'Sutherland'},
'61275010':{'en': 'Engadine'},
'61275011':{'en': 'Sutherland'},
'61275012':{'en': 'Engadine'},
'61275013':{'en': 'Sutherland'},
'61275014':{'en': 'Engadine'},
'61275015':{'en': 'Sutherland'},
'6127800':{'en': 'Parramatta'},
'6127801':{'en': 'Parramatta'},
'6127802':{'en': 'Parramatta'},
'6127803':{'en': 'Parramatta'},
'61278040':{'en': 'Parramatta'},
'61278041':{'en': 'Parramatta'},
'61278042':{'en': 'Parramatta'},
'61278043':{'en': 'Parramatta'},
'61278044':{'en': 'Parramatta'},
'61278045':{'en': 'Parramatta'},
'61278046':{'en': 'Parramatta'},
'6127805':{'en': 'Parramatta'},
'6127806':{'en': 'Parramatta'},
'6127807':{'en': 'Parramatta'},
'61278082':{'en': 'Parramatta'},
'61278083':{'en': 'Parramatta'},
'61278084':{'en': 'Parramatta'},
'61278085':{'en': 'Parramatta'},
'61278086':{'en': 'Parramatta'},
'6127809':{'en': 'Parramatta'},
'6127810':{'en': 'Parramatta'},
'6127814':{'en': 'Parramatta'},
'6127900':{'en': 'Sydney'},
'6127902':{'en': 'Sydney'},
'6127903':{'en': 'Sydney'},
'6127909':{'en': 'Sydney'},
'6127922':{'en': 'Sydney'},
'6127923':{'en': 'Sydney'},
'6127924':{'en': 'Sydney'},
'6127966':{'en': 'Sydney'},
'61279888':{'en': 'Sydney'},
'612800':{'en': 'Sydney'},
'612801':{'en': 'Sydney'},
'612802':{'en': 'Sydney'},
'612803':{'en': 'Sydney'},
'6128040':{'en': 'Sydney'},
'6128041':{'en': 'Sydney'},
'6128042':{'en': 'Sydney'},
'61280430':{'en': 'Sydney'},
'61280431':{'en': 'Sydney'},
'61280432':{'en': 'Sydney'},
'61280433':{'en': 'Sydney'},
'61280445':{'en': 'Sydney'},
'61280446':{'en': 'Sydney'},
'61280447':{'en': 'Sydney'},
'61280448':{'en': 'Sydney'},
'61280449':{'en': 'Sydney'},
'6128045':{'en': 'Sydney'},
'6128046':{'en': 'Sydney'},
'6128047':{'en': 'Sydney'},
'6128048':{'en': 'Sydney'},
'6128049':{'en': 'Sydney'},
'612805':{'en': 'Sydney'},
'612806':{'en': 'Sydney'},
'612807':{'en': 'Sydney'},
'612808':{'en': 'Sydney'},
'6128090':{'en': 'Sydney'},
'6128094':{'en': 'Sydney'},
'6128100':{'en': 'Bankstown'},
'6128101':{'en': 'Liverpool'},
'6128102':{'en': 'Bankstown'},
'6128103':{'en': 'Liverpool'},
'61281030':{'en': 'Bankstown'},
'61281031':{'en': 'Bankstown'},
'61281032':{'en': 'Bankstown'},
'61281034':{'en': 'Sydney'},
'61281040':{'en': 'Liverpool'},
'61281041':{'en': 'Liverpool'},
'61281042':{'en': 'Sydney'},
'61281043':{'en': 'Liverpool'},
'61281044':{'en': 'Bankstown'},
'61281045':{'en': 'Bankstown'},
'61281046':{'en': 'Liverpool'},
'61281047':{'en': 'Bankstown'},
'61281048':{'en': 'Liverpool'},
'61281049':{'en': 'Bankstown'},
'6128105':{'en': 'Liverpool'},
'61281051':{'en': 'Bankstown'},
'61281053':{'en': 'Bankstown'},
'61281055':{'en': 'Bankstown'},
'61281057':{'en': 'Bankstown'},
'61281060':{'en': 'Bankstown'},
'61281061':{'en': 'Liverpool'},
'6128107':{'en': 'Liverpool'},
'6128113':{'en': 'Sydney'},
'6128114':{'en': 'Sydney'},
'6128115':{'en': 'Sydney'},
'6128116':{'en': 'Sydney'},
'6128117':{'en': 'Sydney'},
'61281180':{'en': 'Bankstown'},
'61281181':{'en': 'Liverpool'},
'61281182':{'en': 'Liverpool'},
'61281183':{'en': 'Liverpool'},
'61281184':{'en': 'Liverpool'},
'61281185':{'en': 'Liverpool'},
'61281186':{'en': 'Liverpool'},
'61281187':{'en': 'Liverpool'},
'61281188':{'en': 'Liverpool'},
'6128119':{'en': 'Liverpool'},
'6128120':{'en': 'Sydney'},
'6128121':{'en': 'Sydney'},
'6128122':{'en': 'Sydney'},
'6128123':{'en': 'Sydney'},
'6128188':{'en': 'Sydney'},
'6128189':{'en': 'Liverpool'},
'6128197':{'en': 'Sydney'},
'6128198':{'en': 'Sydney'},
'6128199':{'en': 'Sydney'},
'612820':{'en': 'Sydney'},
'612821':{'en': 'Sydney'},
'612822':{'en': 'Sydney'},
'612823':{'en': 'Sydney'},
'612824':{'en': 'Sydney'},
'612825':{'en': 'Sydney'},
'6128260':{'en': 'Sydney'},
'61282611':{'en': 'Sydney'},
'61282614':{'en': 'Sydney'},
'61282615':{'en': 'Sydney'},
'61282616':{'en': 'Sydney'},
'61282617':{'en': 'Sydney'},
'61282618':{'en': 'Sydney'},
'61282619':{'en': 'Sydney'},
'6128262':{'en': 'Sydney'},
'6128263':{'en': 'Sydney'},
'6128264':{'en': 'Sydney'},
'6128265':{'en': 'Sydney'},
'6128266':{'en': 'Sydney'},
'6128267':{'en': 'Sydney'},
'6128268':{'en': 'Sydney'},
'61282690':{'en': 'Sydney'},
'612827':{'en': 'Sydney'},
'612828':{'en': 'Sydney'},
'612829':{'en': 'Sydney'},
'612830':{'en': 'Sydney'},
'612831':{'en': 'Sydney'},
'612832':{'en': 'Sydney'},
'612833':{'en': 'Sydney'},
'612834':{'en': 'Sydney'},
'6128350':{'en': 'Sydney'},
'6128353':{'en': 'Sydney'},
'6128354':{'en': 'Sydney'},
'6128355':{'en': 'Sydney'},
'6128356':{'en': 'Sydney'},
'6128362':{'en': 'Sydney'},
'6128363':{'en': 'Sydney'},
'6128364':{'en': 'Sydney'},
'6128372':{'en': 'Sydney'},
'6128373':{'en': 'Sydney'},
'6128374':{'en': 'Sydney'},
'6128375':{'en': 'Sydney'},
'6128376':{'en': 'Sydney'},
'6128377':{'en': 'Sydney'},
'6128378':{'en': 'Sydney'},
'6128379':{'en': 'Sydney'},
'6128380':{'en': 'Sydney'},
'6128381':{'en': 'Sydney'},
'6128382':{'en': 'Sydney'},
'6128383':{'en': 'Sydney'},
'6128384':{'en': 'Sydney'},
'6128385':{'en': 'Sydney'},
'6128386':{'en': 'Sydney'},
'6128387':{'en': 'Sydney'},
'6128388':{'en': 'Sydney'},
'6128394':{'en': 'Sydney'},
'6128396':{'en': 'Sydney'},
'6128397':{'en': 'Sydney'},
'6128398':{'en': 'Sydney'},
'6128399':{'en': 'Sydney'},
'6128400':{'en': 'Avalon Beach'},
'6128401':{'en': '<NAME>'},
'6128402':{'en': | |
de Caxias",
"es_ES": "Duque de Caxias",
"fr_FR": "Duque de Caxias",
"it_IT": "Duque de Caxias",
"ja_JP": "ドゥケ・デ・カシアス",
"ko_KR": "두키지카시아스",
"pl_PL": "Duque de Caxias",
"pt_BR": "Duque de Caxias",
"ru_RU": "Дуки-ди-Кашиас"
},
"DURMUTI": {
"de_DE": "Durmuti",
"es_ES": "Durmuti",
"fr_FR": "Durmuti",
"it_IT": "Durmuti",
"ja_JP": "ドゥルムティ",
"ko_KR": "두르무티",
"pl_PL": "Durmuti",
"pt_BR": "Durmuti",
"ru_RU": "Дурмути"
},
"DUROCORTERON": {
"de_DE": "Durocorteron",
"es_ES": "Durocorteron",
"fr_FR": "Durocorter",
"it_IT": "Durocorteron",
"ja_JP": "ドゥロコルトルム",
"ko_KR": "두로코르테론",
"pl_PL": "Durocorteron",
"pt_BR": "Durocorteron",
"ru_RU": "Дурокортер"
},
"DUROCORTORUM": {
"de_DE": "Durocortorum",
"es_ES": "Durocortorum",
"fr_FR": "Durocortorum",
"it_IT": "Durocortorum",
"ja_JP": "ドゥロコルトルム",
"ko_KR": "두로코르토룸",
"pl_PL": "Durocortorum",
"pt_BR": "Durocortorum",
"ru_RU": "Дурокортурум"
},
"DUR_KURIGALZU": {
"de_DE": "Dur-Kurigalzu",
"es_ES": "Dur-Kurigalzu",
"fr_FR": "Dûr-Kurigalzu",
"it_IT": "Dur-Kurigalzu",
"ja_JP": "ドゥル・クリガルズ",
"ko_KR": "두르 쿠리갈수",
"pl_PL": "Dur-Kurigalzu",
"pt_BR": "Dur-Kurigalzu",
"ru_RU": "Дур-Куригальзу"
},
"DUSSELDORF": {
"de_DE": "Düsseldorf",
"es_ES": "Düsseldorf",
"fr_FR": "Düsseldorf",
"it_IT": "Düsseldorf",
"ja_JP": "デュッセルドルフ",
"ko_KR": "뒤셀도르프",
"pl_PL": "Dusseldorf",
"pt_BR": "Düsseldorf",
"ru_RU": "Дюссельдорф"
},
"DVIRADAPURA": {
"de_DE": "Dviradapura",
"es_ES": "Dviradapura",
"fr_FR": "Dviradapura",
"it_IT": "Dviradapura",
"ja_JP": "ドビラダプラ",
"ko_KR": "드비라다푸라",
"pl_PL": "Dwiradapura",
"pt_BR": "Dviradapura",
"ru_RU": "Двирадапура"
},
"DYRRACHIUM": {
"de_DE": "Dyrrachium",
"es_ES": "Dirraquio",
"fr_FR": "Dyrrachium",
"it_IT": "Dyrrachium",
"ja_JP": "デュッラキウム",
"ko_KR": "디라치움",
"pl_PL": "Dyrrachium",
"pt_BR": "Dirráquio",
"ru_RU": "Диррахий"
},
"DZIBILCHALTUN": {
"de_DE": "Dzibilchaltún",
"es_ES": "Dzibilchaltún",
"fr_FR": "Dzibilchaltún",
"it_IT": "Dzibilchaltun",
"ja_JP": "ジビルチャルトゥン",
"ko_KR": "드시빌찰툰",
"pl_PL": "Dzibilchaltún",
"pt_BR": "Dzibilchaltún",
"ru_RU": "Цибильчальтун"
},
"EBURACUM": {
"de_DE": "Eburacum",
"es_ES": "Eburacum",
"fr_FR": "Eburacum",
"it_IT": "Eburacum",
"ja_JP": "エブラクム",
"ko_KR": "에보라쿰",
"pl_PL": "Eburacum",
"pt_BR": "Eburacum",
"ru_RU": "Эбуракум"
},
"ECATEPEC": {
"de_DE": "Ecatepec",
"es_ES": "Ecatepec",
"fr_FR": "Ecatepec",
"it_IT": "Ecatepec",
"ja_JP": "エカテペク",
"ko_KR": "에카테팩",
"pl_PL": "Ecatepec",
"pt_BR": "Ecatepec",
"ru_RU": "Экатепек"
},
"EDESSA": {
"de_DE": "Edessa",
"es_ES": "Edessa",
"fr_FR": "Édessa",
"it_IT": "Edessa",
"ja_JP": "エデッサ",
"ko_KR": "에데사",
"pl_PL": "Edessa",
"pt_BR": "Edessa",
"ru_RU": "Эдесса"
},
"EDFU": {
"de_DE": "Edfu",
"es_ES": "Edfú",
"fr_FR": "Edfou",
"it_IT": "Edfu",
"ja_JP": "エドフ",
"ko_KR": "이드푸",
"pl_PL": "Edfu",
"pt_BR": "Edfu",
"ru_RU": "Эдфу"
},
"EDINBURGH": {
"de_DE": "Edinburgh",
"es_ES": "Edimburgo",
"fr_FR": "Édimbourg",
"it_IT": "Edimburgo",
"ja_JP": "エディンバラ",
"ko_KR": "에든버러",
"pl_PL": "Edynburg",
"pt_BR": "Edimburgo",
"ru_RU": "Эдинбург"
},
"EDIRNE": {
"de_DE": "Edirne",
"es_ES": "Edirne",
"fr_FR": "Edirne",
"it_IT": "Edirne",
"ja_JP": "エディルネ",
"ko_KR": "에디르네",
"pl_PL": "Edirne",
"pt_BR": "Edirne",
"ru_RU": "Эдирне"
},
"EDMONTON": {
"de_DE": "Edmonton",
"es_ES": "Edmonton",
"fr_FR": "Edmonton",
"it_IT": "Edmonton",
"ja_JP": "エドモントン",
"ko_KR": "에드먼턴",
"pl_PL": "Edmonton",
"pt_BR": "Edmonton",
"ru_RU": "Эдмонтон"
},
"EDZNA": {
"de_DE": "Edzná",
"es_ES": "Edzná",
"fr_FR": "Edzná",
"it_IT": "Edzna",
"ja_JP": "エズナ",
"ko_KR": "에드스나",
"pl_PL": "Edzna",
"pt_BR": "Edzna",
"ru_RU": "Эцна"
},
"EGER": {
"de_DE": "Eger",
"es_ES": "Eger",
"fr_FR": "Eger",
"it_IT": "Eger",
"ja_JP": "エゲル",
"ko_KR": "에게르",
"pl_PL": "Eger",
"pt_BR": "Eger",
"ru_RU": "Эгер"
},
"EINDHOVEN": {
"de_DE": "Eindhoven",
"es_ES": "Eindhoven",
"fr_FR": "Eindhoven",
"it_IT": "Eindhoven",
"ja_JP": "アイントホーフェン",
"ko_KR": "에인트호번",
"pl_PL": "Eindhoven",
"pt_BR": "Eindhoven",
"ru_RU": "Эйндховен"
},
"ELGIN": {
"de_DE": "Elgin",
"es_ES": "Elgin",
"fr_FR": "Elgin",
"it_IT": "Elgin",
"ja_JP": "エルジン",
"ko_KR": "엘긴",
"pl_PL": "Elgin",
"pt_BR": "Elgin",
"ru_RU": "Элгин"
},
"ELIP": {
"de_DE": "Elip",
"es_ES": "Elip",
"fr_FR": "Elip",
"it_IT": "Elip",
"ja_JP": "エリップ",
"ko_KR": "엘립",
"pl_PL": "Elip",
"pt_BR": "Elip",
"ru_RU": "Элип"
},
"ELIZAVETOVSKAYA": {
"de_DE": "Elizavetovskaya",
"es_ES": "Elizavetovskaya",
"fr_FR": "Elizavetovskaya",
"it_IT": "Elizavetovskaya",
"ja_JP": "エリザベートフスカヤ",
"ko_KR": "엘리자베토우스카이아",
"pl_PL": "Elizawietowskaja",
"pt_BR": "Elizavetovskaya",
"ru_RU": "Елизаветовская"
},
"ELUTHERA": {
"de_DE": "Eleuthera",
"es_ES": "Eleuthera",
"fr_FR": "Eleuthera",
"it_IT": "Eleuthera",
"ja_JP": "エルーセラ",
"ko_KR": "엘유세라",
"pl_PL": "Eleuthera",
"pt_BR": "Eleuthera",
"ru_RU": "Эльютера"
},
"ELVAS": {
"de_DE": "Elvas",
"es_ES": "Elvas",
"fr_FR": "Elvas",
"it_IT": "Elvas",
"ja_JP": "エルバス",
"ko_KR": "엘바스",
"pl_PL": "Elvas",
"pt_BR": "Elvas",
"ru_RU": "Элваш"
},
"EL_FURA": {
"de_DE": "Al Fura",
"es_ES": "El Fura",
"fr_FR": "El Fura",
"it_IT": "El Fura",
"ja_JP": "エルフラ",
"ko_KR": "엘 푸라",
"pl_PL": "El Fura",
"pt_BR": "El Fura",
"ru_RU": "Эль-Фура"
},
"EL_KURRU": {
"de_DE": "Al-Kurru",
"es_ES": "El-Kurru",
"fr_FR": "El-Kurru",
"it_IT": "El-Kurru",
"ja_JP": "エルクッル",
"ko_KR": "엘-쿠루",
"pl_PL": "El-Kurru",
"pt_BR": "El-Kurru",
"ru_RU": "Эль-Курру"
},
"EL_MIRADOR": {
"de_DE": "El Mirador",
"es_ES": "El Mirador",
"fr_FR": "El Mirador",
"it_IT": "El Mirador",
"ja_JP": "エル・ミラドール",
"ko_KR": "엘미라도르",
"pl_PL": "El Mirador",
"pt_BR": "El Mirador",
"ru_RU": "Эль-Мирадор"
},
"EMANGWENI": {
"de_DE": "Emangweni",
"es_ES": "Emangweni",
"fr_FR": "Emangweni",
"it_IT": "Emangweni",
"ja_JP": "エマングウェニ",
"ko_KR": "에만궤니",
"pl_PL": "Emangweni",
"pt_BR": "Emangweni",
"ru_RU": "Эмангвени"
},
"EMPANGENI": {
"de_DE": "Empangeni",
"es_ES": "Empangeni",
"fr_FR": "Empangeni",
"it_IT": "Empangeni",
"ja_JP": "エンパンゲニ",
"ko_KR": "엠판게니",
"pl_PL": "Empangeni",
"pt_BR": "Empangeni",
"ru_RU": "Эмпангени"
},
"ENSCHEDE": {
"de_DE": "Enschede",
"es_ES": "Enschede",
"fr_FR": "Enschede",
"it_IT": "Enschede",
"ja_JP": "エンスヘーデ",
"ko_KR": "엔스헤데",
"pl_PL": "Enschede",
"pt_BR": "Enschede",
"ru_RU": "Энсхеде"
},
"EPHESUS": {
"de_DE": "Ephesus",
"es_ES": "Éfeso",
"fr_FR": "Éphèse",
"it_IT": "Efeso",
"ja_JP": "エフェソス",
"ko_KR": "에베소",
"pl_PL": "Efez",
"pt_BR": "Éfeso",
"ru_RU": "Эфес"
},
"ERETRIA": {
"de_DE": "Eretria",
"es_ES": "Eretria",
"fr_FR": "Erétrie",
"it_IT": "Eretria",
"ja_JP": "エレトリア",
"ko_KR": "에레트리아",
"pl_PL": "Eretria",
"pt_BR": "Eretria",
"ru_RU": "Эретрия"
},
"ERFURT": {
"de_DE": "Erfurt",
"es_ES": "Erfurt",
"fr_FR": "Erfurt",
"it_IT": "Erfurt",
"ja_JP": "エルフルト",
"ko_KR": "에르푸르트",
"pl_PL": "Erfurt",
"pt_BR": "Erfurt",
"ru_RU": "Эрфурт"
},
"ERIDU": {
"de_DE": "Eridu",
"es_ES": "Eridu",
"fr_FR": "Eridu",
"it_IT": "Eridu",
"ja_JP": "エリドゥ",
"ko_KR": "에리두",
"pl_PL": "Eridu",
"pt_BR": "Eridu",
"ru_RU": "Эриду"
},
"ERZURUM": {
"de_DE": "Erzurum",
"es_ES": "Erzurum",
"fr_FR": "Erzurum",
"it_IT": "Erzurum",
"ja_JP": "エルズルム",
"ko_KR": "에르주룸",
"pl_PL": "Erzurum",
"pt_BR": "Erzurum",
"ru_RU": "Эрзурум"
},
"ESHNUNNA": {
"de_DE": "Eshnunna",
"es_ES": "Eshnunna",
"fr_FR": "Eshnunna",
"it_IT": "Eshnunna",
"ja_JP": "エシュヌンナ",
"ko_KR": "에스누나",
"pl_PL": "Esznunna",
"pt_BR": "Eshnunna",
"ru_RU": "Эшнунна"
},
"ESKISEHIR": {
"de_DE": "Eskişehir",
"es_ES": "Eskişehir",
"fr_FR": "Eskişehir",
"it_IT": "Eskişehir",
"ja_JP": "エスキシェヒル",
"ko_KR": "에스키셰히르",
"pl_PL": "Eskişehir",
"pt_BR": "Esquiceir",
"ru_RU": "Эскишехир"
},
"ESSEN": {
"de_DE": "Essen",
"es_ES": "Essen",
"fr_FR": "Essen",
"it_IT": "Essen",
"ja_JP": "エッセン",
"ko_KR": "에센",
"pl_PL": "Essen",
"pt_BR": "Essen",
"ru_RU": "Эссен"
},
"ESZTERGOM": {
"de_DE": "Esztergom",
"es_ES": "Estrigonia",
"fr_FR": "Esztergom",
"it_IT": "Esztergom",
"ja_JP": "エステルゴム",
"ko_KR": "에스테르곰",
"pl_PL": "Ostrzyhom",
"pt_BR": "Esztergom",
"ru_RU": "Эстергом"
},
"ETHEKWINI": {
"de_DE": "Ethekwini",
"es_ES": "e'Thekwini",
"fr_FR": "Ethekwini",
"it_IT": "Ethekwini",
"ja_JP": "エテクウィニ",
"ko_KR": "에테퀴니",
"pl_PL": "Ethekwini",
"pt_BR": "Durban",
"ru_RU": "Этеквини"
},
"EVORA": {
"de_DE": "Évora",
"es_ES": "Évora",
"fr_FR": "Évora",
"it_IT": "Évora",
"ja_JP": "エヴォラ",
"ko_KR": "에보라",
"pl_PL": "Évora",
"pt_BR": "Évora",
"ru_RU": "Эвора"
},
"EXETER": {
"de_DE": "Exeter",
"es_ES": "Exeter",
"fr_FR": "Exeter",
"it_IT": "Exeter",
"ja_JP": "エクセター",
"ko_KR": "엑서터",
"pl_PL": "Exeter",
"pt_BR": "Exeter",
"ru_RU": "Эксетер"
},
"EZIQWAQWENI": {
"de_DE": "Eziqwaqweni",
"es_ES": "Eziqwaqweni",
"fr_FR": "Eziqwaqweni",
"it_IT": "Eziqwaqweni",
"ja_JP": "エジクワクウェニ",
"ko_KR": "에지콰크웨니",
"pl_PL": "Eziqwaqweni",
"pt_BR": "Eziqwaqweni",
"ru_RU": "Эзикваквени"
},
"FADAMA": {
"de_DE": "Fadama",
"es_ES": "Fadama",
"fr_FR": "Fadama",
"it_IT": "Fadama",
"ja_JP": "ファダマ",
"ko_KR": "파다마",
"pl_PL": "Fadama",
"pt_BR": "Fadama",
"ru_RU": "Фадама"
},
"FAILAKA": {
"de_DE": "Failaka",
"es_ES": "Failaka",
"fr_FR": "Failaka",
"it_IT": "Failaka",
"ja_JP": "ファイラカ",
"ko_KR": "파일라카",
"pl_PL": "Failaka",
"pt_BR": "Failaka",
"ru_RU": "Файлака"
},
"FALUN": {
"de_DE": "Falun",
"es_ES": "Falun",
"fr_FR": "Falun",
"it_IT": "Falun",
"ja_JP": "ファールン",
"ko_KR": "팔룬",
"pl_PL": "Falun",
"pt_BR": "Falun",
"ru_RU": "Фалун"
},
"FARAS": {
"de_DE": "Faras",
"es_ES": "Faras",
"fr_FR": "Faras",
"it_IT": "Faras",
"ja_JP": "ファラス",
"ko_KR": "파라스",
"pl_PL": "Faras",
"pt_BR": "Faras",
"ru_RU": "Фарас"
},
"FARO": {
"de_DE": "Faro",
"es_ES": "Faro",
"fr_FR": "Faro",
"it_IT": "Faro",
"ja_JP": "ファロ",
"ko_KR": "파로",
"pl_PL": "Faro",
"pt_BR": "Faro",
"ru_RU": "Фаро"
},
"FEIRA_DE_SANTANA": {
"de_DE": "Feira de Santana",
"es_ES": "Feira de Santana",
"fr_FR": "Feira de Santana",
"it_IT": "Feira de Santana",
"ja_JP": "フェイラ・デ・サンタナ",
"ko_KR": "페이라데산타나",
"pl_PL": "Feira de Santana",
"pt_BR": "Feira de Santana",
"ru_RU": "Фейра-ди-Сантана"
},
"FEZ": {
"de_DE": "Fez",
"es_ES": "Fez",
"fr_FR": "Fès",
"it_IT": "Fez",
"ja_JP": "フェズ",
"ko_KR": "페즈",
"pl_PL": "Fez",
"pt_BR": "Fez",
"ru_RU": "Фес"
},
"FLORIANOPOLIS": {
"de_DE": "Florianópolis",
"es_ES": "Florianópolis",
"fr_FR": "Florianópolis",
"it_IT": "Florianópolis",
"ja_JP": "フロリアノポリス",
"ko_KR": "플로리아노폴리스",
"pl_PL": "Florianopolis",
"pt_BR": "Florianópolis",
"ru_RU": "Флорианополис"
},
"FLORIDA_KEYS": {
"de_DE": "Florida Keys",
"es_ES": "Cayos de la Florida",
"fr_FR": "Florida Keys",
"it_IT": "Florida Keys",
"ja_JP": "フロリダキーズ",
"ko_KR": "플로리다키스",
"pl_PL": "Florida Keys",
"pt_BR": "Florida Keys",
"ru_RU": "Флорида-Кис"
},
"FLUSHING": {
"de_DE": "Vlissingen",
"es_ES": "Flushing",
"fr_FR": "Flushing",
"it_IT": "Flushing",
"ja_JP": "フラッシング",
"ko_KR": "플러싱",
"pl_PL": "Flushing",
"pt_BR": "Flushing",
"ru_RU": "Флашинг"
},
"FORFAR": {
"de_DE": "Forfar",
"es_ES": "Forfar",
"fr_FR": "Forfar",
"it_IT": "Forfar",
"ja_JP": "フォーファー",
"ko_KR": "포퍼",
"pl_PL": "Forfar",
"pt_BR": "Forfar",
"ru_RU": "Форфар"
},
"FOROWE_MAPU": {
"de_DE": "Forowe Mapu",
"es_ES": "Forowe Mapu",
"fr_FR": "Forowe Mapu",
"it_IT": "Forowe Mapu",
"ja_JP": "フォロウェ・マプ",
"ko_KR": "포로웨 마푸",
"pl_PL": "Forowe Mapu",
"pt_BR": "Forowe Mapu",
"ru_RU": "Форове-Мапу"
},
"FORRES": {
"de_DE": "Forres",
"es_ES": "Forres",
"fr_FR": "Forres",
"it_IT": "Forres",
"ja_JP": "フォレス",
"ko_KR": "포레스",
"pl_PL": "Forres",
"pt_BR": "Forres",
"ru_RU": "Форрес"
},
"FORTALEZA": {
"de_DE": "Fortaleza",
"es_ES": "Fortaleza",
"fr_FR": "Fortaleza",
"it_IT": "Fortaleza",
"ja_JP": "フォルタレサ",
"ko_KR": "포르탈레자",
"pl_PL": "Fortaleza",
"pt_BR": "Fortaleza",
"ru_RU": "Форталеза"
},
"FORT_CAROLUSBORG": {
"de_DE": "Fort Carolusborg",
"es_ES": "Fort Carolusborg",
"fr_FR": "Fort Carolusborg",
"it_IT": "Fort Carolusborg",
"ja_JP": "フォート・カロルスボル",
"ko_KR": "포트 카롤루스보리",
"pl_PL": "Fort Carolusborg",
"pt_BR": "Forte Carolusborg",
"ru_RU": "Форт-Карлсборг"
},
"FORT_KRISTINA": {
"de_DE": "Fort Kristina",
"es_ES": | |
import numpy as np
import matplotlib.pyplot as plt
import os, sys, time
from scipy.interpolate import RectBivariateSpline
from sklearn.metrics.pairwise import euclidean_distances
from matplotlib.ticker import FuncFormatter, MaxNLocator
import matplotlib.lines as mlines
from se2waveload import *
from Lib_GeneralFunctions import *
from Lib_GeneralSignalProcNAnalysis import *
from Lib_SigmoidProcessing import *
import pandas as pd
from mpl_toolkits.axes_grid1 import ImageGrid
from mpl_toolkits.axes_grid1.inset_locator import (inset_axes, InsetPosition,mark_inset)
import itertools
import string
def LabelizeAxisList(AxisList, Pos=(-0.1, 1.1), OffsetLabel=0, **kwargs):
    """Stamp consecutive uppercase letters (A, B, C, ...) onto a list of axes.

    Parameters
    ----------
    AxisList : iterable of matplotlib Axes to annotate, in labelling order.
    Pos : (x, y) label position in axes-fraction coordinates.
    OffsetLabel : int, index into the alphabet at which to start (0 -> 'A').
    **kwargs : forwarded to ``Axes.text`` (fontsize, weight, ...).
    """
    # Tuple default replaces the original mutable-list default argument,
    # which is shared across calls (classic Python pitfall); indexing
    # behaviour is identical for callers.
    for n, ax in enumerate(AxisList):
        ax.text(Pos[0], Pos[1], string.ascii_uppercase[n + OffsetLabel],
                transform=ax.transAxes, **kwargs)
# Sigmoid or any function of interest to represent the center of the fault / Zero level set function
def func(x, k=-0.0002, amp=2.0):
    """Zero-level-set profile describing the fault-centre curve.

    Evaluates amp * x * (1 - k) / (1 + k - 2*k*|x|); works on scalars and
    NumPy arrays alike.
    """
    numerator = amp * x * (1.0 - k)
    denominator = 1.0 + k - 2.0 * k * abs(x)
    return numerator / denominator
# The respective derivative ofthe previous zero level set function
def func_der(x, k=-0.0002, amp=2.0):
    """Derivative of the sigmoid-like zero-level-set profile ``func``.

    Evaluates amp * (1 - k^2) / (1 + k - 2*k*|x|)^2 for scalars or arrays.
    """
    denom = 1.0 + k - 2.0 * k * abs(x)
    return amp * (1.0 - k * k) / (denom * denom)
# Sigmoid or any function of interest to represent the center of the fault / Zero level set function
def Tiltfunc(x, theta=45*np.pi/180):
    """Planar (tilted straight-line) zero level set: f(x) = tan(theta) * x."""
    return np.tan(theta) * x
def Tiltfunc_der(x, theta=45*np.pi/180):
    """Derivative of the tilted zero level set: constant tan(theta).

    Returns an array with the shape and dtype of *x* filled with tan(theta),
    matching the original copy-and-fill behaviour.
    """
    return np.full_like(x, np.tan(theta))
class ZeroLevelSet:
    """Discretised zero level set (fault trace) with local frame vectors.

    Stores the sampled curve (Xval, Fxval), its derivative samples
    (FxPrimeVal) and a human-readable geometry tag; the unit normal and
    tangent fields are computed once at construction.
    """

    def __init__(self, Xval, Fxval, FxPrimeVal, GeometryDescription):
        self.Xval = Xval
        self.Fxval = Fxval
        self.FxPrimeVal = FxPrimeVal
        self.GeometryDescription = GeometryDescription
        # Precompute the 2 x N stacks of unit frame vectors along the curve.
        self.Normal = np.array(self.NormalVector(self.FxPrimeVal))
        self.Tangent = np.array(self.TangentVector(self.FxPrimeVal))

    def __repr__(self):
        return "Zero level set: {GeometryDescription} geometry".format(
            GeometryDescription=self.GeometryDescription)

    def __str__(self):
        return self.__repr__()

    def PlotZeroLevelSet(self):
        """Draw the curve as a black line on the current matplotlib axes."""
        plt.plot(self.Xval, self.Fxval, "k-")

    def TangentVector(self, fPrimeX):
        """Unit tangent (tx, ty) for slope samples *fPrimeX*."""
        mag = np.sqrt(1.0 + fPrimeX * fPrimeX)
        return 1.0 / mag, fPrimeX / mag

    def NormalVector(self, fPrimeX):
        """Unit normal (nx, ny) for slope samples *fPrimeX* (left-handed of tangent)."""
        mag = np.sqrt(1.0 + fPrimeX * fPrimeX)
        return -fPrimeX / mag, 1.0 / mag
def SeparateList(List2Sep, nx, ny):
    """Split an interleaved flat sequence [x0, y0, x1, y1, ...] into grids.

    Returns two (nx, ny) arrays (x-component, y-component), reshaped in
    Fortran (column-major) order to match the solver's node numbering.
    """
    xFlat = List2Sep[::2]
    yFlat = List2Sep[1::2]
    xGrid = np.reshape(xFlat, (nx, ny), "F")
    yGrid = np.reshape(yFlat, (nx, ny), "F")
    return xGrid, yGrid
def ExtractFields(w_filename, se2_coor):
    """Load one se2wave snapshot and unpack its grids.

    Returns (time, CoorX, CoorY, DisplX, DisplY, VelX, VelY).  Coordinates
    are reshaped with the dimensions stored in *se2_coor*; displacement and
    velocity use the dimensions recorded in the wavefield file itself.
    """
    se2_field = se2wave_load_wavefield(w_filename, True, True)
    coorDims = (se2_coor['nx'].item(), se2_coor['ny'].item())
    fieldDims = (se2_field['nx'].item(), se2_field['ny'].item())
    LCoorX, LCoorY = SeparateList(se2_coor['coor'], *coorDims)
    LFieldX, LFieldY = SeparateList(se2_field['displ'], *fieldDims)
    LFieldvelX, LFieldvelY = SeparateList(se2_field['vel'], *fieldDims)
    return (se2_field["time"].item(), LCoorX, LCoorY,
            LFieldX, LFieldY, LFieldvelX, LFieldvelY)
def GetBivariateSplineFuncFromFields(LCoorX, LCoorY, LFieldX, LFieldY, LFieldvelX, LFieldvelY):
    """Build bilinear interpolators for displacement and velocity fields.

    The 1-D knot axes are taken from the first column/row of the coordinate
    grids (a regular tensor-product mesh is assumed, as in the original).
    Returns ([displ_x, displ_y], [vel_x, vel_y]) RectBivariateSpline pairs.
    """
    xAxis = LCoorX[:, 0]
    yAxis = LCoorY[0, :]

    def _bilinear(field):
        # kx=ky=1 -> piecewise-bilinear interpolation, exact on the nodes.
        return RectBivariateSpline(xAxis, yAxis, field, kx=1, ky=1)

    SplineDispl = [_bilinear(LFieldX), _bilinear(LFieldY)]
    SplineVel = [_bilinear(LFieldvelX), _bilinear(LFieldvelY)]
    return SplineDispl, SplineVel
def GetLocData(Loc, SplineFunction, GetSlip=False):
    """Evaluate a spline pair at one (x, y) location.

    *SplineFunction* is a [x-component, y-component] pair of
    RectBivariateSpline objects; returns the two scalar values.
    ``GetSlip`` is accepted for call-site compatibility but is not used in
    this code path.
    """
    xLoc, yLoc = Loc[0], Loc[1]
    CompX = SplineFunction[0](xLoc, yLoc)[0][0]
    CompY = SplineFunction[1](xLoc, yLoc)[0][0]
    return CompX, CompY
# Wrappers
def GetSplineFunctions(w_filename, se2_coor):
    """Wrapper: load a wavefield snapshot and return interpolators for it.

    Returns (time, [displ_x, displ_y] splines, [vel_x, vel_y] splines).
    """
    fields = ExtractFields(w_filename, se2_coor)
    TimeStepVal = fields[0]
    # fields[1:3] are the coordinate grids, fields[3:] the four field grids.
    SplineDisplPair, SplineVelPair = GetBivariateSplineFuncFromFields(*fields[1:])
    return TimeStepVal, SplineDisplPair, SplineVelPair
def FormatAx(ax, MaxNLoc=5, Axins=False):
    """Apply the shared axis cosmetics: equal aspect, capped tick count and
    forced scientific notation on both axes.

    When *Axins* is true (inset axes), the automatic offset text is hidden
    and the power-of-ten exponent is re-annotated manually inside the axes.
    """
    ax.set_aspect("equal")
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_major_locator(MaxNLocator(MaxNLoc))
        axis.major.formatter.set_powerlimits((0, 0))
    if not Axins:
        return
    ax.get_xaxis().get_offset_text().set_visible(False)
    # Recover the exponent from the largest tick value and place it manually.
    ax_max = max(ax.get_xticks())
    exponent_axis = np.floor(np.log10(ax_max)).astype(int)
    ax.annotate(r'$\times$10$^{%i}$' % (exponent_axis),
                xy=(0.5, 0.1), xycoords='axes fraction')
def FormatAxNormal(ax):
    """Equal aspect ratio with at most five major ticks on each axis."""
    ax.set_aspect("equal")
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_major_locator(MaxNLocator(5))
def PlotDomain(CoorX, CoorY, Field, FieldName, TimeTxt, **kwargs):
    """Plot one scalar field over the 2-D domain as a colour mesh.

    Parameters
    ----------
    CoorX, CoorY : 2-D coordinate grids for the mesh.
    Field : 2-D array rendered with ``pcolormesh``.
    FieldName : two-element sequence; [0] is the axes title,
        [1] the colorbar label.
    TimeTxt : float, simulation time stamped inside the axes.
    **kwargs : forwarded to ``pcolormesh`` (cmap, vmin/vmax, ...).

    Returns
    -------
    (fig, img, ax) : the figure, the mesh artist and the axes.
    """
    try:
        # Preferred path: constrained_layout keeps labels/colorbar tidy.
        fig = plt.figure(figsize=(10, 10), dpi=300, constrained_layout=True)
        gs = fig.add_gridspec(1, 1)
        ax = fig.add_subplot(gs[:, :])
    except Exception:
        # Fallback for older matplotlib without add_gridspec /
        # constrained_layout.  (Was a bare ``except:``, which also swallowed
        # KeyboardInterrupt and SystemExit.)
        fig = plt.figure(figsize=(10, 10), dpi=300)
        ax = fig.add_subplot(1, 1, 1)
    ax.set_title("{FName}".format(FName=FieldName[0]))
    ax.set_xlabel("X-Coordinate [m]")
    ax.set_ylabel("Y-Coordinate [m]")
    ax.set_aspect('equal', 'box')
    img = ax.pcolormesh(CoorX, CoorY, Field, **kwargs)
    ax.annotate(text="time [s]: {0:.2f}".format(TimeTxt), xy=[0.8, 0.1],
                xycoords="axes fraction")
    cbar = fig.colorbar(img, shrink=.5)
    cbar.ax.set_ylabel(FieldName[1])
    return fig, img, ax
def PlotFullSetup(CoorX, CoorY, Field1, Field2, StressFromPickle, FieldNames,TimeTxt,InsetZoom=[6250,6750,3400,3900],**kwargs):
    """Build the 2x3 summary figure: displacement, velocity and shear stress.

    The top row (ax01..ax03) is created but left empty here for the caller to
    fill; the bottom row shows Field1 (u_x), Field2 (v_x) and the stress
    field unpacked from *StressFromPickle* = (X, Y, sigma12), each with an
    inset colorbar.  ax2/ax3 additionally get zoomed inset axes over the
    window *InsetZoom* = [x0, x1, y0, y1].

    NOTE(review): *InsetZoom* is a mutable default argument (harmless while
    it is only read, but fragile); **kwargs passed alongside the hard-coded
    vmax/vmin of img3 would raise on a duplicate key — confirm callers never
    pass vmin/vmax.

    Returns (fig, [ax01, ax02, ax03, ax1, ax2, ax3]).
    """
    fig = plt.figure(figsize = (12, 8),dpi=300) #constrained_layout=True
    gs = fig.add_gridspec(2, 3, wspace=0.15,hspace=0.2)
    # Top row: placeholder axes (filled by the caller).
    ax01 = fig.add_subplot(gs[0, 0])
    ax02 = fig.add_subplot(gs[0, 1])
    ax03 = fig.add_subplot(gs[0, 2])
    # Bottom row: the three field panels.
    ax1 = fig.add_subplot(gs[-1, 0])
    ax2 = fig.add_subplot(gs[-1, 1])
    ax3 = fig.add_subplot(gs[-1, 2])
    #Plot
    #ax1.set_title("{FName}".format(FName = FieldNames[0]))
    ax1.set_xlabel("$x$ [m]")
    ax2.set_xlabel("$x$ [m]")
    ax3.set_xlabel("$x$ [m]")
    ax1.set_ylabel("$y$ [m]")
    FormatAx(ax1)
    FormatAx(ax2)
    FormatAx(ax3)
    img1 = ax1.pcolormesh(CoorX, CoorY, Field1,**kwargs)
    img2 = ax2.pcolormesh(CoorX, CoorY, Field2,**kwargs)
    # Stress panel uses fixed symmetric colour limits of +/- 2e7 Pa.
    img3 = ax3.pcolormesh(StressFromPickle[0], StressFromPickle[1], StressFromPickle[2], shading="flat",
                          vmax = 2e7, vmin= -2e7, **kwargs)
    #ax2.tick_params(labelleft=False)
    #ax3.tick_params(labelleft=False)
    #ax2.yaxis.get_major_formatter().set_scientific(False)
    #ax3.yaxis.get_major_formatter().set_scientific(False)
    ax1.annotate(text="time [s]: {0:.2f}".format(TimeTxt),xy=[0.05,0.9], xycoords= "axes fraction")
    # Colorbar for the ax1
    cbaxes = inset_axes(ax1,width="40%",height="4%",loc=3, borderpad=2)
    plt.colorbar(img1,cax=cbaxes,orientation="horizontal", label=r"$u_{x}$ [m]")
    cbaxes.xaxis.set_label_position('top')
    # Colorbar for the ax2
    cbaxes = inset_axes(ax2,width="40%",height="4%",loc=3, borderpad=2)
    plt.colorbar(img2,cax=cbaxes,orientation="horizontal", label=r"$v_{x}$ [m/s]")
    cbaxes.xaxis.set_label_position('top')
    # Colorbar for the ax3
    cbaxes = inset_axes(ax3,width="40%",height="4%",loc=3, borderpad=2)
    plt.colorbar(img3,cax=cbaxes,orientation="horizontal", label=r"$\sigma_{12}$ [Pa]")
    cbaxes.xaxis.set_label_position('top')
    # Give the number of ticks in the colorbar
    cbaxes.xaxis.set_major_locator(MaxNLocator(4))
    # Give an offset for the scientific notation exponent (manual exponent
    # annotation replaces the hidden automatic offset text).
    cbaxes.get_xaxis().get_offset_text().set_visible(False)
    ax_max = max(cbaxes.get_xticks())
    exponent_axis = np.floor(np.log10(ax_max)).astype(int)
    cbaxes.annotate(r'$\times$10$^{%i}$'%(exponent_axis),
                    xy=(1.01, -.01), xycoords='axes fraction')
    InsetLoc = [0.67, 0.08, 0.3, 0.3]
    # Inset plot for the ax2
    axins = ax2.inset_axes(InsetLoc)
    axins.pcolormesh(CoorX, CoorY, Field2, edgecolors='silver',lw='0.1', **kwargs)
    axins.set_xlim(InsetZoom[0], InsetZoom[1])
    axins.set_ylim(InsetZoom[2], InsetZoom[3])
    #axins.set_xticklabels('')
    #axins.set_yticklabels('')
    FormatAx(axins,MaxNLoc=2,Axins=True)
    mark_inset(ax2, axins,loc1=2, loc2=1, edgecolor="black",ec=".5",linewidth=.5)
    # Inset plot for the ax3
    axins = ax3.inset_axes(InsetLoc)
    axins.pcolormesh(StressFromPickle[0], StressFromPickle[1], StressFromPickle[2], shading="flat", edgecolors='silver',lw='0.1',
                     vmax = 2e7, vmin= -2e7, **kwargs)
    axins.set_xlim(InsetZoom[0], InsetZoom[1])
    axins.set_ylim(InsetZoom[2], InsetZoom[3])
    #axins.set_xticklabels('')
    #axins.set_yticklabels('')
    FormatAx(axins,MaxNLoc=2,Axins=True)
    mark_inset(ax3, axins,loc1=2, loc2=1, edgecolor="black",ec=".5",linewidth=.5)
    gs.tight_layout(fig)
    gs.update(top=0.95)
    ax = [ax01,ax02,ax03,ax1,ax2,ax3]
    #cbar.ax.set_ylabel(FieldName[1])
    return fig, ax
def PlotHalfSetup(CoorX, CoorY, Field1, Field2, StressFromPickle, FieldNames,TimeTxt,InsetZoom=[6250,6750,3400,3900],**kwargs):
    """Build a 1x3 figure: Field1 (u_x), Field2 (v_x) and a shear-stress field.

    Parameters
    ----------
    CoorX, CoorY : mesh coordinate arrays for the two field plots.
    Field1, Field2 : field values plotted on ax1/ax2 (labelled u_x [m] and v_x [m/s]).
    StressFromPickle : 3-sequence (x, y, sigma12) for the stress panel on ax3.
    FieldNames : unused here (kept for interface symmetry with sibling setups).
    TimeTxt : simulation time in seconds, annotated on ax1.
    InsetZoom : [xmin, xmax, ymin, ymax] limits for the zoom insets on ax2/ax3.
    **kwargs : forwarded to every pcolormesh call (e.g. cmap, norm).

    Returns (fig, [ax1, ax2, ax3]).

    NOTE(review): mutable default argument for InsetZoom — harmless while it is
    never mutated, but worth converting to a tuple.
    """
    fig = plt.figure(figsize = (12, 4),dpi=300) #constrained_layout=True
    gs = fig.add_gridspec(1, 3, wspace=0.15,hspace=0.2)
    ax1 = fig.add_subplot(gs[0, 0])
    ax2 = fig.add_subplot(gs[0, 1])
    ax3 = fig.add_subplot(gs[0, 2])
    #Plot
    #ax1.set_title("{FName}".format(FName = FieldNames[0]))
    ax2.set_xlabel("$x$ [m]")
    ax1.set_ylabel("$y$ [m]")
    FormatAx(ax1)
    FormatAx(ax2)
    FormatAx(ax3)
    img1 = ax1.pcolormesh(CoorX, CoorY, Field1,**kwargs)
    img2 = ax2.pcolormesh(CoorX, CoorY, Field2,**kwargs)
    # Stress panel uses its own pre-gridded coordinates and a fixed color range.
    img3 = ax3.pcolormesh(StressFromPickle[0], StressFromPickle[1], StressFromPickle[2], shading="flat",
                          vmax = 2e7, vmin= -2e7, **kwargs)
    # Only the left-most axis keeps y tick labels.
    ax2.tick_params(labelleft=False)
    ax3.tick_params(labelleft=False)
    ax2.yaxis.get_major_formatter().set_scientific(False)
    ax3.yaxis.get_major_formatter().set_scientific(False)
    ax1.annotate(text="t: {0:.2f} s".format(TimeTxt),xy=[0.05,0.9], xycoords= "axes fraction")
    # Colorbar for the ax1
    cbaxes = inset_axes(ax1,width="40%",height="4%",loc=3, borderpad=2)
    plt.colorbar(img1,cax=cbaxes,orientation="horizontal", label=r"$u_{x}$ [m]")
    cbaxes.xaxis.set_label_position('top')
    # Colorbar for the ax2
    cbaxes = inset_axes(ax2,width="40%",height="4%",loc=3, borderpad=2)
    plt.colorbar(img2,cax=cbaxes,orientation="horizontal", label=r"$v_{x}$ [m/s]")
    cbaxes.xaxis.set_label_position('top')
    # Colorbar for the ax3
    cbaxes = inset_axes(ax3,width="40%",height="4%",loc=3, borderpad=2)
    plt.colorbar(img3,cax=cbaxes,orientation="horizontal", label=r"$\sigma_{12}$ [Pa]")
    cbaxes.xaxis.set_label_position('top')
    # Give the number of ticks in the colorbar
    cbaxes.xaxis.set_major_locator(MaxNLocator(4))
    # Give an offset for the scientific notation exponent
    cbaxes.get_xaxis().get_offset_text().set_visible(False)
    ax_max = max(cbaxes.get_xticks())
    # NOTE(review): log10 assumes the largest tick is positive; a zero or
    # negative maximum would produce nan / an error here.
    exponent_axis = np.floor(np.log10(ax_max)).astype(int)
    cbaxes.annotate(r'$\times$10$^{%i}$'%(exponent_axis),
                    xy=(1.01, -.01), xycoords='axes fraction')
    InsetLoc = [0.67, 0.08, 0.3, 0.3]
    # Inset plot for the ax2
    axins = ax2.inset_axes(InsetLoc)
    axins.pcolormesh(CoorX, CoorY, Field2, edgecolors='silver',lw='0.1', **kwargs)
    axins.set_xlim(InsetZoom[0], InsetZoom[1])
    axins.set_ylim(InsetZoom[2], InsetZoom[3])
    #axins.set_xticklabels('')
    #axins.set_yticklabels('')
    mark_inset(ax2, axins,loc1=2, loc2=1, edgecolor="black",ec=".5",linewidth=.5)
    # Inset plot for the ax3
    axins = ax3.inset_axes(InsetLoc)
    axins.pcolormesh(StressFromPickle[0], StressFromPickle[1], StressFromPickle[2], shading="flat", edgecolors='silver',lw='0.1',
                     vmax = 2e7, vmin= -2e7, **kwargs)
    axins.set_xlim(InsetZoom[0], InsetZoom[1])
    axins.set_ylim(InsetZoom[2], InsetZoom[3])
    axins.set_xticklabels('')
    axins.set_yticklabels('')
    mark_inset(ax3, axins,loc1=2, loc2=1, edgecolor="black",ec=".5",linewidth=.5)
    #gs.tight_layout(fig)
    #gs.update(top=0.95)
    ax = [ax1,ax2,ax3]
    #cbar.ax.set_ylabel(FieldName[1])
    return fig, ax
def Plot4KomaSetup(CoorX, CoorY, Field1, Field2, FieldNames,TimeTxt,InsetZoom=[6250,6750,3400,3900],**kwargs):
    """Assemble a 2x2 ("4-koma") figure.

    The top row axes are created empty (filled by the caller); the bottom row
    shows Field1 (u_x) and Field2 (v_x), each with an inset colorbar, plus a
    zoomed inset of Field2 on the right panel.

    Returns (fig, [ax01, ax02, ax1, ax2]) — top-left, top-right,
    bottom-left, bottom-right.
    """
    fig = plt.figure(figsize = (8, 8),dpi=300) #constrained_layout=True
    grid = fig.add_gridspec(2, 2, wspace=0.15,hspace=0.2)
    top_left = fig.add_subplot(grid[0, 0])
    top_right = fig.add_subplot(grid[0, 1])
    field_ax1 = fig.add_subplot(grid[-1, 0])
    field_ax2 = fig.add_subplot(grid[-1, 1])
    axes = [top_left, top_right, field_ax1, field_ax2]
    # Axis labels on the bottom (field) row only.
    field_ax2.set_xlabel("$x$ [m]")
    field_ax1.set_xlabel("$x$ [m]")
    field_ax1.set_ylabel("$y$ [m]")
    FormatAx(field_ax1)
    FormatAx(field_ax2)
    mesh1 = field_ax1.pcolormesh(CoorX, CoorY, Field1,**kwargs)
    mesh2 = field_ax2.pcolormesh(CoorX, CoorY, Field2,**kwargs)
    #field_ax2.tick_params(labelleft=False,labelright=True)
    #field_ax2.yaxis.get_major_formatter().set_scientific(False)
    # Time stamp in the upper-left corner of the first field panel.
    field_ax1.annotate(text="time [s]: {0:.2f}".format(TimeTxt),xy=[0.05,0.9], xycoords= "axes fraction")
    # One small horizontal colorbar inside each field axis (same order as the
    # panels: left first, then right).
    for host_ax, mesh, bar_label in ((field_ax1, mesh1, r"$u_{x}$ [m]"),
                                     (field_ax2, mesh2, r"$v_{x}$ [m/s]")):
        bar_ax = inset_axes(host_ax, width="40%",height="4%",loc=3, borderpad=2)
        plt.colorbar(mesh, cax=bar_ax, orientation="horizontal", label=bar_label)
        bar_ax.xaxis.set_label_position('top')
    # Zoomed-in inset of Field2 in the lower-right corner of the right panel.
    zoom_ax = field_ax2.inset_axes([0.67, 0.1, 0.3, 0.3])
    zoom_ax.pcolormesh(CoorX, CoorY, Field2, edgecolors='silver',lw='0.1', **kwargs)
    zoom_ax.set_xlim(InsetZoom[0], InsetZoom[1])
    zoom_ax.set_ylim(InsetZoom[2], InsetZoom[3])
    FormatAx(zoom_ax,MaxNLoc=2,Axins=True)
    #zoom_ax.grid(True, which='both', axis='both', linestyle='-', color='k')
    mark_inset(field_ax2, zoom_ax,loc1=2, loc2=1, edgecolor="black",ec="0.5",linewidth=.5)
    grid.tight_layout(fig)
    #grid.update(top=0.95)
    #cbar.ax.set_ylabel(FieldName[1])
    return fig, axes
def PlotF4Setup(CoorX, CoorY, Field1, StressFromPickle, FieldNames,TimeTxt,InsetZoom=[6250,6750,3400,3900],**kwargs):
fig = plt.figure(figsize = (8, 8),dpi=300) #constrained_layout=True
gs = fig.add_gridspec(2, 2, wspace=0.15,hspace=0.2)
ax01 = fig.add_subplot(gs[0, 0])
ax02 = fig.add_subplot(gs[0, 1])
ax1 = fig.add_subplot(gs[-1, 0])
ax2 = fig.add_subplot(gs[-1, 1])
ax = [ax01,ax02,ax1,ax2]
#Plot
#ax1.set_title("{FName}".format(FName = FieldNames[0]))
ax2.set_xlabel("$x$ [m]")
ax1.set_xlabel("$x$ [m]")
ax1.set_ylabel("$y$ [m]")
FormatAx(ax1)
FormatAx(ax2)
img1 = ax1.pcolormesh(CoorX, CoorY, Field1,**kwargs)
img2 = ax2.pcolormesh(StressFromPickle[0], StressFromPickle[1], StressFromPickle[2], shading="flat",
vmax = 2e7, vmin= -2e7, **kwargs)
#ax2.tick_params(labelleft=True)
#ax2.yaxis.get_major_formatter().set_scientific(False)
ax1.annotate(text="time [s]: {0:.2f}".format(TimeTxt),xy=[0.05,0.9], xycoords= "axes fraction")
# Colorbar for the ax1
cbaxes = inset_axes(ax1, width="35%",height="4%",loc=3, borderpad=2)
plt.colorbar(img1,cax=cbaxes,orientation="horizontal", label=r"$v_{x}$ [m/s]")
cbaxes.xaxis.set_label_position('top')
# Colorbar for the ax2
cbaxes = inset_axes(ax2, width="35%",height="4%",loc=3, borderpad=2)
plt.colorbar(img2,cax=cbaxes,orientation="horizontal", label=r"$\sigma_{12}$ [Pa]")
cbaxes.xaxis.set_label_position('top')
# Give the number of ticks in the colorbar
cbaxes.xaxis.set_major_locator(MaxNLocator(4))
# Give an offset for the scientific notation exponent
cbaxes.get_xaxis().get_offset_text().set_visible(False)
ax_max = max(cbaxes.get_xticks())
exponent_axis = np.floor(np.log10(ax_max)).astype(int)
cbaxes.annotate(r'$\times$10$^{%i}$'%(exponent_axis),
xy=(1.01, -.01), xycoords='axes fraction')
# Inset plot for the ax1
axins = ax1.inset_axes([0.67, 0.1, 0.3, 0.3])
axins.pcolormesh(CoorX, CoorY, Field1, edgecolors='silver',lw='0.1', **kwargs)
axins.set_xlim(InsetZoom[0], InsetZoom[1])
axins.set_ylim(InsetZoom[2], InsetZoom[3])
#axins.set_xticklabels('')
#axins.set_yticklabels('')
FormatAx(axins,MaxNLoc=2,Axins=True)
mark_inset(ax1, | |
<gh_stars>0
import re
import codecs
import os
from pythonds.basic.stack import Stack
"""
[1] Main Function
"""
def main():
    """Convert one Sejong treebank file (InputDataType2/BGHO0437.txt, UTF-16)
    into CoNLL-U style rows printed per sentence, with the output file
    OutputDataType2/kr-ud-dev.conllu opened for writing.

    Sentence header lines start with ';'; analysis lines contain a
    parenthesised tree whose leaves look like 'morpheme/POS + morpheme/POS'.
    """
    directory = os.getcwd() + '/InputDataType2'
    filename = 'BGHO0437.txt'
    filename = os.path.join(directory, filename)
    # NOTE(review): `f` is never closed; a `with` block would be safer.
    f = open(filename, 'r', encoding='utf-16')
    is_inside = False  # NOTE(review): unused
    line_counter = 0  # NOTE(review): only referenced by commented-out debug prints
    # NOTE(review): backslash path separator makes this Windows-specific.
    OUT_FILENAME = "OutputDataType2\kr-ud-dev.conllu"
    with codecs.open(OUT_FILENAME, "w", "utf-8") as file:
        """
        Every time a line is read, the results are stored in sniparray and
        posarray.  Morphemes are not printed the moment they are read from a
        line; instead the whole line is read first and then the results are
        printed, for the following reason:
        For '옮겨졌다.' which is analysed as
            옮기/VV + 어/EC + 지/VX + 었/EP + 다/EF + ./SF
        only '다' and '.' of the original word (옮겨졌다) actually survive as
        morphemes, so instead of splitting it as
            옮기/VV + 어/EC + 지/VX + 었/EP + 다/EF + ./SF
        we split it as
            옮겨졌/VV + EC + VX + EP + 다/EF + ./SF
        To decide whether the original word is a plain concatenation of the
        base morpheme forms or was transformed, buffers are used
        (snipbuffer, posbuffer).
        Reading is accordingly divided into 4 cases; word_counter tracks the
        current reading position and end_of_sequence marks the end of a
        sentence.
        """
        sniparray = []
        sniparrayOrigin = []
        posarray = []
        # init the parenthesis-handling data structures
        stack = Stack()
        stackLevel = [1] # for Call By Reference
        totalCount = [1]
        currentLevel = [1]
        levelCountArray = []
        wordDic = dict()
        numDic = dict()
        split_sentence = ""
        num_word_in_sentence = 0  # NOTE(review): unused
        which_word_in_sentence =0
        word = ""
        # Sentences containing any of these characters are skipped entirely.
        special_characters = "'", "-", '"', "Q"
        special_character = False
        for line in f:
            #print (line)
            #break
            chomped_line = line
            # print(chomped_line)
            if chomped_line[0] ==";" :
                # Sentence header: flush the previous sentence, then reset state.
                numDic = wordDicToNumDic(wordDic, levelCountArray)
                """
                # For Debug . Uncomment this block to see a lot of information.
                print("[Last Result for dubg]")
                print(sniparray)
                print(sniparrayOrigin)
                print(numDic)
                print(wordDic)
                """
                for i in range (0, len(sniparrayOrigin)):
                    print (i+1
                        , "\t", getFormStr("".join(sniparray[i]))
                        , "\t", getLemmaStr(sniparrayOrigin[i][0])
                        , "\t", getUpostagStr("+".join(posarray[i]))
                        , "\t", getXpostagStr("+".join(posarray[i]))
                        , "\t", getFeatsStr("")
                        , "\t" , getHeadStr(numDic[wordDic[sniparrayOrigin[i][0]] - 2])
                        , "\t", getDeprelStr("")
                        , "\t", getDepsStr("")
                        , "\t", getMiscStr(wordDic[sniparrayOrigin[i][0]] - 1)
                        )
                print()
                split_sentence = chomped_line.split(' ')
                # print(split_sentence)
                sniparray = []
                sniparrayOrigin = []
                posarray = []
                which_word_in_sentence =0
                # reset the parenthesis-handling data structures
                stack = Stack()
                stackLevel[0] = 0
                totalCount[0] = 0
                currentLevel[0] = 0
                levelCountArray = []
                wordDic = dict()
                numDic = dict()
                #any(x in a for x in b)
                if any(x in special_characters for x in chomped_line):
                    special_character = True
                    print(chomped_line.replace("; ", ""))
                    print("This sentence contains special_character")
                else:
                    special_character = False
            elif special_character == True:
                continue
            elif ("(" in chomped_line) and ("\t" in chomped_line):
                m1 = re.match('(.*)(\([A-Z_]+ *\t*)+([^\(\)]+)([\)]+)', chomped_line)
                if m1:
                    #print ("features_of_previous_parsed_words", m1.group(1))
                    #print ("feature_of_current_parsed_word", m1.group(2))
                    #print ("parsed", m1.group(3))
                    parsed = m1.group(3)
                    previousStr = m1.group(1) + m1.group(2)
                    lastStr = m1.group(4)
                    #print ("last_parenthesis", m1.group(4))
                    snip_pairs = re.split(' \+ ', parsed) # +sign needs to be escaped in regex #던지/VV + 어/EC
                    snip_pairs_2d = []
                    parenthesesChecker(previousStr, stack, stackLevel, totalCount, levelCountArray, wordDic, currentLevel)
                    for snip_pair in snip_pairs:
                        # line_counter += 1
                        # print ("snip_pair = ", snip_pair) #던지/VV
                        m2 = re.match('^([^\/]+)\/([^\/]+)$', snip_pair)
                        if m2:
                            snip = m2.group(1)
                            pos = m2.group(2)
                            #print ("line", line_counter)
                            #print ("snip", snip)
                            #print ("pos", pos)
                            #print (line_counter,"\t",snip,"\t",pos)
                            parenthesesChecker(snip_pair, stack, stackLevel, totalCount, levelCountArray, wordDic, currentLevel, m2)
                            snip_pairs_2d.append([snip, pos])
                    parenthesesChecker(lastStr, stack, stackLevel, totalCount, levelCountArray, wordDic, currentLevel)
                    which_word_in_sentence +=1
                    # print(which_word_in_sentence)
                    try:
                        word = split_sentence[which_word_in_sentence]
                    except IndexError:
                        print("Indexerror, pass")
                    #print (snip_pairs_2d)
                    #print (word)
                    buffer_start = 0
                    bufer_end = len(snip_pairs_2d)-1  # NOTE(review): unused (typo of buffer_end?)
                    snipbuffer = []
                    posbuffer = []
                    word = list(word)
                    #print(word)
                    word_counter = 0
                    end_of_sequence = False
                    buffer = False
                    for snip_pair in snip_pairs_2d:
                        if snip_pairs_2d[-1] == snip_pair:
                            end_of_sequence = True
                        # 4 cases
                        # 1) if snippet is inside the word & no buffer
                        # 2) if snippet is inside the word & there is buffer
                        # 3) if snippet is NOT inside the word & no buffer
                        # 4) if snippet is NOT inside the word & there is buffer
                        # NOTE(review): `word` is a list of characters here, so the
                        # `in` test can only match single-character morphemes —
                        # confirm this is intended.
                        # 1) if snippet is inside the word & no buffer
                        # => Print current word
                        if (snip_pair[0] in word[word_counter:]) and (buffer == False):
                            # print(1)
                            sniparray.append([snip_pair[0]])
                            sniparrayOrigin.append([snip_pair[0]])
                            posarray.append([snip_pair[1]])
                            buffer_start += len(snip_pair[0])
                            buffer = False
                            word_counter +=1
                        # 2) if snippet is inside the word & there is buffer
                        # => Print Buffer and Print current word
                        elif (snip_pair[0] in word[word_counter:]) and (buffer == True):
                            # print(2)
                            #print("Where is corresponding word:" word.index(snip_pair[0]))
                            buffer_end = word.index(snip_pair[0])
                            snipbuffer = word[buffer_start:buffer_end]
                            sniparray.append(snipbuffer)
                            sniparrayOrigin.append([snip_pair[0]])
                            posarray.append(posbuffer)
                            buffer_start +=len(snip_pair[0])
                            sniparray.append([snip_pair[0]])
                            posarray.append([snip_pair[1]])
                            buffer = False
                            word_counter +=1
                        # 3) if snippet is NOT inside the word & no buffer
                        # if End of Sequence => Print current word
                        # if not end of sequence => Do Not Print Buffer, Buffer Start
                        elif not (snip_pair[0] in word[word_counter:]) and (buffer == False):
                            if end_of_sequence == True:
                                # print("3-1")
                                # Print Current word(=remaining part in the 'word')
                                snipbuffer = word[buffer_start:]
                                sniparray.append(snipbuffer)
                                sniparrayOrigin.append([snip_pair[0]])
                                posarray.append([snip_pair[1]])
                                word_counter +=1
                            else:
                                # print("3-2")
                                # Buffer Start!
                                # snip buffer will be formed right before when buffer is eliminated
                                # just don't change buffer_start
                                posbuffer=[]
                                posbuffer.append(snip_pair[1])
                                #sniparrayOrigin.append(snip_pair[0])
                                sniparrayOrigin.append([snip_pair[0]])
                                buffer = True
                                word_counter +=1
                        # 4) if snippet is NOT inside the word & there is buffer
                        # if End of Sequence => Print Buffer and print current word
                        # if not end of sequence => Add buffer
                        else:
                            if end_of_sequence == True:
                                # print("4-1")
                                # Print Buffer and print current word
                                # buffer_end = len(word)-1
                                snipbuffer = word[buffer_start:]
                                sniparray.append(snipbuffer)
                                #sniparrayOrigin.append(snip_pair[0])
                                posbuffer.append(snip_pair[1])
                                posarray.append(posbuffer)
                                word_counter +=1
                            else:
                                # print("4-2")
                                # Add buffer
                                posbuffer.append(snip_pair[1])
                                word_counter +=1
                    if end_of_sequence == True:
                        # Effectively a no-op: this is the last statement before
                        # the next input line is read anyway.
                        continue
"""
[2] 괄호를 Depth & Count 로 변환한 정보를 넘겨 받아, Depth 별 카운트를 누적하여 수치화 한다.
"""
def wordDicToNumDic(wordDic, levelCountArray):
    """Accumulate per-depth parenthesis counts into cumulative totals.

    :param wordDic: unused here; kept for interface compatibility with callers.
    :param levelCountArray: list where index i holds the count of depth-i nodes.
    :return: dict mapping depth index -> cumulative count up to that depth.

    Fixes: removed the unused `sumResult` variable and the redundant
    first-index special case by keeping a running total instead.
    """
    resultDic = dict()
    runningTotal = 0
    for levelIndex, levelCount in enumerate(levelCountArray):
        runningTotal += levelCount
        resultDic[levelIndex] = runningTotal
    return resultDic
"""
[3] 괄호를 Stack 에 넣어 Depth 정보와 Depth 별 Count 로 리턴. ( call by reference 로 결과 리턴 )
"""
def parenthesesChecker(lineString, stack, stackLevel , totalCount, levelCountArray, wordDic , currentLevel, m2 = None) :
    """Scan *lineString* and update parenthesis depth/count bookkeeping in place.

    The single-element lists (stackLevel, totalCount, currentLevel) act as
    call-by-reference integers.  '(' increases the depth and the count for
    that depth; ')' decreases the depth; '/' records the current depth for
    the morpheme held by match object *m2* (when given).
    """
    for symbol in lineString:
        if symbol not in ("(", ")", "/"):
            continue
        if symbol == "/":
            if m2 is not None:
                # Remember the tree depth at which this morpheme occurs; the
                # per-depth counts are accumulated separately below.
                wordDic[m2.group(1)] = currentLevel[0]
            else:
                print("[ERR]" + lineString)
            continue
        if symbol == "(":
            stack.push(symbol)
            depth = currentLevel[0]
            if depth < len(levelCountArray):
                levelCountArray[depth] += 1
            else:
                levelCountArray.append(1)
            totalCount[0] += 1
            stackLevel[0] += 1
            currentLevel[0] += 1
        else:  # ")"
            try:  # the training data contains trees with unbalanced parentheses
                stack.pop()
                currentLevel[0] -= 1
            except IndexError:
                print("parentheses error, pass")
"""
[4] CoNLL-U Format Function
1.ID: Word index, integer starting at 1 for each new sentence; may be a range for tokens with multiple words.
2.FORM: Word form or punctuation symbol.
3.LEMMA: Lemma or stem of word form.
4.UPOSTAG: Universal part-of-speech tag drawn from our revised version of the Google universal POS tags.
5.XPOSTAG: Language-specific part-of-speech tag; underscore if not available.
6.FEATS: List of morphological features from the universal feature inventory or from a defined language-specific extension; underscore if not available.
7.HEAD: Head of the current token, which is either a value of ID or zero (0).
8.DEPREL: Universal Stanford dependency relation to the HEAD (root iff HEAD = 0) or a defined language-specific subtype of one.
9.DEPS: List of secondary dependencies (head-deprel pairs).
10.MISC: Any other annotation. In this source we store the Tree depth here.
"""
# 2.FORM - as in the English data: pass the original surface text from before morphological analysis.
def getFormStr(snip):
    """Return the FORM column value (surface word form) unchanged."""
    return snip
# 3.LEMMA - as in the English data: pass the processed base-form morphemes.
def getLemmaStr(snip):
    """Return the LEMMA column value (base/stem form) unchanged."""
    return snip
# 4.UPOSTAG - the mapping rules in this Dictionary still need more entries.
def getUpostagStr(pos):
    """Map a Sejong POS tag to a Universal POS tag (UPOSTAG column).

    Unknown tags fall back to the original tag unchanged.
    TODO: the mapping table needs more entries.

    Fixes: replaced the `pos in tagDic.keys()` check + indexing (two lookups)
    with a single `dict.get` — identical behaviour.
    """
    tagDic = {
        'NNG': 'NOUN',   # general noun
        'VV': 'VERB',    # verb
        'MM': 'DET',     # determiner
        'SF': 'PUNCT',   # sentence-final punctuation
    }
    return tagDic.get(pos, pos)
# 5.XPOSTAG
def getXpostagStr(pos):
    """Return the language-specific POS tag (XPOSTAG column) unchanged."""
    return pos
# 6.FEATS
def getFeatsStr(pos):
    """Return the FEATS column; features are not produced, so always "_"."""
    return "_"
# 7.HEAD : shows the cumulative value of the current Tree Depth, taking the per-depth Count into account.
def getHeadStr(pos):
    """Return the HEAD column value (cumulative tree-depth index) unchanged."""
    return pos
# 8.DEPREL
def getDeprelStr(pos):
    """Return the DEPREL column; dependency relations are not produced, so always "_"."""
    return "_"
# 9.DEPS
def | |
+= 1
# Store the model states before and after resampling
if self.do_save or self.p_save:
self.save(before=True)
if self.do_resample: # Can turn off resampling for benchmarking
self.reweight()
self.resample()
weightdf=pd.DataFrame(list(self.weights))
self.weight_hist = pd.concat([self.weight_hist,weightdf],axis=1)
# Store the model states before and after resampling
if self.do_save or self.p_save:
self.save(before=False)
# Animate this window
if self.do_ani:
self.ani()
print("\tFinished window {}, step {} (took {}s)".format(
self.window_counter, self.time, round(float(time.time() - window_start_time), 2)))
window_start_time = time.time()
elif self.multi_step:
assert (
False), "Should not get here, if multi_step is true then the condition above should always run"
else:
pass # Don't print the message below any more
#print("\tNo more active agents. Finishing particle step")
if self.plot_save:
self.p_save()
# Return the errors and variances before and after sampling (if we're saving information)
# Useful for debugging in console:
# for i, a in enumerate(zip([x[1] for x in zip(self.before_resample, self.mean_errors) if x[0] == True],
# [x[1] for x in zip(self.before_resample, self.mean_errors) if x[0] == False])):
# print("{} - before: {}, after: {}".format(i, a[0], a[1]))
if self.do_save:
if self.mean_errors == []:
warnings.warn("For some reason the mean_errors array is empty. Cannot store errors for this run.")
return
# Return two tuples, one with the about the error before reweighting, one after
# Work out which array indices point to results before and after reweighting
before_indices = [i for i, x in enumerate(self.before_resample) if x]
after_indices = [i for i, x in enumerate(self.before_resample) if not x]
result = []
for before in [before_indices, after_indices]:
result.append([
min(np.array(self.mean_errors)[before]),
max(np.array(self.mean_errors)[before]),
np.average(np.array(self.mean_errors)[before]),
min(np.array(self.absolute_errors)[before]),
max(np.array(self.absolute_errors)[before]),
np.average(np.array(self.absolute_errors)[before]),
min(np.array(self.variances)[before]),
max(np.array(self.variances)[before]),
np.average(np.array(self.variances)[before])
])
return result
# If not saving then just return null
return
finally: # Whatever happens, make sure the multiprocessing pool is closed
self.pool.close()
    def get_external_data(self, time):
        '''
        Read all real pedestrian positions
        in one frame, and store in the
        base_model variable.

        Reads frame_<time+1>.0.dat from the directory named in
        self.external_info[0]; each row holds (agentID, x, y).  Agents
        present in the file become active (status 1); previously-active
        agents missing from the file are deactivated (status 2).
        '''
        # NOTE(review): the `time` parameter shadows the `time` module here.
        file_name = self.external_info[0] + 'frame_' + str(time+1)+ '.0.dat'
        try:
            agentID, x, y = np.loadtxt(file_name,unpack=True)
            j = 0
            # NOTE(review): assumes file rows appear in the same relative order
            # as self.base_model.agents — confirm against the data writer.
            for agent in self.base_model.agents:
                if (agent.unique_id in agentID):
                    agent.status = 1
                    agent.location = (x[j], y[j])
                    j += 1
                elif (agent.status == 1):
                    agent.status = 2
                    agent.location = (None, None)
        except TypeError:
            '''
            This error occurs when only one agent is active. In
            this case, the data is read as a float instead of an
            array.
            '''
            for agent in self.base_model.agents:
                if (agent.unique_id == agentID):
                    agent.status = 1
                    agent.location = (x, y)
                elif (agent.status == 1):
                    agent.status = 2
                    agent.location = (None, None)
        except ValueError:
            '''
            This error occurs when there is no active agent in
            the frame.
            - Deactivate all active agents.
            '''
            for agent in self.base_model.agents:
                if (agent.status == 1):
                    agent.status = 2
                    agent.location = (None, None)
        except OSError:
            '''
            This error occurs when there is no external file to
            read. It should only occur at the end of the simulation.
            - Deactivate all agent.
            '''
            for agent in self.base_model.agents:
                agent.status = 2
                agent.location = (None, None)
    def predict(self, numiter=1):
        '''
        Predict

        DESCRIPTION
        Increment time. Step the base model. Use a multiprocessing
        method to step particle models, set the particle states as
        the agent locations with some added noise, and reassign the
        locations of the particle agents using the new particle
        states. We extract the models and states from the stepped
        particles variable.

        :param numiter: The number of iterations to step
        (usually either 1, or the resample window)
        '''
        # NOTE(review): the local `time` shadows the `time` module inside this
        # method (harmless here since time.time() is not called).
        time = self.time - numiter
        if self.do_external_data:
            # Observations come from files instead of stepping the base model.
            self.get_external_data(time)
        else:
            for i in range(numiter):
                self.base_model.step()
        # Multiprocessing version kept for reference; the serial starmap below
        # is used instead (see NOTE at the pool in the class).
        # stepped_particles = self.pool.starmap(ParticleFilter.step_particle, list(zip( \
        #     range(self.number_of_particles), # Particle numbers (in integer)
        #     [m for m in self.models], # Associated Models (a Model object)
        #     [numiter] * self.number_of_particles, # Number of iterations to step each particle (an integer)
        #     [self.particle_std] * self.number_of_particles, # Particle std (for adding noise) (a float)
        #     [s.shape for s in self.states], # Shape (for adding noise) (a tuple)
        # )))
        stepped_particles = list(itertools.starmap(ParticleFilter.step_particle, list(zip( \
            range(self.number_of_particles), # Particle numbers (in integer)
            [m for m in self.models], # Associated Models (a Model object)
            [numiter] * self.number_of_particles,
            [self.particle_std] * self.number_of_particles, # Particle std (for adding noise) (a float)
            [s.shape for s in self.states], # Shape (for adding noise) (a tuple)
        ))))
        # step_particle returns (model, state) per particle.
        self.models = [stepped_particles[i][0] for i in range(len(stepped_particles))]
        self.states = np.array([stepped_particles[i][1] for i in range(len(stepped_particles))])
        self.get_state_estimate()
        return
def reweight(self):
'''
Reweight
DESCRIPTION
Add noise to the base model state to get a measured state, or
use external data to get a measured state. Calculate
the distance between the particle states and the measured base model
state and then calculate the new particle weights as 1/distance.
Add a small term to avoid dividing by 0. Normalise the weights.
'''
if self.do_external_data:
measured_state = self.base_model.get_state(sensor='location')
else:
measured_state = (self.base_model.get_state(sensor='location')
+ np.random.normal(0, self.model_std ** 2, size=self.states.shape))
distance = np.linalg.norm(self.states - measured_state, axis=1)
self.weights = 1 / (distance + 1e-9) ** 2
self.weights /= np.sum(self.weights)
return
def resample(self):
'''
Resample
DESCRIPTION
Calculate a random partition of (0,1) and then
take the cumulative sum of the particle weights.
Carry out a systematic resample of particles.
Set the new particle states and weights and then
update agent locations in particle models using
multiprocessing methods.
'''
offset_partition = ((np.arange(self.number_of_particles)
+ np.random.uniform()) / self.number_of_particles)
cumsum = np.cumsum(self.weights)
i, j = 0, 0
while i < self.number_of_particles:
if offset_partition[i] < cumsum[j]:
self.indexes[i] = j
i += 1
else:
j += 1
self.states[:] = self.states[self.indexes]
self.weights[:] = self.weights[self.indexes]
'''
In addition to updating and resampling the position of agents
(self.states), we will also resample the speed and gate_out. The
ideal would be to pass this information on self.states, but this
would require a change in many parts of the code.
'''
#for the hybrid version, the speed and the gate_out are not resampled!!!
#for i in range(self.number_of_particles):
# if (i != self.indexes[i]):
# model1 = self.models[i]
# model2 = self.models[self.indexes[i]]
# for i in range(self.base_model.pop_total):
# model1.agents[i].speed = model2.agents[i].speed
# model1.agents[i].loc_desire = model2.agents[i].loc_desire
# Could use pool.starmap here, but it's quicker to do it in a single process
self.models = list(itertools.starmap(ParticleFilter.assign_agents, list(zip(
range(self.number_of_particles), # Particle numbers (in integer)
[s for s in self.states], # States
[m for m in self.models] # Associated Models (a Model object)
))))
return
    def get_state_estimate(self):
        '''
        # Save particles location estimate.

        Computes the weighted mean and variance of the particle states over
        the currently-active agents and appends them to each active agent's
        history in the estimate model.  Values are divided by 14. —
        presumably a fixed unit/scale conversion; TODO confirm.
        '''
        # Two state entries (x, y) per agent, hence each status is repeated twice.
        active_states = [agent.status == 1 for agent in self.base_model.agents for _ in range(2)]
        if any(active_states):
            # Mean and variance state of all particles, weighted by their distance to the observation
            mean = np.average(self.states[:, active_states], weights=self.weights, axis=0)
            variance = np.average((self.states[:, active_states] - mean) ** 2, weights=self.weights, axis=0)
            i = 0  # index into the active-only mean/variance arrays (step 2 per agent)
            for agent in self.base_model.agents:
                unique_id = agent.unique_id
                if agent.status == 1:
                    self.estimate_model.agents[unique_id].history_locations.append((mean[i]/14., mean[i+1]/14.))
                    self.estimate_model.agents[unique_id].history_locations_var.append((variance[i]/14., variance[i+1]/14.))
                    i += 2
                #else:
                #    self.estimate_model.agents[unique_id].history_locations.append((None, None))
    def save(self, before: bool):
        '''
        Save

        DESCRIPTION
        Calculate number of active agents, mean, and variance
        of particles and calculate mean error between the mean
        and the true base model state.

        :param before: whether this is being called before or after resampling as this will have a big impact on
        what the errors mean (if they're after resampling then they should be low, before and they'll be high)
        '''
        self.active_agents.append(sum([agent.status == 1 for agent in self.base_model.agents]))
        # Two state entries (x, y) per agent, hence each status is repeated twice.
        active_states = [agent.status == 1 for agent in self.base_model.agents for _ in range(2)]
        if any(active_states):
            # Mean and variance state of all particles, weighted by their distance to the observation
            mean = np.average(self.states[:, active_states], weights=self.weights, axis=0)
            unweighted_mean = np.average(self.states[:, active_states], axis=0)
            variance = np.average((self.states[:, active_states] - mean) ** 2, weights=self.weights, axis=0)
            self.mean_states.append(mean)
            self.variances.append(np.average(variance))
            self.before_resample.append(before) # Whether this save reflects the errors before or after resampling
            truth_state = self.base_model.agents2state()
            # Error of the weighted estimate vs truth, and of the unweighted mean.
            self.mean_errors.append(np.linalg.norm(mean - truth_state[active_states], axis=0))
            self.absolute_errors.append(np.linalg.norm(unweighted_mean - truth_state[active_states], axis=0))
            # min(mean_errors) is returning empty. CHeck small values for agents/particles
        return
def p_save(self):
'''
Plot Save
DESCRIPTION
Plot active agents, mean error and mean variance.
'''
plt.figure(2)
plt.plot(self.active_agents)
plt.ylabel('Active agents')
plt.show()
plt.figure(3)
plt.plot(self.mean_errors)
plt.ylabel('Mean Error')
plt.show()
plt.figure(4)
plt.plot(self.variances)
plt.ylabel('Mean Variance')
plt.show()
plt.figure(5)
plt.plot(self.unique_particles)
plt.ylabel('Unique Particles')
plt.show()
print('Max mean | |
import logging
import numpy as np
import numba
import math
import sys
import time
from multiprocessing.pool import ThreadPool
from plato.backend.common import *
from plato.backend.branch_common import *
from plato.backend.stat_expression import *
from .adapter import BranchTrainingHeatMapAdapter, calc_location
logger = logging.getLogger("plato.backend.processors.branchTrainingHeatmap")
@numba.jit(nopython=True, nogil=True)
def apply_bins(last, start_bin_idx, bin_size, bins, max_event, hm):
    """Accumulate whole pre-computed bins into heatmap `hm` (in place).

    Starting at `start_bin_idx`, each bin whose full event range ends at or
    before `last` (or once `last` reaches `max_event`) is added into `hm`.

    Returns (end_bin_idx, num_bins_used): the index of the first bin NOT
    consumed and the number of bins that were added.
    """
    num_bins_used = 0
    bin_idx = start_bin_idx
    # for bin_idx in range(start_bin_idx, self.num_bins):
    while bin_idx < len(bins):
        if last >= (bin_idx + 1) * bin_size or last >= max_event:
            hm += bins[bin_idx]  # in-place elementwise accumulation
            num_bins_used += 1
            bin_idx += 1
        else:
            end_bin_idx = bin_idx
            break
    else:
        # while/else: loop exhausted without break — every bin was consumed.
        end_bin_idx = len(bins)
    return end_bin_idx, num_bins_used
# Aggregation modes for combining a stat's values across a range of events:
MODE_SUM = 'sum' # Sum up all the deltas
MODE_DIFF = 'diff' # Last value minus the first value
MODE_LAST = 'last' # Last value
MODE_FIRST = 'first' # First value
# Object that can generate a heatmap and performs some binning to optimize creation of on-demand heatmaps
# for real-time display.
# Uses an adapters to read raw data and populate heatmap. Adapter also provides transform from the
# event-dimension of this heatmap (e.g. branches) to some underlying dimension (e.g. weight updates) so
# that sampling can be done in that dimension.
#
# TODO: Bins may not be needed immediately. They take time to generate and the first heatmap requested
# can be generated faster than the bins.
#
# TODO: There is very likely a faster way to generate heatmap bins by iterating the weight-update event
# table and looking at start/end bin indices rather than getting numpy arrays for each bin.
#
@logtime(logger)
class BranchTrainingHeatMapGenerator:
# Construct a heatmap generator.
# bin_size should be at least several times as big as the number of elements in a heatmap or the binning
# optimization is lost.
#
    def __init__(self, adapter: BranchTrainingHeatMapAdapter, stat_columns, bin_size=300000):
        """Construct a heatmap generator and eagerly build its bins.

        :param adapter: data adapter supplying events/weight updates and
            heatmap geometry.
        :param stat_columns: stat names (possibly compound) to pre-bin.
        :param bin_size: events per bin; should be several times larger than
            the number of heatmap elements or the binning optimization is lost.
        """
        self.adapter = adapter
        self.bin_size = bin_size
        # Number of event bins needed to cover every event.
        self.num_bins = math.ceil(adapter.num_events / self.bin_size)
        self.bins = {} # Per stat-col
        self.coalesced_bins_right = {}
        self.wupdate_bin_size = adapter.num_heatmap_elements * 10 # Ensure iterating over 1 heatmap-sized object is much faster than the number of items in a bin.
        # NOTE(review): "wupate" is a typo kept for compatibility (used below).
        self.num_wupate_bins = math.ceil(len(adapter.pdf_wupdates) / self.wupdate_bin_size)
        # Divide into bins with even branch counts. Will look up the corresponding weight-update event table row numbers
        # referenced by the branch training events table and store the information needed to generate a heatmap for each
        # bin.
        apply_range_inputs = self.__compute_adapted_indices()
        # Transform stat names into actual stat columns and modifiers
        real_stats = self.__get_real_stats_and_xforms(stat_columns)
        for stat_name, xform in real_stats:
            logger.debug('Generating {} bins for {} {} to hold {} events'.format(self.num_bins, stat_name, xform, self.adapter.num_events))
            sys.stdout.flush()
            self.bins[(stat_name, xform)] = self.__generate_bins(apply_range_inputs, stat_name, xform)
            t = time.time()
            self.coalesced_bins_right[(stat_name, xform)] = self.__generate_coalesced_bins_right(stat_name, xform)
            d = time.time() - t
            logger.debug(f'Took {d:.2f} s to generate coalesced bins for {stat_name}')
        # Create a small cache for different masks
        self._mask_cache = NRUCache(5)
# Generate a series of bins representing the right-most (latest) values for each heatmap cell within the bin
def __generate_coalesced_bins_right(self, stat_col, xform):
bins = [None] * self.num_wupate_bins
for bin_idx in range(0, self.num_wupate_bins):
bins[bin_idx] = self.adapter.make_nan_flat_heatmap()
banks = self.adapter.pdf_wupdates.bank.values
tables = self.adapter.pdf_wupdates.table.values
rows = self.adapter.pdf_wupdates.row.values
values = self.adapter.pdf_wupdates[stat_col].values
generate_coalesced_bins_right(bins, xform, self.num_wupate_bins, self.wupdate_bin_size, len(self.adapter.pdf_wupdates), banks, tables, rows, values, self.adapter.num_banks, self.adapter.num_rows)
return bins
# Given stats of the form 'table[{table}].statname', figure out which actual stat columns
# we need to extract and how they must be transformed for implement that stat.
def __get_real_stats_and_xforms(self, stat_names):
# Extract up stat columns from compound stats
real_stats = interpret_compound_stats(stat_names, self.adapter.data_source._compound_stats)
# Adjust any stat name with the table prefix into a real column name because that is how the heatmap operates
return list(map(lambda p: (p[0].replace(self.adapter.data_source.TABLE_STAT_PREFIX, ''), p[1]), real_stats))
    def __compute_adapted_indices(self):
        """Pre-compute, per event bin, the weight-update ranges and data
        frames needed to fill that bin's heatmap.

        Returns a list of (weight_range_inclusive, weight_frame, bin_index)
        tuples, one per bin.
        """
        t = time.time()
        # Start branch index of each bin
        bin_start_list = range(0, self.adapter.num_events, self.bin_size)
        assert(len(bin_start_list) == self.num_bins)
        # Figure out ranges of weight updates based on training event table
        # NOTE: This is the slow part. Actually counting within each bucket (later) is fast
        branch_ranges_incl = list(map(lambda r: (r, r+self.bin_size-1), bin_start_list))
        weight_ranges_incl = self.adapter.batch_pre_apply_range(branch_ranges_incl)
        duration = time.time() - t
        logger.debug('Generating {} table indices for bins took {} s'.format(self.num_bins, duration))
        sys.stdout.flush()
        t = time.time()
        weight_frames = self.adapter.batch_get_weight_buckets(weight_ranges_incl)
        # Construct num_bins tuples, each containing a heatmap range, the weight-data frame, and the bin index to
        # populate for that heatmap
        bin_indices = range(0, len(bin_start_list))
        apply_range_inputs = list(zip(weight_ranges_incl, weight_frames, bin_indices))
        # Log timing
        duration = time.time() - t
        logger.debug('Getting {} table weight buckets for bins took {} s'.format(self.num_bins, duration))
        sys.stdout.flush()
        return apply_range_inputs
def __generate_bins(self, apply_range_inputs, stat_col, xform):
    """Populate the per-bin flat heatmaps for one concrete stat column.

    apply_range_inputs: list of (weight_range_inclusive, data_frame, bin_index)
                        tuples as produced by __compute_adapted_indices
    stat_col:           concrete stat column name (not compound)
    xform:              value transform applied while accumulating
    Returns a (num_bins, num_heatmap_elements) ndarray of bin heatmaps.
    """
    t = time.time()
    bins = np.zeros(shape=(self.num_bins, self.adapter.num_heatmap_elements))
    # make_bin using pre-computed data-frame
    def populate_bins(items):
        for bin_range, data_frame, bin_idx in items:
            # Weight update event indices (inclusive endpoints)
            first = bin_range[0]
            last = bin_range[1]
            # Fill this bin's row of the heatmap
            self.adapter.batch_apply_range_to_heatmap(first, last, data_frame, stat_col, bins[bin_idx,:], value_transform_func=xform)
    # NOTE: Multiprocessing doesn't work here - there is too much data to transfer. It is possible that a
    # numpy memmap solution will be better and can run in a multiprocess environment. Someone should try that.
    if 1:
        # Partition the heatmap 'apply_range' inputs into thread work groups and use a pool to generate bins
        NUM_THREADS = 8
        def partition(l, chunk_size):
            for i in range(0, len(l), chunk_size):
                yield l[i:i+chunk_size]
        # BUGFIX: guard against a zero chunk size (empty input) which made
        # range() raise ValueError
        chunk_size = max(1, math.ceil(len(apply_range_inputs) / NUM_THREADS))
        partitioned_apply_range_inputs = list(partition(apply_range_inputs, chunk_size))
        # BUGFIX: the assert message previously read '{} != {}' which did not
        # match the <= condition being checked
        assert(len(partitioned_apply_range_inputs) <= NUM_THREADS), '{} > {}'.format(len(partitioned_apply_range_inputs), NUM_THREADS)
        # BUGFIX: size the pool to match the partitioning; the default pool
        # size (cpu count) need not equal NUM_THREADS
        pool = ThreadPool(NUM_THREADS)
        pool.map(populate_bins, partitioned_apply_range_inputs)
        pool.close()
        pool.join()
    else:
        # Fallback non-threaded mode (debug only)
        populate_bins(apply_range_inputs)
    # Log timing
    duration = time.time() - t
    logger.debug('Generating {} bins for {} took {} s'.format(self.num_bins, stat_col, duration))
    sys.stdout.flush()
    return bins
# Debugging utility. Accepts only concrete stat names (not compound)
def _check_bin_sum(self, stat_col):
    """Return the grand total over all cached bins for an untransformed stat column."""
    return sum(b.sum() for b in self.bins[(stat_col, NoTransform)])
# Get a 2d Heatmap over range of branch index: first (inclusive) to last (inclusive)
# where these are indices of BRANCHES (from ddf_branches)
# `allow_bins` lets the heatmap rely on the `bins` optimization. Disable this only for debugging
# We could optimize this to only include changes at the endpoints from last update rather than re-iterating, but its
# already super-fast.
# NOTE(review): `branch_predictor_filtering={}` is a mutable default argument; it
# appears to be read-only here but should be confirmed and changed to None-sentinel.
def generate_2d_heatmap(self, first_unit, last_unit, units, stat_col, allow_bins=True, allow_threads=False, branch_predictor_filtering={}, mode=MODE_SUM):
    # Convert first/last to branch indices because that's what the heatmap operates on. It is ok that
    # interpolation within this range is done by branch indices because the heatmap is just summing things up
    # anyway.
    unit_stat = self.adapter.get_stat_for_units(units)
    first_row, last_row = self.adapter.lookup_rows_for_values(first_unit, last_unit, unit_stat)
    # Interpret stat: only a single (possibly compound) stat name is accepted
    if type(stat_col) != str:
        raise ValueError('stat_col was not a string')
    # Transform stat names into actual stat columns and modifiers
    real_stat_cols = self.__get_real_stats_and_xforms([stat_col])
    # Bounds checking on the resolved row range
    if not (first_row >= 0):
        raise ValueError('first/last not in range of data')
    if not (last_row >= first_row):
        raise ValueError('first/last not in range of data')
    if not (last_row <= self.adapter.num_events):
        raise ValueError('first/last not in range of data')
    # Create a filter mask for the selected branches if required
    brnfilt = make_branch_filter(branch_predictor_filtering)
    # TODO: Move this logic into adapter
    if brnfilt is not None:
        # Filtering removes individual branches, which is only meaningful when summing
        if mode not in [MODE_SUM]:
            raise ValueError(f'Cannot generate a heatmap with a branch filter using a mode other than "sum" because individual branches must be filtered out')
        if brnfilt not in self._mask_cache:
            # TODO: This cache may need a mutex if this is acutally multithreaded
            # Make the entire mask here and now. The will be the last time it is needed
            filter_mask, npoints = brnfilt.make_mask(0, len(self.adapter.pdf_brns), self.adapter.pdf_brns, self.adapter.pdf_wupdates)
            self._mask_cache[brnfilt] = filter_mask
        else:
            filter_mask = self._mask_cache[brnfilt]
    else:
        filter_mask = None
    # Generate one flat heatmap per concrete stat column, then combine them
    # back into the (possibly compound) requested stat
    intermediate = []
    for col, xform in real_stat_cols:
        intermediate.append(self._generate_2d_heatmap(first_row, last_row, col, xform, filter_mask, allow_bins=allow_bins, mode=mode))
    hm = assemble_compound_stat(stat_col, self.adapter.data_source._compound_stats, *intermediate)
    # Convert flat heatmap to a matrix
    return self.adapter.reshape_flat_heatmap(hm)
# Generate a 2d heatmap
def _generate_2d_heatmap(self, first_row, last_row, stat_col, xform=NoTransform, filter_mask=None, allow_bins=True, mode=MODE_SUM):
# Create the empty flat heatmap to which bins will be added
hm = self.adapter.make_zero_flat_heatmap()
if mode == MODE_SUM:
return self._generate_2d_sum_heatmap(hm, first_row, last_row, stat_col, xform, filter_mask, allow_bins)
elif mode == MODE_DIFF:
return self._generate_2d_diff_heatmap(hm, first_row, last_row, stat_col, xform, allow_bins)
elif mode == MODE_LAST:
return self._generate_2d_last_heatmap(hm, first_row, last_row, stat_col, xform, allow_bins)
elif mode == MODE_FIRST:
return self._generate_2d_first_heatmap(hm, first_row, last_row, stat_col, xform, allow_bins)
| |
<filename>spearmint/kernels/kernel_utils.py
# -*- coding: utf-8 -*-
# Spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the “Software”). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by <NAME>, <NAME>, and
# <NAME> at Harvard University, <NAME> at the
# University of Toronto (“Toronto”), and <NAME> at the
# Université de Sherbrooke (“Sherbrooke”), which assigned its rights
# in the Software to Socpra Sciences et Génie
# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (“Harvard”).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (“Agreement”). Absent your agreement
# to the terms below, you (the “End User”) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# User’s internal, non-commercial research and academic use at End
# User’s academic or not-for-profit research institution
# (“Institution”) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property of Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# Requests for use of the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: <EMAIL>
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End User’s Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End User’s use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End User’s
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# <NAME>, <NAME> and <NAME>
# Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# <NAME>, <NAME> and <NAME>
# Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# <NAME>, <NAME>, <NAME> and <NAME>
# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology <NAME>, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar | |
import asyncio
import json
import time
import logging
from datetime import timedelta
from functools import partial
from dataclasses import dataclass
import async_timeout
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from aiohttp import ClientSession
from homeassistant.const import *
from homeassistant.core import callback
from homeassistant.components import persistent_notification
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import aiohttp_client, discovery
from homeassistant.helpers.entity import Entity, ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.storage import Store
from homeassistant.util import color
from miio.exceptions import DeviceException
from .deps.miio_new import MiotDevice
import copy
import math
from collections import OrderedDict
from .deps.const import (
DOMAIN,
CONF_UPDATE_INSTANT,
CONF_MAPPING,
CONF_CONTROL_PARAMS,
CONF_CLOUD,
CONF_MODEL,
ATTR_STATE_VALUE,
ATTR_MODEL,
ATTR_FIRMWARE_VERSION,
ATTR_HARDWARE_VERSION,
SCHEMA,
SERVICE_SCHEMA,
SERVICE_TO_METHOD,
MAP,
DUMMY_IP,
DUMMY_TOKEN,
)
from .deps.xiaomi_cloud_new import *
from .deps.xiaomi_cloud_new import MiCloud
from .deps.miot_coordinator import MiotCloudCoordinator
from asyncio.exceptions import CancelledError
# Module-level logger for this integration.
_LOGGER = logging.getLogger(__name__)
# Default polling interval for entities that poll (see should_poll).
SCAN_INTERVAL = timedelta(seconds=60)
# YAML configuration schema: host/token plus the MIoT property mapping and
# control parameters; extra keys are tolerated.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_NAME): cv.string,
                vol.Required(CONF_HOST): cv.string,
                vol.Required(CONF_TOKEN): cv.string,
                vol.Required(CONF_MAPPING): vol.All(),
                vol.Required(CONF_CONTROL_PARAMS): vol.All(),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
# Delays (in polling cycles/seconds) applied via _delay_update after sending
# a cloud command, so stale state is not read back immediately.
SHORT_DELAY = 3
LONG_DELAY = 5
# Interval between repeated notifications, in seconds — presumably throttles
# offline notices; TODO confirm against its use sites.
NOTIFY_INTERVAL = 60 * 10
# Whether to raise a persistent notification when a cloud device goes offline
# (when False, only a warning is logged).
OFFLINE_NOTIFY = False
# Experimental cloud-push update mode; when enabled, cloud devices stop polling.
UPDATE_BETA_FLAG = False
async def async_setup(hass, hassconfig):
    """Setup Component."""
    hass.data.setdefault(DOMAIN, {})
    config = hassconfig.get(DOMAIN) or {}
    domain_data = hass.data[DOMAIN]
    domain_data['config'] = config
    # Initialize every shared bucket this integration uses, without
    # clobbering anything a previous setup already stored.
    for key, default in (
        ('entities', {}),
        ('configs', {}),
        ('miot_main_entity', {}),
        ('micloud_devices', []),
        ('cloud_instance_list', []),
        ('event_fetcher_list', []),
        ('add_handler', {}),
    ):
        domain_data.setdefault(key, default)
    component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)
    domain_data['component'] = component
    await component.async_setup(config)
    return True
async def async_setup_entry(hass, entry):
    """Set up a device (or forward a MiCloud account entry) from a config entry.

    Builds the runtime config dict from the entry data, upgrades legacy
    single-type entries in memory, and forwards setup to each platform.
    """
    hass.data.setdefault(DOMAIN, {})
    # entry for MiCloud login
    if 'username' in entry.data:
        return await _setup_micloud_entry(hass, entry)
    config = {}
    for item in [CONF_NAME,
                 CONF_HOST,
                 CONF_TOKEN,
                 CONF_CLOUD,
                 'cloud_write',
                 'devtype',
                 'ett_id_migrated',
                 'cloud_device_info',
                 ]:
        config[item] = entry.data.get(item)
    # Mapping and control params are stored as JSON strings in the entry
    for item in [CONF_MAPPING,
                 CONF_CONTROL_PARAMS,
                 ]:
        config[item] = json.loads(entry.data.get(item))
    if type(entry.data.get('devtype')) == str:
        # Legacy single-type entry: notify the user and wrap mapping/params
        # under the device type key to match the new multi-type layout.
        persistent_notification.async_create(
            hass,
            f"感谢您选择本插件!\n"
            f"本插件最近的更新,支持了“一个设备多个类型”的配置方式,\n"
            f"您的 **{entry.data.get(CONF_NAME)}** 配置项是旧版本格式。\n"
            f"建议您重新添加设备,确认设备正常后删除旧设备,\n"
            f"即可消除此提示。\n",
            "Xiaomi MIoT")
        config[CONF_MAPPING] = {entry.data.get('devtype'): config[CONF_MAPPING]}
        config[CONF_CONTROL_PARAMS] = {entry.data.get('devtype'): config[CONF_CONTROL_PARAMS]}
    config['config_entry'] = entry
    entry_id = entry.entry_id
    unique_id = entry.unique_id
    hass.data[DOMAIN]['configs'][entry_id] = config
    hass.data[DOMAIN]['configs'][unique_id] = config
    if type(entry.data.get('devtype')) == str:
        hass.async_create_task(hass.config_entries.async_forward_entry_setup(entry, entry.data.get('devtype')))
    else:
        # BUGFIX: copy before extending. `devtype_new += [...]` mutated the
        # list object stored inside entry.data, corrupting the config entry
        # (an extra 'binary_sensor' accumulated across reloads).
        devtype_new = list(entry.data.get('devtype'))
        if 'sensor' in devtype_new and 'binary_sensor' not in devtype_new:
            devtype_new += ['binary_sensor']
        for t in devtype_new:
            hass.async_create_task(hass.config_entries.async_forward_entry_setup(entry, t))
    return True
async def async_unload_entry(hass, entry):
    """Unload a config entry: drop a MiCloud account or a single device's platforms."""
    if 'username' in entry.data:
        # TODO
        try:
            hass.data[DOMAIN]['micloud_devices'] = []
            for item in hass.data[DOMAIN]['cloud_instance_list']:
                if item['username'] == entry.data['username']:
                    # BUGFIX: the old code did `del item`, which only unbound
                    # the loop variable and left the record in the list.
                    hass.data[DOMAIN]['cloud_instance_list'].remove(item)
                    return True
            return False
        except Exception as ex:
            _LOGGER.error(ex)
            return False
    else:
        entry_id = entry.entry_id
        unique_id = entry.unique_id
        hass.data[DOMAIN]['configs'].pop(entry_id)
        if unique_id:
            hass.data[DOMAIN]['configs'].pop(unique_id)
        # Forward the unload to every platform the entry was set up on
        if type(entry.data.get('devtype')) == str:
            hass.async_create_task(hass.config_entries.async_forward_entry_unload(entry, entry.data.get('devtype')))
        else:
            for t in entry.data.get('devtype'):
                hass.async_create_task(hass.config_entries.async_forward_entry_unload(entry, t))
        return True
async def _setup_micloud_entry(hass, config_entry):
    """Thanks to @AlexxIT

    Log in to MiCloud (reusing saved auth when possible), register the cloud
    instance + coordinator, and load the device list from the cloud or from
    the local .storage fallback.
    """
    data: dict = config_entry.data.copy()
    server_location = data.get('server_location') or 'cn'
    session = aiohttp_client.async_create_clientsession(hass)
    cloud = MiCloud(session)
    cloud.svr = server_location
    if 'service_token' in data:
        # load devices with saved MiCloud auth
        cloud.auth = data
        devices = await cloud.get_total_devices([server_location])
    else:
        devices = None
    if devices is None:
        _LOGGER.debug(f"Login to MiCloud for {config_entry.title}")
        if await cloud.login(data['username'], data['password']):
            # update MiCloud auth in .storage
            data.update(cloud.auth)
            hass.config_entries.async_update_entry(config_entry, data=data)
            devices = await cloud.get_total_devices([server_location])
            if devices is None:
                _LOGGER.error("Can't load devices from MiCloud")
        else:
            _LOGGER.error("Can't login to MiCloud")
    if userid := cloud.auth.get('user_id'):
        # TODO don't allow login the same account twice
        hass.data[DOMAIN]['cloud_instance_list'].append({
            "user_id": userid,
            "username": data['username'],
            "cloud_instance": cloud,
            "coordinator": MiotCloudCoordinator(hass, cloud)
        })
    # load devices from or save to .storage
    filename = sanitize_filename(data['username'])
    # BUGFIX: use the sanitized per-account filename. It was computed but
    # never used — every account shared one hard-coded store path, so one
    # account's device list overwrote another's.
    store = Store(hass, 1, f"{DOMAIN}/{filename}.json")
    if devices is None:
        _LOGGER.debug("Loading a list of devices from the .storage")
        devices = await store.async_load()
    else:
        _LOGGER.debug(f"Loaded from MiCloud {len(devices)} devices")
        await store.async_save(devices)
    if devices is None:
        _LOGGER.debug("No devices in .storage")
        return False
    # TODO: Think about a bunch of devices
    if 'micloud_devices' not in hass.data[DOMAIN]:
        hass.data[DOMAIN]['micloud_devices'] = devices
    else:
        hass.data[DOMAIN]['micloud_devices'] += devices
    return True
class GenericMiotDevice(Entity):
"""通用 MiOT 设备"""
def __init__(self, device, config, device_info, hass = None, mi_type = None):
    """Initialize the entity.

    device: miio device object used for local (LAN) control
    config: config dict from the entry or YAML (mapping, control params, cloud info)
    device_info: object exposing model / mac_address / firmware / hardware fields
    hass: Home Assistant instance
    mi_type: this entity's device-type key when one device exposes several types
    """
    def setup_cloud(self, hass) -> tuple:
        # Return (MiCloud instance, coordinator) for this account, reusing a
        # previously registered pair when the user_id matches.
        try:
            return next((cloud['cloud_instance'], cloud['coordinator']) for cloud in hass.data[DOMAIN]['cloud_instance_list']
                        if cloud['user_id'] == self._cloud.get('userId'))
        except StopIteration:
            # No instance registered yet for this account: create one from the
            # credentials stored in the entity's cloud config.
            _LOGGER.info(f"Setting up xiaomi account for {self._name}...")
            mc = MiCloud(
                aiohttp_client.async_create_clientsession(self._hass)
            )
            mc.login_by_credientals(
                self._cloud.get('userId'),
                self._cloud.get('serviceToken'),
                self._cloud.get('ssecurity')
            )
            co = MiotCloudCoordinator(hass, mc)
            hass.data[DOMAIN]['cloud_instance_list'].append({
                "user_id": self._cloud.get('userId'),
                "username": None,  # not a UI-configured account, so no username
                "cloud_instance": mc,
                "coordinator": co
            })
            return (mc, co)
    self._device = device
    self._mi_type = mi_type
    # Property keys are namespaced with the first 10 chars of the device type.
    self._did_prefix = f"{self._mi_type[:10]}_" if self._mi_type else ""
    self._mapping = config.get(CONF_MAPPING)
    if type(self._mapping) == str:
        # legacy single-device config: mapping stored as a JSON string
        self._mapping = json.loads(self._mapping)
    elif type(self._mapping) == OrderedDict:
        # YAML config: already a mapping, use as-is
        pass
    else:
        # new-style dict keyed by device type: flatten to "{type[:10]}_{prop}"
        mappingnew = {}
        for k,v in self._mapping.items():
            for kk,vv in v.items():
                mappingnew[f"{k[:10]}_{kk}"] = vv
        self._mapping = mappingnew
    self._ctrl_params = config.get(CONF_CONTROL_PARAMS) or {}
    if type(self._ctrl_params) == str:
        # legacy single-device config: params stored as a JSON string
        self._ctrl_params = json.loads(self._ctrl_params)
    if not type(self._ctrl_params) == OrderedDict:
        # flatten per-type control params the same way as the mapping
        paramsnew = {}
        for k,v in self._ctrl_params.items():
            for kk,vv in v.items():
                paramsnew[f"{k[:10]}_{kk}"] = vv
        self._ctrl_params_new = paramsnew
    else:
        self._ctrl_params_new = self._ctrl_params
    if mi_type:
        # keep only this entity's own type section in _ctrl_params
        self._ctrl_params = self._ctrl_params[mi_type]
    self._name = config.get(CONF_NAME)
    self._update_instant = config.get(CONF_UPDATE_INSTANT)
    # When True the next poll is skipped (state already known from a command).
    self._skip_update = False
    # Countdown before trusting polled state after a cloud command (see LONG_DELAY).
    self._delay_update = 0
    self._model = device_info.model
    # Old-style ids: "model-mac-name". Migrated ids: model suffix plus either
    # the last 6 chars of the cloud did, or the MAC without colons.
    self._unique_id = "{}-{}-{}".format(
        device_info.model, device_info.mac_address, self._name
    ) if not config.get('ett_id_migrated') else (
        f"{device_info.model.split('.')[-1]}-cloud-{config.get(CONF_CLOUD)['did'][-6:]}" if config.get(CONF_CLOUD) else
        f"{device_info.model.split('.')[-1]}-{device_info.mac_address.replace(':','')}"
    )
    if config.get('ett_id_migrated'):
        self._entity_id = self._unique_id
        self.entity_id = f"{DOMAIN}.{self._entity_id}"
    else:
        self._entity_id = None
    self._hass = hass
    self._cloud = config.get(CONF_CLOUD)
    self._cloud_write = config.get('cloud_write')
    self._cloud_instance = None
    self.coordinator = None
    self._body_for_update_cloud = None
    if self._cloud:
        c = setup_cloud(self, hass)
        self._cloud_instance = c[0]
        self.coordinator = c[1]
        self.coordinator.add_fixed_by_mapping(self._cloud, self._mapping)
        # Pre-build the JSON body used to poll properties from the cloud;
        # action entries (with 'aiid') are not pollable and are skipped.
        data1 = {}
        data1['datasource'] = 1
        data1['params'] = []
        for value in self._mapping.values():
            if 'aiid' not in value:
                data1['params'].append({**{'did':self._cloud.get("did")},**value})
        self._body_for_update_cloud = json.dumps(data1,separators=(',', ':'))
    self._fail_count = 0
    # None = unknown (no update yet); True/False once state is known.
    self._available = None
    self._state = None
    self._assumed_state = False
    self._state_attrs = {
        ATTR_MODEL: self._model,
        ATTR_FIRMWARE_VERSION: device_info.firmware_version,
        ATTR_HARDWARE_VERSION: device_info.hardware_version,
    }
    self._last_notified = 0
    self._err4004_notified = False
    self._callbacks = set()
    # NOTE(review): services are registered per entity, so the last entity
    # constructed owns the handler — confirm this is intended.
    for service in (SERVICE_TO_METHOD):
        schema = SERVICE_TO_METHOD[service].get("schema", SERVICE_SCHEMA)
        hass.services.async_register(
            DOMAIN, service, self.async_service_handler, schema=schema
        )
@property
def should_poll(self):
    """Poll the miio device unless the beta cloud-push mode handles this entity."""
    return not (UPDATE_BETA_FLAG and self._cloud)
@property
def unique_id(self):
    """Return an unique ID."""
    # Computed once in __init__ from model + MAC/did (+ name for legacy ids).
    return self._unique_id
@property
def name(self):
    """Return the name of this entity, if any."""
    # Taken from CONF_NAME in the entry/YAML config.
    return self._name
# @property
# def icon(self):
# """Return the icon to use for device if any."""
# return self._icon
@property
def available(self):
    """Return true when state is known."""
    # May be None before the first update (falsy, i.e. treated as unavailable).
    return self._available
@property
def device_state_attributes(self):
    """Return the state attributes of the device."""
    # Seeded with model/firmware/hardware in __init__; updated as state changes.
    return self._state_attrs
@property
def device_info(self):
    """Return device-registry information for this device."""
    # Manufacturer is the first dotted segment of the model, e.g. "xiaomi.fan.x" -> "Xiaomi"
    manufacturer = (self._model or 'Xiaomi').split('.', 1)[0].capitalize()
    info = {
        'identifiers': {(DOMAIN, self._unique_id)},
        'name': self._name,
        'model': self._model,
        'manufacturer': manufacturer,
        'sw_version': self._state_attrs.get(ATTR_FIRMWARE_VERSION),
    }
    return info
@property
def did_prefix(self):
    # Namespace prefix ("{mi_type[:10]}_") used for this entity's property keys.
    return self._did_prefix
async def _try_command(self, mask_error, func, *args, **kwargs):
    """Call a device command handling error messages."""
    try:
        result = await self.hass.async_add_job(partial(func, *args, **kwargs))
        _LOGGER.info("Response received from %s: %s", self._name, result)
        # Workaround: actions report a single dict containing 'aiid'; ordinary
        # commands report a list — check the appropriate 'code' either way.
        code = result['code'] if 'aiid' in result else result[0]['code']
        return code == 0
    except DeviceException as exc:
        _LOGGER.error(mask_error, exc)
        return False
async def set_property_new(self, field = "", params = "", multiparams:list = []):
try:
if not self._cloud_write:
if not multiparams:
result = await self._try_command(
f"Setting property for {self._name} failed.",
self._device.set_property,
field,
params,
)
if result:
if field in self._state_attrs:
self._state_attrs[field] = params
return True
else:
result = await self._try_command(
f"Setting property for {self._name} failed.",
self._device.send,
"set_properties",
multiparams,
)
if result:
return True
else:
_LOGGER.info(f"Control {self._name} by cloud.")
if not multiparams:
did = self._cloud.get("did")
spiid = self._mapping.get(field) or {}
if not (spiid := self._mapping.get(field)):
_LOGGER.error(f"Cannot control {self._name} by cloud because can't find {field} siid and piid from {self._mapping}")
return False
p = {**{'did': did, 'value': params},**spiid}
p = {'params': [p]}
pp = json.dumps(p,separators=(',', ':'))
_LOGGER.info(f"Control {self._name} params: {pp}")
results = await self._cloud_instance.set_props(pp, self._cloud.get("server_location"))
if results:
if r := results.get('result'):
for item in r:
if item['code'] == 1:
self._delay_update = LONG_DELAY
elif item['code'] == -704042011:
if self._available == True or self._available == None:
if OFFLINE_NOTIFY:
persistent_notification.async_create(
self._hass,
f"请注意,云端接入设备 **{self._name}** 已离线。",
"Xiaomi MIoT - 设备离线")
else:
_LOGGER.warn(f"请注意,云端接入设备 **{self._name}** 已离线。")
self._available = False
self._skip_update = True
return False
elif item['code'] != 0:
_LOGGER.error(f"Control {self._name} by cloud failed: {r}")
return False
if field in self._state_attrs:
self._state_attrs[field] = params
self._skip_update = True
return True
return False
else:
did = self._cloud.get("did")
p = multiparams
for item in p:
item['did'] = did
pp = {'params': p}
ppp = json.dumps(pp,separators=(',', ':'))
_LOGGER.info(f"Control {self._name} params: {ppp}")
results = await self._cloud_instance.set_props(ppp, self._cloud.get("server_location"))
if results:
if r := results.get('result'):
for item in r:
if item['code'] == 1:
self._delay_update = LONG_DELAY
elif item['code'] == -704042011:
if self._available == | |
# not all required parameters are in voevent xml file
# return id if it is already in the database
return self.get_id_existing(table, cols, value)
except psycopg2.IntegrityError:
# rollback changes
self.connection.rollback()
# re-raise exception
raise
def get_authortime(self):
    '''
    Get time voevent file was authored from mapping dictionary.

    :returns: datetime string of format '%Y-%m-%d %H:%M:%S'
    :rtype: str
    '''
    # Look for an 'authortime' note first in the radio observations params,
    # then in the radio measured params; fall back to the insert time.
    for section in ('radio_observations_params_notes',
                    'radio_measured_params_notes'):
        for note in self.mapping.get(section):
            if note.get('type') == 'authortime':
                return note.get('value')
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def get_id_existing(self, table, cols, value):
    '''
    Get id of an existing entry in database table.

    :param table: name of database table
    :param cols: cols in database table that need to be added
    :param value: values to be set for the cols
    :type table: str
    :type cols: list
    :type value: list
    :returns: id of existing entry in table
    :rtype: int
    '''
    # SECURITY FIX: values originate from external VOEvent XML, so they are
    # passed as query parameters instead of being interpolated into the SQL
    # string. Table names come from the fixed set below and stay formatted.
    if table == 'authors':
        # authors table should have unique ivorn
        sql = "select id from {} WHERE ivorn = %s".format(table)
        params = (value[cols == 'ivorn'][0],)
    elif table == 'frbs':
        # frbs table should have unique name
        sql = "select id from {} WHERE name = %s".format(table)
        params = (value[cols == 'name'][0],)
    elif table == 'observations':
        # observation table should have an unique combination of
        # frb_id, telescope, utc
        sql = ("select id from {} WHERE frb_id = %s AND "
               "telescope = %s AND utc = %s").format(table)
        params = (value[cols == 'frb_id'][0],
                  value[cols == 'telescope'][0],
                  value[cols == 'utc'][0])
    elif table == 'radio_observations_params':
        # rop table should have an unique combination of obs_id, settings_id
        sql = ("select id from {} WHERE obs_id = %s AND "
               "settings_id = %s").format(table)
        params = (value[cols == 'obs_id'][0],
                  value[cols == 'settings_id'][0])
    elif table == 'radio_measured_params':
        # voevent_ivorn must be unique
        sql = "select id from {} WHERE voevent_ivorn = %s".format(table)
        params = (value[cols == 'voevent_ivorn'][0],)
    else:
        # BUGFIX: error message previously read "Unable database table"
        raise psycopg2.IntegrityError(
            "Unknown database table: {}".format(table))
    # get the id
    self.cursor.execute(sql, params)
    return_id = self.cursor.fetchone()
    if not return_id:
        # Could not get the id from the database; re-raise IntegrityError
        raise psycopg2.IntegrityError(
            "Unable to get id from database: {}".format(sql))
    return return_id['id']
def update_database(self, table, cols, value):
    '''
    If type supersedes we need to update existing table values,
    else do nothing. This method executes the sql statement.

    :param table: name of database table
    :param cols: cols in database table that need to be added
    :param value: values to be set for the cols
    :type table: str
    :type cols: list
    :type value: list
    '''
    # remove cols with empty values
    cols = nparray([i for i, j in zip(cols, value) if j])
    value = nparray([j for j in value if j]).flatten()
    if self.event_type != 'supersedes':
        # not a superseding event: nothing to update
        return
    col_sql, parameters, value = self.define_sql_params(cols, value)
    # BUGFIX: the old code left `sql` undefined for unknown tables and relied
    # on catching NameError from cursor.execute to make it a no-op. Map each
    # updatable table to the attribute holding its row id instead; unknown
    # tables remain a deliberate no-op.
    id_attr = {'frbs': 'frb_id',
               'observations': 'obs_id',
               'radio_observations_params': 'rop_id',
               'radio_measured_params': 'rmp_id'}.get(table)
    if id_attr is None:
        return
    sql = "update {} SET ({}) = {} WHERE id='{}'".format(
        table, col_sql, parameters, getattr(self, id_attr))
    # execute sql statement
    self.cursor.execute(sql, tuple(value))
def add_VOEvent_to_FRBCat(self):
'''
Add a VOEvent to the FRBCat database. This is the main
method that iterates over all tables and calls the
respective method for each table.
Finally, the database changes are committed and the
database connection is closed.
'''
# define database tables in the order they need to be filled
tables = ['authors', 'frbs', 'observations',
'radio_observations_params',
'radio_observations_params_notes',
'radio_measured_params', 'radio_measured_params_notes']
# get time voevent file was authored
self.authortime = self.get_authortime()
# loop over defined tables
for table in tables:
# extract cols that have values
cols = [item.get('column') for item in self.mapping.get(table) if
item.get('value') is not None]
values = [item.get('value') for item in self.mapping.get(table) if
item.get('value') is not None]
if table in ['radio_measured_params_notes',
'radio_observations_params_notes']:
notes = [item.get('note') for item in self.mapping.get(table)
if item.get('note') is not None]
if table == 'authors':
self.add_authors(table, cols, values)
try:
# set authorname, needed for notes
self.authorname = values[cols.index('contact_name')]
except ValueError:
self.authorname = 'FRBCat insert'
if table == 'frbs':
self.add_frbs(table, cols, values)
if table == 'frbs_notes':
self.add_frbs_notes(table, cols, values)
if table == 'observations':
self.add_observations(table, cols, values)
# create first part of settings_id
self.settings_id1 = str(values[cols == 'telescope'][0]
) + ';' + str(values[cols == 'utc'][0])
if table == 'observations_notes':
self.add_observations_notes(table, cols, values)
if table == 'radio_observations_params':
self.add_radio_observations_params(table, cols, values)
if table == 'radio_observations_params_notes':
self.add_radio_observations_params_notes(table, cols, notes)
if table == 'radio_measured_params':
self.add_radio_measured_params(table, cols, values)
if (self.event_exists and (self.event_type != 'supersedes')):
# event exists already and is not of type supersedes
break # don't want to add already existing event
if table == 'radio_measured_params_notes':
self.add_radio_measured_params_notes(table, cols, notes)
if (self.event_exists and (self.event_type != 'supersedes')):
# event is already in database, rollback
self.connection.rollback()
else:
dbase.commitToDB(self.connection, self.cursor)
dbase.closeDBConnection(self.connection, self.cursor)
    def retract(self, voevent_cited):
        '''
        Retract event with the ivorn given by voevent_cited.
        Retracting event should set detected/verified to False in
        observations table. Database changes are committed and
        database connection is closed.

        :param voevent_cited: event ivorn to be retracted
        :type voevent_cited: str
        '''
        # walk rmp -> rop -> observations to find the observation row that
        # belongs to the cited voevent ivorn
        # NOTE(review): the ivorn is interpolated directly into the SQL
        # string; consider a parameterized query — confirm input is trusted
        sql = ("select o.id from radio_measured_params rmp join " +
               "radio_observations_params rop ON rmp.rop_id=rop.id join " +
               "observations o on rop.obs_id=o.id join frbs on " +
               "o.frb_id=frbs.id join authors on frbs.author_id=authors.id " +
               "where voevent_ivorn='{}'").format(voevent_cited)
        try:
            # execute sql statement
            self.cursor.execute(sql)
        except NameError:
            # NOTE(review): sql is always bound here, so this except can
            # never trigger — confirm before removing
            pass
        # get id in the observations table
        obs_id = self.cursor.fetchone()
        if obs_id:
            # observation is indeed in the database
            col_sql = ', '.join(map(str, ['detected', 'verified']))
            parameters = ', '.join(map(str, [False, False]))
            sql = "update {} SET ({}) = ({}) WHERE id='{}'".format(
                'observations', col_sql, parameters, obs_id[0])
            try:
                # execute sql statement
                self.cursor.execute(sql)
            except NameError:
                pass
        # commit changes to database
        dbase.commitToDB(self.connection, self.cursor)
        # close database connection
        dbase.closeDBConnection(self.connection, self.cursor)
@staticmethod
def define_sql_params(cols, value):
'''
Format sql params for the sql command from the cols and values.
:param cols: cols in the db table the sql command needs to operate on
:param value: values of the col variables
:type cols: numpy.ndarray
:type value: numpy.ndarray
:returns: col_sql (formatted cols array as a comma-seperated string),
parameters (string of format (%s,%s,...), equal to the number of
elements in cols),
value (values of the col objects, converted to flattened
regular numpy array)
:rtype: str, str, numpy.ndarray
'''
# define sql params
col_sql = ', '.join(map(str, cols))
parameters = '(' + ','.join(['%s' for i in value]) + ')'
value = [x.text if isinstance(
x, lxml.objectify.StringElement) else x for x in value]
value = nparray(value)
return col_sql, parameters, value
class FRBCat_create:
'''
Class module that creates a VOEvent file from the FRBCat
database.
:param connection: database connection
:param cursor: database cursor object
:param frbs_id: id in frbs table of FRB to be extracted
:type connection: psycopg2.extensions.connection
:type cursor: psycopg2.extras.DictCursor
:type frbs_id: int
'''
    def __init__(self, connection, cursor, frbs_id):
        # store the database handles and target frb id for later queries
        self.connection = connection
        self.cursor = cursor
        self.frbs_id = frbs_id
def create_VOEvent_from_FRBCat(self):
'''
Create a VOEvent from the FRBCat database. Method gets
all information from the database, sets and output name
for the VOEvent and calls the create_xml method.
'''
sql = """select *, radio_measured_params_notes.note as rmp_note,
radio_observations_params_notes.note as rop_note,
observations_notes.note as obs_note
FROM frbs
INNER JOIN authors
ON frbs.author_id=authors.id
INNER JOIN observations
ON observations.frb_id=frbs.id
INNER JOIN radio_observations_params
ON radio_observations_params.obs_id=observations.id
INNER JOIN radio_measured_params
ON radio_measured_params.rop_id=radio_observations_params.id
LEFT JOIN radio_measured_params_notes
ON radio_measured_params_notes.rmp_id=radio_measured_params.id
LEFT JOIN radio_observations_params_notes
ON radio_observations_params_notes.rop_id=
radio_observations_params.id
LEFT JOIN observations_notes
ON observations_notes.obs_id=observations.id
WHERE frbs.id in ({})""".format(self.frbs_id)
self.cursor.execute(sql)
while True:
# extract next event from cursor
self.event = self.cursor.fetchone()
if not self.event:
# no more events to process
break
# set the xml name
try:
# more than 1 event for this frb
counter += 1
xmlname = self.event['name'] + '_' + str(counter) | |
<filename>tests/unit_tests/test_set_dropin.py
import pytest
from unittestmock import UnitTestMock
from cykhash import Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet
import cykhash
# maps each cykhash set class to the suffix used in the names of the
# module-level helper functions (e.g. "aredisjoint_int64")
SUFFIX={Int64Set : "int64",
        Int32Set : "int32",
        Float64Set : "float64",
        Float32Set : "float32",
        PyObjectSet : "pyobject"}
def pick_fun(name, set_type):
    """Fetch the cykhash module-level helper `name` for the given set type."""
    suffix = SUFFIX[set_type]
    return getattr(cykhash, name + "_" + suffix)
@pytest.mark.parametrize(
    "set_type",
    [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet]
)
class TestSetDropIn(UnitTestMock):
    """Basic drop-in behavior shared with the builtin set: construction
    from an iterable, clear, str-representation, remove and pop."""
    def test_init_from_iter(self, set_type):
        # the duplicated 1 must be collapsed
        s=set_type([1,2,3,1])
        self.assertEqual(len(s), 3)
        self.assertTrue(1 in s)
        self.assertTrue(2 in s)
        self.assertTrue(3 in s)
    def test_clear(self, set_type):
        s=set_type([1,2,3,1])
        s.clear()
        self.assertEqual(len(s), 0)
        # a cleared set must still be usable afterwards
        s.add(5)
        s.update([3,4,5,6])
        self.assertEqual(s, set_type([3,4,5,6]))
        s.clear()
        self.assertEqual(len(s), 0)
    def test_str(self, set_type):
        # element order is unspecified, so only membership and braces
        # are checked
        s=set_type([1,2,3,1])
        ss = str(s)
        self.assertTrue("1" in ss)
        self.assertTrue("2" in ss)
        self.assertTrue("3" in ss)
        self.assertTrue(ss.startswith("{"))
        self.assertTrue(ss.endswith("}"))
    def test_remove_yes(self, set_type):
        s=set_type([1,2])
        s.remove(1)
        self.assertEqual(s,set_type([2]))
        s.remove(2)
        self.assertEqual(s,set_type([]))
    def test_remove_no(self, set_type):
        # removing a missing element raises KeyError carrying the element
        s=set_type([1,2])
        with pytest.raises(KeyError) as context:
            s.remove(3)
        self.assertEqual(3, context.value.args[0])
    def test_pop_one(self, set_type):
        s=set_type([1])
        el=s.pop()
        self.assertEqual(s,set_type([]))
        self.assertEqual(el,1)
    def test_pop_all(self, set_type):
        s=set_type([1,2,3])
        new_s={s.pop(), s.pop(), s.pop()}
        self.assertEqual(s,set_type([]))
        self.assertEqual(new_s,{1,2,3})
    def test_pop_empty(self, set_type):
        # error message matches the builtin set's pop()
        s=set_type([])
        with pytest.raises(KeyError) as context:
            s.pop()
        self.assertEqual("pop from empty set", context.value.args[0])
def test_pyobject_same_object_pop():
    # pop() must hand back the identical stored object, not an equal copy
    stored = float("3333.2")
    retrieved = PyObjectSet([stored]).pop()
    assert retrieved is stored
@pytest.mark.parametrize(
    "set_type",
    [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet]
)
class TestIsDisjoint(UnitTestMock):
    """Covers the free function aredisjoint_* and the isdisjoint method,
    for both set and plain-iterable arguments."""
    def test_aredisjoint_with_none(self, set_type):
        # None is rejected like any non-iterable, on either side
        s=set_type([1,2,3,1])
        fun=pick_fun("aredisjoint", set_type)
        with pytest.raises(TypeError) as context:
            fun(None,s)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
        with pytest.raises(TypeError) as context:
            fun(s,None)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
        with pytest.raises(TypeError) as context:
            fun(None,None)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
    def test_aredisjoint_with_empty(self, set_type):
        # an empty set is disjoint with anything, including itself
        empty1=set_type()
        empty2=set_type()
        non_empty=set_type(range(3))
        aredisjoint=pick_fun("aredisjoint", set_type)
        self.assertEqual(aredisjoint(empty1, non_empty), True)
        self.assertEqual(aredisjoint(non_empty, empty2), True)
        self.assertEqual(aredisjoint(empty1, empty2), True)
    def test_aredisjoint_yes(self, set_type):
        a=set_type([1,2,3,1])
        b=set_type([4,55])
        fun=pick_fun("aredisjoint", set_type)
        self.assertEqual(fun(a,b), True)
        self.assertEqual(fun(b,a), True)
    def test_aredisjoint_no(self, set_type):
        # 333 is shared, so the sets are not disjoint
        a=set_type([1,2,3,333,1])
        b=set_type([4,55,4,5,6,7,333])
        fun=pick_fun("aredisjoint", set_type)
        self.assertEqual(fun(a,b), False)
        self.assertEqual(fun(b,a), False)
    def test_isdisjoint_yes_set(self, set_type):
        a=set_type([1,2,3,1])
        b=set_type([4,55])
        self.assertEqual(a.isdisjoint(b), True)
        self.assertEqual(b.isdisjoint(a), True)
    def test_isdisjoint_no_set(self, set_type):
        a=set_type([1,2,3,333,1])
        b=set_type([4,55,4,5,6,7,333])
        self.assertEqual(a.isdisjoint(b), False)
        self.assertEqual(b.isdisjoint(a), False)
    def test_isdisjoint_yes_iter(self, set_type):
        # the method also accepts a plain iterable
        a=set_type([1,2,3,1])
        b=[4,55]
        self.assertEqual(a.isdisjoint(b), True)
    def test_isdisjoint_no_iter(self, set_type):
        a=set_type([1,2,3,333,1])
        b=[4,55,4,5,6,7,333]
        self.assertEqual(a.isdisjoint(b), False)
@pytest.mark.parametrize(
    "set_type",
    [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet]
)
class TestIsSubsetIsSuperset(UnitTestMock):
    """Covers issubset/issuperset (function, method and iterable forms)
    and the rich comparison operators <, <=, >, >=, ==."""
    def test_with_none(self, set_type):
        s=set_type([1,2,3,1])
        fun=pick_fun("issubset", set_type)
        with pytest.raises(TypeError) as context:
            fun(None,s)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
        with pytest.raises(TypeError) as context:
            fun(s,None)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
        with pytest.raises(TypeError) as context:
            fun(None,None)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
    def test_with_empty(self, set_type):
        # empty set is a subset of everything; fun(a, b) == b.issubset(a)
        a=set_type([1,2,3,1])
        b=set_type([])
        fun=pick_fun("issubset", set_type)
        self.assertEqual(fun(a,a), True)
        self.assertEqual(fun(a,b), True)
        self.assertEqual(fun(b,a), False)
        self.assertEqual(fun(b,b), True)
    def test_yes(self, set_type):
        a=set_type([1,2,3,1])
        b=set_type([1,3])
        fun=pick_fun("issubset", set_type)
        self.assertEqual(fun(a,b), True)
        self.assertEqual(fun(b,a), False)
    def test_no(self, set_type):
        a=set_type([1,2,3,1])
        b=set_type([4])
        fun=pick_fun("issubset", set_type)
        self.assertEqual(fun(a,b), False)
        self.assertEqual(fun(b,a), False)
    def test_issuperset_yes(self, set_type):
        a=set_type([1,2,3,1])
        b=set_type([1,3])
        self.assertEqual(a.issuperset(b), True)
        self.assertEqual(b.issuperset(a), False)
    def test_issuperset_no(self, set_type):
        a=set_type([1,2,3,1])
        b=set_type([4])
        self.assertEqual(a.issuperset(b), False)
        self.assertEqual(b.issuperset(a), False)
    def test_issuperset_yes_iter(self, set_type):
        a=set_type([1,2,3,1])
        b=[1,3]
        self.assertEqual(a.issuperset(b), True)
    def test_issuperset_no_iter(self, set_type):
        a=set_type([1,2,3,1])
        b=[4]
        self.assertEqual(a.issuperset(b), False)
    def test_issubset_yes_iter(self, set_type):
        a=set_type([1,2])
        b=[1,3,2]
        self.assertEqual(a.issubset(b), True)
    def test_issubset_no_iter(self, set_type):
        a=set_type([1,2])
        b=[1,1,3]
        self.assertEqual(a.issubset(b), False)
    def test_issubset_yes(self, set_type):
        a=set_type([1,2])
        b=set_type([1,3,2])
        self.assertEqual(a.issubset(b), True)
        self.assertEqual(b.issubset(a), False)
    def test_issubset_no(self, set_type):
        a=set_type([1,2])
        b=set_type([1,1,3])
        self.assertEqual(a.issubset(b), False)
        self.assertEqual(b.issubset(a), False)
    def test_compare_self(self, set_type):
        a=set_type([1,2])
        self.assertEqual(a<=a, True)
        self.assertEqual(a>=a, True)
        self.assertEqual(a<a, False)
        self.assertEqual(a>a, False)
    def test_compare_no_relation(self, set_type):
        # neither set contains the other: all four comparisons are False
        a=set_type([1,2])
        b=set_type([1,3])
        self.assertEqual(a<=b, False)
        self.assertEqual(a>=b, False)
        self.assertEqual(a<b, False)
        self.assertEqual(a>b, False)
    def test_compare_real_subset(self, set_type):
        a=set_type([1,2,3])
        b=set_type([1,3])
        self.assertEqual(a<=b, False)
        self.assertEqual(a>=b, True)
        self.assertEqual(a<b, False)
        self.assertEqual(a>b, True)
    def test_compare_same(self, set_type):
        a=set_type([1,3])
        b=set_type([1,3])
        self.assertEqual(a<=b, True)
        self.assertEqual(a>=b, True)
        self.assertEqual(a<b, False)
        self.assertEqual(a>b, False)
    def test_compare_equal_yes(self, set_type):
        a=set_type([2,5,7,8,1,3])
        b=set_type([1,3,7,7,7,7,7,2,5,8,8,8,8,8,8])
        self.assertEqual(a==b, True)
        self.assertEqual(a==b, True)
    def test_compare_equal_no(self, set_type):
        # BUGFIX: this was a second def of test_compare_equal_yes, which
        # shadowed the test above so the equality-True case never ran;
        # renamed to match its assertions (b lacks element 1 -> not equal).
        a=set_type([2,5,7,8,1,3])
        b=set_type([3,7,7,7,7,7,2,5,8,8,8,8,8,8])
        self.assertEqual(a==b, False)
        self.assertEqual(a==b, False)
@pytest.mark.parametrize(
    "set_type",
    [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet]
)
class TestCopy(UnitTestMock):
    """Covers the free function copy_* and the copy() method."""
    def test_with_none(self, set_type):
        # the free function maps None to None instead of raising
        s=set_type([1,2,3,1])
        copy=pick_fun("copy", set_type)
        self.assertTrue(copy(None) is None)
    def test_with_empty(self, set_type):
        a=set_type([])
        copy=pick_fun("copy", set_type)
        self.assertEqual(len(copy(a)), 0)
    def test_small(self, set_type):
        a=set_type([1,2,3,1])
        copy=pick_fun("copy", set_type)
        self.assertEqual(copy(a)==a, True)
    def test_large(self, set_type):
        # larger input exercises reallocation/rehashing paths
        a=set_type(range(33,10000,3))
        copy=pick_fun("copy", set_type)
        self.assertEqual(copy(a)==a, True)
    def test_large_method(self, set_type):
        a=set_type(range(33,10000,3))
        self.assertEqual(a.copy()==a, True)
@pytest.mark.parametrize(
    "set_type",
    [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet]
)
class TestUpdate(UnitTestMock):
    """Covers the free function update_*, the update() method,
    the |= operator and union()."""
    def test_with_none(self, set_type):
        s=set_type([1,2,3,1])
        update=pick_fun("update", set_type)
        with pytest.raises(TypeError) as context:
            update(None,s)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
        with pytest.raises(TypeError) as context:
            update(s,None)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
        with pytest.raises(TypeError) as context:
            update(None,None)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
    def test_some_common(self, set_type):
        # only the first argument is mutated
        a=set_type([1,2,3,4])
        b=set_type([2,1,2,5])
        c=b.copy()
        update=pick_fun("update", set_type)
        update(a,b)
        self.assertEqual(a, set_type([1,2,3,4,5]))
        self.assertEqual(b, c)
    def test_with_itself(self, set_type):
        # updating a set with itself must be a no-op
        a=set_type([1,2,3,1])
        b=a.copy()
        update=pick_fun("update", set_type)
        update(a,a)
        self.assertEqual(a, b)
    def test_with_disjunct(self, set_type):
        a=set_type(range(50))
        b=set_type(range(50,100))
        update=pick_fun("update", set_type)
        update(a,b)
        self.assertEqual(a, set_type(range(100)))
    def test_method_with_set(self, set_type):
        # BUGFIX: this method was defined twice with identical bodies;
        # the duplicate shadowed this one and has been removed.
        a=set_type(range(50))
        b=set_type(range(100))
        a.update(b)
        self.assertEqual(a, set_type(range(100)))
    def test_method_with_iterator(self, set_type):
        a=set_type(range(50))
        a.update(range(60))
        self.assertEqual(a, set_type(range(60)))
    def test_ior(self, set_type):
        a=set_type(range(50))
        a|=set_type(range(60))
        self.assertEqual(a, set_type(range(60)))
    def test_union(self, set_type):
        # union accepts a mix of iterables and sets and leaves a untouched
        a=set_type(range(30))
        a_copy = a.copy()
        b=a.union(range(30,40), set_type(range(40,50)), range(50,60))
        self.assertEqual(b, set_type(range(60)))
        self.assertEqual(a, a_copy)
    def test_union_empty(self, set_type):
        # union() without arguments must not mutate the receiver
        a=set_type(range(30))
        a.union()
        self.assertEqual(a, set_type(range(30)))
    def test_or(self, set_type):
        a=set_type(range(30))
        b=set_type(range(30,40))
        c=set_type(range(40,50))
        d=a|b|c
        self.assertEqual(d, set_type(range(50)))
        self.assertEqual(a, set_type(range(30)))
        self.assertEqual(b, set_type(range(30,40)))
        self.assertEqual(c, set_type(range(40,50)))
@pytest.mark.parametrize(
    "set_type",
    [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet]
)
class TestSwap(UnitTestMock):
    """Covers the free function swap_*, which exchanges the contents of
    two sets in place."""
    def test_with_none(self, set_type):
        s=set_type([1,2,3,1])
        swap=pick_fun("swap", set_type)
        with pytest.raises(TypeError) as context:
            swap(None,s)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
        with pytest.raises(TypeError) as context:
            swap(s,None)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
        with pytest.raises(TypeError) as context:
            swap(None,None)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
    def test_some_common(self, set_type):
        # swapping twice restores the original contents
        a=set_type([1,2,3,4])
        b=set_type([5,2,4])
        a_copy=a.copy()
        b_copy=b.copy()
        swap=pick_fun("swap", set_type)
        swap(a,b)
        self.assertEqual(a, b_copy)
        self.assertEqual(b, a_copy)
        swap(a,b)
        self.assertEqual(a, a_copy)
        self.assertEqual(b, b_copy)
@pytest.mark.parametrize(
    "set_type",
    [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet]
)
class TestIntersect(UnitTestMock):
    """Covers the free function intersect_*, intersection_update(),
    intersection() and the & / &= operators."""
    def test_with_none(self, set_type):
        s=set_type([1,2,3,1])
        intersect=pick_fun("intersect", set_type)
        with pytest.raises(TypeError) as context:
            intersect(None,s)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
        with pytest.raises(TypeError) as context:
            intersect(s,None)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
        with pytest.raises(TypeError) as context:
            intersect(None,None)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
    def test_small(self, set_type):
        # result is symmetric and operands stay untouched
        a=set_type([1,2,3,4])
        b=set_type([5,2,4])
        a_copy=a.copy()
        b_copy=b.copy()
        intersect=pick_fun("intersect", set_type)
        c=intersect(a,b)
        self.assertEqual(a, a_copy)
        self.assertEqual(b, b_copy)
        self.assertEqual(c, set_type([2,4]))
        c=intersect(b,a)
        self.assertEqual(a, a_copy)
        self.assertEqual(b, b_copy)
        self.assertEqual(c, set_type([2,4]))
    def test_disjunct(self, set_type):
        a=set_type([1,3,5,7,9])
        b=set_type([2,2,4,6,8,10])
        a_copy=a.copy()
        b_copy=b.copy()
        intersect=pick_fun("intersect", set_type)
        c=intersect(a,b)
        self.assertEqual(a, a_copy)
        self.assertEqual(b, b_copy)
        self.assertEqual(c, set_type())
        c=intersect(b,a)
        self.assertEqual(a, a_copy)
        self.assertEqual(b, b_copy)
        self.assertEqual(c, set_type([]))
    def test_empty(self, set_type):
        a=set_type([])
        b=set_type([])
        c=set_type([2,2,4,6,8,10])
        intersect=pick_fun("intersect", set_type)
        d=intersect(a,b)
        self.assertEqual(len(d), 0)
        d=intersect(c,b)
        self.assertEqual(len(d), 0)
        d=intersect(a,c)
        self.assertEqual(len(d), 0)
    def test_intersection_update(self, set_type):
        # only the receiver is mutated
        a=set_type([1,2,3,4,5,6,7,8])
        b=set_type([2,4,6,8,10,12])
        b_copy = b.copy()
        a.intersection_update(b)
        self.assertEqual(a, set_type([2,4,6,8]))
        self.assertEqual(b, b_copy)
    def test_intersection_update_iter(self, set_type):
        a=set_type([1,2,3,4,5,6,7,8])
        a.intersection_update([2,4,6,8,10,12])
        self.assertEqual(a, set_type([2,4,6,8]))
    def test_empty_update(self, set_type):
        a=set_type([1,2,3,4,5,6,7,8])
        b=set_type([])
        a.intersection_update(b)
        self.assertEqual(len(a), 0)
    def test_empty_update_iter(self, set_type):
        a=set_type([1,2,3,4,5,6,7,8])
        a.intersection_update([])
        self.assertEqual(a, set_type())
    def test_iadd(self, set_type):
        # exercises the in-place &= operator
        a=set_type([1,2,3,4,5,6,7,8])
        b=set_type([1,104,3])
        a&=b
        self.assertEqual(a, set_type([1,3]))
    def test_add(self, set_type):
        # exercises the binary & operator; operands stay untouched
        a=set_type([1,2,3,4,5,6,7,8])
        b=set_type([1,104,3])
        a_copy=a.copy()
        b_copy=b.copy()
        c=a&b
        self.assertEqual(c, set_type([1,3]))
        self.assertEqual(a, a_copy)
        self.assertEqual(b, b_copy)
    def test_intersection(self, set_type):
        # intersection accepts a mix of iterables and sets
        a=set_type([1,2,3,4,5,6,7,8])
        a_copy=a.copy()
        c=a.intersection([1,2,3,4,5,6], set_type([1,2,3,4,5]), [1,2,3])
        self.assertEqual(c, set_type([1,2,3]))
        self.assertEqual(a, a_copy)
@pytest.mark.parametrize(
    "set_type",
    [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet]
)
class TestDifference(UnitTestMock):
    """Covers the free function difference_*, difference_update(),
    difference() and the - / -= operators."""
    def test_with_none(self, set_type):
        s=set_type([1,2,3,1])
        difference=pick_fun("difference", set_type)
        with pytest.raises(TypeError) as context:
            difference(None,s)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
        with pytest.raises(TypeError) as context:
            difference(s,None)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
        with pytest.raises(TypeError) as context:
            difference(None,None)
        self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
    def test_small(self, set_type):
        # difference is not symmetric; operands stay untouched
        a=set_type([1,2,3,4])
        b=set_type([5,2,4])
        a_copy=a.copy()
        b_copy=b.copy()
        difference=pick_fun("difference", set_type)
        c=difference(a,b)
        self.assertEqual(a, a_copy)
        self.assertEqual(b, b_copy)
        self.assertEqual(c, set_type([1,3]))
        c=difference(b,a)
        self.assertEqual(a, a_copy)
        self.assertEqual(b, b_copy)
        self.assertEqual(c, set_type([5]))
    def test_disjunct(self, set_type):
        # with disjoint operands the difference equals the first operand
        a=set_type([1,3,5,7,9])
        b=set_type([2,2,4,6,8,10])
        a_copy=a.copy()
        b_copy=b.copy()
        difference=pick_fun("difference", set_type)
        c=difference(a,b)
        self.assertEqual(a, a_copy)
        self.assertEqual(b, b_copy)
        self.assertEqual(c, a)
        c=difference(b,a)
        self.assertEqual(a, a_copy)
        self.assertEqual(b, b_copy)
        self.assertEqual(c, b)
    def test_empty(self, set_type):
        a=set_type([])
        b=set_type([])
        c=set_type([2,2,4,6,8,10])
        difference=pick_fun("difference", set_type)
        d=difference(a,b)
        self.assertEqual(len(d), 0)
        d=difference(c,b)
        self.assertEqual(c, d)
        d=difference(a,c)
        self.assertEqual(len(d), 0)
    def test_method_update(self, set_type):
        # only the receiver is mutated
        a=set_type([1,2,3,4])
        b=set_type([5,2,4])
        b_copy=b.copy()
        a.difference_update(b)
        self.assertEqual(b, b_copy)
        self.assertEqual(a, set_type([1,3]))
    def test_method_update2(self, set_type):
        a=set_type([1,2,3,4])
        b=set_type([5,2,4])
        a_copy=a.copy()
        b.difference_update(a)
        self.assertEqual(a, a_copy)
        self.assertEqual(b, set_type([5]))
    def test_method_update_from_iter(self, set_type):
        a=set_type([1,2,3,4])
        a.difference_update([5,2,4])
        self.assertEqual(a, set_type([1,3]))
    def test_method_update_from_iter2(self, set_type):
        a=set_type(range(1000))
        a.difference_update(range(0,1000,2))
        self.assertEqual(a, set_type(range(1,1000,2)))
    def test_method_update_from_iter3(self, set_type):
        # many repetitions of the same element in the iterable are fine
        a=set_type([1,2])
        a.difference_update([1]*10000)
        self.assertEqual(a, set_type([2]))
    def test_sub(self, set_type):
        a=set_type([0,222,3,444,5])
        b=set_type([222,3,4])
        a_copy=a.copy()
        b_copy=b.copy()
        c=a-b
        self.assertEqual(a, a_copy)
        self.assertEqual(b, b_copy)
        self.assertEqual(c, set_type([0,444,5]))
        c=b-a
        self.assertEqual(a, a_copy)
        self.assertEqual(b, b_copy)
        self.assertEqual(c, set_type([4]))
    def test_sub2(self, set_type):
        # chained subtraction with itself ends up empty
        a=set_type([1,2,3,4])
        a_copy=a.copy()
        b=a-a-a-a
        self.assertEqual(a, a_copy)
        self.assertEqual(b, set_type())
    def test_isub(self, set_type):
        a=set_type([0,222,3,444,5])
        b=set_type([222,3,4])
        b_copy=b.copy()
        a-=b
        self.assertEqual(b, b_copy)
        self.assertEqual(a, set_type([0,444,5]))
    def test_isub2(self, set_type):
        a=set_type([1,2,3,4])
        a-=a
        self.assertEqual(a, set_type())
    def test_difference_method(self, set_type):
        # difference accepts a mix of iterables and sets
        a=set_type(range(10000))
        a_copy=a.copy()
        b=a.difference(range(5000), set_type(range(5000,10000,2)), range(1,9999,2))
        self.assertEqual(b, set_type([9999]))
        self.assertEqual(a, a_copy)
@pytest.mark.parametrize(
"set_type",
[Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet]
)
class TestSymmetricDifference(UnitTestMock):
def test_with_none(self, set_type):
s=set_type([1,2,3,1])
symmetric_difference=pick_fun("symmetric_difference", set_type)
with pytest.raises(TypeError) as context:
symmetric_difference(None,s)
self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
with pytest.raises(TypeError) as context:
symmetric_difference(s,None)
self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
with pytest.raises(TypeError) as context:
symmetric_difference(None,None)
self.assertTrue("'NoneType' object is not iterable" in context.value.args[0])
def test_small(self, set_type):
a=set_type([1,2,3,4])
b=set_type([5,2,4])
a_copy=a.copy()
b_copy=b.copy()
symmetric_difference=pick_fun("symmetric_difference", set_type)
c=symmetric_difference(a,b)
self.assertEqual(a, a_copy)
self.assertEqual(b, b_copy)
self.assertEqual(c, set_type([1,3,5]))
c=symmetric_difference(b,a)
self.assertEqual(a, a_copy)
self.assertEqual(b, b_copy)
self.assertEqual(c, set_type([1,3,5]))
def test_disjunct(self, set_type):
a=set_type([1,3,5,7,9])
b=set_type([2,2,4,6,8,10])
a_copy=a.copy()
b_copy=b.copy()
symmetric_difference=pick_fun("symmetric_difference", set_type)
c=symmetric_difference(a,b)
self.assertEqual(a, a_copy)
self.assertEqual(b, b_copy)
self.assertEqual(c, a|b)
c=symmetric_difference(b,a)
self.assertEqual(a, a_copy)
self.assertEqual(b, b_copy)
self.assertEqual(c, a|b)
def test_empty(self, set_type):
a=set_type([])
b=set_type([])
c=set_type([2,2,4,6,8,10])
symmetric_difference=pick_fun("symmetric_difference", set_type)
d=symmetric_difference(a,b)
self.assertEqual(len(d), 0)
d=symmetric_difference(c,b)
| |
value > a:
return a
else:
return value
def pivotScalar(scalar, pivot):
    # reflect `scalar` to the opposite side of `pivot` on the number line
    # (see the sanity checks below)
    offset = pivot - scalar
    return pivot + offset

if __debug__ and __name__ == '__main__':
    # quick sanity checks, executed only when this module is run directly
    assert pivotScalar(1, 0) == -1
    assert pivotScalar(-1, 0) == 1
    assert pivotScalar(3, 5) == 7
    assert pivotScalar(10, 1) == -8
def weightedChoice(choiceList, rng=random.random, sum=None):
    """given a list of (weight, item) pairs, chooses an item based on the
    weights. rng must return 0..1. if you happen to have the sum of the
    weights, pass it in 'sum'."""
    # TODO: add support for dicts
    totalWeight = sum
    if totalWeight is None:
        totalWeight = 0.
        for weight, _item in choiceList:
            totalWeight += weight
    # pick a point in [0, totalWeight) and walk until we pass it
    accum = rng() * totalWeight
    for weight, item in choiceList:
        accum -= weight
        if accum <= 0.:
            return item
    # floating-point error (or an oversized 'sum') kept accum positive;
    # fall back to the last item.
    return item
def randFloat(a, b=0., rng=random.random):
    """returns a random float in [a, b]
    call with single argument to generate random float between arg and zero
    """
    # delegates to lerp(a, b, t) — presumably linear interpolation, defined
    # elsewhere in this module; confirm signature if refactoring
    return lerp(a, b, rng())
def normalDistrib(a, b, gauss=random.gauss):
    """
    NOTE: assumes a < b

    Returns a random number in [a, b] drawn from a gaussian whose mean is
    avg(a, b) and whose standard deviation is (b - a) / 6, so ~99.7% of the
    raw samples already land inside [a, b] (the empirical 68-95-99.7 rule:
    almost all of a normal distribution lies within 3 standard deviations
    of the mean, and (b - a) / 6 puts 3 standard deviations on each side).

    The remaining ~0.3% of samples that fall outside [a, b] are simply
    re-drawn, which folds that tail mass back onto the curve inside the
    interval.

    Reference:
    http://www-stat.stanford.edu/~naras/jsm/NormalDensity/NormalDensity.html
    """
    mean = (a + b) * .5
    stdDev = (b - a) / 6.
    while True:
        sample = gauss(mean, stdDev)
        if a <= sample <= b:
            return sample
def weightedRand(valDict, rng=random.random):
    """
    pass in a dictionary with a selection -> weight mapping. Eg.
    {"Choice 1": 10,
     "Choice 2": 30,
     "bear": 100}

    -Weights need not add up to any particular value.
    -The actual selection will be returned.
    """
    # list() keeps keys/values indexable (dict views are not); identical
    # behavior on Python 2, required on Python 3
    selections = list(valDict.keys())
    weights = list(valDict.values())
    totalWeight = 0
    for weight in weights:
        totalWeight += weight
    # get a random value between 0 and the total of the weights
    randomWeight = rng() * totalWeight
    # find the index that corresponds with this weight
    for i in range(len(weights)):
        totalWeight -= weights[i]
        if totalWeight <= randomWeight:
            return selections[i]
    # BUGFIX: the original 'assert True' could never fire; the loop falling
    # through is the error condition this assert was meant to catch
    assert False, "Should never get here"
    return selections[-1]
def randUint31(rng=random.random):
    """returns a random integer in [0..2^31).
    rng must return float in [0..1]"""
    maxVal = 0x7FFFFFFF
    return int(rng() * maxVal)
def randInt32(rng=random.random):
    """returns a random integer in [-2147483648..2147483647].
    rng must return float in [0..1]
    """
    # first draw picks the magnitude, second draw picks the sign
    magnitude = int(rng() * 0x7FFFFFFF)
    if rng() < .5:
        return -magnitude
    return magnitude
def randUint32(rng=random.random):
    """returns a random integer in [0..2^32).
    rng must return float in [0..1]"""
    # NOTE: 'long' and the trailing-L literal are Python 2 only; this block
    # will not run under Python 3 as written
    return long(rng() * 0xFFFFFFFFL)
class SerialNumGen:
    """generates serial numbers"""
    def __init__(self, start=None):
        # begin the sequence at `start` (default 0); the counter holds the
        # previously-issued value, so next() pre-increments
        base = 0 if start is None else start
        self.__counter = base - 1
    def next(self):
        self.__counter += 1
        return self.__counter
class SerialMaskedGen(SerialNumGen):
    """serial numbers, masked down to a fixed bit pattern"""
    def __init__(self, mask, start=None):
        # remember the mask, then defer to the base initializer
        self._mask = mask
        SerialNumGen.__init__(self, start)
    def next(self):
        return SerialNumGen.next(self) & self._mask
# single module-wide generator backing serialNum()/uniqueName()
_serialGen = SerialNumGen()

def serialNum():
    """Return the next value from the module-wide serial generator."""
    return _serialGen.next()

def uniqueName(name):
    """Append a module-unique serial number to `name`."""
    return '%s-%s' % (name, _serialGen.next())
class EnumIter:
def __init__(self, enum):
self._values = enum._stringTable.keys()
self._index = 0
def __iter__(self):
return self
def next(self):
if self._index >= len(self._values):
raise StopIteration
self._index += 1
return self._values[self._index-1]
class Enum:
"""Pass in list of strings or string of comma-separated strings.
Items are accessible as instance.item, and are assigned unique,
increasing integer values. Pass in integer for 'start' to override
starting value.
Example:
>>> colors = Enum('red, green, blue')
>>> colors.red
0
>>> colors.green
1
>>> colors.blue
2
>>> colors.getString(colors.red)
'red'
"""
if __debug__:
# chars that cannot appear within an item string.
InvalidChars = string.whitespace
def _checkValidIdentifier(item):
invalidChars = string.whitespace+string.punctuation
invalidChars = invalidChars.replace('_','')
invalidFirstChars = invalidChars+string.digits
if item[0] in invalidFirstChars:
raise SyntaxError, ("Enum '%s' contains invalid first char" %
item)
if not disjoint(item, invalidChars):
for char in item:
if char in invalidChars:
raise SyntaxError, (
"Enum\n'%s'\ncontains illegal char '%s'" %
(item, char))
return 1
_checkValidIdentifier = staticmethod(_checkValidIdentifier)
def __init__(self, items, start=0):
if type(items) == types.StringType:
items = items.split(',')
self._stringTable = {}
# make sure we don't overwrite an existing element of the class
assert self._checkExistingMembers(items)
assert uniqueElements(items)
i = start
for item in items:
# remove leading/trailing whitespace
item = string.strip(item)
# is there anything left?
if len(item) == 0:
continue
# make sure there are no invalid characters
assert Enum._checkValidIdentifier(item)
self.__dict__[item] = i
self._stringTable[i] = item
i += 1
def __iter__(self):
return EnumIter(self)
def hasString(self, string):
return string in set(self._stringTable.values())
def fromString(self, string):
if self.hasString(string):
return self.__dict__[string]
# throw an error
{}[string]
def getString(self, value):
return self._stringTable[value]
def __contains__(self, value):
return value in self._stringTable
def __len__(self):
return len(self._stringTable)
def copyTo(self, obj):
# copies all members onto obj
for name, value in self._stringTable:
setattr(obj, name, value)
if __debug__:
def _checkExistingMembers(self, items):
for item in items:
if hasattr(self, item):
return 0
return 1
############################################################
# class: Singleton
# Purpose: This provides a base metaclass for all classes
# that require one and only one instance.
#
# Example: class mySingleton:
# __metaclass__ = PythonUtil.Singleton
# def __init__(self, ...):
# ...
#
# Note: This class is based on Python's New-Style Class
# design. An error will occur if a defined class
# attempts to inherit from a Classic-Style Class only,
# ie: class myClassX:
# def __init__(self, ...):
# ...
#
# class myNewClassX(myClassX):
# __metaclass__ = PythonUtil.Singleton
# def __init__(self, ...):
# myClassX.__init__(self, ...)
# ...
#
# This causes problems because myNewClassX is a
# New-Style class that inherits from only a
# Classic-Style base class. There are two
# simple ways to resolve this issue.
#
# First, if possible, make myClassX a
# New-Style class by inheriting from
# object. IE: class myClassX(object):
#
# If for some reason that is not an option, make
# myNewClassX inherit from object and myClassX.
# IE: class myNewClassX(object, myClassX):
############################################################
class Singleton(type):
    """Metaclass that gives each class using it at most one instance.

    The first instantiation constructs and caches the object; every
    later call returns the cached instance.
    """
    def __init__(cls, name, bases, dic):
        super(Singleton, cls).__init__(name, bases, dic)
        # per-class cache slot for the single instance
        cls.instance=None
    def __call__(cls, *args, **kw):
        # construct lazily, on the first instantiation only
        if cls.instance is None:
            cls.instance=super(Singleton, cls).__call__(*args, **kw)
        return cls.instance
class SingletonError(ValueError):
    """Raised to indicate an inappropriate value for a Singleton."""
def printListEnumGen(l):
    """Generator that prints each item of *l* prefixed by its zero-padded
    index, yielding None after each line so callers can interleave the
    printing with other work.

    Fixed to be Python-2/3 compatible: the Py2-only 'print' statement and
    'xrange' are replaced with the print() call form and range(); the
    local no longer shadows the builtin 'format'.
    """
    # number of digits needed to zero-pad the largest index
    digits = 0
    n = len(l)
    while n > 0:
        digits += 1
        n //= 10
    fmt = '%0' + '%s' % digits + 'i:%s'
    for i in range(len(l)):
        print(fmt % (i, l[i]))
        yield None
def printListEnum(l):
    """Print the whole list immediately by exhausting the generator."""
    for _ in printListEnumGen(l):
        pass
# base class for all Panda C++ objects
# libdtoolconfig doesn't seem to have this, grab it off of PandaNode
dtoolSuperBase = None
def _getDtoolSuperBase():
    """Locate the DTOOL_SUPER_BASE wrapper type by climbing PandaNode's
    base-class chain and cache it in the module global 'dtoolSuperBase'."""
    global dtoolSuperBase
    from panda3d.core import PandaNode
    # three levels up PandaNode's __bases__ chain is the common C++ root
    dtoolSuperBase = PandaNode('').__class__.__bases__[0].__bases__[0].__bases__[0]
    # accepted spellings differ across Panda3D builds (note the historical
    # 'SUPPER' typo shipped in some versions)
    assert repr(dtoolSuperBase) == "<type 'libdtoolconfig.DTOOL_SUPER_BASE111'>" \
        or repr(dtoolSuperBase) == "<type 'libdtoolconfig.DTOOL_SUPPER_BASE111'>" \
        or repr(dtoolSuperBase) == "<type 'dtoolconfig.DTOOL_SUPER_BASE111'>"
safeReprNotify = None
def _getSafeReprNotify():
    """Create (once) and return the 'safeRepr' notify/logging category,
    caching it in the module global 'safeReprNotify'."""
    global safeReprNotify
    from direct.directnotify.DirectNotifyGlobal import directNotify
    safeReprNotify = directNotify.newCategory("safeRepr")
    return safeReprNotify
def safeRepr(obj):
    """repr() that never propagates an exception.

    For Panda C++ wrapper objects the call is logged first, because repr
    of a deleted C++ object can crash the process.
    """
    global dtoolSuperBase
    if dtoolSuperBase is None:
        _getDtoolSuperBase()
    global safeReprNotify
    if safeReprNotify is None:
        _getSafeReprNotify()
    if isinstance(obj, dtoolSuperBase):
        # repr of C++ object could crash, particularly if the object has been deleted
        # log that we're calling repr
        safeReprNotify.info('calling repr on instance of %s.%s' % (obj.__class__.__module__, obj.__class__.__name__))
        sys.stdout.flush()
    try:
        return repr(obj)
    except Exception:
        # was a bare 'except:', which would also swallow
        # KeyboardInterrupt / SystemExit
        return '<** FAILED REPR OF %s instance at %s **>' % (obj.__class__.__name__, hex(id(obj)))
def safeReprTypeOnFail(obj):
    """Like safeRepr, but returns the object's type (instead of calling
    repr) for Panda C++ wrapper objects, where repr may crash.
    """
    global dtoolSuperBase
    if dtoolSuperBase is None:
        _getDtoolSuperBase()
    global safeReprNotify
    if safeReprNotify is None:
        _getSafeReprNotify()
    if isinstance(obj, dtoolSuperBase):
        return type(obj)
    try:
        return repr(obj)
    except Exception:
        # was a bare 'except:', which would also swallow
        # KeyboardInterrupt / SystemExit
        return '<** FAILED REPR OF %s instance at %s **>' % (obj.__class__.__name__, hex(id(obj)))
def fastRepr(obj, maxLen=200, strFactor=10, _visitedIds=None):
""" caps the length of | |
Shape [N, H, W, C]
alpha (float, optional): Regularization weight. Defaults to 0.012.
ratio (float, optional): Downsample ratio. Defaults to 0.8.
min_width (int, optional): Minimal width of the coarsest level. Defaults to 20.
n_outer_fp_iterations (int, optional): Number of outer fixed point iterations. Defaults to 7.
n_inner_fp_iterations (int, optional): Number of inner fixed point iterations. Defaults to 1.
n_sor_iterations (int, optional): Number of SOR iterations. Defaults to 30.
Returns:
tf.Tensor: The optical flow with shape [N, H, W, 2]
References:
[1] https://github.com/pathak22/pyflow
[2] https://people.csail.mit.edu/celiu/OpticalFlow/
"""
import pyflow
num_channels = tf.shape(img_a)[-1]
if num_channels == 3:
# RGB
color_type = 0
elif num_channels == 1:
# Grayscale
color_type = 1
else:
raise ValueError("Number of channels must be 1 or 3.")
pyflow_fn = functools.partial(
pyflow.coarse2fine_flow,
alpha=alpha,
ratio=ratio,
minWidth=min_width,
nOuterFPIterations=n_outer_fp_iterations,
nInnerFPIterations=n_inner_fp_iterations,
nSORIterations=n_sor_iterations,
colType=color_type,
)
def flow_on_stacked(x: TensorLike):
u, v, _ = tf.numpy_function(pyflow_fn, [x[..., 0], x[..., 1]], tf.float64)
return tf.stack([v, u], axis=-1)
# Make sure the input type is float64
a = tf.cast(img_a, dtype=tf.float64)
b = tf.cast(img_b, dtype=tf.float64)
# Map the function on the complete batch
stacked = tf.stack([a, b], axis=-1)
return tf.map_fn(flow_on_stacked, stacked)
def backward_flow(
    forward_flow: TensorLike,
    img1: TensorLike = None,
    img2: TensorLike = None,
) -> tf.Tensor:
    """Compute the backward flow from the forward flow.
    If no images are given the negative flow is warped using the flow itself.
    If images are given pyinverseflow is used to compute the inverse flow using
    the strategy "max_image" and fill method "oriented".
    Args:
        forward_flow (TensorLike): The forward flow. Shape [N, H, W, 2]
        img1 (TensorLike, optional): The first image. Shape [N, H, W, 3]
        img2 (TensorLike, optional): The second image. Shape [N, H, W, 3]
    Returns:
        tf.Tensor: The backward flow. Shape [N, H, W, 2]
    """
    # No images given. The best we can do is warping the negative flow
    if img1 is None or img2 is None:
        return tfa.image.dense_image_warp(-forward_flow, forward_flow)
    # Images given. We can use pyinverseflow with strategy "max_image"
    from pyinverseflow import inverse_flow
    def inv_flow_fn(f, i1, i2):
        # runs on numpy arrays inside tf.numpy_function
        return inverse_flow(f, i1, i2, strategy="max_image", fill="oriented")
    def inverse_flow_on_concat(x: TensorLike):
        # x packs [flow(2ch) | img1(3ch) | img2(3ch)] along the last axis
        # NOTE(review): a single Tout (tf.float32) is declared but the result
        # is unpacked into (bf, _) -- confirm inverse_flow's return arity
        # matches what tf.numpy_function yields here
        bf, _ = tf.numpy_function(
            inv_flow_fn, [x[..., :2], x[..., 2:5], x[..., 5:8]], tf.float32
        )
        return bf
    concatenated = tf.concat([forward_flow, img1, img2], axis=-1)
    return tf.map_fn(inverse_flow_on_concat, concatenated)
# ==================================================================================================
# LIGHT FIELD
# ==================================================================================================
def lf_flows(
    disparity: TensorLike,
    grid_height: int,
    grid_width: int,
    center_i: int = None,
    center_j: int = None,
):
    """Forward flows from the center view to every light-field view.

    The view at grid position (i, j) receives the flow
    -disparity * (i - center_i, j - center_j).

    Args:
        disparity: Disparity map of the center view, shape [H, W].
        grid_height: Number of light-field views in the y direction.
        grid_width: Number of light-field views in the x direction.
        center_i: Row index of the center view; defaults to (grid_height-1)/2.
        center_j: Column index of the center view; defaults to (grid_width-1)/2.

    Returns:
        Tensor of shape [grid_height, grid_width, H, W, 2].
    """
    def _default_center(value, size):
        # fall back to the geometric center of the axis
        if value is None:
            return (tf.cast(size, dtype=tf.float32) - 1.0) / 2.0
        return value

    ci = _default_center(center_i, grid_height)
    cj = _default_center(center_j, grid_width)
    offsets_i = tf.range(grid_height, dtype=tf.float32) - ci
    offsets_j = tf.range(grid_width, dtype=tf.float32) - cj
    offset_grid = tf.stack(
        tf.meshgrid(offsets_i, offsets_j, indexing="ij"), axis=-1
    )
    # broadcast [1, 1, H, W, 1] against [GH, GW, 1, 1, 2]
    return -disparity[None, None, ..., None] * offset_grid[:, :, None, None, :]
def lf_to_video(lf: TensorLike) -> tf.Tensor:
    """Flatten a light field such that in the video a frame always follows a neighboring light
    field frame (boustrophedon / snake order: every second grid row is reversed).
    lf = [
        [ 0  1  2  3]
        [ 4  5  6  7]
        [ 8  9 10 11]
        [12 13 14 15]
        [16 17 18 19]
    ]
    lf_to_video(lf):
        [ 0, 1, 2, 3, 7, 6, 5, 4, 8, 9, 10, 11, 15, 14, 13, 12, 16, 17, 18, 19]
    Args:
        lf (TensorLike): The light field of shape [GH, GW, H, W, C]
    Returns:
        tf.Tensor: The flattened light field of shape [GH * GW, H, W, C]
    """
    gh = tf.shape(lf)[0]
    inner_shape = tf.shape(lf)[2:]
    to_right = lf[::2, :]  # Rows that are not flipped
    to_left = lf[1::2, ::-1]  # Rows that are flipped
    # Stitch the unflipped and flipped rows back together in grid-row order
    to_right_indices = tf.range(gh)[::2]
    to_left_indices = tf.range(gh)[1::2]
    stitched = tf.dynamic_stitch(
        [to_right_indices, to_left_indices], [to_right, to_left]
    )
    # Flatten the first axis
    # NOTE(review): '*inner_shape' iterates a tf.shape tensor -- presumably
    # this function is only used in eager mode; confirm before tracing it
    return tf.reshape(stitched, [-1, *inner_shape])
def resize_lf(
    lf: TensorLike, factor: int, upscale: bool, resize_fn: ResizeFnType = None
):
    """Resize every view of a light field.

    Args:
        lf: The light field with shape [GH, GW, H, W, C].
        factor: The scale factor.
        upscale: True to upscale, False to downscale.
        resize_fn: Resize function used for interpolation. Defaults to
            the function returned by create_resize_fn().

    Returns:
        tf.Tensor: The resized light field, shape [GH, GW, H', W', C].
    """
    fn = create_resize_fn() if resize_fn is None else resize_fn
    grid_shape = tf.shape(lf)[:2]
    frame_shape = tf.shape(lf)[2:]
    # collapse the two grid axes into a single batch axis
    batched = tf.reshape(lf, tf.concat([[-1], frame_shape], axis=0))
    resized = fn(batched, factor, upscale)
    # restore the grid axes around the (possibly changed) frame shape
    new_shape = tf.concat([grid_shape, tf.shape(resized)[1:]], axis=0)
    return tf.reshape(resized, new_shape)
# Module-level cache so the (large) Keras model is downloaded/loaded only once
LFATTNET_MODEL = None
def lf_disparity_lfattnet(lf: TensorLike) -> tf.Tensor:
    """Estimate the disparity of the light field by using LFattNet [1].
    Currently, this function only supports light fields of a 9x9 grid.
    Args:
        lf (TensorLike): The light field of shape [9, 9, H, W, 3]
    Returns:
        tf.Tensor: The disparity estimation of shape [H, W]
    References:
        [1] <NAME>, <NAME>, <NAME>, and <NAME>, “Attention-Based View
        Selection Networks for Light-Field Disparity Estimation,” AAAI, vol. 34, no. 07,
        pp. 12095–12103, Apr. 2020, doi: 10.1609/aaai.v34i07.6888.
    """
    def load_model():
        # fetch the pretrained weights via the Keras file cache
        model_file = tf.keras.utils.get_file(
            f"LFattNet_9x9.h5",
            "https://github.com/HedgehogCode/deep-plug-and-play-prior/releases/download"
            + f"/thesis/LFattNet_9x9.h5",
        )
        return tf.keras.models.load_model(
            model_file, custom_objects={"tf": tf, "tfa": tfa}, compile=False
        )
    # Load the model if it is not yet loaded
    global LFATTNET_MODEL
    if LFATTNET_MODEL is None:
        LFATTNET_MODEL = load_model()
    # RGB -> Gray (ITU-R BT.601 luma weights)
    rgb = [0.299, 0.587, 0.114]
    inp = (rgb[0] * lf[..., 0] + rgb[1] * lf[..., 1] + rgb[2] * lf[..., 2])[..., None]
    # LF tensor to list of inputs
    # NOTE(review): tfdsbw.lf_to_batch is defined outside this file --
    # presumably it splits the 9x9 grid into 81 per-view tensors; confirm
    inp = [f[None, ...] for f in tfdsbw.lf_to_batch(inp)]
    # Run the model
    return LFATTNET_MODEL(inp)[0][0]
# ==================================================================================================
# INPAINTING
# ==================================================================================================
def inpaint_border(
    x: TensorLike, mask: TensorLike, blur_stddev: float = 0, noise_stddev: float = 0
) -> TensorType:
    """Inpaint the image x at the locations where mask is 0 by replicating the nearest pixels.

    Args:
        x: Images, shape [N, H, W, C].
        mask: 1 where pixels are known, 0 where they must be filled.
        blur_stddev: If > 0, gaussian-blur the filled region with this stddev.
        noise_stddev: If > 0, add gaussian noise to the filled region.

    Returns:
        The inpainted images.
    """
    def cond(x, x_mask):
        # keep iterating while any pixel is still unknown
        return tf.reduce_any(x_mask == 0)
    def body(x, x_mask):
        # one dilation step: each unknown pixel takes the mean of its
        # known 4-neighbours (rolls by +-1 along H and W)
        def roll_in_all_directions(v):
            list_of_rolled = [
                tf.roll(v, shift=shift, axis=axis)
                for shift, axis in itertools.product([1, -1], [1, 2])
            ]
            return tf.stack(list_of_rolled, axis=-1)
        image_neighbors = roll_in_all_directions(x)
        mask_neighbors = roll_in_all_directions(x_mask)
        image_neighbors_sum = tf.reduce_sum(image_neighbors, axis=-1)
        mask_neighbors_sum = tf.reduce_sum(mask_neighbors, axis=-1)
        # divide_no_nan: pixels with no known neighbour remain 0 this round
        image_neighbors_mean = tf.math.divide_no_nan(
            image_neighbors_sum, mask_neighbors_sum
        )
        return (
            x * x_mask + image_neighbors_mean * (1 - x_mask),
            tf.clip_by_value(mask_neighbors_sum, clip_value_min=0, clip_value_max=1),
        )
    inpainted = tf.while_loop(cond, body, loop_vars=[x, mask])[0]
    # Apply blur to inpainted region
    # (gaussian_kernel / blur / conv2D_filter_rgb are helpers defined
    # elsewhere in this module)
    if blur_stddev > 0:
        k = gaussian_kernel(math.floor(blur_stddev * 3), 0, blur_stddev)
        blurred = blur(inpainted, conv2D_filter_rgb(k), 0)
        inpainted = mask * inpainted + (1 - mask) * blurred  # type: ignore
    # Apply noise to inpainted region
    if noise_stddev > 0:
        noise = tf.random.normal(tf.shape(inpainted), stddev=noise_stddev)
        noisy = inpainted + noise
        inpainted = mask * inpainted + (1 - mask) * noisy  # type: ignore
    return inpainted
def inpaint_random_normal(x: TensorLike, mask: TensorLike) -> TensorType:
    """Fill the image x at the locations where mask is 0 with random normal noise N(0.5,0.25)."""
    fill = tf.clip_by_value(
        tf.random.normal(tf.shape(x), mean=0.5, stddev=0.25), 0, 1
    )
    known = x * mask
    return known + fill * (1 - mask)  # type: ignore
def inpaint_random_uniform(x: TensorLike, mask: TensorLike) -> TensorType:
    """Fill the image x at the locations where mask is 0 with random uniform noise between 0 and 1."""
    fill = tf.random.uniform(tf.shape(x), minval=0, maxval=1)
    known = x * mask
    return known + fill * (1 - mask)  # type: ignore
def inpaint_gray(x: TensorLike, mask: TensorLike) -> TensorType:
    """Fill the image x at the locations where mask is 0 with gray values (0.5)."""
    mid_gray = tf.ones(tf.shape(mask)) / 2
    known = x * mask
    return known + mid_gray * (1 - mask)  # type: ignore
def inpaint_mean(x: TensorLike, mask: TensorLike) -> TensorType:
    """Fill the image x at the locations where mask is 0 with the mean value of the image."""
    fill = tf.ones(tf.shape(mask)) * tf.reduce_mean(x)
    known = x * mask
    return known + fill * (1 - mask)  # type: ignore
8:2.000000 9:1.000000 10:1.000000
... '''
>>>
>>> import numpy as np
>>> # Each row is an instance and takes the form **<target value> <feature index>:<feature value> ... **.
... # Dataset is 'classification' type and target values (first column) represents class label of each sample, i.e., type='classification' (default)
... # All features assume only integral values, i.e., dtype=np.int
... main.read_data_libsvm(BytesIO(data), dtype=np.int)
>>>
>>> # Print the data samples
... print(main.data)
[[1000025 5 1 1 1 2 1 3 1 1]
[1002945 5 4 4 5 7 10 3 2 1]
[1015425 3 1 1 1 2 2 3 1 1]
[1016277 6 8 8 1 3 4 3 7 1]
[1017023 4 1 1 3 2 1 3 1 1]
[1017122 8 10 10 8 7 10 9 7 1]
[1018099 1 1 1 1 2 10 3 1 1]
[1018561 2 1 2 1 2 1 3 1 1]
[1033078 2 1 1 1 2 1 1 1 5]
[1033078 4 2 1 1 2 1 2 1 1]]
>>>
>>> # Print indices of columns or features. Assumption: Feature indices always uses one-based index
... print(main.columns_)
[ 1 2 3 4 5 6 7 8 9 10]
>>>
>>> # Print target values
... print(main.target)
[2 2 2 2 2 4 2 2 2 2]
>>>
>>> # Print the distinct classes in target values
... print(main.classes_)
[2 4]
"""
dataset = load_svmlight_file(f=file, dtype=dtype, n_features=n_features, query_id=False, **kargs)
data, target = dataset[0].toarray(), dataset[1]
del dataset
self.classes_ = None
if type.casefold()=="classification":
target = target.astype(np.int)
target_labelEncoder = LabelEncoder()
target = target_labelEncoder.fit_transform(target)
self.classes_ = target_labelEncoder.classes_.tolist()
elif type.casefold()=="regression":
pass
elif type.casefold()=="ranking":
logger.error("'ranking' type datasets are not currently supported")
raise NotImplementedError("'ranking' type datasets are not currently supported")
n_features = data.shape[1]
self.columns_ = np.arange(1, n_features+1)
self._nominal_columns = None
self.data, self.target = data, target
self.n_samples, self.n_features = self.data.shape
# TODO: Allow use of subset of attributes
def read_data_arff(self, file, target_attr='class', encode_target='infer', numeric_categorical_attrs=None, drop_na_rows=True):
    """Read data from ARFF format file
    Parameters:
        file (str or open file): path to ARFF data file or ``open file`` object
        target_attr (str, default='class'): attribute name of the target column. ``target_attr=None`` implies no target columns.
        encode_target (bool, default='infer'): Encode target values. ``encode_target='infer'`` encodes nominal target and ignores numeric target attributes.
        numeric_categorical_attrs (:obj:`list`, default= ``None``): List of 'names' of numeric attributes to be inferred as nominal and to be encoded. Note: All nominal attributes are implicitly encoded.
        drop_na_rows (bool, default=True): Drop data samples with NA/NaN ('?') features
    Note:
        All nominal type attributes are implicitly encoded.
    Examples:
        Illustration of **Reading from ARFF data file** ::
            >>> from automs import eda
            >>> main = eda.EDA()
            >>> from io import StringIO
            >>> data = '''
            ... % Dataset: Hepatitis (excerpt)
            ... @relation hepatitis
            ... @attribute Age integer
            ... @attribute Sex {male, female}
            ... @attribute Steroid {no, yes}
            ... @attribute Albumin real
            ... @attribute Class {DIE, LIVE}
            ... @data
            ... 30,male,no,4,LIVE
            ... 50,female,no,3.5,LIVE
            ... 31,female,?,4,LIVE
            ... 46,female,yes,3.3,DIE
            ... '''
            >>> # The target is attribute 'Class'; rows with missing ('?')
            ... # features are dropped (drop_na_rows=True, the default).
            ... main.read_data_arff(StringIO(data), target_attr='Class')
            >>> print(main.columns_)
            ['Age', 'Sex', 'Steroid', 'Albumin']
            >>> print(main.classes_)
            ['DIE', 'LIVE']
    """
    dataset, metadata = loadarff(f=file)
    # flag rows with any missing value ('?' for nominal, NaN for numeric).
    # NOTE: np.bool was removed in NumPy 1.24 -- use the builtin bool dtype.
    rows_without_na = np.ones(dataset.shape[0], dtype=bool)
    for attribute in metadata:
        if metadata[attribute][0] == 'nominal':
            rows_without_na[np.where(dataset[attribute] == b'?')] = False
        if metadata[attribute][0] == 'numeric':
            rows_without_na[np.isnan(dataset[attribute])] = False
    if not rows_without_na.all():
        logger.info("The dataset may contain attributes with N/A ('?') values")
        if drop_na_rows:
            dataset = dataset[rows_without_na]
    # column names, excluding the target attribute (if any)
    self.columns_ = metadata.names().copy()
    if target_attr is None or target_attr in metadata:
        if target_attr in metadata:
            self.columns_.remove(target_attr)
        data_records, target = dataset[self.columns_], None if target_attr is None else dataset[target_attr]
        del dataset
    else:
        logger.error("Unknown 'target' attribute name specified")
        raise ValueError("unknown 'target' attribute name specified")
    # Processing target labels
    if target_attr is not None:
        self.classes_ = None
        # 'classification' type datasets: nominal targets are encoded
        if metadata[target_attr][0]=='nominal':
            if isinstance(encode_target, str) and encode_target.casefold()=='infer':
                encode_target = True
        # 'regression' type datasets: numeric targets kept as-is
        elif metadata[target_attr][0]=='numeric':
            # np.number is an abstract dtype and deprecated as an astype
            # argument; loadarff yields float64 numerics, so use float64
            target = target.astype(np.float64)
            if isinstance(encode_target, str) and encode_target.casefold()=='infer':
                encode_target = False
        if encode_target:
            target_labelEncoder = LabelEncoder()
            target = target_labelEncoder.fit_transform(target)
            self.classes_ = [target_class.decode() for target_class in target_labelEncoder.classes_.tolist()]
    # Form a new (fully numeric) data array, one column per attribute
    data = np.empty((data_records.size, len(data_records.dtype.names)), dtype=np.float64)
    self._nominal_columns = []
    # Column name indexed dictionary of distinct (original) categories in the
    # data columns. Defaults to ``None`` for numeric (non-categorical) columns.
    self.columns_categories_ = dict.fromkeys(self.columns_)
    for index, attribute in enumerate(data_records.dtype.names):
        attribute_values = data_records[attribute]
        encode_attribute = False
        if metadata[attribute][0] == 'numeric':
            # numeric columns are only encoded when explicitly requested
            if numeric_categorical_attrs is not None and attribute in numeric_categorical_attrs:
                encode_attribute = True
        elif metadata[attribute][0] == 'nominal':
            encode_attribute = True
        if encode_attribute:
            self._nominal_columns.append(attribute)
            attr_labelEncoder = LabelEncoder()
            attribute_values = attr_labelEncoder.fit_transform(attribute_values)
            # remember the original (decoded) category names
            self.columns_categories_[attribute] = [attr.decode() for attr in attr_labelEncoder.classes_.tolist()]
            del attr_labelEncoder
        data.T[index] = attribute_values
    del data_records
    self.data, self.target = data, target
    self.n_samples, self.n_features = self.data.shape
def dummy_coding(self, nominal_columns='infer', drop_first=False):
"""Dummy coding (One-Hot Encoding) of nominal categorical columns (features)
Parameters:
nominal_columns (:obj:`list`, int, str, 'all', default='infer'): List (str or int if singleton) of column 'names' (or absolute 'indices', if no column names) of nominal categorical columns to dummy code. ``nominal_columns='infer'`` autodetects nominal categorical columns. ``nominal_columns='all'`` implies all columns are nominal categorical. ``nominal_columns=None`` implies no nominal categorical columns.
drop_first (bool, default=False): Whether to get k-1 dummies out of k categorical levels by removing the first level.
Note:
``nominal_columns`` parameter uses absolute column 'names' (or absolute column 'indices' if no names) as presented in the original data file.
See also:
`What is One Hot Encoding? Why And When do you have to use it? (Source: HackerNoon) <https://hackernoon.com/what-is-one-hot-encoding-why-and-when-do-you-have-to-use-it-e3c6186d008f>`_
Examples:
Illustration of **Dummy-Coding** of Nominal Categorical Columns
::
>>> from automs import eda
>>> main = eda.EDA()
>>> from io import StringIO
>>> data = '''
... % Dataset: Automobiles (Source: UCI ML Repository)
... % Attributes : symboling (ordinal) {-3, -2, -1, 0, 1, 2, 3}
... % body-style (nominal) {hardtop, wagon, sedan, hatchback, convertible}
... % engine-size (continous) [61, 326]
... % engine-location (nominal) {front, rear}
... % Target Attribute : symboling
...
... 3,convertible,130,front
... 1,hatchback,152,front
... 2,sedan,109,front
... 3,hardtop,194,rear
... 0,wagon,132,front
... -2,sedan,141,front
... 3,convertible,194,rear
... -1,hatchback,122,front
... 2,hardtop,97,front
... 0,wagon,108,front
... '''
>>> # Ignore lines starting with '%' as comment, i.e., comment='%'.
... # Use column 0 (attribute 'symboling') as target values to predict, i.e., target_col=0.
... # Encode nominal columns 1 and 3 (body-style and engine-location), i.e., categorical_cols=[1,3]
... main.read_data_csv(StringIO(data), comment='%', target_col=0, encode_target=False, categorical_cols=[1,3])
>>> # Print the processed data samples.
... print(main.data)
[[ 0. 130. 0.]
[ 2. 152. 0.]
[ 3. 109. 0.]
[ 1. 194. 1.]
[ 4. 132. 0.]
[ 3. | |
<reponame>kiranrraj/100Days_Of_Coding
# Title : Selection sort Method#1
# Author : <NAME>.
# Date : 05:11:2020
# input data: unsorted list of integers
arr=[50,5,7,1,34,22,12,3,45,2,16,8,48]
def selection_sort(arr):
    """Sort *arr* in place (ascending) while printing the full trace.

    NOTE(review): despite the title this is the exchange ("naive
    selection") variant -- it swaps immediately whenever arr[j] < arr[i]
    instead of remembering the minimum's index and swapping once per pass.
    The printed trace below documents every comparison step.
    """
    length = len(arr)
    # loop = 0
    for i in range(length):
        print(f"Main Loop:{i+1}")
        for j in range(i+1,length):
            # loop+=1
            print(f"Sub loop: {j}", end= " ")
            # put the smaller of the pair at position i
            if arr[i] > arr[j]:
                arr[i], arr[j] = arr[j], arr[i]
            print(arr)
        print(f"Array after main loop {i+1} : {arr}\n")
    # print(f"total loop = {loop}")
    return arr
print(selection_sort(arr))
#---------------Output---------------
# Main Loop:1
# Sub loop: 1 [5, 50, 7, 1, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 2 [5, 50, 7, 1, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 3 [1, 50, 7, 5, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 4 [1, 50, 7, 5, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 5 [1, 50, 7, 5, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 6 [1, 50, 7, 5, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 7 [1, 50, 7, 5, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 8 [1, 50, 7, 5, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 9 [1, 50, 7, 5, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 10 [1, 50, 7, 5, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 11 [1, 50, 7, 5, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 12 [1, 50, 7, 5, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Array after main loop 1 : [1, 50, 7, 5, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Main Loop:2
# Sub loop: 2 [1, 7, 50, 5, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 3 [1, 5, 50, 7, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 4 [1, 5, 50, 7, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 5 [1, 5, 50, 7, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 6 [1, 5, 50, 7, 34, 22, 12, 3, 45, 2, 16, 8, 48]
# Sub loop: 7 [1, 3, 50, 7, 34, 22, 12, 5, 45, 2, 16, 8, 48]
# Sub loop: 8 [1, 3, 50, 7, 34, 22, 12, 5, 45, 2, 16, 8, 48]
# Sub loop: 9 [1, 2, 50, 7, 34, 22, 12, 5, 45, 3, 16, 8, 48]
# Sub loop: 10 [1, 2, 50, 7, 34, 22, 12, 5, 45, 3, 16, 8, 48]
# Sub loop: 11 [1, 2, 50, 7, 34, 22, 12, 5, 45, 3, 16, 8, 48]
# Sub loop: 12 [1, 2, 50, 7, 34, 22, 12, 5, 45, 3, 16, 8, 48]
# Array after main loop 2 : [1, 2, 50, 7, 34, 22, 12, 5, 45, 3, 16, 8, 48]
# Main Loop:3
# Sub loop: 3 [1, 2, 7, 50, 34, 22, 12, 5, 45, 3, 16, 8, 48]
# Sub loop: 4 [1, 2, 7, 50, 34, 22, 12, 5, 45, 3, 16, 8, 48]
# Sub loop: 5 [1, 2, 7, 50, 34, 22, 12, 5, 45, 3, 16, 8, 48]
# Sub loop: 6 [1, 2, 7, 50, 34, 22, 12, 5, 45, 3, 16, 8, 48]
# Sub loop: 7 [1, 2, 5, 50, 34, 22, 12, 7, 45, 3, 16, 8, 48]
# Sub loop: 8 [1, 2, 5, 50, 34, 22, 12, 7, 45, 3, 16, 8, 48]
# Sub loop: 9 [1, 2, 3, 50, 34, 22, 12, 7, 45, 5, 16, 8, 48]
# Sub loop: 10 [1, 2, 3, 50, 34, 22, 12, 7, 45, 5, 16, 8, 48]
# Sub loop: 11 [1, 2, 3, 50, 34, 22, 12, 7, 45, 5, 16, 8, 48]
# Sub loop: 12 [1, 2, 3, 50, 34, 22, 12, 7, 45, 5, 16, 8, 48]
# Array after main loop 3 : [1, 2, 3, 50, 34, 22, 12, 7, 45, 5, 16, 8, 48]
# Main Loop:4
# Sub loop: 4 [1, 2, 3, 34, 50, 22, 12, 7, 45, 5, 16, 8, 48]
# Sub loop: 5 [1, 2, 3, 22, 50, 34, 12, 7, 45, 5, 16, 8, 48]
# Sub loop: 6 [1, 2, 3, 12, 50, 34, 22, 7, 45, 5, 16, 8, 48]
# Sub loop: 7 [1, 2, 3, 7, 50, 34, 22, 12, 45, 5, 16, 8, 48]
# Sub loop: 8 [1, 2, 3, 7, 50, 34, 22, 12, 45, 5, 16, 8, 48]
# Sub loop: 9 [1, 2, 3, 5, 50, 34, 22, 12, 45, 7, 16, 8, 48]
# Sub loop: 10 [1, 2, 3, 5, 50, 34, 22, 12, 45, 7, 16, 8, 48]
# Sub loop: 11 [1, 2, 3, 5, 50, 34, 22, 12, 45, 7, 16, 8, 48]
# Sub loop: 12 [1, 2, 3, 5, 50, 34, 22, 12, 45, 7, 16, 8, 48]
# Array after main loop 4 : [1, 2, 3, 5, 50, 34, 22, 12, 45, 7, 16, 8, 48]
# Main Loop:5
# Sub loop: 5 [1, 2, 3, 5, 34, 50, 22, 12, 45, 7, 16, 8, 48]
# Sub loop: 6 [1, 2, 3, 5, 22, 50, 34, 12, 45, 7, 16, 8, 48]
# Sub loop: 7 [1, 2, 3, 5, 12, 50, 34, 22, 45, 7, 16, 8, 48]
# Sub loop: 8 [1, 2, 3, 5, 12, 50, 34, 22, 45, 7, 16, 8, 48]
# Sub loop: 9 [1, 2, 3, 5, 7, 50, 34, 22, 45, 12, 16, 8, 48]
# Sub loop: 10 [1, 2, 3, 5, 7, 50, 34, 22, 45, 12, 16, 8, 48]
# Sub loop: 11 [1, 2, 3, 5, 7, 50, 34, 22, 45, 12, 16, 8, 48]
# Sub loop: 12 [1, 2, 3, 5, 7, 50, 34, 22, 45, 12, 16, 8, 48]
# Array after main loop 5 : [1, 2, 3, 5, 7, 50, 34, 22, 45, 12, 16, 8, 48]
# Main Loop:6
# Sub loop: 6 [1, 2, 3, 5, 7, 34, 50, 22, 45, 12, 16, 8, 48]
# Sub loop: 7 [1, 2, 3, 5, 7, 22, 50, 34, 45, 12, 16, 8, 48]
# Sub loop: 8 [1, 2, 3, 5, 7, 22, 50, 34, 45, 12, 16, 8, 48]
# Sub loop: 9 [1, 2, 3, 5, 7, 12, 50, 34, 45, 22, 16, 8, 48]
# Sub loop: 10 [1, 2, 3, 5, 7, 12, 50, 34, 45, 22, 16, 8, 48]
# Sub loop: 11 [1, 2, 3, 5, 7, 8, 50, 34, 45, 22, 16, 12, 48]
# Sub loop: 12 [1, 2, 3, 5, 7, 8, 50, 34, 45, 22, 16, 12, 48]
# Array after main loop 6 : [1, 2, 3, 5, 7, 8, 50, 34, 45, 22, 16, 12, 48]
# Main Loop:7
# Sub loop: 7 [1, 2, 3, 5, 7, 8, 34, 50, 45, 22, 16, 12, 48]
# Sub loop: 8 [1, 2, 3, 5, 7, 8, 34, 50, 45, 22, 16, 12, 48]
# Sub loop: 9 [1, 2, 3, 5, 7, 8, 22, 50, 45, 34, 16, 12, 48]
# Sub loop: 10 [1, 2, 3, 5, 7, 8, 16, 50, 45, 34, 22, 12, 48]
# Sub loop: 11 [1, 2, 3, 5, 7, 8, 12, 50, 45, 34, 22, 16, 48]
# Sub loop: 12 [1, 2, 3, 5, 7, 8, 12, 50, 45, 34, 22, 16, 48]
# Array after main loop 7 : [1, 2, 3, 5, 7, 8, 12, 50, 45, 34, 22, 16, 48]
# Main Loop:8
# Sub loop: 8 [1, 2, 3, 5, 7, 8, 12, 45, 50, 34, 22, 16, 48]
# Sub loop: 9 [1, 2, 3, 5, 7, 8, 12, 34, 50, 45, 22, 16, 48]
# Sub loop: 10 [1, 2, 3, 5, 7, 8, 12, 22, 50, 45, 34, 16, 48]
# Sub loop: 11 [1, 2, 3, 5, 7, 8, 12, 16, 50, | |
<reponame>tkamishima/kamrecsys<filename>kamrecsys/model_selection/split.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Data Splitter for hold-out tests or cross validation.
The usage of these splitter classes are similar to the splitters of `sklearn`
such as :class:`sklearn.model_selection.KFold` .
"""
from __future__ import (
print_function,
division,
absolute_import,
unicode_literals)
from six.moves import xrange
# =============================================================================
# Imports
# =============================================================================
import logging
import numpy as np
from sklearn.utils import indexable, check_random_state
from sklearn.model_selection import (
BaseCrossValidator, PredefinedSplit, train_test_split, KFold)
from sklearn.model_selection._split import _validate_shuffle_split_init
# =============================================================================
# Metadata variables
# =============================================================================
# =============================================================================
# Public symbols
# =============================================================================
__all__ = []
# =============================================================================
# Constants
# =============================================================================
# =============================================================================
# Variables
# =============================================================================
# =============================================================================
# Functions
# =============================================================================
# =============================================================================
# Classes
# =============================================================================
class ShuffleSplitWithinGroups(BaseCrossValidator):
    """
    Generate random splits within each group

    Data are first divided into groups specified by `groups` . Then, for each
    group, data are split into training and test sets at random. The way of
    splitting data is the same as the
    :class:`sklearn.model_selection.ShuffleSplit`

    Parameters
    ----------
    n_splits : int, default 10
        Number of re-shuffling & splitting iterations.
    test_size : float, int, None, default=0.1
        See the specification of :class:`sklearn.model_selection.ShuffleSplit`
    train_size : float, int, or None, default=None
        See the specification of :class:`sklearn.model_selection.ShuffleSplit`
    shuffle : boolean, optional
        Whether to shuffle the data before splitting into batches.
    random_state : int, RandomState instance or None, optional (default=None)
        See the specification of :class:`sklearn.model_selection.ShuffleSplit`
    """

    def __init__(self, n_splits=10, test_size=0.1, train_size=None,
                 shuffle=True, random_state=None):
        super(ShuffleSplitWithinGroups, self).__init__()
        # fail fast on inconsistent test_size / train_size combinations
        _validate_shuffle_split_init(test_size, train_size)
        self.n_splits = int(n_splits)
        self.test_size = test_size
        self.train_size = train_size
        self.shuffle = shuffle
        self.random_state = random_state

    def split(self, X, y=None, groups=None):
        """
        Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.
        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        # check arguments
        X, y, groups = indexable(X, y, groups)
        for train, test in super(
                ShuffleSplitWithinGroups, self).split(X, y, groups):
            yield train, test

    def _iter_test_masks(self, X, y=None, groups=None):
        # yields one boolean test mask per split
        n_samples = X.shape[0]
        # if groups is not specified, treat the entire data as a single group
        if groups is None:
            groups = np.zeros(n_samples, dtype=int)
        # constants shared by all splits
        indices = np.arange(n_samples, dtype=int)
        test_fold = np.empty(n_samples, dtype=bool)
        rng = check_random_state(self.random_state)
        group_indices = np.unique(groups)
        # generate training and test splits
        # (the original bound unused loop variables: 'fold', the enumerate
        # index 'i', and the training half of train_test_split's result)
        for _ in xrange(self.n_splits):
            test_fold[:] = False
            for g in group_indices:
                # only the test indices are needed; training indices are
                # the complement of the final mask
                _, test_i = train_test_split(
                    indices[groups == g],
                    test_size=self.test_size, train_size=self.train_size,
                    shuffle=self.shuffle, random_state=rng)
                test_fold[test_i] = True
            # yield a copy so callers that collect all masks do not end up
            # aliasing the same reused buffer
            yield test_fold.copy()

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return self.n_splits
class KFoldWithinGroups(BaseCrossValidator):
    """
    Generate K-fold splits within each group.

    Data are first divided into groups specified by ``groups``.  Then, each
    group is further divided into K folds.  The elements having the same
    fold number are assigned to the same fold.  The way of splitting data
    within each group is the same as :class:`sklearn.model_selection.KFold`.

    Parameters
    ----------
    n_splits : int, default=3
        Number of folds. Must be at least 2.
    shuffle : boolean, optional
        Whether to shuffle the data before splitting into batches.
    random_state : int, RandomState instance or None, optional, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``shuffle`` == True.
    """

    def __init__(self, n_splits=3, shuffle=False, random_state=None):
        super(KFoldWithinGroups, self).__init__()
        n_splits = int(n_splits)
        if n_splits <= 1:
            raise ValueError(
                "k-fold cross-validation requires at least one"
                " train/test split by setting n_splits=2 or more,"
                " got n_splits={0}.".format(n_splits))
        self.n_splits = n_splits
        self.shuffle = shuffle
        self.random_state = random_state

    def split(self, X, y=None, groups=None):
        """
        Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.
        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset
            into train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        # check arguments
        X, y, groups = indexable(X, y, groups)
        # get the number of samples
        n_samples = X.shape[0]
        if self.n_splits > n_samples:
            raise ValueError(
                ("Cannot have number of splits n_splits={0} greater"
                 " than the number of samples: n_samples={1}."
                 ).format(self.n_splits, n_samples))
        for train, test in super(KFoldWithinGroups, self).split(X, y, groups):
            yield train, test

    def _iter_test_masks(self, X, y=None, groups=None):
        # Yields one boolean test mask per fold, combining the k-th fold
        # of every group.
        n_samples = X.shape[0]
        # if groups is not specified, the entire data set is one group
        if groups is None:
            groups = np.zeros(n_samples, dtype=int)
        # constants
        indices = np.arange(n_samples)
        test_fold = np.empty(n_samples, dtype=bool)
        rng = check_random_state(self.random_state)
        group_indices = np.unique(groups)
        iters = np.empty(group_indices.shape[0], dtype=object)
        # BUG FIX: pass KFold arguments by keyword (they are keyword-only in
        # recent scikit-learn) and forward random_state only when shuffling,
        # since KFold rejects random_state together with shuffle=False.
        cv = KFold(n_splits=self.n_splits, shuffle=self.shuffle,
                   random_state=rng if self.shuffle else None)
        # generate one fold iterator per group
        for i, g in enumerate(group_indices):
            group_member = indices[groups == g]
            iters[i] = cv.split(group_member)
        # generate training and test splits
        # BUG FIX: xrange is Python 2 only; use range
        for fold in range(self.n_splits):
            test_fold[:] = False
            for i, g in enumerate(group_indices):
                group_train_i, group_test_i = next(iters[i])
                test_fold[indices[groups == g][group_test_i]] = True
            yield test_fold

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator.

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return self.n_splits
class InterlacedKFold(BaseCrossValidator):
    """
    k-folds by an interlaced grouping.

    The i-th data point is assigned to the (i mod n_splits)-th fold.
    Subsequent data are grouped into the same fold in a standard k-fold
    cross validation, which is inconvenient if subsequent data are highly
    correlated.  This class is useful in such a situation.

    Parameters
    ----------
    n_splits : int, default=3
        Number of folds. It must be `n_splits >= 2` .
    """

    def __init__(self, n_splits=3):
        super(InterlacedKFold, self).__init__()
        n_splits = int(n_splits)
        if n_splits <= 1:
            raise ValueError(
                "k-fold cross-validation requires at least one"
                " train/test split by setting n_splits=2 or more,"
                " got n_splits={0}.".format(n_splits))
        self.n_splits = n_splits

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            The target variable for supervised learning problems.
        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset
            into train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        X, y, groups = indexable(X, y, groups)
        n_samples = X.shape[0]
        if self.n_splits > n_samples:
            raise ValueError(
                ("Cannot have number of splits n_splits={0} greater"
                 " than the number of samples: n_samples={1}."
                 ).format(self.n_splits, n_samples))
        # generate test fold: sample i belongs to fold (i mod n_splits)
        test_fold = np.arange(n_samples, dtype=int) % self.n_splits
        cv = PredefinedSplit(test_fold)
        return cv.split()

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator.

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        # BUG FIX: the return statement was missing (garbled) in the original
        return self.n_splits
"""
Support for STATS data files.
STATS binary file structure
===========================
A stats binary output files begins with a stats_hdt_t structure::
typedef struct
{
unsigned short header_size; /* bytes, may or may not be there */
unsigned short spcid; /* station id - 10, 40, 60, 21 */
unsigned short vsrid; /* vsr1a, vsr1b ... from enum */
unsigned short chanid; /* subchannel id 0,1,2,3 */
unsigned short bps; /* number of bits per sample - 1, 2, 4, 8,
or 16 */
unsigned long srate; /* number of samples per second in kilo-
samples per second */
unsigned short error; /* hw err flag, dma error or num_samples
error,
0 ==> no errors */
unsigned short year; /* time tag - year */
unsigned short doy; /* time tag - day of year */
unsigned long sec; /* time tag - second of day */
double freq; /* in Hz */
unsigned long orate; /* number of statistics samples per
second */
unsigned short nsubchan; /* number of output sub chans */
}
stats_hdr_t;
This unpacks with "=4H Q HHH Q d Q H"
A data record looks like this::
fwrite(&doy, sizeof(int), 1, ofp);
fwrite(&sec, sizeof(int), 1, ofp);
fwrite(&i, sizeof(int), 1, ofp);
fwrite(&mean, sizeof(double), 1, ofp);
fwrite(&var, sizeof(double), 1, ofp);
fwrite(&skew, sizeof(double), 1, ofp);
fwrite(&kurt, sizeof(double), 1, ofp);
fwrite(&mean, sizeof(double), 1, ofp);
fwrite(&var, sizeof(double), 1, ofp);
fwrite(&skew, sizeof(double), 1, ofp);
fwrite(&kurt, sizeof(double), 1, ofp);
which unpacks with "=LLL dddd dddd"
STATS ASCII file structure
==========================
A STATS file begins with a header. The data lines consist of::
- column 0: second, starting with 1
- column 1: sample number within the second (typically 0-999)
- column (subchannel + 2): mean
- column (subchannel + 3): r.m.s.
- column (subchannel + 4): kurtosis
- column (subchannel + 5): skewness
where subchannel = 0, 1.
"""
import glob
import os.path
import re
import struct
import time

import numpy

import DatesTimes as DT
import Data_Reduction as DRDSN
# Module-level debugging switches read by the reader functions below.
diag = True
# When True, print each raw line as it is read from a STATS file.
diag_read = False
def process_STATS_ASCII_header(fd):
    """Process the header in a STATS file.

    STATS files are created from VSR data. The earliest versions of a
    STATS file did not preface header data with #. This was added later
    for use with some plotting programs.

    Header lines look like ``#KEY :: VALUE``; values with a decimal point
    are stored as float, otherwise as int.  Reading stops at a line
    containing ``HEADER_END`` or at end-of-file.

    @param fd : file descriptor
    @return: dictionary
      The keys are the same ones as in the header.
    """
    header = {}
    doing_header = True
    while doing_header:
        raw = fd.readline()
        if raw == '':
            # BUG FIX: hitting end-of-file before HEADER_END used to loop
            # forever; return whatever header was collected so far.
            break
        line = raw.strip()
        if re.search('::', line):
            [k, v] = line.split('::')
            # strip whitespace and any leading '#' from the key
            key = k.strip().lstrip('#')
            # a decimal point distinguishes float values from int values
            if re.search(r'\.', v) is None:
                header[key] = int(v.strip())
            else:
                header[key] = float(v.strip())
        elif re.search('HEADER_END', line):
            doing_header = False
    return header
def process_STATS_ASCII_data_line(line, nchan):
    """Parse one line of a STATS data file.

    Columns 0 and 1 hold the second and the sample number within the
    second; the remaining columns hold (mean, rms, kurtosis, skew)
    quadruples, one per subchannel.

    @param line : string
      One line from an ASCII STATS data file
    @param nchan : number of signal channels in the file
    @return: tuple
      (sec, ms, means, rmses, kurtoses, skews) where the last four are
      lists with one entry per subchannel.
    """
    columns = line.split()
    sec = int(columns[0])
    ms = int(columns[1])
    # flatten the per-channel quadruples, then de-interleave by stride 4
    moments = [float(c) for c in columns[2:2 + 4 * nchan]]
    mean = moments[0::4]
    rms = moments[1::4]
    kurtosis = moments[2::4]
    skew = moments[3::4]
    return (sec, ms, mean, rms, kurtosis, skew)
def get_STATS_ASCII_data_block(fd, nchan, nsamps):
    """This reads data for one block from the data file.

    This should sit and wait until data are available, line by line.
    When the required number of lines have been read it returns the data
    as a tuple of arrays.

    @param fd : file descriptor
    @param nchan : int
      Number of data channels processed
    @param nsamps : int
      Number of samples in a block
    @return: tuple
      The tuple consists of five arrays
      (means,rootmeansquare,kurtosis,skewness,sec)
      Each array shape is (nsamps,nchan).  On end-of-file a tuple of
      length-1 zero arrays and sec=0 is returned instead.
    """
    if diag_read:
        print("Reading", fd)
    counter = 0
    # accumulate rows in plain lists and convert once at the end;
    # repeated numpy.append() is quadratic in the number of rows
    mean_rows = []
    rms_rows = []
    kurt_rows = []
    skew_rows = []
    sec = 0
    while counter < nsamps:
        fd.flush()
        line = fd.readline()
        if line == '':
            # end-of-file
            # BUG FIX: 'zeros' was an unqualified name (NameError at EOF);
            # it must be numpy.zeros
            return (numpy.zeros(1), numpy.zeros(1), numpy.zeros(1),
                    numpy.zeros(1), 0)
        # Handle incomplete lines by waiting for the rest of the line
        while line[-1] != '\n':
            fd.flush()
            more = fd.readline()
            if more == '':
                # BUG FIX: an unterminated final line used to spin forever;
                # treat it as complete once no more data arrives
                break
            line += more
        # process the line
        line = line.strip()
        if line != '':
            if diag_read:
                print("Read:", line)
            # mean, rms, kurt and skew are lists whose length is the number
            # of channels
            sec, ms, mean, rms, kurt, skew = \
                process_STATS_ASCII_data_line(line, nchan)
            mean_rows.append(mean)
            rms_rows.append(rms)
            kurt_rows.append(kurt)
            skew_rows.append(skew)
            counter += 1
    # ndmin forces the arrays to have 2 dimensions: (nsamps, nchan)
    means = numpy.array(mean_rows, ndmin=2)
    rootmeansquare = numpy.array(rms_rows, ndmin=2)
    kurtosis = numpy.array(kurt_rows, ndmin=2)
    skewness = numpy.array(skew_rows, ndmin=2)
    return means, rootmeansquare, kurtosis, skewness, sec
def get_data_block(fd, nchan, nsamps):
    """
    Backward-compatible alias for get_STATS_ASCII_data_block().

    Older code imported this name; it simply forwards all arguments.

    @param fd : file descriptor
    @param nchan : int
      Number of data channels processed
    @param nsamps : int
      Number of samples in a block
    @return: tuple
      The tuple consists of five arrays
      (means,rootmeansquare,kurtosis,skewness,sec)
      Each array shape is (nsamps,nchan).
    """
    return get_STATS_ASCII_data_block(fd, nchan, nsamps)
def parse_STATS_ASCII_header(header):
    """
    Unpack an ASCII STATS header dictionary into named values.

    Frequencies are converted from Hz to MHz; the bandwidth is half the
    sample rate (Nyquist), also in MHz.

    @param header : dictionary
      Header dictionary of a STATS file.
    @return: tuple
      (year,doy,start_sec,freq,spc,vsr,nchan,bw,bps,nsamps)
    """
    MHZ = 1.e6
    return (header['YEAR'],
            header['DOY'],
            header['START_SEC'],
            header['RF_FREQ[HZ]'] / MHZ,              # MHz
            header['SPC_ID'],
            header['VSR_ID'],
            header['NOCHAN'],                         # number of sub-channels
            header['SAMPLE_RATE[HZ]'] / (2 * MHZ),    # bandwidth, MHz
            header['BITS_PER_SAMPLE'],
            header['OUTPUT_RATE[HZ]'])                # output samples/sec
def parse_STATS_header(header):
    """
    Extract the header from a binary STATS data file.

    This names the fields of the 12-element STATS binary file header.
    Field order: station id (10, 40, 60, 21); VSR id; subchannel id
    (0-3); bits per sample (1, 2, 4, 8 or 16); samples per second;
    hardware error flag (0 means no errors); year; day of year; second
    of day; frequency in Hz; statistics samples per second; number of
    output sub-channels.

    @param header : string of binary data
    @return: tuple
      (spcid, vsrid, chanid, bps, srate, errflg, year, doy, sec, freq,
      orate,nsubchan)
    """
    (spcid, vsrid, chanid, bps, srate, errflg,
     year, doy, sec, freq, orate, nsubchan) = header
    return (spcid, vsrid, chanid, bps, srate, errflg,
            year, doy, sec, freq, orate, nsubchan)
def get_binary_stats_header(fd):
    """Get the header from a binary stats file.

    There is an old format in which the first datum is a short with the
    station ID of 13. Otherwise the first word belongs to a field that
    allegedly holds the header size; that field is skipped and a fixed
    52-byte header is assumed.

    Notes
    =====
    Function parse_STATS_header() is a one-liner that translates the
    tuple members to variables with meaningful names.

    @param fd : file descriptor.
    @return: tuple.
      A tuple with the unpacked header followed by the size of the
      header (int).
    """
    fmt = '=4H Q HHH Q d Q H'
    lead = fd.read(2)
    # unpack returns a tuple
    first = struct.unpack_from('=H', lead)
    if diag_read:
        print("Header size =", first)
    if first[0] == 13:
        # old format: the station ID is the very first datum
        header_size = 48
        # read the remaining header
        raw = lead + fd.read(header_size - 2)
    else:
        # new format: a size field prepends the header; use a fixed size
        header_size = 52
        fd.seek(2, 1)  # skip the rest of the alleged header-size field
        raw = fd.read(header_size - 4)
    # This will change if header_size changes
    return struct.unpack_from(fmt, raw), header_size
def write_binary_stats_header(header):
    """
    Pack a stats header tuple into its binary on-disk representation.

    The inverse of get_binary_stats_header(): the 12 header fields are
    packed with the module's standard header format.

    @param header : tuple
    @return: binary string
    """
    return struct.pack('=4H Q HHH Q d Q H', *header)
def get_binary_stats_record(fd,header_size,index):
"""
Extracts a binary record at the specified record index.
If a particular time is wanted, then it is necessary to read the seconds
since midnight and the index (usually milliseconds) to verify and
possibly adjust the position.
Notes
=====
Two data channels are assumed.
@param fd : file descriptor.
@param header_size : int.
Header size in bytes.
@param index : long.
Index of record to be retrieved.
@return: tuple.
(DOY, start_sec, record_index , | |
below to reduce I/O ?
echo -e "\nMapping sample to assembly ... "
bwa mem -t {config[cores][metabat]} $fsampleID.fa *.fastq.gz > $id.sam
echo -e "\nConverting SAM to BAM with samtools view ... "
samtools view -@ {config[cores][metabat]} -Sb $id.sam > $id.bam
echo -e "\nSorting BAM file with samtools sort ... "
samtools sort -@ {config[cores][metabat]} -o $id.sort $id.bam
echo -e "\nRunning jgi_summarize_bam_contig_depths script to generate contig abundance/depth file ... "
jgi_summarize_bam_contig_depths --outputDepth $id.depth $id.sort
echo -e "\nCopying depth file to workspace"
mv $id.depth {output}
echo -e "\nRemoving temporary files ... "
rm *.fastq.gz *.sam *.bam
done
nSamples=$(ls {input.READS}|wc -l)
echo -e "\nDone mapping focal sample $fsampleID agains $nSamples samples in dataset folder."
echo -e "\nRunning jgi_summarize_bam_contig_depths for all sorted bam files ... "
jgi_summarize_bam_contig_depths --outputDepth $id.all.depth *.sort
echo -e "\nRunning metabat2 ... "
metabat2 -i $fsampleID.fa -a $id.all.depth -o $fsampleID
mv *.fa $id.all.depth $(dirname {output})
"""
# Bin the megahit assembly with MaxBin2, reusing the depth file produced
# by the metabat rule as the abundance input.
rule maxbin:
    input:
        assembly = rules.megahit.output,
        depth = rules.metabat.output
    output:
        directory(f'{config["path"]["root"]}/{config["folder"]["maxbin"]}/{{IDs}}/{{IDs}}.maxbin-bins')
    benchmark:
        f'{config["path"]["root"]}/benchmarks/{{IDs}}.maxbin.benchmark.txt'
    message:
        """
        Note that this rule uses of the output depth of metabat2 as an input to bin using maxbin2.
        """
    # Runs in $TMPDIR: unzip assembly, list depth files, run MaxBin2,
    # then move bins and summary files back to the workspace.
    shell:
        """
        set +u;source activate {config[envs][metabagpipes]};set -u;
        cp -r {input.assembly} {input.depth} $TMPDIR
        mkdir -p $(dirname $(dirname {output}))
        cd $TMPDIR
        echo -e "\nUnzipping assembly ... "
        gunzip contigs.fasta.gz
        echo -e "\nGenerating list of depth files based on metabat2 output ... "
        find $(basename {input.depth}) -name "*.depth" > abund.list
        echo -e "\nRunning maxbin2 ... "
        run_MaxBin.pl -contig contigs.fasta -out $(basename $(dirname {output})) -abund_list abund.list
        rm contigs.fasta *.gz
        mkdir $(basename {output})
        mkdir -p $(dirname {output})
        mv *.fasta $(basename {output})
        mv $(basename {output}) *.summary *.abundance $(dirname {output})
        """
# Bin the megahit assembly with CONCOCT: cut contigs into 10 kbp chunks,
# cross-map every sample in the dataset against the focal assembly,
# build a coverage table, cluster, and merge the chunk clustering back
# onto the original contigs.
rule concoct:
    input:
        contigs = rules.megahit.output,
        reads = f'{config["path"]["root"]}/{config["folder"]["qfiltered"]}'
    output:
        directory(f'{config["path"]["root"]}/{config["folder"]["concoct"]}/{{IDs}}/{{IDs}}.concoct-bins')
    benchmark:
        f'{config["path"]["root"]}/benchmarks/{{IDs}}.concoct.benchmark.txt'
    shell:
        """
        set +u;source activate {config[envs][metabagpipes]};set -u;
        mkdir -p $(dirname $(dirname {output}))
        fsampleID=$(echo $(basename $(dirname {input.contigs})))
        echo -e "\nCopying focal sample assembly $fsampleID to TMPDIR ... "
        cp {input.contigs} $TMPDIR
        cd $TMPDIR
        echo "Unzipping assembly ... "
        gunzip $(basename {input.contigs})
        echo -e "Done. \nCutting up contigs to 10kbp chunks (default), do not use this for mapping!"
        cut_up_fasta.py -c {config[params][cutfasta]} -o 0 -m contigs.fasta -b assembly_c10k.bed > assembly_c10k.fa
        echo -e "\nIndexing assembly of original contigs for mapping (not 10kbp chunks assembly file) ... "
        bwa index contigs.fasta
        echo -e "Done. \nPreparing to map focal sample against other samples ... "
        for folder in {input.reads}/*;do
            id=$(basename $folder)
            echo -e "\nCopying sample $id to be mapped againts the focal sample $fsampleID ..."
            cp $folder/*.gz .
            # Maybe I should be piping the lines below to reduce I/O ?
            echo -e "\nMapping sample to assembly ... "
            bwa mem -t {config[cores][concoct]} contigs.fasta *.fastq.gz > $id.sam
            echo -e "\nConverting SAM to BAM with samtools view ... "
            samtools view -@ {config[cores][concoct]} -Sb $id.sam > $id.bam
            echo -e "\nSorting BAM file with samtools sort ... "
            samtools sort -@ {config[cores][concoct]} -o $id.sort $id.bam
            echo -e "\nIndexing sorted BAM file with samtools index ... "
            samtools index $id.sort
            echo -e "\nRemoving temporary files ... "
            rm *.fastq.gz *.sam *.bam
        done
        echo -e "\nSummarizing sorted and indexed BAM files with concoct_coverage_table.py ... "
        concoct_coverage_table.py assembly_c10k.bed *.sort > coverage_table.tsv
        echo -e "\nRunning CONCOCT ... "
        concoct --coverage_file coverage_table.tsv --composition_file assembly_c10k.fa \
            -b $(basename $(dirname {output})) \
            -t {config[cores][concoct]} \
            -c {config[params][concoct]}
        echo -e "\nMerging clustering results into original contigs with merge_cutup_clustering.py ... "
        merge_cutup_clustering.py $(basename $(dirname {output}))_clustering_gt1000.csv > $(basename $(dirname {output}))_clustering_merged.csv
        echo -e "\nExtracting bins ... "
        mkdir -p $(basename {output})
        extract_fasta_bins.py contigs.fasta $(basename $(dirname {output}))_clustering_merged.csv --output_path $(basename {output})
        mkdir -p $(dirname {output})
        mv $(basename {output}) *.txt *.csv $(dirname {output})
        """
# Consolidate the CONCOCT, metabat2 and maxbin2 bin sets for one sample
# with the metaWRAP bin_refinement module, applying the configured
# completeness/contamination thresholds.
rule binRefine:
    input:
        concoct = f'{config["path"]["root"]}/{config["folder"]["concoct"]}/{{IDs}}/{{IDs}}.concoct-bins',
        metabat = f'{config["path"]["root"]}/{config["folder"]["metabat"]}/{{IDs}}/{{IDs}}.metabat-bins',
        maxbin = f'{config["path"]["root"]}/{config["folder"]["maxbin"]}/{{IDs}}/{{IDs}}.maxbin-bins'
    output:
        directory(f'{config["path"]["root"]}/{config["folder"]["refined"]}/{{IDs}}')
    benchmark:
        f'{config["path"]["root"]}/benchmarks/{{IDs}}.binRefine.benchmark.txt'
    shell:
        """
        set +u;source activate {config[envs][metawrap]};set -u;
        mkdir -p $(dirname {output})
        mkdir -p {output}
        cd $TMPDIR
        echo "Copying bins from CONCOCT, metabat2, and maxbin2 to tmpdir ... "
        cp -r {input.concoct} {input.metabat} {input.maxbin} $TMPDIR
        echo "Renaming bin folders to avoid errors with metaWRAP ... "
        mv $(basename {input.concoct}) $(echo $(basename {input.concoct})|sed 's/-bins//g')
        mv $(basename {input.metabat}) $(echo $(basename {input.metabat})|sed 's/-bins//g')
        mv $(basename {input.maxbin}) $(echo $(basename {input.maxbin})|sed 's/-bins//g')
        echo "Running metaWRAP bin refinement module ... "
        metaWRAP bin_refinement -o . \
            -A $(echo $(basename {input.concoct})|sed 's/-bins//g') \
            -B $(echo $(basename {input.metabat})|sed 's/-bins//g') \
            -C $(echo $(basename {input.maxbin})|sed 's/-bins//g') \
            -t {config[cores][refine]} \
            -m {config[params][refineMem]} \
            -c {config[params][refineComp]} \
            -x {config[params][refineCont]}
        rm -r $(echo $(basename {input.concoct})|sed 's/-bins//g') $(echo $(basename {input.metabat})|sed 's/-bins//g') $(echo $(basename {input.maxbin})|sed 's/-bins//g') work_files
        mv * {output}
        """
# Reassemble the refined bins with the metaWRAP reassemble_bins module,
# using the quality-filtered reads for the sample.
rule binReassemble:
    input:
        READS = rules.qfilter.output,
        refinedBins = rules.binRefine.output
    output:
        directory(f'{config["path"]["root"]}/{config["folder"]["reassembled"]}/{{IDs}}')
    benchmark:
        f'{config["path"]["root"]}/benchmarks/{{IDs}}.binReassemble.benchmark.txt'
    shell:
        """
        set +u;source activate {config[envs][metawrap]};set -u;
        mkdir -p $(dirname {output})
        cp -r {input.refinedBins}/metawrap_*_bins {input.READS} $TMPDIR
        cd $TMPDIR
        echo "Running metaWRAP bin reassembly ... "
        metaWRAP reassemble_bins -o $(basename {output}) \
            -b metawrap_*_bins \
            -1 $(basename {input.READS}) \
            -2 $(basename {input.READS}) \
            -t {config[cores][reassemble]} \
            -m {config[params][reassembleMem]} \
            -c {config[params][reassembleComp]} \
            -x {config[params][reassembleCont]}
        rm -r metawrap_*_bins
        rm -r $(basename {output})/work_files
        rm *.fastq.gz
        mv * $(dirname {output})
        """
rule binningVis:
input:
f'{config["path"]["root"]}'
output:
text = f'{config["path"]["root"]}/{config["folder"]["stats"]}/reassembled_bins.stats',
plot = f'{config["path"]["root"]}/{config["folder"]["stats"]}/binningVis.pdf'
message:
"""
Generate bar plot with number of bins and density plot of bin contigs,
total length, completeness, and contamination across different tools.
"""
shell:
"""
set +u;source activate {config[envs][metabagpipes]};set -u;
# READ CONCOCT BINS
echo "Generating concoct_bins.stats file containing bin ID, number of contigs, and length ... "
cd {input}/{config[folder][concoct]}
for folder in */;do
var=$(echo $folder|sed 's|/||g'); # Define sample name
for bin in $folder*concoct-bins/*.fa;do
name=$(echo $bin | sed "s|^.*/|$var.bin.|g" | sed 's/.fa//g'); # Define bin name
N=$(less $bin | grep -c ">");
L=$(less $bin |grep ">"|cut -d '-' -f4|sed 's/len=//g'|awk '{{sum+=$1}}END{{print sum}}')
echo "Reading bin $bin ... Contigs: $N , Length: $L "
echo $name $N $L >> concoct_bins.stats;
done;
done
mv *.stats {input}/{config[folder][reassembled]}
echo "Done reading CONCOCT bins, moving concoct_bins.stats file to $(echo {input}/{config[folder][reassembled]}) ."
# READ METABAT2 BINS
echo "Generating metabat_bins.stats file containing bin ID, number of contigs, and length ... "
cd {input}/{config[folder][metabat]}
for folder in */;do
var=$(echo $folder | sed 's|/||'); # Define sample name
for bin in $folder*metabat-bins/*.fa;do
name=$(echo $bin|sed 's/.fa//g'|sed 's|^.*/||g'|sed "s/^/$var./g"); # Define bin name
N=$(less $bin | grep -c ">");
L=$(less $bin |grep ">"|cut -d '-' -f4|sed 's/len=//g'|awk '{{sum+=$1}}END{{print sum}}')
echo "Reading bin $bin ... Contigs: $N , Length: $L "
echo $name $N $L >> metabat_bins.stats;
done;
done
mv *.stats {input}/{config[folder][reassembled]}
echo "Done reading metabat2 bins, moving metabat_bins.stats file to $(echo {input}/{config[folder][reassembled]}) ."
# READ MAXBIN2 BINS
echo "Generating maxbin_bins.stats file containing bin ID, number of contigs, and length ... "
cd {input}/{config[folder][maxbin]}
for folder in */;do
for bin in $folder*maxbin-bins/*.fasta;do
name=$(echo $bin | sed 's/.fasta//g' | sed 's|^.*/||g'); # Define bin name
N=$(less $bin | grep -c ">");
L=$(less $bin |grep ">"|cut -d '-' -f4|sed 's/len=//g'|awk '{{sum+=$1}}END{{print sum}}')
echo "Reading bin $bin ... Contigs: $N , Length: $L "
echo $name $N $L >> maxbin_bins.stats;
done;
done
mv *.stats {input}/{config[folder][reassembled]}
echo "Done reading maxbin2 bins, moving maxbin_bins.stats file to $(echo {input}/{config[folder][reassembled]}) ."
# READ METAWRAP REFINED BINS
echo "Generating refined_bins.stats file containing bin ID, number of contigs, and length ... "
cd {input}/{config[folder][refined]}
for folder in */;do
samp=$(echo $folder | sed 's|/||'); # Define sample name
for bin in $folder*metawrap_*_bins/*.fa;do
name=$(echo $bin | sed 's/.fa//g'|sed 's|^.*/||g'|sed "s/^/$samp./g"); # Define bin name
N=$(less $bin | grep -c ">");
L=$(less $bin |grep ">"|cut -d '-' -f4|sed 's/len_//g'|awk '{{sum+=$1}}END{{print sum}}')
echo "Reading bin $bin ... Contigs: $N , Length: $L "
echo $name $N $L >> refined_bins.stats;
done;
done
echo "Done reading metawrap refined bins ... "
# READ METAWRAP REFINED CHECKM OUTPUT
echo "Generating CheckM summary files across samples: concoct.checkm, metabat.checkm, maxbin.checkm, and refined.checkm ... "
for folder in */;do
var=$(echo $folder|sed 's|/||g'); # Define sample | |
bias correction for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :] = lwdown_in[:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to place temporary longwave array back into forcing object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
del lwdown_in
del ind_valid
def ncar_wspd_hrrr_bias_correct(input_forcings, config_options, mpi_config, force_num):
    """
    Apply the NCAR wind-speed bias correction for HRRR forcings.

    The U and V wind components are combined into speed/direction, a scalar
    bias is added to the speed (a fixed value for AnA runs, otherwise a
    forecast-hour-dependent value), the speed is clamped at zero, and the
    component selected by ``force_num`` is written back into
    ``input_forcings.final_forcings``.

    Parameters
    ----------
    input_forcings : forcing object holding ``final_forcings``, the GRIB
        variable list and the input-to-output index mapping.
    config_options : run configuration (current output step, ana_flag,
        global no-data value, error/status message slots).
    mpi_config : MPI state; only rank 0 logs the status message.
    force_num : index of the forcing variable being corrected (the UGRD
        or VGRD slot).
    """
    if mpi_config.rank == 0:
        config_options.statusMsg = "Performing NCAR bias correction on incoming windspeed for input: " + \
                                   input_forcings.productName + " at step " + str(config_options.current_output_step)
        err_handler.log_msg(config_options, mpi_config)
    # need to get wind speed from U, V components
    ugrd_idx = input_forcings.grib_vars.index('UGRD')
    vgrd_idx = input_forcings.grib_vars.index('VGRD')
    ugrid_in = input_forcings.final_forcings[input_forcings.input_map_output[ugrd_idx], :, :]
    vgrid_in = input_forcings.final_forcings[input_forcings.input_map_output[vgrd_idx], :, :]
    wdir = np.arctan2(vgrid_in, ugrid_in)
    wspd = np.sqrt(np.square(ugrid_in) + np.square(vgrid_in))
    if config_options.ana_flag:
        wspd_bias_corr = 0.35  # fixed for AnA
    else:
        # short-range net bias by forecast hour (1-based index)
        wspd_net_bias_sr = [0.18, 0.15, 0.13, 0.12, 0.11, 0.10, 0.08, 0.07, 0.06, 0.05,
                            0.03, 0.02, 0.01, -0.01, -0.02, -0.03, -0.04, -0.05]
        wspd_bias_corr = wspd_net_bias_sr[config_options.current_output_step - 1]
    wspd = wspd + wspd_bias_corr
    # speeds may not go negative after the correction
    wspd = np.where(wspd < 0, 0, wspd)
    ugrid_out = wspd * np.cos(wdir)
    vgrid_out = wspd * np.sin(wdir)
    # TODO: cache the "other" value so we don't repeat this calculation unnecessarily
    bias_corrected = ugrid_out if force_num == ugrd_idx else vgrid_out
    wind_in = None
    try:
        wind_in = input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :]
    except NumpyExceptions as npe:
        config_options.errMsg = "Unable to extract incoming windspeed from forcing object for: " + \
                                input_forcings.productName + " (" + str(npe) + ")"
        err_handler.log_critical(config_options, mpi_config)
    err_handler.check_program_status(config_options, mpi_config)
    ind_valid = None
    try:
        # only correct cells that hold real data, not the no-data value
        ind_valid = np.where(wind_in != config_options.globalNdv)
    except NumpyExceptions as npe:
        config_options.errMsg = "Unable to calculate valid index in incoming windspeed for: " + \
                                input_forcings.productName + " (" + str(npe) + ")"
        err_handler.log_critical(config_options, mpi_config)
    err_handler.check_program_status(config_options, mpi_config)
    try:
        wind_in[ind_valid] = bias_corrected[ind_valid]
    except NumpyExceptions as npe:
        config_options.errMsg = "Unable to perform windspeed bias correction for: " + \
                                input_forcings.productName + " (" + str(npe) + ")"
        err_handler.log_critical(config_options, mpi_config)
    err_handler.check_program_status(config_options, mpi_config)
    try:
        input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :] = wind_in[:, :]
    except NumpyExceptions as npe:
        config_options.errMsg = "Unable to place temporary windspeed array back into forcing object for: " + \
                                input_forcings.productName + " (" + str(npe) + ")"
        err_handler.log_critical(config_options, mpi_config)
    err_handler.check_program_status(config_options, mpi_config)
    del wind_in
    del ind_valid
def ncar_wspd_gfs_bias_correct(input_forcings, config_options, mpi_config, force_num):
    """
    Apply an NCAR-derived bias correction to the GFS 10 m wind speed and
    write the corrected U or V component back into the forcing object.

    The correction adjusts wind *speed* (magnitude) only; wind direction is
    preserved.  The bias model is a constant offset plus a forecast-hour
    multiplier plus a diurnal sine term (coefficients presumably fit by NCAR
    against observations -- TODO confirm provenance).  Relies on module-level
    names defined elsewhere in this file: err_handler, NumpyExceptions,
    TWO_PI, math, np.

    :param input_forcings: forcing object; final_forcings is updated in place.
    :param config_options: run configuration (current date/step, global NDV,
                           status/error message slots).
    :param mpi_config: MPI wrapper used for rank-aware logging and aborts.
    :param force_num: index of the component being corrected (UGRD or VGRD).
    :return: None. input_forcings.final_forcings is modified in place.
    """
    if mpi_config.rank == 0:
        config_options.statusMsg = "Performing NCAR bias correction on incoming windspeed for input: " + \
                                   input_forcings.productName
        err_handler.log_msg(config_options, mpi_config)
    # Time inputs for the diurnal term: hour of day and forecast hour.
    date_current = config_options.current_output_date
    hh = float(date_current.hour)
    fhr = config_options.current_output_step
    # Medium-range (mr) bias-model coefficients.
    wspd_net_bias_mr = -0.20      # constant speed bias (m/s)
    wspd_fhr_mult_mr = 0.00       # per-forecast-hour trend (currently zero / disabled)
    wspd_diurnal_ampl_mr = -0.32  # diurnal amplitude (m/s)
    wspd_diurnal_offs_mr = -1.1   # diurnal phase offset (radians) -- TODO confirm units
    # need to get wind speed from U, V components
    ugrd_idx = input_forcings.grib_vars.index('UGRD')
    vgrd_idx = input_forcings.grib_vars.index('VGRD')
    ugrid_in = input_forcings.final_forcings[input_forcings.input_map_output[ugrd_idx], :, :]
    vgrid_in = input_forcings.final_forcings[input_forcings.input_map_output[vgrd_idx], :, :]
    # Decompose the wind into direction and magnitude; only magnitude is corrected.
    wdir = np.arctan2(vgrid_in, ugrid_in)
    wspd = np.sqrt(np.square(ugrid_in) + np.square(vgrid_in))
    wspd_bias_corr = wspd_net_bias_mr + wspd_fhr_mult_mr * fhr + \
        wspd_diurnal_ampl_mr * math.sin(wspd_diurnal_offs_mr + hh / 24 * TWO_PI)
    wspd = wspd + wspd_bias_corr
    # Clamp: an additive correction can drive the speed negative, which is unphysical.
    wspd = np.where(wspd < 0, 0, wspd)
    # Recompose U/V from the corrected speed and the original direction.
    ugrid_out = wspd * np.cos(wdir)
    vgrid_out = wspd * np.sin(wdir)
    # TODO: cache the "other" value so we don't repeat this calculation unnecessarily
    bias_corrected = ugrid_out if force_num == ugrd_idx else vgrid_out
    # The remaining steps each log per-rank on failure, then hit the collective
    # check_program_status so all MPI ranks stop together.
    wind_in = None
    try:
        wind_in = input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :]
    except NumpyExceptions as npe:
        config_options.errMsg = "Unable to extract incoming windspeed from forcing object for: " + \
                                input_forcings.productName + " (" + str(npe) + ")"
        err_handler.log_critical(config_options, mpi_config)
    err_handler.check_program_status(config_options, mpi_config)
    ind_valid = None
    try:
        # Only correct cells that hold real data (skip the no-data value).
        ind_valid = np.where(wind_in != config_options.globalNdv)
    except NumpyExceptions as npe:
        config_options.errMsg = "Unable to calculate valid index in incoming windspeed for: " + \
                                input_forcings.productName + " (" + str(npe) + ")"
        err_handler.log_critical(config_options, mpi_config)
    err_handler.check_program_status(config_options, mpi_config)
    try:
        wind_in[ind_valid] = bias_corrected[ind_valid]
    except NumpyExceptions as npe:
        config_options.errMsg = "Unable to perform windspeed bias correction for: " + \
                                input_forcings.productName + " (" + str(npe) + ")"
        err_handler.log_critical(config_options, mpi_config)
    err_handler.check_program_status(config_options, mpi_config)
    try:
        input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :] = wind_in[:, :]
    except NumpyExceptions as npe:
        config_options.errMsg = "Unable to place temporary windspeed array back into forcing object for: " + \
                                input_forcings.productName + " (" + str(npe) + ")"
        err_handler.log_critical(config_options, mpi_config)
    err_handler.check_program_status(config_options, mpi_config)
    # Release the temporaries promptly; the corrected data now lives in final_forcings.
    del wind_in
    del ind_valid
def cfsv2_nldas_nwm_bias_correct(input_forcings, config_options, mpi_config, force_num):
"""
Routine to run CDF/PDF bias correction parametric corrections
SPECIFIC to the NWM long-range configuration.
:param mpi_config:
:param input_forcings:
:param config_options:
:param force_num:
:return:
"""
# TODO: move these into a (.py or .json) configuration file
# Create a dictionary that maps forcing numbers to the expected NetCDF variable names, etc.
nldas_param1_vars = {
2: 'UGRD10M_PARAM_1',
3: 'VGRD10M_PARAM_1',
6: 'LW_PARAM_1',
4: 'PRATE_PARAM_1',
0: 'T2M_PARAM_1',
1: 'Q2M_PARAM_1',
7: 'PSFC_PARAM_1',
5: 'SW_PARAM_1'
}
nldas_param2_vars = {
2: 'UGRD10M_PARAM_2',
3: 'VGRD10M_PARAM_2',
6: 'LW_PARAM_2',
4: 'PRATE_PARAM_2',
0: 'T2M_PARAM_2',
1: 'Q2M_PARAM_2',
7: 'PSFC_PARAM_2',
5: 'SW_PARAM_2'
}
cfs_param_path_vars = {
2: 'ugrd',
3: 'vgrd',
6: 'dlwsfc',
4: 'prate',
0: 'tmp2m',
1: 'q2m',
7: 'pressfc',
5: 'dswsfc'
}
# Specify the min/max ranges on CDF/PDF values for each variable
val_range1 = {
2: -50.0,
3: -50.0,
6: 1.0,
4: 0.01,
0: 200.0,
1: 0.01,
7: 50000.0,
5: 0.0
}
val_range2 = {
2: 50.0,
3: 50.0,
6: 800.0,
4: 100.0,
0: 330.0,
1: 40.0,
7: 1100000.0,
5: 0.0
}
val_bins = {
2: 1000,
3: 1000,
6: 4000,
4: 2000,
0: 1300,
1: 1000,
7: 3000,
5: 0
}
if mpi_config.rank == 0:
config_options.statusMsg = "Running NLDAS-CFSv2 CDF/PDF bias correction on variable: " + \
input_forcings.netcdf_var_names[force_num]
err_handler.log_msg(config_options, mpi_config)
# Check to ensure we are running with CFSv2 here....
if input_forcings.productName != "CFSv2_6Hr_Global_GRIB2":
config_options.errMsg = "Attempting to run CFSv2-NLDAS bias correction on: " + input_forcings.productName
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Open the necessary parameter grids, which are on the global CFSv2 grid, then scatter them out
# to the various processors.
id_nldas_param = nldas_param_file = None
if mpi_config.rank == 0:
nldas_param_file = input_forcings.paramDir + "/NLDAS_Climo/nldas2_" + \
config_options.current_output_date.strftime('%m%d%H') + \
"_dist_params.nc"
if not os.path.isfile(nldas_param_file):
config_options.errMsg = "Unable to locate necessary bias correction parameter file: " + \
nldas_param_file
err_handler.log_critical(config_options, mpi_config)
# Open the NetCDF file.
try:
id_nldas_param = Dataset(nldas_param_file, 'r')
except OSError as err:
config_options.errMsg = "Unable to open parameter file: " + nldas_param_file + " (" + str(err) + ")"
err_handler.log_critical(config_options, mpi_config)
raise err
# Ensure dimensions/variables are as expected.
if 'lat_0' not in id_nldas_param.dimensions.keys():
config_options.errMsg = "Expected to find lat_0 dimension in: " + nldas_param_file
err_handler.log_critical(config_options, mpi_config)
if 'lon_0' not in id_nldas_param.dimensions.keys():
config_options.errMsg = "Expected to find lon_0 dimension in: " + nldas_param_file
err_handler.log_critical(config_options, mpi_config)
if id_nldas_param.dimensions['lat_0'].size != PARAM_NY:
config_options.errMsg = "Expected lat_0 size is {} - found size of: ".format(PARAM_NY) + \
str(id_nldas_param.dimensions['lat_0'].size) + " in: " + nldas_param_file
err_handler.log_critical(config_options, mpi_config)
if id_nldas_param.dimensions['lon_0'].size != PARAM_NX:
config_options.errMsg = "Expected lon_0 size is {} - found size of: ".format(PARAM_NX) + \
str(id_nldas_param.dimensions['lon_0'].size) + " in: " + nldas_param_file
err_handler.log_critical(config_options, mpi_config)
if nldas_param1_vars[force_num] not in id_nldas_param.variables.keys():
config_options.errMsg = "Expected variable: " + nldas_param1_vars[force_num] + " not found " + \
"in: " + nldas_param_file
err_handler.log_critical(config_options, mpi_config)
if nldas_param2_vars[force_num] not in id_nldas_param.variables.keys():
config_options.errMsg = "Expected variable: " + nldas_param2_vars[force_num] + " not found " + \
"in: " + nldas_param_file
err_handler.log_critical(config_options, mpi_config)
if force_num == 4:
if 'ZERO_PRECIP_PROB' not in id_nldas_param.variables.keys():
config_options.errMsg = "Expected variable: ZERO_PRECIP_PROB not found in: " + \
nldas_param_file
err_handler.log_critical(config_options, mpi_config)
nldas_param_1 = None
try:
nldas_param_1 = id_nldas_param.variables[nldas_param1_vars[force_num]][:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract: " + nldas_param1_vars[force_num] + \
" from: " + nldas_param_file + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
nldas_param_2 = None
try:
nldas_param_2 = id_nldas_param.variables[nldas_param2_vars[force_num]][:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract: " + nldas_param2_vars[force_num] + \
" from: " + nldas_param_file + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
if nldas_param_1.shape[0] != PARAM_NY or nldas_param_1.shape[1] != PARAM_NX:
config_options.errMsg = "Parameter variable: " + nldas_param1_vars[force_num] + " from: " + \
nldas_param_file + " not of shape [{},{}].".format(PARAM_NY, PARAM_NX)
err_handler.log_critical(config_options, mpi_config)
if nldas_param_2.shape[0] != PARAM_NY or nldas_param_2.shape[1] != PARAM_NX:
config_options.errMsg = "Parameter variable: | |
HTTP header `Accept`
header_params['Accept'] = self._select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self._select_header_content_type(
['application/json'])
body_params = request
# Authentication setting
auth_settings = ['JWT']
http_request_object = HttpRequest(path, None, None, header_params, None, body_params, None, None, auth_settings)
return self._make_request(http_request_object, 'PUT', 'AiNameFormatted')
def genderize(self, request: AiNameGenderizeRequest) -> AiNameGenderHypothesisList:
"""Detect person's gender from name string.
:param request: AiNameGenderizeRequest object with parameters
:type request: AiNameGenderizeRequest
:return: AiNameGenderHypothesisList
"""
# verify the required parameter 'name' is set
if request.name is None:
raise ValueError("Missing the required parameter `name` when calling `genderize`")
collection_formats = {}
path = '/email/AiName/genderize'
path_params = {}
query_params = []
path_parameter = '{' + self._lowercase_first_letter('name') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.name if request.name is not None else '')
else:
if request.name is not None:
query_params.append((self._lowercase_first_letter('name'), request.name))
path_parameter = '{' + self._lowercase_first_letter('language') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.language if request.language is not None else '')
else:
if request.language is not None:
query_params.append((self._lowercase_first_letter('language'), request.language))
path_parameter = '{' + self._lowercase_first_letter('location') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.location if request.location is not None else '')
else:
if request.location is not None:
query_params.append((self._lowercase_first_letter('location'), request.location))
path_parameter = '{' + self._lowercase_first_letter('encoding') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.encoding if request.encoding is not None else '')
else:
if request.encoding is not None:
query_params.append((self._lowercase_first_letter('encoding'), request.encoding))
path_parameter = '{' + self._lowercase_first_letter('script') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.script if request.script is not None else '')
else:
if request.script is not None:
query_params.append((self._lowercase_first_letter('script'), request.script))
path_parameter = '{' + self._lowercase_first_letter('style') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.style if request.style is not None else '')
else:
if request.style is not None:
query_params.append((self._lowercase_first_letter('style'), request.style))
form_params = []
local_var_files = []
header_params = {}
# HTTP header `Accept`
header_params['Accept'] = self._select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self._select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['JWT']
http_request_object = HttpRequest(path, path_params, query_params, header_params, form_params, None, local_var_files,
collection_formats, auth_settings)
return self._make_request(http_request_object, 'GET', 'AiNameGenderHypothesisList')
def genderize_parsed(self, request: AiNameParsedRequest) -> AiNameGenderHypothesisList:
"""Detect person's gender from parsed name.
:param request: Gender detection request data.
:type request: AiNameParsedRequest
:return: AiNameGenderHypothesisList
"""
# verify the required parameter 'request' is set
if request is None:
raise ValueError("Missing the required parameter `request` when calling `genderize_parsed`")
collection_formats = {}
path = '/email/AiName/genderize-parsed'
header_params = {}
# HTTP header `Accept`
header_params['Accept'] = self._select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self._select_header_content_type(
['application/json'])
body_params = request
# Authentication setting
auth_settings = ['JWT']
http_request_object = HttpRequest(path, None, None, header_params, None, body_params, None, None, auth_settings)
return self._make_request(http_request_object, 'PUT', 'AiNameGenderHypothesisList')
def match(self, request: AiNameMatchRequest) -> AiNameMatchResult:
"""Compare people's names. Uses options for comparing instructions.
:param request: AiNameMatchRequest object with parameters
:type request: AiNameMatchRequest
:return: AiNameMatchResult
"""
# verify the required parameter 'name' is set
if request.name is None:
raise ValueError("Missing the required parameter `name` when calling `match`")
# verify the required parameter 'other_name' is set
if request.other_name is None:
raise ValueError("Missing the required parameter `other_name` when calling `match`")
collection_formats = {}
path = '/email/AiName/match'
path_params = {}
query_params = []
path_parameter = '{' + self._lowercase_first_letter('name') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.name if request.name is not None else '')
else:
if request.name is not None:
query_params.append((self._lowercase_first_letter('name'), request.name))
path_parameter = '{' + self._lowercase_first_letter('otherName') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.other_name if request.other_name is not None else '')
else:
if request.other_name is not None:
query_params.append((self._lowercase_first_letter('otherName'), request.other_name))
path_parameter = '{' + self._lowercase_first_letter('language') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.language if request.language is not None else '')
else:
if request.language is not None:
query_params.append((self._lowercase_first_letter('language'), request.language))
path_parameter = '{' + self._lowercase_first_letter('location') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.location if request.location is not None else '')
else:
if request.location is not None:
query_params.append((self._lowercase_first_letter('location'), request.location))
path_parameter = '{' + self._lowercase_first_letter('encoding') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.encoding if request.encoding is not None else '')
else:
if request.encoding is not None:
query_params.append((self._lowercase_first_letter('encoding'), request.encoding))
path_parameter = '{' + self._lowercase_first_letter('script') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.script if request.script is not None else '')
else:
if request.script is not None:
query_params.append((self._lowercase_first_letter('script'), request.script))
path_parameter = '{' + self._lowercase_first_letter('style') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.style if request.style is not None else '')
else:
if request.style is not None:
query_params.append((self._lowercase_first_letter('style'), request.style))
form_params = []
local_var_files = []
header_params = {}
# HTTP header `Accept`
header_params['Accept'] = self._select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self._select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['JWT']
http_request_object = HttpRequest(path, path_params, query_params, header_params, form_params, None, local_var_files,
collection_formats, auth_settings)
return self._make_request(http_request_object, 'GET', 'AiNameMatchResult')
def match_parsed(self, request: AiNameMatchParsedRequest) -> AiNameMatchResult:
"""Compare people's parsed names and attributes. Uses options for comparing instructions.
:param request: Parsed names to match.
:type request: AiNameMatchParsedRequest
:return: AiNameMatchResult
"""
# verify the required parameter 'request' is set
if request is None:
raise ValueError("Missing the required parameter `request` when calling `match_parsed`")
collection_formats = {}
path = '/email/AiName/match-parsed'
header_params = {}
# HTTP header `Accept`
header_params['Accept'] = self._select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self._select_header_content_type(
['application/json'])
body_params = request
# Authentication setting
auth_settings = ['JWT']
http_request_object = HttpRequest(path, None, None, header_params, None, body_params, None, None, auth_settings)
return self._make_request(http_request_object, 'PUT', 'AiNameMatchResult')
def parse(self, request: AiNameParseRequest) -> AiNameComponentList:
"""Parse name to components.
:param request: AiNameParseRequest object with parameters
:type request: AiNameParseRequest
:return: AiNameComponentList
"""
# verify the required parameter 'name' is set
if request.name is None:
raise ValueError("Missing the required parameter `name` when calling `parse`")
collection_formats = {}
path = '/email/AiName/parse'
path_params = {}
query_params = []
path_parameter = '{' + self._lowercase_first_letter('name') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.name if request.name is not None else '')
else:
if request.name is not None:
query_params.append((self._lowercase_first_letter('name'), request.name))
path_parameter = '{' + self._lowercase_first_letter('language') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.language if request.language is not None else '')
else:
if request.language is not None:
query_params.append((self._lowercase_first_letter('language'), request.language))
path_parameter = '{' + self._lowercase_first_letter('location') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.location if request.location is not None else '')
else:
if request.location is not None:
query_params.append((self._lowercase_first_letter('location'), request.location))
path_parameter = '{' + self._lowercase_first_letter('encoding') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.encoding if request.encoding is not None else '')
else:
if request.encoding is not None:
query_params.append((self._lowercase_first_letter('encoding'), request.encoding))
path_parameter = '{' + self._lowercase_first_letter('script') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.script if request.script is not None else '')
else:
if request.script is not None:
query_params.append((self._lowercase_first_letter('script'), request.script))
path_parameter = '{' + self._lowercase_first_letter('style') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.style if request.style is not None else '')
else:
if request.style is not None:
query_params.append((self._lowercase_first_letter('style'), request.style))
form_params = []
local_var_files = []
header_params = {}
# HTTP header `Accept`
header_params['Accept'] = self._select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self._select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['JWT']
http_request_object = HttpRequest(path, path_params, query_params, header_params, form_params, None, local_var_files,
collection_formats, auth_settings)
return self._make_request(http_request_object, 'GET', 'AiNameComponentList')
def parse_email_address(self, request: AiNameParseEmailAddressRequest) -> AiNameExtractedList:
"""Parse person's name out of an email address.
:param request: AiNameParseEmailAddressRequest object with parameters
:type request: AiNameParseEmailAddressRequest
:return: AiNameExtractedList
"""
# verify the required parameter 'email_address' is set
if request.email_address is None:
raise ValueError("Missing the required parameter `email_address` when calling `parse_email_address`")
collection_formats = {}
path = '/email/AiName/parse-email-address'
path_params = {}
query_params = []
path_parameter = '{' + self._lowercase_first_letter('emailAddress') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.email_address if request.email_address is not None else '')
else:
if request.email_address is not None:
query_params.append((self._lowercase_first_letter('emailAddress'), request.email_address))
path_parameter = '{' + self._lowercase_first_letter('language') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.language if request.language is not None else '')
else:
if request.language is not None:
query_params.append((self._lowercase_first_letter('language'), request.language))
path_parameter = '{' + self._lowercase_first_letter('location') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.location if request.location is not None else '')
else:
| |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import (abs, asarray, cos, exp, floor, pi, sign, sin, sqrt, sum,
size, tril, isnan, atleast_2d, repeat)
from numpy.testing import assert_almost_equal
from .go_benchmark import Benchmark
class CarromTable(Benchmark):
    r"""
    CarromTable objective function.

    The CarromTable [1]_ global optimization problem is a multimodal
    minimization problem defined as follows:

    .. math::

        f_{\text{CarromTable}}(x) = - \frac{1}{30}\left(\cos(x_1)
        \cos(x_2) e^{\left|1 - \frac{\sqrt{x_1^2 + x_2^2}}{\pi}\right|}\right)^2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -24.15681551650653` for :math:`x_i = \pm
    9.646157266348881` for :math:`i = 1, 2`

    .. [1] <NAME>. & <NAME>. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    NOTE: the docstring is a raw string; the original was not, so LaTeX
    sequences such as ``\f`` and ``\t`` were silently interpreted as
    form-feed/tab, and the remaining unknown escapes raise SyntaxWarning
    on Python >= 3.12.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))

        # Four symmetric optima, one per quadrant.
        self.global_optimum = [(9.646157266348881, 9.646134286497169),
                               (-9.646157266348881, 9.646134286497169),
                               (9.646157266348881, -9.646134286497169),
                               (-9.646157266348881, -9.646134286497169)]
        self.fglob = -24.15681551650653

    def fun(self, x, *args):
        # Evaluate the objective at x (2-vector); increments the evaluation counter.
        self.nfev += 1

        u = cos(x[0]) * cos(x[1])
        v = sqrt(x[0] ** 2 + x[1] ** 2)
        return -((u * exp(abs(1 - v / pi))) ** 2) / 30.
class Chichinadze(Benchmark):
    r"""
    Chichinadze objective function.

    This class defines the Chichinadze [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Chichinadze}}(x) = x_{1}^{2} - 12 x_{1}
        + 8 \sin\left(\frac{5}{2} \pi x_{1}\right)
        + 10 \cos\left(\frac{1}{2} \pi x_{1}\right) + 11
        - 0.2 \frac{\sqrt{5}}{e^{\frac{1}{2} \left(x_{2} -0.5 \right)^{2}}}

    with :math:`x_i \in [-30, 30]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -42.94438701899098` for :math:`x =
    [6.189866586965680, 0.5]`

    .. [1] <NAME>. Global Optimization Benchmarks and AMPGO retrieved 2015

    NOTE: docstring made raw to stop Python interpreting the LaTeX escapes
    (``\t``, ``\f``) and to avoid invalid-escape SyntaxWarnings on 3.12+.

    TODO: Jamil#33 has a dividing factor of 2 in the sin term. However, f(x)
    for the given solution does not give the global minimum. i.e. the equation
    is at odds with the solution.
    Only by removing the dividing factor of 2, i.e. `8 * sin(5 * pi * x[0])`
    does the given solution result in the given global minimum.
    Do we keep the result or equation?
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-30.0] * self.N, [30.0] * self.N))
        self.custom_bounds = [(-10, 10), (-10, 10)]

        self.global_optimum = [[6.189866586965680, 0.5]]
        self.fglob = -42.94438701899098

    def fun(self, x, *args):
        # Evaluate the objective at x (2-vector); increments the evaluation counter.
        self.nfev += 1

        return (x[0] ** 2 - 12 * x[0] + 11 + 10 * cos(pi * x[0] / 2)
                + 8 * sin(5 * pi * x[0] / 2)
                - 1.0 / sqrt(5) * exp(-((x[1] - 0.5) ** 2) / 2))
class Cigar(Benchmark):
    r"""
    Cigar objective function.

    This class defines the Cigar [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Cigar}}(x) = x_1^2 + 10^6 \sum_{i=2}^{n} x_i^2

    Here, :math:`n` represents the number of dimensions and :math:`x_i \in
    [-100, 100]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    NOTE: docstring made raw so the LaTeX ``\text``/``\sum`` sequences are
    not treated as (invalid) string escapes (SyntaxWarning on 3.12+).
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-100.0] * self.N,
                           [100.0] * self.N))
        self.custom_bounds = [(-5, 5), (-5, 5)]

        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        # Evaluate the objective at x (n-vector); increments the evaluation counter.
        self.nfev += 1

        return x[0] ** 2 + 1e6 * sum(x[1:] ** 2)
class Cola(Benchmark):
    r"""
    Cola objective function.

    This class defines the Cola global optimization problem. The 17-dimensional
    function computes indirectly the formula :math:`f(n, u)` by setting
    :math:`x_0 = y_0, x_1 = u_0, x_i = u_{2(i-2)}, y_i = u_{2(i-2)+1}` :

    .. math::

        f_{\text{Cola}}(x) = \sum_{i<j}^{n} \left (r_{i,j} - d_{i,j} \right )^2

    Where :math:`r_{i, j}` is given by:

    .. math::

        r_{i, j} = \sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}

    And :math:`d` is a symmetric matrix given by:

    .. math::

        d = \left [ d_{ij} \right ] = \begin{pmatrix}
        1.27 &  &  &  &  &  &  &  & \\
        1.69 & 1.43 &  &  &  &  &  &  & \\
        2.04 & 2.35 & 2.43 &  &  &  &  &  & \\
        3.09 & 3.18 & 3.26 & 2.85 &  &  &  &  & \\
        3.20 & 3.22 & 3.27 & 2.88 & 1.55 &  &  &  & \\
        2.86 & 2.56 & 2.58 & 2.59 & 3.12 & 3.06 &  &  & \\
        3.17 & 3.18 & 3.18 & 3.12 & 1.31 & 1.64 & 3.00 & \\
        3.21 & 3.18 & 3.18 & 3.17 & 1.70 & 1.36 & 2.95 & 1.32 & \\
        2.38 & 2.31 & 2.42 & 1.94 & 2.85 & 2.81 & 2.56 & 2.91 & 2.97
        \end{pmatrix}

    This function has bounds :math:`x_0 \in [0, 4]` and :math:`x_i \in [-4, 4]`
    for :math:`i = 1, ..., n-1`.

    *Global optimum* 11.7464.

    .. [1] <NAME>. & <NAME>. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    NOTE: docstring made raw; the original non-raw string corrupted the
    LaTeX (``\t``, ``\b`` interpreted as tab/backspace) and triggers
    invalid-escape SyntaxWarnings on Python >= 3.12.
    """

    def __init__(self, dimensions=17):
        Benchmark.__init__(self, dimensions)

        # First coordinate is constrained to [0, 4]; the rest to [-4, 4].
        self._bounds = [[0.0, 4.0]] + list(zip([-4.0] * (self.N - 1),
                                               [4.0] * (self.N - 1)))

        self.global_optimum = [[0.651906, 1.30194, 0.099242, -0.883791,
                                -0.8796, 0.204651, -3.28414, 0.851188,
                                -3.46245, 2.53245, -0.895246, 1.40992,
                                -3.07367, 1.96257, -2.97872, -0.807849,
                                -1.68978]]
        self.fglob = 11.7464

        # Lower-triangular target-distance matrix d (see docstring).
        self.d = asarray([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                          [1.27, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                          [1.69, 1.43, 0, 0, 0, 0, 0, 0, 0, 0],
                          [2.04, 2.35, 2.43, 0, 0, 0, 0, 0, 0, 0],
                          [3.09, 3.18, 3.26, 2.85, 0, 0, 0, 0, 0, 0],
                          [3.20, 3.22, 3.27, 2.88, 1.55, 0, 0, 0, 0, 0],
                          [2.86, 2.56, 2.58, 2.59, 3.12, 3.06, 0, 0, 0, 0],
                          [3.17, 3.18, 3.18, 3.12, 1.31, 1.64, 3.00, 0, 0, 0],
                          [3.21, 3.18, 3.18, 3.17, 1.70, 1.36, 2.95, 1.32, 0, 0],
                          [2.38, 2.31, 2.42, 1.94, 2.85, 2.81, 2.56, 2.91, 2.97, 0.]])

    def fun(self, x, *args):
        # Evaluate the objective at x (17-vector); increments the evaluation counter.
        self.nfev += 1

        # Rebuild the 10 (x, y) points from the packed 17-vector:
        # point 0 is fixed at (0, 0), point 1 at (x[0], 0), the rest
        # interleave x[1::2] / x[2::2].
        xi = atleast_2d(asarray([0.0, x[0]] + list(x[1::2])))
        xj = repeat(xi, size(xi, 1), axis=0)
        xi = xi.T

        yi = atleast_2d(asarray([0.0, 0.0] + list(x[2::2])))
        yj = repeat(yi, size(yi, 1), axis=0)
        yi = yi.T

        # Squared error between pairwise distances and the targets d,
        # summed over the strict lower triangle (each pair counted once).
        inner = (sqrt(((xi - xj) ** 2 + (yi - yj) ** 2)) - self.d) ** 2
        inner = tril(inner, -1)
        return sum(sum(inner, axis=1))
class Colville(Benchmark):
    r"""
    Colville objective function.

    This class defines the Colville global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Colville}}(x) = \left(x_{1} -1\right)^{2}
        + 100 \left(x_{1}^{2} - x_{2}\right)^{2}
        + 10.1 \left(x_{2} -1\right)^{2} + \left(x_{3} -1\right)^{2}
        + 90 \left(x_{3}^{2} - x_{4}\right)^{2}
        + 10.1 \left(x_{4} -1\right)^{2} + 19.8 \frac{x_{4} -1}{x_{2}}

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 1` for
    :math:`i = 1, ..., 4`

    .. [1] <NAME>. & <NAME>.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    NOTE: docstring made raw so the LaTeX ``\text``/``\left``/``\frac``
    sequences are not treated as (invalid) string escapes (SyntaxWarning
    on 3.12+).

    TODO docstring equation is wrong use Jamil#36
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))

        self.global_optimum = [[1 for _ in range(self.N)]]
        self.fglob = 0.0

    def fun(self, x, *args):
        # Evaluate the objective at x (4-vector); increments the evaluation counter.
        self.nfev += 1

        return (100 * (x[0] - x[1] ** 2) ** 2
                + (1 - x[0]) ** 2 + (1 - x[2]) ** 2
                + 90 * (x[3] - x[2] ** 2) ** 2
                + 10.1 * ((x[1] - 1) ** 2 + (x[3] - 1) ** 2)
                + 19.8 * (x[1] - 1) * (x[3] - 1))
class Corana(Benchmark):
"""
Corana objective function.
This class defines the Corana [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Corana}}(x) = \begin{cases} \sum_{i=1}^n 0.15 d_i
[z_i - 0.05\textrm{sgn}(z_i)]^2 & \textrm{if }|x_i-z_i| < 0.05 \\
d_ix_i^2 & \textrm{otherwise}\end{cases}
Where, in this exercise:
.. math::
z_i = 0.2 \lfloor |x_i/s_i|+0.49999\rfloor\textrm{sgn}(x_i),
d_i=(1,1000,10,100, ...)
with :math:`x_i \in [-5, 5]` for :math:`i = 1, ..., 4`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., 4`
..[1] <NAME>. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=4):
| |
`examples/user_team_mgmt_extended.py <https://github.com/draios/python-sdc-client/blob/master/examples/user_team_mgmt_extended.py>`_
'''
res = self.list_memberships(team)
if res[0] is False:
return res
full_memberships = res[1]
full_memberships.update(memberships)
res = self.edit_team(team, full_memberships)
if res[0] is False:
return res
else:
return [True, None]
def remove_memberships(self, team, users):
'''
**Description**
Remove user memberships from specified team.
**Arguments**
- **team**: the name of the team from which user memberships are removed
- **users**: list of usernames which should be removed from team
**Example**
`examples/user_team_mgmt_extended.py <https://github.com/draios/python-sdc-client/blob/master/examples/user_team_mgmt_extended.py>`_
'''
res = self.list_memberships(team)
if res[0] is False:
return res
old_memberships = res[1]
new_memberships = {k: v for k, v in old_memberships.items() if k not in users}
res = self.edit_team(team, new_memberships)
if res[0] is False:
return res
else:
return [True, None]
def get_agents_config(self):
res = requests.get(self.url + '/api/agents/config', headers=self.hdrs, verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
data = res.json()
return [True, data]
def set_agents_config(self, config):
res = requests.put(self.url + '/api/agents/config', headers=self.hdrs, data=json.dumps(config), verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
return [True, res.json()]
def clear_agents_config(self):
data = {'files' : []}
self.set_agents_config(data)
def get_user_api_token(self, username, teamname):
res = self.get_team(teamname)
if res[0] == False:
return res
t = res[1]
res = requests.get(self.url + '/api/token/%s/%d' % (username, t['id']), headers=self.hdrs, verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
data = res.json()
return [True, data['token']['key']]
class SdMonitorClient(_SdcCommon):
    '''Client for the Sysdig Monitor REST API.

    Extends the shared :class:`_SdcCommon` HTTP plumbing with
    Monitor-specific endpoints (alerts, notifications, ...).
    '''
    def __init__(self, token="", sdc_url='https://app.sysdigcloud.com', ssl_verify=True):
        # All connection state (token, base URL, TLS verification) is held by the base class.
        super(SdMonitorClient, self).__init__(token, sdc_url, ssl_verify)
def get_alerts(self):
'''**Description**
Retrieve the list of alerts configured by the user.
**Success Return Value**
An array of alert dictionaries, with the format described at `this link <https://app.sysdigcloud.com/apidocs/#!/Alerts/get_api_alerts>`__
**Example**
`examples/list_alerts.py <https://github.com/draios/python-sdc-client/blob/master/examples/list_alerts.py>`_
'''
res = requests.get(self.url + '/api/alerts', headers=self.hdrs, verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
return [True, res.json()]
def get_notifications(self, from_ts, to_ts, state=None, resolved=None):
    '''**Description**
        Returns the list of Sysdig Monitor alert notifications.

    **Arguments**
        - **from_ts**: filter events by start time. Timestamp format is in UTC (seconds).
        - **to_ts**: filter events by start time. Timestamp format is in UTC (seconds).
        - **state**: filter events by alert state. Supported values are ``OK`` and ``ACTIVE``.
        - **resolved**: filter events by resolution status. Supported values are ``True`` and ``False``.

    **Success Return Value**
        A dictionary containing the list of notifications.

    **Example**
        `examples/list_alert_notifications.py <https://github.com/draios/python-sdc-client/blob/master/examples/list_alert_notifications.py>`_
    '''
    # The API expects timestamps in microseconds; drop filters the caller
    # did not supply (note: resolved=False is a real filter and is kept).
    candidates = {
        'from': None if from_ts is None else from_ts * 1000000,
        'to': None if to_ts is None else to_ts * 1000000,
        'state': state,
        'resolved': resolved,
    }
    params = {key: value for key, value in candidates.items() if value is not None}
    response = requests.get(self.url + '/api/notifications', headers=self.hdrs,
                            params=params, verify=self.ssl_verify)
    if not self._checkResponse(response):
        return [False, self.lasterr]
    return [True, response.json()]
def update_notification_resolution(self, notification, resolved):
    '''**Description**
        Updates the resolution status of an alert notification.

    **Arguments**
        - **notification**: notification object as returned by :func:`~SdcClient.get_notifications`.
        - **resolved**: new resolution status. Supported values are ``True`` and ``False``.

    **Success Return Value**
        The updated notification.

    **Example**
        `examples/resolve_alert_notifications.py <https://github.com/draios/python-sdc-client/blob/master/examples/resolve_alert_notifications.py>`_
    '''
    if 'id' not in notification:
        return [False, 'Invalid notification format']
    # Mutate the caller's object (original behavior) and send it back.
    notification['resolved'] = resolved
    body = json.dumps({'notification': notification})
    endpoint = self.url + '/api/notifications/' + str(notification['id'])
    response = requests.put(endpoint, headers=self.hdrs, data=body,
                            verify=self.ssl_verify)
    if self._checkResponse(response):
        return [True, response.json()]
    return [False, self.lasterr]
def create_alert(self, name=None, description=None, severity=None, for_atleast_s=None, condition=None,
                 segmentby=None, segment_condition='ANY', user_filter='', notify=None, enabled=True,
                 annotations=None, alert_obj=None):
    '''**Description**
        Create a threshold-based alert.

    **Arguments**
        - **name**: the alert name. This will appear in the Sysdig Monitor UI and in notification emails.
        - **description**: the alert description. This will appear in the Sysdig Monitor UI and in notification emails.
        - **severity**: syslog-encoded alert severity. This is a number from 0 to 7 where 0 means 'emergency' and 7 is 'debug'.
        - **for_atleast_s**: the number of consecutive seconds the condition must be satisfied for the alert to fire.
        - **condition**: the alert condition, as described here https://app.sysdigcloud.com/apidocs/#!/Alerts/post_api_alerts
        - **segmentby**: a list of Sysdig Monitor segmentation criteria that can be used to apply the alert to multiple entities. For example, segmenting a CPU alert by ['host.mac', 'proc.name'] allows to apply it to any process in any machine.
        - **segment_condition**: When *segmentby* is specified (and therefore the alert will cover multiple entities) this field is used to determine when it will fire. In particular, you have two options for *segment_condition*: **ANY** (the alert will fire when at least one of the monitored entities satisfies the condition) and **ALL** (the alert will fire when all of the monitored entities satisfy the condition).
        - **user_filter**: a boolean expression combining Sysdig Monitor segmentation criteria that makes it possible to reduce the scope of the alert. For example: *kubernetes.namespace.name='production' and container.image='nginx'*.
        - **notify**: the type of notification you want this alert to generate. Options are *EMAIL*, *SNS*, *PAGER_DUTY*, *SYSDIG_DUMP*.
        - **enabled**: if True, the alert will be enabled when created.
        - **annotations**: an optional dictionary of custom properties that you can associate to this alert for automation or management reasons
        - **alert_obj**: an optional fully-formed Alert object of the format returned in an "alerts" list by :func:`~SdcClient.get_alerts` This is an alternative to creating the Alert using the individual parameters listed above.

    **Success Return Value**
        A dictionary describing the just created alert, with the format described at `this link <https://app.sysdigcloud.com/apidocs/#!/Alerts/post_api_alerts>`__

    **Example**
        `examples/create_alert.py <https://github.com/draios/python-sdc-client/blob/master/examples/create_alert.py>`_
    '''
    # BUGFIX: segmentby/annotations previously defaulted to mutable []/{}
    # literals shared across calls. They now default to None; the falsy
    # checks below treat None and empty identically, so callers passing
    # [] or {} get exactly the old behavior.
    # Also removed a dead GET to /api/alerts whose parsed JSON was bound
    # to an unused variable — a wasted round-trip per call.
    if alert_obj is None:
        if None in (name, description, severity, for_atleast_s, condition):
            return [False, 'Must specify a full Alert object or all parameters: name, description, severity, for_atleast_s, condition']
        #
        # Populate the alert information
        #
        alert_json = {
            'alert': {
                'type': 'MANUAL',
                'name': name,
                'description': description,
                'enabled': enabled,
                'severity': severity,
                'timespan': for_atleast_s * 1000000,  # API expects microseconds
                'condition': condition,
                'filter': user_filter
            }
        }
        if segmentby:
            alert_json['alert']['segmentBy'] = segmentby
            alert_json['alert']['segmentCondition'] = {'type': segment_condition}
        if annotations:
            alert_json['alert']['annotations'] = annotations
        if notify is not None:
            alert_json['alert']['notificationChannelIds'] = notify
    else:
        # The REST API enforces "Alert ID and version must be null", so remove them if present,
        # since these would have been there in a dump from the list_alerts.py example.
        alert_obj.pop('id', None)
        alert_obj.pop('version', None)
        alert_json = {
            'alert': alert_obj
        }
    #
    # Create the new alert
    #
    res = requests.post(self.url + '/api/alerts', headers=self.hdrs, data=json.dumps(alert_json), verify=self.ssl_verify)
    if not self._checkResponse(res):
        return [False, self.lasterr]
    return [True, res.json()]
def update_alert(self, alert):
    '''**Description**
        Update a modified threshold-based alert.

    **Arguments**
        - **alert**: one modified alert object of the same format as those in the list returned by :func:`~SdcClient.get_alerts`.

    **Success Return Value**
        The updated alert.

    **Example**
        `examples/update_alert.py <https://github.com/draios/python-sdc-client/blob/master/examples/update_alert.py>`_
    '''
    if 'id' not in alert:
        return [False, "Invalid alert format"]
    endpoint = self.url + '/api/alerts/' + str(alert['id'])
    response = requests.put(endpoint, headers=self.hdrs,
                            data=json.dumps({"alert": alert}),
                            verify=self.ssl_verify)
    if self._checkResponse(response):
        return [True, response.json()]
    return [False, self.lasterr]
def delete_alert(self, alert):
    '''**Description**
        Deletes an alert.

    **Arguments**
        - **alert**: the alert dictionary as returned by :func:`~SdcClient.get_alerts`.

    **Success Return Value**
        ``None``.

    **Example**
        `examples/delete_alert.py <https://github.com/draios/python-sdc-client/blob/master/examples/delete_alert.py>`_
    '''
    if 'id' not in alert:
        return [False, 'Invalid alert format']
    endpoint = self.url + '/api/alerts/' + str(alert['id'])
    response = requests.delete(endpoint, headers=self.hdrs, verify=self.ssl_verify)
    if not self._checkResponse(response):
        return [False, self.lasterr]
    return [True, None]
def get_explore_grouping_hierarchy(self):
    '''**Description**
        Return the user's current grouping hierarchy as visible in the Explore tab of Sysdig Monitor.

    **Success Return Value**
        A list containing the list of the user's Explore grouping criteria.

    **Example**
        `examples/print_explore_grouping.py <https://github.com/draios/python-sdc-client/blob/master/examples/print_explore_grouping.py>`_
    '''
    response = requests.get(self.url + '/api/groupConfigurations',
                            headers=self.hdrs, verify=self.ssl_verify)
    if not self._checkResponse(response):
        return [False, self.lasterr]
    body = response.json()
    if 'groupConfigurations' not in body:
        return [False, 'corrupted groupConfigurations API response']
    # Find the configuration named 'explore' and flatten its groupBy metrics.
    for gconf in body['groupConfigurations']:
        if gconf['id'] != 'explore':
            continue
        metrics = [item['metric'] for item in gconf['groups'][0]['groupBy']]
        return [True, metrics]
    return [False, 'corrupted groupConfigurations API response, missing "explore" entry']
def set_explore_grouping_hierarchy(self, new_hierarchy):
'''**Description**
Changes the grouping hierarchy in the Explore panel of the current user.
**Arguments**
- **new_hierarchy**: a list of sysdig segmentation metrics indicating the new grouping hierarchy.
'''
body = {
'id': 'explore',
'groups': [{'groupBy':[]}]
}
for item in new_hierarchy:
body['groups'][0]['groupBy'].append({'metric': item})
res = requests.put(self.url + '/api/groupConfigurations/explore', headers=self.hdrs,
data=json.dumps(body), verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
else:
| |
<reponame>MediaBrain-SJTU/GroupNet
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def encode_onehot(labels):
    """Return a one-hot encoding of *labels* as an (N, C) int32 array.

    Each distinct label gets a column; row i is the one-hot vector for
    labels[i].

    BUGFIX: classes are enumerated in sorted order so the column
    assignment is deterministic. The original iterated a raw set(), whose
    order is not guaranteed for arbitrary (e.g. string) labels; for the
    integer labels produced by np.where elsewhere in this file, sorted
    order matches the old behavior exactly.
    """
    classes = sorted(set(labels))
    identity = np.identity(len(classes))
    class_rows = {c: identity[i, :] for i, c in enumerate(classes)}
    return np.array([class_rows[c] for c in labels], dtype=np.int32)
def make_mlp(dim_list, activation='relu', batch_norm=True, dropout=0):
    """Build a feed-forward stack of Linear layers as an nn.Sequential.

    After each Linear layer this optionally appends BatchNorm1d, the
    requested activation ('relu' or 'leakyrelu'), and Dropout, in that
    order.
    """
    def block(n_in, n_out):
        # Layers attached to one Linear stage, in the original order.
        stage = [nn.Linear(n_in, n_out)]
        if batch_norm:
            stage.append(nn.BatchNorm1d(n_out))
        if activation == 'relu':
            stage.append(nn.ReLU())
        elif activation == 'leakyrelu':
            stage.append(nn.LeakyReLU())
        if dropout > 0:
            stage.append(nn.Dropout(p=dropout))
        return stage

    modules = []
    for n_in, n_out in zip(dim_list[:-1], dim_list[1:]):
        modules.extend(block(n_in, n_out))
    return nn.Sequential(*modules)
class MLP_dict_softmax(nn.Module):
    """MLP head producing a Gumbel-softmax distribution over edge types,
    scaled by a learned sigmoid factor.

    forward(x) returns (factor * distribution, distribution), where
    distribution is a soft sample over `edge_types` categories.
    """
    def __init__(self, input_dim, output_dim, hidden_size=(1024, 512), activation='relu', discrim=False, dropout=-1, edge_types=10):
        super(MLP_dict_softmax, self).__init__()
        # output_dim/activation/discrim/dropout are accepted only for
        # signature compatibility with the sibling MLP classes.
        self.bottleneck_dim = edge_types
        self.MLP_distribution = MLP(input_dim=input_dim, output_dim=self.bottleneck_dim, hidden_size=hidden_size)
        self.MLP_factor = MLP(input_dim=input_dim, output_dim=1, hidden_size=hidden_size)
        self.init_MLP = MLP(input_dim=input_dim, output_dim=input_dim, hidden_size=hidden_size)

    def forward(self, x):
        hidden = self.init_MLP(x)
        # Soft (non-hard) Gumbel sample over edge types, temperature 0.5.
        distribution = gumbel_softmax(self.MLP_distribution(hidden), tau=1 / 2, hard=False)
        factor = torch.sigmoid(self.MLP_factor(hidden))
        return factor * distribution, distribution
class MS_HGNN_oridinary(nn.Module):
    """Pooling module as proposed in our paper"""
    # Message passing over a fully connected pairwise actor graph: node
    # features are alternately pooled into edge features (node2edge) and
    # scattered back (edge2node) for `nmp_layers` iterations.
    def __init__(
        self, embedding_dim=64, h_dim=64, mlp_dim=1024, bottleneck_dim=1024,
        activation='relu', batch_norm=True, dropout=0.0, nmp_layers=4, vis=False
    ):
        super(MS_HGNN_oridinary, self).__init__()
        self.mlp_dim = mlp_dim
        self.h_dim = h_dim
        self.bottleneck_dim = bottleneck_dim
        self.embedding_dim = embedding_dim
        self.nmp_layers = nmp_layers  # number of message-passing rounds
        self.batch_norm = batch_norm
        self.activation = activation
        self.vis = vis
        hdim_extend = 64  # width of the intermediate edge-feature space
        self.hdim_extend = hdim_extend
        self.edge_types = 6  # size of the learned edge-type dictionary
        # First node->edge step: emits a distribution over edge types.
        self.nmp_mlp_start = MLP_dict_softmax(input_dim = hdim_extend, output_dim = h_dim, hidden_size=(128,),edge_types=self.edge_types)
        self.nmp_mlps = self.make_nmp_mlp()
        # Final edge->node projection down to the bottleneck dimension.
        self.nmp_mlp_end = MLP(input_dim = h_dim*2, output_dim = bottleneck_dim, hidden_size=(128,))
        # One attention scorer per message-passing layer.
        attention_mlp = []
        for i in range(nmp_layers):
            attention_mlp.append(MLP(input_dim=hdim_extend*2, output_dim=1, hidden_size=(32,)))
        self.attention_mlp = nn.ModuleList(attention_mlp)
        # One node->edge input projection per layer.
        node2edge_start_mlp = []
        for i in range(nmp_layers):
            node2edge_start_mlp.append(MLP(input_dim = h_dim, output_dim = hdim_extend, hidden_size=(256,)))
        self.node2edge_start_mlp = nn.ModuleList(node2edge_start_mlp)
        # One per-edge-type aggregation module per layer.
        edge_aggregation_list = []
        for i in range(nmp_layers):
            edge_aggregation_list.append(edge_aggregation(input_dim = h_dim, output_dim = bottleneck_dim, hidden_size=(128,),edge_types=self.edge_types))
        self.edge_aggregation_list = nn.ModuleList(edge_aggregation_list)

    def make_nmp_mlp(self):
        # Alternating (edge->node MLP, node->edge dict-softmax MLP) pairs
        # used for message-passing rounds 2..nmp_layers.
        nmp_mlp = []
        for i in range(self.nmp_layers-1):
            mlp1 = MLP(input_dim = self.h_dim*2, output_dim = self.h_dim, hidden_size=(128,))
            mlp2 = MLP_dict_softmax(input_dim = self.hdim_extend, output_dim = self.h_dim, hidden_size=(128,),edge_types=self.edge_types)
            nmp_mlp.append(mlp1)
            nmp_mlp.append(mlp2)
        nmp_mlp = nn.ModuleList(nmp_mlp)
        return nmp_mlp

    def repeat(self, tensor, num_reps):
        """
        Inputs:
        -tensor: 2D tensor of any shape
        -num_reps: Number of times to repeat each row
        Outputs:
        -repeat_tensor: Repeat each row such that: R1, R1, R2, R2
        """
        col_len = tensor.size(1)
        tensor = tensor.unsqueeze(dim=1).repeat(1, num_reps, 1)
        tensor = tensor.view(-1, col_len)
        return tensor

    def edge2node(self, x, rel_rec, rel_send, ori, idx):
        # NOTE: Assumes that we have the same graph across all samples.
        # Incidence matrix: receiver + sender one-hot rows per edge.
        H = rel_rec + rel_send
        incoming = self.edge_aggregation_list[idx](x,H,ori)
        # Normalize by the size of dim 1 of the aggregated tensor.
        return incoming / incoming.size(1)

    def node2edge(self, x, rel_rec, rel_send, idx):
        # NOTE: Assumes that we have the same graph across all samples.
        H = rel_rec + rel_send
        # Project node features into the edge feature space.
        x = self.node2edge_start_mlp[idx](x)
        edge_init = torch.matmul(H,x)
        node_num = x.shape[1]
        edge_num = edge_init.shape[1]
        # Tile node and edge features to form every (edge, node) pair,
        # then score each pair with the layer's attention MLP.
        x_rep = (x[:,:,None,:].transpose(2,1)).repeat(1,edge_num,1,1)
        edge_rep = edge_init[:,:,None,:].repeat(1,1,node_num,1)
        node_edge_cat = torch.cat((x_rep,edge_rep),dim=-1)
        attention_weight = self.attention_mlp[idx](node_edge_cat)[:,:,:,0]
        # Mask scores to actual incidences, softmax over nodes, re-mask so
        # non-member nodes contribute nothing, then pool node features.
        H_weight = attention_weight * H
        H_weight = F.softmax(H_weight,dim=2)
        H_weight = H_weight * H
        edges = torch.matmul(H_weight,x)
        return edges

    def init_adj(self, num_ped, batch):
        # Fully connected graph: one directed edge per ordered node pair.
        off_diag = np.ones([num_ped, num_ped])
        rel_rec = np.array(encode_onehot(np.where(off_diag)[1]), dtype=np.float64)
        rel_send = np.array(encode_onehot(np.where(off_diag)[0]), dtype=np.float64)
        rel_rec = torch.FloatTensor(rel_rec)
        rel_send = torch.FloatTensor(rel_send)
        # NOTE(review): hard-codes CUDA; CPU-only execution would fail here.
        rel_rec = rel_rec.cuda()
        rel_send = rel_send.cuda()
        # Broadcast the shared incidence matrices across the batch.
        rel_rec = rel_rec[None,:,:].repeat(batch,1,1)
        rel_send = rel_send[None,:,:].repeat(batch,1,1)
        return rel_rec, rel_send

    def forward(self, h_states):
        """Run nmp_layers rounds of node<->edge message passing; returns
        the refined node features and the edge-type factors from the
        first node->edge step."""
        batch = h_states.shape[0]
        actor_num = h_states.shape[1]
        curr_hidden = h_states
        # Neural Message Passing
        rel_rec, rel_send = self.init_adj(actor_num,batch)
        # iter 1
        edge_feat = self.node2edge(curr_hidden, rel_rec, rel_send,0) # [num_edge, h_dim*2]
        # edge_feat = torch.cat([edge_feat, curr_rel_embedding], dim=2) # [num_edge, h_dim*2+embedding_dim]
        edge_feat, factors = self.nmp_mlp_start(edge_feat) # [num_edge, h_dim]
        node_feat = curr_hidden
        nodetoedge_idx = 0
        if self.nmp_layers <= 1:
            pass
        else:
            # Even entries of nmp_mlps map edges->nodes, odd ones nodes->edges.
            for nmp_l, nmp_mlp in enumerate(self.nmp_mlps):
                if nmp_l%2==0:
                    node_feat = nmp_mlp(self.edge2node(edge_feat, rel_rec, rel_send,node_feat,nodetoedge_idx)) # [num_ped, h_dim]
                    nodetoedge_idx += 1
                else:
                    edge_feat, _ = nmp_mlp(self.node2edge(node_feat, rel_rec, rel_send,nodetoedge_idx)) # [num_ped, h_dim] -> [num_edge, 2*h_dim] -> [num_edge, h_dim]
        # Final edge->node projection to the bottleneck dimension.
        node_feat = self.nmp_mlp_end(self.edge2node(edge_feat, rel_rec, rel_send, node_feat,nodetoedge_idx))
        return node_feat, factors
class MLP(nn.Module):
    """Simple fully connected stack used as a building block in this module.

    Args:
        input_dim: size of the input features.
        output_dim: size of the output features.
        hidden_size: tuple of hidden-layer widths.
        activation: 'relu' or 'sigmoid', applied after every non-final layer.
        discrim: if True, apply a final sigmoid to the output.
        dropout: dropout probability after hidden activations; -1 disables
            it. After the second layer (i == 1) the rate is capped at
            min(0.1, dropout / 3), matching the original behavior.
    """

    def __init__(self, input_dim, output_dim, hidden_size=(1024, 512), activation='relu', discrim=False, dropout=-1):
        super(MLP, self).__init__()
        dims = [input_dim, *hidden_size, output_dim]
        self.layers = nn.ModuleList()
        for i in range(len(dims) - 1):
            self.layers.append(nn.Linear(dims[i], dims[i + 1]))
        if activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        self.sigmoid = nn.Sigmoid() if discrim else None
        self.dropout = dropout

    def forward(self, x):
        for i in range(len(self.layers)):
            x = self.layers[i](x)
            if i != len(self.layers) - 1:
                x = self.activation(x)
                if self.dropout != -1:
                    # BUGFIX: the original built a fresh nn.Dropout module on
                    # every forward call; an unregistered module is always in
                    # training mode, so dropout stayed active even after
                    # model.eval(). F.dropout with training=self.training
                    # respects the train/eval switch.
                    p = min(0.1, self.dropout / 3) if i == 1 else self.dropout
                    x = F.dropout(x, p=p, training=self.training)
            elif self.sigmoid:
                x = self.sigmoid(x)
        return x
class MLP_dict(nn.Module):
    """Variant of MLP_dict_softmax that yields a non-negative score vector
    over edge types instead of a Gumbel-softmax sample.

    forward(x) returns the same score tensor twice, mirroring the
    (weighted, distribution) pair returned by MLP_dict_softmax.
    """
    def __init__(self, input_dim, output_dim, hidden_size=(1024, 512), activation='relu', discrim=False, dropout=-1, edge_types=10):
        super(MLP_dict, self).__init__()
        # Unused arguments are kept so the signature matches the siblings.
        self.bottleneck_dim = edge_types
        self.MLP_distribution = MLP(input_dim=input_dim, output_dim=self.bottleneck_dim, hidden_size=hidden_size)
        # MLP_factor is constructed but not used in forward(); kept for
        # parameter/state-dict compatibility with the original code.
        self.MLP_factor = MLP(input_dim=input_dim, output_dim=1, hidden_size=hidden_size)
        self.init_MLP = MLP(input_dim=input_dim, output_dim=input_dim, hidden_size=hidden_size)

    def forward(self, x):
        hidden = self.init_MLP(x)
        scores = torch.abs(self.MLP_distribution(hidden))
        return scores, scores
class edge_aggregation(nn.Module):
    """Aggregate hyper-edge features back onto nodes.

    Each edge type owns its own MLP; edge features are a mixture of the
    per-type MLP outputs weighted by the edge-type distribution, and node
    features are the transposed incidence projection of those edge
    features concatenated with the original node features.
    """
    def __init__(self, input_dim, output_dim, hidden_size=(1024, 512), activation='relu', discrim=False, dropout=-1, edge_types=5):
        super(edge_aggregation, self).__init__()
        self.edge_types = edge_types
        self.dict_dim = input_dim
        self.agg_mlp = nn.ModuleList(
            [MLP(input_dim=input_dim, output_dim=input_dim, hidden_size=(128,)) for _ in range(edge_types)]
        )
        # Constructed but not used by forward(); kept so the parameter set
        # (and any existing checkpoints) stays compatible.
        self.mlp = MLP(input_dim=input_dim, output_dim=input_dim, hidden_size=(128,))

    def forward(self, edge_distribution, H, ori):
        batch = edge_distribution.shape[0]
        num_edges = edge_distribution.shape[1]
        # Pool node features onto edges via the incidence matrix H, then mix
        # the per-type transforms weighted by the edge-type distribution.
        pooled = torch.matmul(H, ori)
        mixed = torch.zeros(batch, num_edges, ori.shape[-1]).type_as(ori)
        for t in range(self.edge_types):
            mixed = mixed + edge_distribution[:, :, t:t + 1] * self.agg_mlp[t](pooled)
        node_feature = torch.cat((torch.matmul(H.permute(0, 2, 1), mixed), ori), dim=-1)
        return node_feature
class MS_HGNN_hyper(nn.Module):
"""Pooling module as proposed in our paper"""
def __init__(
    self, embedding_dim=64, h_dim=64, mlp_dim=1024, bottleneck_dim=1024,
    activation='relu', batch_norm=True, dropout=0.0, nmp_layers=4, scale=2, vis=False, actor_number=11
):
    # Hypergraph variant of the message-passing block: hyper-edges group
    # `scale` actors selected by feature correlation.
    # actor_number is only used for the precomputation guarded by
    # self.listall below.
    super(MS_HGNN_hyper, self).__init__()
    self.mlp_dim = mlp_dim
    self.h_dim = h_dim
    self.bottleneck_dim = bottleneck_dim
    self.embedding_dim = embedding_dim
    self.nmp_layers = nmp_layers
    self.batch_norm = batch_norm
    self.activation = activation
    self.scale = scale  # hyper-edge group size
    self.vis = vis
    mlp_pre_dim = embedding_dim + h_dim  # NOTE(review): computed but unused below
    self.vis = vis  # duplicate assignment kept from the original
    # NOTE(review): spatial_embedding/spatial_transform are constructed but
    # not referenced by the methods visible in this file chunk.
    self.spatial_embedding = nn.Linear(2, embedding_dim)
    self.spatial_transform = nn.Linear(h_dim,h_dim)
    hdim_extend = 64  # width of the intermediate edge-feature space
    self.hdim_extend = hdim_extend
    self.edge_types = 10  # size of the learned edge-type dictionary
    # First node->edge step: emits a distribution over edge types.
    self.nmp_mlp_start = MLP_dict_softmax(input_dim=hdim_extend, output_dim=h_dim, hidden_size=(128,),edge_types=self.edge_types)
    self.nmp_mlps = self.make_nmp_mlp()
    # Final edge->node projection down to the bottleneck dimension.
    self.nmp_mlp_end = MLP(input_dim=h_dim*2, output_dim=bottleneck_dim, hidden_size=(128,))
    # One attention scorer per message-passing layer.
    attention_mlp = []
    for i in range(nmp_layers):
        attention_mlp.append(MLP(input_dim=hdim_extend*2, output_dim=1, hidden_size=(32,)))
    self.attention_mlp = nn.ModuleList(attention_mlp)
    # One node->edge input projection per layer.
    node2edge_start_mlp = []
    for i in range(nmp_layers):
        node2edge_start_mlp.append(MLP(input_dim = h_dim, output_dim = hdim_extend, hidden_size=(256,)))
    self.node2edge_start_mlp = nn.ModuleList(node2edge_start_mlp)
    # One per-edge-type aggregation module per layer.
    edge_aggregation_list = []
    for i in range(nmp_layers):
        edge_aggregation_list.append(edge_aggregation(input_dim = h_dim, output_dim = bottleneck_dim, hidden_size=(128,),edge_types=self.edge_types))
    self.edge_aggregation_list = nn.ModuleList(edge_aggregation_list)
    # When enabled, precompute every candidate group of `scale` actors per
    # seed actor, for the exhaustive search in init_adj_attention_listall.
    # Hard-coded False here, so the branch below never runs as written.
    self.listall = False
    if self.listall:
        if scale < actor_number:
            group_size = scale
            all_combs = []
            for i in range(actor_number):
                tensor_a = torch.arange(actor_number).cuda()
                tensor_a = torch.cat((tensor_a[0:i],tensor_a[i+1:]),dim=0)
                padding = (1,0,0,0)
                # All (scale-1)-combinations of the other actors, with the
                # seed actor i prepended via F.pad.
                all_comb = F.pad(torch.combinations(tensor_a,r=group_size-1),padding,value=i)
                all_combs.append(all_comb[None,:,:])
            self.all_combs = torch.cat(all_combs,dim=0)
            self.all_combs = self.all_combs.cuda()
def make_nmp_mlp(self):
    """Create the alternating (edge->node MLP, node->edge dict-softmax MLP)
    pairs used for message-passing rounds 2..nmp_layers."""
    pairs = []
    for _ in range(self.nmp_layers - 1):
        pairs.append(MLP(input_dim=self.h_dim * 2, output_dim=self.h_dim, hidden_size=(128,)))
        pairs.append(MLP_dict_softmax(input_dim=self.hdim_extend, output_dim=self.h_dim, hidden_size=(128,), edge_types=self.edge_types))
    return nn.ModuleList(pairs)
def repeat(self, tensor, num_reps):
    """Repeat each row of a 2-D tensor *num_reps* times, preserving order.

    Given rows R1, R2 and num_reps=2, the result rows are R1, R1, R2, R2.
    """
    width = tensor.size(1)
    expanded = tensor.unsqueeze(dim=1).repeat(1, num_reps, 1)
    return expanded.view(-1, width)
def edge2node(self, x, ori, H, idx):
    """Scatter hyper-edge features back onto nodes with the idx-th
    edge-aggregation module, normalized by the size of dim 1."""
    # NOTE: Assumes that we have the same graph across all samples.
    aggregated = self.edge_aggregation_list[idx](x, H, ori)
    return aggregated / aggregated.size(1)
def node2edge(self, x, H, idx):
    """Pool node features into hyper-edge features with attention weights.

    H is a (batch, edge, node) 0/1 incidence matrix (as required by the
    matmul below). The layer's attention MLP scores each (edge, node)
    pair; scores are masked to real incidences, softmax-normalized over
    nodes, re-masked, and used to weight the pooling.
    """
    # Project node features into the edge feature space.
    x = self.node2edge_start_mlp[idx](x)
    edge_init = torch.matmul(H,x)
    node_num = x.shape[1]
    edge_num = edge_init.shape[1]
    # Tile node and edge features to form every (edge, node) pair.
    x_rep = (x[:,:,None,:].transpose(2,1)).repeat(1,edge_num,1,1)
    edge_rep = edge_init[:,:,None,:].repeat(1,1,node_num,1)
    node_edge_cat = torch.cat((x_rep,edge_rep),dim=-1)
    attention_weight = self.attention_mlp[idx](node_edge_cat)[:,:,:,0]
    # Mask to incidences, softmax over the node dimension, then re-mask so
    # nodes outside the hyper-edge contribute nothing.
    H_weight = attention_weight * H
    H_weight = F.softmax(H_weight,dim=2)
    H_weight = H_weight * H
    edges = torch.matmul(H_weight,x)
    return edges
def init_adj_attention(self, feat, feat_corr, scale_factor=2):
    """Build a hyper-edge incidence matrix by grouping each actor with its
    most correlated peers.

    Returns a (batch, actor, actor) 0/1 matrix H where H[b, i, j] = 1 iff
    actor j is among the scale_factor highest-correlation entries of row i
    in feat_corr. When scale_factor equals the actor count, a single
    all-ones hyper-edge of shape (batch, 1, actor) is returned instead.
    """
    batch = feat.shape[0]
    num_actors = feat.shape[1]
    if scale_factor == num_actors:
        # One global hyper-edge containing every actor.
        return torch.ones(batch, 1, num_actors).type_as(feat)
    group_size = max(scale_factor, 1)
    _, top_idx = torch.topk(feat_corr, dim=2, k=group_size, largest=True)
    incidence = torch.zeros(batch, num_actors, num_actors).type_as(feat)
    return incidence.scatter(2, top_idx, 1)
def init_adj_attention_listall(self, feat, feat_corr, scale_factor=2):
    """Exhaustive variant of init_adj_attention.

    For each seed actor, score every precomputed candidate group
    (self.all_combs, built in __init__ when listall is enabled) by its
    summed pairwise correlation and keep the best one. Returns a
    (batch, actor, actor) 0/1 incidence matrix, or a single all-ones
    hyper-edge when scale_factor equals the actor count.
    """
    batch = feat.shape[0]
    actor_number = feat.shape[1]
    if scale_factor == actor_number:
        # One global hyper-edge containing every actor.
        H_matrix = torch.ones(batch,1,actor_number).type_as(feat)
        return H_matrix
    group_size = scale_factor
    if group_size < 1:
        group_size = 1
    all_indice = self.all_combs.clone() #(N,C,m)
    all_indice = all_indice[None,:,:,:].repeat(batch,1,1,1)
    # Gather, for each candidate group, the rows then the columns of the
    # correlation matrix restricted to the group's members.
    all_matrix = feat_corr[:,None,None,:,:].repeat(1,actor_number,all_indice.shape[2],1,1)
    all_matrix = torch.gather(all_matrix,3,all_indice[:,:,:,:,None].repeat(1,1,1,1,actor_number))
    all_matrix = torch.gather(all_matrix,4,all_indice[:,:,:,None,:].repeat(1,1,1,group_size,1))
    # Score each group by its summed pairwise correlation and keep the best.
    score = torch.sum(all_matrix,dim=(3,4),keepdim=False)
    _,max_idx = torch.max(score,dim=2)
    indice = torch.gather(all_indice,2,max_idx[:,:,None,None].repeat(1,1,1,group_size))[:,:,0,:]
    # Mark the winning group's members in the incidence matrix.
    H_matrix = torch.zeros(batch,actor_number,actor_number).type_as(feat)
    H_matrix = H_matrix.scatter(2,indice,1)
    return H_matrix
def forward(self, h_states, corr):
curr_hidden = h_states #(num_pred, h_dim)
if self.listall:
H = self.init_adj_attention_listall(curr_hidden,corr,scale_factor=self.scale)
else:
H = self.init_adj_attention(curr_hidden,corr,scale_factor=self.scale)
edge_hidden = self.node2edge(curr_hidden, H, idx=0)
edge_feat, factor = self.nmp_mlp_start(edge_hidden)
node_feat = curr_hidden
node2edge_idx = 0
if self.nmp_layers <= 1:
pass
else:
for nmp_l, nmp_mlp in enumerate(self.nmp_mlps):
if nmp_l%2==0:
node_feat = nmp_mlp(self.edge2node(edge_feat,node_feat,H,node2edge_idx)) | |
or reservation.start_time < '14:30' < reservation.end_time:
if r1400_1430_yesterday.count(reservation) == 0:
r1400_1430_yesterday.append(reservation)
if '14:30' <= reservation.start_time < '15:00':
r1430_1500_yesterday.append(reservation)
if '14:30' < reservation.end_time <= '15:00' or reservation.start_time < '15:00' < reservation.end_time:
if r1430_1500_yesterday.count(reservation) == 0:
r1430_1500_yesterday.append(reservation)
if '15:00' <= reservation.start_time < '15:30':
r1500_1530_yesterday.append(reservation)
if '15:00' < reservation.end_time <= '15:30' or reservation.start_time < '15:30' < reservation.end_time:
if r1500_1530_yesterday.count(reservation) == 0:
r1500_1530_yesterday.append(reservation)
if '15:30' <= reservation.start_time < '16:00':
r1530_1600_yesterday.append(reservation)
if '15:30' < reservation.end_time <= '16:00' or reservation.start_time < '16:00' < reservation.end_time:
if r1530_1600_yesterday.count(reservation) == 0:
r1530_1600_yesterday.append(reservation)
if '16:00' <= reservation.start_time < '16:30':
r1600_1630_yesterday.append(reservation)
if '16:00' < reservation.end_time <= '16:30' or reservation.start_time < '16:30' < reservation.end_time:
if r1600_1630_yesterday.count(reservation) == 0:
r1600_1630_yesterday.append(reservation)
if '16:30' <= reservation.start_time < '17:00':
r1630_1700_yesterday.append(reservation)
if '16:30' < reservation.end_time <= '17:00' or reservation.start_time < '17:00' < reservation.end_time:
if r1630_1700_yesterday.count(reservation) == 0:
r1630_1700_yesterday.append(reservation)
if '17:00' <= reservation.start_time < '17:30':
r1700_1730_yesterday.append(reservation)
if '17:00' < reservation.end_time <= '17:30' or reservation.start_time < '17:30' < reservation.end_time:
if r1700_1730_yesterday.count(reservation) == 0:
r1700_1730_yesterday.append(reservation)
if '17:30' <= reservation.start_time < '18:00':
r1730_1800_yesterday.append(reservation)
if '17:30' < reservation.end_time <= '18:00' or reservation.start_time < '18:00' < reservation.end_time:
if r1730_1800_yesterday.count(reservation) == 0:
r1730_1800_yesterday.append(reservation)
if '18:00' <= reservation.start_time < '18:30':
r1800_1830_yesterday.append(reservation)
if '18:00' < reservation.end_time <= '18:30' or reservation.start_time < '18:30' < reservation.end_time:
if r1800_1830_yesterday.count(reservation) == 0:
r1800_1830_yesterday.append(reservation)
if '18:30' <= reservation.start_time < '19:00':
r1830_1900_yesterday.append(reservation)
if '18:30' < reservation.end_time <= '19:00' or reservation.start_time < '19:00' < reservation.end_time:
if r1830_1900_yesterday.count(reservation) == 0:
r1830_1900_yesterday.append(reservation)
if '19:00' <= reservation.start_time < '19:30':
r1900_1930_yesterday.append(reservation)
if '19:00' < reservation.end_time <= '19:30' or reservation.start_time < '19:30' < reservation.end_time:
if r1900_1930_yesterday.count(reservation) == 0:
r1900_1930_yesterday.append(reservation)
if '19:30' <= reservation.start_time < '20:00':
r1930_2000_yesterday.append(reservation)
if '19:30' < reservation.end_time <= '20:00' or reservation.start_time < '20:00' < reservation.end_time:
if r1930_2000_yesterday.count(reservation) == 0:
r1930_2000_yesterday.append(reservation)
if '20:00' <= reservation.start_time < '20:30':
r2000_2030_yesterday.append(reservation)
if '20:00' < reservation.end_time <= '20:30' or reservation.start_time < '20:30' < reservation.end_time:
if r2000_2030_yesterday.count(reservation) == 0:
r2000_2030_yesterday.append(reservation)
if '20:30' <= reservation.start_time < '21:00':
r2030_2100_yesterday.append(reservation)
if '20:30' < reservation.end_time <= '21:00' or reservation.start_time < '21:00' < reservation.end_time:
if r2030_2100_yesterday.count(reservation) == 0:
r2030_2100_yesterday.append(reservation)
if '21:00' <= reservation.start_time < '21:30':
r2100_2130_yesterday.append(reservation)
if '21:00' < reservation.end_time <= '21:30' or reservation.start_time < '21:30' < reservation.end_time:
if r2100_2130_yesterday.count(reservation) == 0:
r2100_2130_yesterday.append(reservation)
if '21:30' <= reservation.start_time < '22:00':
r2130_2200_yesterday.append(reservation)
if '21:30' < reservation.end_time <= '22:00' or reservation.start_time < '22:00' < reservation.end_time:
if r2130_2200_yesterday.count(reservation) == 0:
r2130_2200_yesterday.append(reservation)
if '22:00' <= reservation.start_time < '22:30':
r2200_2230_yesterday.append(reservation)
if '22:00' < reservation.end_time <= '22:30' or reservation.start_time < '22:30' < reservation.end_time:
if r2200_2230_yesterday.count(reservation) == 0:
r2200_2230_yesterday.append(reservation)
if '22:30' <= reservation.start_time < '23:00':
r2230_2300_yesterday.append(reservation)
if '22:30' < reservation.end_time <= '23:00' or reservation.start_time < '23:00' < reservation.end_time:
if r2230_2300_yesterday.count(reservation) == 0:
r2230_2300_yesterday.append(reservation)
if '23:00' <= reservation.start_time < '23:30':
r2300_2330_yesterday.append(reservation)
if '23:00' < reservation.end_time <= '23:30' or reservation.start_time < '23:30' < reservation.end_time:
if r2300_2330_yesterday.count(reservation) == 0:
r2300_2330_yesterday.append(reservation)
if '23:30' <= reservation.start_time < '00:00':
r2330_0000_yesterday.append(reservation)
if '23:30' < reservation.end_time <= '00:00' or reservation.start_time < '00:00' < reservation.end_time:
if r2330_0000_yesterday.count(reservation) == 0:
r2330_0000_yesterday.append(reservation)
dict_r0000_0030_yesterday['key2'] = r0000_0030_yesterday
dict_r0030_0100_yesterday['key2'] = r0030_0100_yesterday
dict_r0100_0130_yesterday['key2'] = r0100_0130_yesterday
dict_r0130_0200_yesterday['key2'] = r0130_0200_yesterday
dict_r0200_0230_yesterday['key2'] = r0200_0230_yesterday
dict_r0230_0300_yesterday['key2'] = r0230_0300_yesterday
dict_r0300_0330_yesterday['key2'] = r0300_0330_yesterday
dict_r0330_0400_yesterday['key2'] = r0330_0400_yesterday
dict_r0400_0430_yesterday['key2'] = r0400_0430_yesterday
dict_r0430_0500_yesterday['key2'] = r0430_0500_yesterday
dict_r0500_0530_yesterday['key2'] = r0500_0530_yesterday
dict_r0530_0600_yesterday['key2'] = r0530_0600_yesterday
dict_r0600_0630_yesterday['key2'] = r0600_0630_yesterday
dict_r0630_0700_yesterday['key2'] = r0630_0700_yesterday
dict_r0700_0730_yesterday['key2'] = r0700_0730_yesterday
dict_r0730_0800_yesterday['key2'] = r0730_0800_yesterday
dict_r0800_0830_yesterday['key2'] = r0800_0830_yesterday
dict_r0830_0900_yesterday['key2'] = r0830_0900_yesterday
dict_r0900_0930_yesterday['key2'] = r0900_0930_yesterday
dict_r0930_1000_yesterday['key2'] = r0930_1000_yesterday
dict_r1000_1030_yesterday['key2'] = r1000_1030_yesterday
dict_r1030_1100_yesterday['key2'] = r1030_1100_yesterday
dict_r1100_1130_yesterday['key2'] = r1100_1130_yesterday
dict_r1130_1200_yesterday['key2'] = r1130_1200_yesterday
dict_r1200_1230_yesterday['key2'] = r1200_1230_yesterday
dict_r1230_1300_yesterday['key2'] = r1230_1300_yesterday
dict_r1300_1330_yesterday['key2'] = r1300_1330_yesterday
dict_r1330_1400_yesterday['key2'] = r1330_1400_yesterday
dict_r1400_1430_yesterday['key2'] = r1400_1430_yesterday
dict_r1430_1500_yesterday['key2'] = r1430_1500_yesterday
dict_r1500_1530_yesterday['key2'] = r1500_1530_yesterday
dict_r1530_1600_yesterday['key2'] = r1530_1600_yesterday
dict_r1600_1630_yesterday['key2'] = r1600_1630_yesterday
dict_r1630_1700_yesterday['key2'] = r1630_1700_yesterday
dict_r1700_1730_yesterday['key2'] = r1700_1730_yesterday
dict_r1730_1800_yesterday['key2'] = r1730_1800_yesterday
dict_r1800_1830_yesterday['key2'] = r1800_1830_yesterday
dict_r1830_1900_yesterday['key2'] = r1830_1900_yesterday
dict_r1900_1930_yesterday['key2'] = r1900_1930_yesterday
dict_r1930_2000_yesterday['key2'] = r1930_2000_yesterday
dict_r2000_2030_yesterday['key2'] = r2000_2030_yesterday
dict_r2030_2100_yesterday['key2'] = r2030_2100_yesterday
dict_r2100_2130_yesterday['key2'] = r2100_2130_yesterday
dict_r2130_2200_yesterday['key2'] = r2130_2200_yesterday
dict_r2200_2230_yesterday['key2'] = r2200_2230_yesterday
dict_r2230_2300_yesterday['key2'] = r2230_2300_yesterday
dict_r2300_2330_yesterday['key2'] = r2300_2330_yesterday
dict_r2330_0000_yesterday['key2'] = r2330_0000_yesterday
dict_r0000_0030_yesterday['key3'] = len(r0000_0030_yesterday)
dict_r0030_0100_yesterday['key3'] = len(r0030_0100_yesterday)
dict_r0100_0130_yesterday['key3'] = len(r0100_0130_yesterday)
dict_r0130_0200_yesterday['key3'] = len(r0130_0200_yesterday)
dict_r0200_0230_yesterday['key3'] = len(r0200_0230_yesterday)
dict_r0230_0300_yesterday['key3'] = len(r0230_0300_yesterday)
dict_r0300_0330_yesterday['key3'] = len(r0300_0330_yesterday)
dict_r0330_0400_yesterday['key3'] = len(r0330_0400_yesterday)
dict_r0400_0430_yesterday['key3'] = len(r0400_0430_yesterday)
dict_r0430_0500_yesterday['key3'] = len(r0430_0500_yesterday)
dict_r0500_0530_yesterday['key3'] = len(r0500_0530_yesterday)
dict_r0530_0600_yesterday['key3'] = len(r0530_0600_yesterday)
dict_r0600_0630_yesterday['key3'] = len(r0600_0630_yesterday)
dict_r0630_0700_yesterday['key3'] = len(r0630_0700_yesterday)
dict_r0700_0730_yesterday['key3'] = len(r0700_0730_yesterday)
dict_r0730_0800_yesterday['key3'] = len(r0730_0800_yesterday)
dict_r0800_0830_yesterday['key3'] = len(r0800_0830_yesterday)
dict_r0830_0900_yesterday['key3'] = len(r0830_0900_yesterday)
dict_r0900_0930_yesterday['key3'] = len(r0900_0930_yesterday)
dict_r0930_1000_yesterday['key3'] = len(r0930_1000_yesterday)
dict_r1000_1030_yesterday['key3'] = len(r1000_1030_yesterday)
dict_r1030_1100_yesterday['key3'] = len(r1030_1100_yesterday)
dict_r1100_1130_yesterday['key3'] = len(r1100_1130_yesterday)
dict_r1130_1200_yesterday['key3'] = len(r1130_1200_yesterday)
dict_r1200_1230_yesterday['key3'] = len(r1200_1230_yesterday)
dict_r1230_1300_yesterday['key3'] = len(r1230_1300_yesterday)
dict_r1300_1330_yesterday['key3'] = len(r1300_1330_yesterday)
dict_r1330_1400_yesterday['key3'] = len(r1330_1400_yesterday)
dict_r1400_1430_yesterday['key3'] = len(r1400_1430_yesterday)
dict_r1430_1500_yesterday['key3'] = len(r1430_1500_yesterday)
dict_r1500_1530_yesterday['key3'] = len(r1500_1530_yesterday)
dict_r1530_1600_yesterday['key3'] = len(r1530_1600_yesterday)
dict_r1600_1630_yesterday['key3'] = len(r1600_1630_yesterday)
dict_r1630_1700_yesterday['key3'] = len(r1630_1700_yesterday)
dict_r1700_1730_yesterday['key3'] = len(r1700_1730_yesterday)
dict_r1730_1800_yesterday['key3'] = len(r1730_1800_yesterday)
dict_r1800_1830_yesterday['key3'] = len(r1800_1830_yesterday)
dict_r1830_1900_yesterday['key3'] = len(r1830_1900_yesterday)
dict_r1900_1930_yesterday['key3'] = len(r1900_1930_yesterday)
dict_r1930_2000_yesterday['key3'] = len(r1930_2000_yesterday)
dict_r2000_2030_yesterday['key3'] = len(r2000_2030_yesterday)
dict_r2030_2100_yesterday['key3'] = len(r2030_2100_yesterday)
dict_r2100_2130_yesterday['key3'] = len(r2100_2130_yesterday)
dict_r2130_2200_yesterday['key3'] = len(r2130_2200_yesterday)
dict_r2200_2230_yesterday['key3'] = len(r2200_2230_yesterday)
dict_r2230_2300_yesterday['key3'] = len(r2230_2300_yesterday)
dict_r2300_2330_yesterday['key3'] = len(r2300_2330_yesterday)
dict_r2330_0000_yesterday['key3'] = len(r2330_0000_yesterday)
dict_r0000_0030_yesterday['key4'] = '00:00 - 00:30'
dict_r0030_0100_yesterday['key4'] = '00:30 - 01:00'
dict_r0100_0130_yesterday['key4'] = '01:00 - 01:30'
dict_r0130_0200_yesterday['key4'] = '01:30 - 02:00'
dict_r0200_0230_yesterday['key4'] = '02:00 - 02:30'
dict_r0230_0300_yesterday['key4'] = '02:30 - 03:00'
dict_r0300_0330_yesterday['key4'] = '03:00 - 03:30'
dict_r0330_0400_yesterday['key4'] = '03:30 - 04:00'
dict_r0400_0430_yesterday['key4'] = '04:00 - 04:30'
dict_r0430_0500_yesterday['key4'] = '04:30 - 05:00'
dict_r0500_0530_yesterday['key4'] = '05:00 - 05:30'
dict_r0530_0600_yesterday['key4'] = '05:30 - 06:00'
dict_r0600_0630_yesterday['key4'] = '06:00 - 06:30'
dict_r0630_0700_yesterday['key4'] = '06:30 - 07:00'
dict_r0700_0730_yesterday['key4'] = '07:00 - 07:30'
dict_r0730_0800_yesterday['key4'] = '07:30 - 08:00'
dict_r0800_0830_yesterday['key4'] = '08:00 - 08:30'
dict_r0830_0900_yesterday['key4'] = '08:30 - 09:00'
dict_r0900_0930_yesterday['key4'] = '09:00 - 09:30'
dict_r0930_1000_yesterday['key4'] = '09:30 - 10:00'
dict_r1000_1030_yesterday['key4'] = '10:00 - 10:30'
dict_r1030_1100_yesterday['key4'] = '10:30 - 11:00'
dict_r1100_1130_yesterday['key4'] = '11:00 - 11:30'
dict_r1130_1200_yesterday['key4'] = '11:30 - 12:00'
dict_r1200_1230_yesterday['key4'] = '12:00 - 12:30'
dict_r1230_1300_yesterday['key4'] = '12:30 - 13:00'
dict_r1300_1330_yesterday['key4'] = '13:00 - 13:30'
dict_r1330_1400_yesterday['key4'] = '13:30 - 14:00'
dict_r1400_1430_yesterday['key4'] = '14:00 - 14:30'
dict_r1430_1500_yesterday['key4'] = '14:30 - 15:00'
dict_r1500_1530_yesterday['key4'] = '15:00 - 15:30'
dict_r1530_1600_yesterday['key4'] = '15:30 - 16:00'
dict_r1600_1630_yesterday['key4'] = '16:00 - 16:30'
dict_r1630_1700_yesterday['key4'] = '16:30 - 17:00'
dict_r1700_1730_yesterday['key4'] = '17:00 - 17:30'
dict_r1730_1800_yesterday['key4'] = '17:30 - 18:00'
dict_r1800_1830_yesterday['key4'] = '18:00 - 18:30'
dict_r1830_1900_yesterday['key4'] = '18:30 - 19:00'
dict_r1900_1930_yesterday['key4'] = '19:00 - 19:30'
dict_r1930_2000_yesterday['key4'] = '19:30 - 20:00'
dict_r2000_2030_yesterday['key4'] = '20:00 - 20:30'
dict_r2030_2100_yesterday['key4'] = '20:30 - 21:00'
dict_r2100_2130_yesterday['key4'] = '21:00 - 21:30'
dict_r2130_2200_yesterday['key4'] = '21:30 - 22:00'
dict_r2200_2230_yesterday['key4'] = '22:00 - 22:30'
dict_r2230_2300_yesterday['key4'] = '22:30 - 23:00'
dict_r2300_2330_yesterday['key4'] = '23:00 - 23:30'
dict_r2330_0000_yesterday['key4'] = '23:30 - 00:00'
all_reservations_yesterday.append(dict_r0000_0030_yesterday)
all_reservations_yesterday.append(dict_r0030_0100_yesterday)
all_reservations_yesterday.append(dict_r0100_0130_yesterday)
all_reservations_yesterday.append(dict_r0130_0200_yesterday)
all_reservations_yesterday.append(dict_r0200_0230_yesterday)
all_reservations_yesterday.append(dict_r0230_0300_yesterday)
all_reservations_yesterday.append(dict_r0300_0330_yesterday)
all_reservations_yesterday.append(dict_r0330_0400_yesterday)
all_reservations_yesterday.append(dict_r0400_0430_yesterday)
all_reservations_yesterday.append(dict_r0430_0500_yesterday)
all_reservations_yesterday.append(dict_r0500_0530_yesterday)
all_reservations_yesterday.append(dict_r0530_0600_yesterday)
all_reservations_yesterday.append(dict_r0600_0630_yesterday)
all_reservations_yesterday.append(dict_r0630_0700_yesterday)
all_reservations_yesterday.append(dict_r0700_0730_yesterday)
all_reservations_yesterday.append(dict_r0730_0800_yesterday)
all_reservations_yesterday.append(dict_r0800_0830_yesterday)
all_reservations_yesterday.append(dict_r0830_0900_yesterday)
all_reservations_yesterday.append(dict_r0900_0930_yesterday)
all_reservations_yesterday.append(dict_r0930_1000_yesterday)
all_reservations_yesterday.append(dict_r1000_1030_yesterday)
all_reservations_yesterday.append(dict_r1030_1100_yesterday)
all_reservations_yesterday.append(dict_r1100_1130_yesterday)
all_reservations_yesterday.append(dict_r1130_1200_yesterday)
all_reservations_yesterday.append(dict_r1200_1230_yesterday)
all_reservations_yesterday.append(dict_r1230_1300_yesterday)
all_reservations_yesterday.append(dict_r1300_1330_yesterday)
all_reservations_yesterday.append(dict_r1330_1400_yesterday)
all_reservations_yesterday.append(dict_r1400_1430_yesterday)
all_reservations_yesterday.append(dict_r1430_1500_yesterday)
all_reservations_yesterday.append(dict_r1500_1530_yesterday)
all_reservations_yesterday.append(dict_r1530_1600_yesterday)
all_reservations_yesterday.append(dict_r1600_1630_yesterday)
all_reservations_yesterday.append(dict_r1630_1700_yesterday)
all_reservations_yesterday.append(dict_r1700_1730_yesterday)
all_reservations_yesterday.append(dict_r1730_1800_yesterday)
all_reservations_yesterday.append(dict_r1800_1830_yesterday)
all_reservations_yesterday.append(dict_r1830_1900_yesterday)
all_reservations_yesterday.append(dict_r1900_1930_yesterday)
all_reservations_yesterday.append(dict_r1930_2000_yesterday)
all_reservations_yesterday.append(dict_r2000_2030_yesterday)
all_reservations_yesterday.append(dict_r2030_2100_yesterday)
all_reservations_yesterday.append(dict_r2100_2130_yesterday)
all_reservations_yesterday.append(dict_r2130_2200_yesterday)
all_reservations_yesterday.append(dict_r2200_2230_yesterday)
all_reservations_yesterday.append(dict_r2230_2300_yesterday)
all_reservations_yesterday.append(dict_r2300_2330_yesterday)
all_reservations_yesterday.append(dict_r2330_0000_yesterday)
context['all_reservations_yesterday'] = all_reservations_yesterday
for reservation in reservations_tomorrow:
if '00:00' <= reservation.start_time < '00:30':
r0000_0030_tomorrow.append(reservation)
if '00:00' < reservation.end_time <= '00:30' or reservation.start_time < '00:30' < reservation.end_time:
if r0000_0030_tomorrow.count(reservation) == 0:
r0000_0030_tomorrow.append(reservation)
if '00:30' <= reservation.start_time < '01:00':
r0030_0100_tomorrow.append(reservation)
if '00:30' < reservation.end_time <= '01:00' or reservation.start_time < '01:00' < reservation.end_time:
if r0030_0100_tomorrow.count(reservation) == 0:
r0030_0100_tomorrow.append(reservation)
if '01:00' <= reservation.start_time < '01:30':
r0100_0130_tomorrow.append(reservation)
if '01:00' < reservation.end_time <= '01:30' or reservation.start_time < '01:30' < reservation.end_time:
if r0100_0130_tomorrow.count(reservation) == 0:
r0100_0130_tomorrow.append(reservation)
if '01:30' <= reservation.start_time < '02:00':
r0130_0200_tomorrow.append(reservation)
if '01:30' < reservation.end_time <= '02:00' or reservation.start_time < '02:00' < reservation.end_time:
if r0130_0200_tomorrow.count(reservation) == 0:
r0130_0200_tomorrow.append(reservation)
if '02:00' <= reservation.start_time < '02:30':
r0200_0230_tomorrow.append(reservation)
if '02:00' < reservation.end_time <= '02:30' or reservation.start_time < '02:30' < reservation.end_time:
if r0200_0230_tomorrow.count(reservation) == 0:
r0200_0230_tomorrow.append(reservation)
if '02:30' <= reservation.start_time < '03:00':
r0230_0300_tomorrow.append(reservation)
if '02:30' < reservation.end_time <= '03:00' or reservation.start_time < '03:00' < reservation.end_time:
if r0230_0300_tomorrow.count(reservation) == 0:
r0230_0300_tomorrow.append(reservation)
if '03:00' <= reservation.start_time < '03:30':
r0300_0330_tomorrow.append(reservation)
if '03:00' < reservation.end_time <= '03:30' or reservation.start_time < '03:30' < reservation.end_time:
if r0300_0330_tomorrow.count(reservation) == 0:
r0300_0330_tomorrow.append(reservation)
if '03:30' <= reservation.start_time < '04:00':
r0330_0400_tomorrow.append(reservation)
if '03:30' < reservation.end_time <= '04:00' or reservation.start_time < '04:00' < reservation.end_time:
if r0330_0400_tomorrow.count(reservation) == 0:
r0330_0400_tomorrow.append(reservation)
if '04:00' <= reservation.start_time < '04:30':
r0400_0430_tomorrow.append(reservation)
if '04:00' < reservation.end_time <= '04:30' or reservation.start_time < '04:30' < reservation.end_time:
if r0400_0430_tomorrow.count(reservation) == 0:
r0400_0430_tomorrow.append(reservation)
if '04:30' <= reservation.start_time < '05:00':
r0430_0500_tomorrow.append(reservation)
if '04:30' < reservation.end_time <= '05:00' or | |
X2=x2, X3=x3, *X4, **X5): X1, X5, x6, X9
def F2(X7=x7): X7
def F3(a1): a1
def F4(): a1
''').lstrip())
db = ImportDB("from m2 import x1, x2, x3, x4, x5, x6, x7, a1, a2")
output = fix_unused_and_missing_imports(input, db=db)
expected = PythonBlock(dedent('''
from m1 import X9
from m2 import a1, x2, x3, x6, x7
def F1(X1, X2=x2, X3=x3, *X4, **X5): X1, X5, x6, X9
def F2(X7=x7): X7
def F3(a1): a1
def F4(): a1
''').lstrip())
assert output == expected
def test_fix_missing_imports_funcall_1():
    """Missing names inside a function body get imports; its parameter does not."""
    src = PythonBlock(dedent('''
        def F1(x1): x1, x2, y1
    ''').lstrip())
    importdb = ImportDB("from m2 import x1, x2, x3, x4")
    result = fix_unused_and_missing_imports(src, db=importdb)
    # Only x2 is both missing and known to the db; x1 is bound as a parameter
    # and y1 is unknown, so neither is imported.
    want = PythonBlock(dedent('''
        from m2 import x2
    ''').lstrip() + str(src))
    assert result == want
def test_fix_missing_imports_funcall_and_really_missing_1():
    """x1 is imported for the module-level use even though it is also a
    parameter of F1; x4 is imported for the use inside F1's body."""
    input = PythonBlock(dedent('''
        def F1(x1, x2, x3): x1, x2, x3, x4, y1
        x1
    ''').lstrip())
    db = ImportDB("from m2 import x1, x2, x3, x4, x5")
    output = fix_unused_and_missing_imports(input, db=db)
    expected = PythonBlock(dedent('''
        from m2 import x1, x4
    ''').lstrip() + str(input))
    assert output == expected
def test_fix_unused_imports_local_1():
    """A local assignment shadows the import inside F1, so X1 is unused and removed."""
    src = PythonBlock(dedent('''
        from m1 import X1, X2
        def F1():
            X1 = 5
            X1, X2
    ''').lstrip())
    result = fix_unused_and_missing_imports(src, db=ImportDB(""))
    want = PythonBlock(dedent('''
        from m1 import X2
        def F1():
            X1 = 5
            X1, X2
    ''').lstrip())
    assert result == want
def test_fix_missing_imports_local_1():
    """x1 is bound locally inside F1, so only x2 needs an import."""
    input = PythonBlock(dedent('''
        def F1():
            x1 = 5
            x1, x2
    ''').lstrip())
    db = ImportDB("from m2 import x1, x2, x3")
    output = fix_unused_and_missing_imports(input, db=db)
    expected = PythonBlock(dedent('''
        from m2 import x2
        def F1():
            x1 = 5
            x1, x2
    ''').lstrip())
    assert output == expected
def test_fix_missing_imports_nonlocal_post_store_1():
    """x1 and x2 are bound in the enclosing scope of F2 (x2 even though its
    assignment appears after F2's definition), so only x3 is imported."""
    input = PythonBlock(dedent('''
        def F1():
            x1 = None
            def F2():
                x1, x2, x3, y1
            x2 = None
    ''').lstrip())
    db = ImportDB("from m2 import x1, x2, x3, x4, x5")
    output = fix_unused_and_missing_imports(input, db=db)
    expected = PythonBlock(dedent('''
        from m2 import x3
    ''').lstrip() + str(input))
    assert output == expected
def test_fix_missing_imports_nonlocal_post_del_1():
    """
    Calling F2 after the deletion of x2 in the enclosing scope makes no sense;
    it would trigger:
    NameError: free variable 'x2' referenced before assignment in enclosing scope
    regardless of whether x2 is defined at module scope or not.
    Therefore we do not expect x2 to be imported.
    """
    input = PythonBlock(dedent('''
        def F1():
            x1 = x2 = None
            def F2():
                x1, x2, x3, y1
            del x2
    ''').lstrip())
    db = ImportDB("from m2 import x1, x2, x3, x4")
    output = fix_unused_and_missing_imports(input, db=db)
    expected = PythonBlock(dedent('''
        from m2 import x3
    ''').lstrip() + str(input))
    assert output == expected
def test_fix_unused_and_missing_imports_IfExp_1():
    """All three operands of a conditional expression count as uses; w stays out."""
    input = PythonBlock(dedent('''
        x if y else z
    ''').lstrip())
    db = ImportDB("from m1 import w, x, y, z")
    output = fix_unused_and_missing_imports(input, db=db)
    expected = PythonBlock(dedent('''
        from m1 import x, y, z
        x if y else z
    ''').lstrip())
    assert output == expected
def test_fix_unused_and_missing_imports_ClassDef_1():
    """Decorators, bases, defaults, and method bodies count as uses.  Note
    that x11 IS imported: the method name at class scope is not visible from
    inside x11's own body, so the reference there is treated as missing."""
    input = PythonBlock(dedent('''
        @x1
        class x2(x3(x4), x5):
            @x6
            def x7(x8=x9): x10, x8
            def x11(): x11
    ''').lstrip())
    db = ImportDB("from m1 import x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12")
    output = fix_unused_and_missing_imports(input, db=db)
    expected = PythonBlock(dedent('''
        from m1 import x1, x10, x11, x3, x4, x5, x6, x9
        @x1
        class x2(x3(x4), x5):
            @x6
            def x7(x8=x9): x10, x8
            def x11(): x11
    ''').lstrip())
    assert output == expected
def test_fix_missing_imports_in_non_method():
    """
    Unlike test_fix_unused_and_missing_imports_ClassDef_1, free-standing
    functions can refer to themselves, so no import is added for `selfref`.
    Currently this will not work for closures defined in methods, as we'll
    see a class scope.
    See https://github.com/deshaw/pyflyby/issues/179
    """
    input = PythonBlock(
        dedent(
            """
            def selfref():
                selfref.execute = True
            """
        ).lstrip()
    )
    db = ImportDB("")
    output = fix_unused_and_missing_imports(input, db=db)
    expected = PythonBlock(
        dedent(
            """
            def selfref():
                selfref.execute = True
            """
        ).lstrip()
    )
    assert output == expected
def test_fix_unused_and_missing_continutation_1():
    """Backslash line-continuations inside comments and string literals must
    not confuse use detection: `b` is a real use (the comment's trailing
    backslash does not continue it), while `c` appears only inside the
    continued string literal and is therefore not imported."""
    input = PythonBlock(dedent(r'''
        a#\
        b + '\
        c#' + d
    ''').lstrip())
    db = ImportDB("from m1 import a, b, c, d")
    output = fix_unused_and_missing_imports(input, db=db)
    expected = PythonBlock(dedent(r'''
        from m1 import a, b, d
        a#\
        b + '\
        c#' + d
    ''').lstrip())
    assert output == expected
def test_fix_unused_import_future_is_not_unused_1():
    """A __future__ import is never reported as unused."""
    src = PythonBlock(dedent(r'''
        from __future__ import division
    ''').lstrip())
    result = fix_unused_and_missing_imports(src, db=ImportDB(""))
    # The block must pass through untouched.
    assert result == src
def test_fix_unused_and_missing_print_function_1():
    """`print` as a module-path component and as an imported name must both be
    handled when print_function is in effect; unused names are still pruned."""
    input = PythonBlock(dedent(r'''
        from __future__ import print_function
        from m1 import print, m1a
        from m2.print import m2a, m2b
        m1a, m2a, m3a
    ''').lstrip())
    db = ImportDB(
        "from __future__ import print_function\n"
        "from print.m3 import m3a, m3b")
    output = fix_unused_and_missing_imports(input, db=db)
    expected = PythonBlock(dedent(r'''
        from __future__ import print_function
        from m1 import m1a
        from m2.print import m2a
        from print.m3 import m3a
        m1a, m2a, m3a
    ''').lstrip())
    assert output == expected
def test_last_line_no_trailing_newline_1():
    """Input whose last line lacks a trailing newline is handled; the missing
    import is inserted after the leading comment."""
    src = PythonBlock("#x\ny")
    want = PythonBlock(dedent('''
        #x
        from Y import y
        y''').lstrip())
    result = fix_unused_and_missing_imports(src, db=ImportDB("from Y import y"))
    assert result == want
def test_last_line_comment_no_trailing_newline_1():
    """A trailing comment line without a final newline must survive the edit."""
    input = PythonBlock("y\n#x")
    db = ImportDB("from Y import y")
    output = fix_unused_and_missing_imports(input, db=db)
    expected = PythonBlock(dedent('''
        from Y import y
        y
        #x''').lstrip())
    assert output == expected
def test_last_line_multistring_no_trailing_newline_1():
    """A multi-line string literal ending on the last (newline-less) line must
    not be corrupted when the import is prepended."""
    input = PythonBlock(dedent('''
        """
        #x""", y # comment ''').lstrip())
    db = ImportDB("from Y import y")
    output = fix_unused_and_missing_imports(input, db=db)
    expected = PythonBlock(dedent('''
        from Y import y
        """
        #x""", y # comment ''').lstrip())
    assert output == expected
def test_last_line_escaped_string_no_trailing_newline_1():
    """A backslash-continued string ending on the last (newline-less) line
    must not be corrupted when the import is prepended."""
    input = PythonBlock(dedent('''
        "\
        #x", y # comment ''').lstrip())
    db = ImportDB("from Y import y")
    output = fix_unused_and_missing_imports(input, db=db)
    expected = PythonBlock(dedent('''
        from Y import y
        "\
        #x", y # comment ''').lstrip())
    assert output == expected
def test_remove_broken_imports_1():
    """Unimportable names are dropped from both `import` and `from` statements;
    the survivors are rewritten one-per-line and sorted."""
    input = PythonBlock(dedent('''
        import sys, os, omgdoesntexist_95421787, keyword
        from email.mime.audio import MIMEAudio, omgdoesntexist_8824165
        code()
    ''').lstrip(), filename="/foo/test_remove_broken_imports_1.py")
    output = remove_broken_imports(input)
    expected = PythonBlock(dedent('''
        import keyword
        import os
        import sys
        from email.mime.audio import MIMEAudio
        code()
    ''').lstrip(), filename="/foo/test_remove_broken_imports_1.py")
    assert output == expected
def test_replace_star_imports_1():
    """A star import is expanded via the module's __all__; names provided by
    surrounding imports are merged into the expansion (f1) or kept (f5).

    NOTE: registers a fake module in sys.modules and does not remove it.
    """
    # Fabricate an importable module so the expansion can introspect __all__.
    m = types.ModuleType("fake_test_module_345489")
    m.__all__ = ['f1', 'f2', 'f3', 'f4', 'f5']
    sys.modules["fake_test_module_345489"] = m
    input = PythonBlock(dedent('''
        from mod1 import f1
        from fake_test_module_345489 import *
        from mod2 import f5
    ''').lstrip(), filename="/foo/test_replace_star_imports_1.py")
    output = replace_star_imports(input)
    expected = PythonBlock(dedent('''
        from fake_test_module_345489 import f1, f2, f3, f4
        from mod2 import f5
    ''').lstrip(), filename="/foo/test_replace_star_imports_1.py")
    assert output == expected
def test_replace_star_imports_relative_1():
    """Relative star imports are not expanded (semi-intentionally
    unimplemented), but must at least pass through without crashing."""
    input = PythonBlock(dedent('''
        from .x import *
    ''').lstrip(), filename="/foo/test_replace_star_imports_relative_1.py")
    output = replace_star_imports(input)
    expected = PythonBlock(dedent('''
        from .x import *
    ''').lstrip(), filename="/foo/test_replace_star_imports_relative_1.py")
    assert output == expected
def test_replace_star_imports_unknown_module_1():
    """A star import from an unimportable module is left alone, not an error."""
    text = dedent('''
        from omgnonexistentmodule75085477 import *
    ''').lstrip()
    result = replace_star_imports(PythonBlock(text))
    # Nothing can be introspected, so the block is unchanged.
    assert result == PythonBlock(text)
def test_transform_imports_1():
    """Renaming m.x -> m.y.z rewrites every import form and dotted use of
    m.x, while unrelated attributes of m (m.xx) are untouched."""
    input = PythonBlock(dedent('''
        from m import x
        from m import x as X
        import m.x
        print(m.x, m.xx)
    ''').lstrip(), filename="/foo/test_transform_imports_1.py")
    output = transform_imports(input, {"m.x": "m.y.z"})
    expected = PythonBlock(dedent('''
        import m.y.z
        from m.y import z as X, z as x
        print(m.y.z, m.xx)
    ''').lstrip(), filename="/foo/test_transform_imports_1.py")
    assert output == expected
def test_canonicalize_imports_1():
    """canonicalize_imports applies the db's __canonical_imports__ mapping,
    producing the same rewrite as transform_imports with that mapping."""
    input = PythonBlock(dedent('''
        from m import x
        from m import x as X
        import m.x
        print(m.x, m.xx)
    ''').lstrip(), filename="/foo/test_transform_imports_1.py")
    db = ImportDB("""
        __canonical_imports__ = {"m.x": "m.y.z"}
    """)
    output = canonicalize_imports(input, db=db)
    expected = PythonBlock(dedent('''
        import m.y.z
        from m.y import z as X, z as x
        print(m.y.z, m.xx)
    ''').lstrip(), filename="/foo/test_transform_imports_1.py")
    assert output == expected
@pytest.mark.skipif(
    PY2,
    reason="Python 3-only syntax.")
def test_canonicalize_imports_f_string_1():
    """f-string format specs (e.g. {a:2d}) must parse cleanly and survive an
    (empty-mapping) canonicalization pass unchanged."""
    input = PythonBlock(dedent('''
        a = 1
        print(f"{a:2d}")
    ''').lstrip(), filename="/foo/test_canonicalize_imports_f_string_1.py")
    db = ImportDB("""
    """)
    output = canonicalize_imports(input, db=db)
    expected = PythonBlock(dedent('''
        a = 1
        print(f"{a:2d}")
    ''').lstrip(), filename="/foo/test_canonicalize_imports_f_string_1.py")
    assert output == expected
def test_empty_file_1():
    """Canonicalizing an empty file is a no-op."""
    empty = PythonBlock('', filename="/foo/test_empty_file_1.py")
    result = canonicalize_imports(empty, db=ImportDB(""))
    assert result == PythonBlock('', filename="/foo/test_empty_file_1.py")
def test_empty_file_mandatory_1():
    """Mandatory imports are added even to an empty file, sorted and normalized."""
    empty = PythonBlock('', filename="/foo/test_empty_file_mandatory_1.py")
    importdb = ImportDB("__mandatory_imports__ = ['from aa import cc,bb']")
    result = fix_unused_and_missing_imports(empty, db=importdb)
    want = PythonBlock('from aa import bb, cc\n\n',
                       filename="/foo/test_empty_file_mandatory_1.py")
    assert result == want
def test_future_flags_1():
    """The print_function future flag must be honored when parsing, so
    `print("", file=...)` is valid; sys is imported for its use, os is not."""
    input = PythonBlock(dedent('''
        from __future__ import print_function
        print("", file=sys.stdout)
    ''').lstrip())
    db = ImportDB("import os, sys")
    output = fix_unused_and_missing_imports(input, db=db)
    expected = PythonBlock(dedent('''
        from __future__ import print_function
        import sys
        print("", file=sys.stdout)
    ''').lstrip())
    assert output == expected
def test_with_1():
    """A name used as a `with` context manager counts as a use and is imported."""
    src = PythonBlock(dedent('''
        with closing(open("/etc/passwd")) as f:
            pass
    ''').lstrip())
    importdb = ImportDB("from contextlib import closing")
    result = fix_unused_and_missing_imports(src, db=importdb)
    want = PythonBlock(dedent('''
        from contextlib import closing
        with closing(open("/etc/passwd")) as f:
            pass
    ''').lstrip())
    assert want == result
@pytest.mark.skipif(
sys.version_info < (2,7),
reason="Old Python doesn't support multiple context managers")
def test_with_multi_1():
input = PythonBlock(dedent('''
with aa as xx , bb as yy, cc as zz:
pass
''').lstrip())
db = ImportDB("from M import aa, bb, cc, dd, xx, yy, zz")
output = fix_unused_and_missing_imports(input, db=db)
expected = PythonBlock(dedent('''
from M import aa, bb, cc
with aa as xx | |
<reponame>ivanmm25/FLYCOPtools
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 11:52:44 2021
@author: <NAME>
"""
"""
DESCRIPTION (see detailed description in each plotting function)
Plotting script for Input & Output Analysis in:
- InputParametersAnalysis.py
- OutputParametersAnalysis.py
- Others (...)
NOTE THAT:
    each plotting function saves its figure to '<name_image>.png' and closes it.
"""
import matplotlib.pyplot as plt
import seaborn as sns
# =============================================================================
# 2 subplots with the SAME x, y labels
# First subplot: the axes comprise the whole extension of the data (x, y dimensions)
# Second subplot: y-axis with UPPER limitation, 'subset_ylim', to amplify the scale
# =============================================================================
def two_subplots_subsetylim(x_label, y_label, DataFrame, subset_ylim, name_image, plot_title):
    """Plot DataFrame[y_label] vs DataFrame[x_label] in two stacked panels.

    The top panel shows the full data; the bottom panel keeps only rows
    with y < subset_ylim to amplify the y-axis scale.
    The figure is written to '<name_image>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    plt.title(plot_title, fontsize=14)
    # Panel 1: the whole extent of the data.
    plt.subplot(211)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.plot(DataFrame[x_label], DataFrame[y_label], '^g')
    # Panel 2: zoom in by dropping rows at/above the y ceiling.
    plt.subplot(212)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    zoomed = DataFrame[DataFrame[y_label] < subset_ylim]
    plt.plot(zoomed[x_label], zoomed[y_label], '^c')
    fig.savefig(f"{name_image}.png")
    plt.close(fig)
# =============================================================================
# 2 subplots with the SAME x, y labels
# First subplot: the axes comprise the whole extension of the data (x, y dimensions)
# Second subplot: x-axis with UPPER limitation, 'subset_xlim', to amplify the scale
# =============================================================================
def two_subplots_subsetxlim(x_label, y_label, DataFrame, subset_xlim, name_image, plot_title):
    """Plot DataFrame[y_label] vs DataFrame[x_label] in two stacked panels.

    The top panel shows the full data; the bottom panel keeps only rows
    with x < subset_xlim to amplify the x-axis scale.
    The figure is written to '<name_image>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    plt.title(plot_title, fontsize=14)
    # Panel 1: the whole extent of the data.
    plt.subplot(211)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.plot(DataFrame[x_label], DataFrame[y_label], '^g')
    # Panel 2: zoom in by dropping rows at/above the x ceiling.
    plt.subplot(212)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    zoomed = DataFrame[DataFrame[x_label] < subset_xlim]
    plt.plot(zoomed[x_label], zoomed[y_label], '^c')
    fig.savefig(f"{name_image}.png")
    plt.close(fig)
# =============================================================================
# 2 subplots with the SAME x, y labels
# First subplot: the axes comprise the whole extension of the data (x, y dimensions)
# Second subplot: x-axis with LOWER limitation, 'subset_xlim', to amplify the scale
# =============================================================================
def two_subplots_subset_x_lowerlim(x_label, y_label, DataFrame, subset_xlim, name_image, plot_title):
    """Plot DataFrame[y_label] vs DataFrame[x_label] in two stacked panels.

    The top panel shows the full data; the bottom panel keeps only rows
    with x > subset_xlim (a lower bound) to amplify the x-axis scale.
    The figure is written to '<name_image>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    plt.title(plot_title, fontsize=14)
    # Panel 1: the whole extent of the data.
    plt.subplot(211)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.plot(DataFrame[x_label], DataFrame[y_label], '^g')
    # Panel 2: zoom in by dropping rows at/below the x floor.
    plt.subplot(212)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    zoomed = DataFrame[DataFrame[x_label] > subset_xlim]
    plt.plot(zoomed[x_label], zoomed[y_label], '^c')
    fig.savefig(f"{name_image}.png")
    plt.close(fig)
# =============================================================================
# 2 subplots with the SAME x, y labels
# First subplot: the axes comprise the whole extension of the data (x, y dimensions)
# Second subplot: x-axis and y-axis with UPPER limitation, 'subset_xlim' & 'subset_ylim',
# to amplify the scale
# =============================================================================
def two_subplots_subsetlims(x_label, y_label, DataFrame, subset_xlim, subset_ylim, name_image, plot_title):
    """Plot DataFrame[y_label] vs DataFrame[x_label] in two stacked panels.

    The top panel shows the full data; the bottom panel keeps only rows
    with x < subset_xlim AND y < subset_ylim, amplifying both axes.
    The figure is written to '<name_image>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    plt.title(plot_title, fontsize=14)
    # Panel 1: the whole extent of the data.
    plt.subplot(211)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.plot(DataFrame[x_label], DataFrame[y_label], '^g')
    # Panel 2: apply the x ceiling first, then the y ceiling.
    plt.subplot(212)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    zoomed = DataFrame[DataFrame[x_label] < subset_xlim]
    zoomed = zoomed[zoomed[y_label] < subset_ylim]
    plt.plot(zoomed[x_label], zoomed[y_label], '^c')
    fig.savefig(f"{name_image}.png")
    plt.close(fig)
# =============================================================================
# 2 subplots with the TWO DIFFERENT y labels (same x label)
# First subplot: x_label, y_label1
# Second subplot: x_label, y_label2
# =============================================================================
def two_plots_twolabels(x_label, y_label1, y_label2, DataFrame, name_image, plot_title):
    """Two stacked panels sharing x_label: y_label1 on top, y_label2 below.

    The figure is written to '<name_image>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    plt.title(plot_title, fontsize=14)
    # Panel 1: first y variable.
    plt.subplot(211)
    plt.xlabel(x_label)
    plt.ylabel(y_label1)
    plt.plot(DataFrame[x_label], DataFrame[y_label1], '^g')
    # Panel 2: second y variable.
    plt.subplot(212)
    plt.xlabel(x_label)
    plt.ylabel(y_label2)
    plt.plot(DataFrame[x_label], DataFrame[y_label2], '^c')
    fig.savefig(f"{name_image}.png")
    plt.close(fig)
# =============================================================================
# 2 subplots with the TWO DIFFERENT y labels (same x label)
# x-axis with UPPER limitation, 'xlim', to amplify the scale
# First subplot: x_label, y_label1
# Second subplot: x_label, y_label2
# =============================================================================
def two_plots_twolabels_xlim(x_label, y_label1, y_label2, DataFrame, xlim, name_image, plot_title):
    """Two stacked panels sharing x_label (y_label1 on top, y_label2 below),
    both restricted to rows with x < xlim to amplify the x-axis scale.

    The figure is written to '<name_image>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    # One shared subset for both panels.
    zoomed = DataFrame[DataFrame[x_label] < xlim]
    plt.title(plot_title, fontsize=14)
    # Panel 1: first y variable.
    plt.subplot(211)
    plt.xlabel(x_label)
    plt.ylabel(y_label1)
    plt.plot(zoomed[x_label], zoomed[y_label1], '^g')
    # Panel 2: second y variable.
    plt.subplot(212)
    plt.xlabel(x_label)
    plt.ylabel(y_label2)
    plt.plot(zoomed[x_label], zoomed[y_label2], '^c')
    fig.savefig(f"{name_image}.png")
    plt.close(fig)
# =============================================================================
# 2 subplots with the TWO DIFFERENT y labels (same x label)
# x-axis with LOWER limitation, 'xlim', to amplify the scale
# First subplot: x_label, y_label1
# Second subplot: x_label, y_label2
# =============================================================================
def two_plots_twolabels_x_lowerlim(x_label, y_label1, y_label2, DataFrame, xlim, name_image, plot_title):
    """Two stacked panels sharing x_label (y_label1 on top, y_label2 below),
    both restricted to rows with x > xlim (a lower bound) to amplify the
    x-axis scale.

    The figure is written to '<name_image>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    # One shared subset for both panels.
    zoomed = DataFrame[DataFrame[x_label] > xlim]
    plt.title(plot_title, fontsize=14)
    # Panel 1: first y variable.
    plt.subplot(211)
    plt.xlabel(x_label)
    plt.ylabel(y_label1)
    plt.plot(zoomed[x_label], zoomed[y_label1], '^g')
    # Panel 2: second y variable.
    plt.subplot(212)
    plt.xlabel(x_label)
    plt.ylabel(y_label2)
    plt.plot(zoomed[x_label], zoomed[y_label2], '^c')
    fig.savefig(f"{name_image}.png")
    plt.close(fig)
# =============================================================================
# One plot (x_label, y_label)
# =============================================================================
def one_plot(x_label, y_label, DataFrame, name_image, plot_title):
    """Single plot of DataFrame[y_label] vs DataFrame[x_label] (cyan markers).

    The figure is written to '<name_image>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    plt.subplot(111)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.plot(DataFrame[x_label], DataFrame[y_label], '^c')
    plt.title(plot_title, fontsize=14)
    fig.savefig(f"{name_image}.png")
    plt.close(fig)
# =============================================================================
# One plot (x_label, y_label)
# x-axis with UPPER limitation, 'xlim', to amplify the scale
# =============================================================================
def one_plot_xlim(x_label, y_label, DataFrame, xlim, name_image, plot_title):
    """Single plot restricted to rows with x < xlim, amplifying the x scale.

    The figure is written to '<name_image>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    zoomed = DataFrame[DataFrame[x_label] < xlim]
    plt.subplot(111)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.plot(zoomed[x_label], zoomed[y_label], '^c')
    plt.title(plot_title, fontsize=14)
    fig.savefig(f"{name_image}.png")
    plt.close(fig)
# =============================================================================
# One plot (x_label, y_label)
# x-axis with LOWER limitation, 'xlim', to amplify the scale
# =============================================================================
def one_plot_x_lowerlim(x_label, y_label, DataFrame, xlim, name_image, plot_title):
    """Single plot restricted to rows with x > xlim (a lower bound),
    amplifying the x scale.

    The figure is written to '<name_image>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    zoomed = DataFrame[DataFrame[x_label] > xlim]
    plt.subplot(111)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.plot(zoomed[x_label], zoomed[y_label], '^c')
    plt.title(plot_title, fontsize=14)
    fig.savefig(f"{name_image}.png")
    plt.close(fig)
# =============================================================================
# Basic BoxPlot (x_var, y_var)
# Default whiskers: 1.5*(IQR)
# =============================================================================
def basic_boxplot(dataframe, x_var, y_var, x_label, y_label, filename, plot_title):
    """Seaborn box plot of y_var grouped by x_var (default 1.5*IQR whiskers).

    The figure is written to '<filename>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    axes = sns.boxplot(x=x_var, y=y_var, data=dataframe)
    axes.set(xlabel=x_label, ylabel=y_label)
    plt.title(plot_title, fontsize=14)
    fig.savefig(f"{filename}.png")
    plt.close(fig)
# =============================================================================
# Basic BoxPlot (x_var, y_var)
# Default whiskers: 1.5*(IQR)
# 'ylims': (lower, upper) to limit y-axis scale
# Note that 'ylims' should be a tuple: ylims[0]: lower limit; ylims[1]: upper limit
# =============================================================================
def basic_boxplot_ylims(dataframe, x_var, y_var, x_label, y_label, filename, ylims, plot_title):
    """Seaborn box plot of y_var grouped by x_var (default 1.5*IQR whiskers),
    with the y-axis clamped to ylims = (lower, upper).

    The figure is written to '<filename>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    # Clamp the y-axis before drawing.
    plt.ylim(ylims[0], ylims[1])
    axes = sns.boxplot(x=x_var, y=y_var, data=dataframe)
    axes.set(xlabel=x_label, ylabel=y_label)
    plt.title(plot_title, fontsize=14)
    fig.savefig(f"{filename}.png")
    plt.close(fig)
# =============================================================================
# Basic ScatterPlot (x_col, y_col)
# =============================================================================
def basic_scatter(dataframe, x_col, y_col, x_label, y_label, filename, plot_title):
    """Seaborn jittered strip (categorical scatter) plot of y_col over x_col.

    The figure is written to '<filename>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    axes = sns.stripplot(x=x_col, y=y_col, jitter=True, data=dataframe)
    axes.set(xlabel=x_label, ylabel=y_label)
    plt.title(plot_title, fontsize=14)
    fig.savefig(f"{filename}.png")
    plt.close(fig)
# =============================================================================
# Basic ScatterPlot (x_col, y_col)
# y-axis with UPPER limitation, 'ylim', to amplify the scale
# =============================================================================
def basic_scatter_ylim(dataframe, x_col, y_col, x_label, y_label, ylim, filename, plot_title):
    """Seaborn jittered strip plot of y_col over x_col with the y-axis
    clamped to [0, ylim] to amplify the scale.

    The figure is written to '<filename>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    axes = sns.stripplot(x=x_col, y=y_col, jitter=True, data=dataframe)
    axes.set(xlabel=x_label, ylabel=y_label)
    plt.ylim(0, ylim)
    plt.title(plot_title, fontsize=14)
    fig.savefig(f"{filename}.png")
    plt.close(fig)
# =============================================================================
# BoxPlot + ScatterPlot (x_var, y_var)
# Default whiskers: 1.5*(IQR)
# =============================================================================
# reverse_colum: if the dataframe series to be plotted have to be read from the bottom
# to the top, instead of the other way around
def basic_boxplot_scatter(dataframe, x_var, y_var, x_label, y_label, filename, plot_title, reverse_dataframe=False):
    """Box plot (default 1.5*IQR whiskers, translucent boxes) overlaid with a
    jittered strip plot of the same data.

    If reverse_dataframe is True the rows are plotted in reverse order
    (dataframe[::-1]), i.e. read from the bottom to the top.
    The figure is written to '<filename>.png'.
    """
    fig = plt.figure(num=0, clear=True, figsize=(7, 7))
    frame = dataframe[::-1] if reverse_dataframe else dataframe
    axes = sns.boxplot(x=x_var, y=y_var, data=frame, boxprops=dict(alpha=0.2))
    sns.stripplot(x=x_var, y=y_var, jitter=True, data=frame)
    axes.set(xlabel=x_label, ylabel=y_label)
    plt.title(plot_title, fontsize=14)
    fig.savefig(f"{filename}.png")
    plt.close(fig)
# =============================================================================
# BoxPlot + ScatterPlot (x_var, y_var)
# Default whiskers: 1.5*(IQR)
# Upper y-limit:
# =============================================================================
def basic_boxplot_scatter_upper_ylim(dataframe, x_var, y_var, x_label, y_label, filename, plot_title, upper_ylim):
fig = plt.figure(num=0, clear=True, figsize=(7, 7))
ax_boxplot = sns.boxplot(x = x_var, y = y_var, data = dataframe, boxprops=dict(alpha=0.2))
sns.stripplot(x=x_var, y=y_var, jitter = | |
thread responsible for calling this
self.start()
def run(self):
    """Worker entry point: refresh/download the monthly tweet archives,
    load them, normalise each tweet into the toolkit's document format,
    build a dataset, and post a RetrieveResultEvent back to the GUI.
    """
    logger = logging.getLogger(__name__+".RetrieveTwitterDatasetThread.run")
    logger.info("Starting")
    status_flag = True
    dataset_source = "Twitter"
    # Provenance details stored alongside the dataset.
    retrieval_details = {
        'query': self.query,
        'start_date': self.start_date,
        'end_date': self.end_date,
        }
    data = {}
    dataset = None
    error_msg = ""
    # tweepy auth
    #TODO: user-level auth
    consumer_key = self.consumer_key
    consumer_secret = self.consumer_secret
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    api = tweepy.API(auth) #TODO: create auth object in dialog and just pass in auth object
    if self.dataset_type == "tweet":
        #TODO: only update if called with twitter api flag? otherwise just import instead (like with reddit)
        if True: # twitter_api_flag
            try:
                wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'step':GUIText.RETRIEVING_TWITTER_DOWNLOADING_TWEETS_STEP}))
                self.UpdateDataFiles(auth, self.dataset_name, self.query, self.start_date, self.end_date, "TW_") #TODO: TW == twitter, maybe TD? Twitter Document?
            except RuntimeError:
                status_flag = False
                error_msg = GUIText.RETRIEVAL_FAILED_ERROR
        if status_flag:
            # wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'step':GUIText.RETRIEVING_BEGINNING_MSG}))
            #TODO: get data from files?
            # tweets = tweepy.Cursor(api.search, self.query).items(10)
            # wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'step':GUIText.RETRIEVING_BUSY_PREPARING_TWITTER_MSG}))
            wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'step':GUIText.RETRIEVING_TWITTER_IMPORTING_TWEET_STEP}))
            tweets = self.ImportDataFiles(self.dataset_name, self.query, self.start_date, self.end_date, "TW_")
            wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'step':GUIText.RETRIEVING_TWITTER_BUSY_PREPARING_DATA_STEP}))
            tweets_data = {}
            for tweet in tweets:
                # Key is a (source, type, id) triple uniquely identifying the document.
                key = ("Twitter", "tweet", tweet['id'])
                tweets_data[key] = {}
                tweets_data[key]['data_source'] = "Twitter"
                tweets_data[key]['data_type'] = "tweet"
                tweets_data[key]['id'] = tweet['id']
                tweets_data[key]["url"] = "https://twitter.com/" + tweet['user']['screen_name'] + "/status/" + tweet['id_str']
                tweets_data[key]['created_utc'] = tweet['created_utc']
                #TODO: is 'title' needed if tweets don't have titles?
                if 'title' in tweet:
                    tweets_data[key]['title'] = tweet['title']
                else:
                    tweets_data[key]['title'] = ""
                if 'text' in tweet:
                    tweets_data[key]['text'] = [tweet['text']]
                else:
                    tweets_data[key]['text'] = [""]
                # tweet always has shortened 'text', but we should use 'full_text' if possible
                # NOTE(review): this issues one api.get_status call per tweet,
                # which may be slow / rate-limited for large result sets.
                status = None
                try:
                    status_attempt = api.get_status(tweet['id'], tweet_mode="extended")
                    status = status_attempt
                except tweepy.error.TweepError: # Could not retrieve status for this tweet id, so use shortened 'text'(?) TODO
                    tweets_data[key]['full_text'] = [tweet['text']]
                if status is not None:
                    try:
                        tweets_data[key]['full_text'] = [status.retweeted_status.full_text]
                    except AttributeError: # Not a Retweet (no 'retweeted_status' field)
                        tweets_data[key]['full_text'] = [status.full_text]
                # Copy every raw field under a "tweet."-prefixed name.
                for field in tweet:
                    tweets_data[key]["tweet."+field] = tweet[field]
            #save as a document dataset
            data = tweets_data
    if status_flag:
        if len(data) > 0:
            wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'step':GUIText.RETRIEVING_BUSY_CONSTRUCTING_STEP}))
            dataset = DatasetsUtilities.CreateDataset(self.dataset_name, dataset_source, self.dataset_type, self.language, retrieval_details, data, self.available_fields_list, self.label_fields_list, self.computation_fields_list, self.main_frame)
            DatasetsUtilities.TokenizeDataset(dataset, self._notify_window, self.main_frame)
        else:
            status_flag = False
            error_msg = GUIText.NO_DATA_AVAILABLE_ERROR
    #return dataset and associated information
    result = {}
    result['status_flag'] = status_flag
    if dataset != None:
        result['dataset_key'] = dataset.key
        result['dataset'] = dataset
    result['error_msg'] = error_msg
    wx.PostEvent(self._notify_window, CustomEvents.RetrieveResultEvent(result))
    logger.info("Finished")
def UpdateDataFiles(self, auth, name, query, start_date, end_date, prefix):
    """Download any missing monthly tweet archives and refresh existing ones.

    Raises RuntimeError if one or more month retrievals failed; stops early
    (with a GUI warning) when the Twitter rate limit is reached.
    """
    logger = logging.getLogger(__name__+".TwitterRetrieverDialog.UpdateDataFiles["+name+"]["+str(start_date)+"]["+str(end_date)+"]["+prefix+"]")
    logger.info("Starting")
    #check which months of the range are already downloaded
    #data archives are by month so need which months have no data and which months are before months which have no data
    dict_monthfiles = twr.FilesAvailable(name, start_date, end_date, prefix)
    months_notfound = []
    months_tocheck = []
    errors = []
    for month, filename in dict_monthfiles.items():
        if filename == "":
            months_notfound.append(month)
        else:
            months_tocheck.append(month)
    rate_limit_reached = False
    #retrieve data of months that have not been downloaded
    for month in months_notfound:
        wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'msg':GUIText.RETRIEVING_DOWNLOADING_ALL_MSG+str(month)}))
        try:
            rate_limit_reached = twr.RetrieveMonth(auth, name, query, month, end_date, prefix)
            if rate_limit_reached:
                wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'msg':GUIText.WARNING+": "+GUIText.RETRIEVING_TWITTER_RATE_LIMIT_WARNING}))
                wx.MessageBox(GUIText.RETRIEVING_TWITTER_RATE_LIMIT_WARNING, GUIText.WARNING, wx.OK | wx.ICON_WARNING)
                break
        except RuntimeError as error:
            # Collect and continue: one failed month shouldn't stop the rest.
            errors.append(error)
    #check the existing months of data for any missing data
    for month in months_tocheck:
        wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'msg':GUIText.RETRIEVING_DOWNLOADING_NEW_MSG+str(month)}))
        try:
            rate_limit_reached = twr.UpdateRetrievedMonth(auth, name, query, month, end_date, dict_monthfiles[month], prefix)
            if rate_limit_reached:
                wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'msg':GUIText.WARNING+": "+GUIText.RETRIEVING_TWITTER_RATE_LIMIT_WARNING}))
                wx.MessageBox(GUIText.RETRIEVING_TWITTER_RATE_LIMIT_WARNING, GUIText.WARNING, wx.OK | wx.ICON_WARNING)
                break
        except RuntimeError as error:
            errors.append(error)
    if len(errors) != 0:
        raise RuntimeError(str(len(errors)) + " Retrievals Failed")
    logger.info("Finished")
def ImportDataFiles(self, name, query, start_date, end_date, prefix):
    """Load the downloaded monthly archive files and return the list of
    tweet entries dated within [start_date, end_date].

    The first and last files may straddle the range boundaries, so their
    entries are filtered on 'created_utc'; files fully inside the range
    are taken whole.
    """
    logger = logging.getLogger(__name__+".TwitterRetrieverDialog.ImportDataFiles["+name+"]["+str(start_date)+"]["+str(end_date)+"]["+prefix+"]")
    logger.info("Starting")
    #get names of files where data is to be loaded from
    dict_monthfiles = twr.FilesAvailable(name, start_date, end_date, prefix)
    files = []
    for filename in dict_monthfiles.values():
        if filename != "":
            files.append(filename)
    data = []
    if len(files) != 0:
        if len(files) > 1:
            #retrieve only needed data from first file
            with open(files[0], 'r') as infile:
                wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'msg':GUIText.RETRIEVING_IMPORTING_FILE_MSG+str(files[0])}))
                temp_data = json.load(infile)
                # Leading element is dropped (presumably archive metadata --
                # confirm against the twr writer).
                temp_data.pop(0)
                for entry in temp_data:
                    if entry['created_utc'] >= calendar.timegm((datetime.strptime(start_date, "%Y-%m-%d")).timetuple()):
                        data.append(entry)
            if len(files) > 2:
                #retrieve all data from middle files
                # BUGFIX: the slice previously stopped at len(files)-2, which
                # silently skipped the second-to-last month's file.
                for filename in files[1:(len(files)-1)]:
                    wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'msg':GUIText.RETRIEVING_IMPORTING_FILE_MSG+str(filename)}))
                    with open(filename, 'r') as infile:
                        new_data = json.load(infile)
                        new_data.pop(0)
                        data = data + new_data
            #retrieve only needed data from last file
            with open(files[(len(files)-1)], 'r') as infile:
                wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'msg':GUIText.RETRIEVING_IMPORTING_FILE_MSG+str(files[(len(files)-1)])}))
                temp_data = json.load(infile)
                temp_data.pop(0)
                for entry in temp_data:
                    # End bound is exclusive of end_date + 1 day, i.e. the end
                    # date itself is included.
                    if entry['created_utc'] < calendar.timegm((datetime.strptime(end_date, "%Y-%m-%d") + relativedelta(days=1)).timetuple()):
                        data.append(entry)
        else:
            # Single file: filter both ends of the range.
            wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'msg':GUIText.RETRIEVING_IMPORTING_FILE_MSG+str(files[0])}))
            with open(files[0], 'r') as infile:
                temp_data = json.load(infile)
                temp_data.pop(0)
                for entry in temp_data:
                    if entry['created_utc'] >= calendar.timegm((datetime.strptime(start_date, "%Y-%m-%d")).timetuple()):
                        if (entry['created_utc'] < calendar.timegm((datetime.strptime(end_date,"%Y-%m-%d") + relativedelta(days=1)).timetuple())):
                            data.append(entry)
    logger.info("Finished")
    return data
class RetrieveCSVDatasetThread(Thread):
"""Retrieve CSV Dataset Thread Class."""
def __init__(self, notify_window, main_frame, dataset_name, language, dataset_field, dataset_type, id_field, url_field, datetime_field, datetime_tz, available_fields_list, label_fields_list, computation_fields_list, combined_fields_list, filename):
    """Store the CSV retrieval parameters and start the worker thread.

    run() executes on the new thread as soon as start() is called, so all
    attributes must be assigned before the final self.start().
    """
    Thread.__init__(self)
    self._notify_window = notify_window          # window to post the result event to
    self.main_frame = main_frame                 # frame receiving progress events
    self.dataset_name = dataset_name
    self.language = language
    self.dataset_field = dataset_field           # CSV column that splits rows into datasets ("" = single dataset)
    self.dataset_type = dataset_type
    self.id_field = id_field                     # CSV column used as document id ("" = row number)
    self.url_field = url_field
    self.datetime_field = datetime_field
    self.datetime_tz = datetime_tz
    self.available_fields_list = available_fields_list
    self.label_fields_list = label_fields_list
    self.computation_fields_list = computation_fields_list
    self.combined_fields_list = combined_fields_list
    self.filename = filename                     # path of the CSV file to import
    # Kick off the worker immediately.
    self.start()
def run(self):
logger = logging.getLogger(__name__+".RetrieveCSVDatasetThread.run")
logger.info("Starting")
retrieval_details = {
'filename': self.filename,
'id_field': self.id_field,
'url_field': self.url_field,
'datetime_field': self.datetime_field,
'datetime_tz': self.datetime_tz
}
data = {}
dataset = None
datasets = {}
error_msg = ""
status_flag = True
wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'step':GUIText.RETRIEVING_CSV_IMPORTING_FILE_STEP + self.filename}))
file_data = self.ImportDataFiles(self.filename)
#convert the data into toolkit's dataset format
wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'step':GUIText.RETRIEVING_CSV_PREPARING_DATA_STEP}))
dataset_source = "CSV"
if self.dataset_field == "":
row_num = 0
for row in file_data:
row_num = row_num + 1
if self.id_field in row:
document_id = row[self.id_field]
else:
document_id = row_num
key = ("CSV", "document", document_id)
if key not in data:
data[key] = {}
data[key]['data_source'] = 'CSV'
data[key]['data_type'] = 'document'
data[key]['id'] = document_id
if self.url_field == "":
data[key]['url'] = ""
else:
data[key]['url'] = row[self.url_field]
if self.datetime_field == "":
data[key]['created_utc'] = 0
else:
datetime_value = row[self.datetime_field]
if datetime_value != '':
tmp_obj = dateparser.parse(datetime_value, settings={'TIMEZONE': 'US/Eastern', 'RETURN_AS_TIMEZONE_AWARE': False})
datetime_obj = datetime(tmp_obj.year, tmp_obj.month, tmp_obj.day,
tmp_obj.hour, tmp_obj.minute, tmp_obj.second,
tmp_obj.microsecond, pytz.timezone(self.datetime_tz))
if datetime_obj != None:
datetime_obj = datetime_obj.astimezone(timezone.utc)
datetime_utc = datetime_obj.replace(tzinfo=timezone.utc).timestamp()
data[key]['created_utc'] = datetime_utc
else:
data[key]['created_utc'] = 0
else:
data[key]['created_utc'] = 0
for field in row:
field_name = "csv."+field
if field_name in data[key]:
if field_name in self.combined_fields_list:
data[key][field_name].append(row[field])
else:
if field_name in self.combined_fields_list:
data[key][field_name] = [row[field]]
else:
data[key][field_name] = row[field]
#save as a document dataset
if len(data) > 0:
wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'step':GUIText.RETRIEVING_BUSY_CONSTRUCTING_STEP}))
retrieval_details['row_count'] = row_num
if self.datetime_field != "":
start_datetime = None
end_datetime = None
for key in data:
if start_datetime == None or start_datetime > data[key]['created_utc']:
start_datetime = data[key]['created_utc']
if end_datetime == None or end_datetime < data[key]['created_utc']:
end_datetime = data[key]['created_utc']
retrieval_details['start_date'] = start_datetime
retrieval_details['end_date'] = end_datetime
dataset = DatasetsUtilities.CreateDataset(self.dataset_name, dataset_source, self.dataset_type, self.language, retrieval_details, data, self.available_fields_list, self.label_fields_list, self.computation_fields_list, self.main_frame)
DatasetsUtilities.TokenizeDataset(dataset, self._notify_window, self.main_frame)
else:
status_flag = False
error_msg = GUIText.NO_DATA_AVAILABLE_ERROR
else:
row_num = 0
dataset_row_num = {}
for row in file_data:
row_num = row_num + 1
new_dataset_type = row[self.dataset_field]
if new_dataset_type not in data:
data[new_dataset_type] = {}
dataset_row_num[new_dataset_type] = 1
else:
dataset_row_num[new_dataset_type] = dataset_row_num[new_dataset_type] + 1
if self.id_field in row:
document_id = row[self.id_field]
else:
document_id = row_num
key = ("CSV", row[self.dataset_field], document_id)
if key not in data[new_dataset_type]:
data[new_dataset_type][key] = {}
data[new_dataset_type][key]['data_source'] = 'CSV'
data[new_dataset_type][key]['data_type'] = row[self.dataset_field]
data[new_dataset_type][key]['id'] = document_id
if self.url_field == "":
data[new_dataset_type][key]['url'] = ""
else:
data[new_dataset_type][key]['url'] = row[self.url_field]
if self.datetime_field == "":
data[new_dataset_type][key]['created_utc'] = 0
else:
datetime_value = row[self.datetime_field]
if datetime_value != '':
tmp_obj = dateparser.parse(datetime_value, settings={'TIMEZONE': 'US/Eastern', 'RETURN_AS_TIMEZONE_AWARE': False})
datetime_obj = datetime(tmp_obj.year, tmp_obj.month, tmp_obj.day,
tmp_obj.hour, tmp_obj.minute, tmp_obj.second,
tmp_obj.microsecond, pytz.timezone(self.datetime_tz))
if datetime_obj != None:
datetime_obj = datetime_obj.astimezone(timezone.utc)
datetime_utc = datetime_obj.replace(tzinfo=timezone.utc).timestamp()
data[new_dataset_type][key]['created_utc'] = datetime_utc
else:
data[new_dataset_type][key]['created_utc'] = 0
else:
data[new_dataset_type][key]['created_utc'] = 0
for field in row:
data[new_dataset_type][key]["csv."+field] = [row[field]]
else:
for field in row:
if "csv."+field in data[new_dataset_type][key]:
data[new_dataset_type][key]["csv."+field].append(row[field])
else:
data[new_dataset_type][key]["csv."+field] = [row[field]]
#save as a document dataset
if len(data) > 0:
wx.PostEvent(self.main_frame, CustomEvents.ProgressEvent({'step':GUIText.RETRIEVING_BUSY_CONSTRUCTING_STEP}))
for new_dataset_type in data:
cur_retrieval_details = copy.deepcopy(retrieval_details)
cur_retrieval_details['row_count'] = dataset_row_num[new_dataset_type]
if self.datetime_field != "":
start_datetime = None
end_datetime = None
for key in data[new_dataset_type]:
if start_datetime == None or start_datetime > data[new_dataset_type][key]['created_utc']:
start_datetime = data[new_dataset_type][key]['created_utc']
if end_datetime == None or end_datetime < data[new_dataset_type][key]['created_utc']:
end_datetime = data[new_dataset_type][key]['created_utc']
cur_retrieval_details['start_date'] = start_datetime
cur_retrieval_details['end_date'] = end_datetime
new_dataset = DatasetsUtilities.CreateDataset(self.dataset_name, dataset_source, new_dataset_type, self.language, retrieval_details, data[new_dataset_type], self.available_fields_list, self.label_fields_list, self.computation_fields_list, self.main_frame)
datasets[new_dataset.key] = new_dataset
DatasetsUtilities.TokenizeDataset(new_dataset, self._notify_window, self.main_frame)
else:
status_flag = False
error_msg = GUIText.NO_DATA_AVAILABLE_ERROR
| |
<filename>openquake/risklib/riskmodels.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2013-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import re
import inspect
import functools
import numpy
from openquake.baselib.node import Node
from openquake.baselib.general import CallableDict, AccumDict
from openquake.baselib.hdf5 import ArrayWrapper
from openquake.hazardlib import valid, nrml
from openquake.hazardlib.sourcewriter import obj_to_node
from openquake.risklib import utils, scientific
# Numpy dtype aliases used throughout the module.
U32 = numpy.uint32
F32 = numpy.float32
registry = CallableDict()
F64 = numpy.float64
# Regex matching keys like "structural_vulnerability" -> (cost_type, kind).
COST_TYPE_REGEX = '|'.join(valid.cost_type.choices)
LOSS_TYPE_KEY = re.compile(
    # raw string: '\w' in a plain string is an invalid escape (W605)
    r'(%s|occupants|fragility)_([\w_]+)' % COST_TYPE_REGEX)
def get_risk_files(inputs):
    """
    :param inputs: a dictionary key -> path name
    :returns: a pair (file_type, {cost_type: path})
    :raises ValueError: if keys of more than one kind are present
    """
    vfs = {}
    names = set()
    # Iterate over a snapshot: the loop mutates `inputs` (adds
    # 'structural_fragility', deletes 'fragility'), which would raise
    # "dictionary changed size during iteration" on the dict itself.
    for key in list(inputs):
        if key == 'fragility':
            # backward compatibility for .ini files with key fragility_file
            # instead of structural_fragility_file
            vfs['structural'] = inputs['structural_fragility'] = inputs[key]
            names.add('fragility')
            del inputs['fragility']
            continue
        match = LOSS_TYPE_KEY.match(key)
        if match and 'retrofitted' not in key and 'consequence' not in key:
            vfs[match.group(1)] = inputs[key]
            names.add(match.group(2))
    if not names:
        return None, {}
    elif len(names) > 1:
        raise ValueError('Found inconsistent keys %s in the .ini file'
                         % ', '.join(names))
    return names.pop(), vfs
# ########################### vulnerability ############################## #
def filter_vset(elem):
    """Return True when *elem*'s tag names a discreteVulnerabilitySet node."""
    tag = elem.tag
    return tag.endswith('discreteVulnerabilitySet')
@obj_to_node.add('VulnerabilityFunction')
def build_vf_node(vf):
    """
    Convert a VulnerabilityFunction object into a Node suitable
    for XML conversion.
    """
    # Children carry the IMLs, mean loss ratios and coefficients of variation.
    children = [
        Node('imls', {'imt': vf.imt}, vf.imls),
        Node('meanLRs', {}, vf.mean_loss_ratios),
        Node('covLRs', {}, vf.covs),
    ]
    attrs = {'id': vf.id, 'dist': vf.distribution_name}
    return Node('vulnerabilityFunction', attrs, nodes=children)
def get_risk_models(oqparam, kind=None):
    """
    :param oqparam:
        an OqParam instance
    :param kind:
        vulnerability|vulnerability_retrofitted|fragility|consequence;
        if None it is extracted from the oqparam.file_type attribute
    :returns:
        a dictionary taxonomy -> loss_type -> function
    """
    kind = kind or oqparam.file_type
    rmodels = AccumDict()  # cost_type -> parsed risk model
    rmodels.limit_states = []
    # Collect every input whose key looks like "<cost_type>_<kind>".
    for key in sorted(oqparam.inputs):
        mo = re.match('(occupants|%s)_%s$' % (COST_TYPE_REGEX, kind), key)
        if mo:
            key_type = mo.group(1)  # the cost_type in the key
            # can be occupants, structural, nonstructural, ...
            rmodel = nrml.to_python(oqparam.inputs[key])
            rmodels[key_type] = rmodel
            if rmodel.lossCategory is None:  # NRML 0.4
                continue
            cost_type = str(rmodel.lossCategory)
            rmodel_kind = rmodel.__class__.__name__
            kind_ = kind.replace('_retrofitted', '')  # strip retrofitted
            # Sanity check: the parsed model class must match the expected kind.
            if not rmodel_kind.lower().startswith(kind_):
                raise ValueError(
                    'Error in the file "%s_file=%s": is '
                    'of kind %s, expected %s' % (
                        key, oqparam.inputs[key], rmodel_kind,
                        kind.capitalize() + 'Model'))
            # Sanity check: lossCategory inside the file must match the key.
            if cost_type != key_type:
                raise ValueError(
                    'Error in the file "%s_file=%s": lossCategory is of type '
                    '"%s", expected "%s"' % (key, oqparam.inputs[key],
                                             rmodel.lossCategory, key_type))
    rdict = AccumDict(accum={})  # taxonomy -> loss_type -> function
    rdict.limit_states = []
    if kind == 'fragility':
        limit_states = []
        for loss_type, fm in sorted(rmodels.items()):
            # build a copy of the FragilityModel with different IM levels
            newfm = fm.build(oqparam.continuous_fragility_discretization,
                             oqparam.steps_per_interval)
            for (imt, taxo), ffl in newfm.items():
                if not limit_states:
                    limit_states.extend(fm.limitStates)
                # we are rejecting the case of loss types with different
                # limit states; this may change in the future
                assert limit_states == fm.limitStates, (
                    limit_states, fm.limitStates)
                rdict[taxo][loss_type] = ffl
                # TODO: see if it is possible to remove the attribute
                # below, used in classical_damage
                ffl.steps_per_interval = oqparam.steps_per_interval
        rdict.limit_states = [str(ls) for ls in limit_states]
    elif kind == 'consequence':
        # Consequence models are returned as parsed, keyed by cost_type.
        rdict = rmodels
    else:  # vulnerability
        cl_risk = oqparam.calculation_mode in ('classical', 'classical_risk')
        # only for classical_risk reduce the loss_ratios
        # to make sure they are strictly increasing
        for loss_type, rm in rmodels.items():
            for (imt, taxo), rf in rm.items():
                rdict[taxo][loss_type] = (
                    rf.strictly_increasing() if cl_risk else rf)
    return rdict
def get_values(loss_type, assets, time_event=None):
    """
    Extract the values of the given assets for the given loss type.

    :param loss_type: the loss type of interest
    :param assets: an iterable of assets exposing a ``.value`` method
    :param time_event: optional time event forwarded to ``asset.value``
    :returns: a numpy array with one value per asset
    """
    values = [asset.value(loss_type, time_event) for asset in assets]
    return numpy.array(values)
class RiskModel(object):
"""
Base class. Can be used in the tests as a mock.
"""
time_event = None # used in scenario_risk
compositemodel = None # set by get_risk_model
kind = None # must be set in subclasses
def __init__(self, taxonomy, risk_functions, insured_losses):
self.taxonomy = taxonomy
self.risk_functions = risk_functions
self.insured_losses = insured_losses
@property
def loss_types(self):
"""
The list of loss types in the underlying vulnerability functions,
in lexicographic order
"""
return sorted(self.risk_functions)
def get_loss_types(self, imt):
"""
:param imt: Intensity Measure Type string
:returns: loss types with risk functions of the given imt
"""
return [lt for lt in self.loss_types
if self.risk_functions[lt].imt == imt]
def get_output(self, assets, data_by_lt, epsgetter):
"""
:param assets: a list of assets with the same taxonomy
:param data_by_lt: hazards for each loss type
:param epsgetter: an epsilon getter function
:returns: an ArrayWrapper of shape (L, ...)
"""
out = [self(lt, assets, data, epsgetter)
for lt, data in zip(self.loss_types, data_by_lt)]
return ArrayWrapper(numpy.array(out), dict(assets=assets))
def __toh5__(self):
risk_functions = {lt: func for lt, func in self.risk_functions.items()}
if hasattr(self, 'retro_functions'):
for lt, func in self.retro_functions.items():
risk_functions[lt + '_retrofitted'] = func
return risk_functions, {'taxonomy': self.taxonomy}
def __fromh5__(self, dic, attrs):
vars(self).update(attrs)
self.risk_functions = dic
def __repr__(self):
return '<%s%s>' % (self.__class__.__name__, list(self.risk_functions))
def rescale(curves, values):
    """
    Multiply the losses in each curve of kind (losses, poes) by the
    corresponding value.

    :param curves: an array of shape (N, 2, C); curves[i, 0] are the
        losses and curves[i, 1] the poes of the i-th curve
    :param values: a sequence of N scaling values
    :returns: an array of shape (N, 2, C) with the losses rescaled
    """
    n = len(curves)
    assert n == len(values), (n, len(values))
    rescaled = [[curves[i, 0] * values[i], curves[i, 1]] for i in range(n)]
    return numpy.array(rescaled)
@registry.add('classical_risk', 'classical', 'disaggregation')
class Classical(RiskModel):
    """
    Classical PSHA-based risk model: computes loss curves from hazard
    curves. Insured loss curves are not implemented.
    """
    kind = 'vulnerability'

    def __init__(self, taxonomy, vulnerability_functions,
                 hazard_imtls, lrem_steps_per_interval,
                 conditional_loss_poes, poes_disagg,
                 insured_losses=False):
        """
        :param taxonomy:
            Taxonomy for this riskmodel
        :param vulnerability_functions:
            Dictionary of vulnerability functions by loss type
        :param hazard_imtls:
            The intensity measure types and levels of the hazard computation
        :param lrem_steps_per_interval:
            Configuration parameter
        :param conditional_loss_poes:
            Probability of Exceedance levels for conditional losses
        :param poes_disagg:
            Probability of Exceedance levels used to disaggregate losses by
            taxonomy
        :param bool insured_losses:
            ignored, since insured loss curves are not implemented
        See :func:`openquake.risklib.scientific.classical` for a description
        of the other parameters.
        """
        self.taxonomy = taxonomy
        self.risk_functions = vulnerability_functions
        self.hazard_imtls = hazard_imtls
        self.lrem_steps_per_interval = lrem_steps_per_interval
        self.conditional_loss_poes = conditional_loss_poes
        self.poes_disagg = poes_disagg
        self.insured_losses = insured_losses
        # precompute the mean loss ratios, once per loss type
        ratios = {}
        for loss_type, vf in self.risk_functions.items():
            ratios[loss_type] = vf.mean_loss_ratios_with_steps(
                self.lrem_steps_per_interval)
        self.loss_ratios = ratios

    def __call__(self, loss_type, assets, hazard_curve, _eps=None):
        """
        :param str loss_type:
            the loss type considered
        :param assets:
            assets is an iterator over N
            :class:`openquake.risklib.scientific.Asset` instances
        :param hazard_curve:
            an array of poes
        :param _eps:
            ignored, here only for API compatibility with other calculators
        :returns:
            an array of shape (C, N, 2)
        """
        vf = self.risk_functions[loss_type]
        imls = self.hazard_imtls[vf.imt]
        # the loss ratio curve is identical for every asset of the taxonomy
        curve = scientific.classical(
            vf, imls, hazard_curve, self.lrem_steps_per_interval)
        lrcurves = numpy.array([curve] * len(assets))
        values = get_values(loss_type, assets)
        # NOTE: should insured losses ever be implemented, the deductibles
        # and insurance limits of the assets would be applied here via
        # scientific.insured_loss_curve
        # transpose (N, 2, C) -> (C, N, 2), the shape .get_output expects
        return rescale(lrcurves, values).transpose(2, 0, 1)
@registry.add('event_based_risk', 'event_based', 'event_based_rupture',
'ucerf_rupture', 'ucerf_hazard', 'ucerf_risk')
class ProbabilisticEventBased(RiskModel):
"""
Implements the Probabilistic Event Based riskmodel.
Computes loss ratios and event IDs.
"""
kind = 'vulnerability'
def __init__(
self, taxonomy, vulnerability_functions,
conditional_loss_poes, insured_losses=False):
"""
See :func:`openquake.risklib.scientific.event_based` for a description
of the input parameters.
"""
self.taxonomy = taxonomy
self.risk_functions = vulnerability_functions
self.conditional_loss_poes = conditional_loss_poes
self.insured_losses = insured_losses
def __call__(self, loss_type, assets, gmvs_eids, epsgetter):
"""
:param str loss_type:
the loss type considered
:param assets:
a list of assets on the same site and with the same taxonomy
:param gmvs_eids:
a pair (gmvs, eids) with E values each
:param epsgetter:
a callable returning the correct epsilons for the given gmvs
:returns:
a :class:
`openquake.risklib.scientific.ProbabilisticEventBased.Output`
instance.
"""
gmvs, eids = gmvs_eids
E = len(gmvs)
I = self.insured_losses + 1
A = len(assets)
loss_ratios = numpy.zeros((A, E, I), F32)
vf = self.risk_functions[loss_type]
means, covs, idxs = vf.interpolate(gmvs)
for i, asset in enumerate(assets):
epsilons = epsgetter(asset.ordinal, eids)
ratios = vf.sample(means, covs, | |
# Copyright 2001-2007 by <NAME>. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python, and influenced by Apache's log4j system.
Should work under Python versions >= 1.5.2, except that source line
information is not available unless 'sys._getframe()' is.
Copyright (C) 2001-2007 <NAME>. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, types, time, string, cStringIO, traceback
try:
import codecs
except ImportError:
codecs = None
try:
import thread
import threading
except ImportError:
thread = None
__author__ = "<NAME> <<EMAIL>>"
__status__ = "production"
__version__ = "0.5.0.2"
__date__ = "16 February 2007"
#---------------------------------------------------------------------------
#   Miscellaneous module data
#---------------------------------------------------------------------------

#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
    _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif string.lower(__file__[-4:]) in ['.pyc', '.pyo']:
    # imported from a compiled (.pyc/.pyo) file: point back at the .py source
    _srcfile = __file__[:-4] + '.py'
else:
    _srcfile = __file__
# normalize case so filename comparisons work on case-insensitive filesystems
_srcfile = os.path.normcase(_srcfile)
# next bit filched from 1.5.2's inspect.py
def currentframe():
    """Return the frame object for the caller's stack frame."""
    try:
        raise Exception
    except:
        # Python 2 fallback: sys.exc_traceback points at the frame that
        # raised, so .tb_frame.f_back is the caller's frame
        return sys.exc_traceback.tb_frame.f_back

# use the much cheaper sys._getframe when the interpreter provides it
if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
# done filching
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
#    _srcfile = None

#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()

#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = 1

#
# If you don't want threading information in the log, set this to zero
# (consulted in LogRecord.__init__)
#
logThreads = 1

#
# If you don't want process information in the log, set this to zero
# (consulted in LogRecord.__init__)
#
logProcesses = 1
#---------------------------------------------------------------------------
#   Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL  # FATAL is an alias for CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING  # WARN is an alias for WARNING
INFO = 20
DEBUG = 10
NOTSET = 0

# _levelNames maps in BOTH directions: numeric level -> name, and
# name -> numeric level (see getLevelName/addLevelName below).
_levelNames = {
    CRITICAL : 'CRITICAL',
    ERROR : 'ERROR',
    WARNING : 'WARNING',
    INFO : 'INFO',
    DEBUG : 'DEBUG',
    NOTSET : 'NOTSET',
    'CRITICAL' : CRITICAL,
    'ERROR' : ERROR,
    'WARN' : WARNING,
    'WARNING' : WARNING,
    'INFO' : INFO,
    'DEBUG' : DEBUG,
    'NOTSET' : NOTSET,
}
def getLevelName(level):
    """
    Return the textual representation of logging level 'level'.

    For one of the predefined levels (CRITICAL, ERROR, WARNING, INFO,
    DEBUG) the corresponding string is returned; the same holds for any
    level registered via addLevelName(). For any other value the string
    "Level %s" % level is returned.

    Because _levelNames maps both ways, passing a registered level *name*
    returns its numeric value.
    """
    fallback = "Level %s" % level
    return _levelNames.get(level, fallback)
def addLevelName(level, levelName):
    """
    Register 'levelName' as the textual name of 'level' (and vice versa,
    since _levelNames maps both ways).

    This is used when converting levels to text during message formatting.
    """
    _acquireLock()
    try:
        # unlikely to raise, but the lock must always be released
        _levelNames[level] = levelName
        _levelNames[levelName] = level
    finally:
        _releaseLock()
#---------------------------------------------------------------------------
#   Thread-related stuff
#---------------------------------------------------------------------------

#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates Handlers and so
#might arbitrary user threads. Since Handler.__init__() updates the shared
#dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
# Created lazily by _acquireLock(); stays None when the 'thread' module
# is unavailable.
_lock = None
def _acquireLock():
    """
    Acquire the module-level lock for serializing access to shared data.

    The lock is created lazily on first use and should be released with
    _releaseLock(). When the 'thread' module is unavailable no lock is
    ever created and this function is a no-op.
    """
    global _lock
    if thread and not _lock:
        _lock = threading.RLock()
    if _lock:
        _lock.acquire()
def _releaseLock():
    """
    Release the module-level lock acquired by calling _acquireLock().
    A no-op when no lock was ever created.
    """
    if _lock is not None:
        _lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord:
    """
    A LogRecord instance represents an event being logged.

    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args, which are combined
    using str(msg) % args to create the message field of the record. The
    record also includes information such as when the record was created,
    the source line where the logging call was made, and any exception
    information to be logged.
    """
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None):
        """
        Initialize a logging record with interesting information.

        name     -- logger name
        level    -- numeric severity (see the module-level level constants)
        pathname -- path of the source file of the logging call
        lineno   -- line number of the logging call
        msg      -- format string (merged with args in getMessage())
        args     -- arguments for msg; a sole dict argument is accepted
                    for %(key)s-style formatting (see below)
        exc_info -- exception information to be logged (stored as-is)
        func     -- name of the calling function, if known
        """
        # grab the time first, so created/msecs refer to the same instant
        ct = time.time()
        self.name = name
        self.msg = msg
        #
        # The following statement allows passing of a dictionary as a sole
        # argument, so that you can do something like
        #  logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
        # Suggested by <NAME>.
        # Note that without the test for args[0], we get a problem because
        # during formatting, we test to see if the arg is present using
        # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
        # and if the passed arg fails 'if self.args:' then no formatting
        # is done. For example, logger.warn('Value is %d', 0) would log
        # 'Value is %d' instead of 'Value is 0'.
        # For the use case of passing a dictionary, this should not be a
        # problem.
        if args and (len(args) == 1) and args[0] and (type(args[0]) == types.DictType):
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except:
            # pathname may be something exotic (e.g. not a string);
            # fall back to using it verbatim
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None # used to cache the traceback text
        self.lineno = lineno
        self.funcName = func
        self.created = ct
        # fractional part of the creation time, in milliseconds
        self.msecs = (ct - long(ct)) * 1000
        self.relativeCreated = (self.created - _startTime) * 1000
        if logThreads and thread:
            self.thread = thread.get_ident()
            self.threadName = threading.currentThread().getName()
        else:
            self.thread = None
            self.threadName = None
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None
    def __str__(self):
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
            self.pathname, self.lineno, self.msg)
    def getMessage(self):
        """
        Return the message for this LogRecord.

        Return the message for this LogRecord after merging any user-supplied
        arguments with the message.
        """
        if not hasattr(types, "UnicodeType"): #if no unicode support...
            msg = str(self.msg)
        else:
            msg = self.msg
            if type(msg) not in (types.UnicodeType, types.StringType):
                try:
                    msg = str(self.msg)
                except UnicodeError:
                    msg = self.msg      #Defer encoding till later
        if self.args:
            msg = msg % self.args
        return msg
def makeLogRecord(dict):
    """
    Make a LogRecord whose attributes are defined by the specified
    dictionary.

    This function is useful for converting a logging event received over
    a socket connection (which is sent as a dictionary) back into a
    LogRecord instance.
    """
    record = LogRecord(None, None, "", 0, "", (), None, None)
    record.__dict__.update(dict)
    return record
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class Formatter:
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
default value | |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LISPMapRequest(Base):
__slots__ = ()
_SDM_NAME = 'lISPMapRequest'
_SDM_ATT_MAP = {
'HeaderType': 'lISPMapRequest.header.type-1',
'HeaderA': 'lISPMapRequest.header.a-2',
'HeaderM': 'lISPMapRequest.header.M-3',
'HeaderP': 'lISPMapRequest.header.p-4',
'HeaderS': 'lISPMapRequest.header.S-5',
'HeaderReserved': 'lISPMapRequest.header.reserved-6',
'HeaderIRC_RLOC_Count': 'lISPMapRequest.header.IRC_RLOC_Count-7',
'HeaderRecordcount': 'lISPMapRequest.header.recordcount-8',
'HeaderNonce': 'lISPMapRequest.header.nonce-9',
'Ipv4sourceeidSourceeidipv4afi': 'lISPMapRequest.header.Source_EID_AFI_Address.ipv4sourceeid.sourceeidipv4afi-10',
'Ipv4sourceeidSourceeidprefix': 'lISPMapRequest.header.Source_EID_AFI_Address.ipv4sourceeid.sourceeidprefix-11',
'Ipv6sourceeidSourceeidipv6afi': 'lISPMapRequest.header.Source_EID_AFI_Address.ipv6sourceeid.sourceeidipv6afi-12',
'Ipv6sourceeidSourceeidprefix': 'lISPMapRequest.header.Source_EID_AFI_Address.ipv6sourceeid.sourceeidprefix-13',
'AfiAfi': 'lISPMapRequest.header.Source_EID_AFI_Address.afi.afi-14',
'AfiRsvd1': 'lISPMapRequest.header.Source_EID_AFI_Address.afi.rsvd1-15',
'AfiFlags': 'lISPMapRequest.header.Source_EID_AFI_Address.afi.flags-16',
'AfiType': 'lISPMapRequest.header.Source_EID_AFI_Address.afi.type-17',
'AfiRsvd2': 'lISPMapRequest.header.Source_EID_AFI_Address.afi.rsvd2-18',
'AfiLength': 'lISPMapRequest.header.Source_EID_AFI_Address.afi.length-19',
'AfiInstanceid': 'lISPMapRequest.header.Source_EID_AFI_Address.afi.instanceid-20',
'Ipv4eidEidipv4afi': 'lISPMapRequest.header.Source_EID_AFI_Address.afi.eidafiprefix.ipv4eid.eidipv4afi-21',
'Ipv4eidEidprefix': 'lISPMapRequest.header.Source_EID_AFI_Address.afi.eidafiprefix.ipv4eid.eidprefix-22',
'Ipv6eidEidipv6afi': 'lISPMapRequest.header.Source_EID_AFI_Address.afi.eidafiprefix.ipv6eid.eidipv6afi-23',
'Ipv6eidEidprefix': 'lISPMapRequest.header.Source_EID_AFI_Address.afi.eidafiprefix.ipv6eid.eidprefix-24',
'Ipv4itrrlocItrrlocipv4afi': 'lISPMapRequest.header.ITR_RLOC_AFI_Address.ipv4itrrloc.itrrlocipv4afi-25',
'Ipv4itrrlocItrrlocaddress': 'lISPMapRequest.header.ITR_RLOC_AFI_Address.ipv4itrrloc.itrrlocaddress-26',
'Ipv6itrrlocItrrlocipv6afi': 'lISPMapRequest.header.ITR_RLOC_AFI_Address.ipv6itrrloc.itrrlocipv6afi-27',
'Ipv6itrrlocItrrlocaddress': 'lISPMapRequest.header.ITR_RLOC_AFI_Address.ipv6itrrloc.itrrlocaddress-28',
'RecordRecreserved': 'lISPMapRequest.header.EID Record.record.recreserved-29',
'RecordEIDmasklen': 'lISPMapRequest.header.EID Record.record.eIDmasklen-30',
'EidafiprefixIpv4eidEidipv4afi': 'lISPMapRequest.header.EID Record.record.eidafiprefix.ipv4eid.eidipv4afi-31',
'EidafiprefixIpv4eidEidprefix': 'lISPMapRequest.header.EID Record.record.eidafiprefix.ipv4eid.eidprefix-32',
'EidafiprefixIpv6eidEidipv6afi': 'lISPMapRequest.header.EID Record.record.eidafiprefix.ipv6eid.eidipv6afi-33',
'EidafiprefixIpv6eidEidprefix': 'lISPMapRequest.header.EID Record.record.eidafiprefix.ipv6eid.eidprefix-34',
'EidafiprefixAfiAfi': 'lISPMapRequest.header.EID Record.record.eidafiprefix.afi.afi-35',
'EidafiprefixAfiRsvd1': 'lISPMapRequest.header.EID Record.record.eidafiprefix.afi.rsvd1-36',
'EidafiprefixAfiFlags': 'lISPMapRequest.header.EID Record.record.eidafiprefix.afi.flags-37',
'EidafiprefixAfiType': 'lISPMapRequest.header.EID Record.record.eidafiprefix.afi.type-38',
'EidafiprefixAfiRsvd2': 'lISPMapRequest.header.EID Record.record.eidafiprefix.afi.rsvd2-39',
'EidafiprefixAfiLength': 'lISPMapRequest.header.EID Record.record.eidafiprefix.afi.length-40',
'EidafiprefixAfiInstanceid': 'lISPMapRequest.header.EID Record.record.eidafiprefix.afi.instanceid-41',
'AfiEidafiprefixIpv4eidEidipv4afi': 'lISPMapRequest.header.EID Record.record.eidafiprefix.afi.eidafiprefix.ipv4eid.eidipv4afi-42',
'AfiEidafiprefixIpv4eidEidprefix': 'lISPMapRequest.header.EID Record.record.eidafiprefix.afi.eidafiprefix.ipv4eid.eidprefix-43',
'AfiEidafiprefixIpv6eidEidipv6afi': 'lISPMapRequest.header.EID Record.record.eidafiprefix.afi.eidafiprefix.ipv6eid.eidipv6afi-44',
'AfiEidafiprefixIpv6eidEidprefix': 'lISPMapRequest.header.EID Record.record.eidafiprefix.afi.eidafiprefix.ipv6eid.eidprefix-45',
'RecordTtl': 'lISPMapRequest.header.Map-Reply Record.Record.ttl-46',
'RecordLoccnt': 'lISPMapRequest.header.Map-Reply Record.Record.loccnt-47',
'RecordEIDmasklen': 'lISPMapRequest.header.Map-Reply Record.Record.eIDmasklen-48',
'RecordACT': 'lISPMapRequest.header.Map-Reply Record.Record.aCT-49',
'RecordA': 'lISPMapRequest.header.Map-Reply Record.Record.a-50',
'RecordRecreserved': 'lISPMapRequest.header.Map-Reply Record.Record.recreserved-51',
'RecordMapvernumber': 'lISPMapRequest.header.Map-Reply Record.Record.mapvernumber-52',
'RecordEidafiprefixIpv4eidEidipv4afi': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.ipv4eid.eidipv4afi-53',
'RecordEidafiprefixIpv4eidEidprefix': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.ipv4eid.eidprefix-54',
'RecordEidafiprefixIpv6eidEidipv6afi': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.ipv6eid.eidipv6afi-55',
'RecordEidafiprefixIpv6eidEidprefix': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.ipv6eid.eidprefix-56',
'RecordEidafiprefixAfiAfi': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.afi.afi-57',
'RecordEidafiprefixAfiRsvd1': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.afi.rsvd1-58',
'RecordEidafiprefixAfiFlags': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.afi.flags-59',
'RecordEidafiprefixAfiType': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.afi.type-60',
'RecordEidafiprefixAfiRsvd2': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.afi.rsvd2-61',
'RecordEidafiprefixAfiLength': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.afi.length-62',
'RecordEidafiprefixAfiInstanceid': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.afi.instanceid-63',
'EidafiprefixAfiEidafiprefixIpv4eidEidipv4afi': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.afi.eidafiprefix.ipv4eid.eidipv4afi-64',
'EidafiprefixAfiEidafiprefixIpv4eidEidprefix': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.afi.eidafiprefix.ipv4eid.eidprefix-65',
'EidafiprefixAfiEidafiprefixIpv6eidEidipv6afi': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.afi.eidafiprefix.ipv6eid.eidipv6afi-66',
'EidafiprefixAfiEidafiprefixIpv6eidEidprefix': 'lISPMapRequest.header.Map-Reply Record.Record.eidafiprefix.afi.eidafiprefix.ipv6eid.eidprefix-67',
'LocatorPriority': 'lISPMapRequest.header.Map-Reply Record.Record.locatorrecords.locator.priority-68',
'LocatorWeight': 'lISPMapRequest.header.Map-Reply Record.Record.locatorrecords.locator.weight-69',
'LocatorMpriority': 'lISPMapRequest.header.Map-Reply Record.Record.locatorrecords.locator.mpriority-70',
'LocatorMweight': 'lISPMapRequest.header.Map-Reply Record.Record.locatorrecords.locator.mweight-71',
'LocatorUnusedflags': 'lISPMapRequest.header.Map-Reply Record.Record.locatorrecords.locator.unusedflags-72',
'LocatorL': 'lISPMapRequest.header.Map-Reply Record.Record.locatorrecords.locator.l-73',
'LocatorLocp': 'lISPMapRequest.header.Map-Reply Record.Record.locatorrecords.locator.locp-74',
'LocatorR': 'lISPMapRequest.header.Map-Reply Record.Record.locatorrecords.locator.r-75',
'Ipv4locLocipv4afi': 'lISPMapRequest.header.Map-Reply Record.Record.locatorrecords.locator.locafiprefix.ipv4loc.locipv4afi-76',
'Ipv4locLocprefix': 'lISPMapRequest.header.Map-Reply Record.Record.locatorrecords.locator.locafiprefix.ipv4loc.locprefix-77',
'Ipv6locLocipv6afi': 'lISPMapRequest.header.Map-Reply Record.Record.locatorrecords.locator.locafiprefix.ipv6loc.locipv6afi-78',
'Ipv6locLocprefix': 'lISPMapRequest.header.Map-Reply Record.Record.locatorrecords.locator.locafiprefix.ipv6loc.locprefix-79',
'HeaderMapping_Protocol_Data': 'lISPMapRequest.header.Mapping_Protocol_Data-80',
}
def __init__(self, parent, list_op=False):
super(LISPMapRequest, self).__init__(parent, list_op)
@property
def HeaderType(self):
"""
Display Name: Type
Default Value: 0x1
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderType']))
@property
def HeaderA(self):
"""
Display Name: A
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderA']))
@property
def HeaderM(self):
"""
Display Name: M
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderM']))
@property
def HeaderP(self):
"""
Display Name: P
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderP']))
@property
def HeaderS(self):
"""
Display Name: S
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderS']))
@property
def HeaderReserved(self):
"""
Display Name: Reserved
Default Value: 0x000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderReserved']))
@property
def HeaderIRC_RLOC_Count(self):
"""
Display Name: IRC
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderIRC_RLOC_Count']))
@property
def HeaderRecordcount(self):
"""
Display Name: Record Count
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderRecordcount']))
@property
def HeaderNonce(self):
"""
Display Name: Nonce
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderNonce']))
@property
def Ipv4sourceeidSourceeidipv4afi(self):
"""
Display Name: IPv4 AFI
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4sourceeidSourceeidipv4afi']))
@property
def Ipv4sourceeidSourceeidprefix(self):
"""
Display Name: IPv4 Source EID Prefix
Default Value: 0.0.0.0
Value Format: iPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4sourceeidSourceeidprefix']))
@property
def Ipv6sourceeidSourceeidipv6afi(self):
"""
Display Name: IPv6 AFI
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6sourceeidSourceeidipv6afi']))
@property
def Ipv6sourceeidSourceeidprefix(self):
"""
Display Name: IPv6 Source EID Prefix
Default Value: 00::00
Value Format: iPv6
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6sourceeidSourceeidprefix']))
@property
def AfiAfi(self):
"""
Display Name: AFI
Default Value: 16387
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AfiAfi']))
@property
def AfiRsvd1(self):
"""
Display Name: Rsvd1
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AfiRsvd1']))
@property
def AfiFlags(self):
"""
Display Name: Flags
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AfiFlags']))
@property
def AfiType(self):
"""
Display Name: Type
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AfiType']))
@property
def AfiRsvd2(self):
"""
Display Name: Rsvd2
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AfiRsvd2']))
@property
def AfiLength(self):
"""
Display Name: Length
Default Value: 10
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AfiLength']))
@property
def AfiInstanceid(self):
"""
Display Name: InstanceID
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AfiInstanceid']))
@property
def Ipv4eidEidipv4afi(self):
"""
Display Name: IPv4 AFI
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4eidEidipv4afi']))
@property
def Ipv4eidEidprefix(self):
"""
Display Name: IPv4 EID Prefix
Default Value: 0.0.0.0
Value Format: iPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4eidEidprefix']))
@property
def Ipv6eidEidipv6afi(self):
"""
Display Name: IPv6 AFI
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6eidEidipv6afi']))
@property
def Ipv6eidEidprefix(self):
"""
Display Name: IPv6 EID Prefix
Default Value: 00::00
Value Format: iPv6
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6eidEidprefix']))
@property
def Ipv4itrrlocItrrlocipv4afi(self):
"""
Display Name: IPv4 AFI
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4itrrlocItrrlocipv4afi']))
@property
def Ipv4itrrlocItrrlocaddress(self):
"""
Display Name: IPv4 EID Address
Default Value: 0.0.0.0
Value Format: iPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4itrrlocItrrlocaddress']))
@property
def Ipv6itrrlocItrrlocipv6afi(self):
"""
Display Name: IPv6 AFI
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6itrrlocItrrlocipv6afi']))
@property
def Ipv6itrrlocItrrlocaddress(self):
"""
Display Name: IPv6 EID Prefix
Default Value: 00::00
Value Format: iPv6
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6itrrlocItrrlocaddress']))
@property
def RecordRecreserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RecordRecreserved']))
@property
def RecordEIDmasklen(self):
"""
Display Name: EID Mask Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RecordEIDmasklen']))
@property
def EidafiprefixIpv4eidEidipv4afi(self):
"""
Display Name: IPv4 AFI
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EidafiprefixIpv4eidEidipv4afi']))
@property
def EidafiprefixIpv4eidEidprefix(self):
"""
Display Name: IPv4 EID Prefix
Default Value: 0.0.0.0
Value Format: iPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EidafiprefixIpv4eidEidprefix']))
@property
def EidafiprefixIpv6eidEidipv6afi(self):
"""
Display Name: IPv6 AFI
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EidafiprefixIpv6eidEidipv6afi']))
@property
def EidafiprefixIpv6eidEidprefix(self):
    """IPv6 EID Prefix (format: iPv6, default: 00::00)."""
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['EidafiprefixIpv6eidEidprefix'])
    return Multivalue(self, raw)
@property
def EidafiprefixAfiAfi(self):
    """AFI (format: decimal, default: 16387)."""
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['EidafiprefixAfiAfi'])
    return Multivalue(self, raw)
@property
def EidafiprefixAfiRsvd1(self):
    """Rsvd1 (format: decimal, default: 0)."""
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['EidafiprefixAfiRsvd1'])
    return Multivalue(self, raw)
@property
def EidafiprefixAfiFlags(self):
    """Flags (format: decimal, default: 0)."""
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['EidafiprefixAfiFlags'])
    return Multivalue(self, raw)
@property
def EidafiprefixAfiType(self):
    """Type (format: decimal, default: 2)."""
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['EidafiprefixAfiType'])
    return Multivalue(self, raw)
@property
def EidafiprefixAfiRsvd2(self):
    """Rsvd2 (format: decimal, default: 0)."""
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['EidafiprefixAfiRsvd2'])
    return Multivalue(self, raw)
@property
def EidafiprefixAfiLength(self):
    """Length (format: decimal, default: 10)."""
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['EidafiprefixAfiLength'])
    return Multivalue(self, raw)
@property
def EidafiprefixAfiInstanceid(self):
    """InstanceID (format: decimal, default: 0)."""
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['EidafiprefixAfiInstanceid'])
    return Multivalue(self, raw)
@property
def AfiEidafiprefixIpv4eidEidipv4afi(self):
    """IPv4 AFI (format: decimal, default: 1)."""
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['AfiEidafiprefixIpv4eidEidipv4afi'])
    return Multivalue(self, raw)
@property
def AfiEidafiprefixIpv4eidEidprefix(self):
    """IPv4 EID Prefix (format: iPv4, default: 0.0.0.0)."""
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['AfiEidafiprefixIpv4eidEidprefix'])
    return Multivalue(self, raw)
@property
def AfiEidafiprefixIpv6eidEidipv6afi(self):
    """IPv6 AFI (format: decimal, default: 2)."""
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['AfiEidafiprefixIpv6eidEidipv6afi'])
    return Multivalue(self, raw)
@property
def AfiEidafiprefixIpv6eidEidprefix(self):
    """IPv6 EID Prefix (format: iPv6, default: 00::00)."""
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['AfiEidafiprefixIpv6eidEidprefix'])
    return Multivalue(self, raw)
@property
def RecordTtl(self):
    """ttl (format: decimal, default: 1440)."""
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['RecordTtl'])
    return Multivalue(self, raw)
@property
def RecordLoccnt(self):
    """Locator Count (format: decimal, default: 0)."""
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['RecordLoccnt'])
    return Multivalue(self, raw)
@property
def RecordEIDmasklen(self):
"""
Display Name: EID Mask Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return | |
#!/usr/bin/env python
# coding: utf-8
import time
import atexit
import weakref
import pybullet
import threading
from qibullet.tools import *
from qibullet.controller import Controller
class BaseController(Controller):
    """
    Generic base controller for a simulated robot.
    """
    FRAME_WORLD = 1
    FRAME_ROBOT = 2

    def __init__(self, robot_model, physicsClientId=0):
        """
        Constructor.

        Parameters:
            robot_model - the pybullet model of the robot
            physicsClientId - The id of the simulated instance in which the
            robot will be controlled
        """
        Controller.__init__(self, robot_model, physicsClientId)
        # Velocities and accelerations start at zero; the goal frame
        # defaults to the robot frame
        self.linear_velocity = 0
        self.angular_velocity = 0
        self.linear_acceleration = 0
        self.angular_acceleration = 0
        self.frame = BaseController.FRAME_ROBOT
        self.pose_init = {}
        self.pose_goal = {}

    def _setGoal(self, x, y, theta, frame):
        """
        INTERNAL METHOD, set the position of the goal in a specific frame.

        Parameters:
            x - position of the goal on the x axis, in meters
            y - position of the goal on the y axis, in meters
            theta - orientation of the goal around the z axis, in radians
            frame - The frame in which the goal is expressed: FRAME_WORLD = 1,
            FRAME_ROBOT = 2
        """
        self.goal = [x, y, theta]
        self.frame = frame

    def _updateGoal(self):
        """
        INTERNAL METHOD, recompute the goal pose in the world frame and
        store it in pose_goal.
        """
        # Current pose of the base in the world frame
        world_pos, world_orn = pybullet.getBasePositionAndOrientation(
            self.robot_model,
            physicsClientId=self.physics_client)

        x, y, theta = self.goal
        position = [x, y, 0]
        orientation = [0, 0, theta]

        # A goal expressed in the robot frame has to be rotated/translated
        # into the world frame before it can be used
        if self.frame == BaseController.FRAME_ROBOT:
            euler = pybullet.getEulerFromQuaternion(world_orn)
            cos_yaw = math.cos(euler[2])
            sin_yaw = math.sin(euler[2])
            position = [
                x * cos_yaw - y * sin_yaw + world_pos[0],
                x * sin_yaw + y * cos_yaw + world_pos[1],
                0]
            orientation = [euler[0], euler[1], euler[2] + theta]

        self.pose_goal["position"] = position
        self.pose_goal["orientation"] = orientation

    def setLinearVelocity(self, linear_velocity):
        """
        Set the linear velocity.

        Parameter:
            linear_velocity : The linear velocity value in m/s
        """
        self.linear_velocity = linear_velocity

    def _setAngularVelocity(self, angular_velocity):
        """
        INTERNAL METHOD, set the angular velocity.

        Parameter:
            angular_velocity : The angular velocity value in rad/s
        """
        self.angular_velocity = angular_velocity

    def _setLinearAcceleration(self, linear_acceleration):
        """
        INTERNAL METHOD, set the linear acceleration.

        Parameter:
            linear_acceleration : The linear acceleration value in m/s^2
        """
        self.linear_acceleration = linear_acceleration

    def _setAngularAcceleration(self, angular_acceleration):
        """
        INTERNAL METHOD, set the angular acceleration.

        Parameter:
            angular_acceleration : The angular acceleration value in rad/s^2
        """
        self.angular_acceleration = angular_acceleration
class PepperBaseController(BaseController):
"""
Class describing a Pepper base controller
"""
MAX_LINEAR_VELOCITY = 0.55
MIN_LINEAR_VELOCITY = 0.1
MAX_ANGULAR_VELOCITY = 2.0
MIN_ANGULAR_VELOCITY = 0.3
MAX_LINEAR_ACCELERATION = 0.55
MIN_LINEAR_ACCELERATION = 0.1
MAX_ANGULAR_ACCELERATION = 3.0
MIN_ANGULAR_ACCELERATION = 0.1
def __init__(
        self,
        robot_model,
        speed,
        acceleration,
        motion_constraint,
        physicsClientId=0):
    """
    Constructor.

    Parameters:
        robot_model - the pybullet model of the robot
        speed - list containing the linear velocity and the angular
        velocity values, in m/s
        acceleration - list containing the linear acceleration and angular
        acceleration values, in m/s^2
        motion_constraint - the pybullet motion constraint applied on the
        robot
        physicsClientId - The id of the simulated instance in which Pepper
        will be controlled
    """
    BaseController.__init__(
        self,
        robot_model,
        physicsClientId=physicsClientId)

    # Clamp and store the requested speeds and accelerations
    self.setLinearVelocity(speed[0])
    self._setAngularVelocity(speed[1])
    self._setLinearAcceleration(acceleration[0])
    self._setAngularAcceleration(acceleration[1])

    # Force applied when driving the motion constraint
    self.force = 100
    # The robot stops the movement with a precision of 0.01 m and
    # 0.02 rad
    self.linear_threshold = 0.01
    self.angular_threshold = 0.02
    self.motion_constraint = motion_constraint
def setLinearVelocity(self, linear_velocity):
    """
    Set the linear velocity.

    Parameter:
        linear_velocity : The linear velocity value in m/s
    """
    # Clamp into [MIN_LINEAR_VELOCITY, MAX_LINEAR_VELOCITY] before
    # delegating to the base class
    bounded = max(
        PepperBaseController.MIN_LINEAR_VELOCITY,
        min(PepperBaseController.MAX_LINEAR_VELOCITY, linear_velocity))
    BaseController.setLinearVelocity(self, bounded)
def _setAngularVelocity(self, angular_velocity):
    """
    INTERNAL METHOD, set the angular velocity.

    Parameter:
        angular_velocity : The angular velocity value in rad/s
    """
    # Clamp into [MIN_ANGULAR_VELOCITY, MAX_ANGULAR_VELOCITY] before
    # delegating to the base class
    bounded = max(
        PepperBaseController.MIN_ANGULAR_VELOCITY,
        min(PepperBaseController.MAX_ANGULAR_VELOCITY, angular_velocity))
    BaseController._setAngularVelocity(self, bounded)
def _setLinearAcceleration(self, linear_acceleration):
    """
    INTERNAL METHOD, set the linear acceleration.

    Parameter:
        linear_acceleration : The linear acceleration value in m/s^2
    """
    # Clamp into [MIN_LINEAR_ACCELERATION, MAX_LINEAR_ACCELERATION]
    # before delegating to the base class
    bounded = max(
        PepperBaseController.MIN_LINEAR_ACCELERATION,
        min(PepperBaseController.MAX_LINEAR_ACCELERATION,
            linear_acceleration))
    BaseController._setLinearAcceleration(self, bounded)
def _setAngularAcceleration(self, angular_acceleration):
    """
    INTERNAL METHOD, set the angular acceleration.

    Parameter:
        angular_acceleration : The angular acceleration value in rad/s^2
    """
    # Clamp into [MIN_ANGULAR_ACCELERATION, MAX_ANGULAR_ACCELERATION]
    # before delegating to the base class
    bounded = max(
        PepperBaseController.MIN_ANGULAR_ACCELERATION,
        min(PepperBaseController.MAX_ANGULAR_ACCELERATION,
            angular_acceleration))
    BaseController._setAngularAcceleration(self, bounded)
def moveTo(self, x, y, theta, frame, _async=False):
    """
    Move the robot in frame world or robot (FRAME_WORLD=1, FRAME_ROBOT=2).
    This method can be called synchronously or asynchronously. In the
    asynchronous mode, the function can be called when it's already
    launched, this will update the goal of the motion.

    Parameters:
        x - position of the goal on the x axis, in meters
        y - position of the goal on the y axis, in meters
        theta - orientation of the goal around the z axis, in radians
        frame - The frame in which the goal is expressed: FRAME_WORLD = 1,
        FRAME_ROBOT = 2
        _async - The method is launched in async mode if True, in synch
        mode if False (False by default)

    Raises:
        pybullet.error - if an asynchronous moveTo is already running and
        a synchronous one is requested
    """
    self._setGoal(x, y, theta, frame)

    # Thread.isAlive() was removed in Python 3.9; is_alive() is the
    # supported spelling (available since Python 2.6)
    if self.module_process.is_alive():
        if _async is False:
            raise pybullet.error(
                "Already a moveTo asynchronous. Can't "
                "launch moveTo synchronous")
        # An async motion is already running: refresh its goal
        self._initProcess()
    elif _async:
        self.module_process = threading.Thread(target=self._moveToProcess)
        self.module_process.start()
    else:
        self._moveToProcess()
def move(self, x, y, theta):
    """
    Apply a speed on the robot's base.

    Parameters:
        x - Speed on the x axis, in m/s
        y - Speed on the y axis, in m/s
        theta - Rotational speed around the z axis, in rad/s
    """
    # Cancel any moveTo process currently running
    self.moveTo(0, 0, 0, frame=BaseController.FRAME_ROBOT, _async=True)

    # Bound the velocities. The max acceleration is not taken into
    # account here, this is a potential improvement
    max_lin = PepperBaseController.MAX_LINEAR_VELOCITY
    max_ang = PepperBaseController.MAX_ANGULAR_VELOCITY
    if abs(x) > max_lin:
        x = math.copysign(max_lin, x)
    if abs(y) > max_lin:
        y = math.copysign(max_lin, y)
    if abs(theta) > max_ang:
        theta = math.copysign(max_ang, theta)

    # Express the requested velocity in the world frame
    position, orientation = pybullet.getBasePositionAndOrientation(
        self.robot_model,
        physicsClientId=self.physics_client)
    yaw = pybullet.getEulerFromQuaternion(orientation)[2]
    world_velocity = [
        x * math.cos(yaw) - y * math.sin(yaw),
        x * math.sin(yaw) + y * math.cos(yaw),
        0]

    time.sleep(0.02)
    pybullet.resetBaseVelocity(
        self.robot_model,
        world_velocity,
        [0, 0, theta],
        physicsClientId=self.physics_client)
def stopMove(self):
    """
    Stop any asynchronous moveTo that has been launched. Calling this
    method is equivalent to calling
    moveTo(0.0, 0.0, 0.0, BaseController.FRAME_ROBOT, _async=True)
    """
    self.moveTo(0.0, 0.0, 0.0, BaseController.FRAME_ROBOT, _async=True)
def _updateConstraint(self):
    """
    INTERNAL METHOD, re-target the motion constraint onto the goal pose.
    """
    goal_orn_quaternion = pybullet.getQuaternionFromEuler(
        self.pose_goal["orientation"])
    pybullet.changeConstraint(
        self.motion_constraint,
        self.pose_goal["position"],
        jointChildFrameOrientation=goal_orn_quaternion,
        maxForce=self.force,
        physicsClientId=self.physics_client)
def _initProcess(self):
    """
    INTERNAL METHOD, initialize the motion process and all variables
    needed.

    Fills pose_init with the current world pose, refreshes the goal pose
    and the motion constraint, then computes the unit direction of the
    translation (p_x, p_y) and the sign of the rotation (p_theta).
    """
    # Get actual position in frame world
    self.pose_init["position"], self.pose_init["orientation"] =\
        pybullet.getBasePositionAndOrientation(
            self.robot_model,
            physicsClientId=self.physics_client)
    # convert pose_init orientation in orn_euler
    self.pose_init["orientation"] = pybullet.getEulerFromQuaternion(
        self.pose_init["orientation"]
    )
    self._updateGoal()
    self._updateConstraint()
    # Compute the ratio distance requested on the total distance
    # (getDistance presumably returns the Euclidean distance -- defined in
    # qibullet.tools, imported with *)
    distance = getDistance(
        self.pose_init["position"],
        self.pose_goal["position"])
    self.p_x = 0
    self.p_y = 0
    self.p_theta = 0
    # Guard: a zero distance would cause a division by zero below
    if distance:
        self.p_x = (
            self.pose_goal["position"][0] -
            self.pose_init["position"][0]) / distance
        self.p_y = (
            self.pose_goal["position"][1] -
            self.pose_init["position"][1]) / distance
    theta_to_do = getOrientation(
        self.pose_init["orientation"],
        self.pose_goal["orientation"])
    # p_theta becomes the sign of the remaining rotation (stays 0 if none)
    if abs(theta_to_do):
        self.p_theta = abs(theta_to_do) / theta_to_do
def _endProcess(self):
    """
    INTERNAL METHOD, stop the robot movement.
    """
    # Pin the constraint to the robot's current pose. The force applied
    # is purposely huge to avoid oscillations.
    current_pos, current_orn = pybullet.getBasePositionAndOrientation(
        self.robot_model,
        physicsClientId=self.physics_client)
    pybullet.changeConstraint(
        self.motion_constraint,
        current_pos,
        jointChildFrameOrientation=current_orn,
        maxForce=self.force * 10,
        physicsClientId=self.physics_client)
    # Zero out the base velocity as well
    pybullet.resetBaseVelocity(
        self.robot_model,
        [0, 0, 0],
        [0, 0, 0],
        physicsClientId=self.physics_client)
def _moveToProcess(self):
"""
INTERNAL METHOD, process allowing to move the robot's base.
"""
self._initProcess()
# actual_pos = self.pose_init["position"]
# actual_orn = self.pose_init["orientation"]
init_pos = self.pose_init["position"]
init_orn = self.pose_init["orientation"]
actual_pos = init_pos
actual_orn = init_orn
while not self._module_termination:
translation_distance = getDistance(
actual_pos,
self.pose_goal["position"])
# Modulo the orientation pose goal with conversion in quaternion
modulo_quater_pose_goal = pybullet.getQuaternionFromEuler(
self.pose_goal["orientation"])
# Conversion into euler
modulo_euler_pose_goal = pybullet.getEulerFromQuaternion(
modulo_quater_pose_goal)
rotation_distance = abs(getOrientation(
actual_orn,
modulo_euler_pose_goal))
if translation_distance < self.linear_threshold and\
rotation_distance < self.angular_threshold:
break
actual_pos, actual_orn = pybullet.getBasePositionAndOrientation(
self.robot_model,
physicsClientId=self.physics_client)
# convert actual_orn into euler
actual_orn = pybullet.getEulerFromQuaternion(actual_orn)
linear_vel_x = computeVelocity(
self.linear_acceleration,
0.05,
self.linear_velocity,
getDistance(actual_pos, init_pos),
getDistance(actual_pos, self.pose_goal["position"]))
linear_vel_y = linear_vel_x
angular_vel = computeVelocity(
self.angular_acceleration,
| |
# tasty.py
# Standard library
import logging
import math
import pprint
from dataclasses import dataclass, field
from pathlib import Path
from typing import List

# Third-party
import pandas as pd
from dataclasses_json import dataclass_json

# Local modules
from history import History
from money import Money
from position import PositionType
from transaction import Transaction
@dataclass_json
@dataclass
class Values:
    """Aggregates all per-year sums of the account history.

    Each attribute accumulates the Money of one transaction category.
    default_factory is used so every Values instance gets its own Money
    objects (a plain `Money()` default would be shared between all
    instances and corrupted by in-place updates).
    """
    withdrawal: Money = field(default_factory=Money)
    transfer: Money = field(default_factory=Money)
    balanceAdjustment: Money = field(default_factory=Money)
    fee: Money = field(default_factory=Money)
    deposit: Money = field(default_factory=Money)
    creditInterest: Money = field(default_factory=Money)
    debitInterest: Money = field(default_factory=Money)
    dividend: Money = field(default_factory=Money)
    stockAndOptionsSum: Money = field(default_factory=Money)
    stockSum: Money = field(default_factory=Money)
    optionSum: Money = field(default_factory=Money)
    grossOptionsDifferential: Money = field(default_factory=Money)
    stockProfits: Money = field(default_factory=Money)
    stockLoss: Money = field(default_factory=Money)
    otherLoss: Money = field(default_factory=Money)
    stockFees: Money = field(default_factory=Money)
    otherFees: Money = field(default_factory=Money)

    def __str__(self):
        """pretty prints all the contained Values

        >>> values = Values()
        """
        # dataclasses_json's to_json forwards its keyword arguments to
        # json.dumps, so the pretty printing happens in one step. The
        # previous implementation called json.dumps on the already
        # serialized string and referenced the unimported name `json`
        # (NameError at runtime).
        return self.to_json(indent=4, sort_keys=True)
class Tasty(object):
yearValues = dict()
history: History
positions: pd.DataFrame
closedTrades: pd.DataFrame
def __init__(self, path: str):
    """Load the transaction history from *path* and reset all state.

    Parameters:
        path - CSV file containing the transaction history
    """
    logger = logging.getLogger()
    # Configure the root logger only once, so creating several Tasty
    # objects does not attach duplicate handlers
    if not logger.handlers:
        handler = logging.StreamHandler()
        formatter = logging.Formatter(
            "%(asctime)s %(name)-12s %(levelname)-8s %(message)s")
        handler.setFormatter(formatter)
        # (a stray no-op statement `logger.handlers` was removed here)
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
    # NOTE(review): yearValues is a class attribute shared between
    # instances; clearing it gives this instance a fresh ledger but also
    # wipes any other live Tasty object's data
    self.yearValues.clear()
    self.history = History.fromFile(path)
    self.closedTrades: pd.DataFrame = pd.DataFrame()
    self.positions = pd.DataFrame()
def year(self, year):
    """used to access the dictionary and create the year if it doesn't exist
    """
    try:
        return self.yearValues[year]
    except KeyError:
        self.yearValues[year] = Values()
        return self.yearValues[year]
def moneyMovement(self, row: Transaction):
    """handles moneyMovement entries

    >>> t = Tasty("test/merged.csv")

    # known good entry of 2020
    >>> t.moneyMovement(t.history.iloc[117])
    >>> str(t.year(2020).withdrawal)
    "{'eur': 1980.251453985631, 'usd': 2315.31}"

    # first entry is deposit -> transfer
    >>> t.moneyMovement(t.history.iloc[-1])
    >>> str(t.year(2018).transfer)
    "{'eur': 966.10578858385, 'usd': 1200.0}"

    # balance adjustment
    >>> t.moneyMovement(t.history.iloc[322])
    >>> str(t.year(2018).balanceAdjustment)
    "{'eur': -0.008085599547206425, 'usd': -0.01}"

    # fee
    >>> t.moneyMovement(t.history.iloc[323])
    >>> str(t.year(2018).fee)
    "{'eur': -35.02348938927588, 'usd': -43.24}"

    # deposit
    >>> t.moneyMovement(t.history.iloc[262])
    >>> str(t.year(2019).deposit)
    "{'eur': 0.009070294784580499, 'usd': 0.01}"

    # credit interest
    >>> t.moneyMovement(t.history.iloc[8])
    >>> str(t.year(2020).creditInterest)
    "{'eur': 0.02461235540241201, 'usd': 0.03}"

    # debit interest
    >>> t = Tasty("test/merged2.csv")
    >>> t.moneyMovement(t.history.iloc[48])
    >>> str(t.year(2021).debitInterest)
    "{'eur': -0.7164621592687145, 'usd': -0.87}"

    # dividend
    >>> t = Tasty("test/merged2.csv")
    >>> t.moneyMovement(t.history.iloc[12])
    >>> str(t.year(2021).dividend)
    "{'eur': -2.470559169892119, 'usd': -3.0}"
    """
    t = Transaction(row)
    m = Money(row=row)
    subcode = t.loc["Transaction Subcode"]
    # Subcodes that map 1:1 onto a Values attribute
    simple_subcodes = {
        "Transfer": "transfer",
        "Withdrawal": "withdrawal",
        "Balance Adjustment": "balanceAdjustment",
        "Fee": "fee",
        "Credit Interest": "creditInterest",
        "Debit Interest": "debitInterest",
        "Dividend": "dividend",
    }
    if subcode in simple_subcodes:
        values = self.year(t.getYear())
        attribute = simple_subcodes[subcode]
        setattr(values, attribute, getattr(values, attribute) + m)
    elif subcode == "Deposit" and \
            t.loc["Description"] == "INTEREST ON CREDIT BALANCE":
        # Only interest on the credit balance counts as a deposit here;
        # ordinary deposits appear under the "Transfer" subcode
        self.year(t.getYear()).deposit += m
    else:
        # Typo "unkonwn" fixed in the error message
        raise KeyError("Found unknown money movement subcode: '{}'".format(
            subcode))
def receiveDelivery(self, row):
    """ sub function to process the column namend "Receive Deliver" in the csv file

    # assigned -200 LFIN stock
    >>> t = Tasty("test/merged.csv")
    >>> t.addPosition(Transaction(t.history.iloc[330]))
    >>> t.positions.iloc[0]["Symbol"]
    'LFIN'
    >>> closing = Transaction(t.history.iloc[330])
    >>> closing["Transaction Subcode"] = "Buy to Close"
    >>> t.addPosition(closing)
    >>> t.positions.size
    0
    >>> t.closedTrades.iloc[0]["Quantity"]
    200.0
    >>> t = Tasty("test/merged.csv")
    >>> t.addPosition(Transaction(t.history.iloc[332]))
    >>> t.addPosition(Transaction(t.history.iloc[329]))
    >>> len(t.closedTrades.index)
    1
    >>> t.positions.size
    0

    # Expiration
    >>> t = Tasty("test/merged.csv")
    >>> t.addPosition(Transaction(t.history.iloc[315]))
    >>> t.addPosition(Transaction(t.history.iloc[304]))
    >>> len(t.closedTrades.index)
    1
    >>> len(t.closedTrades)
    1
    >>> t.positions.size
    0

    # reverse split
    >>> t = Tasty("test/merged2.csv")
    >>> t.addPosition(Transaction(t.history.iloc[520])) # 6 P@2
    >>> t.addPosition(Transaction(t.history.iloc[519])) # -6 P@3.5
    >>> t.addPosition(Transaction(t.history.iloc[516])) # -6 P@3.5
    >>> t.addPosition(Transaction(t.history.iloc[514])) # 6 P@3.5
    >>> len(t.closedTrades.index)
    2
    >>> t.positions.size
    0

    # Symbol Change
    >>> t = Tasty("test/merged2.csv")
    >>> t.addPosition(Transaction(t.history.iloc[46]))
    """
    t = Transaction(row)
    # All known subcodes are booked through addPosition; note that
    # "Symbol Change" is not really a sale, which is incorrect here
    known_subcodes = (
        "Buy to Open",
        "Sell to Close",
        "Buy to Close",
        "Sell to Open",
        "Assignment",
        "Expiration",
        "Reverse Split",
        "Symbol Change",
    )
    if t.loc["Transaction Subcode"] in known_subcodes:
        self.addPosition(t)
    else:
        raise ValueError("unknown subcode for receive deliver: {}".format(
            t.loc["Transaction Subcode"]))
def trade(self, row):
    """Books one entry of the "Trade" transaction code via addPosition."""
    t = Transaction(row)
    known_subcodes = (
        "Buy to Open",
        "Sell to Close",
        "Buy to Close",
        "Sell to Open",
    )
    if t.loc["Transaction Subcode"] in known_subcodes:
        self.addPosition(t)
    else:
        raise ValueError("unknown subcode for Trade:{}".format(
            t.loc["Transaction Subcode"]))
def addPosition(self, transaction):
""" adds a position to the internal ledger. If it resolves against a previous position, profit and loss is calculated and recorded
# 2 LFIN calls open,
>>> t.addPosition(Transaction(t.history.iloc[333]))
>>> t.positions.iloc[0]["Symbol"]
'LFIN'
# one closing, absolute positions should be 1 afterwards
>>> t.addPosition(Transaction(t.history.iloc[328]))
>>> t.positions.iloc[0]["Quantity"]
1.0
>>> t.closedTrades.iloc[0]["Quantity"]
-1.0
# close this up and check if it's gone from the positions
>>> t.addPosition(Transaction(t.history.iloc[328]))
>>> t.positions.size
0
>>> t.closedTrades.iloc[1]["Quantity"]
-1.0
# add nearly equal position but with different strike
>>> t.addPosition(Transaction(t.history.iloc[333]))
>>> wrongStrike = Transaction(t.history.iloc[328])
>>> wrongStrike["Transaction Subcode"] = "Sell to Open"
>>> wrongStrike.Strike = 5
>>> wrongStrike.Quantity
1
>>> t.addPosition(wrongStrike)
>>> t.positions.iloc[0].Quantity
2.0
>>> t.positions.iloc[1].Quantity
1.0
>>> t.positions.iloc[1].Strike
5.0
# multiple options of the same type
>>> t = Tasty("test/merged2.csv")
>>> t.addPosition(Transaction(t.history.iloc[238]))
>>> t.positions.iloc[0].Quantity
1.0
>>> t.addPosition(Transaction(t.history.iloc[237]))
>>> t.positions.iloc[0].Quantity
2.0
>>> t.addPosition(Transaction(t.history.iloc[236]))
>>> t.positions.iloc[0].Quantity
3.0
# 2x receive deliver
>>> t = Tasty("test/merged2.csv")
>>> t.addPosition(Transaction(t.history.iloc[171]))
>>> t.addPosition(Transaction(t.history.iloc[144]))
>>> t.positions.iloc[0].Quantity
400.0
# blackberry BB
>>> t = Tasty("test/merged2.csv")
>>> t.addPosition(Transaction(t.history.iloc[263]))
>>> t.addPosition(Transaction(t.history.iloc[249]))
>>> t.addPosition(Transaction(t.history.iloc[238]))
>>> t.addPosition(Transaction(t.history.iloc[237]))
>>> t.addPosition(Transaction(t.history.iloc[236]))
>>> t.addPosition(Transaction(t.history.iloc[229]))
>>> t.addPosition(Transaction(t.history.iloc[197]))
>>> t.addPosition(Transaction(t.history.iloc[171]))
>>> t.addPosition(Transaction(t.history.iloc[170]))
>>> t.addPosition(Transaction(t.history.iloc[168]))
>>> t.addPosition(Transaction(t.history.iloc[167]))
>>> t.addPosition(Transaction(t.history.iloc[161]))
>>> t.addPosition(Transaction(t.history.iloc[159]))
>>> t.addPosition(Transaction(t.history.iloc[144]))
>>> t.addPosition(Transaction(t.history.iloc[143]))
>>> t.addPosition(Transaction(t.history.iloc[141]))
>>> t.addPosition(Transaction(t.history.iloc[132]))
>>> t.addPosition(Transaction(t.history.iloc[130]))
>>> t.addPosition(Transaction(t.history.iloc[120]))
>>> t.addPosition(Transaction(t.history.iloc[119]))
>>> t.addPosition(Transaction(t.history.iloc[118]))
>>> t.addPosition(Transaction(t.history.iloc[106]))
>>> t.addPosition(Transaction(t.history.iloc[98]))
>>> t.addPosition(Transaction(t.history.iloc[97]))
>>> t.addPosition(Transaction(t.history.iloc[92]))
>>> t.addPosition(Transaction(t.history.iloc[85]))
>>> t.addPosition(Transaction(t.history.iloc[80]))
>>> t.addPosition(Transaction(t.history.iloc[78]))
>>> t.addPosition(Transaction(t.history.iloc[38]))
>>> t.addPosition(Transaction(t.history.iloc[37]))
>>> t.addPosition(Transaction(t.history.iloc[35]))
>>> t.addPosition(Transaction(t.history.iloc[32]))
>>> t.addPosition(Transaction(t.history.iloc[31]))
>>> t.addPosition(Transaction(t.history.iloc[30]))
>>> t.addPosition(Transaction(t.history.iloc[23]))
>>> t.addPosition(Transaction(t.history.iloc[22]))
>>> len(t.positions.index)
2
# LFIN again
>>> t = Tasty("test/merged2.csv")
>>> t.addPosition(Transaction(t.history.iloc[679])) # Bought 2 LFIN 06/15/18 Call 40.00 @ 2.20
>>> t.positions.iloc[0].Amount
-440.0
>>> t.addPosition(Transaction(t.history.iloc[678])) # Sold 2 LFIN 06/15/18 Call 30.00 @ 7.10
>>> t.positions.iloc[1].Amount
1420.0
>>> t.addPosition(Transaction(t.history.iloc[676])) # Sell to Open 200 LFIN @ 30.00
>>> t.positions.iloc[2].Amount
6000.0
>>> t.addPosition(Transaction(t.history.iloc[675])) # Removal of option due to assignment Call @30
>>> len(t.positions.index) # removed
2
>>> t.closedTrades.iloc[0]["Amount"]
1420.0
>>> t.closedTrades.iloc[0]["Fees"]
2.324
>>> t.addPosition(Transaction(t.history.iloc[674])) # Sold 1 LFIN 06/15/18 Call 40.00 @ 16.78
>>> t.positions.iloc[0].Amount # only half the opening value
-220.0
>>> t.positions.iloc[0].Fees # only half the opening value
1.14
>>> t.positions.iloc[1].Amount
6000.0
>>> t.closedTrades.iloc[1]["Fees"]
1.3219999999999998
>>> t.addPosition(Transaction(t.history.iloc[673])) # Bought 100 LFIN @ 56.76
>>> t.positions.iloc[1].Amount # half remains open
3000.0
>>> t.positions.iloc[1].Fees # half remains open
2.582
>>> t.closedTrades.iloc[2].Amount # -3000 + 5676
-2676.0
>>> t.closedTrades.iloc[2].Fees # 2.582 + 0.08
2.662
>>> t.addPosition(Transaction(t.history.iloc[672])) # Bought 100 LFIN @ 57.20
>>> t.addPosition(Transaction(t.history.iloc[671])) # Sold 1 LFIN 06/15/18 Call 40.00 @ 17.15
>>> t.closedTrades["Amount"].sum()
-1023.0
>>> len(t.positions.index)
0
# SPRT
>>> t = Tasty("test/merged2.csv")
>>> t.addPosition(Transaction(t.history.iloc[21]))
>>> t.addPosition(Transaction(t.history.iloc[20]))
>>> t.addPosition(Transaction(t.history.iloc[19]))
>>> t.addPosition(Transaction(t.history.iloc[18]))
>>> t.addPosition(Transaction(t.history.iloc[17]))
>>> t.addPosition(Transaction(t.history.iloc[16])) # this is the last buy
>>> t.positions.iloc[0].Amount
-1934.9999999999998
>>> t.addPosition(Transaction(t.history.iloc[8]))
>>> t.addPosition(Transaction(t.history.iloc[7]))
>>> t.addPosition(Transaction(t.history.iloc[6]))
>>> t.addPosition(Transaction(t.history.iloc[5]))
>>> t.addPosition(Transaction(t.history.iloc[4]))
>>> t.addPosition(Transaction(t.history.iloc[3]))
>>> t.addPosition(Transaction(t.history.iloc[2]))
>>> len(t.positions.index)
0
>>> t.closedTrades["Amount"].sum()
469.99999999999994
"""
for index, row in self.positions.iterrows():
entry = Transaction(row)
if entry.getSymbol() == transaction.getSymbol() and entry.getType() == transaction.getType() and transaction.getQuantity() != 0 and (entry.getType() == PositionType.stock or entry.getStrike() == transaction.getStrike() and
entry.getExpiry() == transaction.getExpiry()):
trade = Transaction()
logging.info("{} found an open position: {} {} and adding {}".format(
entry.getDateTime(), entry.getQuantity(), entry.getSymbol(), transaction.getQuantity()))
if (transaction.getType() == PositionType.call or transaction.getType() == PositionType.put):
trade["Expiry"] = transaction.getExpiry()
trade["Strike"] = transaction.getStrike()
(newPositionQuantity, newTransactionQuantity, tradeQuantity) = Tasty._updatePosition(
entry.getQuantity(), transaction.getQuantity())
# percentage which is used in a trade
# percentage = (entry.getQuantity() / transaction.getQuantity)
percentageClosed = abs(tradeQuantity / entry.getQuantity())
trade["Amount"] = percentageClosed * \
entry["Amount"] + transaction["Amount"]
trade["AmountEuro"] = percentageClosed * \
entry["AmountEuro"] + transaction["AmountEuro"]
trade["Fees"] = percentageClosed * \
entry["Fees"] + transaction["Fees"]
trade["FeesEuro"] = percentageClosed * \
entry["FeesEuro"] + transaction["FeesEuro"]
trade["Symbol"] = transaction.getSymbol()
trade["callPutStock"] = transaction.getType()
trade["Opening Date"] = entry.getDateTime()
trade["Closing Date"] = transaction.getDateTime()
percentage = (transaction.getQuantity() -
tradeQuantity) / transaction.getQuantity()
entry["Amount"] = (1-percentageClosed) * entry["Amount"] + \
percentage * transaction["Amount"]
entry["AmountEuro"] = (1-percentageClosed) * entry["AmountEuro"] + \
percentage * transaction["AmountEuro"]
entry["Fees"] = (1-percentageClosed) * entry["Fees"] + \
percentage * transaction["Fees"]
entry["FeesEuro"] = (1-percentageClosed) * entry["FeesEuro"] + \
| |
sample and this is known.
outfile = open(fasta_out, 'w')
iter_fst = util.iter_fst
seq_counter = 0
for record in iter_fst(fasta_in):
sid = record[0][1:] # id
seq = record[1] # sequence
record[0] = '>' + sampleID + '_' + str(seq_counter)
outfile.write('\n'.join(record) + '\n')
seq_counter += 1
outfile.close()
def dereplicate_and_sort(fasta_in, fasta_out, OTU_database, separator, processing_summary_file, min_count):
    # Dereplicate and sort sequences by size.
    # Delegates the actual work to an external helper script
    # (~/scripts/3.dereplicate.py); sequences observed fewer than
    # min_count times are discarded.
    # NOTE(review): the command line is built by string concatenation and
    # executed through os.system -- arguments containing shell
    # metacharacters would be interpreted by the shell. Confirm all inputs
    # are trusted before reusing this in another context.
    print "[[ Dereplicating and sorting ]] Discarding sequences with fewer than " + str(min_count) + " reads"
    os.system('python ~/scripts/3.dereplicate.py -f ' + fasta_in + " -s '" + separator + "' -o " + OTU_database + ' -d ' + fasta_out + ' -P ' + processing_summary_file + ' -M ' + str(min_count))
    print "[[ Dereplicating and sorting ]] Complete."
    return None
def remove_chimeras_and_cluster_OTUs(fasta_in, OTU_sequences_fasta, clustering_results, cluster_percentage=97.0, relabel=False):
    # Remove chimeric sequences and then cluster OTUs with default similarity of 97%.
    # Runs usearch8 -cluster_otus; the radius passed to usearch is the
    # allowed divergence (100 - cluster_percentage). With relabel=True the
    # resulting OTUs are renamed with a "denovo" prefix.
    # NOTE(review): command built by string concatenation and run through
    # os.system -- shell metacharacters in the file names would be
    # interpreted by the shell; confirm inputs are trusted.
    print "[[ Removing chimeras and clustering OTUs ]] ..."
    max_cluster_diff = 100.0 - float(cluster_percentage)
    if relabel == True:
        os.system('/home/ubuntu/bin/usearch8 -cluster_otus ' + fasta_in + ' -otus ' + OTU_sequences_fasta + ' -otu_radius_pct ' + str(max_cluster_diff) + ' -sizein -uparseout ' + clustering_results + ' -relabel denovo')
    else:
        os.system('/home/ubuntu/bin/usearch8 -cluster_otus ' + fasta_in + ' -otus ' + OTU_sequences_fasta + ' -otu_radius_pct ' + str(max_cluster_diff) + ' -sizein -uparseout ' + clustering_results)
    print "[[ Removing chimeras and clustering OTUs ]] Complete."
    return None
def separate_GG_reads(fasta_dereplicated, OTU_GG_dict, output_GG_reads, output_denovo_reads):
    """Split a dereplicated FASTA into reads whose IDs are keys of
    *OTU_GG_dict* (GG-referenced) and all remaining (de novo) reads."""
    fst_reader = util.iter_fst
    with open(output_GG_reads, 'w') as gg_out, open(output_denovo_reads, 'w') as denovo_out:
        for rec in fst_reader(fasta_dereplicated):
            seq_id = rec[0][1:]  # drop the leading '>'
            if seq_id in OTU_GG_dict:
                gg_out.write('\n'.join(rec) + '\n')
            else:
                rec[0] = rec[0][0] + seq_id  # keep the '>' prefix
                denovo_out.write('\n'.join(rec) + '\n')
def collapse_oligotypes(oligotype_table_filename, output_OTU_table):
    """Collapse oligotypes onto OTUs (1.1, 1.2, 1.3 -> 1) and write an OTU table.

    :param oligotype_table_filename: tab-delimited table whose first column
        '#Oligotype' holds IDs of the form <OTU>.<oligotype>; remaining
        columns are per-sample counts.
    :param output_OTU_table: path of the tab-delimited OTU table to write.
    """
    x = pd.read_csv(oligotype_table_filename, sep='\t')
    oligos = x['#Oligotype']
    columns = x.columns
    OTU_dict = {}
    for i in range(len(oligos)):
        # DataFrame.ix was removed from pandas; .iloc gives the same
        # positional row lookup on the default RangeIndex used here.
        counts = np.array(x.iloc[i])
        # The OTU ID is everything before the first '.' of the oligotype ID.
        OTU_ID = str(str(oligos[i]).split('.')[0])
        if OTU_ID not in OTU_dict:
            OTU_dict[OTU_ID] = counts[1:]
        else:
            OTU_dict[OTU_ID] = OTU_dict[OTU_ID] + counts[1:]
    # Write OTU table: header row, then one row of integer counts per OTU.
    with open(output_OTU_table, 'w') as fid:
        firstline = 'OTU_ID' + '\t' + '\t'.join(columns[1:])
        fid.write(firstline + '\n')
        for OTU in OTU_dict:
            abundance_string = '\t'.join(OTU_dict[OTU].astype(int).astype(str))
            fid.write(OTU + '\t' + abundance_string + '\n')
def build_GG_OTU_table(dereplication_map, OTU_GG_dict, OTU_table_gg):
    """Build a GG-referenced OTU table.

    Inputs are the dereplication map written by dereplicate_and_sort() and a
    dict mapping fasta sequence IDs (with ';size=...' suffix) to GG OTU IDs.
    Also writes '<dereplication_map>.gg', the map restricted to GG-mapped
    sequences with the sequence ID replaced by its GG ID.
    """
    gg_map_file = dereplication_map + '.gg'
    # Keys of OTU_GG_dict carry a ';size=...' suffix; strip it for lookup.
    id_to_gg = {}
    for key in OTU_GG_dict:
        id_to_gg[key.split(';')[0]] = OTU_GG_dict[key]
    # counts_by_sample: {sampleID: {original_sequence_ID: count-as-string}}
    counts_by_sample = defaultdict(lambda: defaultdict(str))
    with open(gg_map_file, 'w') as newfid:
        with open(dereplication_map, 'r') as fid:
            for line in fid.readlines():
                if line.split()[0] not in id_to_gg:
                    continue
                fields = line.split('\t')
                seq_id = fields[0]
                # fields[1] is a space-separated 'sample:count' list.
                for pair in fields[1].split(' '):
                    counts_by_sample[pair.split(':')[0]][seq_id] = pair.split(':')[1]
                # Rewrite the line with the GG OTU ID in place of the seq ID.
                fields[0] = id_to_gg[seq_id]
                newfid.write('\t'.join(fields))
    # Emit the table: one header row, then one count row per sample.
    with open(OTU_table_gg, 'w') as f:
        gg_otus = list(set(OTU_GG_dict.values()))
        f.write('sample\t' + '\t'.join(gg_otus) + '\n')
        for sample in counts_by_sample:
            f.write(sample + '\t')
            totals = dict.fromkeys(gg_otus, 0)
            for seq_id in counts_by_sample[sample]:
                totals[id_to_gg[seq_id]] = totals[id_to_gg[seq_id]] + int(counts_by_sample[sample][seq_id])
            f.write('\t'.join(str(totals[otu]) for otu in gg_otus) + '\n')
def parse_multihit_alignment(uc_file):
    """Parse a UC file from a usearch alignment run with -maxaccepts 10
    (i.e. up to ten hits per query).

    :param uc_file: path to the whitespace-delimited UC file; field 9
        (index 8) is the query ID and field 10 (index 9) is the hit ID.
    :returns: dict mapping each query ID to the list of its hit IDs,
        in file order.
    """
    alignment_dict = {}
    with open(uc_file, 'r') as fid:
        # Stream line by line instead of readlines(); setdefault avoids the
        # original O(n) 'query not in dict.keys()' scan per line.
        for line in fid:
            parts = line.split()
            alignment_dict.setdefault(parts[8], []).append(parts[9])
    return alignment_dict
def parse_multihit_alignment_test(uc_file):
    """Variant of parse_multihit_alignment that strips the ';size=...'
    suffix from query IDs and prints per-line progress.

    :returns: dict mapping each stripped query ID to its list of hit IDs.
    """
    with open(uc_file, 'r') as fid:
        all_lines = fid.readlines()
    alignment_dict = {}
    total = len(all_lines)
    for processed, line in enumerate(all_lines):
        # Progress counter starts at 0, as in the original.
        print(str(processed) + " / " + str(total))
        parts = line.split()
        query = parts[8].split(';')[0]
        # setdefault avoids the original O(n) 'query not in dict.keys()'
        # membership scan on every line.
        alignment_dict.setdefault(query, []).append(parts[9])
    return alignment_dict
def parse_alignment(alignment_file):
    """Parse an alignment of OTU sequences against a reference database.

    Scans for lines starting with ' Query'; each such line and the line
    that follows it hold the query and target IDs as their third token,
    with a one-character prefix (e.g. '>') that is dropped.

    :returns: {query_OTU_ID: reference_DB_ID}
    """
    with open(alignment_file, 'r') as fid:
        all_lines = fid.readlines()
    alignment_dict = {}
    for i, line in enumerate(all_lines):
        if line[:6] != " Query":
            continue
        query_token = line.split()[2]
        target_token = all_lines[i + 1].split()[2]
        alignment_dict[query_token[1:]] = target_token[1:]
    return alignment_dict
def renumber_sequences(fasta_files, separator):
    """Renumber sequence IDs per sample so that each sampleID_N occurs only
    once across all of *fasta_files* (counts carry over between files)."""
    def renumber_one(src, dst, per_sample):
        # Rewrite src into dst with fresh per-sample sequence numbers.
        fst_reader = util.iter_fst
        with open(dst, 'w') as out:
            for rec in fst_reader(src):
                id_parts = rec[0][1:].split(separator)
                sample = ''.join(id_parts[:-1])
                per_sample[sample] = per_sample.get(sample, 0) + 1
                rec[0] = '>%s_%d' % (sample, per_sample[sample])
                out.write('\n'.join(rec) + '\n')
        return per_sample
    print("[[ Renumbering sequences ]] ...")
    counts = {}
    for fname in fasta_files:
        counts = renumber_one(fname, fname + '.tmp', counts)
        os.system('mv ' + fname + '.tmp ' + fname)
    print("[[ Renumbering sequences ]] Complete.")
def pull_counts(raw_sequence_filename, separator):
    """Count occurrences of each unique sequence per sample.

    Sequence IDs look like '>sampleID<separator>number'; the sample ID is
    everything before the last separator.

    :returns: (x, samples) where x[seq][sampleID] = count and *samples* is
        the list of sample IDs in order of first appearance.
    """
    counts = {}
    samples = []
    fst_reader = util.iter_fst
    for rec in fst_reader(raw_sequence_filename):
        seq_id, seq = rec[0][1:], rec[1]
        parts = seq_id.split(separator)
        sample = separator.join(parts[:-1])
        if seq not in counts:
            counts[seq] = {}
        if sample not in counts[seq]:
            counts[seq][sample] = 0
        if sample not in samples:
            samples.append(sample)
        counts[seq][sample] += 1
    return counts, samples
def compute_oligotype_table(raw_trimmed, raw_dereplicated, clustering_file, separator, oligotype_table_filename):
# Inputs:
# 'raw_trimmed' = raw reads, trimmed to final form (FASTA)
# 'raw_dereplicated' = dereplicated reads (FASTA)
# 'clustering_file' = output clustering file from 'usearch8 -cluster_otus' (-parseout option)
# 'separator' = separator character, e.g. '_' for '>sampleID_sequenceNumber' sequence IDs
# 'oligotype_table_filename' = output filename
#
# 1. get sequence counts
# x has x[seq][sample] = count (of that sequence in that sample),
# where seq is the ATCG sequence in raw_trimmed fasta file
x, samples = pull_counts(raw_trimmed, separator)
# 2. populate sequence lookup
sequence_lookup = bidict({}) # seq <--> seqID
iter_fst = util.iter_fst
for record in iter_fst(raw_dereplicated):
[sid, seq] = record[:2]
sid = int(sid[1:].split(';')[0])
sequence_lookup[seq] = sid
# 3. Populate clustering_lookup (from otu_clustering.tab)
clustering_lookup = {} # seqID <--> 'otu' or 'match'
OTU_lookup = {} # seqID <--> OTU_ID centroid
with open(clustering_file, 'r') as fid:
all_lines = fid.readlines()
for line in all_lines:
split_line = line.split()
seqID = int(split_line[0].split(';')[0])
clustering_lookup[seqID] = split_line[1]
if split_line[1] == 'match' or split_line[1] == "otu":
OTU_lookup[seqID] = split_line[4]
# 4. Populate dictionaries with each sequence within an OTU. Each of the three dictionaries contain lists whose
# entries are ordered in the same manner in each dict.
OTU_oligos = {} # OTU_ID <--> ['ACAGT','ACAAT', ...]
OTU_original_seqIDs = {} # OTU_ID <--> [seqID1, seqID2, ...] (original sequence IDs for oligotypes)
OTU_oligo_IDs = {} # OTU_ID <--> [0, 1, ...] (oligotype IDs)
for seq in sequence_lookup:
seqID = sequence_lookup[seq]
if clustering_lookup[seqID] != "chimera":
OTU_centroid = OTU_lookup[seqID]
if OTU_centroid not in OTU_oligos:
OTU_oligos[OTU_centroid] = []
OTU_oligo_IDs[OTU_centroid] = []
OTU_original_seqIDs[OTU_centroid] = []
if seq not in OTU_oligos[OTU_centroid]:
OTU_oligos[OTU_centroid].append(seq)
OTU_original_seqIDs[OTU_centroid].append(seqID)
if len(OTU_oligo_IDs[OTU_centroid]) > 0:
OTU_oligo_IDs[OTU_centroid].append(OTU_oligo_IDs[OTU_centroid][-1] + | |
return
def adsorbate_placement(system, molecule_file, ads_vector):
    """Place an adsorbate molecule at *ads_vector* inside *system*.

    The molecule is read from *molecule_file*, translated so its centre of
    mass (CoM) sits on *ads_vector*, rotated by a random orthonormal matrix
    about its CoM, and expressed in *system*'s lattice.

    :param system: periodic Structure providing the lattice.
    :param molecule_file: file readable by Molecule.from_file().
    :param ads_vector: target cartesian position for the molecule's CoM.
    :returns: (frac_coords, atom_type) of the rotated, placed molecule.
    """
    mol = Molecule.from_file(molecule_file)
    com_xyz = mol.center_of_mass  # CoM in cartesian coordinates
    # Shift the molecule's CoM onto the requested adsorption vector.
    shift = np.array([c - a for c, a in zip(com_xyz, ads_vector)])
    mol.translate_sites([i for i in range(0, len(mol))], -shift)
    # Random rotation about the CoM: move CoM to the origin, rotate,
    # then translate back.
    com_origin = mol.center_of_mass
    mol.translate_sites([i for i in range(0, len(mol))], -com_origin)
    # Build a random rotation matrix R column by column (Gram-Schmidt on
    # two random unit vectors, third column from the cross product).
    R = np.zeros((3, 3), float)
    R[:, 0] = RandomNumberOnUnitSphere()
    m = RandomNumberOnUnitSphere()
    # Subtract the component along column 0 so column 1 is orthogonal.
    R[:, 1] = m - np.dot(m, R[:, 0]) * R[:, 0]
    R[:, 1] = R[:, 1] / np.linalg.norm(R[:, 1])
    R[:, 2] = np.cross(R[:, 0], R[:, 1])
    R = R.tolist()
    rotated_xyz = np.dot(mol.cart_coords, R) + com_origin
    rotated_molecule = Structure(system.lattice, mol.species, rotated_xyz,
                                 coords_are_cartesian=True)
    # NOTE(review): the original also hand-converted rotated_xyz to
    # fractional coordinates into an 'ads_frac' list that was never used in
    # the return value; that dead code (and several unused locals) was
    # removed. The Structure above already provides frac_coords.
    atom_type = [str(e) for e in rotated_molecule.species]
    return rotated_molecule.frac_coords, atom_type
def adsorbate_framework_overlap(system, com_frac, atom_type):
    """Return True if any adsorbate atom (fractional coords *com_frac*,
    element symbols *atom_type*) overlaps a framework atom of *system*,
    using the sum of van der Waals radii with a small tolerance."""
    for idx, frac in enumerate(com_frac):
        dists = system.lattice.get_all_distances(frac, system.frac_coords)[0]
        probe_radius = ap.get_vdf_radius(str(atom_type[idx]))
        for j, d in enumerate(dists):
            if d - (probe_radius + ap.get_vdf_radius(str(system.species[j]))) < -1e-4:
                return True
    return False
def add_adsorbate_simple(system, oms_index, molecule_file):
    """Insert an adsorbate near the open metal site (OMS) at *oms_index*.

    Random placements at distance ads_dist from the OMS are retried until
    one has no vdW overlap with the framework; after 5 failed attempts the
    distance grows by 0.5 Å. Every attempt's merged structure is written to
    '<dist><attempt>.cif'. Returns the placed adsorbate as a Structure.
    """
    ads_dist = 3.0          # current OMS-to-adsorbate target distance (Å)
    ads_vector = []
    overlap = True
    counter = 0             # attempts at the current distance
    print("Initial adsorption site is ", ads_dist, "Å away from OMS.")
    while overlap is True:
        counter += 1
        print("insertion attempts: ", counter)
        # find a position *ads_dist* away from oms
        ads_vector = find_adsorption_site(system,
                                          system.cart_coords[oms_index],
                                          ads_dist)  # cartesian coordinate as an output
        ads_frac, atom_type = adsorbate_placement(system, molecule_file,
                                                  ads_vector)
        overlap = adsorbate_framework_overlap(system, ads_frac, atom_type)
        # Write the candidate (even when overlapping) for inspection.
        ads = Structure(system.lattice, atom_type, ads_frac)
        mof_with_adsorbate = merge_structures(ads, system)
        cif = CifWriter(mof_with_adsorbate)
        cif.write_file(str(ads_dist) + str(counter) + '.cif')
        if overlap is True:
            if counter > 4:
                ads_dist += 0.5  # increase the distance from adsorption site by 0.5 Å
                counter = 0  # reset the counter
                print("New Site is ", ads_dist, "Å away from OMS.")
            else:
                continue
        else:
            break
    # Rebuild the accepted adsorbate as its own Structure.
    mol = Structure(system.lattice, atom_type, ads_frac)
    return mol
def RandomNumberOnUnitSphere():
    """Return (x, y, z) drawn uniformly from the surface of the unit sphere.

    theta is uniform in [0, 2*pi); phi = arccos(2u - 1) makes cos(phi)
    uniform in [-1, 1], which yields a uniform surface distribution.
    (The original initialized unused locals 'thetha' and 'phi'; removed.)
    """
    theta = 2 * PI * np.random.random_sample()
    phi = np.arccos(2 * np.random.random_sample() - 1.0)
    x = np.cos(theta) * np.sin(phi)
    y = np.sin(theta) * np.sin(phi)
    z = np.cos(phi)
    return x, y, z
def find_adsorption_site(system, center, prob_dist):
    """Pick a probe position at fixed distance *prob_dist* from *center*
    (cartesian) that minimizes a repulsive sum(1/r^12) score over all
    framework atoms; the best of 1000 random tries is returned."""
    tries = 1000
    scores = []
    candidates = []
    for _ in range(tries):
        probe_pos = generate_random_position(center, prob_dist)
        probe_frac = system.lattice.get_fractional_coords(probe_pos)
        dists = system.lattice.get_all_distances(probe_frac, system.frac_coords)[0]
        scores.append(sum(1.0 / (r ** 12) for r in dists))
        candidates.append(probe_pos)
    return candidates[scores.index(min(scores))]
def calc_plane(x, y, z):
    """Return the unit normal of the plane through points x, y, z.

    The normal is (y - x) cross (z - x), normalized.
    NOTE(review): the original also computed a plane offset 'd' with
    inconsistent signs (a*x0 - b*x1 + c*x2); it was unused and is dropped.
    """
    v1 = [y[0] - x[0], y[1] - x[1], y[2] - x[2]]
    v2 = [z[0] - x[0], z[1] - x[1], z[2] - x[2]]
    normal = [v1[1] * v2[2] - v1[2] * v2[1],
              v1[2] * v2[0] - v1[0] * v2[2],
              v1[0] * v2[1] - v1[1] * v2[0]]
    return np.array(normal) / np.linalg.norm(normal)
def check_if_plane_on_metal(m_i, indeces, system, tolerance):
    """Return True if some ordered triple of coordinating atoms forms a
    ~180° dihedral with the metal *m_i*, within tolerance['plane_on_metal'].

    The tolerance is set to 12.5 so that ferrocene-type coordination
    spheres (e.g. BUCROH) are detected correctly. Iteration starts at
    index 1 of *indeces* -- presumably index 0 is the metal itself; confirm.
    """
    crit = 180
    tol = tolerance['plane_on_metal']  # 12.5
    n = len(indeces)
    for i in range(1, n):
        for j in range(1, n):
            for k in range(1, n):
                if i == j or i == k or j == k:
                    continue
                dihedral = abs(system.get_dihedral(m_i, indeces[i],
                                                   indeces[j], indeces[k]))
                if abs(dihedral - crit) < tol or abs(dihedral - crit + 180) < tol:
                    return True
    return False
def check_positive(n_list):
    """Return True if any element of *n_list* is > 0, else False.

    The original fell through and implicitly returned None when no element
    was positive; returning False keeps truthiness identical for callers.
    """
    return any(n > 0 for n in n_list)
def check_negative(n_list):
    """Return True if any element of *n_list* is < 0, else False.

    The original fell through and implicitly returned None when no element
    was negative; returning False keeps truthiness identical for callers.
    """
    return any(n < 0 for n in n_list)
def find_other_indeces(indeces, num):
    """Return, in order, every index in range(num) absent from *indeces*."""
    return [i for i in range(0, num) if i not in indeces]
def center_around_metal(system):
    """Re-image every atom of *system* into the periodic image closest to
    the first site (the metal at index 0).

    Returns a new Structure: site 0 copied unchanged, every other site
    shifted by whole lattice translations when that brings it closer to
    the metal.
    """
    center = system.frac_coords[0]
    tmp1 = []
    tmp2 = []
    tmp1.append(str(system.species[0]))
    tmp2.append([system.frac_coords[0][0], system.frac_coords[0][1],
                 system.frac_coords[0][2]])
    system_centered = Structure(system.lattice, tmp1, tmp2)
    for i in range(1, system.num_sites):
        c_i = system.frac_coords[i]
        dist_vector = center-c_i
        dist_vector_r = []
        # Round the fractional offset to whole lattice translations.
        for j in range(0, 3):
            dist_vector_r.append(round(dist_vector[j]))
        dist_before = np.linalg.norm(system.lattice.get_cartesian_coords(center)
                                     - system.lattice.get_cartesian_coords(c_i))
        c_i_centered = c_i+dist_vector_r
        dist_after = np.linalg.norm(system.lattice.get_cartesian_coords(center)
                                    - system.lattice.
                                    get_cartesian_coords(c_i_centered))
        # NOTE(review): dist_after is NOT recomputed after the np.rint retry
        # below, so when the first shift moved the atom farther away both
        # fallbacks fire and the site ends up at its original position.
        # Presumably a deliberate "keep original if no improvement"
        # fallback -- confirm.
        if dist_after > dist_before:
            for j in range(0, 3):
                dist_vector_r[j] = np.rint(dist_vector[j])
            c_i_centered = c_i+dist_vector_r
        if dist_after > dist_before:
            c_i_centered = c_i
        system_centered.append(system.species[i], c_i_centered)
    return system_centered
def merge_structures(s1, s2):
    """Return a single Structure containing all sites of *s1* followed by
    all sites of *s2*. Both must share the same lattice.

    :raises SystemExit: if the lattices differ.
    """
    # Check the lattices first instead of after collecting all sites,
    # and fix the typo in the error message ('merger' -> 'merge').
    if s1.lattice != s2.lattice:
        sys.exit('Trying to merge two structures with different lattices')
    species = []
    positions = []
    for e, c in zip(s1.species, s1.frac_coords):
        species.append(e)
        positions.append(c)
    for e, c in zip(s2.species, s2.frac_coords):
        species.append(e)
        positions.append(c)
    return Structure(s1.lattice, species, positions)
def get_metal_surface_areas(metal, system):
    """Return the maximum surface-area result over all metal sites.

    Each element of sa_list is the (accessible_fraction, area) tuple
    returned by get_metal_surface_area, so max() compares tuples
    lexicographically, i.e. by accessible fraction first.
    """
    sa_list = []
    for m, m_coor in enumerate(metal.frac_coords):
        # Build a substructure of atoms near this metal site.
        # NOTE(review): the original comment said "withn 7.0" but the
        # cutoff actually passed is 5.0 -- confirm which is intended.
        sub_system = make_subsystem(m_coor, system, 5.0)
        sa_list.append(get_metal_surface_area(m_coor,
                                              str(metal.species[m]),
                                              sub_system))
    s_max = max(sa_list)
    return s_max
def make_subsystem(coord, system, dist_check):
    """Return a Structure with every atom of *system* closer than
    *dist_check* to fractional position *coord*, excluding atoms within
    0.1 (the central metal itself)."""
    dists = system.lattice.get_all_distances(coord, system.frac_coords)[0]
    elements = []
    coords = []
    for i, d in enumerate(dists):
        if 0.1 < d < dist_check:
            elements.append(system.species[i])
            coords.append(system.frac_coords[i])
    return Structure(system.lattice, elements, coords)
def get_metal_surface_area(fcenter, metal_element, system):
    """Monte-Carlo estimate of the accessible surface area of a metal atom.

    Samples mc_tries random points on the sphere of radius
    vdw(metal) + vdw_probe around the metal; points that do not overlap any
    atom of *system* count as accessible.

    :param fcenter: fractional coordinates of the metal.
    :param metal_element: element symbol used for the vdW radius lookup.
    :param system: Structure of nearby framework atoms (metal excluded).
    :returns: (accessible_fraction, accessible_area)
    """
    center = system.lattice.get_cartesian_coords(fcenter)
    vdw_probe = 1.86  # probe vdW radius; use 0.0 to get the bare metal sphere
    metal_full_surface_area = sphere_area(metal_element, vdw_probe)
    count = 0
    mc_tries = 5000
    # NOTE(review): 'test.txt' looks like leftover debug output; the write
    # is kept for behavioral parity, but the file is now opened via a
    # context manager so the handle is always closed (the original leaked it).
    with open('test.txt', 'w') as params_file:
        for _ in range(mc_tries):
            dist = ap.get_vdf_radius(metal_element) + vdw_probe
            pos = generate_random_position(center, dist)
            print('xx', pos[0], pos[1], pos[2], file=params_file)
            pos_f = system.lattice.get_fractional_coords(pos)
            if not check_for_overlap(center, pos_f, system, vdw_probe):
                count += 1
    sa_frac = float(count) / float(mc_tries)
    sa = metal_full_surface_area * sa_frac
    return sa_frac, sa
def check_for_overlap(center, pos, system, r_probe):
    """Return True if a probe of radius *r_probe* at fractional position
    *pos* overlaps any atom of *system* (sum-of-vdW-radii criterion with a
    small numerical tolerance)."""
    dists = system.lattice.get_all_distances(pos, system.frac_coords)[0]
    for i, d in enumerate(dists):
        if d - (r_probe + ap.get_vdf_radius(str(system.species[i]))) < -1e-4:
            return True
    return False
def check_if_center(center, test_coords, system):
    """Return True when *test_coords* lies within 0.1 of *center*, i.e. it
    is the central atom itself."""
    dists = system.lattice.get_all_distances(test_coords, center)
    return True if dists[0] < 0.1 else False
def sphere_area(metal_element, probe_r):
    """Surface area of the sphere of radius vdw(metal_element) + probe_r."""
    radius = ap.get_vdf_radius(metal_element) + probe_r
    return 4 * math.pi * radius * radius
def generate_random_position(center, dist):
    """Return a cartesian point at distance *dist* from *center* in a
    uniformly random direction."""
    direction = generate_random_vector()
    return [d * dist + c for d, c in zip(direction, center)]
def generate_random_vector():
    """Return a random unit vector, sampled by the Marsaglia (1972)
    rejection method: draw (zeta1, zeta2) uniformly in the unit disc and
    map to the sphere."""
    while True:
        zeta1 = 1.0 - 2.0 * random.random()
        zeta2 = 1.0 - 2.0 * random.random()
        zeta_sq = zeta1 * zeta1 + zeta2 * zeta2
        # Accept only points inside the unit disc.
        if zeta_sq <= 1.0:
            break
    ranh = 2.0 * math.sqrt(1.0 - zeta_sq)
    return [zeta1 * ranh, zeta2 * ranh, 1.0 - 2.0 * zeta_sq]
def find_coordination_sequence(center, structure, all_coord_spheres):
"""computes the coordination sequence up to the Nth coordination shell
as input it takes the MOF as a pymatgen Structure and the index of the
center metal in the Structure
"""
# dist_all = structure.lattice.get_all_distances(structure.frac_coords,
# structure.frac_coords)
# all_coord_spheres = []
# for a in range(0, len(structure)):
# all_coord_spheres.append(find_coord_sphere_using_dist(a, structure, dist_all[a])[0])
# The shell_list is a set with the index of each atom and its unit
# cell index realtive to a cetral unit cell
shell_list = {(center, (0, 0, 0))}
shell_list_prev = set([])
all_shells = set(shell_list)
n_shells = 6
cs = []
ele = [(str(structure.species[center]))]
coords = [[structure.frac_coords[center][0],
structure.frac_coords[center][1],
structure.frac_coords[center][2]]]
# coordination_structure = (Structure(structure.lattice, ele, coords))
coord_sphere_time = 0.0
count_total = 0
for n in range(0, n_shells):
c_set = set([])
for a_uc in shell_list:
a = a_uc[0]
lattice = a_uc[1]
t0 = time.time()
#TODO make finding coordination sphere faster
# coord_sphere = find_coord_sphere_using_dist(a, structure,
# dist_all[a])[0]
# print(a)
# print(coord_sphere)
# print(find_coord_sphere_using_dist(a, structure,
# dist_all[a])[1])
# input()
coord_sphere = all_coord_spheres[a]
count_total += 1
t1 = time.time()
coord_sphere_time += t1-t0
coord_sphere_with_uc = | |
type: (...) -> Iterable["models.CollectionOfProfilePhoto"]
"""Get photos from users.
Get photos from users.
:param user_id: key: id of user.
:type user_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~users.models.Enum93]
:param select: Select properties to be returned.
:type select: list[str or ~users.models.Enum94]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfProfilePhoto or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~users.models.CollectionOfProfilePhoto]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfProfilePhoto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_photos.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfProfilePhoto', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_photos.metadata = {'url': '/users/{user-id}/photos'} # type: ignore
def create_photos(
    self,
    user_id,  # type: str
    body,  # type: "models.MicrosoftGraphProfilePhoto"
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphProfilePhoto"
    """Create new navigation property to photos for users.

    POSTs the serialized *body* to /users/{user-id}/photos and
    deserializes the created photo from the response.

    :param user_id: key: id of user.
    :type user_id: str
    :param body: New navigation property.
    :type body: ~users.models.MicrosoftGraphProfilePhoto
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphProfilePhoto, or the result of cls(response)
    :rtype: ~users.models.MicrosoftGraphProfilePhoto
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphProfilePhoto"]
    # Callers may extend/override the default status-code -> exception map.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.create_photos.metadata['url']  # type: ignore
    path_format_arguments = {
        'user-id': self._serialize.url("user_id", user_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters (none for this operation)
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphProfilePhoto')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 201 Created is the only accepted success status for this POST.
    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphProfilePhoto', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
create_photos.metadata = {'url': '/users/{user-id}/photos'}  # type: ignore
def get_photos(
    self,
    user_id,  # type: str
    profile_photo_id,  # type: str
    select=None,  # type: Optional[List[Union[str, "models.Enum95"]]]
    expand=None,  # type: Optional[List[str]]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphProfilePhoto"
    """Get photos from users.

    GETs a single profile photo from /users/{user-id}/photos/{profilePhoto-id}.

    :param user_id: key: id of user.
    :type user_id: str
    :param profile_photo_id: key: id of profilePhoto.
    :type profile_photo_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~users.models.Enum95]
    :param expand: Expand related entities.
    :type expand: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphProfilePhoto, or the result of cls(response)
    :rtype: ~users.models.MicrosoftGraphProfilePhoto
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphProfilePhoto"]
    # Callers may extend/override the default status-code -> exception map.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Construct URL
    url = self.get_photos.metadata['url']  # type: ignore
    path_format_arguments = {
        'user-id': self._serialize.url("user_id", user_id, 'str'),
        'profilePhoto-id': self._serialize.url("profile_photo_id", profile_photo_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters: OData $select / $expand are comma-joined lists.
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 OK is the only accepted success status for this GET.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphProfilePhoto', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_photos.metadata = {'url': '/users/{user-id}/photos/{profilePhoto-id}'}  # type: ignore
def update_photos(
    self,
    user_id,  # type: str
    profile_photo_id,  # type: str
    body,  # type: "models.MicrosoftGraphProfilePhoto"
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Update the navigation property photos in users.

    PATCHes the serialized *body* to /users/{user-id}/photos/{profilePhoto-id}.

    :param user_id: key: id of user.
    :type user_id: str
    :param profile_photo_id: key: id of profilePhoto.
    :type profile_photo_id: str
    :param body: New navigation property values.
    :type body: ~users.models.MicrosoftGraphProfilePhoto
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Callers may extend/override the default status-code -> exception map.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.update_photos.metadata['url']  # type: ignore
    path_format_arguments = {
        'user-id': self._serialize.url("user_id", user_id, 'str'),
        'profilePhoto-id': self._serialize.url("profile_photo_id", profile_photo_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters (none for this operation)
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphProfilePhoto')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 204 No Content is the only accepted success status for this PATCH.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

update_photos.metadata = {'url': '/users/{user-id}/photos/{profilePhoto-id}'}  # type: ignore
def delete_photos(
    self,
    user_id,  # type: str
    profile_photo_id,  # type: str
    if_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Delete navigation property photos for users.

    Issues DELETE on /users/{user-id}/photos/{profilePhoto-id}, optionally
    conditional on the supplied ETag via the If-Match header.

    :param user_id: key: id of user.
    :type user_id: str
    :param profile_photo_id: key: id of profilePhoto.
    :type profile_photo_id: str
    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Callers may extend/override the default status-code -> exception map.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Construct URL
    url = self.delete_photos.metadata['url']  # type: ignore
    path_format_arguments = {
        'user-id': self._serialize.url("user_id", user_id, 'str'),
        'profilePhoto-id': self._serialize.url("profile_photo_id", profile_photo_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters (none for this operation)
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers: If-Match only when an ETag was provided.
    header_parameters = {}  # type: Dict[str, Any]
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 204 No Content is the only accepted success status for this DELETE.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

delete_photos.metadata = {'url': '/users/{user-id}/photos/{profilePhoto-id}'}  # type: ignore
def list_registered_devices(
self,
user_id, # type: str
orderby=None, # type: Optional[List[Union[str, "models.Enum96"]]]
select=None, # type: Optional[List[Union[str, "models.Enum97"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfDirectoryObject4"]
"""Get registeredDevices from users.
Get registeredDevices from users.
:param user_id: key: | |
<filename>src/mushme.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from src import app
import os
import shutil
from flask import Flask, render_template, session, request, flash, url_for, redirect
from Forms import ContactForm, LoginForm, editForm, ReportForm, CommentForm, searchForm, AddPlaylist
from flask.ext.mail import Message, Mail
from werkzeug import secure_filename
from werkzeug import SharedDataMiddleware
from api import API
from songs import SONG
from playlist import playlist
from admin import admin
from artist import artist
import pymysql
import hashlib
from flask import g
# Application wiring: mail, feature blueprints and upload configuration.
mail = Mail()
mail.init_app(app)
# Blueprint for the collector script's API endpoints.
app.register_blueprint(API);
# Blueprint for song pages.
app.register_blueprint(SONG);
# Blueprint for playlist pages.
app.register_blueprint(playlist);
# Blueprint for the admin pages.
app.register_blueprint(admin);
# Blueprint for the artist pages.
app.register_blueprint(artist);
# Profile pictures are stored under src/static/img/ProfilePic/.
UPLOAD_FOLDER = "img/ProfilePic/"
# NOTE(review): ALLOWED_EXTENSIONS is defined but not enforced in this chunk
# -- confirm upload handlers actually check it.
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app.config['UPLOAD_FOLDER'] = 'src/static/' + UPLOAD_FOLDER
@app.route('/')
def index():
    """Landing page: clear all auth-related session flags, then render the
    homepage with fresh login and signup forms."""
    for flag in ("login", "signup", "logged_in"):
        session[flag] = False
    return render_template('homepage/index.html',
                           form1=LoginForm(prefix='form1'),
                           form2=ContactForm(prefix='form2'))
# For database connections.
@app.before_request
def before_request():
    # Open one MySQL connection per request and stash it on flask.g;
    # teardown_request() closes it after the response is sent.
    # NOTE(review): '<PASSWORD>' is a hard-coded placeholder credential --
    # this must come from configuration, not source.
    g.conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='<PASSWORD>', db='MuShMe', charset='utf8')
    g.database = g.conn.cursor()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request DB connection opened in before_request().

    Uses getattr so a request where before_request() itself failed (and
    therefore never set g.conn) does not raise AttributeError here.
    """
    conn = getattr(g, 'conn', None)
    if conn is not None:
        conn.close()
@app.route('/login', methods=['POST'])
def login():
    """Authenticate a user and populate the session.

    On success, caches id/name/privilege/avatar/DOB in the session and
    redirects to the user's profile page; on failure, re-renders the
    homepage with a generic error (no hint whether email or password
    was wrong).
    """
    session["login"] = True
    session["signup"] = False
    if request.method != 'POST':
        return redirect(url_for('index'))
    loginform = LoginForm(request.form, prefix='form1')
    if loginform.validate_on_submit():
        pwdhash = hashlib.sha1(loginform.password.data).hexdigest()
        # Parameterized queries throughout: never %-interpolate user input
        # into SQL (the previous code was injectable).
        check_login = g.database.execute(
            """SELECT User_id from MuShMe.entries WHERE Email_id=%s AND Pwdhash=%s""",
            (loginform.email.data, pwdhash))
        if check_login:
            uid = g.database.fetchone()[0]
            g.database.execute(
                """UPDATE MuShMe.entries SET Last_Login=CURRENT_TIMESTAMP() WHERE User_id=%s""",
                (uid,))
            g.conn.commit()
            session['userid'] = uid
            g.database.execute("""SELECT Username from MuShMe.entries WHERE User_id=%s""", (uid,))
            session['UserName'] = g.database.fetchone()[0]
            g.database.execute("""SELECT Privilege FROM MuShMe.entries WHERE User_id=%s""", (uid,))
            session['privilege'] = g.database.fetchone()[0]
            g.database.execute("""SELECT Profile_pic FROM MuShMe.entries WHERE User_id=%s""", (uid,))
            session['profilepic'] = g.database.fetchone()[0]
            # Historical behavior: 'Name' stores the raw row tuple and 'dob'
            # the str() of a tuple -- preserved for template compatibility.
            g.database.execute("""SELECT Name from MuShMe.entries WHERE User_id=%s""", (uid,))
            session["Name"] = g.database.fetchone()
            g.database.execute("""SELECT DOB from MuShMe.entries WHERE User_id=%s""", (uid,))
            session["dob"] = str(g.database.fetchone())
            session['logged_in'] = True
            return redirect(url_for('userProfile', userid=uid))
        else:
            flash("Incorrect Email-Id or Password")
    else:
        flash("Incorrect Email-Id or Password")
    return render_template('homepage/index.html', form1=loginform, form2=ContactForm(prefix='form2'))
def flash_errors(form):
    """Flash every validation error on *form*, labelled with its field name."""
    for field_name, messages in form.errors.items():
        label = getattr(form, field_name).label.text
        for message in messages:
            flash(u"Error in the %s field - %s" % (label, message))
@app.route('/signup', methods=['POST'])
def signup():
    """Register a new account, create its default playlist and log it in."""
    session["signup"] = True
    session["login"] = False
    contactform = ContactForm(request.form, prefix='form2')
    if contactform.validate_on_submit():
        if validate(contactform.email.data, contactform.username.data):
            pwdhash = hashlib.sha1(contactform.password.data).hexdigest()
            # Parameterized statements: never %-interpolate user input into
            # SQL (the previous code was injectable).
            check_signup = g.database.execute(
                """INSERT into MuShMe.entries (Username,Email_id,Pwdhash,Name) VALUES (%s,%s,%s,%s)""",
                (contactform.username.data, contactform.email.data, pwdhash, contactform.name.data))
            if check_signup:
                g.conn.commit()
                g.database.execute(
                    """SELECT User_id from MuShMe.entries WHERE Email_id=%s AND Pwdhash=%s""",
                    (contactform.email.data, pwdhash))
                uid = g.database.fetchone()[0]
                session['userid'] = uid
                g.database.execute("""SELECT Username from MuShMe.entries WHERE User_id=%s""", (uid,))
                session['UserName'] = g.database.fetchone()[0]
                g.database.execute("""SELECT Privilege FROM MuShMe.entries WHERE User_id=%s""", (uid,))
                session['privilege'] = g.database.fetchone()[0]
                g.database.execute("""SELECT Profile_Pic FROM MuShMe.entries WHERE User_id=%s""", (uid,))
                session['profilepic'] = g.database.fetchone()[0]
                session['logged_in'] = True
                # Historical behavior: 'Name' keeps the raw row tuple and
                # 'dob' the str() of a tuple -- preserved for the templates.
                g.database.execute("""SELECT Name from MuShMe.entries WHERE User_id=%s""", (uid,))
                session["Name"] = g.database.fetchone()
                g.database.execute("""SELECT DOB from MuShMe.entries WHERE User_id=%s""", (uid,))
                session["dob"] = str(g.database.fetchone())
                # Every new user starts with a default collection playlist.
                newPlaylist = session['UserName'] + ' default collection'
                g.database.execute(
                    """INSERT INTO MuShMe.playlists (Playlist_name, User_id) VALUES (%s,%s)""",
                    (newPlaylist, uid))
                g.conn.commit()
                return redirect(url_for('userProfile', userid=uid))
            else:
                flash("Please enter valid data !")
        else:
            flash("Username or Email has been taken")
    else:
        flash_errors(contactform)
    return render_template('homepage/index.html', form1=LoginForm(prefix='form1'), form2=contactform)
def validate(email, username):
    """Return True when neither the e-mail address nor the username is
    already registered.

    Uses parameterized queries (the previous version interpolated the
    user-supplied values straight into the SQL string).
    """
    email_taken = g.database.execute(
        """ SELECT * from MuShMe.entries where Email_id=%s """, (email,))
    name_taken = g.database.execute(
        """ SELECT * from MuShMe.entries where Username=%s """, (username,))
    return not (email_taken or name_taken)
@app.route('/user/<userid>',methods=['GET'])
def userProfile(userid):
    """Render a user's profile page; anonymous visitors get the 404 page.

    session.get() is used so a brand-new session (no 'logged_in' key yet)
    cannot raise KeyError, which the old `session['logged_in'] == False`
    check did.
    """
    if not session.get('logged_in'):
        return render_template('error.html'), 404
    if request.method == 'GET':
        User = getUserData(userid)  # fetched once, reused below
        return render_template('userprofile/index.html', userid=userid,
            form4=CommentForm(prefix='form4'), form3=editForm(prefix='form3'),
            form6=searchForm(prefix='form6'), form5=ReportForm(prefix='form5'), form7=AddPlaylist(prefix='form7'),
            friend=getFriend(userid), playlist=getPlaylist(userid), User=User, Comments=getComments(userid),
            songs=getSong(userid), Recommends=getRecommend(userid), Requests=getRequest(userid), frnd=checkFriend(userid, User),
            AllComments=getAllComments(userid), AllRecommends=getAllRecommend(userid))
def checkFriend(userid,User):
    """Return True if `userid` is (or is viewing) a friend, else False.

    NOTE(review): the first `for f in friendName:` loop returns on its very
    first iteration (every branch returns), so only the first friend row is
    ever examined and everything after that loop is unreachable whenever
    friendName is non-empty -- presumably a bug; confirm intended semantics.
    NOTE(review): user-supplied values are %-interpolated into SQL here
    (injection risk).
    """
    friendName =[]
    # friendships where userid is the first endpoint
    g.database.execute("""SELECT User_id2 from friends WHERE User_id1="%s" """ % (userid))
    for user in g.database.fetchall():
        data = {}
        g.database.execute("""SELECT Username, User_id from MuShMe.entries WHERE User_id="%s" """ % user[0])
        for a in g.database.fetchall():
            data['friendname']=a[0]
            data['friendid']=a[1]
            friendName.append(data)
    for f in friendName:
        # check the relation in both orientations
        a=g.database.execute("""SELECT User_id2 from friends WHERE User_id1="%s" and User_id2="%s" """ % (userid,f['friendid']))
        b=g.database.execute("""SELECT User_id2 from friends WHERE User_id2="%s" and User_id1="%s" """ % (userid,f['friendid']))
        if a or b:
            return True
        elif userid == f['friendid']:
            # viewing one's own profile counts as "friend"
            return True
        else:
            return False
    # Unreachable when friendName is non-empty (see note above): mirror pass
    # for friendships where userid is the second endpoint.
    g.database.execute("""SELECT User_id1 from friends WHERE User_id2="%s" """ % userid)
    for user in g.database.fetchall():
        data = {}
        g.database.execute("""SELECT Username, User_id from MuShMe.entries WHERE User_id="%s" """ % user[0])
        for a in g.database.fetchall():
            data['friendname']=a[0]
            data['friendid']=a[1]
            friendName.append(data)
    for f in friendName:
        a=g.database.execute("""SELECT User_id2 from friends WHERE User_id2="%s" and User_id1="%s" """ % (userid,f['friendid']))
        b=g.database.execute("""SELECT User_id2 from friends WHERE User_id1="%s" and User_id2="%s" """ % (userid,f['friendid']))
        if a or b:
            return True
        elif userid == f['friendid']:
            return True
        else:
            return False
def getAllComments(userid):
    """Return every comment on `userid`'s wall, newest first.

    Each item: {'comment', 'userid' (author id), 'username', 'commentid'}.
    All queries are parameterized (the old code %-interpolated ids).
    """
    g.database.execute(
        "SELECT Comment_id FROM user_comments WHERE User_id=%s ORDER BY Comment_id DESC",
        (userid,))
    commentids = g.database.fetchall()
    retval = []
    for (cid,) in commentids:
        g.database.execute("SELECT Comment, User_id FROM comments WHERE Comment_id=%s", (cid,))
        comment, author = g.database.fetchone()
        g.database.execute("SELECT Username FROM entries WHERE User_id=%s", (author,))
        username = g.database.fetchone()[0]
        retval.append({'comment': comment, 'userid': author,
                       'username': username, 'commentid': cid})
    return retval
def getComments(userid):
    """Return the five newest comments on `userid`'s wall.

    Same shape as getAllComments(), limited to 5 rows for the profile
    page preview. All queries are parameterized.
    """
    g.database.execute(
        "SELECT Comment_id FROM user_comments WHERE User_id=%s ORDER BY Comment_id DESC LIMIT 5",
        (userid,))
    commentids = g.database.fetchall()
    retval = []
    for (cid,) in commentids:
        g.database.execute("SELECT Comment, User_id FROM comments WHERE Comment_id=%s", (cid,))
        comment, author = g.database.fetchone()
        g.database.execute("SELECT Username FROM entries WHERE User_id=%s", (author,))
        username = g.database.fetchone()[0]
        retval.append({'comment': comment, 'userid': author,
                       'username': username, 'commentid': cid})
    return retval
def getFriend(userid):
    """Return [{'friendname', 'friendid', 'friendpic'}, ...] covering both
    orientations of the friends relation (userid as either endpoint).

    Fixes: parameterized queries (was %-interpolated), removed a leftover
    Python 2 debug print, and the two symmetric passes are folded into one
    loop over the two queries.
    """
    friends = []
    for query in ("""SELECT User_id2 from friends WHERE User_id1=%s""",
                  """SELECT User_id1 from friends WHERE User_id2=%s"""):
        g.database.execute(query, (userid,))
        for row in g.database.fetchall():
            g.database.execute(
                """SELECT Username, User_id, Profile_pic from MuShMe.entries WHERE User_id=%s""",
                (row[0],))
            for name, fid, pic in g.database.fetchall():
                friends.append({'friendname': name, 'friendid': fid, 'friendpic': pic})
    return friends
def getPlaylist(userid):
    """Return [{'pname', 'pid'}, ...] for every playlist owned by `userid`.

    Parameterized query (the old code %-interpolated the id from the URL).
    """
    g.database.execute(
        """SELECT Playlist_name,Playlist_id from MuShMe.playlists WHERE User_id=%s""",
        (userid,))
    return [{'pname': name, 'pid': pid} for name, pid in g.database.fetchall()]
def getSong(userid):
    """Return up to five of the user's songs: {'songname','songid','art'}.

    Fixes: parameterized queries and removal of a spurious commit() that
    followed a read-only SELECT.
    """
    songName = []
    g.database.execute("""SELECT Song_id from MuShMe.user_song WHERE User_id=%s LIMIT 5""",
                       (userid,))
    for song in g.database.fetchall():
        g.database.execute(
            """SELECT Song_title,Song_id,Song_Album from MuShMe.songs WHERE Song_id=%s""",
            (song[0],))
        for title, sid, album in g.database.fetchall():
            data = {'songname': title, 'songid': sid}
            # album art lives on the albums table, keyed by Song_Album
            g.database.execute("SELECT Album_pic FROM albums WHERE Album_id=%s", (album,))
            data['art'] = g.database.fetchone()[0]
            songName.append(data)
    return songName
def getUserData(userid):
    """Return a (normally one-element) list of profile dicts for `userid`.

    Keys: username, userid, profilepic, privilege, email, name, dob.
    Parameterized query (the old code %-interpolated the id).
    """
    User = []
    g.database.execute(
        """ SELECT Username,User_id,Profile_pic,Privilege,Email_id,Name,DOB from entries where User_id=%s """,
        (userid,))
    for a in g.database.fetchall():
        User.append({
            'username': a[0],
            'userid': a[1],
            'profilepic': a[2],
            'privilege': a[3],
            'email': a[4],
            'name': a[5],
            'dob': str(a[6]),  # stringified for session/template use
        })
    return User
def getAllRecommend(userid):
    """Return every recommendation sent to `userid`.

    Each item carries the sender's id/name plus, when present, a 'song'
    list (title/album/genre/publisher) or a 'playlist' list (name/id and
    owner info). NOTE(review): values are %-interpolated into SQL
    throughout this function (injection risk).
    """
    recommend =[]
    g.database.execute(""" SELECT Recommend_id,User_id_from,User_id_to from recommend where User_id_to="%s" """ % userid)
    for a in g.database.fetchall():
        data={}
        data['rid']=a[0]
        data['userfrom'] = a[1]
        data['userto']=a[2]
        # resolve the sender's display name
        g.database.execute(""" SELECT Username from entries where User_id='%s' """ % a[1])
        data['userfromname'] = g.database.fetchone()[0]
        # a recommendation references either a song or a playlist
        check_song = g.database.execute(""" SELECT Song_id from recommend_songs where Recommend_id="%s" """ % a[0])
        if check_song:
            songid = g.database.fetchone()[0]
            data['song'] = []
            g.database.execute(""" SELECT Song_title,Song_Album,Genre,Publisher from songs where Song_id="%s" """ % songid)
            for song in g.database.fetchall():
                d = {}
                d['title']=song[0]
                d['album'] = song[1]
                d['genre'] = song[2]
                d['publisher'] = song[3]
                d['songid'] = songid
                data['song'].append(d)
        check_playlist = g.database.execute(""" SELECT Playlist_id from recommend_playlists where Recommend_id="%s" """ % a[0])
        if check_playlist:
            playlistid = g.database.fetchone()[0]
            data['playlist'] = []
            g.database.execute(""" SELECT Playlist_name,Playlist_id,User_id from playlists where Playlist_id="%s" """ % playlistid)
            for p in g.database.fetchall():
                d= {}
                d['pname']=p[0]
                d['pid']=p[1]
                # attach the playlist owner's identity
                g.database.execute(""" SELECT Username, Name,User_id from MuShMe.entries WHERE User_id="%s" """ % p[2])
                for k in g.database.fetchall():
                    d['username']=k[0]
                    d['uname']=k[1]
                    d['userid']=k[2]
                data['playlist'].append(d)
        recommend.append(data)
    return recommend
def getRecommend(userid):
    """Return up to five recommendations sent to `userid`.

    Same shape as getAllRecommend(), capped at 5 for the profile preview,
    with album art ('songart') attached to each song.
    NOTE(review): values are %-interpolated into SQL (injection risk) and
    a Python 2 debug print statement is left in below.
    """
    recommend =[]
    g.database.execute(""" SELECT Recommend_id,User_id_from,User_id_to from recommend where User_id_to="%s" LIMIT 5 """ % userid)
    for a in g.database.fetchall():
        data={}
        data['rid']=a[0]
        data['userfrom'] = a[1]
        data['userto']=a[2]
        g.database.execute(""" SELECT Username from entries where User_id='%s' """ % a[1])
        data['userfromname'] = g.database.fetchone()[0]
        # leftover debug output -- remove once verified
        print data['userfromname']
        check_song = g.database.execute(""" SELECT Song_id from recommend_songs where Recommend_id="%s" """ % a[0])
        if check_song:
            songid = g.database.fetchone()[0]
            data['song'] = []
            g.database.execute(""" SELECT Song_title,Song_Album,Genre,Publisher from songs where Song_id="%s" """ % songid)
            for song in g.database.fetchall():
                d = {}
                d['title']=song[0]
                d['album'] = song[1]
                d['genre'] = song[2]
                d['publisher'] = song[3]
                d['songid'] = songid
                d['songart'] = getSongArt(songid)
                data['song'].append(d)
        check_playlist = g.database.execute(""" SELECT Playlist_id from recommend_playlists where Recommend_id="%s" """ % a[0])
        if check_playlist:
            playlistid = g.database.fetchone()[0]
            data['playlist'] = []
            g.database.execute(""" SELECT Playlist_name,Playlist_id,User_id from playlists where Playlist_id="%s" """ % playlistid)
            for p in g.database.fetchall():
                d= {}
                d['pname']=p[0]
                d['pid']=p[1]
                g.database.execute(""" SELECT Username, Name,User_id from MuShMe.entries WHERE User_id="%s" """ % p[2])
                for k in g.database.fetchall():
                    d['username']=k[0]
                    d['uname']=k[1]
                    d['userid']=k[2]
                data['playlist'].append(d)
        recommend.append(data)
    return recommend
def getRequest(userid):
request =[]
g.database.execute(""" SELECT Request_id,Request_from,Request_to,Status from requests where Request_to="%s" """ % userid)
for a in g.database.fetchall():
data={}
data['reqid']=a[0]
data['reqfrom'] = a[1]
data['reqto']=a[2]
data['status']=a[3]
data['reqfromuser'] = []
g.database.execute(""" | |
<gh_stars>0
#!/usr/bin/env python
'''
log analysis program
<NAME> December 2014
'''
import sys, struct, time, os, datetime
import math, re
import Queue
import fnmatch
import threading, multiprocessing
from math import *
from MAVProxy.modules.lib import rline
from MAVProxy.modules.lib import wxconsole
from MAVProxy.modules.lib import grapher
from MAVProxy.modules.lib import mavmemlog
from pymavlink.mavextra import *
from MAVProxy.modules.lib.mp_menu import *
import MAVProxy.modules.lib.mp_util as mp_util
from pymavlink import mavutil
from MAVProxy.modules.lib.mp_settings import MPSettings, MPSetting
from MAVProxy.modules.lib import wxsettings
from MAVProxy.modules.lib.graphdefinition import GraphDefinition
from lxml import objectify
import pkg_resources
# Global var to hold the GUI menu element; created by setup_file_menu()
# and extended with log-dependent entries in setup_menus().
TopMenu = None
class MEStatus(object):
    '''Minimal stand-in for a mavproxy module status object.

    MAVExplorer reuses mavproxy components that expect a status object
    carrying a ``msgs`` dictionary of the latest messages by type.
    '''
    def __init__(self):
        # latest message of each type, keyed by message name
        self.msgs = dict()
class MEState(object):
    '''holds state of MAVExplorer'''
    def __init__(self):
        # command lines fed from the input thread to main_loop()
        self.input_queue = Queue.Queue()
        # readline-style prompt wrapper; assigned after construction
        self.rl = None
        # GUI console window (created immediately -- has UI side effects)
        self.console = wxconsole.MessageConsole(title='MAVExplorer')
        self.exit = False
        self.status = MEStatus()
        # user-tunable graphing options (editable via the Settings dialog)
        self.settings = MPSettings(
            [ MPSetting('marker', str, '+', 'data marker', tab='Graph'),
              MPSetting('condition', str, None, 'condition'),
              MPSetting('xaxis', str, None, 'xaxis'),
              MPSetting('linestyle', str, None, 'linestyle'),
              MPSetting('show_flightmode', bool, True, 'show flightmode'),
              MPSetting('legend', str, 'upper left', 'legend position'),
              MPSetting('legend2', str, 'upper right', 'legend2 position')
              ]
            )
        # in-memory log; set by loadfile()
        self.mlog = None
        self.command_map = command_map
        # tab-completion hints for the command line
        self.completions = {
            "set"       : ["(SETTING)"],
            "condition" : ["(VARIABLE)"],
            "graph"     : ['(VARIABLE) (VARIABLE) (VARIABLE) (VARIABLE) (VARIABLE) (VARIABLE)'],
            "map"       : ['(VARIABLE) (VARIABLE) (VARIABLE) (VARIABLE) (VARIABLE)']
            }
        self.aliases = {}
        self.graphs = []
        # one bool per flightmode: True = include in graphs/maps
        self.flightmode_selections = []
        self.last_graph = GraphDefinition('Untitled', '', '', [], None)
def have_graph(name):
    '''return true if we have a graph of the given name'''
    return any(g.name == name for g in mestate.graphs)
def menu_callback(m):
    '''Dispatch a GUI menu selection.

    Menu return keys are a small protocol:
      '# <cmd>'  -- run <cmd> through the normal command processor,
                    appending the file-dialog result when a handler ran
      'menuSettings' -- open the settings dialog
      'mode-<n>' -- toggle inclusion of flightmode n
      'quit'     -- shut down
    Fix: the two Python 2 print *statements* are normalized to the
    function-call form already used elsewhere in this file (identical
    output on Python 2 for a single argument, and valid Python 3).
    '''
    if m.returnkey.startswith('# '):
        cmd = m.returnkey[2:]
        if m.handler is not None:
            if m.handler_result is None:
                return
            cmd += m.handler_result
        process_stdin(cmd)
    elif m.returnkey == 'menuSettings':
        wxsettings.WXSettings(mestate.settings)
    elif m.returnkey.startswith("mode-"):
        idx = int(m.returnkey[5:])
        mestate.flightmode_selections[idx] = m.IsChecked()
    elif m.returnkey.startswith("loadLog"):
        print("File: " + m.returnkey[8:])
    elif m.returnkey == 'quit':
        mestate.console.close()
        mestate.exit = True
        print("Exited. Press Enter to continue.")
        sys.exit(0)
    else:
        print('Unknown menu selection: %s' % m.returnkey)
def flightmode_menu():
    '''Build one checkbox menu entry per flightmode span in the log,
    initialising the matching selection flag to False.'''
    entries = []
    for idx, (mode, t1, t2) in enumerate(mestate.mlog.flightmode_list()):
        label = "%s %us" % (mode, (t2 - t1))
        entries.append(MPMenuCheckbox(label, label, 'mode-%u' % idx))
        mestate.flightmode_selections.append(False)
    return entries
def graph_menus():
    '''Build the (possibly nested) Graphs menu tree; '/' in a graph name
    denotes submenu levels, and each leaf triggers "# graph :<index>".'''
    menu = MPMenuSubMenu('Graphs', [])
    for idx, graph in enumerate(mestate.graphs):
        parts = graph.name.split('/')
        leaf = parts[-1]
        menu.add_to_submenu(parts[:-1], MPMenuItem(leaf, leaf, '# graph :%u' % idx))
    return menu
def setup_file_menu():
    '''Create the initial menu bar with only the MAVExplorer menu
    (Settings / Open / Quit); setup_menus() adds the rest once a log
    is loaded.'''
    global TopMenu
    TopMenu = MPMenuTop([])
    TopMenu.add(MPMenuSubMenu('MAVExplorer',
                items=[MPMenuItem('Settings', 'Settings', 'menuSettings'),
                       MPMenuItem('&Open\tCtrl+O', 'Open Log', '# loadLog ',
                                  handler=MPMenuCallFileDialog(
                                      flags=('open',),
                                      title='Logfile Load',
                                      wildcard='*.tlog;*.log;*.BIN;*.bin')),
                       MPMenuItem('&Quit\tCtrl+Q', 'Quit', 'quit')]))
    mestate.console.set_menu(TopMenu, menu_callback)
def setup_menus():
    '''setup console menus'''
    global TopMenu
    # Extend the base menu built by setup_file_menu() with log-dependent
    # entries: display actions, the graph tree and flightmode checkboxes.
    TopMenu.add(MPMenuSubMenu('Display',
                items=[MPMenuItem('Map', 'Map', '# map'),
                       MPMenuItem('Save Graph', 'Save', '# save'),
                       MPMenuItem('Reload Graphs', 'Reload', '# reload')]))
    TopMenu.add(graph_menus())
    TopMenu.add(MPMenuSubMenu('FlightMode', items=flightmode_menu()))
    mestate.console.set_menu(TopMenu, menu_callback)
def expression_ok(expression):
    '''Return True if every field in the expression evaluates against the
    currently loaded messages.

    Fixes: the old code used a local variable named after the function
    (shadowing it), and kept evaluating remaining fields after the result
    was already known to be False; we return early instead (same result,
    less work).
    '''
    for f in expression.split():
        # a trailing ':2' selects the second y-axis -- strip it for evaluation
        if f.endswith(':2'):
            f = f[:-2]
        try:
            if mavutil.evaluate_expression(f, mestate.status.msgs) is None:
                return False
        except Exception:
            return False
    return True
def load_graph_xml(xml, filename, load_all=False):
    '''Parse one XML string of <graphs><graph> definitions.

    Returns a list of GraphDefinition. With load_all=True every graph is
    returned with its first expression as the active one; otherwise only
    graphs not already loaded whose first working expression evaluates
    against the current log are returned.

    Bug fix: the load_all branch previously referenced the loop variable
    `e` leaked from the preceding list comprehension (undefined on
    Python 3, and the *last* expression by accident on Python 2); it now
    explicitly uses the first expression.
    '''
    ret = []
    try:
        root = objectify.fromstring(xml)
    except Exception:
        return []
    if root.tag != 'graphs':
        return []
    if not hasattr(root, 'graph'):
        return []
    for g in root.graph:
        name = g.attrib['name']
        expressions = [e.text for e in g.expression]
        if load_all:
            ret.append(GraphDefinition(name, expressions[0], g.description.text, expressions, filename))
            continue
        if have_graph(name):
            continue
        # use the first expression that works with the loaded messages
        for e in expressions:
            if expression_ok(e):
                ret.append(GraphDefinition(name, e, g.description.text, expressions, filename))
                break
    return ret
def load_graphs():
    '''Load graph definitions from mavgraphs.xml files.

    Sources, in order: the current directory, every .xml under the user's
    config dir (~/.mavproxy or %LOCALAPPDATA%/MAVProxy), then the graphs
    bundled with the MAVProxy package. The result is sorted by name.

    Fixes: files are read via `with` so handles are closed, and the local
    no longer shadows the `file` builtin.
    '''
    mestate.graphs = []
    gfiles = ['mavgraphs.xml']
    if 'HOME' in os.environ:
        for dirname, dirnames, filenames in os.walk(os.path.join(os.environ['HOME'], ".mavproxy")):
            for filename in filenames:
                if filename.lower().endswith('.xml'):
                    gfiles.append(os.path.join(dirname, filename))
    elif 'LOCALAPPDATA' in os.environ:
        for dirname, dirnames, filenames in os.walk(os.path.join(os.environ['LOCALAPPDATA'], "MAVProxy")):
            for filename in filenames:
                if filename.lower().endswith('.xml'):
                    gfiles.append(os.path.join(dirname, filename))
    for path in gfiles:
        if not os.path.exists(path):
            continue
        with open(path) as f:
            xml = f.read()
        graphs = load_graph_xml(xml, path)
        if graphs:
            mestate.graphs.extend(graphs)
            mestate.console.writeln("Loaded %s" % path)
    # also load the built in graphs
    dlist = pkg_resources.resource_listdir("MAVProxy", "tools/graphs")
    for f in dlist:
        raw = pkg_resources.resource_stream("MAVProxy", "tools/graphs/%s" % f).read()
        graphs = load_graph_xml(raw, None)
        if graphs:
            mestate.graphs.extend(graphs)
            mestate.console.writeln("Loaded %s" % f)
    mestate.graphs = sorted(mestate.graphs, key=lambda g: g.name)
def graph_process(fields, mavExpLog, mavExpFlightModeSel, mavExpSettings):
    '''process for a graph'''
    # Child-process entry point: restrict the in-memory log to the
    # flightmodes ticked in the menu, then plot each field expression.
    mavExpLog.reduce_by_flightmodes(mavExpFlightModeSel)
    mg = grapher.MavGraph()
    mg.set_marker(mavExpSettings.marker)
    mg.set_condition(mavExpSettings.condition)
    mg.set_xaxis(mavExpSettings.xaxis)
    mg.set_linestyle(mavExpSettings.linestyle)
    mg.set_show_flightmode(mavExpSettings.show_flightmode)
    mg.set_legend(mavExpSettings.legend)
    mg.add_mav(mavExpLog)
    for f in fields:
        mg.add_field(f)
    mg.process()
    # blocks until the plot window is closed
    mg.show()
def display_graph(graphdef):
    '''display a graph'''
    # Graphing runs in a separate process so the console stays responsive
    # and several plot windows can coexist.
    mestate.console.write("Expression: %s\n" % ' '.join(graphdef.expression.split()))
    child = multiprocessing.Process(target=graph_process, args=[graphdef.expression.split(), mestate.mlog, mestate.flightmode_selections, mestate.settings])
    child.start()
def cmd_graph(args):
    '''graph command: plot fields/expressions, or ':N' for a saved graph'''
    usage = "usage: graph <FIELD...>"
    if len(args) < 1:
        print(usage)
        return
    if args[0][0] == ':':
        # ':N' selects the N'th predefined graph (as built by graph_menus)
        i = int(args[0][1:])
        g = mestate.graphs[i]
        expression = g.expression
        args = expression.split()
        mestate.console.write("Added graph: %s\n" % g.name)
        if g.description:
            mestate.console.write("%s\n" % g.description, fg='blue')
        # record the expanded expression so it can be recalled/edited
        mestate.rl.add_history("graph %s" % ' '.join(expression.split()))
        mestate.last_graph = g
    else:
        # ad-hoc expression typed by the user
        expression = ' '.join(args)
        mestate.last_graph = GraphDefinition('Untitled', expression, '', [expression], None)
    display_graph(mestate.last_graph)
def map_process(args, MAVExpLog, MAVExpFlightModes, MAVExpSettings):
    '''process for a map display'''
    # imported here so the (GUI-heavy) module is only loaded in the child
    from mavflightview import mavflightview_mav, mavflightview_options
    MAVExpLog.reduce_by_flightmodes(MAVExpFlightModes)
    options = mavflightview_options()
    options.condition = MAVExpSettings.condition
    if len(args) > 0:
        # optional message-type filter, e.g. "map GPS POS"
        options.types = ','.join(args)
    mavflightview_mav(MAVExpLog, options)
def cmd_map(args):
    '''map command'''
    # map rendering happens in a child process, like graphing
    child = multiprocessing.Process(target=map_process, args=[args, mestate.mlog, mestate.flightmode_selections, mestate.settings])
    child.start()
def cmd_set(args):
    '''control MAVExporer options'''
    # parsing/validation is delegated to MPSettings.command()
    mestate.settings.command(args)
def cmd_condition(args):
    '''Show or set the graphing condition; 'condition clear' resets it.'''
    if not args:
        print("condition is: %s" % mestate.settings.condition)
        return
    new_condition = ' '.join(args)
    if not new_condition or new_condition == 'clear':
        new_condition = None
    mestate.settings.condition = new_condition
def cmd_reload(args):
    '''reload graphs'''
    mestate.console.writeln('Reloading graphs', fg='blue')
    load_graphs()
    # rebuild the menu bar so the Graphs menu reflects the reloaded set
    setup_menus()
    mestate.console.write("Loaded %u graphs\n" % len(mestate.graphs))
def save_graph(graphdef, mestate):
    '''Save (or update) one graph definition in a mavgraphs.xml file.

    When graphdef has no filename yet, a per-user config location is
    chosen (~/.mavproxy or %LOCALAPPDATA%/MAVProxy, falling back to the
    current directory). A graph with the same name replaces the existing
    entry; otherwise it is appended.

    Fixes: the old code only called mkdir_p() when the directory already
    existed (dead code) and never set the filename when it was missing,
    so saving failed on a fresh install -- the directory is now created
    when absent. Files are also opened via `with` so handles are closed.
    '''
    if graphdef.filename is None:
        if 'HOME' in os.environ:
            dname = os.path.join(os.environ['HOME'], '.mavproxy')
            mp_util.mkdir_p(dname)
            graphdef.filename = os.path.join(dname, 'mavgraphs.xml')
        elif 'LOCALAPPDATA' in os.environ:
            dname = os.path.join(os.environ['LOCALAPPDATA'], 'MAVProxy')
            mp_util.mkdir_p(dname)
            graphdef.filename = os.path.join(dname, 'mavgraphs.xml')
        else:
            graphdef.filename = 'mavgraphs.xml'
    if graphdef.filename is None:
        mestate.console.writeln("No file to save graph to", fg='red')
        return
    # merge with whatever is already in the target file
    try:
        with open(graphdef.filename) as f:
            graphs = load_graph_xml(f.read(), graphdef.filename, load_all=True)
    except Exception:
        graphs = []
    found_name = False
    for i in range(len(graphs)):
        if graphs[i].name == graphdef.name:
            graphs[i] = graphdef
            found_name = True
            break
    if not found_name:
        graphs.append(graphdef)
    mestate.console.writeln("Saving %u graphs to %s" % (len(graphs), graphdef.filename))
    with open(graphdef.filename, "w") as f:
        f.write("<graphs>\n\n")
        for g in graphs:
            f.write(" <graph name='%s'>\n" % g.name.strip())
            if g.description is None:
                g.description = ''
            f.write(" <description>%s</description>\n" % g.description.strip())
            for e in g.expressions:
                f.write(" <expression>%s</expression>\n" % e.strip())
            f.write(" </graph>\n\n")
        f.write("</graphs>\n")
def save_callback(operation, graphdef):
    '''Callback from the graph-editor save thread.

    'test' plots the first expression that evaluates cleanly (or reports
    an error); 'save' writes the definition to disk.
    '''
    if operation == 'save':
        save_graph(graphdef, mestate)
        return
    if operation != 'test':
        return
    for candidate in graphdef.expressions:
        if expression_ok(candidate):
            graphdef.expression = candidate
            display_graph(graphdef)
            return
    mestate.console.writeln('Invalid graph expressions', fg='red')
def save_process(MAVExpLastGraph):
    '''process for saving a graph'''
    # wx imports are local so the GUI toolkit is only initialized inside
    # this child process -- NOTE(review): presumably to avoid forking an
    # initialized wx app; confirm before hoisting them.
    from MAVProxy.modules.lib import wx_processguard
    from MAVProxy.modules.lib.wx_loader import wx
    from MAVProxy.modules.lib.wxgrapheditor import GraphDialog
    app = wx.App(False)
    frame = GraphDialog('Graph Editor',
                        MAVExpLastGraph,
                        save_callback)
    frame.ShowModal()
    frame.Destroy()
def cmd_save(args):
    '''save a graph'''
    # run the editor dialog in a child process so the console stays live
    child = multiprocessing.Process(target=save_process, args=[mestate.last_graph])
    child.start()
def cmd_param(args):
    '''Print log parameters matching an optional wildcard (default: all).'''
    wildcard = args[0] if len(args) > 0 else '*'
    pattern = wildcard.upper()
    for name in sorted(mestate.mlog.params.keys()):
        if fnmatch.fnmatch(str(name).upper(), pattern):
            print("%-16.16s %f" % (str(name), mestate.mlog.params[name]))
def cmd_loadfile(args):
    '''Menu callback to load a log file; expects exactly one path argument.

    Fix: the Python 2 print *statement* is normalized to the function-call
    form used elsewhere in this file (same output, valid on Python 3).
    '''
    if len(args) != 1:
        print("Error loading file")
        return
    loadfile(args[0])
def loadfile(args):
    '''load a log file (path given by arg)'''
    mestate.console.write("Loading %s...\n" % args)
    t0 = time.time()
    mlog = mavutil.mavlink_connection(args, notimestamps=False,
                                      zero_time_base=False)
    # snapshot the whole log into memory so graphs/maps can be re-run fast
    mestate.mlog = mavmemlog.mavmemlog(mlog, progress_bar)
    mestate.status.msgs = mlog.messages
    t1 = time.time()
    mestate.console.write("\ndone (%u messages in %.1fs)\n" % (mestate.mlog._count, t1-t0))
    # graph availability depends on which message types the log contains
    load_graphs()
    setup_menus()
def process_stdin(line):
    '''Handle one command line from the user (console or menu).

    Fixes: `sorted(command_map.keys())` replaces the Python 2-only
    `keys()` + `list.sort()` combination, and `cmd not in` replaces the
    awkward `not cmd in` form. Behavior is unchanged.
    '''
    if line is None:
        sys.exit(0)
    line = line.strip()
    if not line:
        return
    args = line.split()
    cmd = args[0]
    if cmd == 'help':
        for cmd in sorted(command_map.keys()):
            (fn, help) = command_map[cmd]
            print("%-15s : %s" % (cmd, help))
        return
    if cmd == 'exit':
        mestate.exit = True
        return
    if cmd not in command_map:
        print("Unknown command '%s'" % line)
        return
    (fn, help) = command_map[cmd]
    try:
        fn(args[1:])
    except Exception as e:
        print("ERROR in command %s: %s" % (args[1:], str(e)))
def input_loop():
    '''wait for user input'''
    # Runs on a background thread; each line is handed to main_loop()
    # through the queue. NOTE: raw_input is Python 2 only (input() on 3).
    while mestate.exit != True:
        try:
            if mestate.exit != True:
                line = raw_input(mestate.rl.prompt)
        except EOFError:
            mestate.exit = True
            sys.exit(1)
        mestate.input_queue.put(line)
def main_loop():
    '''Main processing loop: drain queued command lines (';'-separated)
    and poll at 10 Hz until shutdown.'''
    while mestate is not None and not mestate.exit:
        while not mestate.input_queue.empty():
            for piece in mestate.input_queue.get().split(';'):
                process_stdin(piece)
        time.sleep(0.1)
command_map = {
'graph' : (cmd_graph, 'display a graph'),
'set' : (cmd_set, 'control settings'),
'reload' : (cmd_reload, 'reload | |
<reponame>jormono/Vinyl_Inventory
#! python3
from PyQt5 import QtCore, QtGui, QtWidgets
import sqlite3
# TODO: Error Handling on integer inputs
# Module-level SQLite handle shared by all the helpers below.
conn = sqlite3.connect('vinyl_inventory.db')
c = conn.cursor()
# One row per record; (rack, shelf, box) is the physical storage address.
c.execute("CREATE TABLE IF NOT EXISTS vinyl(id INTEGER, rack INTEGER, shelf INTEGER, box INTEGER, album TEXT, artist TEXT, year INTEGER, revisions INTEGER)")
def assign_address():
    """Return [rack, shelf, box] of the highest-numbered slot, as strings.

    While the table is empty, MAX() yields NULL (None) and each
    coordinate falls back to 1 -- matching the original behavior.
    Uses `is None` instead of `== None` and closes the cursor it opens.
    """
    cur = conn.cursor()
    cur.execute('SELECT MAX(rack), MAX(shelf), MAX(box) FROM vinyl')
    rack, shelf, box = cur.fetchone()
    cur.close()
    return [str(int(v)) if v is not None else '1' for v in (rack, shelf, box)]
def retrieve_info(item_num):
    """Return all rows whose id equals *item_num*.

    Bug fix: the previous code passed str(item_num) as the parameter
    sequence, so any multi-digit id supplied each character as a separate
    parameter and made sqlite3 raise. A one-element tuple is bound now.
    The cursor is also closed.
    """
    cur = conn.cursor()
    cur.execute('SELECT * FROM vinyl WHERE id = ?', (item_num,))
    rows = cur.fetchall()
    cur.close()
    return rows
def item_max():
    """Return the largest id in the vinyl table as an int.

    Raises TypeError when the table is empty (MAX() returns NULL), same
    as the original. The cursor is now closed after use.
    """
    cur = conn.cursor()
    cur.execute('SELECT MAX(id) FROM vinyl')
    row = cur.fetchone()
    cur.close()
    return int(row[0])
def item_min():
    """Return the smallest id in the vinyl table as an int.

    Raises TypeError when the table is empty (MIN() returns NULL), same
    as the original. The cursor is now closed after use.
    """
    cur = conn.cursor()
    cur.execute('SELECT MIN(id) FROM vinyl')
    row = cur.fetchone()
    cur.close()
    return int(row[0])
class Ui_Vinyl_Inventory_Main(object):
    def setupUi(self, Vinyl_Inventory_Main):
        """Build the three-tab main window (Add / Edit / View inventory).

        Generated-style Qt layout code: widgets are created, placed on grid
        layouts, and wired to the handler methods at the bottom.
        """
        Vinyl_Inventory_Main.setObjectName("Vinyl_Inventory_Main")
        Vinyl_Inventory_Main.resize(803, 619)
        self.centralwidget = QtWidgets.QWidget(Vinyl_Inventory_Main)
        self.centralwidget.setObjectName("centralwidget")
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setGeometry(QtCore.QRect(-4, -1, 801, 581))
        self.tabWidget.setObjectName("tabWidget")
        # Pre-fill the Add tab's rack/shelf/box fields with the highest
        # address already used in the database.
        current_address = assign_address()
        # add inventory tab start
        self.Inventory_Add = QtWidgets.QWidget()
        self.Inventory_Add.setObjectName("Inventory_Add")
        self.gridLayoutWidget_3 = QtWidgets.QWidget(self.Inventory_Add)
        self.gridLayoutWidget_3.setGeometry(QtCore.QRect(0, 0, 801, 551))
        self.gridLayoutWidget_3.setObjectName("gridLayoutWidget_3")
        self.gridLayout_Add = QtWidgets.QGridLayout(self.gridLayoutWidget_3)
        self.gridLayout_Add.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_Add.setObjectName("gridLayout_Add")
        self.Box_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Box_Label_Add.setFont(font)
        self.Box_Label_Add.setObjectName("Box_Label_Add")
        self.gridLayout_Add.addWidget(self.Box_Label_Add, 2, 3, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        self.Rack_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Rack_Label_Add.setFont(font)
        self.Rack_Label_Add.setObjectName("Rack_Label_Add")
        self.gridLayout_Add.addWidget(self.Rack_Label_Add, 2, 1, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        self.Year_Input_Add = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
        self.Year_Input_Add.setObjectName("Year_Input_Add") # year input (add tab)
        self.gridLayout_Add.addWidget(self.Year_Input_Add, 5, 3, 1, 1)
        self.Artist_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Artist_Label_Add.setFont(font)
        self.Artist_Label_Add.setObjectName("Artist_Label_Add")
        self.gridLayout_Add.addWidget(self.Artist_Label_Add, 4, 2, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        self.Rack_Input_Add = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
        self.Rack_Input_Add.setObjectName("Rack_Input_Add") # rack input (add tab)
        self.Rack_Input_Add.setText(str(current_address[0]))
        self.gridLayout_Add.addWidget(self.Rack_Input_Add, 3, 1, 1, 1)
        self.Submit_Data_Add = QtWidgets.QPushButton(self.gridLayoutWidget_3) # submit data button (add tab)
        font = QtGui.QFont() # submit data button (add tab)
        font.setPointSize(14) # submit data button (add tab)
        self.Submit_Data_Add.setFont(font) # submit data button (add tab)
        self.Submit_Data_Add.setObjectName("Submit_Data_Add") # submit data button (add tab)
        self.gridLayout_Add.addWidget(self.Submit_Data_Add, 8, 1, 3, 3) # submit data button (add tab)
        self.Box_Input_Add = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
        self.Box_Input_Add.setObjectName("Box_Input_Add") # box input (add tab)
        self.Box_Input_Add.setText(str(current_address[2]))
        self.gridLayout_Add.addWidget(self.Box_Input_Add, 3, 3, 1, 1)
        self.Year_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Year_Label_Add.setFont(font)
        self.Year_Label_Add.setObjectName("Year_Label_Add")
        self.gridLayout_Add.addWidget(self.Year_Label_Add, 4, 3, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        self.Item_Num_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
        self.Item_Num_Label_Add.setObjectName("Item_Num_Label_Add")
        self.gridLayout_Add.addWidget(self.Item_Num_Label_Add, 1, 2, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        self.Shelf_Input_Add = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
        self.Shelf_Input_Add.setObjectName("Shelf_Input_Add") # shelf input (add tab)
        self.Shelf_Input_Add.setText(str(current_address[1]))
        self.gridLayout_Add.addWidget(self.Shelf_Input_Add, 3, 2, 1, 1)
        self.Album_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Album_Label_Add.setFont(font)
        self.Album_Label_Add.setObjectName("Album_Label_Add")
        self.gridLayout_Add.addWidget(self.Album_Label_Add, 4, 1, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        self.Album_Input_Add = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
        self.Album_Input_Add.setObjectName("Album_Input_Add") # album input (add tab)
        self.gridLayout_Add.addWidget(self.Album_Input_Add, 5, 1, 1, 1)
        self.Artist_Input_Add = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
        self.Artist_Input_Add.setObjectName("Artist_Input_Add") # artist input (add tab)
        self.gridLayout_Add.addWidget(self.Artist_Input_Add, 5, 2, 1, 1)
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout_Add.addItem(spacerItem, 1, 4, 10, 1)
        self.Shelf_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Shelf_Label_Add.setFont(font)
        self.Shelf_Label_Add.setObjectName("Shelf_Label_Add")
        self.gridLayout_Add.addWidget(self.Shelf_Label_Add, 2, 2, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout_Add.addItem(spacerItem1, 1, 0, 10, 1)
        self.Warning_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
        self.Warning_Label_Add.setObjectName("Warning_Label_Add")
        self.gridLayout_Add.addWidget(self.Warning_Label_Add, 7, 1, 1, 3, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
        self.Warning_Label_Add_2 = QtWidgets.QLabel(self.gridLayoutWidget_3)
        self.Warning_Label_Add_2.setObjectName("Warning_Label_Add_2")
        self.gridLayout_Add.addWidget(self.Warning_Label_Add_2, 6, 1, 1, 3, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        self.tabWidget.addTab(self.Inventory_Add, "")
        # edit inventory tab start
        self.Inventory_Edit = QtWidgets.QWidget()
        self.Inventory_Edit.setObjectName("Inventory_Edit")
        self.gridLayoutWidget_2 = QtWidgets.QWidget(self.Inventory_Edit)
        self.gridLayoutWidget_2.setGeometry(QtCore.QRect(0, 0, 801, 551))
        self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
        self.gridLayout_Edit = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
        self.gridLayout_Edit.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_Edit.setObjectName("gridLayout_Edit")
        self.Submit_Data_Edit = QtWidgets.QPushButton(self.gridLayoutWidget_2) # submit data button (edit tab)
        font = QtGui.QFont() # submit data button (edit tab)
        font.setPointSize(14) # submit data button (edit tab)
        self.Submit_Data_Edit.setFont(font) # submit data button (edit tab)
        self.Submit_Data_Edit.setObjectName("Submit__Data_Edit") # submit data button (edit tab)
        self.gridLayout_Edit.addWidget(self.Submit_Data_Edit, 8, 1, 3, 3) # submit data button (edit tab)
        spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout_Edit.addItem(spacerItem2, 1, 0, 10, 1)
        self.Rack_Input_Edit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
        self.Rack_Input_Edit.setObjectName("Rack_Input_Edit")
        self.gridLayout_Edit.addWidget(self.Rack_Input_Edit, 3, 1, 1, 1)
        self.Album_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Album_Label_Edit.setFont(font)
        self.Album_Label_Edit.setObjectName("Album_Label_Edit")
        self.gridLayout_Edit.addWidget(self.Album_Label_Edit, 4, 1, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        self.Artist_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Artist_Label_Edit.setFont(font)
        self.Artist_Label_Edit.setObjectName("Artist_Label_Edit")
        self.gridLayout_Edit.addWidget(self.Artist_Label_Edit, 4, 2, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        self.Shelf_Input_Edit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
        self.Shelf_Input_Edit.setObjectName("Shelf_Input_Edit")
        self.gridLayout_Edit.addWidget(self.Shelf_Input_Edit, 3, 2, 1, 1)
        self.Shelf_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Shelf_Label_Edit.setFont(font)
        self.Shelf_Label_Edit.setObjectName("Shelf_Label_Edit")
        self.gridLayout_Edit.addWidget(self.Shelf_Label_Edit, 2, 2, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        self.Box_Input_Edit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
        self.Box_Input_Edit.setObjectName("Box_Input_Edit")
        self.gridLayout_Edit.addWidget(self.Box_Input_Edit, 3, 3, 1, 1)
        self.Year_Input_Edit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
        self.Year_Input_Edit.setObjectName("Year_Input_Edit")
        self.gridLayout_Edit.addWidget(self.Year_Input_Edit, 5, 3, 1, 1)
        self.Year_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Year_Label_Edit.setFont(font)
        self.Year_Label_Edit.setObjectName("Year_Label_Edit")
        self.gridLayout_Edit.addWidget(self.Year_Label_Edit, 4, 3, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        self.Album_Input_Edit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
        self.Album_Input_Edit.setObjectName("Album_Input_Edit")
        self.gridLayout_Edit.addWidget(self.Album_Input_Edit, 5, 1, 1, 1)
        self.Artist_Input_Edit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
        self.Artist_Input_Edit.setObjectName("Artist_Input_Edit")
        self.gridLayout_Edit.addWidget(self.Artist_Input_Edit, 5, 2, 1, 1)
        self.Rack_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Rack_Label_Edit.setFont(font)
        self.Rack_Label_Edit.setObjectName("Rack_Label_Edit")
        self.gridLayout_Edit.addWidget(self.Rack_Label_Edit, 2, 1, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        self.Box_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Box_Label_Edit.setFont(font)
        self.Box_Label_Edit.setObjectName("Box_Label_Edit")
        self.gridLayout_Edit.addWidget(self.Box_Label_Edit, 2, 3, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout_Edit.addItem(spacerItem3, 1, 4, 10, 1)
        self.Warning_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.Warning_Label_Edit.setObjectName("Warning_Label_Edit")
        self.gridLayout_Edit.addWidget(self.Warning_Label_Edit, 6, 1, 1, 3, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
        self.Warning_Label_2_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.Warning_Label_2_Edit.setObjectName("Warning_Label_2_Edit")
        self.gridLayout_Edit.addWidget(self.Warning_Label_2_Edit, 7, 1, 1, 3, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
        self.Item_Num_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.Item_Num_Label_Edit.setObjectName("Item_Num_Label_Edit")
        self.gridLayout_Edit.addWidget(self.Item_Num_Label_Edit, 1, 1, 1, 1, QtCore.Qt.AlignRight)
        self.Item_Num_Input_Edit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
        self.Item_Num_Input_Edit.setObjectName("Item_Num_Input_Edit")
        self.gridLayout_Edit.addWidget(self.Item_Num_Input_Edit, 1, 2, 1, 1) # item number view button (edit tab)
        self.Item_Num_View_Edit = QtWidgets.QPushButton(self.gridLayoutWidget_2) # item number view button (edit tab)
        self.Item_Num_View_Edit.setObjectName("Item_Num_View_Edit") # item number view button (edit tab)
        self.gridLayout_Edit.addWidget(self.Item_Num_View_Edit, 1, 3, 1, 1) # item number view button (edit tab)
        self.tabWidget.addTab(self.Inventory_Edit, "")
        # View inventory tab start
        # input_range is the number of rows shown per page of the view table.
        input_range = 10
        self.Inventory_View = QtWidgets.QWidget()
        self.Inventory_View.setObjectName("Inventory_View")
        self.tableWidget = QtWidgets.QTableWidget(self.Inventory_View)
        self.tableWidget.setGeometry(QtCore.QRect(20, 50, 761, 491))
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(8)
        self.tableWidget.setRowCount(input_range)
        self.tableWidget.verticalHeader().setVisible(False)
        self.horizontalLayoutWidget = QtWidgets.QWidget(self.Inventory_View)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(19, 0, 761, 51))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.Viewing_item_num_label = QtWidgets.QLabel(self.horizontalLayoutWidget)
        self.Viewing_item_num_label.setObjectName("Viewing_item_num_label")
        self.horizontalLayout.addWidget(self.Viewing_item_num_label)
        self.item_num_input_1_view = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
        self.item_num_input_1_view.setObjectName("item_num_input_1_view")
        self.horizontalLayout.addWidget(self.item_num_input_1_view)
        self.Viewing_item_num_label_2 = QtWidgets.QLabel(self.horizontalLayoutWidget)
        self.Viewing_item_num_label_2.setObjectName("Viewing_item_num_label_2")
        self.horizontalLayout.addWidget(self.Viewing_item_num_label_2)
        self.item_num_input_2_view = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
        self.item_num_input_2_view.setObjectName("item_num_input_2_view")
        # Default the view window to the last `input_range` items in the db.
        input_high = item_max()
        if input_high - input_range < item_min():
            input_low = item_min()
        else:
            input_low = input_high - input_range
        self.item_num_input_1_view.setText(str(input_low))
        self.item_num_input_2_view.setText(str(input_high))
        header_labels = ['ID', 'Rack', 'Shelf', 'Box', 'Album', 'Artist', 'Year', 'Revisions']
        self.tableWidget.setHorizontalHeaderLabels(header_labels)
        self.tableWidget.resizeColumnsToContents()
        self.tableWidget.setColumnWidth(4, 250)
        self.tableWidget.setColumnWidth(5, 250)
        self.update_view_data()
        #self.tableWidget.setEditTriggers(
        self.horizontalLayout.addWidget(self.item_num_input_2_view)
        self.update_view = QtWidgets.QPushButton(self.horizontalLayoutWidget)
        self.update_view.setObjectName("update_view")
        self.horizontalLayout.addWidget(self.update_view)
        self.tabWidget.addTab(self.Inventory_View, "")
        Vinyl_Inventory_Main.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(Vinyl_Inventory_Main)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 803, 21))
        self.menubar.setObjectName("menubar")
        Vinyl_Inventory_Main.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(Vinyl_Inventory_Main)
        self.statusbar.setObjectName("statusbar")
        Vinyl_Inventory_Main.setStatusBar(self.statusbar)
        # button calls
        self.Submit_Data_Add.clicked.connect(self.submit_data_add)
        self.Submit_Data_Edit.clicked.connect(self.submit_data_edit)
        self.Item_Num_View_Edit.clicked.connect(self.lookup_item)
        self.update_view.clicked.connect(self.update_view_data)
        # GUI calls
        self.retranslateUi(Vinyl_Inventory_Main)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(Vinyl_Inventory_Main)
    def retranslateUi(self, Vinyl_Inventory_Main):
        """Set (translatable) display text for every widget built in setupUi."""
        _translate = QtCore.QCoreApplication.translate
        Vinyl_Inventory_Main.setWindowTitle(_translate("Vinyl_Inventory_Main", "Vinyl Inventory"))
        self.Box_Label_Add.setText(_translate("Vinyl_Inventory_Main", "Box Number"))
        self.Rack_Label_Add.setText(_translate("Vinyl_Inventory_Main", "Rack Number"))
        self.Artist_Label_Add.setText(_translate("Vinyl_Inventory_Main", "Artist Name"))
        self.Submit_Data_Add.setText(_translate("Vinyl_Inventory_Main", "Submit Data"))
        self.Year_Label_Add.setText(_translate("Vinyl_Inventory_Main", "Year of Album"))
        # The Add tab shows the id the next inserted record will receive.
        item_num_label_add = "Item Number: " + str(item_max()+1)
        self.Item_Num_Label_Add.setText(_translate("Vinyl_Inventory_Main", item_num_label_add)) ### text label, insert item num variable here (add tab)
        self.Album_Label_Add.setText(_translate("Vinyl_Inventory_Main", "Album Name"))
        self.Shelf_Label_Add.setText(_translate("Vinyl_Inventory_Main", "Shelf Number"))
        self.Warning_Label_Add.setText(_translate("Vinyl_Inventory_Main", "All Submitted Data is Final! There is no \"Undo\" Functionality!"))
        self.Warning_Label_Add_2.setText(_translate("Vinyl_Inventory_Main", "WARNING!"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.Inventory_Add), _translate("Vinyl_Inventory_Main", "Add Inventory"))
        self.Submit_Data_Edit.setText(_translate("Vinyl_Inventory_Main", "Submit Data"))
        self.Album_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "Album Name"))
        self.Artist_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "Artist Name"))
        self.Shelf_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "Shelf Number"))
        self.Year_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "Year of Album"))
        self.Rack_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "Rack Number"))
        self.Box_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "Box Number"))
        self.Warning_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "WARNING!"))
        self.Warning_Label_2_Edit.setText(_translate("Vinyl_Inventory_Main", "All Submitted Data is Final! There is no \"Undo\" Functionality!"))
        self.Item_Num_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "Viewing Item Number:"))
        self.Item_Num_View_Edit.setText(_translate("Vinyl_Inventory_Main", "View Item"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.Inventory_Edit), _translate("Vinyl_Inventory_Main", "Edit Inventory"))
        self.Viewing_item_num_label.setText(_translate("Vinyl_Inventory_Main", "Viewing Item Numbers:"))
        self.Viewing_item_num_label_2.setText(_translate("Vinyl_Inventory_Main", "to"))
        self.update_view.setText(_translate("Vinyl_Inventory_Main", "Update"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.Inventory_View), _translate("Vinyl_Inventory_Main", "View Inventory"))
def submit_data_add(self, data_add):
_translate = QtCore.QCoreApplication.translate
rack_value = int(float(self.Rack_Input_Add.text()))
shelf_value = int(self.Shelf_Input_Add.text())
box_value = int(self.Box_Input_Add.text())
item_value = item_max() + 1
album_value = self.Album_Input_Add.text()
artist_value = self.Artist_Input_Add.text()
year_value = int(self.Year_Input_Add.text())
c.execute("INSERT INTO vinyl (id, rack, shelf, box, album, artist, year, revisions) values (?, ?, ?, ?, ?, ?, ?, 0)", (item_value, rack_value, shelf_value, box_value, album_value, artist_value, year_value))
conn.commit()
new_item_num_label_add = "Item Number: " + str(item_max + 1)
self.Item_Num_Label_Add.setText(_translate("Vinyl_Inventory_Main", new_item_num_label_add))
self.Album_Input_Add.clear()
self.Artist_Input_Add.clear()
self.Year_Input_Add.clear()
def submit_data_edit(self, data_edit):
item_value_edit = self.Item_Num_Input_Edit.text()
rack_value_edit = int(float(self.Rack_Input_Edit.text()))
shelf_value_edit = int(float(self.Shelf_Input_Edit.text()))
box_value_edit = int(float(self.Box_Input_Edit.text()))
album_value_edit = self.Album_Input_Edit.text()
artist_value_edit = self.Artist_Input_Edit.text()
year_value_edit = int(float(self.Year_Input_Edit.text()))
edit_item_lookup = retrieve_info(item_value_edit)
revision_num_edit = 1 + edit_item_lookup[0][7]
c.execute("UPDATE vinyl SET rack = ?, shelf = ?, box = ?, album = ?, artist = ?, year = ?, revisions = ? WHERE id = ?",
(rack_value_edit, shelf_value_edit, box_value_edit, album_value_edit, artist_value_edit, year_value_edit, revision_num_edit, item_value_edit))
conn.commit()
self.Rack_Input_Edit.clear()
self.Shelf_Input_Edit.clear()
self.Box_Input_Edit.clear()
self.Album_Input_Edit.clear()
self.Artist_Input_Edit.clear()
self.Year_Input_Edit.clear()
def lookup_item(self):
item_selection_edit = int(self.Item_Num_Input_Edit.text())
edit_item_lookup = retrieve_info(item_selection_edit)
rack_lookup = edit_item_lookup[0][1]
shelf_lookup = edit_item_lookup[0][2]
box_lookup = edit_item_lookup[0][3]
album_lookup = edit_item_lookup[0][4]
artist_lookup = edit_item_lookup[0][5]
year_lookup = edit_item_lookup[0][6]
self.Rack_Input_Edit.setText(str(rack_lookup))
self.Shelf_Input_Edit.setText(str(shelf_lookup))
self.Box_Input_Edit.setText(str(box_lookup))
self.Album_Input_Edit.setText(str(album_lookup))
self.Artist_Input_Edit.setText(str(artist_lookup))
self.Year_Input_Edit.setText(str(year_lookup))
def update_view_data(self):
while self.tableWidget.rowCount() > 0:
self.tableWidget.removeRow(0)
self.tableWidget.hideRow(0)
list_min = int(self.item_num_input_1_view.text())
list_max = int(self.item_num_input_2_view.text())
# makes sure data is within database
min_id = item_min()
max_id = item_max()
if max_id == None:
return
elif list_max > max_id:
list_max = max_id
| |
# file: generate-grammars/grammar_to_ply.py
#!/usr/bin/env python
# Written by <NAME>
# Copyright (c) 2008 by Dalke Scientific, AB
# Modified by <NAME>, 2016
#
# (This is the MIT License with the serial numbers scratched off and my
# name written in crayon. I would prefer "share and enjoy" but
# apparently that isn't legally acceptable.)
#
# Copyright (c) 2008 <NAME> <<EMAIL>>
# Dalke Scientific Software, AB
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This program converts Python's 'Grammar' file into a PLY grammar
The Grammar file is pretty simple but not designed for LALR(1) and
similar parsers. This program tweaks the grammar slightly and
flattens the results to a more usable form. It might prove useful for
other parsers.
"""
import sys
import itertools
import importlib
import time
import datetime
import histogrammar
from histogrammar.pycparser.ply import lex, yacc
# Command-line contract: grammar file, actions file, lexer module name,
# output path for the generated PLY parser.
inputGrammar, grammarActions, inputLex, outputPLY = sys.argv[1:]
outputTable = outputPLY.split("/")[-1].replace(".py", "")
literal_to_name = importlib.import_module(inputLex, histogrammar).literal_to_name
grammarActionsDict = {}
# NOTE: Python 2 `exec ... in` statement — loads the actions file's
# `actions` and `asts` definitions into a scratch namespace.
exec open(grammarActions).read() in grammarActionsDict
actions = grammarActionsDict["actions"]
asts = grammarActionsDict["asts"]
# Rule names whose flattened sub-rules collide; map each duplicate onto
# the canonical name so only one sub-rule is emitted.
duplicates = {
    "varargslist_star2": "varargslist_star",
    "print_stmt_star": "print_stmt_plus",
    "import_from_star": "import_from_plus",
}
# --- PLY lexer for the Grammar file format ---------------------------------
# NOTE: in PLY the t_* function docstrings ARE the token regexes and the
# definition order matters; do not edit them.
tokens = ("LEXER_NAME", "PARSER_NAME", "STRING",
          "NL", "LPAR", "RPAR", "COLON")
def t_comment(t):
    r"\#.*"
    pass
t_ignore = " \t"
def t_NL(t):
    r"\n"
    t.value = t.lexer.lineno
    t.lexer.lineno += 1
    # Newlines inside parentheses are continuations, not rule terminators.
    if getattr(t.lexer, "paren_depth", 0) == 0:
        return t
def t_word(t):
    r"[a-zA-Z_0-9]+"
    # ALL-CAPS words are terminal (lexer) names; lowercase are nonterminals.
    if t.value == t.value.upper():
        t.type = "LEXER_NAME"
        return t
    if t.value == t.value.lower():
        t.type = "PARSER_NAME"
        return t
    raise AssertionError("Unknown word: %r" % t.value)
t_STRING = r"'[^']+'"
def t_LPAR(t):
    r"\("
    t.lexer.paren_depth = getattr(t.lexer, "paren_depth", 0)+1
    return t
def t_RPAR(t):
    r"\)"
    t.lexer.paren_depth = getattr(t.lexer, "paren_depth", 0)-1
    assert t.lexer.paren_depth >= 0
    return t
def t_COLON(t):
    r":"
    # Record the line number so Definition can track its source span.
    t.value = t.lexer.lineno
    return t
literals = ('[', ']', '|', '+', '*')
def t_error(t):
    raise AssertionError(t)
lexer = lex.lex()
class Definition(object):
    """A named grammar production together with its source line span."""

    def __init__(self, name, expr, first_line, last_line):
        self.name = name
        self.expr = expr
        self.first_line = first_line
        self.last_line = last_line

    def __repr__(self):
        fields = (self.name, self.expr, self.first_line, self.last_line)
        return "Definition(%r, %r, %r, %r)" % fields
class Star(object):
    """Zero-or-more repetition of a grammar sub-expression."""

    def __init__(self, child):
        self.child = child

    def __repr__(self):
        return "Star({!r})".format(self.child)
class Plus(object):
    """One-or-more repetition of a grammar sub-expression."""

    def __init__(self, child):
        self.child = child

    def __repr__(self):
        return "Plus({!r})".format(self.child)
class Opt(object):
    """Optional occurrence of a grammar sub-expression ([...])."""

    def __init__(self, child):
        self.child = child

    def __repr__(self):
        return "Opt({!r})".format(self.child)
class Or(object):
    """Alternation between two grammar sub-expressions (left | right)."""

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def __repr__(self):
        return "Or({!r}, {!r})".format(self.left, self.right)
class Seq(object):
    """Sequential composition of two grammar sub-expressions."""

    def __init__(self, first, next):
        self.first = first
        self.next = next

    def __repr__(self):
        return "Seq({!r}, {!r})".format(self.first, self.next)
# --- PLY parser for the Grammar file format --------------------------------
# NOTE: in PLY the p_* docstrings ARE the grammar rules; do not edit them.
# The rules build an expression tree of Definition/Star/Plus/Opt/Or/Seq.
def p_datafile1(p):
    """datafile : definition
                | datafile definition"""
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[1] + [p[2]]
def p_datafile(p):
    """datafile : NL
                | datafile NL"""
    # Blank lines contribute nothing; keep whatever definitions exist.
    if len(p) == 3:
        p[0] = p[1]
    else:
        p[0] = []
def p_definition(p):
    """definition : PARSER_NAME COLON expr NL"""
    # COLON and NL carry line numbers (set by the lexer) -> source span.
    p[0] = Definition(p[1], p[3], p[2], p[4])
def p_expr(p):
    """expr : sequential_terms
            | expr '|' sequential_terms"""
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = Or(p[1], p[3])
def p_sequential_terms(p):
    """sequential_terms : term
                        | sequential_terms term"""
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = Seq(p[1], p[2])
def p_term(p):
    """term : element '*'
            | element '+'
            | element
    """
    if len(p) == 3:
        if p[2] == "+":
            p[0] = Plus(p[1])
        elif p[2] == "*":
            p[0] = Star(p[1])
        else:
            raise AssertionError(p[2])
    else:
        p[0] = p[1] # no repeat
def p_element(p):
    """element : '[' expr ']'
               | LPAR expr RPAR
               | STRING
               | LEXER_NAME
               | PARSER_NAME"""
    if len(p) == 4:
        if p[1] == '[':
            p[0] = Opt(p[2])
        else:
            p[0] = p[2] # no repeat
    elif p[1].startswith("'"):
        # Quoted string; turn into a token name
        literal = p[1][1:-1]
        p[0] = literal_to_name[literal]
    else:
        p[0] = p[1]
def p_error(p):
    raise AssertionError(p)
yacc.yacc(debug=False, write_tables=False)
s = open(inputGrammar).read()
# Both of these map to NOTEQUAL
# Easiest way to fix it is to patch the grammar
grammar_text = s.replace("'<>'|'!='", "'!='")
# Parse the whole Grammar file into a list of Definition objects.
definition_list = yacc.parse(grammar_text)
def add_flattened_definition(name, flat_expr):
    # Debug/trace hook: report each flattened rule as it is produced.
    # (Python 2 print statement — this whole script targets Python 2.)
    print name, ":", flat_expr
# Registry of every rule name handed out so far.
_seen_names = set()

def new_name(name):
    """Return *name*, made unique with the smallest integer suffix >= 2
    if it is already taken, and record the result as used."""
    candidate = name
    suffix = 2
    while candidate in _seen_names:
        candidate = name + str(suffix)
        suffix += 1
    _seen_names.add(candidate)
    return candidate
def flatten(name, expr, need_list):
    """Yield each flat alternative of *expr* as a list of symbol names.

    Star/Plus sub-expressions are replaced by a freshly named sub-rule,
    queued on *need_list* as (sub_rule_name, child_expr) for later
    emission; Opt yields both an empty alternative and the child's.
    """
    if isinstance(expr, Seq):
        # Cartesian product of the alternatives of both halves.
        for first_terms in flatten(name, expr.first, need_list):
            for next_terms in flatten(name, expr.next, need_list):
                yield first_terms + next_terms
    elif isinstance(expr, Or):
        for left_terms in flatten(name, expr.left, need_list):
            yield left_terms
        for right_terms in flatten(name, expr.right, need_list):
            yield right_terms
    elif isinstance(expr, Star):
        # Zero occurrences...
        yield []
        # ...or defer one-or-more to a dedicated sub-rule.
        child_name = new_name(name + "_star")
        yield [child_name]
        need_list.append( (child_name, expr.child) )
    elif isinstance(expr, Plus):
        child_name = new_name(name + "_plus")
        yield [child_name]
        need_list.append( (child_name, expr.child) )
    elif isinstance(expr, Opt):
        yield []
        for term in flatten(name, expr.child, need_list):
            yield term
    elif isinstance(expr, str):
        # Leaf: terminal or nonterminal name.
        yield [expr]
    else:
        raise AssertionError(expr)
# Output stream for the generated parser module.
f = open(outputPLY, "w")
def W(s):
    # Write one line (or multi-line chunk) to the generated file.
    f.write(s + "\n")
# Emit the generated module's header: shebang, provenance stamp, imports.
W('''#!/usr/bin/env python
# generated at %s by "python %s"
import re
import ast
import inspect
from histogrammar.pycparser.ply import yacc''' % (datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%dT%H:%M:%S'), " ".join(sys.argv[1:])))
W('''from %s import PythonLexer, tokens
''' % inputLex)
# Copy the user-supplied AST helper definitions verbatim.
for x in asts:
    W(x)
# Emit the fixed runtime helpers every generated parser relies on
# (line-number propagation, Load->Store context rewriting, trailer/binop
# assembly).  This whole triple-quoted block is generated-code source.
W('''def inherit_lineno(p0, px, alt=True):
    if isinstance(px, dict):
        p0.lineno = px["lineno"]
        p0.col_offset = px["col_offset"]
    else:
        if alt and hasattr(px, "alt"):
            p0.lineno = px.alt["lineno"]
            p0.col_offset = px.alt["col_offset"]
        else:
            p0.lineno = px.lineno
            p0.col_offset = px.col_offset
def ctx_to_store(obj, store=ast.Store):
    if isinstance(obj, list):
        for i, x in enumerate(obj):
            obj[i] = ctx_to_store(x, store)
        return obj
    elif isinstance(obj, (ast.Attribute, ast.Subscript)):
        obj.ctx = store()
        return obj
    elif isinstance(obj, ast.AST):
        for attrib in obj._fields:
            value = getattr(obj, attrib)
            if isinstance(value, ast.Load):
                setattr(obj, attrib, store())
            elif isinstance(value, ast.Param):
                setattr(obj, attrib, store())
            elif isinstance(value, list):
                for i, x in enumerate(value):
                    value[i] = ctx_to_store(x, store)
            elif isinstance(value, ast.AST):
                setattr(obj, attrib, ctx_to_store(value, store))
        return obj
    else:
        return obj
def iskeyword(x): return isinstance(x, ast.keyword)
def notkeyword(x): return not isinstance(x, ast.keyword)
def unwrap_left_associative(args, rule, alt=False):
    out = ast.BinOp(args[0], args[1], args[2], rule=rule)
    inherit_lineno(out, args[0])
    args = args[3:]
    while len(args) > 0:
        out = ast.BinOp(out, args[0], args[1], rule=rule)
        inherit_lineno(out, out.left)
        if alt:
            out.alt = {"lineno": out.lineno, "col_offset": out.col_offset}
        inherit_lineno(out, out.op)
        args = args[2:]
    return out
def unpack_trailer(atom, power_star):
    out = atom
    for trailer in power_star:
        if isinstance(trailer, ast.Call):
            trailer.func = out
            inherit_lineno(trailer, out)
            out = trailer
        elif isinstance(trailer, ast.Attribute):
            trailer.value = out
            inherit_lineno(trailer, out, alt=False)
            if hasattr(out, "alt"):
                trailer.alt = out.alt
            out = trailer
        elif isinstance(trailer, ast.Subscript):
            trailer.value = out
            inherit_lineno(trailer, out)
            out = trailer
        else:
            assert False
    return out
''')
def numbers(s):
    """Return a 'ruler' line for rule *s*: the 1-based index of each
    whitespace-separated token, right-aligned to that token's width."""
    cells = []
    for position, token in enumerate(s.split(), start=1):
        cells.append(str(position).rjust(len(token)))
    return " ".join(cells)
def format_function(name, rules):
    """Write PLY p_<name> handler(s) for *rules* to the output file.

    A single alternative becomes one p_<name>; several become numbered
    p_<name>_1.. functions.  Each gets the matching action body from the
    global *actions* table, or a NotImplementedError stub.
    """
    if len(rules) == 1:
        W("def p_%s(p):" % name)
        W("    '''%s : %s'''" % (name, rules[0]))
        # Comment line numbering each symbol's p[] index for the action.
        W("    # %s %s" % (" " * len(name), numbers(rules[0])))
        r = "%s : %s" % (name, rules[0])
        if r in actions:
            W(actions[r])
        else:
            W("    raise NotImplementedError")
    else:
        for i, rule in enumerate(rules):
            W("def p_%s_%d(p):" % (name, i+1))
            W("    '''%s : %s'''" % (name, rule))
            W("    # %s %s" % (" " * len(name), numbers(rule)))
            r = "%s : %s" % (name, rule)
            if r in actions:
                W(actions[r])
            else:
                W("    raise NotImplementedError")
grammar_lines = grammar_text.splitlines()
# Emit one handler per top-level grammar definition, preceded by the
# original grammar source lines as a comment block for readability.
for definition in definition_list:
    # These entry points are handled specially elsewhere; skip them.
    if definition.name in ("single_input", "eval_input"):
        continue
    rules = []
    need_list = []
    for terms in flatten(definition.name, definition.expr, need_list):
        # Collapse known-duplicate sub-rule names onto their canonical form.
        terms = [duplicates.get(term, term) for term in terms]
        rules.append( " ".join(terms) )
    W("\n# " +
      "\n# ".join(grammar_lines[definition.first_line-1:definition.last_line]))
    format_function(definition.name, rules)
while need_list:
name, expr = need_list.pop(0)
if name in duplicates:
continue
rules = []
for terms in flatten(name, expr, need_list):
terms = [duplicates.get(term, term) for term in terms]
rules.append( " ".join(terms) )
rules = rules + [name + " " + rule | |
headers['Accept'] = 'application/vnd.quantized-mesh,application/octet-stream;q=0.9'
response = gHttpClient['tiles'].get(url.request_uri, headers)
if response and response.status_code == 200:
if '.terrain' in tilepath:
with gzip.GzipFile(fileobj=StringIO.StringIO(response.read())) as f1:
ret1 = f1.read()
if gIsSaveTileToDB:
gevent.spawn(gridfs_tile_save, tiletype, subtype, tilepath, mimetype, ret1).join()
else:
ret1 = response.read()
if len(ret1) == gClientMetadata[arr[1]][subtype]['missing_file_size']:
ret1 = gClientMetadata[arr[1]][subtype]['missing_file_content']
#print('get blank tile size=%d' % len(ret1))
else:
if gIsSaveTileToDB:
gevent.spawn(gridfs_tile_save, tiletype, subtype, tilepath, mimetype, ret1).join()
except:
print('error')
ret1 = None
return ret1
if href:
print('downloading tile from %s' % href)
ret = fetch_and_save_by_urlstr(href)
elif len(url_list)>0:
for i in url_list:
print('downloading tile from %s' % i)
ret = fetch_and_save_by_urlstr(i)
if ret and len(ret) != gClientMetadata[arr[1]][subtype]['missing_file_size']:
break
except:
raise
return mimetype, ret
#def arcgis_tile1(tiletype, subtype, tilepath, x, y, level):
#global gConfig
#mimetype = str(gConfig['mime_type'][gConfig[tiletype][subtype]['mimetype']])
#ret = None
#tileroot = gConfig[tiletype][subtype]['file_root']
#lvl = 'L%02d' % level
#row = 'R%08x' % int(hex(y), 16)
#col = 'C%08x' % int(hex(x), 16)
#p = os.path.join(tileroot, lvl, row, col+'.jpg')
#print(p)
#if os.path.exists(p):
#with open(p, 'rb') as f:
#f1 = gevent.fileobject.FileObjectThread(f, 'rb')
#ret = f1.read()
#else:
#STATICRESOURCE_DIR = os.path.join(module_path(), 'static')
#if gConfig['web'].has_key('webroot') and len(gConfig['web']['webroot'])>0:
#if os.path.exists(gConfig['web']['webroot']):
#STATICRESOURCE_DIR = gConfig['web']['webroot']
#STATICRESOURCE_IMG_DIR = os.path.join(STATICRESOURCE_DIR, 'img')
#picpath = os.path.join(STATICRESOURCE_IMG_DIR, gConfig['tiles'][image_type]['missing'])
#with open(picpath, 'rb') as f:
#f1 = gevent.fileobject.FileObjectThread(f, 'rb')
#ret = f1.read()
#mimetype = 'image/png'
#return mimetype, ret
def bing_tile(tiletype, subtype, tilepath, x, y, level):
    """Fetch one Bing Maps tile (x, y, level) and return (mimetype, bytes-or-None).

    On first use per tiletype/subtype this lazily fetches the Bing imagery
    metadata (imageUrl template + CDN subdomains) and caches the provider's
    "missing tile" placeholder.  A downloaded body whose size equals the
    placeholder size is replaced by the cached placeholder content; otherwise
    the tile is optionally persisted to GridFS.  Returns (mimetype, None)
    when the download fails.  (Python 2 code: uses dict.has_key.)
    """
    global gClientMongoTiles, gConfig, gClientMetadata, gIsSaveTileToDB, gHttpClient
    def tileXYToQuadKey(x, y, level):
        # Bing quadkey scheme: interleave the bits of x and y, one base-4
        # digit per zoom level, most significant bit first.
        quadkey = ''
        for i in range(level, -1, -1):
            bitmask = 1 << i
            digit = 0
            if (x & bitmask) != 0:
                digit |= 1
            if (y & bitmask) != 0:
                digit |= 2
            quadkey += str(digit)
        return quadkey
    def quadKeyToTileXY(quadkey):
        # Inverse of tileXYToQuadKey.
        # NOTE(review): `digit & 1` only works if `quadkey` is a sequence of
        # ints; a plain string of digits would need int() first — confirm
        # against callers (none visible in this chunk).
        x = 0
        y = 0
        level = len(quadkey) - 1
        for i in range(level, -1, -1):
            bitmask = 1 << i
            digit = quadkey[level - i]
            if (digit & 1) != 0 :
                x |= bitmask
            if (digit & 2) != 0:
                y |= bitmask
        return {
            'x' : x,
            'y' : y,
            'level' : level
        }
    # tiletype looks like '<prefix>/<provider>'; config is keyed on provider.
    arr = tiletype.split('/')
    connection_timeout, network_timeout = float(gConfig['webgis'][arr[1]]['www_connection_timeout']), float(gConfig['webgis'][arr[1]]['www_network_timeout'])
    #tilepath = '%s/%s/%s%s' % (level, x, y, gConfig[tiletype][subtype]['mimetype'])
    mimetype = str(gConfig['mime_type'][gConfig['webgis'][arr[1]][subtype]['mimetype']])
    ret = None
    # Lazily create the per-tiletype/subtype metadata cache slots.
    if not gClientMetadata.has_key(tiletype):
        gClientMetadata[tiletype] = {}
    if not gClientMetadata[tiletype].has_key(subtype):
        gClientMetadata[tiletype][subtype] = {}
    if len(gClientMetadata[tiletype][subtype].keys()) == 0:
        # First request: cache the "missing tile" placeholder and fetch the
        # Bing REST metadata (imageUrl template, subdomains) once.
        size, content = get_missing_file(tiletype, subtype)
        gClientMetadata[tiletype][subtype]['missing_file_size'] = size
        gClientMetadata[tiletype][subtype]['missing_file_content'] = content
        url_metadata_template = gConfig['webgis'][arr[1]][subtype]['url_template']
        href = url_metadata_template.replace('{key}', gConfig['webgis'][arr[1]][subtype]['key'])
        url = URL(href)
        #http = HTTPClient.from_url(url, concurrency=30, connection_timeout=connection_timeout, network_timeout=network_timeout, )
        if not gHttpClient.has_key('tiles'):
            gHttpClient['tiles'] = HTTPClient(url.host, port=url.port, connection_timeout=connection_timeout, network_timeout=network_timeout, concurrency=1000)
        response = gHttpClient['tiles'].get(url.request_uri)
        if response and response.status_code == 200:
            obj = json.load(response)
            if obj.has_key('resourceSets') and len(obj['resourceSets'])>0 and obj['resourceSets'][0].has_key('resources') and len(obj['resourceSets'][0]['resources'])>0:
                gClientMetadata[tiletype][subtype] = obj['resourceSets'][0]['resources'][0]
    #print(gClientMetadata[tiletype][subtype])
    quadkey = tileXYToQuadKey(x, y, level)
    href = gClientMetadata[tiletype][subtype]['imageUrl']
    href = href.replace('{quadkey}', quadkey).replace('{culture}', '')
    # Spread requests across the provider's CDN subdomains deterministically.
    subdomains = gClientMetadata[tiletype][subtype]['imageUrlSubdomains']
    subdomainIndex = (x + y + level) % len(subdomains)
    href = href.replace('{subdomain}', subdomains[subdomainIndex]);
    print('downloading from %s' % href)
    url = URL(href)
    #http = HTTPClient.from_url(url, concurrency=30, connection_timeout=connection_timeout, network_timeout=network_timeout, )
    if not gHttpClient.has_key('tiles'):
        gHttpClient['tiles'] = HTTPClient(url.host, port=url.port, connection_timeout=connection_timeout, network_timeout=network_timeout, concurrency=200)
    response = gHttpClient['tiles'].get(url.request_uri)
    if response and response.status_code == 200:
        ret = response.read()
        # A body of exactly the placeholder size is treated as "missing tile".
        if len(ret) == gClientMetadata[tiletype][subtype]['missing_file_size']:
            ret = gClientMetadata[tiletype][subtype]['missing_file_content']
        else:
            if gIsSaveTileToDB:
                # Save via a greenlet but join so failures surface here.
                gevent.spawn(gridfs_tile_save, tiletype, subtype, tilepath, mimetype, ret).join()
    else:
        ret = None
    return mimetype, ret
def gridfs_tile_save(tiletype, subtype, tilepath, mimetype, data):
    """Store one tile blob in GridFS under filename `tilepath`.

    Connection parameters come from gConfig['webgis'][provider][subtype]['mongodb'].
    Prints the traceback and re-raises on failure (callers run this inside a
    gevent greenlet and join on it).
    """
    global gClientMongoTiles, gConfig
    # tiletype is '<prefix>/<provider>'; config is keyed on the provider part.
    arr = tiletype.split('/')
    dbname = gConfig['webgis'][arr[1]][subtype]['mongodb']['database']
    collection = gConfig['webgis'][arr[1]][subtype]['mongodb']['gridfs_collection']
    host, port, replicaset = gConfig['webgis'][arr[1]][subtype]['mongodb']['host'], int(gConfig['webgis'][arr[1]][subtype]['mongodb']['port']), gConfig['webgis'][arr[1]][subtype]['mongodb']['replicaset']
    try:
        mongo_init_client(tiletype, subtype, host, port, replicaset)
        db = gClientMongoTiles[arr[1]][subtype][dbname]
        fs = gridfs.GridFS(db, collection=collection)
        # `mimetype` is stored as extra GridFS metadata alongside the filename.
        fs.put(data, mimetype=mimetype, filename=tilepath)
    except:
        traceback.print_exc()
        raise
def gridfs_tile_delete(tiletype, subtype, tilepath=None):
    """Delete tiles from GridFS.

    With `tilepath` given, removes every file stored under that filename;
    with no `tilepath`, wipes the whole GridFS collection.  Prints the
    traceback and re-raises on failure.
    """
    global gClientMongoTiles, gConfig
    provider = tiletype.split('/')[1]
    mongo_cfg = gConfig['webgis'][provider][subtype]['mongodb']
    dbname = mongo_cfg['database']
    collection = mongo_cfg['gridfs_collection']
    host = mongo_cfg['host']
    port = int(mongo_cfg['port'])
    replicaset = mongo_cfg['replicaset']
    try:
        mongo_init_client(tiletype, subtype, host, port, replicaset)
        fs = gridfs.GridFS(gClientMongoTiles[provider][subtype][dbname], collection=collection)
        if tilepath:
            # Collect ids first so we never delete from the cursor we iterate.
            doomed = []
            if fs.exists({'filename': tilepath}):
                doomed = [entry._id for entry in fs.find({'filename': tilepath})]
        else:
            doomed = [entry._id for entry in fs.find()]
        for file_id in doomed:
            fs.delete(file_id)
    except:
        traceback.print_exc()
        raise
def test_clear_gridfs(dbname, clienttype='webgis'):
    """Debug helper: print and delete every file in `dbname`'s default GridFS.

    Ids are collected first and deleted afterwards so the find() cursor is
    never mutated while iterating.  Errors are printed and swallowed —
    this is a best-effort cleanup utility.
    """
    global gClientMongo, gConfig
    try:
        mongo_init_client(clienttype)
        db = gClientMongo[clienttype][dbname]
        fs = gridfs.GridFS(db)
        # (fix) dropped the unused `l = fs.list()` — it performed a full
        # collection scan whose result was never read.
        idlist = []
        for i in fs.find():
            idlist.append(i._id)
            print(i.filename)
        for i in idlist:
            fs.delete(i)
    except:
        traceback.print_exc()
def test_resize_image(dbname, clienttype='webgis'):
    """Debug helper: thumbnail the first GridFS image and print it base64-encoded.

    Reads one file from `dbname`'s default GridFS, shrinks it to fit 100x100
    with PIL, and dumps the re-encoded bytes.  Prints traceback and re-raises
    on failure.  (Python 2 code: uses StringIO.)
    """
    global gClientMongo, gConfig
    size = (100, 100)  # max thumbnail dimensions; PIL preserves aspect ratio
    try:
        mongo_init_client(clienttype)
        db = gClientMongo[clienttype][dbname]
        fs = gridfs.GridFS(db)
        for i in fs.find():
            mimetype = i.mimetype
            print(mimetype)
            #ret = i.read()
            # GridOut is file-like, so PIL can read from it directly.
            im = Image.open(i)
            im.thumbnail(size)
            buf= StringIO.StringIO()
            print(im.format)
            # Re-encode in the same format the image was decoded from.
            im.save(buf, im.format)
            print(base64.b64encode(buf.getvalue()))
            break
    except:
        traceback.print_exc()
        raise
def test_httpclient():
    """Smoke test: fetch one gzipped terrain tile with geventhttpclient.

    Downloads tile 0/1/0 from the STK terrain server, decompresses the gzip
    body in memory, and writes the raw tile to drive d: (Windows path).
    """
    href = 'http://cesiumjs.org/stk-terrain/tilesets/world/tiles/0/1/0.terrain?v=3924.0.0&f=TerrainTile'
    url = URL(href)
    http = HTTPClient.from_url(url, connection_timeout=3.0, network_timeout=3.0, )
    response = http.get(url.request_uri)
    #g = gevent.spawn(http.get, url.request_uri)
    #g.start()
    #while not g.ready():
    #if g.exception:
    #break
    #gevent.sleep(0.1)
    #response = g.value
    if response.status_code == 200:
        # The body arrives gzip-compressed; unpack before writing to disk.
        with open(os.path.join(ur'd:', 'test_httpclient_0_1_0.terrain'), 'wb') as f:
            with gzip.GzipFile(fileobj=StringIO.StringIO(response.read())) as f1:
                f.write(f1.read())
def test_httpclient1():
    """Smoke test: fetch the same terrain tile with urllib2 instead.

    Requests gzip encoding explicitly and decompresses the body by hand
    before writing the raw tile to drive d: (Windows path).
    """
    #href = 'http://cesiumjs.org/stk-terrain/tilesets/world/tiles/0/1/0.terrain'
    #request = urllib2.Request(href, urllib.urlencode({'v':'3924.0.0','f':'TerrainTile'}))
    href = 'http://cesiumjs.org/stk-terrain/tilesets/world/tiles/0/1/0.terrain?v=3924.0.0&f=TerrainTile'
    request = urllib2.Request(href)
    request.add_header('User-Agent', 'Mozilla/5.0')
    # Ask for gzip; the response body is decompressed manually below.
    request.add_header('Accept-Encoding', 'gzip')
    request.add_header('Accept', 'application/json,application/octet-stream,*/*')
    response = urllib2.urlopen(request)
    with open(os.path.join(ur'd:', 'test_httpclient1_0_1_0.terrain'), 'wb') as f:
        with gzip.GzipFile(fileobj=StringIO.StringIO(response.read())) as f1:
            f.write(f1.read())
def get_line_geojson(db_name, line):
    """Build a GeoJSON LineString Feature for a 'polyline_line' record.

    Orders the line's tower nodes by walking the edge_tower graph, then
    emits a Feature whose coordinates are the ordered tower positions and
    whose properties.nodes holds the ordered tower ids.  Returns None when
    `line` is not a polyline_line feature.
    """
    props = line.get('properties')
    if not props or props.get('webgis_type') != 'polyline_line':
        return None
    ordered_towers = get_orderlist_by_edges(db_name, 'edge_tower', 'point_tower', props['nodes'])
    coords = []
    for tower in ordered_towers:
        point = tower['geometry']['coordinates']
        coords.append([point[0], point[1], point[2]])
    feature = {
        '_id': line['_id'],
        'type': 'Feature',
        'geometry': {'type': 'LineString', 'coordinates': coords},
        'properties': props,
    }
    # Replace the raw node ids with the ordered tower ids (mutates the
    # shared properties dict, exactly like the original did).
    feature['properties']['nodes'] = [t['_id'] for t in ordered_towers]
    return feature
def get_orderlist_by_edges(db_name, edge_webgis_type, node_webgis_type, node_id_list=[]):
    """Order node ids by walking directed edges (start -> end) and return docs.

    Starting from any candidate node, walks forward to the chain's end, then
    follows predecessor links back to produce the node documents in chain
    order.

    NOTE(review): `node_id_list=[]` is a mutable default — harmless here
    because the list is only read, but None + a local list is the safer idiom.
    """
    def get_prev(id, nodeidlist):
        # Predecessor of `id`: an edge ending at `id` whose start is in the
        # candidate set.  Falsy return ([] or the id of none) ends the walk.
        ret = None
        edgelist = mongo_find(db_name, 'edges', {'properties.webgis_type':edge_webgis_type, 'properties.end':add_mongo_id(id), 'properties.start':{'$in':add_mongo_id(nodeidlist)}})
        ret = [i['properties']['start'] for i in edgelist]
        if len(ret)>0:
            ret = add_mongo_id(ret[0])
        return ret
    def get_next(id, nodeidlist):
        # Successor of `id`: an edge starting at `id` whose end is in the
        # candidate set.
        ret = None
        edgelist = mongo_find(db_name, 'edges', {'properties.webgis_type':edge_webgis_type, 'properties.start':add_mongo_id(id), 'properties.end':{'$in':add_mongo_id(nodeidlist)}})
        ret = [i['properties']['end'] for i in edgelist]
        if len(ret)>0:
            ret = add_mongo_id(ret[0])
        return ret
    def get_start(id, nodeidlist):
        # Walk backwards until there is no predecessor; None if `id` is first.
        start = get_prev(id, nodeidlist)
        startold = None
        while start:
            startold = start
            start = get_prev(start, nodeidlist)
        return startold
    def get_end(id, nodeidlist):
        # Walk forwards until there is no successor; None if `id` is last.
        end = get_next(id, nodeidlist)
        endold = None
        while end:
            endold = end
            end = get_next(end, nodeidlist)
        return endold
    def get_path(endid, nodeidlist):
        # Collect ids from the end back to the start, then reverse.
        ret = []
        while endid:
            ret.append(endid)
            endid = get_prev(endid, nodeidlist)
        ret.reverse()
        return ret
    def get_node(id):
        # Fetch the full feature document for one node id.
        ret = None
        # for i in alist:
        #     if i['_id'] == id or i[u'_id'] == id:
        #         ret = i
        #         break
        ret = mongo_find_one(db_name, 'features', {'_id':add_mongo_id(id)})
        return ret
    def get_by_function_type(alist, typ):
        ret = []
        for i in alist:
            if i['properties']['function_type'] == typ:
                ret.append(i)
        return ret
    # edges = mongo_find(db_name, 'edges', {'properties.webgis_type':edge_webgis_type})
    cond = {'properties.webgis_type':node_webgis_type, }
    if len(node_id_list)>0:
        # NOTE(review): this filters _id by a plain list — presumably
        # mongo_find expands it to an $in query; verify, since a raw pymongo
        # equality match against a list would find nothing.
        cond['_id'] = node_id_list
    nodes = mongo_find(db_name, 'features', cond)
    ret = []
    nodeidlist = [str(i) for i in node_id_list]
    if len(nodes)>0:
        node0 = nodes[0]
        # Find the chain's last node; a lone node is its own end.
        end = get_end(node0['_id'], nodeidlist)
        if end is None:
            end = node0['_id']
        path = get_path(end, nodeidlist)
        for i in path:
            node = get_node(i)
            if node:
                ret.append(node)
    return ret
def test_generate_ODT(db_name):
def get_prev(id, alist):
ret = None
for i in alist:
if i['properties']['end'] == id:
ret = i['properties']['start']
break
return ret
def get_next(id, alist):
ret = None
for i in alist:
if i['properties']['start'] == id:
ret = i['properties']['end']
break
return ret
def get_start(id, alist):
start = get_prev(id, alist)
startold = None
while start:
startold = start
start = get_prev(start, alist)
return startold
def get_end(id, alist):
end = get_next(id, alist)
endold = None
while end:
endold = end
end = get_next(end, alist)
return endold
def get_path(endid, alist):
ret = []
while endid:
ret.append(endid)
endid = get_prev(endid, alist)
ret.reverse()
return ret
def get_node(id, alist):
ret = None
for i in alist:
if i['_id'] == id:
ret = i
break
return ret
def get_by_type(alist, typ):
ret = []
for i in alist:
if i['properties']['function_type'] == typ:
ret.append(i)
return ret
def slim_matrix(mapping):
l = []
rowexist = []
for k in mapping.keys():
ll = []
for kk in mapping[k].keys():
ll.append(mapping[k][kk])
lll = [i[1] for i in l]
namel = [i[0] for i in l]
if ll | |
__author__ = 'laiyu'
import logging
import json
from django.http import HttpResponse
from django.db import transaction
from django.db.models import Max
from django.db.models import Min
from django.db.models import F
from django.core import serializers
from django.template import loader
from website.models import Sprint_tree
from website.models import Project_chosen
from website.models import Test_project_setting
from website.models import UI_project_setting
from website.models import API_project_setting
from website.models import Test_task_overview
from website.models import Test_task_detail
from website.models import Promble_feedback
from website.models import Browser_setting
from website.models import Env_setting
from website.models import Database_setting
from website.models import Function_setting
from website.models import Operation_for_object
from website.models import Assertion_type_setting
from website.models import Global_variable_setting
from website.models import Page_tree
from website.models import UI_case_tree
from website.models import API_case_tree
from website.models import Page_element
from website.models import UI_test_case_step
from website.models import API_test_case_step
from website.models import UI_test_plan
from website.models import API_test_plan
from website.models import Running_plan
from website.models import UI_case_tree_test_plan
from website.models import API_case_tree_test_plan
from website.models import UI_test_report_for_summary
from website.models import API_test_report_for_summary
from website.models import UI_test_report_for_case
from website.models import API_test_report_for_case
from website.models import UI_test_report_for_case_step
from website.models import API_test_report_for_case_step
logger = logging.getLogger('mylogger')
# 获取上次选择的项目ID,用于展示树形结构,测试计划, 获取测试步骤页面元素所在页面等
def get_project_chosen(request):
    """Return the project last chosen for the given tree type as JSON.

    Falls back to the lowest-`order` project from the matching settings
    table when no choice was ever stored.  Response envelope:
    {'result': 'success'|'error', 'data': ...}.
    """
    params = request.GET
    tree_type = params['treeType']
    # Pick the settings table backing this tree type.
    # NOTE(review): an unknown treeType leaves db_class unbound; the
    # UnboundLocalError would surface through the except branch below.
    if tree_type == 'SprintTree':
        db_class = Test_project_setting
    elif tree_type == 'PageTree' or tree_type == 'UICaseTree' or tree_type == 'PlanUICaseTree':
        db_class = UI_project_setting
    elif tree_type == 'APICaseTree' or tree_type == 'PlanAPICaseTree':
        db_class = API_project_setting
    try:
        record = Project_chosen.objects.filter(tree_type=tree_type).values()
        if record:
            response = {'result':'success', 'data':{"id":record[0]['project_id'], "projectName":record[0]['project_name']}}
        else: # never stored: default to the lowest-order configured project
            project = db_class.objects.all().order_by('order').values()
            if project:
                project = project[0]
                response = {'result':'success', 'data':{"id":project['id'], "projectName":project['project_name']}}
            else:
                response = {'result':'error', 'data':'请先新建测试项目'}
    except Exception as e:
        logger.error('%s' % e)
        response = {'result':'error', 'data':'%s' % e}
    finally:
        response = json.dumps(response)
        return HttpResponse(response)
# 存储上次选择的项目
def store_project_chosen(request):
    """Persist the project last chosen for a tree type (upsert on tree_type).

    Responds with plain-text 'success', or the error message on failure.

    Fixes vs. original: the success/error HttpResponse was wrapped inside a
    second HttpResponse in a `finally: return ...`, double-wrapping the
    response object; we now build and return a single response, and drop the
    exception-swallowing return-in-finally.
    """
    params = request.POST
    try:
        tree_type = params['treeType']
        project_id = params['projectID']
        project_name = params['projectName']
        record = Project_chosen.objects.filter(tree_type=tree_type)
        if record.exists():  # a row already exists for this tree type: update it
            obj = record[0]
            obj.project_id = project_id
            obj.project_name = project_name
        else:  # first time: create a new row
            obj = Project_chosen(project_id=project_id, project_name=project_name, tree_type=tree_type)
        obj.save()
        return HttpResponse('success')
    except Exception as e:
        logger.error('%s' % e)
        return HttpResponse('%s' % e)
# 获取所属环境
def get_envs(request):
    """Return all configured environments, ordered by `order`, as JSON choices."""
    try:
        choices = [
            {'id': str(row['id']), 'choice': row['env']}
            for row in Env_setting.objects.order_by('order').values()
        ]
        response = {'result': 'success', 'data': choices}
    except Exception as e:
        logger.error('%s' % e)
        response = {'result': 'error', 'data': '%s' % e}
    finally:
        response = json.dumps(response)
        return HttpResponse(response)
# 根据项目类型(测试项目|UI自动化项目|接口自动化项目|所有项目),获取对应的项目
def get_projects(request):
    """Return the projects of the requested type as JSON choices.

    projectType is TestProject / UIProject / APIProject / ALLProject.
    Each choice carries `id` (raw pk), `id2` (pk prefixed with TEST/UI/API,
    consumed by the database-settings page) and `choice` (project name),
    ordered by descending `order`.

    Fixes vs. original: the local variable `type` shadowed the builtin, and
    the row->dict loop was copy-pasted three times; both are folded into the
    `_choices` helper.
    """
    def _choices(db_class, prefix):
        # Only enabled ('启用') projects, newest order first.
        return [
            {'id': str(row['id']),
             'id2': '%s%s' % (prefix, str(row['id'])),
             'choice': row['project_name']}
            for row in db_class.objects.filter(valid_flag='启用').order_by('-order').values()
        ]
    params = request.GET
    project_type = params['projectType']
    try:
        if project_type == 'TestProject':
            project_list = _choices(Test_project_setting, 'TEST')
        elif project_type == 'UIProject':
            project_list = _choices(UI_project_setting, 'UI')
        elif project_type == 'APIProject':
            project_list = _choices(API_project_setting, 'API')
        elif project_type == 'ALLProject':
            # All automation projects: API first, then UI (original order).
            project_list = _choices(API_project_setting, 'API') + _choices(UI_project_setting, 'UI')
        else:
            # Original code crashed on unknown types; report it explicitly.
            raise ValueError('unknown projectType: %s' % project_type)
        response = {'result': 'success', 'data': project_list}
    except Exception as e:
        logger.error('%s' % e)
        response = {'result': 'error', 'data': '%s' % e}
    finally:
        response = json.dumps(response)
        return HttpResponse(response)
# 根据项目类型(测试项目|UI自动化项目|接口自动化项目),项目ID,获取获取对应的测试计划
def get_plans(request):
    """Return the enabled test plans of one project as JSON choices.

    projectType selects the UI or API tables; the project itself must exist
    and be enabled ('启用'), otherwise an error envelope is returned.
    """
    try:
        plan_list = []
        params = request.GET
        project_type = params['projectType']
        project_id = params['projectID']
        # Pick the plan/project tables for this project type.
        # NOTE(review): any other projectType leaves both classes unbound and
        # is reported through the except branch below.
        if project_type == 'UIProject':
            project_db_class = UI_project_setting
            plan_db_class = UI_test_plan
        elif project_type == 'APIProject':
            plan_db_class = API_test_plan
            project_db_class = API_project_setting
        if(project_db_class.objects.filter(valid_flag='启用').filter(id=project_id).exists()):
            rows = plan_db_class.objects.filter(project_id=project_id).filter(valid_flag='启用').order_by('-order').values()
            for row in rows:
                temp_dic = {}
                temp_dic['id'] = str(row['id'])
                temp_dic['choice'] = row['plan_name']
                plan_list.append(temp_dic)
            response = {'result':'success', 'data':plan_list}
        else:
            response = {'result':'error', 'data':'没有获取到同项目关联的计划'}
    except Exception as e:
        logger.error('%s' % e)
        response = {'result':'error', 'data':'%s' % e}
    finally:
        response = json.dumps(response)
        return HttpResponse(response)
# 获取节点树
def node_tree(request):
    """Return the full node tree of one project as a JSON list.

    Each top-level node (parent_id=0) carries its descendants nested under
    'children'; siblings are ordered by descending `order` at every level.

    Fix vs. original: `HttpResponse(node_list, request)` passed the request
    object as HttpResponse's second positional argument (content_type);
    we now declare the actual content type.
    """
    node_list = []  # accumulates top-level nodes; children are nested inside
    params = request.GET
    tree_type = params['treeType']  # which tree table to read
    project_id = params['projectID']
    if tree_type == 'SprintTree':
        db_class = Sprint_tree
    elif tree_type == 'PageTree':
        db_class = Page_tree
    elif tree_type == 'UICaseTree' or tree_type == 'PlanUICaseTree':
        db_class = UI_case_tree
    elif tree_type == 'APICaseTree' or tree_type == 'PlanAPICaseTree':
        db_class = API_case_tree
    def get_sub_node(node):
        # Depth-first: attach each child dict under 'children' and recurse.
        node['children'] = []
        sub_nodes = db_class.objects.filter(parent_id=node['id']).order_by('-order').values()
        for sub_node in sub_nodes:
            node['children'].append(sub_node)
            get_sub_node(sub_node)
    father_nodes = db_class.objects.filter(parent_id=0).filter(project_id=project_id).order_by('-order').values()
    for father_node in father_nodes:
        node_list.append(father_node)
        get_sub_node(father_node)
    return HttpResponse(json.dumps(node_list), content_type='application/json')
# 修改节点名称
def update_tree_node_name(request):
    """Rename one tree node; responds 'success' or the error text."""
    params = request.POST
    try:
        node_id = params['nodeID']
        new_text = params['nodeText']
        tree_type = params['treeType']  # which tree table holds the node
        if tree_type == 'SprintTree':
            db_class = Sprint_tree
        elif tree_type == 'PageTree':
            db_class = Page_tree
        elif tree_type == 'UICaseTree':
            db_class = UI_case_tree
        elif tree_type == 'APICaseTree':
            db_class = API_case_tree
        target = db_class.objects.get(id=node_id)
        target.text = new_text
        target.save()
        return HttpResponse('success')
    except Exception as e:
        logger.error('%s' % e)
        return HttpResponse('%s' % e)
# 在节点树中增加节点
def append_tree_node(request):
    """Append a new node under a parent in one of the trees.

    The new node is placed after its siblings (max(order)+1, or 1 when the
    parent has no children).  Responds 'success' or the error text.
    """
    params = request.POST
    try:
        node_parent_id = params['nodeParentID']
        node_text = params['nodeText']
        state = params['state']
        iconcls = params['iconCls']
        attributes = params['attributes']
        tree_type = params['treeType']  # which tree table to insert into
        project_id = params['projectID']
        if tree_type == 'SprintTree':
            db_class = Sprint_tree
        elif tree_type == 'PageTree':
            db_class = Page_tree
        elif tree_type == 'UICaseTree':
            db_class = UI_case_tree
        elif tree_type == 'APICaseTree':
            db_class = API_case_tree
        # New node goes to the end of the parent's child list.
        sub_nodes = db_class.objects.filter(project_id=project_id).filter(parent_id=node_parent_id)
        if sub_nodes.exists():
            max_order = sub_nodes.aggregate(Max('order'))['order__max']
            order = max_order + 1
        else:
            order = 1
        node_obj = db_class(text=node_text, state=state, parent_id=node_parent_id, iconCls=iconcls, attributes=attributes, project_id=project_id, order=order)
        node_obj.save()
        # parent_node.save()
        return HttpResponse('success')
    except Exception as e:
        logger.error('%s' % e)
        return HttpResponse('%s' % e)
# 移除节点树中的节点
def remove_tree_node(request):
    """Delete a node and its whole subtree, then close the order gap.

    After the recursive delete succeeds, later siblings have their `order`
    decremented so the sequence stays contiguous.  Responds 'success', the
    failure message from rm_node, or the exception text.

    SECURITY NOTE(review): `eval(params['nodeID'])` evaluates request data
    as Python code — replace with int()/json parsing once the client
    payload format is confirmed.
    """
    params = request.POST
    node_id = eval(params['nodeID'])
    parent_id = params['parentID']
    project_id = params['projectID']
    order = params['order']
    tree_type = params['treeType']  # which tree table to delete from
    if tree_type == 'SprintTree':
        db_class = Sprint_tree
    elif tree_type == 'PageTree':
        db_class = Page_tree
    elif tree_type == 'UICaseTree':
        db_class = UI_case_tree
    elif tree_type == 'APICaseTree':
        db_class = API_case_tree
    def rm_node(node_id):
        # Recursively delete `node_id` and every descendant, each level in
        # its own atomic block; returns [ok_flag, message].
        try:
            with transaction.atomic():
                db_class.objects.filter(id=node_id).delete()
                nodes = db_class.objects.filter(parent_id=node_id)
                for node in nodes:
                    node_id = node.id
                    result = rm_node(node_id)
                    if not result[0]:
                        return [False,result[1]]
                return [True, '成功']
        except Exception as e:
            logger.error('delete node fail %s' % e)
            raise Exception('delete node fail %s' % e)
    try:
        with transaction.atomic():
            result = rm_node(node_id)
            # logger.debug(result)
            if result[0]:
                # Shift later siblings down by one to keep `order` contiguous.
                sibling_nodes = db_class.objects.filter(project_id=project_id).filter(parent_id=parent_id).filter(order__gt=order).order_by('order')
                if sibling_nodes.exists():
                    # logger.info('重新排序节点')
                    for node in sibling_nodes:
                        node.order = node.order - 1
                        node.save()
                return HttpResponse('success')
            else:
                return HttpResponse(result[1])
    except Exception as e:
        return HttpResponse('%s' % e)
# 拖动树节点
def drag_tree_node(request):
    """Handle drag-and-drop of a tree node.

    `point` selects the drop mode: 'top' (insert above the target),
    'bottom' (insert below it) or 'append' (make it the target's last
    child).  Sibling `order` values are shifted to make room before the
    dragged node is re-ordered/re-parented.

    SECURITY NOTE(review): `eval(parmas['info'])` evaluates request data as
    Python code — this should be json.loads once the client payload format
    is confirmed.
    """
    try:
        parmas = request.POST
        parmas = eval(parmas['info'])
        target = parmas['target']
        source = parmas['source']
        operation = parmas['point']
        tree_type = parmas['treeType']
        if tree_type == 'SprintTree':
            db_class = Sprint_tree
        elif tree_type == 'PageTree':
            db_class = Page_tree
        elif tree_type == 'UICaseTree':
            db_class = UI_case_tree
        elif tree_type == 'APICaseTree':
            db_class = API_case_tree
        if operation == 'top' and target['parentID'] == 0:
            # Dropping above a root is rejected: only one root is allowed.
            return HttpResponse('保存失败,只能有一个根节点节点')
        elif operation == 'top' and target['parentID'] != 0: # drop above the target node
            # logger.info('正在重新排序节点')
            # Make room: push everything after target+1 down by one.
            target_sibling_nodes = db_class.objects.filter(project_id=target['projectID']).filter(parent_id=target['parentID']).filter(order__gt=target['order']+1)
            for node in target_sibling_nodes:
                node.order = node.order + 1
                node.save()
            # logger.info('正在更新被拖拽节点的顺序')
            source_node = db_class.objects.filter(project_id=target['projectID']).get(id=source['id'])
            source_node.order = target['order'] + 1
            source_node.parent_id = target['parentID']
            source_node.save()
        elif operation == 'bottom' and target['parentID'] != 0: # drop below the target node
            # logger.info('正在重新排序节点')
            # Make room: push the target and everything after it down by one.
            target_sibling_nodes = db_class.objects.filter(project_id=target['projectID']).filter(parent_id=target['parentID']).filter(order__gte=target['order'])
            for node in target_sibling_nodes:
                node.order = node.order + 1
                node.save()
            # logger.info('正在更新被拖拽节点的顺序')
            source_node = db_class.objects.filter(project_id=target['projectID']).get(id=source['id'])
            source_node.order = target['order']
            source_node.parent_id = target['parentID']
            source_node.save()
        elif operation == 'append':
            # Append as the target's last child (max(order)+1, or 1).
            target_sibling_nodes = db_class.objects.filter(project_id=target['projectID']).filter(parent_id=target['id'])
            if target_sibling_nodes.exists():
                max_order = target_sibling_nodes.aggregate(Max('order'))['order__max']
                order = max_order + 1
            else:
                order = 1
            source_node = db_class.objects.filter(project_id=target['projectID']).get(id=source['id'])
            source_node.order = order
            source_node.parent_id = target['id']
            source_node.save()
        return HttpResponse('success')
    except Exception as e:
        logger.error('%s' % e)
        return HttpResponse('%s' % e)
# 复制节点树中叶子节点
def copy_tree_leaf_node(request):
params = request.POST
try:
node_parent_id = params['nodeParentID']
source_node_ID = params['sourceNodeID']
node_text = params['nodeText']
state = params['state']
iconcls = params['iconCls']
attributes = params['attributes']
tree_type = params['treeType'] # 获取树类型
project_id = params['projectID']
if tree_type == 'UICaseTree':
sub_nodes = UI_case_tree.objects.filter(project_id=project_id).filter(parent_id=node_parent_id)
if sub_nodes.exists():
max_order = sub_nodes.aggregate(Max('order'))['order__max']
order = max_order + 1
else:
order = 1
with transaction.atomic():
# logger.info('正在复制基础用例信息')
node_obj = UI_case_tree(text=node_text, state=state, parent_id=node_parent_id, iconCls=iconcls, attributes=attributes, project_id=project_id, order=order)
node_obj.save()
node_obje_id = node_obj.id
# logger.info('正在复制用例步骤')
sub_nodes = UI_test_case_step.objects.filter(case_id=source_node_ID).order_by('order')
for sub_node in sub_nodes:
step_order = sub_node.order
status = sub_node.status
object_type = sub_node.object_type
page_name = sub_node.page_name
object = sub_node.object
exec_operation = sub_node.exec_operation
input_params = sub_node.input_params
output_params = sub_node.output_params
assert_type = sub_node.assert_type
assert_pattern = sub_node.assert_pattern
run_times = sub_node.run_times
try_for_failure = sub_node.try_for_failure
object_id = sub_node.object_id
case_id = node_obje_id
ui_case_step_obj = UI_test_case_step(order=step_order, status=status, object_type= object_type,object=object, exec_operation=exec_operation,
input_params=input_params, output_params=output_params, assert_type=assert_type, assert_pattern=assert_pattern,
run_times=run_times,try_for_failure=try_for_failure, page_name=page_name,case_id=case_id, object_id=object_id)
ui_case_step_obj.save()
elif tree_type == 'APICaseTree':
sub_nodes = API_case_tree.objects.filter(project_id=project_id).filter(parent_id=node_parent_id)
if sub_nodes.exists():
max_order = sub_nodes.aggregate(Max('order'))['order__max']
order = max_order + 1
else:
order = | |
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD. See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import numpy as np
import ase.io
from os import path
from nomad.datamodel import EntryArchive
from nomad.units import ureg as units
from nomad.datamodel.metainfo.simulation.run import Run, Program, TimeRun
from nomad.datamodel.metainfo.simulation.system import (
System, Atoms)
from nomad.datamodel.metainfo.simulation.method import (
Method, Electronic, BasisSet)
from nomad.datamodel.metainfo.simulation.calculation import (
Calculation, Dos, DosValues, Charges)
from nomad.parsing.file_parser import TextParser, Quantity
from .metainfo.lobster import x_lobster_section_cohp, x_lobster_section_coop
'''
This is a LOBSTER code parser.
'''
# Elementary charge and electronvolt as plain floats in SI base units,
# for fast scalar arithmetic without pint unit objects.
e = (1 * units.e).to_base_units().magnitude
eV = (1 * units.eV).to_base_units().magnitude
def parse_ICOXPLIST(fname, scc, method):
    """Parse ICOOPLIST.lobster / ICOHPLIST.lobster into `scc`.

    method is 'o' (COOP) or 'h' (COHP); it selects which x_lobster section
    is created and which attribute names are written.  Fills pair labels,
    distances, version-specific columns (translations or bond counts) and
    the integrated values at the Fermi level, one row per spin channel.
    Silently returns when `fname` does not exist.
    """
    def icoxp_line_split(string):
        # Split one data line; the column count distinguishes LOBSTER versions.
        tmp = string.split()
        # LOBSTER version 3 and above
        if len(tmp) == 8:
            return [tmp[1], tmp[2], float(tmp[3]), [int(tmp[4]),
                    int(tmp[5]), int(tmp[6])], float(tmp[7])]
        # LOBSTER versions below 3
        elif len(tmp) == 6:
            return [tmp[1], tmp[2], float(tmp[3]), float(tmp[4]), int(tmp[5])]
    icoxplist_parser = TextParser(quantities=[
        Quantity('icoxpslist_for_spin', r'\s*CO[OH]P.*spin\s*\d\s*([^#]+[-\d\.]+)',
                 repeats=True,
                 sub_parser=TextParser(quantities=[
                     Quantity('line',
                              # LOBSTER version 3 and above
                              r'(\s*\d+\s+\w+\s+\w+\s+[\.\d]+\s+[-\d]+\s+[-\d]+\s+[-\d]+\s+[-\.\d]+\s*)|'
                              # LOBSTER versions below 3
                              r'(\s*\d+\s+\w+\s+\w+\s+[\.\d]+\s+[-\.\d]+\s+[\d]+\s*)',
                              repeats=True, str_operation=icoxp_line_split)])
                 )
    ])
    if not path.isfile(fname):
        return
    icoxplist_parser.mainfile = fname
    icoxplist_parser.parse()
    icoxp = []
    for spin, icoxplist in enumerate(icoxplist_parser.get('icoxpslist_for_spin')):
        lines = icoxplist.get('line')
        if lines is None:
            break
        # Column 5 is an int (bond count) only in the pre-v3 format; v3+
        # carries a translation vector instead.
        if type(lines[0][4]) is int:
            a1, a2, distances, tmp, bonds = zip(*lines)
        else:
            a1, a2, distances, v, tmp = zip(*lines)
        icoxp.append(0)
        icoxp[-1] = list(tmp)
        if spin == 0:
            # Create the section once; pair labels are spin-independent.
            if method == 'o':
                section = scc.m_create(x_lobster_section_coop)
            elif method == 'h':
                section = scc.m_create(x_lobster_section_cohp)
            setattr(section, "x_lobster_number_of_co{}p_pairs".format(
                method), len(list(a1)))
            setattr(section, "x_lobster_co{}p_atom1_labels".format(
                method), list(a1))
            setattr(section, "x_lobster_co{}p_atom2_labels".format(
                method), list(a2))
            setattr(section, "x_lobster_co{}p_distances".format(
                method), np.array(distances) * units.angstrom)
            # version specific entries: only one of v/bonds was bound above.
            if 'v' in locals():
                setattr(section, "x_lobster_co{}p_translations".format(
                    method), list(v))
            if 'bonds' in locals():
                setattr(section, "x_lobster_co{}p_number_of_bonds".format(
                    method), list(bonds))
        if len(icoxp) > 0:
            # Re-written each spin iteration with all rows gathered so far.
            setattr(section, "x_lobster_integrated_co{}p_at_fermi_level".format(
                method), np.array(icoxp) * units.eV)
def parse_COXPCAR(fname, scc, method, logger):
    """Parse COOPCAR.lobster / COHPCAR.lobster energy-resolved curves into `scc`.

    method is 'o' (COOP) or 'h' (COHP).  Reuses the section created by
    parse_ICOXPLIST when present, fills pair labels/distances, then the
    per-energy (average and per-pair, plain and integrated) values.  The
    column count distinguishes spin-paired (2*pairs+3 columns) from
    spin-polarized (4*pairs+5 columns) files.  Silently returns when
    `fname` does not exist; warns and returns on malformed content.

    Fix vs. original: the integrated-values setattr was emitted twice
    verbatim at the end; the duplicate is removed.
    """
    coxpcar_parser = TextParser(quantities=[
        Quantity('coxp_pairs', r'No\.\d+:(\w{1,2}\d+)->(\w{1,2}\d+)\(([\d\.]+)\)\s*?',
                 repeats=True),
        Quantity('coxp_lines', r'\n\s*(-*\d+\.\d+(?:[ \t]+-*\d+\.\d+)+)',
                 repeats=True)
    ])
    if not path.isfile(fname):
        return
    coxpcar_parser.mainfile = fname
    coxpcar_parser.parse()
    # Reuse the section parse_ICOXPLIST may have created already.
    if method == 'o':
        if not scc.x_lobster_section_coop:
            section = scc.m_create(x_lobster_section_coop)
        else:
            section = scc.x_lobster_section_coop
    elif method == 'h':
        if not scc.x_lobster_section_cohp:
            section = scc.m_create(x_lobster_section_cohp)
        else:
            section = scc.x_lobster_section_cohp
    pairs = coxpcar_parser.get('coxp_pairs')
    if pairs is None:
        logger.warning('No CO{}P values detected in CO{}PCAR.lobster.'.format(
            method.upper(), method.upper()))
        return
    a1, a2, distances = zip(*pairs)
    number_of_pairs = len(list(a1))
    setattr(section, "x_lobster_number_of_co{}p_pairs".format(
        method), number_of_pairs)
    setattr(section, "x_lobster_co{}p_atom1_labels".format(
        method), list(a1))
    setattr(section, "x_lobster_co{}p_atom2_labels".format(
        method), list(a2))
    setattr(section, "x_lobster_co{}p_distances".format(
        method), np.array(distances) * units.angstrom)
    coxp_lines = coxpcar_parser.get('coxp_lines')
    if coxp_lines is None:
        logger.warning('No CO{}P values detected in CO{}PCAR.lobster.'
                       'The file is likely incomplete'.format(
                           method.upper(), method.upper()))
        return
    # Transpose row-wise data into one tuple per column.
    coxp_lines = list(zip(*coxp_lines))
    setattr(section, "x_lobster_number_of_co{}p_values".format(
        method), len(coxp_lines[0]))
    setattr(section, "x_lobster_co{}p_energies".format(
        method), np.array(coxp_lines[0]) * units.eV)
    if len(coxp_lines) == 2 * number_of_pairs + 3:
        # Spin-paired: columns are [E, avg, avg_int, pair1, pair1_int, ...].
        coxp = [[x] for x in coxp_lines[3::2]]
        icoxp = [[x] for x in coxp_lines[4::2]]
        acoxp = [coxp_lines[1]]
        aicoxp = [coxp_lines[2]]
    elif len(coxp_lines) == 4 * number_of_pairs + 5:
        # Spin-polarized: spin-up block first, then spin-down.
        coxp = [x for x in zip(coxp_lines[5:number_of_pairs * 2 + 4:2],
                coxp_lines[number_of_pairs * 2 + 5: 4 * number_of_pairs + 4:2])]
        icoxp = [x for x in zip(coxp_lines[6:number_of_pairs * 2 + 5:2],
                 coxp_lines[number_of_pairs * 2 + 6: 4 * number_of_pairs + 5:2])]
        acoxp = [coxp_lines[1], coxp_lines[3]]
        aicoxp = [coxp_lines[2], coxp_lines[4]]
    else:
        logger.warning('Unexpected number of columns {} '
                       'in CO{}PCAR.lobster.'.format(len(coxp_lines),
                                                     method.upper()))
        return
    # FIXME: correct magnitude?
    setattr(section, "x_lobster_co{}p_values".format(
        method), np.array(coxp))
    setattr(section, "x_lobster_average_co{}p_values".format(
        method), np.array(acoxp))
    setattr(section, "x_lobster_integrated_co{}p_values".format(
        method), np.array(icoxp) * units.eV)
    setattr(section, "x_lobster_average_integrated_co{}p_values".format(
        method), np.array(aicoxp) * units.eV)
def parse_CHARGE(fname, scc):
    """Parse Mulliken and Loewdin atomic charges from CHARGE.lobster into *scc*.

    Creates one Charges section per analysis method; does nothing when the
    file is missing or contains no matching charge lines.
    """
    parser = TextParser(quantities=[
        Quantity(
            'charges', r'\s*\d+\s+[A-Za-z]{1,2}\s+([-\d\.]+)\s+([-\d\.]+)\s*', repeats=True)
    ])
    if not path.isfile(fname):
        return
    parser.mainfile = fname
    parser.parse()
    charges = parser.get('charges')
    if charges is None:
        return
    # column 0 holds the Mulliken charges, column 1 the Loewdin charges
    columns = list(zip(*charges))
    for column, analysis_method in zip(columns, ("mulliken", "loewdin")):
        sec_charges = scc.m_create(Charges)
        sec_charges.analysis_method = analysis_method
        sec_charges.kind = "integrated"
        sec_charges.value = np.array(column) * units.elementary_charge
def parse_DOSCAR(fname, run, logger):
    """Parse total and lm-projected DOS from a DOSCAR.lobster file into *run*.

    :param fname: path of the DOSCAR.lobster file
    :param run: Run section the parsed data is attached to
    :param logger: logger used for malformed-file warnings
    """

    def parse_species(run, atomic_numbers):
        """
        If we don't have any structure from the underlying DFT code, we can
        at least figure out what atoms we have in the structure. The best place
        to get this info from is the DOSCAR.lobster
        """
        if not run.system:
            system = run.m_create(System)
            system.atoms = Atoms(species=atomic_numbers, periodic=[True, True, True])

    def translate_lm(lm):
        # Map a LOBSTER orbital label (leading principal quantum number
        # stripped via lm[1:]) to an [l, m] pair.
        lm_dictionary = {
            's': [0, 0],
            'p_z': [1, 0],
            'p_x': [1, 1],
            'p_y': [1, 2],
            'd_z^2': [2, 0],
            'd_xz': [2, 1],
            'd_yz': [2, 2],
            'd_xy': [2, 3],
            'd_x^2-y^2': [2, 4],
            'z^3': [3, 0],
            'xz^2': [3, 1],
            'yz^2': [3, 2],
            'xyz': [3, 3],
            'z(x^2-y^2)': [3, 4],
            'x(x^2-3y^2)': [3, 5],
            'y(3x^2-y^2)': [3, 6],
        }
        return lm_dictionary.get(lm[1:])

    if not path.isfile(fname):
        return

    energies = []
    dos_values = []
    integral_dos = []
    atom_projected_dos_values = []
    atom_index = 0          # 0 while reading the total-DOS block
    n_atoms = 0
    n_dos = 0               # number of energy grid points
    atomic_numbers = []
    lms = []                # orbital labels per atom

    with open(fname) as f:
        for i, line in enumerate(f):
            if i == 0:
                # header: number of atoms
                n_atoms = int(line.split()[0])
            if i == 1:
                # cell volume — parsed but unused
                _ = float(line.split()[0]) * units.angstrom**3
            if i == 5:
                # number of DOS values per block
                n_dos = int(line.split()[2])
            if 'Z=' in line:
                # start of the projected-DOS block of the next atom
                atom_index += 1
                atom_projected_dos_values.append([])
                lms.append((line.split(';')[-1]).split())
                atomic_numbers.append(int(line.split(';')[-2].split('=')[1]))
                continue
            if i > 5:
                line = [float(x) for x in line.split()]
                if atom_index == 0:
                    # still in the total-DOS block
                    energies.append(line[0])
                    if len(line) == 3:
                        # spin-restricted: energy, dos, integrated dos
                        dos_values.append([line[1]])
                        integral_dos.append([line[2]])
                    elif len(line) == 5:
                        # spin-polarized: energy, dos(up, dn), integrated(up, dn)
                        dos_values.append([line[1], line[2]])
                        integral_dos.append([line[3], line[4]])
                else:
                    atom_projected_dos_values[-1].append(line[1:])

    if len(atomic_numbers) > 0 and len(atomic_numbers) == n_atoms:
        parse_species(run, atomic_numbers)

    if n_dos == 0:
        return

    if len(dos_values) == n_dos:
        dos = run.calculation[0].m_create(Dos, Calculation.dos_electronic)
        dos.n_energies = n_dos
        dos.energies = energies * units.eV
        value = list(zip(*dos_values))
        n_electrons = sum(atomic_numbers)
        index = (np.abs(energies)).argmin()
        # integrated dos at the Fermi level should be the number of electrons
        n_valence_electrons = int(round(sum(integral_dos[index])))
        n_core_electrons = n_electrons - n_valence_electrons
        # NOTE(review): the core-electron offset is split evenly over the spin
        # channels via len(integral_dos[0]) — confirm this is intended.
        value_integrated = np.array(list(zip(*integral_dos))) + n_core_electrons / len(integral_dos[0])
        for spin_i in range(len(value)):
            dos_total = dos.m_create(DosValues, Dos.total)
            dos_total.spin = spin_i
            dos_total.value = value[spin_i] * (1 / units.eV)
            dos_total.value_integrated = value_integrated[spin_i]
    else:
        logger.warning('Unable to parse total dos from DOSCAR.lobster, \
it doesn\'t contain enough dos values')
        return

    for atom_i, pdos in enumerate(atom_projected_dos_values):
        if len(pdos) != n_dos:
            logger.warning('Unable to parse atom lm-projected dos from DOSCAR.lobster, \
it doesn\'t contain enough dos values')
            continue
        if len(lms[atom_i]) == len(pdos[0]):
            # we have the same lm-projections for spin up and dn
            # NOTE(review): bare `eV` here (vs `units.eV` above) — confirm it is
            # defined at module level, otherwise this line raises NameError.
            dos_values = np.array([[lmdos] for lmdos in zip(*pdos)]) / eV
        elif len(lms[atom_i]) * 2 == len(pdos[0]):
            pdos_up = list(zip(*pdos))[0::2]
            pdos_dn = list(zip(*pdos))[1::2]
            dos_values = np.array([[a, b] for a, b in zip(pdos_up, pdos_dn)]) / eV
        else:
            logger.warning('Unexpected number of columns in DOSCAR.lobster')
            return
        for lm_i, lm in enumerate(lms[atom_i]):
            for spin_i in range(len(dos_values[lm_i])):
                section_pdos = dos.m_create(DosValues, Dos.atom_projected)
                section_pdos.atom_index = atom_i
                section_pdos.spin = spin_i
                section_pdos.m_kind = 'real_orbital'
                section_pdos.lm = translate_lm(lm)
                section_pdos.value = dos_values[lm_i][spin_i]
# TextParser for the LOBSTER standard-output (main) file: program version,
# start time, the detected PAW program (e.g. VASP), the local basis setup,
# charge-spilling diagnostics and a normal-termination marker.
mainfile_parser = TextParser(quantities=[
    Quantity('program_version', r'^LOBSTER\s*v([\d\.]+)\s*', repeats=False),
    Quantity('datetime', r'starting on host \S* on (\d{4}-\d\d-\d\d\sat\s\d\d:\d\d:\d\d)\s[A-Z]{3,4}',
             repeats=False),
    Quantity('x_lobster_code',
             r'detecting used PAW program... (.*)', repeats=False),
    Quantity('x_lobster_basis',
             r'setting up local basis functions...\s*((?:[a-zA-Z]{1,2}\s+\(.+\)(?:\s+\d\S+)+\s+)+)',
             repeats=False,
             sub_parser=TextParser(quantities=[
                 Quantity('x_lobster_basis_species',
                          r'([a-zA-Z]+){1,2}\s+\((.+)\)((?:\s+\d\S+)+)\s+', repeats=True)
             ])),
    # NOTE(review): repeats=True — apparently one spilling block per spin
    # channel; confirm against a spin-polarized output file.
    Quantity('spilling', r'((?:spillings|abs. tot)[\s\S]*?charge\s*spilling:\s*\d+\.\d+%)',
             repeats=True,
             sub_parser=TextParser(quantities=[
                 Quantity('abs_total_spilling',
                          r'abs.\s*total\s*spilling:\s*(\d+\.\d+)%', repeats=False),
                 Quantity('abs_charge_spilling',
                          r'abs.\s*charge\s*spilling:\s*(\d+\.\d+)%', repeats=False)
             ])),
    # "finished in <n>" marks a clean end of the run
    Quantity('finished', r'finished in (\d)', repeats=False),
])
class LobsterParser:
def __init__(self):
pass
def parse(self, mainfile: str, archive: EntryArchive, logger=None):
mainfile_parser.mainfile = mainfile
mainfile_path = path.dirname(mainfile)
mainfile_parser.parse()
run = archive.m_create(Run)
run.program = Program(
name='LOBSTER',
version=str(mainfile_parser.get('program_version')))
# FIXME: There is a timezone info present as well, but datetime support for timezones
# is bad and it doesn't support some timezones (for example CEST).
# That leads to test failures, so ignore it for now.
date = datetime.datetime.strptime(' '.join(mainfile_parser.get('datetime')),
'%Y-%m-%d at %H:%M:%S') - datetime.datetime(1970, 1, 1)
run.time_run = TimeRun(wall_start=date.total_seconds())
code = mainfile_parser.get('x_lobster_code')
# parse structure
if code is not None:
if code == 'VASP':
try:
structure = ase.io.read(mainfile_path + '/CONTCAR', format="vasp")
except FileNotFoundError:
logger.warning('Unable to parse structure info, no CONTCAR detected')
else:
logger.warning('Parsing of {} structure is not supported'.format(code))
if 'structure' in locals():
system = run.m_create(System)
system.atoms = Atoms(
lattice_vectors=structure.get_cell() * units.angstrom,
labels=structure.get_chemical_symbols(),
periodic=structure.get_pbc(),
positions=structure.get_positions() * units.angstrom)
if mainfile_parser.get('finished') is not None:
run.clean_end = True
else:
run.clean_end = False
scc = run.m_create(Calculation)
method = run.m_create(Method)
scc.method_ref = method
spilling = | |
common geno / total samples gives correctness when guessing most common for every sample
genotype_conc_per_marker = counts/n_samples
genotype_conc = np.average(genotype_conc_per_marker)
return genotype_conc
def get_pops_with_k(k, coords_by_pop):
    '''
    Return the unique populations that have at least k samples.

    Populations below the threshold are reported on stdout (prefixed "--").

    :param k: minimum number of samples a population must have
    :param coords_by_pop: dict mapping pop names to a list of coords for each sample of that pop
    :return: list of population names
    '''
    kept = []
    for pop, pop_coords in coords_by_pop.items():
        if len(pop_coords) >= k:
            kept.append(pop)
            continue
        # population IDs may be bytes (e.g. read from h5) — decode if possible
        try:
            print("-- {0}".format(pop.decode("utf-8")))
        except:
            print("-- {0}".format(pop))
    return kept
def f1_score_kNN(x, labels, labels_to_use, k = 5):
    '''
    Fit a k-nearest-neighbors classifier on (x, labels) and score its
    predictions on the same data.

    :param x: sample features
    :param labels: true label per sample
    :param labels_to_use: labels to report individual f1 scores for
    :param k: number of neighbors
    :return: (micro-averaged f1 score, vector of f1 scores per population)
    '''
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(x, labels)
    predictions = knn.predict(x)
    # vector of f1 scores, one per requested population
    per_pop = f1_score(y_true=labels, y_pred=predictions, labels=labels_to_use, average=None)
    micro_avg = f1_score(y_true=labels, y_pred=predictions, average="micro")
    return micro_avg, per_pop
def my_tf_round(x, d = 2, base = 0.5):
    '''
    Round input to the nearest multiple of base, keeping d decimals.

    :param x: tensor
    :param d: number of decimals to consider
    :param base: rounding to nearest base
    :return: x rounded
    '''
    scale = tf.constant(10 ** d, dtype=x.dtype)
    # snap to the nearest multiple of base, then truncate to d decimals
    nearest = base * tf.math.round(tf.math.divide(x, base))
    return tf.math.round(nearest * scale) / scale
def to_genotypes_sigmoid_round(data):
    '''
    Interpret data as genotypes by applying sigmoid
    function and rounding result to closest of 0.0, 0.5, 1.0

    :param data: n_samples x n_markers
    :return: data transformed
    '''
    # sigmoid squashes to (0, 1); my_tf_round with its default base=0.5 then
    # snaps each value to the nearest of 0.0, 0.5, 1.0
    data = tf.keras.activations.sigmoid(data)
    data = tf.map_fn(my_tf_round, data)
    return data
def to_genotypes_invscale_round(data, scaler_vals):
    '''
    Interpret data as genotypes by applying inverse scaling
    based on scaler_vals, and rounding result to closest integer.

    :param data: n_samples x n_markers
    :param scaler_vals: tuple of means and 1/stds that were used to scale the data.
    :return: data transformed
    '''
    means = scaler_vals[0]
    stds = scaler_vals[1]
    # NOTE(review): if data is a numpy array, genos = data.T is a *view*, so
    # the loop below un-scales data in place — confirm callers do not rely on
    # data being left unchanged.
    genos = data.T
    for m in range(len(genos)):
        # invert the scaling per marker: x * (1/std-factor) + mean
        genos[m] = np.add(np.multiply(genos[m],stds[m]), means[m])
    # round each genotype to the nearest integer (base=1.0)
    output = tf.map_fn(lambda x : my_tf_round(x, base = 1.0), genos.T)
    return output
class GenotypeConcordance(keras.metrics.Metric):
    '''
    Genotype concordance metric.
    Assumes pred and true are genotype values scaled the same way.
    '''

    def __init__(self, name='genotype_concordance', **kwargs):
        super(GenotypeConcordance, self).__init__(name=name, **kwargs)
        # NOTE(review): "accruary" is a typo for "accuracy"; kept unchanged
        # because the attribute name is observable object state.
        self.accruary_metric = tf.keras.metrics.Accuracy()

    def update_state(self, y_true, y_pred, sample_weight=None):
        # concordance is plain element-wise accuracy of the genotype values
        _ = self.accruary_metric.update_state(y_true=y_true, y_pred = y_pred)
        return y_pred

    def result(self):
        return self.accruary_metric.result()

    def reset_states(self):
        # The state of the metric will be reset at the start of each epoch.
        self.accruary_metric.reset_states()
def write_h5(filename, dataname, data, replace_file = False):
    '''
    Write data to a h5 file.

    An existing dataset called dataname is replaced. If opening the file for
    appending fails and replace_file is set, the whole file is overwritten
    with only the new dataset (all old data is lost).

    :param filename: directory and filename (with .h5 extension) of file to write to
    :param dataname: name of the dataset
    :param data: the data
    :param replace_file: overwrite the entire file when appending fails
    '''
    try:
        with h5py.File(filename, 'a') as hf:
            try:
                hf.create_dataset(dataname, data = data)
            except (RuntimeError, ValueError):
                # dataset already exists (or is incompatible): drop and rewrite
                print("Replacing dataset {0} in {1}".format(dataname, filename))
                del hf[dataname]
                hf.create_dataset(dataname, data = data)
    except OSError:
        print("Could not write to file {0}.".format(filename))
        if not replace_file:
            return
        print("Replacing {0}.".format(filename))
        with h5py.File(filename, 'w') as hf:
            try:
                hf.create_dataset(dataname, data = data)
            except RuntimeError:
                print("Could not replace {0}. Data not written.".format(filename))
def read_h5(filename, dataname):
    '''
    Read one dataset from a h5 file.

    :param filename: directory and filename (with .h5 extension) of file to read from
    :param dataname: name of the dataset in the h5
    :return: the data
    '''
    with h5py.File(filename, 'r') as hf:
        # [:] materializes the dataset before the file is closed
        return hf[dataname][:]
def get_pop_superpop_list(file):
    '''
    Read a population -> superpopulation mapping from file.

    Assumes file contains one population and superpopulation per line,
    separated by "," e.g.

    Kyrgyz,Central/South Asia
    Khomani,Sub-Saharan Africa

    :param file: directory, filename and extension of a file mapping populations to superpopulations.
    :return: a (n_pops) x 2 list
    '''
    return np.genfromtxt(file, usecols=(0, 1), dtype=str, delimiter=",")
def get_ind_pop_list_from_map(famfile, mapfile):
    '''
    Get a list of individuals and their populations from
    a .fam file that contains individual IDs, and
    a .map file that maps individual IDs to populations.

    The order of the individuals in the mapfile does not have to be the same
    as in the famfile. The output follows the famfile order.

    :param famfile: file with individual IDs in column 1
    :param mapfile: file with individual ID in column 0 and population in column 2
    :return: (n_samples x 2) array of ind_id, pop_id, or None on failure
    '''
    try:
        ind_list = np.genfromtxt(famfile, usecols=(1), dtype=str)
        print("Reading ind list from {0}".format(famfile))
        ind_pop_map_list = np.genfromtxt(mapfile, usecols=(0,2), dtype=str)
        print("Reading ind pop map from {0}".format(mapfile))
        ind_pop_map = {ind: pop for ind, pop in ind_pop_map_list}
        # look every famfile individual up in the map, preserving famfile order
        return np.array([[ind, ind_pop_map[ind]] for ind in ind_list])
    except Exception:
        # best-effort behavior kept from the original: report and return None
        # (fixed typo "gettinf" in the message)
        exc_str = traceback.format_exc()
        print("Error in getting ind pop list from map : {0}".format(exc_str))
def get_ind_pop_list(filestart):
    '''
    Get a list of individuals and their populations from a .fam file,
    or if that does not exist, try to find a .ind file.

    :param filestart: directory and file prefix of file containing sample info
    :return: an (n_samples)x(2) list where ind_pop_list[n] = [individual ID, population ID] of the n:th individual
    '''
    try:
        ind_pop_list = np.genfromtxt(filestart + ".fam", usecols=(1,0), dtype=str)
        print("Reading ind pop list from " + filestart + ".fam")
    except:
        # .fam missing/unreadable: fall back to the eigenstrat .ind layout
        ind_pop_list = np.genfromtxt(filestart + ".ind", usecols=(0,2), dtype=str)
        print("Reading ind pop list from " + filestart + ".ind")

    # Some files encode "pop:ind" in a single field; split and reverse those
    # into [ind, pop]. Probably not a general solution.
    # (Fixed: removed a stray debug print of every row inside the loop.)
    if ":" in ind_pop_list[0][0]:
        ind_pop_list = np.array([v[0].split(":")[::-1] for v in ind_pop_list])
    return ind_pop_list
def get_unique_pop_list(filestart):
    '''
    Get the unique populations from a .fam file.

    :param filestart: directory and file prefix of file containing sample info
    :return: an (n_pops) list of the unique populations (=families) in filestart.fam
    '''
    pops = np.genfromtxt(filestart + ".fam", usecols=(0), dtype=str)
    return np.unique(pops)
def get_coords_by_pop(filestart_fam, coords, pop_subset = None, ind_pop_list = []):
    '''
    Group the projected 2D coordinates in coords by population.

    Assumes that filestart_fam.fam contains samples in the same order as the
    coordinates in coords.

    :param filestart_fam: directory + filestart of fam file
    :param coords: a (n_samples) x 2 matrix of projected coordinates
    :param pop_subset: list of populations to plot samples from, if None then all are returned
    :param ind_pop_list: if specified, gives the ind and population IDs for the samples of coords. If None: assumed that filestart_fam has the correct info.
    :return: a dict that maps a population name to a list of 2D-coordinates (one pair of coords for every sample in the population)
    '''
    # byte-string IDs (e.g. read from h5) are decoded to str first; on any
    # failure the list is assumed to already contain str entries
    try:
        decoded = [[ind.decode('UTF-8'), pop.decode('UTF-8')] for ind, pop in ind_pop_list]
        ind_pop_list = np.array(decoded)
    except:
        pass

    if len(ind_pop_list) == 0:
        ind_pop_list = get_ind_pop_list(filestart_fam)
        unique_pops = get_unique_pop_list(filestart_fam)
    else:
        unique_pops = np.unique(ind_pop_list[:, 1])

    pop_per_sample = ind_pop_list[:, 1]
    coords_by_pop = {pop: [] for pop in unique_pops}
    for s, sample_coords in enumerate(coords):
        pop = pop_per_sample[s]
        if pop_subset is None or pop in pop_subset:
            coords_by_pop[pop].append(sample_coords)
    return coords_by_pop
def get_saved_epochs(train_directory):
    '''
    Get an ordered list of the epochs that have saved weights in the given directory.

    :param train_directory: directory where training data is stored
    :return: int list of sorted epochs
    '''
    epochs = set()
    for entry in os.listdir(train_directory + "/weights"):
        # filenames look like "<epoch>.<suffix>"; anything else is skipped
        prefix = entry.split("/")[-1].split(".")[0]
        try:
            epochs.add(int(prefix))
        except:
            continue
    return sorted(epochs)
def get_projected_epochs(encoded_data_file):
    '''
    Get an ordered list of the epochs that have projections saved in encoded_data_file.

    :param encoded_data_file: h5 file of encoded data
    :return: int list of sorted epochs
    '''
    if not os.path.isfile(encoded_data_file):
        print("Encoded data file not found: {0} ".format(encoded_data_file))
        return []
    epochs = set()
    encoded_data = h5py.File(encoded_data_file, 'r')
    for key in encoded_data.keys():
        # dataset names look like "<epoch>_<suffix>"; anything else is skipped
        prefix = key.split("_")[0]
        try:
            epochs.add(int(prefix))
        except:
            continue
    return sorted(epochs)
def write_metric_per_epoch_to_csv(filename, values, epochs):
    '''
    Append per-epoch metric values to a csv file and rewrite it.

    File format: epochs on the first row, corresponding values on the second.
    Any pre-existing data in the file is kept and extended.

    :param filename: full name and path of csv file
    :param values: array of metric values
    :param epochs: array of corresponding epochs
    :return: (all epochs, all values): the given data appended to any
             pre-existing data in the file
    '''
    saved_epochs = np.array([])
    saved_values = np.array([])
    # a missing/unreadable file simply means there is nothing to extend
    try:
        with open(filename, mode='r') as res_file:
            reader = csv.reader(res_file, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)
            saved_epochs = next(reader)
            saved_values = next(reader)
    except:
        pass

    all_epochs = np.concatenate((saved_epochs, epochs), axis=0)
    all_values = np.concatenate((saved_values, values), axis=0)

    with open(filename, mode='w') as res_file:
        writer = csv.writer(res_file, delimiter=',')
        writer.writerow(all_epochs)
        writer.writerow(np.array(all_values))
    return all_epochs, all_values
def plot_genotype_hist(genotypes, filename):
    '''
    Plots a histogram of all genotype values in the flattened genotype matrix.

    :param genotypes: array of genotypes
    :param filename: filename (including path, without extension) to save the pdf plot to
    '''
    unique, counts = np.unique(genotypes, return_counts=True)
    d = zip(unique, counts)
    plt.hist(np.ndarray.flatten(genotypes), bins=50)
    # for (near-)discrete data, annotate the per-value counts in the title
    if len(unique) < 5:
        plt.title(", ".join(["{:.2f} : {}".format(u, c) for (u,c) in d]), fontdict = {'fontsize' : 9})
    plt.savefig("{0}.pdf".format(filename))
    plt.close()
def get_superpop_pop_dict(pop_superpop_file):
    '''
    Get a dict mapping superpopulation IDs to a list of their population IDs.

    Assumes file contains one population and superpopulation per line,
    separated by "," e.g.
    Kyrgyz,Central/South Asia
    Khomani,Sub-Saharan Africa

    :param pop_superpop_file: name of file mapping populations to superpopulations
    :return: a dictionary mapping each superpopulation ID in the given file to a list of its subpopulations
    '''
    pop_superpop_list = get_pop_superpop_list(pop_superpop_file)
    superpop_pop_dict = {sp: [] for sp in np.unique(pop_superpop_list[:, 1])}
    for pop, superpop in pop_superpop_list:
        superpop_pop_dict[superpop].append(pop)
    return superpop_pop_dict
def genfromplink(fileprefix):
    '''
    Generate genotypes from plink data.
    Replace missing genotypes by the value 9.0.

    :param fileprefix: path and filename prefix of the plink data (bed, bim, fam)
    :return: tuple of (genotype matrix, bed.shape[0])
    '''
    (bim, fam, bed) = read_plink(fileprefix)
    # bed is a lazy (dask) array; materialize it
    genotypes = bed.compute()
    # plink encodes missing genotypes as NaN; downstream code expects 9.0
    genotypes[np.isnan(genotypes)] = 9.0
    # NOTE(review): bed.shape[0] is the first bed dimension — presumably the
    # number of markers; confirm against callers.
    return (genotypes, bed.shape[0])
def get_test_samples_stratified(genotypes, ind_pop_list, test_split):
'''
Generate a set of samples stratified by population from eigenstratgeno data.
Samples from populations with only one sample are considered as belonging to the same temporary population,
and stratified according to that. If there is only one such sample, another one is randomly assigned the
temporary population.
:param genotypes: (n_samples x n_markers) array of genotypes
:param ind_pop_list: (n_smaples x 2) list of individual id and population | |
# tests/test_execution/test_coercion.py (from johnpaulguzman/py-gql)
# -*- coding: utf-8 -*-
"""
Tests related to how raw JSON variables are coerced and forwared to the
execution context.
"""
import json
import pytest
from py_gql.exc import VariablesCoercionError
from py_gql.schema import (
Argument,
Field,
InputField,
InputObjectType,
ListType,
NonNullType,
ObjectType,
ScalarType,
Schema,
String,
)
from ._test_utils import assert_sync_execution
# All test coroutines in this module will be treated as asyncio-marked.
pytestmark = pytest.mark.asyncio
def _complex_parse(value):
if value == "SerializedValue":
return "DeserializedValue"
raise ValueError(value)
# Scalar whose parse/serialize only accept the "SerializedValue" sentinel.
ComplexScalar = ScalarType("ComplexScalar", _complex_parse, _complex_parse)

# Input object exercising nullable, list, non-null and custom-scalar fields.
TestInputObject = InputObjectType(
    "TestInputObject",
    [
        InputField("a", String),
        InputField("b", ListType(String)),
        InputField("c", NonNullType(String)),
        InputField("d", ComplexScalar),
    ],
)

# Input object nesting a non-null TestInputObject, for deep coercion errors.
TestNestedInputObject = InputObjectType(
    "TestNestedInputObject",
    [
        InputField("na", NonNullType(TestInputObject)),
        InputField("nb", NonNullType(String)),
    ],
)
def _inspect(name):
def _inspect_resolver(*_, **args):
return json.dumps(args.get(name, None), sort_keys=True)
return _inspect_resolver
def _field(name, argType, **kw):
    # A String field with a single "input" argument that echoes the coerced
    # argument back as JSON (see _inspect). Converted from a lambda assigned
    # to a name (PEP 8 E731); call signature is unchanged.
    return Field(
        name, String, [Argument("input", argType, **kw)], resolver=_inspect("input")
    )


# One field per coercion scenario under test.
TestType = ObjectType(
    "TestType",
    [
        _field("fieldWithObjectInput", TestInputObject),
        _field("fieldWithNullableStringInput", String),
        _field("fieldWithNonNullableStringInput", NonNullType(String)),
        _field(
            "fieldWithNonNullableStringInputAndDefaultArgumentValue",
            NonNullType(String),
            default_value="Hello World",
        ),
        _field(
            "fieldWithDefaultArgumentValue", String, default_value="Hello World"
        ),
        _field("fieldWithNestedObjectInput", TestNestedInputObject),
        _field("list", ListType(String)),
        _field("nnList", NonNullType(ListType(String))),
        _field("listNN", ListType(NonNullType(String))),
        _field("nnListNN", NonNullType(ListType(NonNullType(String)))),
    ],
)

_SCHEMA = Schema(TestType)
async def test_complex_input_inline_struct():
    """An inline input object is coerced and forwarded as given."""
    assert_sync_execution(
        _SCHEMA,
        """
        {
            fieldWithObjectInput(input: {a: "foo", b: ["bar"], c: "baz"})
        }
        """,
        expected_data={
            "fieldWithObjectInput": '{"a": "foo", "b": ["bar"], "c": "baz"}'
        },
        expected_errors=[],
    )
async def test_single_value_to_list_inline_struct():
    """A single inline value is coerced to a one-element list."""
    assert_sync_execution(
        _SCHEMA,
        """
        {
            fieldWithObjectInput(input: {a: "foo", b: "bar", c: "baz"})
        }
        """,
        expected_data={
            "fieldWithObjectInput": '{"a": "foo", "b": ["bar"], "c": "baz"}'
        },
        expected_errors=[],
    )
async def test_null_value_inline_struct():
    """Explicit inline nulls on nullable fields are preserved."""
    assert_sync_execution(
        _SCHEMA,
        """
        {
            fieldWithObjectInput(input: {a: null, b: null, c: "C", d: null})
        }
        """,
        expected_data={
            "fieldWithObjectInput": '{"a": null, "b": null, "c": "C", "d": null}'
        },
        expected_errors=[],
    )
async def test_null_value_in_list_inline_struct():
    """A null inside an inline list of a nullable item type is preserved."""
    assert_sync_execution(
        _SCHEMA,
        """
        {
            fieldWithObjectInput(input: {b: ["A",null,"C"], c: "C"})
        }
        """,
        expected_data={
            "fieldWithObjectInput": '{"b": ["A", null, "C"], "c": "C"}'
        },
        expected_errors=[],
    )
async def test_does_not_use_incorrect_value_inline_struct():
    """A list literal passed where an input object is expected is rejected."""
    # NOTE(review): the expected error position (6, 56) encodes the exact
    # byte layout of the query string — keep the string formatting untouched.
    assert_sync_execution(
        _SCHEMA,
        """
        {
            fieldWithObjectInput(input: ["foo", "bar", "baz"])
        }
        """,
        expected_data={"fieldWithObjectInput": None},
        expected_errors=[
            (
                'Argument "input" of type "TestInputObject" was provided invalid '
                'value ["foo", "bar", "baz"] (Expected Object but got ListValue)',
                (6, 56),
                "fieldWithObjectInput",
            )
        ],
    )
async def test_uses_parse_literal_on_scalar_types_inline_struct():
    """Custom scalar literals go through the scalar's parse_literal."""
    assert_sync_execution(
        _SCHEMA,
        """
        {
            fieldWithObjectInput(input: {c: "foo", d: "SerializedValue"})
        }
        """,
        expected_data={
            "fieldWithObjectInput": '{"c": "foo", "d": "DeserializedValue"}'
        },
        expected_errors=[],
    )
async def test_complex_input_variable():
    """An input object supplied as a variable is coerced and forwarded."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: TestInputObject) {
            fieldWithObjectInput(input: $input)
        }
        """,
        expected_data={
            "fieldWithObjectInput": '{"a": "foo", "b": ["bar"], "c": "baz"}'
        },
        expected_errors=[],
        variables={"input": {"a": "foo", "b": ["bar"], "c": "baz"}},
    )
async def test_uses_default_value_when_not_provided():
    """A variable default is used when the variable is omitted."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: TestInputObject = {a: "foo", b: ["bar"], c: "baz"}) {
            fieldWithObjectInput(input: $input)
        }
        """,
        expected_data={
            "fieldWithObjectInput": '{"a": "foo", "b": ["bar"], "c": "baz"}'
        },
        expected_errors=[],
        variables={},
    )
async def test_single_value_to_list_variable():
    """A single variable value is coerced to a one-element list."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: TestInputObject) {
            fieldWithObjectInput(input: $input)
        }
        """,
        expected_data={
            "fieldWithObjectInput": '{"a": "foo", "b": ["bar"], "c": "baz"}'
        },
        expected_errors=[],
        variables={"input": {"a": "foo", "b": "bar", "c": "baz"}},
    )
async def test_complex_scalar_input_variable():
    """Custom scalar variable values go through the scalar's parser."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: TestInputObject) {
            fieldWithObjectInput(input: $input)
        }
        """,
        expected_data={
            "fieldWithObjectInput": '{"c": "foo", "d": "DeserializedValue"}'
        },
        expected_errors=[],
        variables={"input": {"c": "foo", "d": "SerializedValue"}},
    )
async def test_error_on_null_for_nested_non_null():
    """Null for a nested non-null field fails variable coercion."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: TestInputObject) {
            fieldWithObjectInput(input: $input)
        }
        """,
        variables={"input": {"a": "foo", "b": "bar", "c": None}},
        expected_exc=(
            VariablesCoercionError,
            (
                'Variable "$input" got invalid value '
                '{"a": "foo", "b": "bar", "c": null} (Expected non-nullable type '
                "String! not to be null at value.c)"
            ),
        ),
    )
async def test_error_on_incorrect_type():
    """A scalar value where an input object is expected fails coercion."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: TestInputObject) {
            fieldWithObjectInput(input: $input)
        }
        """,
        variables={"input": "foo bar"},
        expected_exc=(
            VariablesCoercionError,
            (
                'Variable "$input" got invalid value "foo bar" (Expected type '
                "TestInputObject to be an object)"
            ),
        ),
    )
async def test_errors_on_omission_of_nested_non_null():
    """Omitting a required nested field fails variable coercion."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: TestInputObject) {
            fieldWithObjectInput(input: $input)
        }
        """,
        variables={"input": {"a": "foo", "b": "bar"}},
        expected_exc=(
            VariablesCoercionError,
            (
                'Variable "$input" got invalid value {"a": "foo", "b": "bar"} '
                "(Field c of required type String! was not provided at value.c)"
            ),
        ),
    )
async def test_fail_on_deep_nested_errors_with_multiple_errors():
    """Deeply nested coercion failures are all collected and reported."""
    with pytest.raises(VariablesCoercionError) as exc_info:
        assert_sync_execution(
            _SCHEMA,
            """
            query ($input: TestNestedInputObject) {
                fieldWithNestedObjectInput(input: $input)
            }
            """,
            variables={"input": {"na": {"a": "foo"}}},
        )
    assert str(exc_info.value) == (
        'Variable "$input" got invalid value {"na": {"a": "foo"}} '
        "(Field c of required type String! was not provided at value.na.c),\n"
        'Variable "$input" got invalid value {"na": {"a": "foo"}} '
        "(Field nb of required type String! was not provided at value.nb)"
    )
async def test_fail_on_addition_of_unknown_input_field():
    """Extra fields not defined on the input type fail coercion."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: TestInputObject) {
            fieldWithObjectInput(input: $input)
        }
        """,
        variables={
            "input": {"a": "foo", "b": "bar", "c": "baz", "extra": "dog"}
        },
        expected_exc=(
            VariablesCoercionError,
            (
                'Variable "$input" got invalid value {"a": "foo", "b": "bar", "c": '
                '"baz", "extra": "dog"} (Field extra is not defined by type '
                "TestInputObject)"
            ),
        ),
    )
async def test_allows_nullable_inputs_to_be_omitted():
    """A nullable argument may be omitted entirely."""
    assert_sync_execution(
        _SCHEMA,
        """
        {
            fieldWithNullableStringInput
        }
        """,
        expected_data={"fieldWithNullableStringInput": "null"},
        expected_errors=[],
    )
async def test_allows_nullable_inputs_to_be_omitted_in_a_variable():
    """A nullable variable may be left unprovided."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($value: String) {
            fieldWithNullableStringInput(input: $value)
        }
        """,
        expected_data={"fieldWithNullableStringInput": "null"},
        expected_errors=[],
    )
async def test_allows_nullable_inputs_to_be_set_to_null_in_a_variable():
    """A nullable variable may be explicitly null."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($value: String) {
            fieldWithNullableStringInput(input: $value)
        }
        """,
        expected_data={"fieldWithNullableStringInput": "null"},
        expected_errors=[],
        variables={"value": None},
    )
async def test_allows_nullable_inputs_to_be_set_to_a_value_in_a_variable():
    """A nullable variable may carry a concrete value."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($value: String) {
            fieldWithNullableStringInput(input: $value)
        }
        """,
        expected_data={"fieldWithNullableStringInput": '"a"'},
        expected_errors=[],
        variables={"value": "a"},
    )
async def test_allows_nullable_inputs_to_be_set_to_a_value_directly():
    """A nullable argument may be given inline."""
    assert_sync_execution(
        _SCHEMA,
        """
        {
            fieldWithNullableStringInput(input: "a")
        }
        """,
        expected_data={"fieldWithNullableStringInput": '"a"'},
        expected_errors=[],
    )
async def test_allows_non_nullable_inputs_to_be_omitted_given_a_default():
    """An omitted non-null variable falls back to its declared default."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($value: String = "default") {
            fieldWithNonNullableStringInput(input: $value)
        }
        """,
        expected_data={"fieldWithNonNullableStringInput": '"default"'},
        expected_errors=[],
    )
async def test_does_not_allow_non_nullable_inputs_to_be_omitted_in_a_variable():
    """Omitting a non-null variable without default is an error."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($value: String!) {
            fieldWithNonNullableStringInput(input: $value)
        }
        """,
        expected_exc=(
            VariablesCoercionError,
            'Variable "$value" of required type "String!" was not provided.',
        ),
    )
async def test_does_not_allow_non_nullable_inputs_to_be_set_to_null_in_a_variable():
    """An explicit null for a non-null variable is an error."""
    # BUG FIX: the variables dict previously used the key "input" while the
    # operation declares "$value", so this test actually exercised the
    # "was not provided" path (duplicating the test above) instead of the
    # explicit-null path its name describes. Use the declared variable name
    # and expect the null-specific message (same wording as the list
    # variants below, e.g. test_does_not_allow_non_null_lists_to_be_null).
    assert_sync_execution(
        _SCHEMA,
        """
        query ($value: String!) {
            fieldWithNonNullableStringInput(input: $value)
        }
        """,
        variables={"value": None},
        expected_exc=(
            VariablesCoercionError,
            'Variable "$value" of required type "String!" must not be null.',
        ),
    )
async def test_allows_non_nullable_inputs_to_be_set_to_a_value_in_a_variable():
    """A non-null variable accepts a concrete value."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($value: String!) {
            fieldWithNonNullableStringInput(input: $value)
        }
        """,
        expected_data={"fieldWithNonNullableStringInput": '"a"'},
        expected_errors=[],
        variables={"value": "a"},
    )
async def test_allows_non_nullable_inputs_to_be_set_to_a_value_directly():
    """A non-null argument accepts an inline value."""
    assert_sync_execution(
        _SCHEMA,
        """
        query {
            fieldWithNonNullableStringInput(input: "a")
        }
        """,
        expected_data={"fieldWithNonNullableStringInput": '"a"'},
        expected_errors=[],
    )
async def test_reports_error_for_missing_non_nullable_inputs():
    """Omitting a required argument yields a field error, not an exception."""
    # NOTE(review): the expected position (2, 33) is tied to how
    # assert_sync_execution lays out the document — confirm before reformatting.
    assert_sync_execution(
        _SCHEMA,
        "{ fieldWithNonNullableStringInput }",
        expected_data={"fieldWithNonNullableStringInput": None},
        expected_errors=[
            (
                'Argument "input" of required type "String!" was not provided',
                (2, 33),
                "fieldWithNonNullableStringInput",
            )
        ],
    )
async def test_reports_error_for_array_passed_into_string_input():
    """A list variable for a String! input fails coercion."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($value: String!) {
            fieldWithNonNullableStringInput(input: $value)
        }
        """,
        variables={"value": [1, 2, 3]},
        expected_exc=(
            VariablesCoercionError,
            (
                'Variable "$value" got invalid value [1, 2, 3] (String cannot '
                'represent list value "[1, 2, 3]")'
            ),
        ),
    )
async def test_reports_error_for_non_provided_variables_for_non_nullable_inputs():
    # This is an *invalid* query, but it should be an *executable* query.
    """An undeclared variable on a required argument yields a field error."""
    # NOTE(review): the "value" variable is not used by this query —
    # apparently a copy/paste leftover; confirm before removing.
    assert_sync_execution(
        _SCHEMA,
        """
        {
            fieldWithNonNullableStringInput(input: $foo)
        }
        """,
        variables={"value": [1, 2, 3]},
        expected_data={"fieldWithNonNullableStringInput": None},
        expected_errors=[
            (
                'Argument "input" of required type "String!" was provided the '
                'missing variable "$foo"',
                (6, 50),
                "fieldWithNonNullableStringInput",
            )
        ],
    )
async def test_uses_default_when_no_runtime_value_is_provided_to_a_non_null_argument():
    """The argument default applies when the optional variable is absent."""
    assert_sync_execution(
        _SCHEMA,
        """
        query optionalVariable($optional: String) {
            fieldWithNonNullableStringInputAndDefaultArgumentValue(input: $optional)
        }
        """,
        expected_data={
            "fieldWithNonNullableStringInputAndDefaultArgumentValue": '"Hello World"'
        },
        expected_errors=[],
    )
async def test_allows_lists_to_be_null():
    """A nullable list variable may be null."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: [String]) {
            list(input: $input)
        }
        """,
        expected_data={"list": "null"},
        expected_errors=[],
        variables={"input": None},
    )
async def test_allows_lists_to_contain_values():
    """A nullable list variable accepts concrete values."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: [String]) {
            list(input: $input)
        }
        """,
        expected_data={"list": '["A"]'},
        expected_errors=[],
        variables={"input": ["A"]},
    )
async def test_allows_lists_to_contain_null():
    """A nullable-item list variable may contain nulls."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: [String]) {
            list(input: $input)
        }
        """,
        expected_data={"list": '["A", null, "B"]'},
        expected_errors=[],
        variables={"input": ["A", None, "B"]},
    )
async def test_does_not_allow_non_null_lists_to_be_null():
    """A non-null list variable rejects null."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: [String]!) {
            nnList(input: $input)
        }
        """,
        expected_exc=(
            VariablesCoercionError,
            'Variable "$input" of required type "[String]!" must not be null.',
        ),
        variables={"input": None},
    )
async def test_allows_non_null_lists_to_contain_values():
    """A non-null list variable ([String]!) accepts a list of values."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: [String]!) {
            nnList(input: $input)
        }
        """,
        expected_data={"nnList": '["A"]'},
        expected_errors=[],
        variables={"input": ["A"]},
    )
async def test_allows_non_null_lists_to_contain_null():
    """A non-null list of nullable items ([String]!) accepts null items."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: [String]!) {
            nnList(input: $input)
        }
        """,
        expected_data={"nnList": '["A", null, "B"]'},
        expected_errors=[],
        variables={"input": ["A", None, "B"]},
    )
async def test_does_not_allow_non_null_lists_of_non_nulls_to_be_null():
    """A null value for a [String!]! variable is rejected during coercion."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: [String!]!) {
            nnListNN(input: $input)
        }
        """,
        variables={"input": None},
        expected_exc=(
            VariablesCoercionError,
            'Variable "$input" of required type "[String!]!" must not be null.',
        ),
    )
async def test_allows_non_null_lists_of_non_nulls_to_contain_values():
    """A [String!]! variable accepts a list of non-null values."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: [String!]!) {
            nnListNN(input: $input)
        }
        """,
        variables={"input": ["A"]},
        expected_data={"nnListNN": '["A"]'},
        expected_errors=[],
    )
async def test_does_not_allow_non_null_lists_of_non_nulls_to_contain_null():
    """A null item inside a [String!]! variable is rejected, with the offending
    index reported in the error message."""
    assert_sync_execution(
        _SCHEMA,
        """
        query ($input: [String!]!) {
            nnListNN(input: $input)
        }
        """,
        variables={"input": ["A", None, "B"]},
        expected_exc=(
            VariablesCoercionError,
            (
                'Variable "$input" got invalid value ["A", null, "B"] (Expected '
                "non-nullable type String! not to be null at value[1])"
            ),
        ),
    )
async def test_does_not_allow_invalid_types_to_be_used_as_values():
    """A variable declared with a non-input type (an object type here) fails
    variable coercion with an explanatory message."""
    with pytest.raises(VariablesCoercionError) as exc_info:
        assert_sync_execution(
            _SCHEMA,
            """
            query ($input: TestType!) {
                fieldWithObjectInput(input: $input)
            }""",
            variables={"input": ["A", "B"]},
        )
    assert (
        'Variable "$input" expected value of type "TestType!" which cannot be used as '
        "an input type." in str(exc_info.value)
    )
async def test_does_not_allow_unknown_types_to_be_used_as_values():
with pytest.raises(VariablesCoercionError) as exc_info:
assert_sync_execution(
_SCHEMA,
"""
query ($input: | |
feed_dict=None,
hide_tqdm_progress=False):
"""
Evaluate the network.
The network evaluation method works by walking through the graph from all input nodes, along all possible paths.
The evaluation of each path is stopped as soon as the total path amplitude falls below the amplitude_cutoff limit.
:param amplitude_cutoff: amplitude below which a wave is not further propagated through the network
:type amplitude_cutoff: float
:param max_endpoints: evaluation is interrupted early, if more than max_endpoints exist in evaluation
:type max_endpoints: int
:param use_shared_default: set to true if shared defaults should be used with SymNum's (higher speed),
set to false if the default value of each SymNum should be used instead (higher accuracy). Default: True
:type use_shared_default: bool
:param feed_dict: Feed dictionary for SymNum variables. Default: None
:type feed_dict: dict
:return:
updates self.nodes_to_output
a dictionary whose keys are node names. For each node name a list of quadruplets is given
[(amplitude, phase, delay, path), (amplitude, phase, delay, path), ...].
.. note::
Phases are simply added together and not reset to a finite range.
"""
for node in self.nodes:
self.nodes_to_output[node] = []
# at the start of the evaluation the endpoints are at the inputs
current_endpoints = [input[3] for input in self.inputs]
endpoint = {'point': [input[3] for input in self.inputs],
'delay': [input[2] for input in self.inputs],
'phase': [input[1] for input in self.inputs],
'amp': [input[0] for input in self.inputs],
'path': ['' for node in current_endpoints]}
# keep propagating waves, while there is a front endpoint that is above the amplitude cutoff
pbar = tqdm(disable=hide_tqdm_progress, unit='paths', desc='Network evaluation in progress')
while len(current_endpoints) > 0:
assert len(current_endpoints) < max_endpoints, "evaluation interrupted, too many endpoints"
# in these we will collect the parameters of the next endpoints
new_endpoints, new_delays, new_phases, new_amplitudes, new_paths = [], [], [], [], []
# iterate over all current endpoints
for node_index, node in enumerate(current_endpoints):
# add the current endpoint to the final output
self.nodes_to_output[node].append((endpoint['amp'][node_index],
endpoint['phase'][node_index],
endpoint['delay'][node_index],
endpoint['path'][node_index] + '-' + node))
# check if any edge's start is the current endpoint
for edge in self.edges:
current_attn = (endpoint['amp'][node_index] * edge.attenuation)
current_attn_fl = current_attn.eval(feed_dict=feed_dict,
use_shared_default=use_shared_default) if hasattr(current_attn,
'eval') else current_attn
if (node == edge.start
and not self.stopping_criterion(current_attn_fl, amplitude_cutoff)):
# if yes, add the new endpoint to the new endpoints (unless the amp. is too low)
new_endpoints.append(edge.end)
new_delays.append(endpoint['delay'][node_index] + edge.delay)
new_phases.append(endpoint['phase'][node_index] + edge.phase)
new_amplitudes.append(current_attn)
new_paths.append(endpoint['path'][node_index] + '-' + node)
pbar.update(1)
# set the current endpoint parameters to the new ones and go to the top
current_endpoints = new_endpoints
endpoint['delay'] = new_delays
endpoint['amp'] = new_amplitudes
endpoint['phase'] = new_phases
endpoint['path'] = new_paths
def visualize(self, show_edge_labels=True, path='network', skip_colon=False, format='pdf'):
    """
    Render the network as a Graphviz graph.

    :param show_edge_labels: when True, annotate each edge with its amplitude, phase and delay.
    :type show_edge_labels: bool
    :param path: output path for the rendered file; missing directories are created automatically.
    :type path: str
    :param skip_colon: drop nodes whose name contains ':' (used for PhysicalNetwork visualization).
    :type skip_colon: bool
    :param format: any output format supported by Graphviz, e.g. 'pdf', 'svg'.
    :type format: str
    :return: path to the rendered file (possibly relative), or 0 when graphviz is unavailable.
    """
    try:
        from graphviz import Digraph
    except ModuleNotFoundError:
        # graphviz is an optional dependency: warn and bail out gracefully
        warnings.warn("Graphviz Package was not found, visualization is skipped.")
        return 0
    graph = Digraph('structs',
                    graph_attr={'ranksep': '0.5', 'overlap': 'false', 'splines': 'true',
                                'rankdir': 'TB', 'constraint': 'true', 'nodesep': '2'},
                    node_attr={'shape': 'record'},
                    edge_attr={},
                    engine='dot')
    for node_name in self.nodes:
        if skip_colon and ':' in node_name:
            continue
        graph.node(node_name, node_name)
    for edge in self.edges:
        tail, head = edge.start.replace(":", ""), edge.end.replace(":", "")
        if show_edge_labels:
            graph.edge(tail, head,
                       label='a{}, p{}, d{}'.format(edge.attenuation, edge.phase, edge.delay))
        else:
            graph.edge(tail, head)
    directory, _ = os.path.split(path)
    if directory != '':
        Path(directory).mkdir(parents=True, exist_ok=True)
    return graph.render(path, view=False, format=format)
def get_html_result(self, name, time_symbol='t', evaluate=False, feed_dict=None, use_shared_default=False,
                    linebreak_limit=1, precision=0, path='out.html'):
    """
    Create a html file with rendered math equations describing all waves arriving at the given node(s).

    .. warning:: MathJax is required to render the equations; it is loaded from a CDN when the
       html file is opened in a browser with an internet connection.

    :param name: name of the node to get the result from, or a list of node names whose results
        are compiled into a single html file
    :type name: str or list
    :param time_symbol: character used to describe time/delays in the equation
    :type time_symbol: str
    :param evaluate: if True, SymNums are evaluated using feed_dict/use_shared_default;
        otherwise SymNums are represented symbolically by their name
    :type evaluate: bool
    :param feed_dict: dictionary of variable values by name; unspecified variables use defaults
    :type feed_dict: dict
    :param use_shared_default: use shared defaults for SymNums (higher speed) when feed_dict is
        None; ignored when feed_dict is provided. Default: False
    :type use_shared_default: bool
    :param linebreak_limit: approx. characters per line in the latex string; 1 breaks after
        every term, 0 yields a single line. Default: 1
    :type linebreak_limit: int
    :param precision: number of significant digits; 0 uses the str() default
    :type precision: int
    :param path: output path of the html file; missing directories are created automatically
    :type path: str
    :raises ValueError: if a requested node does not exist in the network
    :return: writes a html file at the given path; on IOError the exception object is returned
        (NOTE(review): docstring previously claimed IOError is raised — confirm callers before
        changing this to a re-raise)
    """
    template = """
    <!DOCTYPE html>
    <html>
    <head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width">
    <title>{}</title>
    <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
    <script id="MathJax-script" async
    src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js">
    </script>
    </head>
    <body>
    {}
    </body>
    </html>
    """
    if not isinstance(name, list):
        name = [name]
    raw_string = ''
    for node in name:
        if node not in self.nodes:
            # BUG FIX: the original concatenated the whole `name` list into the message,
            # which raised TypeError instead of the intended ValueError (also fixes the
            # "retrive" typo).
            raise ValueError("attempted to retrieve wave at non-existing node " + node)
        raw_string += '<p> Waves at node ' + node + '<br><br> \(' \
                      + self.get_latex_result(name=node,
                                              time_symbol=time_symbol,
                                              evaluate=evaluate,
                                              feed_dict=feed_dict,
                                              use_shared_default=use_shared_default,
                                              linebreak_limit=linebreak_limit,
                                              precision=precision) + '\)</p>'
    output_html = template.format('waves at nodes' + str(name), raw_string)
    head, tail = os.path.split(path)
    if head != '':
        # create missing output directories
        Path(head).mkdir(parents=True, exist_ok=True)
    try:
        with open(path, 'w') as file:
            file.write(output_html)
    except IOError as e:
        # preserved behavior: the error object is returned, not raised
        return e
def get_latex_result(self, name, time_symbol='t', evaluate=False, feed_dict=None, use_shared_default=False,
linebreak_limit=0, precision=0):
"""
Returns a latex string that describes all waves arriving at the given node.
SymNums are shown as variables, unless evaluate is set to True.
:param name: Name of the node to get result from
:type name: str
:param time_symbol: character used to describe time/delays in the equation
:type time_symbol: str
:param evaluate: If evaluate is True, SymNum's will be evaluated using the feed_dict and use_shared_default values specified. Otherwise SymNums are represented by their name as variables.
:type evaluate: bool
:param feed_dict: a dictionary specifying values of variables by name. If only some variables are specified, for all other variables the default value will be used.
:type feed_dict: dict
:param use_shared_default: set to true if shared defaults should be used with SymNums (higher speed) when no \
feed_dict is provided, set to false if the default value of each SymNum should be used instead (higher accuracy). \
The value is ignored if feed_dict is not None. Default: False
:type use_shared_default: bool
:param linebreak_limit: A line break will be added roughly every linebreak_limit chars in the latex string. Set to 1 for a linebreak after each term. Set to 0 to get a latex string on a single line. | |
<filename>TEST/main.py
"""
Simulator of NB-IoT cell at the MAC level.
In this simulator :
- Slotted Aloha contention
- 3 M/D/1-PS queues
- Impatience
- Ghost messages suppression
- User Plane optimization
"""
from enum import Enum
import math
import random
import time
# General Conf
id_max = 1000000  # communication ids wrap around modulo this value
debug_ = False
single_tone = True  # selects 1500 RU/s instead of 1000 in ComputationRuNpusch
# NPRACH
rep_ra = None  # [1,2,4,8,16,32,64,128] defined in run_simu
# backoff window upper bound in ms, indexed by the backoff indicator
backoff_ms = [0, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 524288, 524288, 524288]
# NPDCCH
nb_CCE_subframe = 2  # control channel elements per subframe
npdcch_window = 64
# G is computed thanks to the infos in the configuration (number of RU per seconds)
# Communications
# immutable configuration below ("conf immuable")
""" conf immuable """
preamble_duration_ms = 6.4
total_nprach_sc = 48  # total NPRACH subcarriers
ratio_npbch_npss_nsss = 2.5 / 10  # DL share consumed by broadcast/sync channels
epsilon_lambda_t = 0.1
RU_max_UL = 10  # max resource units per UL allocation (see ComputationRscCom)
class typeEvt(Enum):
    """Kind of a discrete simulation event."""
    ARRIVAL = 0      # communication enters one of the queues
    DEPARTURE = 1    # a queued communication finished service
    IMPATIENCE = 2   # communication abandons before service completion
    ARRIVAL_COM = 3  # new communication arrives in the system (before random access)
    RAO = 4          # NPRACH random-access opportunity
class channel(Enum):
    """NB-IoT physical channels modeled by the simulator."""
    NPDCCH = 0  # narrowband downlink control channel
    NPDSCH = 1  # narrowband downlink shared channel
    NPUSCH = 2  # narrowband uplink shared channel
    NPRACH = 3  # narrowband random-access channel
class evt:
    """A scheduled simulation event: kind, timestamp, the communication it
    concerns (may be None) and the queue it targets (may be None)."""

    def __init__(self, type_, date_, com_, queue_):
        super().__init__()
        # store the event descriptor fields verbatim
        self.type_, self.date_, self.com_, self.queue_ = type_, date_, com_, queue_
class communication:
    """One NB-IoT communication: service progress, per-queue service speed and
    the number of random-access attempts performed so far."""

    def __init__(self, id_, D_service_time):
        super().__init__()
        self.id_ = id_
        self.rate_ = 0  # fraction of the service already completed (0..1)
        self.D_service_time = D_service_time  # depends on link budget
        self.nb_attempts = 0  # random-access attempts so far

    def rateModifier(self, t):
        """Advance the completed service fraction by t."""
        self.rate_ = self.rate_ + t

    def nvlleTentative(self):
        """Register one more (re-)attempt."""
        self.nb_attempts = self.nb_attempts + 1
def removeEvt(evts, type_, id_, fil):
    """Remove the first event matching (type_, communication id, queue) from evts.

    :return: 0 when an event was removed, None otherwise.
    """
    for position, candidate in enumerate(evts):
        found = (candidate.type_ == type_
                 and candidate.com_.id_ == id_
                 and candidate.queue_ == fil)
        if found:
            del evts[position]
            return 0
    # debug: nothing matched
    if type_ == typeEvt.DEPARTURE:
        # No problem, the impatience was triggered when the communication had not even begun to be served yet.
        pass
    elif type_ == typeEvt.IMPATIENCE:
        # No problem, since a communication may have been served before the impatience in another queue arises.
        pass
def changeId(evts, type_, id_, id_new):
    """Rewrite the communication id of the first event matching (type_, id_).

    :return: 0 when an event was updated, None (after a debug print) otherwise.
    """
    target = next(
        (candidate for candidate in evts
         if candidate.type_ == type_ and candidate.com_.id_ == id_),
        None,
    )
    if target is not None:
        target.com_.id_ = id_new
        return 0
    # debug
    print("Error. We tried to change the id of in an evt but can't find the event")
def ComputationNvxTaux(communications, simu_time, next_evt):
    """
    Processor-sharing progress update for the M/D/1-PS queues.

    Looks for the earliest service completion (DEPARTURE) happening strictly
    before the next scheduled event; advances every queued communication's
    completion fraction (rate_) up to that completion time, or up to next_evt
    if no communication completes first.

    :param communications: list of queues, each a list of communication objects
    :param simu_time: current simulation time (seconds)
    :param next_evt: the next already-scheduled event
    :return: a DEPARTURE evt if some communication completes before next_evt,
        otherwise None
    """
    queue_DEPARTURE = None
    com_DEPARTURE = None
    time_DEPARTURE = None
    # LOOKING FOR A POSSIBLE DEPARTURE
    for queue_index, fil in enumerate(communications):
        len_fil = len(fil)
        if len_fil == 0:
            continue
        for c in fil:
            # processor sharing: each communication gets an equal share of the server
            tx = c.D_service_time / len_fil  # per second, the ratio of the communication that is successfully allocated
            percentage_addition = (next_evt.date_ - simu_time) * tx
            # Test if the communication will be finished before the next event.
            # (percentage_addition == 0 means simultaneous events, i.e. the rate was
            # already 1 and its DEPARTURE has already been recorded.)
            if c.rate_ + percentage_addition >= 1:
                DEPARTURE_time = simu_time + (1 - c.rate_) / tx
                if time_DEPARTURE is None or time_DEPARTURE > DEPARTURE_time:
                    queue_DEPARTURE = queue_index
                    com_DEPARTURE = c
                    time_DEPARTURE = DEPARTURE_time
    # WHETHER OR NOT TO SCHEDULE THE NEXT DEPARTURE
    if queue_DEPARTURE is not None:
        # A DEPARTURE is to be planned before the next event: advance everybody
        # only up to the departure instant.
        for f in communications:
            len_fil = len(f)
            for c in f:
                tx = c.D_service_time / len_fil
                c.rateModifier((time_DEPARTURE - simu_time) * tx)
        return evt(typeEvt.DEPARTURE, time_DEPARTURE, com_DEPARTURE, queue_DEPARTURE)
    # No completion before next_evt: advance everybody up to next_evt.
    # BUG FIX: the original reused the stale `tx` leaked from the search loop above
    # (0 whenever the last queue was empty) for every communication; the per-queue
    # share must be recomputed here, exactly as in the DEPARTURE branch.
    for f in communications:
        len_fil = len(f)
        for c in f:
            tx = c.D_service_time / len_fil
            c.rateModifier((next_evt.date_ - simu_time) * tx)
    return None
def nextEvenement(evenements, fil):
    """Return the first event scheduled for queue `fil`, or None if there is none."""
    return next((candidate for candidate in evenements if candidate.queue_ == fil), None)
def addEvt(evts, evt):
    """Insert `evt` into the event list, kept sorted by date_ ascending.

    Ties are stable: an event with an equal date_ goes after the existing ones.

    :return: always 0.
    """
    for position, existing in enumerate(evts):
        if existing.date_ > evt.date_:
            evts.insert(position, evt)
            return 0
    # later than (or equal to) every existing event: append at the end
    evts.append(evt)
    return 0
def simulation_slotted_aloha(coms):
    """Simulate one slotted-Aloha contention round over nprach_sc preambles.

    Each communication draws a preamble uniformly at random; a preamble chosen
    by exactly one communication succeeds, any collision fails all contenders.

    :param coms: communications contending in this random-access opportunity
    :return: (successful_coms, unsuccessful_coms)
    """
    chosen_preamble = [[] for _ in range(nprach_sc)]
    for com in coms:
        chosen_preamble[random.randint(0, nprach_sc - 1)].append(com)
    successful_coms = []
    unsuccessful_coms = []
    for contenders in chosen_preamble:
        if len(contenders) == 1:
            successful_coms.append(contenders[0])
        else:
            unsuccessful_coms.extend(contenders)
    return successful_coms, unsuccessful_coms
def ComputationRALength():
    """Return the duration in seconds of one NPRACH random-access opportunity:
    preamble duration (ms) times the NPRACH repetition factor, rounded up."""
    return math.ceil(preamble_duration_ms * rep_ra) / 1000
def ComputationRscCom(payload_size_bits):
    """Compute the per-communication resource consumption on each channel.

    :param payload_size_bits: size of the UL payload in bits
    :return: (CCEs on NPDCCH, RUs on NPDSCH, RUs on NPUSCH), each scaled by
        the channel's repetition factor
    """
    c, ds, us = 0, 0, 0
    for i in rsc_NPDCCH:
        c += (i * rep_cc)
    # NOTE(review): presumably the extra DCIs needed to schedule the payload
    # itself (one per UL allocation of RU_max_UL RUs) — confirm
    c += (math.ceil(payload_size_bits / (link_budget_UL * RU_max_UL)) * rep_cc)
    for i in rsc_NPDSCH:
        ds += (math.ceil(i / link_budget_DL) * rep_ds)
    for i in rsc_NPUSCH:
        us += (math.ceil(i / link_budget_UL) * rep_us)
    # RUs for the payload itself on top of the signalling messages
    us += (math.ceil(payload_size_bits / link_budget_UL) * rep_us)
    return c, ds, us
def ComputationRuNpusch():
    """
    Return the number of RU per second available on the NPUSCH (mu_npusch).

    We consider RU of 24 RE so multitone.
    It is the factor 1000 of the DEPARTURE which considers that there are 1000 RU per second.
    """
    # fraction of the uplink left over after NPRACH opportunities
    # (time share of the preamble times the subcarrier share used for NPRACH)
    ratio_npusch = 1 - (math.ceil(preamble_duration_ms * rep_ra) / nprach_period_ms) * (nprach_sc / total_nprach_sc)
    nb_RU_sec = 1000
    # NOTE(review): presumably single-tone RUs yield 1500 RU/s instead of 1000 — confirm
    if (single_tone): nb_RU_sec = 1500
    return ratio_npusch * nb_RU_sec
def ComputationRuNpdsch():
    """
    Return the number of RU per second in the NPDSCH (mu_npdsch).
    """
    # share of DL subframes not used by NPBCH/NPSS/NSSS broadcast and sync
    ratio_shared = 1 - ratio_npbch_npss_nsss
    # RU per 10 ms frame, excluding the subframes reserved for the NPDCCH
    ru_frame = 10 * ratio_shared * (1 - 1 / npdcch_period)
    return 100 * ru_frame  # 100 frames per second
def ComputationCceNpdcch():
    """
    Return the mean number of CCE (control channel element) per second (mu_npdcch).
    """
    # 1000 subframes/s, reduced by the broadcast/sync share, with nb_CCE_subframe
    # CCEs per NPDCCH subframe occurring every npdcch_period subframes
    return 1000 * (1 - ratio_npbch_npss_nsss) * nb_CCE_subframe / npdcch_period
def simu():
# Initialisation
evenements = []
simu_time = 0
gen_id = 0
c_in_queue = [[], [], []]
com_for_RA = []
success = 0
failure_tot = 0
failures = [0, 0, 0]
failure_rach = 0
nb_total_clients = 0
total_impatience = 0
# First Com
first_iat = random.expovariate(lambd)
c = communication(gen_id, None) # None because not yet in a queue (different D's)
evenements.append(evt(typeEvt.ARRIVAL_COM, simu_time + first_iat, c,
None)) # The arrival is common to the three queues and the size of the communication is not yet fixed.
gen_id = (gen_id + 1) % id_max
# First NPRACH opportunity
addEvt(evenements, evt(typeEvt.RAO, simu_time + nprach_period_ms / 1000, None, None))
tempsRA = ComputationRALength()
CCEs, RU_DL, RU_UL = ComputationRscCom(payload_size_bits)
D_npdcch, D_npdsch, D_npusch = ComputationCceNpdcch() / CCEs, ComputationRuNpdsch() / RU_DL, ComputationRuNpusch() / RU_UL
T_NPDCCH, T_NPDSCH, T_NPUSCH = npdcch_window * rep_cc * npdcch_period / (0.75 * 1000), 0.128 / 0.75, 0.064
T_npdcch, T_npdsch, T_npusch = CCEs * T_NPDCCH, len(rsc_NPDSCH) * T_NPDSCH, len(rsc_NPUSCH) * T_NPUSCH + math.ceil(
payload_size_bits / (link_budget_UL * RU_max_UL)) * T_NPUSCH
# print("SIMULATOR----------")
# print(D_npdcch, D_npdsch, D_npusch)
# print(CCEs, RU_DL, RU_UL)
# print(T_npdcch, T_npdsch, T_npusch)
# print(ComputationCceNpdcch(), ComputationRuNpdsch(), ComputationRuNpusch())
size_RAO = 0
nb_RAO = 0
while (nb_total_clients <= nb_simu_clients):
n_evt = evenements.pop(0) # pop the first item on the list instead of the last one
simu_time = n_evt.date_
queue_concerne = n_evt.queue_
if (n_evt.type_ == typeEvt.ARRIVAL):
c_in_queue[queue_concerne].append(n_evt.com_)
if (evenements[
0].date_ == simu_time): # No need to Computationate the new rates if the next event is simultaneous.
pass
else:
res = ComputationNvxTaux(c_in_queue, simu_time, evenements[
0]) # The new rate is Computationated for all queues as events can have global impact.
if (res != None):
addEvt(evenements, res)
else:
pass
elif (n_evt.type_ == typeEvt.DEPARTURE):
try:
c_in_queue[queue_concerne].remove(n_evt.com_)
except:
pass
removeEvt(evenements, typeEvt.IMPATIENCE, n_evt.com_.id_, queue_concerne)
if (evenements[
0].date_ == simu_time): # No need to Computationate the new rates if the next event is simultaneous.
pass
else:
res = ComputationNvxTaux(c_in_queue, simu_time, evenements[
0]) # The new rate is Computationated for all queues as events can have global impact.
if (res != None):
addEvt(evenements, res)
else:
pass
success += 1
elif (n_evt.type_ == typeEvt.IMPATIENCE):
total_impatience += n_evt.com_.rate_
id_remove = n_evt.com_.id_
for i in range(3):
# Remove evenements of the queues
if (i != queue_concerne):
removeEvt(evenements, typeEvt.IMPATIENCE, id_remove, i)
# No need to remove a DEPARTURE event since a DEPARTURE is not put in the stack until it is the next item processed.
# Remove communications from the queues
# num_remove = 0
for c in c_in_queue[i]:
if (c.id_ == id_remove):
c_in_queue[i].remove(c)
break
n_evt.com_.nvlleTentative()
if (n_evt.com_.nb_attempts == max_attempts):
failures[queue_concerne] += 1
failure_tot += 1
else:
bo = random.randint(0, backoff_ms[indice_bo]) / 1000
addEvt(evenements, evt(typeEvt.ARRIVAL_COM, simu_time + bo, | |
import csv
from pathlib import Path
import shutil
import itertools
from judge import Judge
from student import Student
from util import (
PresentationAssignmentError,
OutputVerificationError,
time_slot_to_time,
column_name_to_date,
date_and_time_to_index,
index_to_datetime,
index_to_datetime_str,
get_column_name_from_datetime,
get_time_slot_availability_string_from_datetime,
value_to_excel_csv_string,
)
from config import (
JudgeColumnNames,
StudentColumnNames,
JUDGE_CATEGORIES,
STUDENT_CATEGORIES,
CATEGORY_NUMBERS_TO_LABELS,
CATEGORY_NUMBERS_TO_LABELS_JUDGES,
INPUT_FOLDER_PATH,
OUTPUT_FOLDER_PATH,
STUDENT_DATA,
JUDGE_DATA,
ERROR_FILE,
)
def create_judge_roster(csv_filename):
    """Parse the judge signup CSV and build a list of Judge objects.

    Each availability column contributes two half-hour slot indices (the hour
    and the hour + 0.5) per selected time slot.
    """
    with open(csv_filename, encoding="utf-8") as csvfile:
        reader = csv.DictReader(csvfile)
        roster = []
        for row in reader:
            # skip completely blank rows
            if not any(row.values()):
                continue
            availability = []
            for column_name, times_selected in row.items():
                if column_name not in JudgeColumnNames.JUDGE_AVAILABILITY_COLUMN_NAMES:
                    continue
                column_date = column_name_to_date(column_name)
                if not times_selected:
                    continue
                for time_slot in times_selected.split(","):
                    if not time_slot:
                        continue
                    base_index = date_and_time_to_index(
                        column_date,
                        time_slot_to_time(time_slot),
                    )
                    # each selected hour contributes two half-hour slots
                    availability.extend((base_index, base_index + 0.5))
            roster.append(
                Judge(
                    judge_id=reader.line_num,  # line number doubles as a sequential id
                    first=row[JudgeColumnNames.FIRST_NAME],
                    last=row[JudgeColumnNames.LAST_NAME],
                    email=row[JudgeColumnNames.EMAIL],
                    phone=row[JudgeColumnNames.PHONE],
                    preferred_categories=[
                        JUDGE_CATEGORIES[category]
                        for category in JUDGE_CATEGORIES
                        if category in row[JudgeColumnNames.PREFERRED_CATEGORIES]
                    ],
                    is_paper_reviewer=row[JudgeColumnNames.IS_PAPER_REVIEWER] == "Yes",
                    presentation_availability=availability,
                )
            )
    return roster
def create_student_roster(csv_filename):
    """Parse the student submissions CSV and build a list of Student objects."""
    with open(csv_filename, encoding="utf-8") as csvfile:
        roster = []
        for row in csv.DictReader(csvfile):
            participation = row[StudentColumnNames.PARTICIPATION_TYPE]
            roster.append(
                Student(
                    student_id=int(row[StudentColumnNames.SUBMISSION_NUMBER]),
                    is_paper="Oral" in participation,
                    is_poster="Poster" in participation,
                    category=STUDENT_CATEGORIES[row[StudentColumnNames.CATEGORY]],
                    poster_pdf=row[StudentColumnNames.POSTER_PDF_UPLOAD],
                    full_paper_pdf=row[StudentColumnNames.PAPER_PDF_UPLOAD],
                )
            )
    return roster
def assign_presentations(judge_roster, student_roster):
    """Assign poster presentations to judges, category by category.

    Categories with the fewest available judges are processed first. Within a
    category, students are dealt out so that assignments accumulate evenly:
    no judge receives its (k+1)-th presentation before every eligible judge
    has k.

    :param judge_roster: list of Judge objects
    :param student_roster: list of Student objects
    :raises PresentationAssignmentError: if a category lacks enough judges to
        cover all of its poster presenters.
    """
    # Aggregate all students by category who will be poster presenters
    students_by_cat = {
        cat: [
            student
            for student in student_roster
            if student.is_poster and student.category == cat
        ]
        for cat in STUDENT_CATEGORIES.values()
    }
    category_judges = {category: [] for category in JUDGE_CATEGORIES.values()}
    for judge in judge_roster:
        # Filter out judges who only review papers
        if not judge.presentation_slots:
            continue
        for category in judge.preferred_categories:
            category_judges[category].append(judge)
    # Most constrained categories (fewest judges) first.
    for cat in sorted(
        category_judges, key=lambda category: len(category_judges[category])
    ):
        students = students_by_cat[cat][:]
        assigned_yet = 0
        while students and assigned_yet <= len(students_by_cat[cat]):
            judges = [
                judge
                for judge in category_judges[cat]
                if len(judge.assigned_presentations) <= assigned_yet
                and judge.presentation_slots >= 1
            ]
            for judge in judges:
                if not students:
                    break
                student = students.pop()
                judge.assign_presentation(student)
                # opportunistically give the same judge the student's paper
                if judge.is_paper_reviewer and student.is_paper:
                    judge.assign_paper(student)
            assigned_yet += 1
        if students:
            # BUG FIX: the original reported len(category_judges[category]), where
            # `category` was a stale loop variable left over from the judge loop
            # above; the judge count of the *current* category (cat) is intended.
            error_message = (
                f"The category {CATEGORY_NUMBERS_TO_LABELS[cat]} did not have enough judges to evaluate all presentations.\n"
                "Either assign more judges to this category or transfer some students out of this category.\n"
                f"There are {len(students_by_cat[cat])} student(s) in this category who are presenting posters and {len(category_judges[cat])} "
                "judge(s) who have submitted availability to evaluate poster presentations.\n"
            )
            raise PresentationAssignmentError(error_message)
def assign_papers(judge_roster, student_roster):
    """Assign paper reviewers so that each paper submission gets two distinct judges.

    Categories with the fewest reviewers are processed first. Students needing a
    second reviewer are re-queued; a student who would get the same judge twice
    is deferred to a round-robin conflict-resolution pass.

    :param judge_roster: list of Judge objects
    :param student_roster: list of Student objects
    """
    # Aggregate, per category, the paper students who still need reviewers
    # (the original comment said "poster presenters", but the filter below
    # selects paper students with fewer than two assigned judges)
    students_by_cat = {
        cat: [
            student
            for student in student_roster
            if student.is_paper
            and len(student.paper_judges) < 2
            and student.category == cat
        ]
        for cat in STUDENT_CATEGORIES.values()
    }
    category_judges = {category: [] for category in JUDGE_CATEGORIES.values()}
    for judge in judge_roster:
        # Filter out judges who only do posters
        if not judge.is_paper_reviewer:
            continue
        for category in judge.preferred_categories:
            category_judges[category].append(judge)
    # Most constrained categories (fewest reviewers) first
    for cat in sorted(
        category_judges, key=lambda category: len(category_judges[category])
    ):
        students = students_by_cat[cat][:]
        conflict_students = []
        assigned_yet = 0
        while students:
            # only judges whose paper load has not yet exceeded the current round
            judges = [
                judge
                for judge in category_judges[cat]
                if len(judge.assigned_papers) <= assigned_yet
            ]
            for judge in judges:
                if not students:
                    break
                student = students.pop()
                # same judge would review the same paper twice: defer to conflict pass
                if len(student.paper_judges) == 1 and student.paper_judges[0] == judge:
                    conflict_students.append(student)
                    break
                judge.assign_paper(student)
                # still short of two reviewers: put the student back in the pool
                if len(student.paper_judges) < 2:
                    students.append(student)
            assigned_yet += 1
        # Handle conflicts
        judges = category_judges[cat][:]
        for student in conflict_students:
            # least-loaded judges first (papers, then presentations as tiebreak)
            judges.sort(
                key=lambda judge: (
                    len(judge.assigned_papers),
                    len(judge.assigned_presentations),
                )
            )
            # NOTE(review): if a category has no eligible reviewers, next() on
            # cycle([]) raises StopIteration — confirm upstream guarantees
            judge_iter = itertools.cycle(judges)
            while len(student.paper_judges) < 2:
                judge = next(judge_iter)
                if len(student.paper_judges) == 1 and student.paper_judges[0] == judge:
                    continue
                judge.assign_paper(student)
def verify_output(
judge_csv_filename, student_csv_filename, judge_roster, student_roster
):
# Verify judges
with open(judge_csv_filename, encoding="utf-8") as judge_csv_file:
judge_csvreader = csv.DictReader(judge_csv_file)
for row in judge_csvreader:
if not any(row.values()):
continue
first = row[JudgeColumnNames.FIRST_NAME]
last = row[JudgeColumnNames.LAST_NAME]
email = row[JudgeColumnNames.EMAIL]
phone = row[JudgeColumnNames.PHONE]
preferred_categories = [
JUDGE_CATEGORIES[category]
for category in JUDGE_CATEGORIES
if row[JudgeColumnNames.PREFERRED_CATEGORIES].find(category) != -1
]
is_paper_reviewer = row[JudgeColumnNames.IS_PAPER_REVIEWER] == "Yes"
# Find matching judges in output
matching_judges = [
judge
for judge in judge_roster
if (
judge.first,
judge.last,
judge.email,
judge.phone,
judge.preferred_categories,
judge.is_paper_reviewer,
)
== (first, last, email, phone, preferred_categories, is_paper_reviewer)
]
# Throw if more more than one output judge matches the input CSV row
if len(matching_judges) != 1:
error_message = (
"For a given input judge, there was more than one judge in the output with matching details.\n"
"Input judge's name and contact details:\n"
f"First name: {first}, last name: {last}, email: {email}, phone number: {phone}."
)
raise OutputVerificationError(error_message)
judge = matching_judges[0]
# Check that the judge's presentation availability matches the input CSV row
for index in judge.presentation_availability:
index_dt = index_to_datetime(index)
column_name = get_column_name_from_datetime(index_dt)
if (
get_time_slot_availability_string_from_datetime(index_dt)
not in row[column_name]
):
error_message = (
"For a given input judge, their processed judge object was incorrectly set to be available for some amount of time slots during which they are not actually available.\n"
"Input judge's name and contact details:\n"
f"First name: {first}, last name: {last}, email: {email}, phone number: {phone}."
)
raise OutputVerificationError(error_message)
# Check that the judge's assigned presentations are in their presentation availability
for student in judge.assigned_presentations:
if student.presentation_time not in judge.presentation_availability:
error_message = (
"A given input judge was assigned a presentation for a time at which they are not available.\n"
"Input judge's name and contact details:\n"
f"First name: {first}, last name: {last}, email: {email}, phone number: {phone}."
f"Presentation time that they were incorrectly assigned: {get_time_slot_availability_string_from_datetime(student.presentation_time)}."
)
raise OutputVerificationError(error_message)
# Check that the judge is a paper reviewer if they are assigned papers
if judge.assigned_papers and not is_paper_reviewer:
error_message = (
"A given input judge who was not marked as a paper reviewer was assigned papers.\n"
"Input judge's name and contact details:\n"
f"First name: {first}, last name: {last}, email: {email}, phone number: {phone}."
)
raise OutputVerificationError(error_message)
# Check that the judge has selected the categories that they are judging
assigned_student_categories = [
student.category
for student in judge.assigned_papers + judge.assigned_papers
]
for category in assigned_student_categories:
if (
CATEGORY_NUMBERS_TO_LABELS_JUDGES[category]
not in row[JudgeColumnNames.PREFERRED_CATEGORIES]
):
assigned_presentation_students = "\n".join(
[
f"Student ID: {student.student_id}"
for student in judge.assigned_presentatons
]
)
assigned_paper_students = "\n".join(
[
f"Student ID: {student.student_id}"
for student in judge.assigned_papers
]
)
error_message = (
"A given input judge was assigned some amount of papers or presentations whose category the judge did not select.\n"
"Input judge's name and contact details:\n"
f"First name: {first}, last name: {last}, email: {email}, phone number: {phone}.\n"
f"Input judge's assigned presentations:\n{assigned_presentation_students}.\n"
if assigned_presentation_students
else ""
f"Input judge's assigned papers:\n{assigned_paper_students}.\n"
if assigned_paper_students
else ""
)
raise OutputVerificationError(error_message)
# Check that the judge's preferred categories match the input CSV row
for category in judge.preferred_categories:
if (
CATEGORY_NUMBERS_TO_LABELS_JUDGES[category]
not in row[JudgeColumnNames.PREFERRED_CATEGORIES]
):
error_message = (
"For a given input judge, their processed judge object was incorrectly set to prefer some amount of categories which they do not actually prefer.\n"
"Input judge's name and contact details:\n"
f"First name: {first}, last name: {last}, email: {email}, phone number: {phone}.\n"
f"Input judge's processed preferred categories:\n{', '.join([CATEGORY_NUMBERS_TO_LABELS_JUDGES[pref_cat] for pref_cat in judge.preferred_categories])}.\n"
)
raise OutputVerificationError(error_message)
# Verify students
with open(student_csv_filename, encoding="utf-8") as students_csv_file:
student_csvreader = csv.DictReader(students_csv_file)
for row in student_csvreader:
if not any(row.values()):
continue
student_id = int(row[StudentColumnNames.SUBMISSION_NUMBER])
matching_students = [
student
for student in student_roster
if student.student_id == student_id
]
# Throw if more more than one output student matches the input CSV row
if len(matching_students) != 1:
error_message = (
"For a given input student, there was more than one student in the output with matching details.\n"
f"Input student's submission number: {student_id}\n"
)
raise OutputVerificationError(error_message)
student = matching_students[0]
# Check that a student has the correct category
if STUDENT_CATEGORIES[row[StudentColumnNames.CATEGORY]] != student.category:
error_message = (
"For a given input student, the category does not match the output student's category.\n"
f"Input student's submission number: {student_id}\n"
)
raise OutputVerificationError(error_message)
# Check that a student is paper if they have been assigned paper
if (
student.paper_judges
and "Oral" not in row[StudentColumnNames.PARTICIPATION_TYPE]
):
error_message = (
"A given input student was assigned paper judges when they are not an oral/paper presenter.\n"
f"Input student's submission number: {student_id}\n"
)
raise OutputVerificationError(error_message)
# Check that a student is poster if they have been assigned | |
<gh_stars>0
from enum import Enum
import time
from collections import defaultdict
from nltk.corpus import stopwords
from dataanalysis import nlp_utils as nlp
from ontomatch import glove_api
from ontomatch import ss_utils as SS
from datasketch import MinHash, MinHashLSH
from knowledgerepr.networkbuilder import LSHRandomProjectionsIndex
from dataanalysis import dataanalysis as da
import operator
from collections import namedtuple
class MatchingType(Enum):
    """Labels for the matcher strategy ("level") that produced a correspondence.

    Member names encode which pair of elements each strategy compares,
    e.g. L5_CLASSNAME_ATTRNAME_SYN relates class names to attribute names
    syntactically (MinHash/LSH), while the *_SEM variants use word-embedding
    semantic similarity (see the find_* matcher functions below).
    """
    L1_CLASSNAME_ATTRVALUE = 0
    L2_CLASSVALUE_ATTRVALUE = 1
    L3_CLASSCTX_RELATIONCTX = 2
    L4_CLASSNAME_RELATIONNAME_SYN = 3
    L42_CLASSNAME_RELATIONNAME_SEM = 4
    L5_CLASSNAME_ATTRNAME_SYN = 5
    L52_CLASSNAME_ATTRNAME_SEM = 6
    L6_CLASSNAME_RELATION_SEMSIG = 7
    L7_CLASSNAME_ATTRNAME_FUZZY = 8
class SimpleTrie:
    """Minimal prefix tree over token sequences, stored as nested dicts."""

    def __init__(self):
        # Sentinel key that marks the end of an inserted sequence.
        self._leave = "_leave_"
        self.root = dict()

    def add_sequences(self, sequences):
        """Insert every token sequence into the trie and return the root dict."""
        for sequence in sequences:
            node = self.root
            for token in sequence:
                # Descend one level, creating an empty child on first visit.
                node = node.setdefault(token, {})
            # Terminate the path with the sentinel marker.
            node[self._leave] = self._leave
        return self.root

    def longest_prefix(self):
        # Not implemented; placeholder kept for interface compatibility.
        return
class Matching:
    """Collects relation- and attribute-level correspondences for one source.

    source_level_matchings: kr_name -> class_name -> [matching types]
    attr_matchings:         attr_name -> kr_name -> class_name -> [matching types]
    """

    def __init__(self, db_name, source_name):
        self.db_name = db_name
        self.source_name = source_name
        self.source_level_matchings = defaultdict(lambda: defaultdict(list))
        self.attr_matchings = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))

    def add_relation_correspondence(self, kr_name, class_name, matching_type):
        """Record a source-level (relation) correspondence."""
        self.source_level_matchings[kr_name][class_name].append(matching_type)

    def add_attribute_correspondence(self, attr_name, kr_name, class_name, matching_type):
        """Record an attribute-level correspondence."""
        self.attr_matchings[attr_name][kr_name][class_name].append(matching_type)

    def __str__(self):
        lines = [self.db_name + " - " + self.source_name]
        if self.source_level_matchings:
            for kr_name, per_class in self.source_level_matchings.items():
                for class_name, matchings in per_class.items():
                    lines.append(kr_name + " - " + class_name + " : " + str(matchings))
        else:
            lines.append("0 relation matchings")
        if self.attr_matchings:
            for attr_name, per_kr in self.attr_matchings.items():
                for kr_name, per_class in per_kr.items():
                    for class_name, matchings in per_class.items():
                        lines.append(attr_name + " -> " + kr_name + " - " + class_name + " : " + str(matchings))
        return '\n'.join(lines)

    def print_serial(self):
        """Return a flat list of '%%%'-separated match description lines."""
        prefix = self.db_name + " %%% " + self.source_name + " %%% "
        serial = []
        for kr_name, per_class in self.source_level_matchings.items():
            for class_name, matchings in per_class.items():
                serial.append(prefix + "_ -> " + kr_name
                              + " %%% " + class_name + " %%% " + str(matchings))
        for attr_name, per_kr in self.attr_matchings.items():
            for kr_name, per_class in per_kr.items():
                for class_name, matchings in per_class.items():
                    serial.append(prefix + attr_name
                                  + " -> " + kr_name + " %%% " + class_name + " %%% " + str(matchings))
        return serial
def combine_matchings(all_matchings):
    """Merge per-strategy matchings into one Matching object per (db, source).

    all_matchings maps a MatchingType to a list of matches of the form
    ((db_name, source_name, field_name), (kr_name, class_name)) — this shape
    is established by the unpacking in the helpers below.

    Returns a dict (db_name, source_name) -> Matching.
    """
    def _get_or_create(objects, db_name, source_name):
        # Reuse the Matching for this (db, source) pair, creating it on demand.
        mobj = objects.get((db_name, source_name))
        if mobj is None:
            mobj = Matching(db_name, source_name)
        return mobj

    def process_attr_matching(building_matching_objects, m, matching_type):
        sch, krn = m
        db_name, source_name, field_name = sch
        kr_name, class_name = krn
        mobj = _get_or_create(building_matching_objects, db_name, source_name)
        mobj.add_attribute_correspondence(field_name, kr_name, class_name, matching_type)
        building_matching_objects[(db_name, source_name)] = mobj

    def process_relation_matching(building_matching_objects, m, matching_type):
        sch, krn = m
        db_name, source_name, field_name = sch
        kr_name, class_name = krn
        mobj = _get_or_create(building_matching_objects, db_name, source_name)
        mobj.add_relation_correspondence(kr_name, class_name, matching_type)
        building_matching_objects[(db_name, source_name)] = mobj

    # (matching type, is attribute-level?) in the order the results are merged.
    # L4/L42 are relation-name matchers, all others are attribute-level.
    merge_plan = [
        (MatchingType.L1_CLASSNAME_ATTRVALUE, True),
        (MatchingType.L2_CLASSVALUE_ATTRVALUE, True),
        (MatchingType.L4_CLASSNAME_RELATIONNAME_SYN, False),
        (MatchingType.L42_CLASSNAME_RELATIONNAME_SEM, False),
        (MatchingType.L5_CLASSNAME_ATTRNAME_SYN, True),
        (MatchingType.L52_CLASSNAME_ATTRNAME_SEM, True),
        (MatchingType.L6_CLASSNAME_RELATION_SEMSIG, False),
        (MatchingType.L7_CLASSNAME_ATTRNAME_FUZZY, True),
    ]
    # FIX: was defaultdict(None), which has no default factory and behaves
    # exactly like a plain dict (all access goes through .get anyway).
    building_matching_objects = {}  # (db_name, source_name) -> Matching
    for matching_type, is_attribute in merge_plan:
        handler = process_attr_matching if is_attribute else process_relation_matching
        for m in all_matchings[matching_type]:
            handler(building_matching_objects, m, matching_type)
    return building_matching_objects
def combine_matchings2(all_matchings):
    """Combine attribute-level matchings into scored tuples, keyed by the full
    (db, source, attr, kr, class) identity.

    Returns (combined_matchings, l4_matchings) where combined_matchings is a
    list of ((schema, kr), [matching types]) sorted by the number of agreeing
    matcher levels (descending), and l4_matchings is passed through untouched.
    """
    # TODO: divide running score, based on whether content was available or not (is it really necessary?)
    def _seed(l_combined, matchings, matching_type):
        # Unconditionally (re)create an entry per match — L1 owns the seeds.
        for schema, kr in matchings:
            db_name, src_name, attr_name = schema
            kr_name, cla_name = kr
            l_combined[(db_name, src_name, attr_name, kr_name, cla_name)] = (
                (schema, kr), [matching_type])

    def _append(l_combined, matchings, matching_type, create_missing):
        # Append the level to existing entries; optionally create new ones.
        for schema, kr in matchings:
            db_name, src_name, attr_name = schema
            kr_name, cla_name = kr
            key = (db_name, src_name, attr_name, kr_name, cla_name)
            if key in l_combined:
                l_combined[key][1].append(matching_type)
            elif create_missing:
                l_combined[key] = ((schema, kr), [matching_type])

    l_combined = dict()
    # L1 creates its own matchings
    _seed(l_combined, all_matchings[MatchingType.L1_CLASSNAME_ATTRVALUE],
          MatchingType.L1_CLASSNAME_ATTRVALUE)
    # L7 only reinforces existing entries
    _append(l_combined, all_matchings[MatchingType.L7_CLASSNAME_ATTRNAME_FUZZY],
            MatchingType.L7_CLASSNAME_ATTRNAME_FUZZY, False)
    # BUG FIX: the append path used MatchingType.L2_CLASSNAME_ATTRNAME_SYN,
    # which is not a member of MatchingType and raised AttributeError whenever
    # an L2 match hit an already-seen key; both paths now use L2_CLASSVALUE_ATTRVALUE.
    _append(l_combined, all_matchings[MatchingType.L2_CLASSVALUE_ATTRVALUE],
            MatchingType.L2_CLASSVALUE_ATTRVALUE, True)
    _append(l_combined, all_matchings[MatchingType.L5_CLASSNAME_ATTRNAME_SYN],
            MatchingType.L5_CLASSNAME_ATTRNAME_SYN, True)
    _append(l_combined, all_matchings[MatchingType.L52_CLASSNAME_ATTRNAME_SEM],
            MatchingType.L52_CLASSNAME_ATTRNAME_SEM, True)
    # TODO: only append if the matching types are something except L1?
    _append(l_combined, all_matchings[MatchingType.L6_CLASSNAME_RELATION_SEMSIG],
            MatchingType.L6_CLASSNAME_RELATION_SEMSIG, False)
    # L4 and L42 have their own matching too
    l4_matchings = all_matchings[MatchingType.L4_CLASSNAME_RELATIONNAME_SYN]
    # Most-supported matches first (stable sort preserves insertion order on ties).
    combined_matchings = sorted(l_combined.values(), key=lambda x: len(x[1]), reverse=True)
    return combined_matchings, l4_matchings
def find_relation_class_attr_name_sem_matchings(network, kr_handlers):
    """Match attribute (field) names against KR class names semantically.

    Both sides are normalized (camelCase -> snake_case, dashes/underscores to
    spaces, lower-cased), tokenized, stop-worded, and turned into lists of
    GloVe embeddings; pairs with semantic similarity > 0.8 are matched.

    Returns a list of ((db_name, source_name, field_name), (kr_name, class_name)).
    """
    st = time.time()
    # PERF: stopwords.words('english') was re-read for every token; load it
    # once into a set for O(1) membership tests.
    english_stopwords = set(stopwords.words('english'))
    names = []
    seen_fields = set()  # PERF: was a list with O(n) membership checks
    for (db_name, source_name, field_name, _) in network.iterate_values():
        orig_field_name = field_name
        if field_name not in seen_fields:
            seen_fields.add(field_name)  # mark as seen
            field_name = nlp.camelcase_to_snakecase(field_name)
            field_name = field_name.replace('-', ' ').replace('_', ' ').lower()
            svs = []
            for token in field_name.split():
                if token not in english_stopwords:
                    sv = glove_api.get_embedding_for_word(token)
                    if sv is not None:
                        svs.append(sv)
            names.append(('attribute', (db_name, source_name, orig_field_name), svs))
    num_attributes_inserted = len(names)
    # Retrieve class names from every knowledge-representation handler
    for kr_name, kr_handler in kr_handlers.items():
        all_classes = kr_handler.classes()
        for cl in all_classes:
            original_cl_name = cl
            cl = nlp.camelcase_to_snakecase(cl)
            cl = cl.replace('-', ' ').replace('_', ' ').lower()
            svs = []
            for token in cl.split():
                if token not in english_stopwords:
                    sv = glove_api.get_embedding_for_word(token)
                    if sv is not None:
                        svs.append(sv)
            names.append(('class', (kr_name, original_cl_name), svs))
    matchings = []
    for idx_rel in range(0, num_attributes_inserted):  # compare attributes only with classes
        for idx_class in range(num_attributes_inserted, len(names)):
            svs_rel = names[idx_rel][2]
            svs_cla = names[idx_class][2]
            semantic_sim = SS.compute_semantic_similarity(svs_rel, svs_cla)
            if semantic_sim > 0.8:
                # match format: (db_name, source_name, field_name) -> (kr_name, class_name)
                match = ((names[idx_rel][1][0], names[idx_rel][1][1], names[idx_rel][1][2]), names[idx_class][1])
                matchings.append(match)
    et = time.time()
    print("Time to relation-class (sem): " + str(et - st))
    return matchings
def find_relation_class_attr_name_matching(network, kr_handlers):
    """Match attribute (field) names against KR class names syntactically.

    Names are normalized, tokenized and stop-worded, hashed into 64-perm
    MinHash signatures, and looked up in a MinHashLSH index with Jaccard
    threshold 0.6; only attribute<->class pairs are reported.

    Returns a list of ((db_name, source_name, field_name), (kr_name, class_name)).
    """
    # PERF: stopwords.words('english') was re-read for every token; load it
    # once into a set for O(1) membership tests. (Also dropped the unused
    # st = time.time() local — nothing ever reported it.)
    english_stopwords = set(stopwords.words('english'))
    names = []
    seen_fields = set()  # PERF: was a list with O(n) membership checks
    for (db_name, source_name, field_name, _) in network.iterate_values():
        orig_field_name = field_name
        if field_name not in seen_fields:
            seen_fields.add(field_name)  # mark as seen
            field_name = nlp.camelcase_to_snakecase(field_name)
            field_name = field_name.replace('-', ' ').replace('_', ' ').lower()
            m = MinHash(num_perm=64)
            for token in field_name.split():
                if token not in english_stopwords:
                    m.update(token.encode('utf8'))
            names.append(('attribute', (db_name, source_name, orig_field_name), m))
    num_attributes_inserted = len(names)
    # Retrieve class names from every knowledge-representation handler
    for kr_name, kr_handler in kr_handlers.items():
        all_classes = kr_handler.classes()
        for cl in all_classes:
            original_cl_name = cl
            cl = nlp.camelcase_to_snakecase(cl)
            cl = cl.replace('-', ' ').replace('_', ' ').lower()
            m = MinHash(num_perm=64)
            for token in cl.split():
                if token not in english_stopwords:
                    m.update(token.encode('utf8'))
            names.append(('class', (kr_name, original_cl_name), m))
    # Index all the minhashes
    lsh_index = MinHashLSH(threshold=0.6, num_perm=64)
    for idx in range(len(names)):
        lsh_index.insert(idx, names[idx][2])
    matchings = []
    for idx in range(0, num_attributes_inserted):  # compare attributes only with classes
        N = lsh_index.query(names[idx][2])
        for n in N:
            kind_q = names[idx][0]
            kind_n = names[n][0]
            if kind_n != kind_q:
                # match format: (db_name, source_name, field_name) -> (kr_name, class_name)
                match = ((names[idx][1][0], names[idx][1][1], names[idx][1][2]), names[n][1])
                matchings.append(match)
    return matchings
def find_relation_class_name_sem_matchings(network, kr_handlers):
# Retrieve relation names
st = time.time()
names = []
seen_sources = []
for (db_name, source_name, _, _) in network.iterate_values():
original_source_name = source_name
if source_name not in seen_sources:
seen_sources.append(source_name) # seen already
source_name = source_name.replace('-', ' ')
source_name = source_name.replace('_', ' ')
source_name = source_name.lower()
svs = []
for token in source_name.split():
if token not in stopwords.words('english'):
sv = glove_api.get_embedding_for_word(token)
#if sv is not None:
svs.append(sv) # append even None, to apply | |
import os
from sqlalchemy import Column, Integer, String, DateTime, Float
from sqlalchemy.orm import declared_attr
from sqlalchemy.sql.expression import func
from {{appname}}.database.sqldblib import engine,session
from {{appname}}.lib.powlib import pluralize
import datetime
import uuid
from sqlalchemy import orm
import sqlalchemy.inspection
from cerberus import Validator
import xmltodict
import simplejson as json
import datetime, decimal
from {{appname}}.conf.config import myapp
import {{appname}}.conf.config as cfg
from {{appname}}.models.modelobject import ModelObject
from {{appname}}.conf.config import database as dbcfg
#print ('importing module %s' % __name__)
def make_uuid():
    """Return a fresh random UUID4 as a string.

    Dummy default-value helper (used to test uuid defaults on columns).
    """
    return "{}".format(uuid.uuid4())
class SqlBaseModel(ModelObject):
"""
All the basic stuff for SQL Models.
Defaults, init functions, observers, querys
upsert, delete,
init_from (json, xml,csv ...)
printing
...
all the stuff you dont need to implement.
"""
#__table_args__ = { "extend_existing": True }
#id = Column(Integer, primary_key=True)
# #_uuid = Column(String, default=make_uuid)
# # create_date column will be populated with the result of the now() SQL function
# #(which, depending on backend, compiles into NOW() or CURRENT_TIMESTAMP in most cases
# # see: http://docs.sqlalchemy.org/en/latest/core/defaults.html
#created_at = Column(DateTime, default=func.now())
#last_updated = Column(DateTime, onupdate=func.now(), default=func.now())
session = session
@orm.reconstructor
def init_on_load(self, *args, **kwargs):
#
# setup a mashmallow schema to be able to dump (serialize) and load (deserialize)
# models to json quick, safe and easy.
# see: http://marshmallow-sqlalchemy.readthedocs.io/en/latest/
# and link it to the model. (as jsonify attribute)
# this enables the model to load / dump json
#
#print(kwargs)
super().init_on_load()
self.class_name = self.__class__.__name__.capitalize()
#from marshmallow_sqlalchemy import ModelSchema, ModelConverter
## ModelSchema was removed from marshmallow_sqlalchemy and is not
## needed in PythonOnWheels anymore.
## see: https://github.com/marshmallow-code/marshmallow-sqlalchemy/pull/382
from marshmallow_sqlalchemy import ModelConverter
cls_meta=type("Meta", (object,),{"model" : self.__class__})
#jschema_class = type(self.class_name+'Schema', (ModelSchema,),
# fix issue42:
jschema_class = type(self.class_name+'Schema',(ModelConverter,),#modelSchema,),
{
"Meta": cls_meta,
"model" : self.__class__,
#"sqla_session" : session
}
)
setattr(self, "marshmallow_schema", jschema_class())
self.session=session
#
# set the tablename
#
#if getattr(self.__class__, "_tablename", None):
# self.table = self.metadata.tables[getattr(self.__class__, "_tablename")]
#else:
# self.table = self.metadata.tables[pluralize(self.__class__.__name__.lower())]
#self.__class__._tablename = self.table.name
#
# fix see issue #42 => https://github.com/pythononwheels/pow_devel/issues/42
# self.table = self.metadata.tables[self.__class__.__tablename__]
try:
self.table = self.metadata.tables[self.__class__.__table_args__["schema"] + "." + self.__class__.__tablename__]
except:
self.table = self.metadata.tables[self.__class__.__tablename__]
#
# if there is a schema (cerberus) set it in the instance
#
#print(str(self.__class__.__dict__.keys()))
if "schema" in self.__class__.__dict__:
#print(" .. found a schema for: " +str(self.__class__.__name__) + " in class dict")
self.schema = self.__class__.__dict__["schema"]
# add the sqlcolumns schema definitions to the cerberus schema (if there are any)
if myapp["sql_auto_schema"]:
self._setup_schema_from_sql()
#self.setup_instance_values()
#
# setup values from kwargs or from init_from_<format> if format="someformat"
# example: m = Model( data = { 'test' : 1 }, format="json")
# will call m.init_from_json(data)
#
if "format" in kwargs:
# set the format and call the according init_from_<format> method
# which initializes the instance with the given vaules (from data)
# e.g. Model(format=json, data={data})
f = getattr(self, "init_from_" + kwargs["format"], None)
if f:
f(kwargs)
else:
# initializes the instanmce with the given kwargs values:
# e.g.: Model(test="sometext", title="sometitle")
for key in kwargs.keys():
#if key in self.__class__.__dict__:
if key in self.schema:
setattr(self, key, kwargs[key])
self.init_observers()
#elf.setup_dirty_model()
@declared_attr
def __tablename__(cls):
"""
returns the tablename for this model. Convention: pluralized Modelname
You can overwrite this by just setting the __tablename__ = <yourtablename> in the
model class.
"""
return pluralize(cls.__name__.lower())
#def set_table(self, name):
# """ setting the table for this model directly. (Not used: see _custom_tablename parameter) """
# # setting the tablename
# self.table = self.metadata.tables[name]
def setup_instance_values(self):
""" fills the instance with defined default values"""
for key in self.schema.keys():
if self.schema[key].get("default", None) != None:
setattr(self,key,self.schema[key].get("default"))
self.schema[key].pop("default", None)
else:
#print("no default for: " + str(self.schema[key]))
#print("trying: " + str(cfg.database["default_values"][self.schema[key]["type"]]))
try:
#print("trying: " + config.database["default_values"][self.schema[key]["type"]])
if key not in ["created_at", "last_updated", "id"]:
setattr(self,key,cfg.database["default_values"][self.schema[key]["type"]])
except Exception as e:
print(e.message)
setattr(self, key, None)
def _setup_schema_from_sql(self):
"""
Constructs a cerberus definition schema
from a given sqlalchemy column definition
for this model.
Also called when DB reflection is used.
__table_args__ = {"extend_existing":True, "autoload":True, ...}
"""
#print(" .. setup schema from sql for : " + str(self.class_name))
for idx,col in enumerate(self.table.columns.items()):
# looks like this:
# ('id',
# Column('id', Integer(), table=<comments>, primary_key=True,
# nullable=False))
#
# fix: issue 42
# if / else added as of issue #42 https://github.com/pythononwheels/pow_devel/issues/42
col_name = str(col[0])
if str.lower(str(col[1].type)) in ["uniqueidentifier", "uuid", "guid"]:
import uuid
col_type = uuid.UUID
self.schema[col_name] = { "type" : "uuid" }
else:
col_type = col[1].type.python_type
exclude_list = [elem for elem in self.schema.keys()]
#exclude_list.append( ["id", "created_at", "last_updated"] )
#print(" #" + str(idx) + "->" + str(col_name) + " -> " + str(col_type))
# dont check internal columns or relation columns.
#print(str(col[1].foreign_keys))
# col[0] is the column name
# col[1] is the sqlalchemy.Column object
if ( col_name not in exclude_list ) and ( len(col[1].foreign_keys) == 0 ):
#print(" .. adding to schema: " + col_name)
if col_type == int:
# sqlalchemy: Integer, BigInteger
# cerberus: integer
self.schema[col_name] = { "type" : "integer" }
elif col_type == str:
# sqlalchemy: String, Text
# cerberus: string
# python: str
self.schema[col_name] = { "type" : "string" }
elif col_type == bool:
# sqlalchemy: Boolean
# cerberus: boolean
# python: bool
self.schema[col_name] = { "type" : "boolean" }
elif col_type == datetime.date:
# sqlalchemy: Date
# cerberus: date
# python: datetime.date
self.schema[col_name] = { "type" : "date" }
elif col_type == datetime.datetime:
# sqlalchemy: DateTime
# cerberus: datetime
# python: datetime.datetime
self.schema[col_name] = { "type" : "datetime" }
elif col_type == float:
# sqlalchemy: Float
# cerberus: float
# python: float
self.schema[col_name] = { "type" : "float" }
elif col_type == decimal.Decimal:
# sqlalchemy: Numeric
# cerberus: number
# python: decimal.Decimal
self.schema[col_name] = { "type" : "number" }
elif col_type == bytes:
# sqlalchemy: LargeBinary
# cerberus: binary
# python: bytes
self.schema[col_name] = { "type" : "binary" }
else:
if cfg.server_settings["debug_print"]:
print(" .. basemodel.py => setup_schema_from_sql => skipping: " + col_name )
# def init_from_json(self, data, ignore=False, autoconvert=True):
# """
# makes a py dict from input json and
# sets the instance attributes
# """
# print(" .. marshmallow load data input: " + str(data))
# if not isinstance(data,(dict)):
# data=json.loads(data)
# d=self.marshmallow_schema.load(data, session=session).data
# print(" . .. init_from_json returned Model d: " + str(d))
# print(" . .. init_from_json returned Model d type: " + str(type(d)))
# self.__dict__ = d.__dict__
# return
def to_json(self):
return json.dumps(self.marshmallow_schema.dump(self).data)
# def json_dumps(self):
# """ probably better return str(self.json_dump()) ??... test it """
# return json.dumps(self.json_dump())
# def json_dump(self):
# """ return this instances columns as json"""
# return self._jsonify.dump(self).data
def json_load_from_db(self, data, keep_id=False):
if keep_id:
self = self._jsonify.load(data, session=session).data
return self
else:
obj = self.__class__()
obj = obj._jsonify.load(data, session=session).data
obj.id = None
return obj
def json_result_to_object(self, res):
"""
creates a list of instances of this model
from a given json resultlist
"""
if not isinstance(res,(list)):
#single element, pack it in a list
res = [res]
# lists, iterate over all elements
reslist = []
for elem in res:
m = self.__class__()
#print(str(type(elem)) + "->" + str(elem))
m.init_from_json(elem)
#print("adding model to reslist: " + str(m))
reslist.append(m)
return reslist
def get_relationships(self):
"""
returns the raw relationships
see: http://stackoverflow.com/questions/21206818/sqlalchemy-flask-get-relationships-from-a-db-model
"""
return sqlalchemy.inspection.inspect(self.__class__).relationships
def get_relations(self):
"""
returns a list of the relation names
see: http://stackoverflow.com/questions/21206818/sqlalchemy-flask-get-relationships-from-a-db-model
"""
rels = sqlalchemy.inspection.inspect(self.__class__).relationships
return rels.keys()
    def sync(self):
        """Reload this instance's state from the database: expire the cached
        attribute values, then refresh immediately via the bound session."""
        self.session.expire(self)
        self.session.refresh(self)
def new_session(self):
"""
create an entirely new session
"""
from sqlalchemy.orm import sessionmaker
self.session = sessionmaker(bind=engine)()
def _rep_model_as_str(self):
"""
returns a string with the models columns
and value information
including realtion, keys etc..
"""
line = ""
for a in self.__mapper__.attrs:
if isinstance(a, orm.properties.ColumnProperty):
c = a.columns[0]
line += '{:20}'.format(a.key)
line += ": " + str(getattr(self, a.key))
if c.primary_key:
line += '{:15}'.format(" (primary key)")
if c.foreign_keys:
for k in c.foreign_keys:
line += '{:20}'.format(" (" + k.target_fullname + ")")
#line += ' {:40}'.format(", ".join([fk.target_fullname for fk in c.foreign_keys]))
line += os.linesep
elif isinstance(a, orm.properties.RelationshipProperty):
line += "{:20}: {} relationship | |
character control")
return
#Remove existing driven attrs
cmds.aliasAttr(character + "spine_04_anim.driven", rm=True )
attrs = cmds.listAttr(character + "spine_04_anim", keyable = True)
for attr in attrs:
if attr.find("blend") == 0:
cmds.deleteAttr(character + "spine_04_anim", at = attr)
cmds.aliasAttr(character + "spine_02_anim.driven", rm=True )
attrs = cmds.listAttr(character + "spine_02_anim", keyable = True)
for attr in attrs:
if attr.find("blend") == 0:
cmds.deleteAttr(character + "spine_02_anim", at = attr)
#Spine 4 : Rotate X
spine4MultXA = cmds.shadingNode("multiplyDivide", asUtility = True, name = character + "spine_04_xa_mult")
cmds.connectAttr(character + "spine_05_anim.rx", spine4MultXA + ".input1X", force = True)
cmds.connectAttr(character + "spine_04_anim.spine_5_Influence", spine4MultXA + ".input2X", force = True)
spine4MultXB = cmds.shadingNode("multiplyDivide", asUtility = True, name = character + "spine_04_xb_mult")
cmds.connectAttr(character + "spine_03_anim.rx", spine4MultXB+ ".input1X", force = True)
cmds.connectAttr(character + "spine_04_anim.spine_3_Influence", spine4MultXB + ".input2X", force = True)
spine4MultX = cmds.shadingNode("plusMinusAverage", asUtility = True, name = character + "spine04_drivenX_avg")
cmds.setAttr(spine4MultX + ".operation", 3)
cmds.connectAttr(spine4MultXA + ".outputX", spine4MultX + ".input1D[0]", force = True)
cmds.connectAttr(spine4MultXB + ".outputX", spine4MultX + ".input1D[1]", force = True)
cmds.connectAttr(spine4MultX + ".output1D", character + "spine_04_anim.rotateX", force = True)
#Spine 4 : Rotate Y
spine4MultYA = cmds.shadingNode("multiplyDivide", asUtility = True, name = character + "spine_04_ya_mult")
cmds.connectAttr(character + "spine_05_anim.ry", spine4MultYA + ".input1X", force = True)
cmds.connectAttr(character + "spine_04_anim.spine_5_Influence", spine4MultYA + ".input2X", force = True)
spine4MultYB = cmds.shadingNode("multiplyDivide", asUtility = True, name = character + "spine_04_yb_mult")
cmds.connectAttr(character + "spine_03_anim.ry", spine4MultYB+ ".input1X", force = True)
cmds.connectAttr(character + "spine_04_anim.spine_3_Influence", spine4MultYB + ".input2X", force = True)
spine4MultY = cmds.shadingNode("plusMinusAverage", asUtility = True, name = character + "spine04_drivenY_avg")
cmds.setAttr(spine4MultY + ".operation", 3)
cmds.connectAttr(spine4MultYA + ".outputX", spine4MultY + ".input1D[0]", force = True)
cmds.connectAttr(spine4MultYB + ".outputX", spine4MultY + ".input1D[1]", force = True)
cmds.connectAttr(spine4MultY + ".output1D", character + "spine_04_anim.rotateY", force = True)
#Spine 4 : Rotate Z
spine4MultZA = cmds.shadingNode("multiplyDivide", asUtility = True, name = character + "spine_04_za_mult")
cmds.connectAttr(character + "spine_05_anim.rz", spine4MultZA + ".input1X", force = True)
cmds.connectAttr(character + "spine_04_anim.spine_5_Influence", spine4MultZA + ".input2X", force = True)
spine4MultZB = cmds.shadingNode("multiplyDivide", asUtility = True, name = character + "spine_04_zb_mult")
cmds.connectAttr(character + "spine_03_anim.rz", spine4MultZB+ ".input1X", force = True)
cmds.connectAttr(character + "spine_04_anim.spine_3_Influence", spine4MultZB + ".input2X", force = True)
spine4MultZ = cmds.shadingNode("plusMinusAverage", asUtility = True, name = character + "spine04_drivenZ_avg")
cmds.setAttr(spine4MultZ + ".operation", 3)
cmds.connectAttr(spine4MultZA + ".outputX", spine4MultZ + ".input1D[0]", force = True)
cmds.connectAttr(spine4MultZB + ".outputX", spine4MultZ + ".input1D[1]", force = True)
cmds.connectAttr(spine4MultZ + ".output1D", character + "spine_04_anim.rotateZ", force = True)
# # # #
#Spine 2 : Rotate X
spine2MultXA = cmds.shadingNode("multiplyDivide", asUtility = True, name = character + "spine_02_xa_mult")
cmds.connectAttr(character + "spine_03_anim.rx", spine2MultXA + ".input1X", force = True)
cmds.connectAttr(character + "spine_02_anim.spine_3_Influence", spine2MultXA + ".input2X", force = True)
spine2MultXB = cmds.shadingNode("multiplyDivide", asUtility = True, name = character + "spine_02_xb_mult")
cmds.connectAttr(character + "spine_01_anim.rx", spine2MultXB+ ".input1X", force = True)
cmds.connectAttr(character + "spine_02_anim.spine_1_Influence", spine2MultXB + ".input2X", force = True)
spine2MultX = cmds.shadingNode("plusMinusAverage", asUtility = True, name = character + "spine02_drivenX_avg")
cmds.setAttr(spine2MultX + ".operation", 3)
cmds.connectAttr(spine2MultXA + ".outputX", spine2MultX + ".input1D[0]", force = True)
cmds.connectAttr(spine2MultXB + ".outputX", spine2MultX + ".input1D[1]", force = True)
cmds.connectAttr(spine2MultX + ".output1D", character + "spine_02_anim.rotateX", force = True)
#Spine 2 : Rotate Y
spine2MultYA = cmds.shadingNode("multiplyDivide", asUtility = True, name = character + "spine_02_ya_mult")
cmds.connectAttr(character + "spine_03_anim.ry", spine2MultYA + ".input1X", force = True)
cmds.connectAttr(character + "spine_02_anim.spine_3_Influence", spine2MultYA + ".input2X", force = True)
spine2MultYB = cmds.shadingNode("multiplyDivide", asUtility = True, name = character + "spine_02_yb_mult")
cmds.connectAttr(character + "spine_01_anim.ry", spine2MultYB+ ".input1X", force = True)
cmds.connectAttr(character + "spine_02_anim.spine_1_Influence", spine2MultYB + ".input2X", force = True)
spine2MultY = cmds.shadingNode("plusMinusAverage", asUtility = True, name = character + "spine02_drivenY_avg")
cmds.setAttr(spine2MultY + ".operation", 3)
cmds.connectAttr(spine2MultYA + ".outputX", spine2MultY + ".input1D[0]", force = True)
cmds.connectAttr(spine2MultYB + ".outputX", spine2MultY + ".input1D[1]", force = True)
cmds.connectAttr(spine2MultY + ".output1D", character + "spine_02_anim.rotateY", force = True)
#Spine 2 : Rotate Z
spine2MultZA = cmds.shadingNode("multiplyDivide", asUtility = True, name = character + "spine_02_za_mult")
cmds.connectAttr(character + "spine_03_anim.rz", spine2MultZA + ".input1X", force = True)
cmds.connectAttr(character + "spine_02_anim.spine_3_Influence", spine2MultZA + ".input2X", force = True)
spine2MultZB = cmds.shadingNode("multiplyDivide", asUtility = True, name = character + "spine_02_zb_mult")
cmds.connectAttr(character + "spine_01_anim.rz", spine2MultZB+ ".input1X", force = True)
cmds.connectAttr(character + "spine_02_anim.spine_1_Influence", spine2MultZB + ".input2X", force = True)
spine2MultZ = cmds.shadingNode("plusMinusAverage", asUtility = True, name = character + "spine02_drivenZ_avg")
cmds.setAttr(spine2MultZ + ".operation", 3)
cmds.connectAttr(spine2MultZA + ".outputX", spine2MultZ + ".input1D[0]", force = True)
cmds.connectAttr(spine2MultZB + ".outputX", spine2MultZ + ".input1D[1]", force = True)
cmds.connectAttr(spine2MultZ + ".output1D", character + "spine_02_anim.rotateZ", force = True)
cmds.setKeyframe(character + "spine_02_anim.rotate")
cmds.setKeyframe(character + "spine_04_anim.rotate")
cmds.select(character + "spine_02_anim.cv[*]")
cmds.scale(0, 0, 0, relative = True)
cmds.select(character + "spine_04_anim.cv[*]")
cmds.scale(0, 0, 0, relative = True)
#check for new blendUnitConversion attr and alias attr it
spine4Connections = cmds.listConnections(character + "spine_04_anim", source = True, type = "pairBlend")
for each in spine4Connections:
conversions = cmds.listConnections(each, type = "unitConversion")
if conversions != None:
attr = conversions[0].partition("Conversion")[2]
attr = "blendUnitConversion" + attr
try:
cmds.aliasAttr("driven", character + "spine_04_anim." + attr)
cmds.setAttr(character + "spine_04_anim.driven", 1)
except:
pass
spine2Connections = cmds.listConnections(character + "spine_02_anim", source = True, type = "pairBlend")
for each in spine2Connections:
conversions = cmds.listConnections(each, type = "unitConversion")
if conversions != None:
attr = conversions[0].partition("Conversion")[2]
attr = "blendUnitConversion" + attr
try:
cmds.aliasAttr("driven", character + "spine_02_anim." + attr)
cmds.setAttr(character + "spine_02_anim.driven", 1)
except:
pass
#############################################################################################
#############################################################################################
#############################################################################################
def threeCtrlSpine(*args):
#Spine 4 : Rotate X
cmds.select("spine_04_anim")
cmds.addAttr(longName='spine_5_Influence', defaultValue=1, minValue=-1, maxValue=5, keyable = True)
cmds.addAttr(longName='spine_3_Influence', defaultValue=1, minValue=-1, maxValue=5, keyable = True)
spine4MultXA = cmds.shadingNode("multiplyDivide", asUtility = True, name = "spine_04_xa_mult")
cmds.connectAttr("spine_05_anim.rx", spine4MultXA + ".input1X")
cmds.connectAttr("spine_04_anim.spine_5_Influence", spine4MultXA + ".input2X")
spine4MultXB = cmds.shadingNode("multiplyDivide", asUtility = True, name = "spine_04_xb_mult")
cmds.connectAttr("spine_03_anim.rx", spine4MultXB+ ".input1X")
cmds.connectAttr("spine_04_anim.spine_3_Influence", spine4MultXB + ".input2X")
spine4MultX = cmds.shadingNode("plusMinusAverage", asUtility = True, name = "spine04_drivenX_avg")
cmds.setAttr(spine4MultX + ".operation", 3)
cmds.connectAttr(spine4MultXA + ".outputX", spine4MultX + ".input1D[0]")
cmds.connectAttr(spine4MultXB + ".outputX", spine4MultX + ".input1D[1]")
cmds.connectAttr(spine4MultX + ".output1D", "spine_04_anim.rotateX")
#Spine 4 : Rotate Y
spine4MultYA = cmds.shadingNode("multiplyDivide", asUtility = True, name = "spine_04_ya_mult")
cmds.connectAttr("spine_05_anim.ry", spine4MultYA + ".input1X")
cmds.connectAttr("spine_04_anim.spine_5_Influence", spine4MultYA + ".input2X")
spine4MultYB = cmds.shadingNode("multiplyDivide", asUtility = True, name = "spine_04_yb_mult")
cmds.connectAttr("spine_03_anim.ry", spine4MultYB+ ".input1X")
cmds.connectAttr("spine_04_anim.spine_3_Influence", spine4MultYB + ".input2X")
spine4MultY = cmds.shadingNode("plusMinusAverage", asUtility = True, name = "spine04_drivenY_avg")
cmds.setAttr(spine4MultY + ".operation", 3)
cmds.connectAttr(spine4MultYA + ".outputX", spine4MultY + ".input1D[0]")
cmds.connectAttr(spine4MultYB + ".outputX", spine4MultY + ".input1D[1]")
cmds.connectAttr(spine4MultY + ".output1D", "spine_04_anim.rotateY")
#Spine 4 : Rotate Z
spine4MultZA = cmds.shadingNode("multiplyDivide", asUtility = True, name = "spine_04_za_mult")
cmds.connectAttr("spine_05_anim.rz", spine4MultZA + ".input1X")
cmds.connectAttr("spine_04_anim.spine_5_Influence", spine4MultZA + ".input2X")
spine4MultZB = cmds.shadingNode("multiplyDivide", asUtility = True, name = "spine_04_zb_mult")
cmds.connectAttr("spine_03_anim.rz", spine4MultZB+ ".input1X")
cmds.connectAttr("spine_04_anim.spine_3_Influence", spine4MultZB + ".input2X")
spine4MultZ = cmds.shadingNode("plusMinusAverage", asUtility = True, name = "spine04_drivenZ_avg")
cmds.setAttr(spine4MultZ + ".operation", 3)
cmds.connectAttr(spine4MultZA + ".outputX", spine4MultZ + ".input1D[0]")
cmds.connectAttr(spine4MultZB + ".outputX", spine4MultZ + ".input1D[1]")
cmds.connectAttr(spine4MultZ + ".output1D", "spine_04_anim.rotateZ")
# # # #
#Spine 2 : Rotate X
cmds.select("spine_02_anim")
cmds.addAttr(longName='spine_3_Influence', defaultValue=1, minValue=-1, maxValue=5, keyable = True)
cmds.addAttr(longName='spine_1_Influence', defaultValue=1, minValue=-1, maxValue=5, keyable = True)
spine2MultXA = cmds.shadingNode("multiplyDivide", asUtility = True, name = "spine_02_xa_mult")
cmds.connectAttr("spine_03_anim.rx", spine2MultXA + ".input1X")
cmds.connectAttr("spine_02_anim.spine_3_Influence", spine2MultXA + ".input2X")
spine2MultXB = cmds.shadingNode("multiplyDivide", asUtility = True, name = "spine_02_xb_mult")
cmds.connectAttr("spine_01_anim.rx", spine2MultXB+ ".input1X")
cmds.connectAttr("spine_02_anim.spine_1_Influence", spine2MultXB + ".input2X")
spine2MultX = cmds.shadingNode("plusMinusAverage", asUtility = True, name = "spine02_drivenX_avg")
cmds.setAttr(spine2MultX + ".operation", 3)
cmds.connectAttr(spine2MultXA + ".outputX", spine2MultX + ".input1D[0]")
cmds.connectAttr(spine2MultXB + ".outputX", spine2MultX + ".input1D[1]")
cmds.connectAttr(spine2MultX + ".output1D", "spine_02_anim.rotateX")
#Spine 2 : Rotate Y
spine2MultYA = cmds.shadingNode("multiplyDivide", asUtility = True, name = "spine_02_ya_mult")
cmds.connectAttr("spine_03_anim.ry", spine2MultYA + ".input1X")
cmds.connectAttr("spine_02_anim.spine_3_Influence", spine2MultYA + ".input2X")
spine2MultYB = cmds.shadingNode("multiplyDivide", asUtility = True, name = "spine_02_yb_mult")
cmds.connectAttr("spine_01_anim.ry", spine2MultYB+ ".input1X")
cmds.connectAttr("spine_02_anim.spine_1_Influence", spine2MultYB + ".input2X")
spine2MultY = cmds.shadingNode("plusMinusAverage", asUtility = True, name = "spine02_drivenY_avg")
cmds.setAttr(spine2MultY + ".operation", 3)
cmds.connectAttr(spine2MultYA + ".outputX", spine2MultY + ".input1D[0]")
cmds.connectAttr(spine2MultYB + ".outputX", spine2MultY + ".input1D[1]")
cmds.connectAttr(spine2MultY + ".output1D", "spine_02_anim.rotateY")
#Spine 2 : Rotate Z
spine2MultZA = cmds.shadingNode("multiplyDivide", asUtility = True, name = "spine_02_za_mult")
cmds.connectAttr("spine_03_anim.rz", spine2MultZA + ".input1X")
cmds.connectAttr("spine_02_anim.spine_3_Influence", spine2MultZA + ".input2X")
spine2MultZB = cmds.shadingNode("multiplyDivide", asUtility = True, name = "spine_02_zb_mult")
cmds.connectAttr("spine_01_anim.rz", spine2MultZB+ ".input1X")
cmds.connectAttr("spine_02_anim.spine_1_Influence", spine2MultZB + ".input2X")
spine2MultZ = cmds.shadingNode("plusMinusAverage", asUtility = True, name = "spine02_drivenZ_avg")
cmds.setAttr(spine2MultZ + ".operation", 3)
cmds.connectAttr(spine2MultZA + ".outputX", spine2MultZ + ".input1D[0]")
cmds.connectAttr(spine2MultZB + ".outputX", spine2MultZ + ".input1D[1]")
cmds.connectAttr(spine2MultZ + ".output1D", "spine_02_anim.rotateZ")
cmds.setKeyframe("spine_02_anim.rotate")
cmds.setKeyframe("spine_04_anim.rotate")
cmds.select("spine_02_anim.cv[*]")
cmds.scale(0, 0, 0, relative = True)
cmds.select("spine_04_anim.cv[*]")
cmds.scale(0, 0, 0, relative = True)
#check for new blendUnitConversion attr and alias attr it
spine4Connections = | |
,
u'縰' : [u'x'] ,
u'媳' : [u'x'] ,
u'莹' : [u'y'] ,
u'噀' : [u'x'] ,
u'㳇' : [u'f'] ,
u'齆' : [u'w'] ,
u'旍' : [u'j'] ,
u'乐' : [u'y', u'l'] ,
u'煚' : [u'j'] ,
u'川' : [u'c'] ,
u'苣' : [u'q', u'j'] ,
u'楪' : [u'y', u'd'] ,
u'㿱' : [u'x'] ,
u'鹰' : [u'y'] ,
u'擷' : [u'x'] ,
u'䅺' : [u'm'] ,
u'霁' : [u'j'] ,
u'炄' : [u'n'] ,
u'輑' : [u'y'] ,
u'梔' : [u'z'] ,
u'醚' : [u'm'] ,
u'㠟' : [u'l'] ,
u'䂤' : [u'h', u'f'] ,
u'阫' : [u'p'] ,
u'親' : [u'q', u'x'] ,
u'玮' : [u'w'] ,
u'夵' : [u'y'] ,
u'踻' : [u'g'] ,
u'殾' : [u'j'] ,
u'郄' : [u'q', u'x'] ,
u'㭉' : [u'h'] ,
u'恏' : [u'h'] ,
u'䏎' : [u'h', u'f'] ,
u'裔' : [u'y'] ,
u'狘' : [u'y', u'x'] ,
u'塟' : [u'z'] ,
u'腥' : [u'x'] ,
u'櫨' : [u'l'] ,
u'鏮' : [u'k'] ,
u'㩳' : [u's'] ,
u'捹' : [u'b'] ,
u'䋸' : [u'x'] ,
u'课' : [u'k'] ,
u'螋' : [u's'] ,
u'砊' : [u'k'] ,
u'䒍' : [u'p'] ,
u'錘' : [u'c'] ,
u'倚' : [u'y'] ,
u'禟' : [u't'] ,
u'謨' : [u'm'] ,
u'䠪' : [u'd'] ,
u'钭' : [u't'] ,
u'甬' : [u'y', u'd'] ,
u'冯' : [u'p', u'f'] ,
u'貽' : [u'y'] ,
u'洼' : [u'g', u'w'] ,
u'盁' : [u'y'] ,
u'顊' : [u'y'] ,
u'䕌' : [u'z'] ,
u'滑' : [u'h', u'g'] ,
u'駟' : [u's'] ,
u'穞' : [u'l'] ,
u'䛡' : [u'h'] ,
u'镬' : [u'h'] ,
u'剮' : [u'g'] ,
u'篳' : [u'b'] ,
u'㣵' : [u't'] ,
u'赼' : [u'c'] ,
u'䩾' : [u'z'] ,
u'瞀' : [u'm', u'w'] ,
u'㒂' : [u'k', u'z'] ,
u'逅' : [u'h', u'g'] ,
u'崇' : [u'c'] ,
u'澐' : [u'y'] ,
u'蠕' : [u'r'] ,
u'爙' : [u'r'] ,
u'㼛' : [u'x'] ,
u'骞' : [u'q'] ,
u'権' : [u'q'] ,
u'粲' : [u'c'] ,
u'㦴' : [u'q', u'g'] ,
u'蔷' : [u'q', u's'] ,
u'䈹' : [u's'] ,
u'韀' : [u'j'] ,
u'哂' : [u's'] ,
u'杋' : [u'f'] ,
u'运' : [u'y'] ,
u'䳒' : [u'y'] ,
u'鉙' : [u'z'] ,
u'彛' : [u'y'] ,
u'懤' : [u'c'] ,
u'詩' : [u's'] ,
u'瑭' : [u't'] ,
u'鳲' : [u's'] ,
u'姴' : [u'l'] ,
u'汽' : [u'q', u'y', u'g'] ,
u'妁' : [u's'] ,
u'銇' : [u'l'] ,
u'洎' : [u'j'] ,
u'䆑' : [u'c'] ,
u'炛' : [u'g'] ,
u'唞' : [u'd'] ,
u'踤' : [u'c', u'z'] ,
u'墫' : [u'c', u'z'] ,
u'醱' : [u'p'] ,
u'永' : [u'y'] ,
u'䂻' : [u'z'] ,
u'翅' : [u'c'] ,
u'呈' : [u'c'] ,
u'赎' : [u's'] ,
u'柕' : [u'm'] ,
u'郛' : [u'f'] ,
u'止' : [u'z'] ,
u'俥' : [u'c'] ,
u'绯' : [u'f'] ,
u'卲' : [u's'] ,
u'豸' : [u'z'] ,
u'替' : [u't'] ,
u'尃' : [u'f'] ,
u'锉' : [u'c'] ,
u'檌' : [u'z'] ,
u'䐓' : [u'r'] ,
u'㨗' : [u'j'] ,
u'猝' : [u'c'] ,
u'劜' : [u'y'] ,
u'订' : [u'd'] ,
u'嬭' : [u'n'] ,
u'鐳' : [u'l'] ,
u'榶' : [u't'] ,
u'䌽' : [u'c'] ,
u'㥁' : [u'z', u'd'] ,
u'片' : [u'p'] ,
u'准' : [u'z'] ,
u'諌' : [u'j', u'd'] ,
u'婗' : [u'n'] ,
u'㿚' : [u'l'] ,
u'鍝' : [u'y'] ,
u'棠' : [u't'] ,
u'䉧' : [u'l'] ,
u'㡫' : [u'y'] ,
u'煱' : [u'g'] ,
u'僰' : [u'b'] ,
u'觶' : [u'z'] ,
u'鶃' : [u'y'] ,
u'稂' : [u'l'] ,
u'庅' : [u'm'] ,
u'薓' : [u's'] ,
u'戒' : [u'j'] ,
u'䚕' : [u'l'] ,
u'算' : [u's'] ,
u'褠' : [u'g'] ,
u'䨢' : [u'd', u'g', u't'] ,
u'控' : [u'q', u'k'] ,
u'隵' : [u'x'] ,
u'眴' : [u'x', u's'] ,
u'䮷' : [u'd'] ,
u'驂' : [u'c'] ,
u'彄' : [u'k'] ,
u'瓉' : [u'z'] ,
u'艒' : [u'm'] ,
u'鯗' : [u'x'] ,
u'硖' : [u'x'] ,
u'峙' : [u's', u'z'] ,
u'㵘' : [u'm'] ,
u'菧' : [u'd'] ,
u'恦' : [u'x'] ,
u'秫' : [u's'] ,
u'靴' : [u'x'] ,
u'䡶' : [u'p', u'b'] ,
u'懻' : [u'j'] ,
u'疈' : [u'p', u'b'] ,
u'訍' : [u'c'] ,
u'伏' : [u'f'] ,
u'颖' : [u'y'] ,
u'嶘' : [u'z'] ,
u'校' : [u'x', u'j'] ,
u'肦' : [u'b', u'f'] ,
u'䖨' : [u's'] ,
u'鼯' : [u'w'] ,
u'倱' : [u'h'] ,
u'溺' : [u'n'] ,
u'蜿' : [u'w'] ,
u'絃' : [u'x'] ,
u'㹅' : [u'z'] ,
u'闈' : [u'w'] ,
u'囊' : [u'n'] ,
u'敓' : [u'd'] ,
u'珜' : [u'y'] ,
u'㓞' : [u'q', u'y'] ,
u'衡' : [u'h'] ,
u'䵣' : [u'z', u'd'] ,
u'寬' : [u'k'] ,
u'癵' : [u'l'] ,
u'軺' : [u'y', u'd'] ,
u'䏼' : [u'c'] ,
u'㨀' : [u'b'] ,
u'猆' : [u'f'] ,
u'垉' : [u'p'] ,
u'貏' : [u'b'] ,
u'嬖' : [u'b'] ,
u'逜' : [u'w'] ,
u'溣' : [u'l'] ,
u'䌦' : [u'd'] ,
u'爰' : [u'y'] ,
u'嚳' : [u'k'] ,
u'边' : [u'b'] ,
u'婀' : [u'e'] ,
u'鍆' : [u'm'] ,
u'槍' : [u'q', u'c'] ,
u'絚' : [u'g'] ,
u'凝' : [u'n'] ,
u'軣' : [u'h'] ,
u'敪' : [u'd'] ,
u'鉰' : [u's'] ,
u'棷' : [u'z'] ,
u'䵺' : [u't'] ,
u'鬁' : [u'l'] ,
u'粄' : [u'b'] ,
u'䨋' : [u'n'] ,
u'茑' : [u'n'] ,
u'撔' : [u'h'] ,
u'鶚' : [u'e'] ,
u'津' : [u'j'] ,
u'騫' : [u'q', u'j'] ,
u'薪' : [u'x'] ,
u'羮' : [u'l', u'g'] ,
u'唵' : [u'a'] ,
u'舻' : [u'l'] ,
u'枾' : [u's'] ,
u'鳄' : [u'e'] ,
u'㝉' : [u'z'] ,
u'汏' : [u'd', u't'] ,
u'俎' : [u'z'] ,
u'蓔' : [u'y'] ,
u'绘' : [u'h'] ,
u'呟' : [u'q'] ,
u'赥' : [u'x'] ,
u'曨' : [u'l'] ,
u'㙳' : [u'h', u'k'] ,
u'潹' : [u'c'] ,
u'仸' : [u'y'] ,
u'蟾' : [u'c'] ,
u'讋' : [u's', u'z'] ,
u'琊' : [u'y'] ,
u'䢍' : [u'f'] ,
u'鼘' : [u'y'] ,
u'尚' : [u's', u'c'] ,
u'疟' : [u'y', u'n'] ,
u'蜨' : [u'd'] ,
u'䐪' : [u'j', u'f'] ,
u'颭' : [u'z'] ,
u'礬' : [u'f'] ,
u'嶯' : [u'j'] ,
u'㸮' : [u'f'] ,
u'肽' : [u't'] ,
u'愼' : [u's'] ,
u'䖿' : [u'l'] ,
u'竁' : [u'c'] ,
u'㿃' : [u'z', u'd'] ,
u'鑊' : [u'h'] ,
u'䥌' : [u'z'] ,
u'拑' : [u'q'] ,
u'闟' : [u'x', u's', u't'] ,
u'癞' : [u'l'] ,
u'䫡' : [u'q', u'y'] ,
u'饬' : [u'c'] ,
u'幮' : [u'c'] ,
u'石' : [u's', u'd'] ,
u'㓵' : [u'e'] ,
u'腼' : [u'm'] ,
u'简' : [u'j'] ,
u'㢂' : [u'y'] ,
u'鰅' : [u'y'] ,
u'儇' : [u'x'] ,
u'掐' : [u'q'] ,
u'萕' : [u'q'] ,
u'縙' : [u'r'] ,
u'隞' : [u'a'] ,
u'昩' : [u'm'] ,
u'炲' : [u't'] ,
u'褷' : [u's'] ,
u'丹' : [u'd'] ,
u'鯀' : [u'g'] ,
u'壂' : [u'd'] ,
u'歋' : [u'y'] ,
u'菐' : [u'p'] ,
u'䃒' : [u'h'] ,
u'鹙' : [u'q'] ,
u'卛' : [u's'] ,
u'淤' : [u'y'] ,
u'虩' : [u'x'] ,
u'硭' : [u'm'] ,
u'㵯' : [u'f'] ,
u'郲' : [u'l'] ,
u'恽' : [u'y'] ,
u'嶁' : [u'l'] ,
u'隇' : [u'w'] ,
u'椎' : [u'c', u'z'] ,
u'䖑' : [u'm'] ,
u'璛' : [u's'] ,
u'儞' : [u'e', u'n'] ,
u'訤' : [u'x', u'n'] ,
u'岫' : [u'x'] ,
u'閱' : [u'y'] ,
u'㜲' : [u'y', u'w'] ,
u'核' : [u'h'] ,
u'䒻' : [u'q'] ,
u'篅' : [u'c'] ,
u'偈' : [u'q', u'j'] ,
u'襎' : [u'f'] ,
u'揕' : [u'z'] ,
u'铛' : [u'c', u'd'] ,
u'㙜' : [u'c', u't'] ,
u'潢' : [u'h', u'g'] ,
u'䯥' : [u'a', u'q'] ,
u'端' : [u'd'] ,
u'坲' : [u'f'] ,
u'衸' : [u'x'] ,
u'拿' : [u'n'] ,
u'堃' : [u'k'] ,
u'㖆' : [u'q'] ,
u'鄉' : [u'x'] ,
u'溌' : [u'p'] ,
u'䀓' : [u'h'] ,
u'㸗' : [u't'] ,
u'眝' : [u'z'] ,
u'嚜' : [u'm'] ,
u'辢' : [u'l'] ,
u'弭' : [u'm'] ,
u'㒰' : [u'q'] ,
u'逳' : [u'y'] ,
u'涶' : [u't'] ,
u'䜽' : [u'y'] ,
u'癇' : [u'x'] ,
u'嗆' : [u'q'] ,
u'軌' : [u'g'] ,
u'幗' : [u'g'] ,
u'㯚' : [u'd'] ,
u'靝' : [u't'] ,
u'泠' : [u'l'] ,
u'㱫' : [u'l'] ,
u'畱' : [u'l'] ,
u'哰' : [u'l'] ,
u'跶' : [u't', u'd'] ,
u'馃' : [u'g'] ,
u'縂' : [u'z'] ,
u'媅' : [u'd'] ,
u'㬄' : [u'c'] ,
u'膓' : [u'c'] ,
u'昒' : [u'h'] ,
u'羗' : [u'q'] ,
u'㢙' : [u'q'] ,
u'贠' : [u'y'] ,
u'丢' : [u'd'] ,
u'枧' : [u'j'] ,
u'銵' : [u'k'] ,
u'猴' : [u'h'] ,
u'侷' : [u'j'] ,
u'鹂' : [u'l'] ,
u'孄' : [u'l'] ,
u'烉' : [u'h'] ,
u'虒' : [u'y', u's', u'z'] ,
u'䍔' : [u'h'] ,
u'壙' : [u'k'] ,
u'蟧' : [u'l'] ,
u'摦' : [u'h'] ,
u'䃩' : [u'n'] ,
u'緫' : [u'c', u'z'] ,
u'㻭' : [u's', u'z'] ,
u'鍴' : [u'd'] ,
u'䱶' : [u'l'] ,
u'旻' : [u'm'] ,
u'熈' : [u'x'] ,
u'踍' : [u'q'] ,
u'䬏' : [u'b', u'f'] ,
u'鲖' : [u'z', u't'] ,
u'妘' : [u'y'] ,
u'氡' : [u'd'] ,
u'蒦' : [u'y', u'w'] ,
u'䆨' : [u'm'] ,
u'鬯' : [u'c'] ,
u'吱' : [u'z'] ,
u'檺' : [u'g'] ,
u'茿' : [u'z'] ,
u'祃' : [u'm'] ,
u'釈' : [u's'] ,
u'勊' : [u'k'] ,
u'慓' : [u'p'] ,
u'矜' : [u'q', u'j', u'g'] ,
u'象' : [u'x'] ,
u'䥣' : [u'z'] ,
u'忬' : [u'y'] ,
u'牵' : [u'q'] ,
u'諺' : [u'y'] ,
u'䟼' : [u'w'] ,
u'㸀' : [u't'] ,
u'眆' : [u'f'] ,
u'厉' : [u'l'] ,
u'袏' : [u'z'] ,
u'弖' : [u'h'] ,
u'鐜' : [u'd'] ,
u'檣' : [u'q'] ,
u'瘰' : [u'l'] ,
u'劳' : [u'l'] | |
<filename>tools/MethylSig/rpy2/rpy/rinterface/tests/test_SexpVector.py
import unittest
import sys, struct
import rpy2.rinterface as ri
# Start the embedded R engine; required before any other rinterface call.
ri.initr()
def evalr(string):
    """Parse *string* as R source and evaluate it in R's base environment."""
    return ri.baseenv["eval"](ri.parse(string))
def floatEqual(x, y, epsilon=0.00000001):
    """Return True when x and y differ by strictly less than *epsilon* (absolute)."""
    delta = x - y
    return -epsilon < delta < epsilon
# True when running under Python 3; selects bytes vs. str fixtures below.
IS_PYTHON3 = sys.version_info[0] == 3
class WrapperSexpVectorTestCase(unittest.TestCase):
    """Each typed SexpVector wrapper must produce the matching R vector type."""

    def testInt(self):
        sexp = ri.IntSexpVector([1, ])
        isInteger = ri.globalenv.get("is.integer")
        ok = isInteger(sexp)[0]
        self.assertTrue(ok)

    def testFloat(self):
        # Fixed: the original built an IntSexpVector from a float literal,
        # which only passed because R's is.numeric() is also TRUE for
        # integers -- the FloatSexpVector wrapper was never exercised.
        sexp = ri.FloatSexpVector([1.0, ])
        isNumeric = ri.globalenv.get("is.numeric")
        ok = isNumeric(sexp)[0]
        self.assertTrue(ok)

    def testStr(self):
        sexp = ri.StrSexpVector(["a", ])
        isStr = ri.globalenv.get("is.character")
        ok = isStr(sexp)[0]
        self.assertTrue(ok)

    def testBool(self):
        sexp = ri.BoolSexpVector([True, ])
        isBool = ri.globalenv.get("is.logical")
        ok = isBool(sexp)[0]
        self.assertTrue(ok)

    def testComplex(self):
        sexp = ri.ComplexSexpVector([1+2j, ])
        is_complex = ri.globalenv.get("is.complex")
        ok = is_complex(sexp)[0]
        self.assertTrue(ok)

    def testByte(self):
        # R raw vectors need bytes items on Python 3, plain str on Python 2.
        if IS_PYTHON3:
            seq = (b'a', b'b')
        else:
            seq = ('a', 'b')
        sexp = ri.ByteSexpVector(seq)
        is_raw = ri.globalenv.get("is.raw")
        ok = is_raw(sexp)[0]
        self.assertTrue(ok)
class NAValuesTestCase(unittest.TestCase):
    """Round-tripping of R's NA singletons (integer, logical, real, character)."""

    def testRtoNAInteger(self):
        na_int = ri.NAIntegerType()
        r_na_int = evalr("NA_integer_")[0]
        self.assertTrue(r_na_int is na_int)

    def testNAIntegertoR(self):
        na_int = ri.NAIntegerType()
        self.assertEqual(True, ri.baseenv["is.na"](na_int)[0])

    def testNAIntegerBinaryfunc(self):
        # arithmetic with NA is absorbing: NA + x is still the NA singleton
        na_int = ri.NAIntegerType()
        self.assertTrue((na_int + 2) is na_int)

    def testNAIntegerInVector(self):
        na_int = ri.NAIntegerType()
        x = ri.IntSexpVector((1, na_int, 2))
        self.assertTrue(x[1] is na_int)
        self.assertEqual(1, x[0])
        self.assertEqual(2, x[2])

    def testNAIntegerRepr(self):
        na_int = ri.NAIntegerType()
        self.assertEqual("NA_integer_", repr(na_int))

    def testRtoNALogical(self):
        na_lgl = ri.NALogicalType()
        r_na_lgl = evalr("NA")[0]
        self.assertTrue(r_na_lgl is na_lgl)

    def testNALogicaltoR(self):
        na_lgl = ri.NALogicalType()
        self.assertEqual(True, ri.baseenv["is.na"](na_lgl)[0])

    def testNALogicalInVector(self):
        na_bool = ri.NALogicalType()
        x = ri.BoolSexpVector((True, na_bool, False))
        self.assertTrue(x[1] is na_bool)
        self.assertEqual(True, x[0])
        self.assertEqual(False, x[2])

    def testNALogicalRepr(self):
        # Fixed: this method was a second `testNAIntegerRepr`, which
        # silently shadowed the integer-repr test in this class, so only
        # one of the two repr checks ever ran.  It checks NALogicalType,
        # so it is named accordingly.
        na_bool = ri.NALogicalType()
        self.assertEqual("NA", repr(na_bool))

    def testRtoNAReal(self):
        na_real = ri.NARealType()
        r_na_real = evalr("NA_real_")[0]
        self.assertTrue(r_na_real is na_real)

    def testNARealtoR(self):
        na_real = ri.NARealType()
        self.assertEqual(True, ri.baseenv["is.na"](na_real)[0])

    def testNARealBinaryfunc(self):
        na_real = ri.NARealType()
        self.assertTrue((na_real + 2.0) is na_real)

    def testNARealInVector(self):
        na_float = ri.NARealType()
        x = ri.FloatSexpVector((1.1, na_float, 2.2))
        self.assertTrue(x[1] is na_float)
        self.assertEqual(1.1, x[0])
        self.assertEqual(2.2, x[2])

    def testNARealRepr(self):
        na_float = ri.NARealType()
        self.assertEqual("NA_real_", repr(na_float))

    def testRtoNACharacter(self):
        na_character = ri.NACharacterType()
        r_na_character = evalr("NA_character_")[0]
        self.assertTrue(r_na_character is na_character)

    def testNACharactertoR(self):
        na_character = ri.NACharacterType()
        self.assertEqual(True, ri.baseenv["is.na"](ri.StrSexpVector((na_character, )))[0])

    def testNACharacterInVector(self):
        na_str = ri.NACharacterType()
        x = ri.StrSexpVector(("ab", na_str, "cd"))
        self.assertTrue(x[1] is na_str)
        self.assertEqual("ab", x[0])
        self.assertEqual("cd", x[2])

    def testNACharacterRepr(self):
        na_str = ri.NACharacterType()
        self.assertEqual("NA_character_", repr(na_str))
class IntSexpVectorTestCase(unittest.TestCase):
    """Construction and bounds checking for R integer vectors."""

    def testInitFromSeq(self):
        seq = range(3)
        v = ri.IntSexpVector(seq)
        self.assertEqual(3, len(v))
        for x, y in zip(seq, v):
            self.assertEqual(x, y)

    def testInitFromIter(self):
        # Fixed: `xrange` does not exist on Python 3, even though this
        # module otherwise branches on IS_PYTHON3.  iter(range(...)) gives
        # a genuine one-shot iterator on both major versions, preserving
        # the "construct from an iterator" intent of the test.
        it = iter(range(3))
        v = ri.IntSexpVector(it)
        self.assertEqual(3, len(v))
        for x, y in zip(range(3), v):
            self.assertEqual(x, y)

    def testInitFromSeqInvalidInt(self):
        seq = (1, 'b', 3)
        self.assertRaises(ValueError, ri.IntSexpVector, seq)

    def testInitFromSeqInvalidOverflow(self):
        v = ri.IntSexpVector((ri.R_LEN_T_MAX-1, ri.R_LEN_T_MAX))
        self.assertEqual(ri.R_LEN_T_MAX-1, v[0])
        self.assertEqual(ri.R_LEN_T_MAX, v[1])
        # values beyond R's length type must overflow on 64-bit builds
        if struct.calcsize("P") >= 8:
            self.assertRaises(OverflowError,
                              ri.IntSexpVector, (ri.R_LEN_T_MAX+1, ))
class FloatSexpVectorTestCase(unittest.TestCase):
    """Construction of R numeric (double) vectors."""

    def testInitFromSeq(self):
        seq = (1.0, 2.0, 3.0)
        v = ri.FloatSexpVector(seq)
        self.assertEqual(3, len(v))
        for x, y in zip(seq, v):
            self.assertEqual(x, y)

    def testInitFromIter(self):
        # Fixed: `xrange` does not exist on Python 3 despite the module's
        # IS_PYTHON3 support elsewhere; iter(range(...)) is a one-shot
        # iterator on both major versions.
        it = iter(range(10))
        v = ri.FloatSexpVector(it)
        self.assertEqual(10, len(v))
        for x, y in zip(range(10), v):
            self.assertEqual(x, y)

    def testInitFromSeqInvalidFloat(self):
        seq = (1.0, 'b', 3.0)
        self.assertRaises(ValueError, ri.FloatSexpVector, seq)
class ByteSexpVectorTestCase(unittest.TestCase):
    """Construction of R raw (byte) vectors from Python byte sequences."""

    def testInitFromBytes(self):
        # a bytes-like object on Python 3, a plain str on Python 2
        seq = (b'a', b'b', b'c') if IS_PYTHON3 else 'abc'
        vec = ri.ByteSexpVector(seq)
        self.assertEqual(3, len(vec))
        for expected, got in zip(seq, vec):
            self.assertEqual(expected, got)

    def testInitFromSeqOfBytes(self):
        # a sequence of individual byte items also works
        seq = (b'a', b'b', b'c') if IS_PYTHON3 else ('a', 'b', 'c')
        vec = ri.ByteSexpVector(seq)
        self.assertEqual(3, len(vec))
        for expected, got in zip(seq, vec):
            self.assertEqual(expected, got)

    def testInitFromSeqInvalidByte(self):
        # a non-byte item in the sequence must be rejected
        seq = (b'a', 2, b'c') if IS_PYTHON3 else ('a', 2, 'c')
        self.assertRaises(ValueError, ri.ByteSexpVector, seq)
class SexpVectorTestCase(unittest.TestCase):
    def testMissinfType(self):
        # (sic: "Missinf") SexpVector requires an explicit R type tag;
        # omitting it must raise ValueError rather than guess a type.
        self.assertRaises(ValueError, ri.SexpVector, [2, ])

    def testDel(self):
        # R vectors are fixed-length: item deletion is unsupported.
        v = ri.IntSexpVector(range(10))
        self.assertRaises(TypeError, v.__delitem__, 3)
#FIXME: end and initializing again causes currently a lot a trouble...
def testNewWithoutInit(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
self.assertTrue(False) # cannot be tested with Python < 2.6
return None
import multiprocessing
def foo(queue):
import rpy2.rinterface as rinterface
rinterface.endr(1)
try:
tmp = ri.SexpVector([1,2], ri.INTSXP)
res = (False, None)
except RuntimeError, re:
res = (True, re)
except Exception, e:
res = (False, e)
queue.put(res)
q = multiprocessing.Queue()
p = multiprocessing.Process(target = foo, args = (q,))
p.start()
res = q.get()
p.join()
self.assertTrue(res[0])
    def testNewBool(self):
        # a Python bool becomes an R logical vector
        sexp = ri.SexpVector([True, ], ri.LGLSXP)
        isLogical = ri.globalenv.get("is.logical")
        ok = isLogical(sexp)[0]
        self.assertTrue(ok)
        self.assertTrue(sexp[0])
        # non-bool input is coerced when targeting LGLSXP
        # NOTE(review): this asserts "a" coerces to TRUE -- confirm this is
        # the intended rpy2 coercion (R's own as.logical("a") would be NA).
        sexp = ri.SexpVector(["a", ], ri.LGLSXP)
        isLogical = ri.globalenv.get("is.logical")
        ok = isLogical(sexp)[0]
        self.assertTrue(ok)
        self.assertTrue(sexp[0])

    def testNewInt(self):
        sexp = ri.SexpVector([1, ], ri.INTSXP)
        isInteger = ri.globalenv.get("is.integer")
        ok = isInteger(sexp)[0]
        self.assertTrue(ok)
        # unparseable input yields NA rather than raising
        sexp = ri.SexpVector(["a", ], ri.INTSXP)
        isNA = ri.globalenv.get("is.na")
        ok = isNA(sexp)[0]
        self.assertTrue(ok)

    def testNewReal(self):
        sexp = ri.SexpVector([1.0, ], ri.REALSXP)
        isNumeric = ri.globalenv.get("is.numeric")
        ok = isNumeric(sexp)[0]
        self.assertTrue(ok)
        # unparseable input yields NA rather than raising
        sexp = ri.SexpVector(["a", ], ri.REALSXP)
        isNA = ri.globalenv.get("is.na")
        ok = isNA(sexp)[0]
        self.assertTrue(ok)

    def testNewComplex(self):
        sexp = ri.SexpVector([1.0 + 1.0j, ], ri.CPLXSXP)
        isComplex = ri.globalenv.get("is.complex")
        ok = isComplex(sexp)[0]
        self.assertTrue(ok)

    def testNewString(self):
        sexp = ri.SexpVector(["abc", ], ri.STRSXP)
        isCharacter = ri.globalenv.get("is.character")
        ok = isCharacter(sexp)[0]
        self.assertTrue(ok)
        # non-string items are stringified when targeting STRSXP
        sexp = ri.SexpVector([1, ], ri.STRSXP)
        isCharacter = ri.globalenv.get("is.character")
        ok = isCharacter(sexp)[0]
        self.assertTrue(ok)

    def testNewUnicode(self):
        sexp = ri.SexpVector([u'abc', ], ri.STRSXP)
        isCharacter = ri.globalenv.get("is.character")
        ok = isCharacter(sexp)[0]
        self.assertTrue(ok)
        self.assertEqual('abc', sexp[0])

    def testNewUnicodeSymbol(self):
        # a non-ASCII code point must survive the round trip intact
        sexp = ri.SexpVector((u'\u21a7', ), ri.STRSXP)
        isCharacter = ri.globalenv.get("is.character")
        ok = isCharacter(sexp)[0]
        self.assertTrue(ok)
        self.assertEqual(u'\u21a7', sexp[0])

    def testNewList(self):
        # heterogeneous Python list -> R list; each element is itself a
        # length-one vector, hence the [i][0] indexing below
        vec = ri.ListSexpVector([1,'b',3,'d',5])
        ok = ri.baseenv["is.list"](vec)[0]
        self.assertTrue(ok)
        self.assertEqual(5, len(vec))
        self.assertEqual(1, vec[0][0])
        self.assertEqual('b', vec[1][0])

    def testNewVector(self):
        sexp_char = ri.SexpVector(["abc", ],
                                  ri.STRSXP)
        sexp_int = ri.SexpVector([1, ],
                                 ri.INTSXP)
        sexp = ri.SexpVector([sexp_char, sexp_int],
                             ri.VECSXP)
        isList = ri.globalenv.get("is.list")
        ok = isList(sexp)[0]
        self.assertTrue(ok)
        self.assertEqual(2, len(sexp))

    def testNew_InvalidType_NotAType(self):
        # type codes outside the valid SEXPTYPE range are rejected
        self.assertRaises(ValueError, ri.SexpVector, [1, ], -1)
        self.assertRaises(ValueError, ri.SexpVector, [1, ], 250)

    def testNew_InvalidType_NotAVectorType(self):
        # ENVSXP is a valid SEXPTYPE but not a vector type
        self.assertRaises(ValueError, ri.SexpVector, [1, ], ri.ENVSXP)

    def testNew_InvalidType_NotASequence(self):
        self.assertRaises(ValueError, ri.SexpVector, 1, ri.INTSXP)
    def testGetItem(self):
        """Indexing into builtin R vectors, R lists, and with negative indices."""
        letters_R = ri.globalenv.get("letters")
        self.assertTrue(isinstance(letters_R, ri.SexpVector))
        # (value, 0-based index) pairs from R's builtin `letters`
        letters = (('a', 0), ('b', 1), ('c', 2),
                   ('x', 23), ('y', 24), ('z', 25))
        for l, i in letters:
            self.assertTrue(letters_R[i] == l)
        Rlist = ri.globalenv.get("list")
        seq_R = ri.globalenv.get("seq")
        mySeq = seq_R(ri.SexpVector([0, ], ri.INTSXP),
                      ri.SexpVector([10, ], ri.INTSXP))
        myList = Rlist(s=mySeq, l=letters_R)
        idem = ri.globalenv.get("identical")
        # list items come back identical (in R's sense) to what went in
        self.assertTrue(idem(mySeq, myList[0]))
        self.assertTrue(idem(letters_R, myList[1]))
        # Python-style negative indexing counts from the end
        letters_R = ri.globalenv.get("letters")
        self.assertEqual('z', letters_R[-1])

    def testGetItemLang(self):
        # indexing a language object (a formula) yields symbols
        formula = ri.baseenv.get('formula')
        f = formula(ri.StrSexpVector(['y ~ x', ]))
        y = f[0]
        self.assertEqual(ri.SYMSXP, y.typeof)

    def testGetItemExpression(self):
        expression = ri.baseenv.get('expression')
        e = expression(ri.StrSexpVector(['a', ]),
                       ri.StrSexpVector(['b', ]))
        y = e[0]
        self.assertEqual(ri.STRSXP, y.typeof)

    def testGetItemPairList(self):
        pairlist = ri.baseenv.get('pairlist')
        pl = pairlist(a = ri.StrSexpVector([1, ]))
        # pairlist items come back as pairlist nodes (LISTSXP)
        y = pl[0]
        self.assertEqual(ri.LISTSXP, y.typeof)

    def testGetItemNegativeOutOfBound(self):
        letters_R = ri.globalenv.get("letters")
        self.assertRaises(IndexError, letters_R.__getitem__,
                          -100)
def testGetItemOutOfBound(self):
myVec = ri.SexpVector([0, 1, 2, 3, 4, 5], ri.INTSXP)
self.assertRaises(IndexError, myVec.__getitem__, 10)
if (sys.maxint > ri.R_LEN_T_MAX):
self.assertRaises(IndexError, myVec.__getitem__,
ri.R_LEN_T_MAX+1)
    def testGetSliceFloat(self):
        # slicing returns a new vector of the same type
        vec = ri.FloatSexpVector([1.0,2.0,3.0])
        vec = vec[0:2]
        self.assertEqual(2, len(vec))
        self.assertEqual(1.0, vec[0])
        self.assertEqual(2.0, vec[1])

    def testGetSliceInt(self):
        vec = ri.IntSexpVector([1,2,3])
        vec = vec[0:2]
        self.assertEqual(2, len(vec))
        self.assertEqual(1, vec[0])
        self.assertEqual(2, vec[1])

    def testGetSliceIntNegative(self):
        # negative slice bounds follow Python semantics
        vec = ri.IntSexpVector([1,2,3])
        vec = vec[-2:-1]
        self.assertEqual(1, len(vec))
        self.assertEqual(2, vec[0])

    def testGetSliceMissingBoundary(self):
        # open-ended slices ([:n], [n:], [-n:]) follow Python semantics
        vec = ri.IntSexpVector(range(10))
        vec_slice = vec[:2]
        self.assertEqual(2, len(vec_slice))
        self.assertEqual(0, vec_slice[0])
        self.assertEqual(1, vec_slice[1])
        vec_slice = vec[8:]
        self.assertEqual(2, len(vec_slice))
        self.assertEqual(8, vec_slice[0])
        self.assertEqual(9, vec_slice[1])
        vec_slice = vec[-2:]
        self.assertEqual(2, len(vec_slice))
        self.assertEqual(8, vec_slice[0])
        self.assertEqual(9, vec_slice[1])

    def testGetSliceBool(self):
        vec = ri.BoolSexpVector([True,False,True])
        vec = vec[0:2]
        self.assertEqual(2, len(vec))
        self.assertEqual(True, vec[0])
        self.assertEqual(False, vec[1])

    def testGetSliceStr(self):
        vec = ri.StrSexpVector(['a','b','c'])
        vec = vec[0:2]
        self.assertEqual(2, len(vec))
        self.assertEqual('a', vec[0])
        self.assertEqual('b', vec[1])

    def testGetSliceComplex(self):
        vec = ri.ComplexSexpVector([1+2j,2+3j,3+4j])
        vec = vec[0:2]
        self.assertEqual(2, len(vec))
        self.assertEqual(1+2j, vec[0])
        self.assertEqual(2+3j, vec[1])

    def testGetSliceList(self):
        # list elements are length-one vectors, hence the [i][0] indexing
        vec = ri.ListSexpVector([1,'b',True])
        vec = vec[0:2]
        self.assertEqual(2, len(vec))
        self.assertEqual(1, vec[0][0])
        self.assertEqual('b', vec[1][0])
    def testAssignItemDifferentType(self):
        # item assignment is strictly typed: no implicit coercion
        c_R = ri.globalenv.get("c")
        myVec = c_R(ri.SexpVector([0, 1, 2, 3, 4, 5], ri.INTSXP))
        self.assertRaises(ValueError, myVec.__setitem__, 0,
                          ri.SexpVector(["a", ], ri.STRSXP))

    def testAssignItemOutOfBound(self):
        c_R = ri.globalenv.get("c")
        myVec = c_R(ri.SexpVector([0, 1, 2, 3, 4, 5], ri.INTSXP))
        self.assertRaises(IndexError, myVec.__setitem__, 10,
                          ri.SexpVector([1, ], ri.INTSXP))

    def testAssignItemInt(self):
        c_R = ri.globalenv.get("c")
        myVec = c_R(ri.SexpVector([0, 1, 2, 3, 4, 5], ri.INTSXP))
        myVec[0] = ri.SexpVector([100, ], ri.INTSXP)
        self.assertTrue(myVec[0] == 100)
        myVec[3] = ri.SexpVector([100, ], ri.INTSXP)
        self.assertTrue(myVec[3] == 100)
        # a negative index assigns counting from the end
        myVec[-1] = ri.SexpVector([200, ], ri.INTSXP)
        self.assertTrue(myVec[5] == 200)

    def testAssignItemReal(self):
        c_R = ri.globalenv.get("c")
        myVec = c_R(ri.SexpVector([0.0, 1.0, 2.0, 3.0, 4.0, 5.0],
                                  ri.REALSXP))
        myVec[0] = ri.SexpVector([100.0, ], ri.REALSXP)
        # module-level float tolerance helper (absolute epsilon compare)
        self.assertTrue(floatEqual(myVec[0], 100.0))
        myVec[3] = ri.SexpVector([100.0, ], ri.REALSXP)
        self.assertTrue(floatEqual(myVec[3], 100.0))

    def testAssignItemLogical(self):
        c_R = ri.globalenv.get("c")
        myVec = c_R(ri.SexpVector([True, False, True, True, False],
                                  ri.LGLSXP))
        myVec[0] = ri.SexpVector([False, ], ri.LGLSXP)
        self.assertFalse(myVec[0])
        myVec[3] = ri.SexpVector([False, ], ri.LGLSXP)
        self.assertFalse(myVec[3])

    def testAssignItemComplex(self):
        c_R = ri.globalenv.get("c")
        myVec = c_R(ri.SexpVector([1.0+2.0j, 2.0+2.0j, 3.0+2.0j,
                                   4.0+2.0j, 5.0+2.0j],
                                  ri.CPLXSXP))
        myVec[0] = ri.SexpVector([100.0+200.0j, ], ri.CPLXSXP)
        # compare real and imaginary parts separately with the tolerance helper
        self.assertTrue(floatEqual(myVec[0].real, 100.0))
        self.assertTrue(floatEqual(myVec[0].imag, 200.0))
        myVec[3] = ri.SexpVector([100.0+200.0j, ], ri.CPLXSXP)
        self.assertTrue(floatEqual(myVec[3].real, 100.0))
        self.assertTrue(floatEqual(myVec[3].imag, 200.0))
def testAssignItemList(self):
myVec = ri.SexpVector([ri.StrSexpVector(["a", ]),
ri.IntSexpVector([1, ]),
ri.IntSexpVector([3, ])],
ri.VECSXP)
myVec[0] = ri.SexpVector([ri.FloatSexpVector([100.0, ]), ],
ri.VECSXP)
self.assertTrue(floatEqual(myVec[0][0][0], 100.0))
myVec[2] = ri.SexpVector([ri.StrSexpVector(["a", ]), | |
else:
print("You just entered " + SAVE_dict['CurrentTownName'] + ". The next 3 turns are SAFE")
blockType_Safe()
if SAVE_dict['InTown'] == 1 and CONT2_dict['TravelledCONT2'] == 1: # --------------------------------------------- S E C O N D . C O N T I N E N T . N A M E S . ----------------------------------------------
if not SAVE_dict['CurrentTownName']:
SAVE_dict['CurrentTownName'] = random.choice(town_names_CONT2)
print("You just entered " + SAVE_dict['CurrentTownName'] + ". The next 3 turns are SAFE")
blockType_Safe()
def combat():
global playerHealth
global enemyHealth
global manaPoints
global enemy_isAlert
global gold
global game_class
global bossBattle
global e_weaponPower
enemyAlive = 1
yourTurn = 1
playerHealth = SAVE_dict['YourHealth']
manaPoints = SAVE_dict['YourMana']
armorPoints = SAVE_dict['YourArmor']
boss_weaponPower = ((SAVE_dict['YourLevel'] * 5) + (SAVE_dict['YourLevel'] + random.randint(3, 5))) / 1.6
critical = random.randint(1, 130)
if config.get('General', 'music') == "On":
mixer.music.load(combat_music)
mixer.music.play(-1)
else:
mixer.music.stop()
while enemyAlive == 1:
while yourTurn == 1:
if SAVE_dict['YourClass'] == "Warrior": # WARRIOR CLASS
show_warriorskills()
print("5. Normal Attack\n")
print("Your HP: " + str(playerHealth) + "\nEnemy HP: " + str(enemyHealth))
print("Mana: " + str(manaPoints))
skill_selection = input("Select a skill from the above: ")
if skill_selection == "1":
if manaPoints >= 5 and SAVE_dict['ShieldEquipped'] == 1:
manaPoints -= 5
print("Devastating Charge!")
time.sleep(1)
dc_damage = ((SAVE_dict['YourWeaponPower'] + (SAVE_dict['YourLevel'] * 1.55)) + random.randint(1, 5)) + SKILLS_dict['dChargePower'] # Devastating Charge damage calculation
if critical in range(1, SAVE_dict['CriticalRate']):
dc_critical = dc_damage * 2
print("You deal " + str(round(dc_critical, 3)) * " damage.")
time.sleep(1)
print("Critical Hit!")
enemyHealth -= dc_critical
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
dc_critical = 0
yourTurn = 0
else:
print("You deal " + str(round(dc_damage, 3)) + " damage.")
enemyHealth -= dc_damage
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
yourTurn = 0
else:
print("You don't have enough mana and/or you don't have a shield equipped.\n")
time.sleep(1)
continue
elif skill_selection == "2":
if manaPoints >= 10 and SAVE_dict['ShieldEquipped'] == 1:
manaPoints -= 10
print("Shield Bash!")
time.sleep(1)
sb_damage = ((SAVE_dict['YourWeaponPower'] + 2) + (SAVE_dict['YourLevel'] * 2) + random.randint(2, 6)) + SKILLS_dict['sBashPower'] # Shield Bash damage calculation
if critical in range(1, SAVE_dict['CriticalRate']):
sb_critical = sb_damage * 2
print("You deal " + str(round(sb_critical, 3)) * " damage.")
time.sleep(1)
print("Critical Hit!")
enemyHealth -= sb_critical
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
sb_critical = 0
yourTurn = 0
else:
print("You deal " + str(round(sb_damage, 3)) + " damage.")
enemyHealth -= sb_damage
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
yourTurn = 0
else:
print("You don't have enough mana and/or you don't have a shield equipped.\n")
time.sleep(1)
continue
elif skill_selection == "3":
if manaPoints >= 20:
manaPoints -= 20
print("Decapitating Slash!")
time.sleep(1)
ds_damage = ((SAVE_dict['YourWeaponPower'] + 3) + (SAVE_dict['YourLevel'] * 1.8) + random.randint(4, 13)) + SKILLS_dict['dSlashPower'] # Decapitating Slash damage calculation
if critical in range(1, SAVE_dict['CriticalRate']):
ds_critical = ds_damage * 2
print("You deal " + str(round(ds_critical, 3)) * " damage.")
time.sleep(1)
print("Critical Hit!")
enemyHealth -= ds_critical
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
ds_critical = 0
yourTurn = 0
else:
print("You deal " + str(round(ds_damage, 3)) + " damage.")
enemyHealth -= ds_damage
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
yourTurn = 0
else:
print("You don't have enough mana.\n")
time.sleep(1)
continue
elif skill_selection == "4":
if manaPoints >= 60:
manaPoints -= 60
print("Thousand Spiritual Swords of Hell!")
time.sleep(1)
tssoh_damage = ((SAVE_dict['YourWeaponPower'] + 10) + (SAVE_dict['YourLevel'] * 2) + random.randint(9, 22) + 7) + SKILLS_dict['sHellPower'] # Thousand Spiritual Swords of Hell damage calculation
if critical in range(1, SAVE_dict['CriticalRate']):
tssoh_critical = tssoh_damage * 2
print("You deal " + str(round(tssoh_critical, 3)) + " damage.")
time.sleep(1)
print("Critical Hit!")
enemyHealth -= tssoh_critical
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
tssoh_critical = 0
yourTurn = 0
else:
print("You deal " + str(round(tssoh_damage, 3)) + " damage.")
enemyHealth -= tssoh_damage
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
yourTurn = 0
else:
print("You don't have enough mana.\n")
time.sleep(1)
continue
elif skill_selection == "5":
print("Normal Attack!")
time.sleep(1)
na_damage = SAVE_dict['YourWeaponPower'] + random.randint(1, 3)
if critical in range(1, SAVE_dict['CriticalRate']):
na_critical = na_damage * 2
print("You deal " + str(round(na_critical, 3)) * " damage.")
time.sleep(1)
print("Critical Hit!")
enemyHealth -= na_critical
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
na_critical = 0
yourTurn = 0
else:
print("You deal " + str(round(na_damage, 3)) + " damage.")
enemyHealth -= na_damage
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
yourTurn = 0
else:
print("You skipped your turn for not selecting from one of the options specified.\n")
time.sleep(1)
yourTurn = 0
elif SAVE_dict['YourClass'] == "Rogue": # ROGUE CLASS
if enemy_isAlert == 1: # If Rogue is in Stealthed state, do the following
show_stealthed_rogueskills()
print("3. Normal/Ambush Attack - This Normal Attack is SPECIAL because you're in Stealthed state.\n")
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
print("Mana: " + str(manaPoints))
skill_selection = input("Select a skill from the above: ")
if skill_selection == "1":
if manaPoints >= 10:
manaPoints -= 10
skill_selection = ""
print("Backstab!")
time.sleep(1)
backstab_damage = (SAVE_dict['YourWeaponPower'] + random.randint(4, 11)) + random.randint(3,9) + 2 # Backstab damage calculation
if critical in range(1, SAVE_dict['CriticalRate']):
backstab_critical = backstab_damage * 2
print("You deal " + str(round(backstab_critical, 3)) * " damage.")
time.sleep(1)
print("Critical Hit!")
enemyHealth -= backstab_critical
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
backstab_critical = 0
yourTurn = 0
else:
print("You deal " + str(round(backstab_damage, 3)) + " damage.")
enemyHealth -= backstab_damage
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
yourTurn = 0
enemy_isAlert = 0
else:
print("You don't have enough mana.\n")
time.sleep(1)
continue
elif skill_selection == "2":
if manaPoints >= 25:
manaPoints -= 25
print("Swift Assault!")
time.sleep(1)
ss_damage = (SAVE_dict['YourWeaponPower'] + random.randint(5, 12) + random.randint(5,9)) + (SAVE_dict['YourLevel'] * 1.66) / 0.3 + 3 # Swift Assault damage calculation
if critical in range(1, SAVE_dict['CriticalRate']):
ss_critical = ss_damage * 2
print("You deal " + str(round(ss_critical, 3)) * " damage.")
time.sleep(1)
print("Critical Hit!")
enemyHealth -= ss_critical
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
ss_critical = 0
yourTurn = 0
else:
print("You deal " + str(round(ss_damage, 3)) + " damage.")
enemyHealth -= ss_damage
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
yourTurn = 0
enemy_isAlert = 0
else:
print("You don't have enough mana.\n")
time.sleep(1)
continue
elif skill_selection == "3":
print("Ambush Attack!")
time.sleep(1)
ambush_damage = (SAVE_dict['YourWeaponPower'] + random.randint(1, 3)) + random.randint(1,4) # Ambush Attack damage calculation (Normal Attack but SPECIAL)
if critical in range(1, SAVE_dict['CriticalRate']):
ambush_critical = ambush_damage * 2
print("You deal " + str(round(ambush_critical, 3)) * " damage.")
time.sleep(1)
print("Critical Hit!")
enemyHealth -= ambush_critical
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
ambush_critical = 0
yourTurn = 0
else:
print("You deal " + str(round(ambush_damage, 3)) + " damage.")
enemyHealth -= ambush_damage
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
time.sleep(1)
yourTurn = 0
enemy_isAlert = 0
else: # If Rogue isn't in Stealthed state, do the following
show_rogueskills()
print("5. Normal Attack\n")
print("Your HP: " + str(round(playerHealth, 3)) + "\nEnemy HP: " + str(round(enemyHealth, 3)))
print("Mana: " + str(manaPoints))
skill_selection = input("Select a skill from the above: ")
if skill_selection == "1":
if manaPoints >= 10:
manaPoints -= 10
print("You become one with the shadows of the environment around you.")
time.sleep(1)
yourTurn = 0
else:
print("You don't have enough mana.\n")
time.sleep(1)
continue
elif skill_selection == "2":
if manaPoints >= 10:
manaPoints -= 10
print("Bleeding Dagger!")
time.sleep(1)
bd_damage = ((SAVE_dict['YourWeaponPower'] + 4) + (SAVE_dict['YourLevel'] * 2) + random.randint(3, 7) + 3) + SKILLS_dict['bDaggerPower'] # Bleeding Dagger damage calculation
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.