# Dataset-export artifact (non-code residue) preserved as comments so the file parses:
# input stringlengths 2.65k 237k | output stringclasses 1
# value |
# |---|---|
# <gh_stars>10-100
import os, sys, json, pickle, io, time, random, copy
import h5py
import pprint
import threading, queue
from tqdm import tqdm
from collections import Counter
from transformers import MobileBertTokenizer
import cv2
from PIL import Image
import numpy as np
import revtok
import torch
sys.path.append(os.path.join(os.environ['ALFRED_ROOT']))
from gen.constants import *
from gen.utils.py_util import remove_spaces_and_lower
from gen.utils.bb_util import bb_IoU
from models.config.configs import Config
from models.utils.vocab import Vocab
from models.nn.mrcnn import MaskRCNNDetector
from models.utils.bert_utils import get_bert_tknz, mmt_word_ids_to_bert_ids
lock = threading.Lock()
class AlfredPyTorchDataset(torch.utils.data.Dataset):
    """PyTorch dataset over preprocessed ALFRED action instances.

    Wraps an ``AlfredDataset`` (which owns the vocabularies and the
    preprocessed-data path) and serves one training instance per action step
    for either high-level planning or low-level ('mani'/'navi') action
    prediction.
    """

    def __init__(self, alfred_data, split, task_type, args):
        """
        alfred_data: AlfredDataset instance owning vocabularies and pp_path.
        split: dataset split name (e.g. 'train', 'valid_seen').
        task_type: '<level>' or '<level>_<low_data>', e.g. 'high', 'low_navi'.
        args: parsed configuration namespace.
        """
        super().__init__()
        self.data = alfred_data
        self.split = split
        self.task_type = task_type
        self.args = args
        self.detector_type = args.detector_type
        self.topk = args.topk_objs
        self.max_length = args.max_enc_length
        self.image_size = args.image_size
        self.action_vocab = self.data.dec_in_vocab
        # task_type encodes the level and, for low-level, the sub-task kind
        self.level = task_type.split('_')[0]
        self.low_data = task_type.split('_')[1] if self.level == 'low' else None
        self.dataset = self.get_data_instances()

    def get_data_instances(self):
        """Load object-detection results and action instances for this split.

        Fix: previously the detection-results file was opened *before* the
        existence check that triggers ``prepare_data_instances()``, so a
        missing file raised FileNotFoundError instead of regenerating the
        preprocessed data. Both paths are now resolved and (re)generated
        before either file is opened.
        """
        det_sp = os.path.join(self.data.pp_path, '%s_det_res_%s.json'%(self.split, self.args.detector_type))
        if self.level == 'high':
            sp = os.path.join(self.data.pp_path, '%s_high_action_instances.json'%(self.split))
        elif self.level == 'low':
            sp = os.path.join(self.data.pp_path, '%s_low_action_instances_%s.json'%(self.split, self.low_data))
        if self.args.train_one_shot and self.split == 'train':
            # one-shot training reads a seeded subset instead of the full file
            sp = sp.replace('instances', 'seed')
        # regenerate the preprocessed files first if either one is missing
        if not os.path.exists(sp) or not os.path.exists(det_sp):
            self.data.prepare_data_instances()
        with open(det_sp, 'rb') as f:
            self.obj_det_res = json.load(f)
        with open(sp, 'rb') as f:
            self.dataset = json.load(f)
        if self.split == 'train':
            if self.args.train_one_shot:
                # magic totals: full instance counts of the mani/navi training
                # sets, used to report the effective training proportion
                total_len = 209331 if self.low_data == 'mani' else 983260
                self.args.train_proportion = len(self.dataset) / total_len * 100
            elif self.args.train_proportion != 100:
                # train on a random proportion of the data
                random.shuffle(self.dataset)
                prop = int(len(self.dataset) * self.args.train_proportion / 100)+1
                self.dataset = self.dataset[:prop]
        print('%s: %s action data instance: #%d'%(self.split, self.level, len(self)))
        return self.dataset

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        """Build one training item (all fields as numpy arrays or ints)."""
        args = self.args
        instance = self.dataset[idx]
        # record the item in which data are transformed to PyTorch Tensors
        item = {'batch_type': self.task_type}
        item['interact'] = instance['interact'] if 'interact' in instance else 0
        for i in ['path', 'high_idx', 'low_idx']:
            if i in instance:
                item[i] = instance[i]
        # --- visual input: top-k detected objects (slot 0 is a CLS/agent slot)
        task_path = instance['path']
        ins_obj_det_res = self.obj_det_res[task_path+str(instance['vision_input'])]
        vis_len = args.topk_objs + 1
        obj_num = min(vis_len - 1, len(ins_obj_det_res['score']))
        vis_feat_col = np.zeros((vis_len, 7)) # vis_len x 7
        vis_cls_col = np.zeros((vis_len, ), dtype=int) # vis_len
        if not args.disable_feat_vis:
            for obj_idx in range(obj_num):
                # normalize pixel coords to [0, 1]
                bbox = [i/self.image_size for i in ins_obj_det_res['bbox'][obj_idx]]
                cls_idx = self.action_vocab.w2id(ins_obj_det_res['class'][obj_idx])
                vis_feat_col[obj_idx+1][:4] = bbox
                vis_feat_col[obj_idx+1][4] = bbox[2]-bbox[0]  # width
                vis_feat_col[obj_idx+1][5] = bbox[3]-bbox[1]  # height
                vis_feat_col[obj_idx+1][6] = ins_obj_det_res['score'][obj_idx] # 1d
                vis_cls_col[obj_idx + 1] = cls_idx
            vis_cls_col[0] = 1  # mark the CLS slot non-empty for seq_mask
        item['vision_feats'] = vis_feat_col.astype(np.float32) # np: vis_len x 7
        item['vision_cls'] = vis_cls_col # np: vis_len
        # --- history visual input (navigation tasks only)
        if self.args.enable_feat_vis_his and 'navi' in self.task_type:
            his_len = args.history_max_length - 1
            max_obj_num = 10
            his_vis_feat = np.zeros((his_len, max_obj_num, 7), dtype=np.float32) # his_len x max_obj_num x 7
            his_vis_cls = np.zeros((his_len, max_obj_num), dtype=int) # his_len x max_obj_num
            for his_idx, img_idx in enumerate(instance['vis_history_input'][-his_len:]):
                his_ins_obj_det_res = self.obj_det_res[task_path+str(img_idx)]
                obj_num = min(max_obj_num, len(his_ins_obj_det_res['class']))
                for obj_idx in range(obj_num):
                    bbox = [i/self.image_size for i in his_ins_obj_det_res['bbox'][obj_idx]]
                    cls_idx = self.action_vocab.w2id(his_ins_obj_det_res['class'][obj_idx])
                    his_vis_feat[his_idx][obj_idx] = bbox + [bbox[2]-bbox[0], bbox[3]-bbox[1], his_ins_obj_det_res['score'][obj_idx]]
                    his_vis_cls[his_idx][obj_idx] = cls_idx
            item['his_vis_feat'] = his_vis_feat
            item['his_vis_cls'] = his_vis_cls
        # --- mask selection label
        label = ins_obj_det_res['label'] # None or an obj index
        if label is None or instance['arg_output'] == 105:
            label = -1  # no grounding target
        elif isinstance(label, int):
            label += 1  # shift by one for the CLS slot
            if label >= vis_len:
                label = 0
        elif item['interact']:
            # leave the vis idx 0 as the indicator of not sure about how to ground
            label = 0
        item['mask_label'] = label # int
        # --- language input
        lang_len = args.lang_max_length
        lang_widx = np.zeros((lang_len,), dtype=int) # lang_len
        if not args.disable_feat_lang:
            wids = instance['lang_input']
            if self.args.use_bert:
                # strip the leading/trailing special tokens before re-tokenizing
                wids = mmt_word_ids_to_bert_ids(wids[1:-1], self.data.vocab, self.data.bert_tknz)
            actual_len = min(lang_len, len(wids))
            if actual_len != len(wids):
                print('warning: %d truncated to %s'%(len(wids), lang_len))
            lang_widx[:actual_len] = wids[:actual_len]
            if not self.args.use_bert:
                # map out-of-vocabulary ids to 0
                lang_widx[lang_widx >= self.data.vocab.vocab_size] = 0
        item['lang_input'] = lang_widx #np: lang_len
        # --- history actions (type/arg pairs, interleaved)
        action_len = args.history_max_length * 2 #*2 because an action consists of a type and an arg
        action_seq = np.zeros((action_len,), dtype=int) # max_his_len*2
        if not args.disable_feat_action_his:
            for aidx, a in enumerate(instance['actype_history_input'][-args.history_max_length:]):
                action_seq[aidx*2] = a
                action_seq[aidx*2+1] = instance['arg_history_input'][-args.history_max_length:][aidx]
        item['action_history_input'] = action_seq #np: max_his_len*2
        # --- action type/arg labels
        item['actype_label'] = instance['actype_output'] #int
        item['arg_label'] = instance['arg_output'] #int
        if 'navi' in self.task_type:
            item['arg_label'] = -1 # do not predict arguments for non-interactable low actions
        # --- sequence mask over [CLS] + vision + language + action history
        seq_mask = np.zeros((args.max_enc_length,), dtype=int) + 1 # max_enc_length
        offset = 1
        for l, seq in [(vis_len, vis_cls_col), (lang_len, lang_widx) , (action_len, action_seq)]:
            seq_mask[offset: offset+l] = (seq!=0).astype(int)
            offset += l
        assert offset == args.max_enc_length
        item['seq_mask'] = seq_mask #np: max_enc_length
        # positions of the last two valid encoder slots (arg, then type)
        item['arg_pos'] = seq_mask.nonzero()[0][-1]
        item['type_pos'] = item['arg_pos'] - 1
        if 'navi' in self.task_type:
            if self.args.auxiliary_loss_navi:
                item['visible_label'] = instance['visible']
                item['reached_label'] = instance['reached']
                item['progress_label'] = instance['progress']
            if self.args.enable_feat_posture:
                item['rotation'] = int(instance['rotation'])
                item['horizon'] = int(instance['horizon'])%12  # 12 discrete 30-degree bins
        return item
class AlfredDataset(object):
    def __init__(self, args):
        """Load dataset splits, build/load vocabularies and (optionally)
        run the full preprocessing pipeline.

        args: parsed configuration namespace; must provide raw_data, pp_data,
        image_size, splits, preprocess, skip_detection, img_data.
        """
        self.args = args
        self.raw_path = args.raw_data
        self.pp_path = args.pp_data # preprocessed data saving path
        self.image_size = args.image_size
        with open(self.args.splits) as f:
            self.dataset_splits = json.load(f)
        pprint.pprint({k: len(v) for k, v in self.dataset_splits.items()})
        if not os.path.isdir(self.pp_path):
            os.makedirs(self.pp_path)
        # load/construct vocabularies
        self.prepare_vocab()
        # preprocess data
        if args.preprocess:
            if not args.skip_detection:
                # load trajectory images recorded in the full dataset
                self.image_hdf_path = args.img_data
                self.image_data = h5py.File(self.image_hdf_path, 'r')
                # self.image_data.visit(lambda x: print(x))
                # object detector used to produce per-frame detections
                self.mrcnn = MaskRCNNDetector(args, ['all', 'sep'])
            self.batch_size = 10
            self.init_statistics()
            self.preprocess_data()
            # NOTE(review): prepare_data_instances is not visible in this
            # chunk -- presumably defined further down this class; confirm.
            self.prepare_data_instances()
            self.save_statistics()
def init_statistics(self):
self.stats = {
'goal length': Counter(),
'instr length': Counter(),
'high action steps': Counter(),
'low action steps': Counter(),
'detection num': Counter(),
'object num': Counter(),
'receptacle num': Counter(),
}
self.interact_num = 0
self.good_detect_num = {'all':0, 'sep':0}
def save_statistics(self):
for k,v in self.stats.items():
if isinstance(v, dict):
self.stats[k] = dict(sorted(v.items(), key=lambda item: item[0]))
with open(os.path.join(self.pp_path, 'statistics.json'), 'w') as f:
json.dump(self.stats, f, indent=2)
print('interact_num:', int(self.interact_num/2))
print('good_detect_num:', self.good_detect_num)
    def prepare_vocab(self):
        """Build (first run) or load (subsequent runs) all vocabularies.

        Produces:
          - self.vocab: natural-language encoder vocabulary
          - self.dec_in_vocab: decoder-input vocab (actions + object names)
          - self.dec_out_vocab_high / _low / _arg: decoder output vocabs
        Also builds the BERT tokenizer when args.use_bert is set.
        """
        # vocab save/load paths
        self.language_vocab_save = os.path.join(self.pp_path, 'vocab')
        self.dec_in_vocab_save = os.path.join(self.pp_path, 'dec_in_vocab')
        self.dec_out_vocab_high_save = os.path.join(self.pp_path, 'dec_out_vocab_high')
        self.dec_out_vocab_low_save = os.path.join(self.pp_path, 'dec_out_vocab_low')
        self.dec_out_vocab_args_save = os.path.join(self.pp_path, 'dec_out_vocab_arg')
        # rebuild only when the saved word->id table is absent
        preprocess_vocab = not os.path.exists(self.language_vocab_save+'.w2id.json')
        # preprocess_vocab= True
        if preprocess_vocab:
            # natural language vocabulary (word <-> idx for encoder)
            self.vocab = Vocab(self.args.vocab_size, special_tokens=[PAD, UNK, SEP, SOS, 'None'])
            print('Constructing vocabulary for natural language')
            for k, d in self.dataset_splits.items():
                if 'test' in k:
                    continue # should not see test sets even for vocabulary construction
                print(' - dataset: {}'.format(k))
                for task in tqdm(d):
                    # load json file
                    json_path = os.path.join(self.args.raw_data, k, task['task'], 'traj_data.json')
                    with open(json_path) as f:
                        traj_raw = json.load(f)
                    # NOTE(review): process_language is not visible in this
                    # chunk -- presumably it feeds tokens into self.vocab
                    # when for_vocab_construction=True; confirm.
                    self.process_language(traj_raw, {}, 0, for_vocab_construction=True)
            # save vocab in data path
            self.vocab.construct(self.language_vocab_save)
            print('Constructing vocabularies for encoder/decoder actions and objects')
            # decoder input (action/object names <-> idx)
            task_tokens = [PAD, UNK, SOS]+list(ACTION_TO_WORDS.keys())
            task_tokens += ALL_OBJECTS+['None']
            self.dec_in_vocab = Vocab(special_tokens=task_tokens)
            self.dec_in_vocab.construct(self.dec_in_vocab_save)
            # high-level decoder action output (high actions <-> idx)
            self.dec_out_vocab_high = Vocab(special_tokens=HIGH_ACTIONS)
            self.dec_out_vocab_high.construct(self.dec_out_vocab_high_save)
            # low-level decoder action output (low actions <-> idx)
            self.dec_out_vocab_low = Vocab(special_tokens=LOW_ACTIONS)
            self.dec_out_vocab_low.construct(self.dec_out_vocab_low_save)
            # decoder arguments output (object names <-> idx)
            self.dec_out_vocab_arg = Vocab(special_tokens=ACTION_ARGS)
            self.dec_out_vocab_arg.construct(self.dec_out_vocab_args_save)
        else:
            print('Loading vocabularies')
            self.vocab = Vocab()
            self.vocab.load(self.language_vocab_save, self.args.vocab_size)
            self.dec_in_vocab = Vocab()
            self.dec_in_vocab.load(self.dec_in_vocab_save)
            self.dec_out_vocab_high = Vocab()
            self.dec_out_vocab_high.load(self.dec_out_vocab_high_save)
            self.dec_out_vocab_low = Vocab()
            self.dec_out_vocab_low.load(self.dec_out_vocab_low_save)
            self.dec_out_vocab_arg = Vocab()
            self.dec_out_vocab_arg.load(self.dec_out_vocab_args_save)
        if self.args.use_bert:
            self.bert_tknz = get_bert_tknz(self.args)
    def preprocess_data(self):
        '''
        Preprocess every trajectory in every split and save the results as
        jsons in the configured folder. Runs single-threaded when
        args.num_threads is 0 or 1, otherwise fans the tasks out over a
        thread pool fed from a shared queue.
        '''
        if self.args.num_threads in [0,1]:
            for k, d in self.dataset_splits.items():
                print('Preprocessing {}'.format(k))
                # debugging: only a small slice per split
                if self.args.fast_epoch:
                    d = d[:10]
                for task in tqdm(d):
                    self.preprocess_traj(k, task)
        else:
            task_queue = queue.Queue()
            for k, d in self.dataset_splits.items():
                # NOTE(review): the threaded path skips 'tests' splits and
                # slices 30 tasks under fast_epoch, while the single-threaded
                # path skips nothing and slices 10 -- confirm this asymmetry
                # is intentional.
                if 'tests' in k:
                    continue
                if self.args.fast_epoch:
                    d = d[:30]
                for task in d:
                    task_queue.put((k, task))
            pbar = tqdm(total=task_queue.qsize())
            # start threads
            # NOTE(review): `run` is not defined in this chunk -- presumably a
            # module-level worker that drains (k, task) pairs from the queue
            # and calls preprocess_traj; confirm.
            threads = []
            for n in range(self.args.num_threads):
                thread = threading.Thread(target=run, args=(self.preprocess_traj, task_queue, pbar))
                threads.append(thread)
                thread.start()
            for t in threads:
                t.join()
def preprocess_traj(self, k, task):
train_mode = 'test' not in k
# load json file
json_path = os.path.join(self.args.raw_data, k, task['task'], 'traj_data.json')
# NOTE(review): corrupted span -- the remainder of preprocess_traj (and the
# start of the SWIG-generated smg2s wrapper module below) was lost when two
# unrelated files were concatenated here.
Loc_MatAYPX(self, X, scale):
return _smg2s.parMatrixSparseRealDoubleLongInt_Loc_MatAYPX(self, X, scale)
    def ConvertToCSR(self):
        # Delegates to the compiled _smg2s extension module.
        return _smg2s.parMatrixSparseRealDoubleLongInt_ConvertToCSR(self)
    def Loc_ConvertToCSR(self):
        # Delegates to the compiled _smg2s extension module.
        return _smg2s.parMatrixSparseRealDoubleLongInt_Loc_ConvertToCSR(self)
    def ZeroEntries(self):
        # Delegates to the compiled _smg2s extension module.
        return _smg2s.parMatrixSparseRealDoubleLongInt_ZeroEntries(self)
    def Loc_ZeroEntries(self):
        # Delegates to the compiled _smg2s extension module.
        return _smg2s.parMatrixSparseRealDoubleLongInt_Loc_ZeroEntries(self)
    def MA(self, nilp, prod):
        # Delegates to the compiled _smg2s extension module.
        return _smg2s.parMatrixSparseRealDoubleLongInt_MA(self, nilp, prod)
    def AM(self, nilp, prod):
        # Delegates to the compiled _smg2s extension module.
        return _smg2s.parMatrixSparseRealDoubleLongInt_AM(self, nilp, prod)
# SWIG boilerplate: register the proxy class with the extension runtime.
parMatrixSparseRealDoubleLongInt_swigregister = _smg2s.parMatrixSparseRealDoubleLongInt_swigregister
parMatrixSparseRealDoubleLongInt_swigregister(parMatrixSparseRealDoubleLongInt)
def smg2sRealDoubleLongInt(probSize, nilp, lbandwidth, spectrum, comm):
    # Thin wrapper around the extension entry point.
    return _smg2s.smg2sRealDoubleLongInt(probSize, nilp, lbandwidth, spectrum, comm)
# SWIG boilerplate: rebind the name directly to the extension function,
# replacing the wrapper defined just above.
smg2sRealDoubleLongInt = _smg2s.smg2sRealDoubleLongInt
class parMatrixSparseRealSingleInt(_object):
    """SWIG-generated proxy for a C++ parallel sparse matrix class
    (presumably the real/single-precision/int template instantiation --
    confirm against the smg2s sources). Every method delegates to the
    compiled `_smg2s` extension; attribute access goes through the SWIG
    set/get method tables below. Do not edit by hand."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, parMatrixSparseRealSingleInt, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, parMatrixSparseRealSingleInt, name)
    __repr__ = _swig_repr
    # accessors for the wrapped C++ data members
    __swig_setmethods__["CSR_lloc"] = _smg2s.parMatrixSparseRealSingleInt_CSR_lloc_set
    __swig_getmethods__["CSR_lloc"] = _smg2s.parMatrixSparseRealSingleInt_CSR_lloc_get
    if _newclass:
        CSR_lloc = _swig_property(_smg2s.parMatrixSparseRealSingleInt_CSR_lloc_get, _smg2s.parMatrixSparseRealSingleInt_CSR_lloc_set)
    __swig_setmethods__["CSR_gloc"] = _smg2s.parMatrixSparseRealSingleInt_CSR_gloc_set
    __swig_getmethods__["CSR_gloc"] = _smg2s.parMatrixSparseRealSingleInt_CSR_gloc_get
    if _newclass:
        CSR_gloc = _swig_property(_smg2s.parMatrixSparseRealSingleInt_CSR_gloc_get, _smg2s.parMatrixSparseRealSingleInt_CSR_gloc_set)
    __swig_setmethods__["CSR_loc"] = _smg2s.parMatrixSparseRealSingleInt_CSR_loc_set
    __swig_getmethods__["CSR_loc"] = _smg2s.parMatrixSparseRealSingleInt_CSR_loc_get
    if _newclass:
        CSR_loc = _swig_property(_smg2s.parMatrixSparseRealSingleInt_CSR_loc_get, _smg2s.parMatrixSparseRealSingleInt_CSR_loc_set)
    __swig_setmethods__["dynmat_loc"] = _smg2s.parMatrixSparseRealSingleInt_dynmat_loc_set
    __swig_getmethods__["dynmat_loc"] = _smg2s.parMatrixSparseRealSingleInt_dynmat_loc_get
    if _newclass:
        dynmat_loc = _swig_property(_smg2s.parMatrixSparseRealSingleInt_dynmat_loc_get, _smg2s.parMatrixSparseRealSingleInt_dynmat_loc_set)

    def __init__(self, *args):
        # construct the underlying C++ object and attach it to this proxy
        this = _smg2s.new_parMatrixSparseRealSingleInt(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _smg2s.delete_parMatrixSparseRealSingleInt
    __del__ = lambda self: None

    def GetXMap(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetXMap(self)

    def GetYMap(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetYMap(self)

    def GetComm(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetComm(self)

    def GetXLowerBound(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetXLowerBound(self)

    def GetYLowerBound(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetYLowerBound(self)

    def GetXUpperBound(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetXUpperBound(self)

    def GetYUpperBound(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetYUpperBound(self)

    def GetTrueLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseRealSingleInt_GetTrueLocalSize(self, rs, cs)

    def GetLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseRealSingleInt_GetLocalSize(self, rs, cs)

    def GetDynMatGLobLoc(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetDynMatGLobLoc(self)

    def GetDynMatGlobLoc(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetDynMatGlobLoc(self)

    def GetDynMatLoc(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetDynMatLoc(self)

    def GetCSRLocLoc(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetCSRLocLoc(self)

    def GetCSRGlobLoc(self):
        return _smg2s.parMatrixSparseRealSingleInt_GetCSRGlobLoc(self)

    def AddValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleInt_AddValueLocal(self, row, col, value)

    def AddValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealSingleInt_AddValuesLocal(self, nindex, rows, cols, values)

    def AddValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleInt_AddValue(self, row, col, value)

    def SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleInt_SetValueLocal(self, row, col, value)

    def SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealSingleInt_SetValuesLocal(self, nindex, rows, cols, values)

    def SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleInt_SetValue(self, row, col, value)

    def GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleInt_GetLocalValue(self, row, col)

    def GetValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleInt_GetValue(self, row, col)

    def glocPlusLloc(self):
        return _smg2s.parMatrixSparseRealSingleInt_glocPlusLloc(self)

    def llocToGlocLoc(self):
        return _smg2s.parMatrixSparseRealSingleInt_llocToGlocLoc(self)

    def MatView(self):
        return _smg2s.parMatrixSparseRealSingleInt_MatView(self)

    def LOC_MatView(self):
        return _smg2s.parMatrixSparseRealSingleInt_LOC_MatView(self)

    def Loc_SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_SetValueLocal(self, row, col, value)

    def Loc_SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_SetValuesLocal(self, nindex, rows, cols, values)

    def Loc_SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_SetValue(self, row, col, value)

    def Loc_GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_GetLocalValue(self, row, col)

    def Loc_GetValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_GetValue(self, row, col)

    def SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseRealSingleInt_SetDiagonal(self, diag)

    def Loc_SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_SetDiagonal(self, diag)

    def MatScale(self, scale):
        return _smg2s.parMatrixSparseRealSingleInt_MatScale(self, scale)

    def Loc_MatScale(self, scale):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_MatScale(self, scale)

    def Loc_MatAXPY(self, X, scale):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_MatAXPY(self, X, scale)

    def Loc_MatAYPX(self, X, scale):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_MatAYPX(self, X, scale)

    def ConvertToCSR(self):
        return _smg2s.parMatrixSparseRealSingleInt_ConvertToCSR(self)

    def Loc_ConvertToCSR(self):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_ConvertToCSR(self)

    def ZeroEntries(self):
        return _smg2s.parMatrixSparseRealSingleInt_ZeroEntries(self)

    def Loc_ZeroEntries(self):
        return _smg2s.parMatrixSparseRealSingleInt_Loc_ZeroEntries(self)

    def MA(self, nilp, prod):
        return _smg2s.parMatrixSparseRealSingleInt_MA(self, nilp, prod)

    def AM(self, nilp, prod):
        return _smg2s.parMatrixSparseRealSingleInt_AM(self, nilp, prod)
# SWIG boilerplate: register the proxy class with the extension runtime.
parMatrixSparseRealSingleInt_swigregister = _smg2s.parMatrixSparseRealSingleInt_swigregister
parMatrixSparseRealSingleInt_swigregister(parMatrixSparseRealSingleInt)
def smg2sRealSingleInt(probSize, nilp, lbandwidth, spectrum, comm):
    # Thin wrapper around the extension entry point.
    return _smg2s.smg2sRealSingleInt(probSize, nilp, lbandwidth, spectrum, comm)
# SWIG boilerplate: rebind the name directly to the extension function,
# replacing the wrapper defined just above.
smg2sRealSingleInt = _smg2s.smg2sRealSingleInt
class parMatrixSparseRealSingleLongInt(_object):
    """SWIG-generated proxy for a C++ parallel sparse matrix class
    (presumably the real/single-precision/long-int template instantiation --
    confirm against the smg2s sources). Every method delegates to the
    compiled `_smg2s` extension; attribute access goes through the SWIG
    set/get method tables below. Do not edit by hand."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, parMatrixSparseRealSingleLongInt, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, parMatrixSparseRealSingleLongInt, name)
    __repr__ = _swig_repr
    # accessors for the wrapped C++ data members
    __swig_setmethods__["CSR_lloc"] = _smg2s.parMatrixSparseRealSingleLongInt_CSR_lloc_set
    __swig_getmethods__["CSR_lloc"] = _smg2s.parMatrixSparseRealSingleLongInt_CSR_lloc_get
    if _newclass:
        CSR_lloc = _swig_property(_smg2s.parMatrixSparseRealSingleLongInt_CSR_lloc_get, _smg2s.parMatrixSparseRealSingleLongInt_CSR_lloc_set)
    __swig_setmethods__["CSR_gloc"] = _smg2s.parMatrixSparseRealSingleLongInt_CSR_gloc_set
    __swig_getmethods__["CSR_gloc"] = _smg2s.parMatrixSparseRealSingleLongInt_CSR_gloc_get
    if _newclass:
        CSR_gloc = _swig_property(_smg2s.parMatrixSparseRealSingleLongInt_CSR_gloc_get, _smg2s.parMatrixSparseRealSingleLongInt_CSR_gloc_set)
    __swig_setmethods__["CSR_loc"] = _smg2s.parMatrixSparseRealSingleLongInt_CSR_loc_set
    __swig_getmethods__["CSR_loc"] = _smg2s.parMatrixSparseRealSingleLongInt_CSR_loc_get
    if _newclass:
        CSR_loc = _swig_property(_smg2s.parMatrixSparseRealSingleLongInt_CSR_loc_get, _smg2s.parMatrixSparseRealSingleLongInt_CSR_loc_set)
    __swig_setmethods__["dynmat_loc"] = _smg2s.parMatrixSparseRealSingleLongInt_dynmat_loc_set
    __swig_getmethods__["dynmat_loc"] = _smg2s.parMatrixSparseRealSingleLongInt_dynmat_loc_get
    if _newclass:
        dynmat_loc = _swig_property(_smg2s.parMatrixSparseRealSingleLongInt_dynmat_loc_get, _smg2s.parMatrixSparseRealSingleLongInt_dynmat_loc_set)

    def __init__(self, *args):
        # construct the underlying C++ object and attach it to this proxy
        this = _smg2s.new_parMatrixSparseRealSingleLongInt(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _smg2s.delete_parMatrixSparseRealSingleLongInt
    __del__ = lambda self: None

    def GetXMap(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetXMap(self)

    def GetYMap(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetYMap(self)

    def GetComm(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetComm(self)

    def GetXLowerBound(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetXLowerBound(self)

    def GetYLowerBound(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetYLowerBound(self)

    def GetXUpperBound(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetXUpperBound(self)

    def GetYUpperBound(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetYUpperBound(self)

    def GetTrueLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetTrueLocalSize(self, rs, cs)

    def GetLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetLocalSize(self, rs, cs)

    def GetDynMatGLobLoc(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetDynMatGLobLoc(self)

    def GetDynMatGlobLoc(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetDynMatGlobLoc(self)

    def GetDynMatLoc(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetDynMatLoc(self)

    def GetCSRLocLoc(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetCSRLocLoc(self)

    def GetCSRGlobLoc(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetCSRGlobLoc(self)

    def AddValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleLongInt_AddValueLocal(self, row, col, value)

    def AddValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealSingleLongInt_AddValuesLocal(self, nindex, rows, cols, values)

    def AddValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleLongInt_AddValue(self, row, col, value)

    def SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleLongInt_SetValueLocal(self, row, col, value)

    def SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealSingleLongInt_SetValuesLocal(self, nindex, rows, cols, values)

    def SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleLongInt_SetValue(self, row, col, value)

    def GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetLocalValue(self, row, col)

    def GetValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleLongInt_GetValue(self, row, col)

    def glocPlusLloc(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_glocPlusLloc(self)

    def llocToGlocLoc(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_llocToGlocLoc(self)

    def MatView(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_MatView(self)

    def LOC_MatView(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_LOC_MatView(self)

    def Loc_SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_SetValueLocal(self, row, col, value)

    def Loc_SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_SetValuesLocal(self, nindex, rows, cols, values)

    def Loc_SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_SetValue(self, row, col, value)

    def Loc_GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_GetLocalValue(self, row, col)

    def Loc_GetValue(self, row, col):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_GetValue(self, row, col)

    def SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseRealSingleLongInt_SetDiagonal(self, diag)

    def Loc_SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_SetDiagonal(self, diag)

    def MatScale(self, scale):
        return _smg2s.parMatrixSparseRealSingleLongInt_MatScale(self, scale)

    def Loc_MatScale(self, scale):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_MatScale(self, scale)

    def Loc_MatAXPY(self, X, scale):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_MatAXPY(self, X, scale)

    def Loc_MatAYPX(self, X, scale):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_MatAYPX(self, X, scale)

    def ConvertToCSR(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_ConvertToCSR(self)

    def Loc_ConvertToCSR(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_ConvertToCSR(self)

    def ZeroEntries(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_ZeroEntries(self)

    def Loc_ZeroEntries(self):
        return _smg2s.parMatrixSparseRealSingleLongInt_Loc_ZeroEntries(self)

    def MA(self, nilp, prod):
        return _smg2s.parMatrixSparseRealSingleLongInt_MA(self, nilp, prod)

    def AM(self, nilp, prod):
        return _smg2s.parMatrixSparseRealSingleLongInt_AM(self, nilp, prod)
# SWIG boilerplate: register the proxy class with the extension runtime.
parMatrixSparseRealSingleLongInt_swigregister = _smg2s.parMatrixSparseRealSingleLongInt_swigregister
parMatrixSparseRealSingleLongInt_swigregister(parMatrixSparseRealSingleLongInt)
def smg2sRealSingleLongInt(probSize, nilp, lbandwidth, spectrum, comm):
    # Thin wrapper around the extension entry point.
    return _smg2s.smg2sRealSingleLongInt(probSize, nilp, lbandwidth, spectrum, comm)
# SWIG boilerplate: rebind the name directly to the extension function,
# replacing the wrapper defined just above.
smg2sRealSingleLongInt = _smg2s.smg2sRealSingleLongInt
class parMatrixSparseComplexDoubleInt(_object):
    """SWIG-generated proxy for a C++ parallel sparse matrix class
    (presumably the complex/double-precision/int template instantiation --
    confirm against the smg2s sources). Every method delegates to the
    compiled `_smg2s` extension; attribute access goes through the SWIG
    set/get method tables below. Do not edit by hand."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, parMatrixSparseComplexDoubleInt, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, parMatrixSparseComplexDoubleInt, name)
    __repr__ = _swig_repr
    # accessors for the wrapped C++ data members
    __swig_setmethods__["CSR_lloc"] = _smg2s.parMatrixSparseComplexDoubleInt_CSR_lloc_set
    __swig_getmethods__["CSR_lloc"] = _smg2s.parMatrixSparseComplexDoubleInt_CSR_lloc_get
    if _newclass:
        CSR_lloc = _swig_property(_smg2s.parMatrixSparseComplexDoubleInt_CSR_lloc_get, _smg2s.parMatrixSparseComplexDoubleInt_CSR_lloc_set)
    __swig_setmethods__["CSR_gloc"] = _smg2s.parMatrixSparseComplexDoubleInt_CSR_gloc_set
    __swig_getmethods__["CSR_gloc"] = _smg2s.parMatrixSparseComplexDoubleInt_CSR_gloc_get
    if _newclass:
        CSR_gloc = _swig_property(_smg2s.parMatrixSparseComplexDoubleInt_CSR_gloc_get, _smg2s.parMatrixSparseComplexDoubleInt_CSR_gloc_set)
    __swig_setmethods__["CSR_loc"] = _smg2s.parMatrixSparseComplexDoubleInt_CSR_loc_set
    __swig_getmethods__["CSR_loc"] = _smg2s.parMatrixSparseComplexDoubleInt_CSR_loc_get
    if _newclass:
        CSR_loc = _swig_property(_smg2s.parMatrixSparseComplexDoubleInt_CSR_loc_get, _smg2s.parMatrixSparseComplexDoubleInt_CSR_loc_set)
    __swig_setmethods__["dynmat_loc"] = _smg2s.parMatrixSparseComplexDoubleInt_dynmat_loc_set
    __swig_getmethods__["dynmat_loc"] = _smg2s.parMatrixSparseComplexDoubleInt_dynmat_loc_get
    if _newclass:
        dynmat_loc = _swig_property(_smg2s.parMatrixSparseComplexDoubleInt_dynmat_loc_get, _smg2s.parMatrixSparseComplexDoubleInt_dynmat_loc_set)

    def __init__(self, *args):
        # construct the underlying C++ object and attach it to this proxy
        this = _smg2s.new_parMatrixSparseComplexDoubleInt(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _smg2s.delete_parMatrixSparseComplexDoubleInt
    __del__ = lambda self: None

    def GetXMap(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetXMap(self)

    def GetYMap(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetYMap(self)

    def GetComm(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetComm(self)

    def GetXLowerBound(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetXLowerBound(self)

    def GetYLowerBound(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetYLowerBound(self)

    def GetXUpperBound(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetXUpperBound(self)

    def GetYUpperBound(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetYUpperBound(self)

    def GetTrueLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetTrueLocalSize(self, rs, cs)

    def GetLocalSize(self, rs, cs):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetLocalSize(self, rs, cs)

    def GetDynMatGLobLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetDynMatGLobLoc(self)

    def GetDynMatGlobLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetDynMatGlobLoc(self)

    def GetDynMatLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetDynMatLoc(self)

    def GetCSRLocLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetCSRLocLoc(self)

    def GetCSRGlobLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetCSRGlobLoc(self)

    def AddValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleInt_AddValueLocal(self, row, col, value)

    def AddValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexDoubleInt_AddValuesLocal(self, nindex, rows, cols, values)

    def AddValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleInt_AddValue(self, row, col, value)

    def SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleInt_SetValueLocal(self, row, col, value)

    def SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexDoubleInt_SetValuesLocal(self, nindex, rows, cols, values)

    def SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleInt_SetValue(self, row, col, value)

    def GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetLocalValue(self, row, col)

    def GetValue(self, row, col):
        return _smg2s.parMatrixSparseComplexDoubleInt_GetValue(self, row, col)

    def glocPlusLloc(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_glocPlusLloc(self)

    def llocToGlocLoc(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_llocToGlocLoc(self)

    def MatView(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_MatView(self)

    def LOC_MatView(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_LOC_MatView(self)

    def Loc_SetValueLocal(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_SetValueLocal(self, row, col, value)

    def Loc_SetValuesLocal(self, nindex, rows, cols, values):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_SetValuesLocal(self, nindex, rows, cols, values)

    def Loc_SetValue(self, row, col, value):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_SetValue(self, row, col, value)

    def Loc_GetLocalValue(self, row, col):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_GetLocalValue(self, row, col)

    def Loc_GetValue(self, row, col):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_GetValue(self, row, col)

    def SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseComplexDoubleInt_SetDiagonal(self, diag)

    def Loc_SetDiagonal(self, diag):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_SetDiagonal(self, diag)

    def MatScale(self, scale):
        return _smg2s.parMatrixSparseComplexDoubleInt_MatScale(self, scale)

    def Loc_MatScale(self, scale):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_MatScale(self, scale)

    def Loc_MatAXPY(self, X, scale):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_MatAXPY(self, X, scale)

    def Loc_MatAYPX(self, X, scale):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_MatAYPX(self, X, scale)

    def ConvertToCSR(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_ConvertToCSR(self)

    def Loc_ConvertToCSR(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_ConvertToCSR(self)

    def ZeroEntries(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_ZeroEntries(self)

    def Loc_ZeroEntries(self):
        return _smg2s.parMatrixSparseComplexDoubleInt_Loc_ZeroEntries(self)

    def MA(self, nilp, prod):
        return _smg2s.parMatrixSparseComplexDoubleInt_MA(self, nilp, prod)

    def AM(self, nilp, prod):
        return _smg2s.parMatrixSparseComplexDoubleInt_AM(self, nilp, prod)
# Register the Python proxy class with the SWIG runtime so C++ objects of
# this type are wrapped with the proxy when returned to Python.
parMatrixSparseComplexDoubleInt_swigregister = _smg2s.parMatrixSparseComplexDoubleInt_swigregister
parMatrixSparseComplexDoubleInt_swigregister(parMatrixSparseComplexDoubleInt)
def smg2sComplexDoubleInt(probSize, nilp, lbandwidth, spectrum, comm):
    # Thin wrapper over the C entry point (SWIG-generated).
    return _smg2s.smg2sComplexDoubleInt(probSize, nilp, lbandwidth, spectrum, comm)
# SWIG immediately rebinds the name to the raw C function, bypassing the
# Python-level wrapper above.
smg2sComplexDoubleInt = _smg2s.smg2sComplexDoubleInt
class parMatrixSparseComplexDoubleLongInt(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, parMatrixSparseComplexDoubleLongInt, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, parMatrixSparseComplexDoubleLongInt, name)
__repr__ = _swig_repr
__swig_setmethods__["CSR_lloc"] = _smg2s.parMatrixSparseComplexDoubleLongInt_CSR_lloc_set
__swig_getmethods__["CSR_lloc"] = _smg2s.parMatrixSparseComplexDoubleLongInt_CSR_lloc_get
if _newclass:
CSR_lloc = _swig_property(_smg2s.parMatrixSparseComplexDoubleLongInt_CSR_lloc_get, _smg2s.parMatrixSparseComplexDoubleLongInt_CSR_lloc_set)
__swig_setmethods__["CSR_gloc"] = _smg2s.parMatrixSparseComplexDoubleLongInt_CSR_gloc_set
__swig_getmethods__["CSR_gloc"] = _smg2s.parMatrixSparseComplexDoubleLongInt_CSR_gloc_get
if _newclass:
CSR_gloc = | |
# coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BlueNet: and out of the blue network to experiment with shake-shake."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
# var: 1d tensor, raw weights for each choice
# tempered_var: raw weights with temperature applied
# inv_t: inverse of the temperature to use when normalizing `var`
# normalized: same shape as var, but where each item is between 0 and 1, and
# the sum is 1
# Lightweight record of the raw, tempered, and normalized selection weights
# (fields described in the comments above).
SelectionWeights = collections.namedtuple(
    "SelectionWeights", ["var", "tempered_var", "inv_t", "normalized"])
def create_selection_weights(name,
                             type_,
                             shape,
                             inv_t=1,
                             initializer=tf.zeros_initializer(),
                             regularizer=None,
                             names=None):
  """Create a SelectionWeights tuple.

  Args:
    name: Name for the underlying variable containing the unnormalized weights.
    type_: "softmax" or "sigmoid" or ("softmax_topk", k) where k is an int.
    shape: Shape for the variable.
    inv_t: Inverse of the temperature to use in normalization; may also be a
      callable taking the created variable and returning the inverse
      temperature.
    initializer: Initializer for the variable, passed to `tf.get_variable`.
    regularizer: Regularizer for the variable. A callable which accepts
      `tempered_var` and `normalized`.
    names: Name of each selection.

  Returns:
    The created SelectionWeights tuple.

  Raises:
    ValueError: if type_ is not in the supported range.
  """
  var = tf.get_variable(name, shape, initializer=initializer)
  if callable(inv_t):
    inv_t = inv_t(var)
  if inv_t == 1:
    tempered_var = var
  else:
    tempered_var = var * inv_t
  if type_ == "softmax":
    weights = tf.nn.softmax(tempered_var)
  elif type_ == "sigmoid":
    weights = tf.nn.sigmoid(tempered_var)
  elif isinstance(type_, (list, tuple)) and type_[0] == "softmax_topk":
    assert len(shape) == 1
    # Fix: honor the k requested in ("softmax_topk", k) instead of always
    # sampling a hard-coded 4 classes.
    k = int(type_[1])
    # TODO(rshin): Change this to select without replacement?
    selection = tf.multinomial(tf.expand_dims(var, axis=0), k)
    selection = tf.squeeze(selection, axis=0)  # [k] selected classes.
    to_run = tf.one_hot(selection, shape[0])  # [k x nmodules] one-hot.
    # [nmodules], 0=not run, 1=run.
    to_run = tf.minimum(tf.reduce_sum(to_run, axis=0), 1)
    # Mask unselected modules with a large negative logit before the softmax.
    weights = tf.nn.softmax(tempered_var - 1e9 * (1.0 - to_run))
  else:
    raise ValueError("Unknown type: %s" % type_)
  if regularizer is not None:
    loss = regularizer(tempered_var, weights)
    if loss is not None:
      tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, loss)
  if names is not None:
    # Record human-readable names for each selection slot for later inspection.
    tf.get_collection_ref("selection_weight_names/" + var.name).extend(
        names.flatten() if isinstance(names, np.ndarray) else names)
    tf.add_to_collection("selection_weight_names_tensor/" + var.name,
                         tf.constant(names))
  return SelectionWeights(
      var=var, tempered_var=tempered_var, inv_t=inv_t, normalized=weights)
def kernel_premultiplier(max_kernel_size, kernel_sizes, input_channels,
                         kernel_selection_weights, channel_selection_weights):
  """Get weights to multiply the kernel with, before convolving.
  Args:
    max_kernel_size: (int, int) tuple giving the largest kernel size.
    kernel_sizes: A list of (height, width) pairs of integers, containing
      different kernel sizes to use.
    input_channels: A list of (begin, end) pairs of integers, which describe
      which channels in the input to use.
    kernel_selection_weights: SelectionWeights object to use for choosing
      among kernel sizes.
    channel_selection_weights: SelectionWeights object to use for choosing
      among which input channels to use.
  Returns:
    The multiplier.
  """
  kernel_weights = []
  for kernel_i, (h, w) in enumerate(kernel_sizes):
    # Center the (h, w) kernel inside the max-size canvas by zero-padding
    # the selection weight on all four sides.
    top = (max_kernel_size[0] - h) // 2
    bot = max_kernel_size[0] - h - top
    left = (max_kernel_size[1] - w) // 2
    right = max_kernel_size[1] - w - left
    kernel_weight = tf.fill((h, w),
                            kernel_selection_weights.normalized[kernel_i])
    if top != 0 or bot != 0 or left != 0 or right != 0:
      kernel_weight = tf.pad(kernel_weight, [[top, bot], [left, right]])
    kernel_weights.append(kernel_weight)
  # Sum of per-size canvases: each spatial position holds the total selection
  # weight of all kernel sizes that cover it.
  kernel_weight = tf.add_n(kernel_weights)
  channel_weights = []
  min_channel = np.min(input_channels)
  max_channel = np.max(input_channels)
  for channel_i, (begin, end) in enumerate(input_channels):
    # Place this channel-range's selection weight at its offset within the
    # overall [min_channel, max_channel) span.
    channel_weight = tf.pad(
        tf.fill((end - begin,),
                channel_selection_weights.normalized[channel_i]),
        [[begin - min_channel, max_channel - end]])
    channel_weights.append(channel_weight)
  channel_weight = tf.add_n(channel_weights)
  # Broadcast: [h, w, 1, 1] spatial weights times [1, 1, in, 1] channel
  # weights gives a per-tap multiplier for a conv2d kernel.
  multiplier = (tf.reshape(kernel_weight, max_kernel_size +
                           (1, 1)) * tf.reshape(channel_weight, (1, 1, -1, 1)))
  return multiplier
def make_subseparable_kernel(kernel_size, input_channels, filters, separability,
                             kernel_initializer, kernel_regularizer):
  """Make a kernel to do subseparable convolution with `tf.nn.conv2d`.

  Args:
    kernel_size: (height, width) tuple.
    input_channels: Number of input channels.
    filters: Number of output channels.
    separability: Integer denoting separability.
    kernel_initializer: Initializer to use for the kernel.
    kernel_regularizer: Regularizer to use for the kernel.

  Returns:
    A 4D tensor.

  Raises:
    NotImplementedError: for partial separability (|separability| >= 2).
  """
  if separability == 1:
    # Non-separable convolution: a single dense [h, w, in, out] kernel.
    return tf.get_variable(
        "kernel",
        kernel_size + (input_channels, filters),
        initializer=kernel_initializer,
        regularizer=kernel_regularizer)
  if separability in (0, -1):
    # Fully separable convolution: depthwise followed by pointwise.
    # TODO(rshin): Check initialization is as expected, as these are not 4D.
    depthwise_kernel = tf.get_variable(
        "depthwise_kernel",
        kernel_size + (input_channels,),
        initializer=kernel_initializer,
        regularizer=kernel_regularizer)
    pointwise_kernel = tf.get_variable(
        "pointwise_kernel", (input_channels, filters),
        initializer=kernel_initializer,
        regularizer=kernel_regularizer)
    # Expand the depthwise kernel into a block-diagonal [h, w, in, in] kernel
    # so it can be fused with the pointwise kernel into one conv2d kernel.
    expanded_depthwise_kernel = tf.transpose(
        tf.scatter_nd(
            indices=tf.tile(
                tf.expand_dims(tf.range(0, input_channels), axis=1), [1, 2]),
            updates=tf.transpose(depthwise_kernel, (2, 0, 1)),
            shape=(input_channels, input_channels) + kernel_size), (2, 3, 0, 1))
    return tf.reshape(
        tf.matmul(
            tf.reshape(expanded_depthwise_kernel, (-1, input_channels)),
            pointwise_kernel), kernel_size + (input_channels, filters))
  if separability >= 2:
    assert filters % separability == 0, (filters, separability)
    # Fix: report input_channels (not filters) in this assert's diagnostic.
    assert input_channels % separability == 0, (input_channels, separability)
    raise NotImplementedError
  # separability <= -2
  separability *= -1
  assert filters % separability == 0, (filters, separability)
  assert input_channels % separability == 0, (input_channels, separability)
  raise NotImplementedError
def multi_subseparable_conv(inputs,
                            filters,
                            kernel_sizes,
                            input_channels,
                            separabilities,
                            kernel_selection_weights=None,
                            channel_selection_weights=None,
                            separability_selection_weights=None,
                            kernel_selection_weights_params=None,
                            channel_selection_weights_params=None,
                            separability_selection_weights_params=None,
                            kernel_initializer=None,
                            kernel_regularizer=None,
                            scope=None):
  """Simultaneously compute different kinds of convolutions on subsets of input.
  Args:
    inputs: 4D tensor containing the input, in NHWC format.
    filters: Integer, number of output channels.
    kernel_sizes: A list of (height, width) pairs of integers, containing
      different kernel sizes to use.
    input_channels: A list of (begin, end) pairs of integers, which describe
      which channels in the input to use.
    separabilities: An integer or a list, how separable are the convolutions.
    kernel_selection_weights: SelectionWeights object to use for choosing
      among kernel sizes.
    channel_selection_weights: SelectionWeights object to use for choosing
      among which input channels to use.
    separability_selection_weights: SelectionWeights object to use for choosing
      separability.
    kernel_selection_weights_params: dict with up to three keys
      - initializer
      - regularizer
      - inv_t
    channel_selection_weights_params: dict with up to three keys
      - initializer
      - regularizer
      - inv_t
    separability_selection_weights_params: dict with up to three keys
      - initializer
      - regularizer
      - inv_t
    kernel_initializer: Initializer to use for kernels.
    kernel_regularizer: Regularizer to use for kernels.
    scope: the scope to use.
  Returns:
    Result of convolution.
  """
  kernel_selection_weights_params = kernel_selection_weights_params or {}
  channel_selection_weights_params = channel_selection_weights_params or {}
  if separability_selection_weights_params is None:
    separability_selection_weights_params = {}
  # Get input image size.
  input_shape = inputs.get_shape().as_list()
  assert len(input_shape) == 4
  in_channels = input_shape[3]
  assert in_channels is not None
  # Largest kernel and widest channel span: every candidate kernel is built
  # at this size and masked down via the premultiplier.
  max_kernel_size = tuple(np.max(kernel_sizes, axis=0))
  max_num_channels = np.max(input_channels) - np.min(input_channels)
  with tf.variable_scope(scope or "selection_weights"):
    if kernel_selection_weights is None:
      kernel_selection_weights = create_selection_weights(
          "kernels",
          "softmax", (len(kernel_sizes),),
          names=["kernel_h{}_w{}".format(h, w) for h, w in kernel_sizes],
          **kernel_selection_weights_params)
    if channel_selection_weights is None:
      channel_selection_weights = create_selection_weights(
          "channels",
          "softmax", (len(input_channels),),
          names=["channels_{}_{}".format(c1, c2) for c1, c2 in input_channels],
          **channel_selection_weights_params)
    if separability_selection_weights is None:
      separability_selection_weights = create_selection_weights(
          "separability",
          "softmax", (len(separabilities),),
          names=["separability_{}".format(s) for s in separabilities],
          **separability_selection_weights_params)
  kernels = []
  for separability in separabilities:
    # NOTE(review): "separablity" is misspelled; kept as-is because changing
    # the scope name would break existing checkpoints.
    with tf.variable_scope("separablity_{}".format(separability)):
      kernel = make_subseparable_kernel(max_kernel_size, max_num_channels,
                                        filters, separability,
                                        kernel_initializer, kernel_regularizer)
      premultiplier = kernel_premultiplier(
          max_kernel_size, kernel_sizes, input_channels,
          kernel_selection_weights, channel_selection_weights)
      kernels.append(kernel * premultiplier)
  # Soft-select among the per-separability kernels.
  kernel = tf.add_n([
      separability_selection_weights.normalized[i] * k
      for i, k in enumerate(kernels)
  ])
  # Slice the input down to the union of requested channel ranges.
  if np.min(input_channels) != 0 or np.max(input_channels) != in_channels:
    inputs = inputs[:, :, :, np.min(input_channels):np.max(input_channels)]
  return tf.nn.conv2d(
      inputs,
      filter=kernel,
      strides=[1, 1, 1, 1],
      padding="SAME",
      data_format="NHWC",
      name="conv2d")
def conv_module(kw, kh, sep, div):
  """Build a unary module applying a (kw, kh) subseparable convolution."""
  def _apply(x, hparams):
    out_channels = hparams.hidden_size // div
    layer_name = "conv_%d%d_sep%d_div%d" % (kw, kh, sep, div)
    return common_layers.subseparable_conv(
        x, out_channels, (kw, kh), padding="SAME", separability=sep,
        name=layer_name)
  return _apply
def multi_conv_module(kernel_sizes, seps):
  """Build a unary module selecting among several kernel sizes/separabilities."""
  def _apply(x, hparams):
    full_channel_span = [(0, hparams.hidden_size)]
    return multi_subseparable_conv(x, hparams.hidden_size, kernel_sizes,
                                   full_channel_span, seps)
  return _apply
def layernorm_module(x, hparams):
  """Unary module: layer normalization over the hidden dimension."""
  normalized = common_layers.layer_norm(x, hparams.hidden_size,
                                        name="layer_norm")
  return normalized
def noamnorm_module(x, hparams):
  """Unary module: Noam normalization (ignores hparams)."""
  del hparams  # Signature kept uniform with the other unary modules.
  return common_layers.noam_norm(x)
def identity_module(x, hparams):
  """Unary module: return `x` unchanged (ignores hparams)."""
  del hparams  # Signature kept uniform with the other unary modules.
  return x
def first_binary_module(x, y, hparams):
  """Binary module: ignore `y` and return `x` unchanged."""
  del y  # Unused.
  del hparams  # Unused.
  return x
def second_binary_module(x, y, hparams):
  """Binary module: ignore `x` and return `y` unchanged."""
  del x  # Unused.
  del hparams  # Unused.
  return y
def sum_binary_module(x, y, hparams):
  """Binary module: elementwise sum of the two inputs."""
  del hparams  # Unused.
  combined = x + y
  return combined
def shakeshake_binary_module(x, y, hparams):
  """Binary module: combine the inputs with the shake-shake operation."""
  del hparams  # Unused.
  return common_layers.shakeshake2(x, y)
def run_binary_modules(modules, cur1, cur2, hparams):
"""Run binary modules."""
selection_weights = create_selection_weights(
"selection",
"softmax",
shape=[len(modules)],
inv_t=100.0 * common_layers.inverse_exp_decay(
hparams.anneal_until, min_value=0.01))
all_res = [modules[n](cur1, cur2, hparams) for n in xrange(len(modules))]
all_res = tf.concat([tf.expand_dims(r, axis=0) | |
# Natural Language Toolkit (NLTK) MUC Corpus Reader
#
# Copyright (C) 2001-2011 NLTK Project
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>> (original IEER Corpus Reader)
# <NAME> <<EMAIL>> (original IEER Corpus
# Reader)
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
# Adapted from nltk.corpus.reader.ieer.IEERCorpusReader
import re
import codecs
from itertools import chain
from nltk import Tree
from nltk.util import LazyMap, LazyConcatenation
from nltk.tokenize.treebank import TreebankWordTokenizer
from nltk.tokenize.punkt import PunktSentenceTokenizer
from nltk.corpus.util import LazyCorpusLoader
from nltk.corpus.reader.api import CorpusReader
from nltk.corpus.reader.util import concat, StreamBackedCorpusView
# Known MUC-6 corpus files. Values are placeholders for document titles
# (kept empty; only the keys are used, via sorted() below).
muc6_titles = {
    '891102-0189.ne.v1.3.sgm':'',
    '891102-0189.co.v2.0.sgm':'',
    '891101-0050.ne.v1.3.sgm':'',
}
muc6_documents = sorted(muc6_titles)
# Known MUC-7 corpus files (same placeholder-title convention).
muc7_titles = {
    'dryrun01.muc7':'',
    'dryrun02.muc7':'',
    'dryrun03.muc7':'',
}
muc7_documents = sorted(muc7_titles)
# Chunk/entity types that appear in MUC named-entity annotations.
_MUC_CHUNK_TYPES = [
    'DATE',
    'IDENT',
    'LOCATION',
    'MONEY',
    'ORGANIZATION',
    'PERCENT',
    'PERSON',
    'TIME'
]
_MUC6_DOC_RE = re.compile(
r'\s*<DOC>\s*'
r"""
(\s*(<DOCNO>\s*(?P<docno>.+?)\s*</DOCNO>|
<CODER>\s*.+?\s*</CODER>|
<DD>\s*.+?\s*</DD>|
<AN>\s*.+?\s*</AN>|
<HL>\s*(?P<headline>.+?)\s*</HL>|
<SO>\s*.+?\s*</SO>|
<CO>\s*.+?\s*</CO>|
<IN>\s*.+?\s*</IN>|
<GV>\s*.+?\s*</GV>|
<DATELINE>\s*(?P<dateline>.+?)\s*</DATELINE>)\s*)*
"""
r'<TXT>\s*(?P<text>(<p>\s*(<s>\s*.+?\s*</s>)+\s*</p>)+)\s*</TXT>\s*'
r'</DOC>\s*', re.DOTALL | re.I | re.VERBOSE)
_MUC6_PARA_RE = re.compile('(<p>\s*(?P<para>.+?)\s*</p>?)+', re.DOTALL | re.I)
_MUC6_SENT_RE = re.compile('(<s>\s*(?P<sent>.+?)\s*</s>)+', re.DOTALL | re.I)
_MUC7_DOC_RE = re.compile(
r'\s*<DOC>\s*'
r"""
(\s*(<DOCID>\s*(?P<docid>.+?)\s*</DOCID>|
<STORYID\s+[^>]*?>\s*.+?\s*</STORYID>|
<SLUG\s+[^>]*?>\s*.+?\s*</SLUG>|
<DATE>\s*(?P<date>.+?)\s*</DATE>|
<NWORDS>\s*.+?\s*</NWORDS>|
<PREAMBLE>\s*.+?\s*</PREAMBLE>)\s*)*
"""
r'<TEXT>\s*(?P<text>.+?)\s*</TEXT>\s*'
r'(<TRAILER>\s*(?P<trailer>.+?)\s*</TRAILER>\s*)?'
r'</DOC>\s*', re.DOTALL | re.I | re.VERBOSE)
_MUC7_PARA_RE = re.compile(r'\s*<p>\s*.+?\s*(<p>\s*.+?\s*?)*\s*', re.DOTALL | re.I)
_MUC7_PARA_SPLIT_RE = re.compile(r'\s*<p>\s*', re.DOTALL | re.I)
_MUC_NE_B_RE = re.compile('<(ENAMEX|NUMEX|TIMEX)\s+[^>]*?TYPE="(?P<type>\w+)"', re.DOTALL | re.I)
_MUC_NE_E_RE = re.compile('</(ENAMEX|NUMEX|TIMEX)>', re.DOTALL | re.I)
_MUC_CO_B_RE = re.compile('<COREF\s+[^>]*?ID="(?P<id>\w+)"(\s+TYPE="(?P<type>\w+)")?(\s+REF="(?P<ref>\w+)")?', re.DOTALL | re.I)
_MUC_CO_E_RE = re.compile('</COREF>', re.DOTALL | re.I)
_WORD_TOKENIZER = TreebankWordTokenizer()
_SENT_TOKENIZER = PunktSentenceTokenizer()
class MUCDocument:
    """A single MUC document: its text plus any SGML metadata fields."""

    def __init__(self, **text):
        """Store each keyword argument as an attribute; 'text' is required."""
        self.text = None
        if isinstance(text, basestring):
            self.text = text
        elif isinstance(text, dict):
            # Promote each metadata field (docno, headline, dateline, ...)
            # to an instance attribute.
            for key, val in text.items():
                setattr(self, key, val)
        else:
            # Fix: a bare `raise` here re-raises an unrelated (or no) active
            # exception; raise an explicit, informative error instead.
            raise TypeError('expected document fields, got %r' % (text,))
        assert self.text

    def __repr__(self):
        # Robustness fix: tolerate documents that never set headline/docno
        # instead of raising AttributeError from repr().
        headline = getattr(self, 'headline', None)
        if headline:
            headline = ' '.join(headline.leaves())
        else:
            # Fall back to the first few non-tag tokens of the text.
            headline = ' '.join([w for w in self.text.leaves()
                                 if w[:1] != '<'][:11]) + '...'
        if getattr(self, 'docno', None) is not None:
            return '<MUCDocument %s: %r>' % (self.docno, headline)
        else:
            return '<MUCDocument: %r>' % headline
class MUCCorpusReader(CorpusReader):
"""
A corpus reader for MUC SGML files. Each file begins with a preamble
of SGML-tagged metadata. The document text follows. The text of the
document is contained in <TXT> tags for MUC6 and <TEXT> tags for MUC7.
Paragraphs are contained in <p> tags in both corpus formats. Sentences are
contained in <s> tags in MUC6 only. For MUC7 corpus files L{sents()},
L{chunked_sents()}, and L{iob_sents()} return sentences via tokenizing
with C{PunktSentenceTokenizer}.
Additionally named entities and coreference mentions may be marked within
the document text and document metadata. The MUC6 corpus provides
named entity and coreference annotations in two separate sets of files.
The MUC7 corpus contains coreference annotations only. Only one kind of
metadata will be returned depending on which kind of file is being read.
Named entities are tagged as ENAMEX (name expressions), NUMEX
(number expressions), or TIMEX (time expressions), all of which include
TYPE attributes.
Coreference mentions are tagged as COREF and include ID, TYPE, REF, and
MIN attributes. ID is used to give each coreference mention a unique
numeric idenitifier. REF indicates the ID of the intended referent of the
coreference mention and is not required for first mentions. MIN contains
the minimum coreferential string of the coreference mention.
"""
def raw(self, fileids=None):
"""
@return: A list of corpus file contents.
@rtype: C{list} of C{str}
@param fileids: A list of corpus files.
@type fileids: C{list} of C{str} or regular expression
"""
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, basestring):
fileids = [fileids]
return concat([self.open(f).read() for f in fileids])
def docs(self, fileids=None):
"""
@return: A list of corpus document strings.
@rtype: C{list} of C{StreamBackedCorpusView}
@param fileids: A list of corpus files.
@type fileids: C{list} of C{str} or regular expression
"""
return concat([StreamBackedCorpusView(fileid,
self._read_block,
encoding=enc)
for (fileid, enc) in self.abspaths(fileids, True)])
def parsed_docs(self, fileids=None):
"""
@return: A list of parsed corpus documents.
@rtype: C{list} of C{StreamBackedCorpusView}
@param fileids: A list of corpus files.
@type fileids: C{list} of C{str} or regular expression
"""
return concat([StreamBackedCorpusView(fileid,
self._read_parsed_block,
encoding=enc)
for (fileid, enc) in self.abspaths(fileids, True)])
def paras(self, fileids=None, **kwargs):
"""
@return: A list of paragraphs.
@rtype: C{list} of C{list} of C{list} of C{str}
@param fileids: A list of corpus files.
@type fileids: C{list} of C{str} or regular expression.
"""
def __para(para):
return [sent.leaves() for sent in list(para)]
return LazyMap(__para, self._paras(fileids))
def sents(self, fileids=None):
"""
@return: A list of sentences.
@rtype: C{list} of C{list} of C{str}
@param fileids: A list of corpus files.
@type fileids: C{list} of C{str} or regular expression
"""
return LazyConcatenation(self.paras(fileids))
def chunked_sents(self, fileids=None, **kwargs):
"""
@return: A list of sentence chunks as tuples of string/tag pairs.
@rtype: C{list} of C{list} of C{tuple}
@param fileids: A list of corpus files.
@type fileids: C{list} of C{str} or regular expression
@kwparam depth: Depth of chunk parsing for nested chunks.
@type depth: C{int}
"""
def __chunked_sent(sent):
chunks = []
# Map each sentence subtree into a tuple.
for token in map(tree2tuple, sent):
# If the token's contents is a list of chunk pieces, append it
# as a list of word/tag pairs.
if isinstance(token[0], list):
chunks.append([(word, None) for word in token[0]])
# If the token's contents is a string, append it as a
# word/tag tuple.
elif isinstance(token[0], basestring):
chunks.append((token[0], None))
# Something bad happened.
else:
raise
return chunks
depth = kwargs.get('depth', 0)
sents = self._chunked_sents(self._sents(fileids, **kwargs), depth)
return LazyMap(__chunked_sent, sents)
    def iob_sents(self, fileids=None, **kwargs):
        """
        @return: A list of sentences as iob word/iob/other tag pairs.
        @rtype: C{list} of C{list} of C{tuple}
        @param fileids: A list of corpus files.
        @type fileids: C{list} of C{str} or regular expression
        @kwparam depth: Depth of chunk parsing for nested chunks.
        @type depth: C{int}
        """
        def __iob_sent(sent):
            chunks = []
            # Map each sentence subtree into a tuple.
            for token in map(tree2tuple, sent):
                # If the token has a chunk type, parse the token contents.
                if token[1] is not None:
                    for index, word in enumerate(token[0]):
                        # NOTE(review): token[1:2] is a one-element tuple;
                        # '%s' % (x,) substitutes x, so this renders the
                        # chunk type -- presumably token[1] was intended.
                        # The first word in a chunk B-egins the chunk.
                        if index == 0:
                            chunks.append((word, 'B-%s' % token[1:2]) + token[2:])
                        # All other words in a chunk are I-n the chunk.
                        else:
                            chunks.append((word, 'I-%s' % token[1:2]) + token[2:])
                # If the token doesn't have a chunk type, it's O-ut.
                else:
                    chunks.append((token[0], 'O'))
            return chunks
        depth = kwargs.get('depth', 0)
        # NOTE(review): unlike chunked_sents(), kwargs are not forwarded to
        # self._sents() here -- confirm whether that asymmetry is intended.
        sents = self._chunked_sents(self._sents(fileids), depth)
        return LazyMap(__iob_sent, sents)
def words(self, fileids=None):
"""
@return: A list of words.
@rtype: C{list} of C{str}
@param fileids: A list of corpus files.
@type fileids: C{list} of C{str} or regular expression
@kwparam depth: Depth of chunk parsing for nested chunks.
@type depth: C{int}
"""
# Concatenate the list of lists given by sents().
return LazyConcatenation(self.sents(fileids))
def iob_words(self, fileids=None, **kwargs):
"""
@return: A list of word/iob/other tag tuples.
@rtype: C{list} of C{tuple}
@param fileids: A list of corpus files.
@type fileids: C{list} of C{str} or regular expression
@kwparam depth: Depth of chunk parsing for nested chunks.
@type depth: C{int}
"""
# Concatenate the list of lists given by iob_sents().
return LazyConcatenation(self.iob_sents(fileids, **kwargs))
def chunks(self, fileids=None, **kwargs):
"""
@return: A list of chunked sents where chunks are multi-word strings.
@rtype: C{list} of C{list} of C{str}
@param fileids: A list of corpus files.
@type fileids: C{list} of C{str} or regular expression
@kwparam depth: Depth of chunk parsing for nested chunks.
@type depth: C{int}
@kwparam concat: Concatenate sentence lists into one list; works like
itertools.chain()
@type concat: C{bool}
"""
def __chunks(sent):
chunks = []
for token in sent:
# If the token is a list of chunk pieces, append the piece's
# contents as a string.
if isinstance(token, list):
# TODO: Better if able to reverse Treebank-style
# tokenization. The join leaves some weird whitespace.
chunks.append(' '.join([word[0] for word in token]))
# If the token is a tuple, append the token's contents.
elif isinstance(token, tuple):
chunks.append(token[0])
# Something bad happened.
else:
raise
return chunks
sents = self.chunked_sents(fileids, **kwargs)
# Concatenate the lists.
if kwargs.get('concat'):
return LazyConcatenation(LazyMap(__chunks, sents))
# Or not.
else:
return LazyMap(__chunks, sents)
def mentions(self, fileids=None, **kwargs):
"""
| |
max_abs_scaler(input_cols, output_cols=None):
pass
def iqr(self, columns, more=None, relative_error=RELATIVE_ERROR):
"""
Return the column Inter Quartile Range
:param columns:
:param more: Return info about q1 and q3
:param relative_error:
:return:
"""
df = self.root
iqr_result = {}
columns = parse_columns(df, columns)
quartile = df.cols.percentile(columns, [0.25, 0.5, 0.75], relative_error=relative_error, tidy=False)[
"percentile"]
# print("quantile",quartile)
for col_name in columns:
if is_null(quartile[col_name]):
iqr_result[col_name] = np.nan
else:
q1 = quartile[col_name][0.25]
q2 = quartile[col_name][0.5]
q3 = quartile[col_name][0.75]
iqr_value = q3 - q1
if more:
result = {"iqr": iqr_value, "q1": q1, "q2": q2, "q3": q3}
else:
result = iqr_value
iqr_result[col_name] = result
return format_dict(iqr_result)
    @staticmethod
    @abstractmethod
    def nest(input_cols, separator="", output_col=None, drop=False, shape="string"):
        # Abstract hook implemented per engine; presumably combines several
        # columns into one of the given shape -- see concrete subclasses.
        pass
    def unnest(self, input_cols, separator=None, splits=2, index=None, output_cols=None, drop=False, mode="string"):
        """
        Split an array or string in different columns
        :param input_cols: Columns to be un-nested
        :param output_cols: Resulted on or multiple columns after the unnest operation [(output_col_1_1,output_col_1_2),
        (output_col_2_1, output_col_2]
        :param separator: char or regex
        :param splits: Number of columns splits.
        :param index: Return a specific index per columns. [1,2]
        :param drop: Drop the source columns after splitting.
        :param mode: "string" (split on separator) or "array" (expand list cells).
        """
        df = self.root
        if separator is not None:
            # Escape so the separator is treated literally by str.split's regex.
            separator = re.escape(separator)
        input_cols = parse_columns(df, input_cols)
        index = val_to_list(index)
        # NOTE(review): output_ordered_columns is computed but never used below.
        output_ordered_columns = df.cols.names()
        dfd = df.data
        for idx, input_col in enumerate(input_cols):
            if is_list_of_tuples(index):
                final_index = index[idx]
            else:
                final_index = index
            # Derive the names of the generated columns for this input column.
            if output_cols is None:
                final_columns = [input_col + "_" + str(i) for i in range(splits)]
            elif is_list_of_tuples(output_cols):
                final_columns = output_cols[idx]
            elif is_list_value(output_cols):
                final_columns = output_cols
            else:
                final_columns = [output_cols + "_" + str(i) for i in range(splits)]
            # NOTE(review): if mode is neither "string" nor "array", dfd_new is
            # never assigned and the next statement raises NameError -- confirm
            # whether an explicit error for unknown modes is wanted.
            if mode == "string":
                dfd_new = dfd[input_col].astype(str).str.split(separator, expand=True, n=splits - 1)
            elif mode == "array":
                if is_dask_dataframe(dfd):
                    def func(value):
                        # Expand each list cell into a row of columns.
                        pdf = value.apply(pd.Series)
                        pdf.columns = final_columns
                        return pdf
                    dfd_new = dfd[input_col].map_partitions(func, meta={c: object for c in final_columns})
                else:
                    dfd_new = dfd[input_col].apply(pd.Series)
            # If columns split is shorter than the number of splits
            dfd_new.columns = final_columns[:len(dfd_new.columns)]
            df_new = df.new(dfd_new)
            if final_index:
                df_new = df_new.cols.select(final_index[idx])
            df = df.cols.append([df_new])
        df.meta = Meta.action(df.meta, Actions.UNNEST.value, final_columns)
        df = df.cols.move(df_new.cols.names(), "after", input_cols)
        if drop is True:
            if output_cols is not None:
                columns = [col for col in input_cols if col not in output_cols]
            else:
                columns = input_cols
            df = df.cols.drop(columns)
        return df
    @staticmethod
    @abstractmethod
    def scatter(columns, buckets=10):
        # Abstract hook implemented per engine; presumably computes
        # scatter-plot data over `buckets` bins -- see concrete subclasses.
        pass
    def hist(self, columns="*", buckets=20, compute=True):
        """
        Compute per-column histograms with `buckets` bins spanning each
        column's [min, max] range. Built as a delayed graph so partitions
        are histogrammed independently and then merged.
        :param columns: Columns to process ("*" for all).
        :param buckets: Number of bin edges to generate with np.linspace.
        :param compute: If True, materialize the delayed result.
        :return: {"hist": {col_name: [{"lower", "upper", "count"}, ...]}}
        """
        df = self.root
        columns = parse_columns(df, columns)
        @self.F.delayed
        def _bins_col(_columns, _min, _max):
            # Evenly spaced bin edges per column from the delayed min/max.
            return {col_name: list(np.linspace(float(_min["min"][col_name]), float(_max["max"][col_name]), num=buckets))
                    for
                    col_name in _columns}
        _min = df.cols.min(columns, compute=False, tidy=False)
        _max = df.cols.max(columns, compute=False, tidy=False)
        _bins = _bins_col(columns, _min, _max)
        @self.F.delayed
        def _hist(pdf, col_name, _bins):
            # Histogram one partition; non-numeric values become NaN and are
            # ignored by np.histogram.
            # import cupy as cp
            _count, bins_edges = np.histogram(pd.to_numeric(pdf, errors='coerce'), bins=_bins[col_name])
            # _count, bins_edges = np.histogram(self.to_float(col_name).data[col_name], bins=_bins[col_name])
            # _count, bins_edges = cp.histogram(cp.array(_series.to_gpu_array()), buckets)
            return {col_name: [list(_count), list(bins_edges)]}
        @self.F.delayed
        def _agg_hist(values):
            # Sum partition counts per column and emit bin dicts.
            _result = {}
            x = np.zeros(buckets - 1)
            for i in values:
                for j in i:
                    t = i.get(j)
                    if t is not None:
                        _count = np.sum([x, t[0]], axis=0)
                        _bins = t[1]
                        col_name = j
                        l = len(_count)
                        r = [{"lower": float(_bins[i]), "upper": float(_bins[i + 1]),
                              "count": int(_count[i])} for i in range(l)]
                        _result[col_name] = r
            return {"hist": _result}
        partitions = self.F.to_delayed(df.data)
        c = [_hist(part[col_name], col_name, _bins) for part in partitions for col_name in columns]
        d = _agg_hist(c)
        # NOTE(review): if compute is neither True nor False (e.g. None),
        # `result` is never assigned and this raises NameError -- confirm.
        if is_dict(d) or compute is False:
            result = d
        elif compute is True:
            result = d.compute()
        return result
    def count_mismatch(self, columns_type: dict = None, compute=True):
        """
        Count mismatches values in columns
        :param columns_type: Mapping col_name -> props dict with a "dtype" key
            naming the expected profiler datatype.
        :param compute: Unused in this implementation.
        :return: {'col_name': {'mismatch': 0, 'missing': 9, 'match': 0, 'profiler_dtype': 'object'}}
        """
        df = self.root
        result = {}
        # Profiler dtypes whose mask function has a different name.
        profiler_to_mask_func = {
            "decimal": "float"
        }
        for col_name, props in columns_type.items():
            # Match the profiler dtype with the function. The only function that need to be remapped are decimal and int
            dtype = profiler_to_mask_func.get(props["dtype"], props["dtype"])
            # df.mask.<dtype>(col) yields True/False per row; frequency() then
            # counts how many values match the expected type.
            matches_mismatches = getattr(df.mask, dtype)(col_name).cols.frequency()
            values = {list(j.values())[0]: list(j.values())[1] for j in
                      matches_mismatches["frequency"][col_name]["values"]}
            missing = df.mask.nulls(col_name).cols.sum()
            matches = values.get(True)
            # Nulls also count as False in the mask, so subtract them out.
            mismatches = values.get(False, missing) - missing
            # Ensure that value are not None
            matches = 0 if matches is None else int(matches)
            mismatches = 0 if mismatches is None else int(mismatches)
            missing = 0 if missing is None else int(missing)
            result[col_name] = {"match": matches, "missing": missing, "mismatch": mismatches}
        for col_name in columns_type.keys():
            # NOTE(review): this stores the whole props dict under
            # "profiler_dtype", while the docstring shows a plain string --
            # confirm which shape downstream consumers expect.
            result[col_name].update({"profiler_dtype": columns_type[col_name]})
        return result
    @staticmethod
    @abstractmethod
    def count_by_dtypes(columns, infer=False, str_funcs=None, int_funcs=None):
        # Abstract hook implemented per engine; presumably counts values per
        # datatype, optionally inferring with str_funcs/int_funcs -- see
        # concrete subclasses.
        pass
def quality(self, columns="*"):
"""
Infer the datatype and return the match. mismatch and profiler datatype for every column.
In case of date it returns also the format datatype
:param columns:
:return:
"""
df = self.root
a = df.cols.infer_profiler_dtypes(columns)
return df.cols.count_mismatch(a)
def infer_profiler_dtypes(self, columns="*"):
"""
Infer datatypes in a dataframe from a sample. First it identify the data type of every value in every cell.
After that it takes all ghe values apply som heuristic to try to better identify the datatype.
This function use Pandas no matter the engine you are using.
:param columns: Columns in which you want to infer the datatype.
:return:Return a dict with the column and the inferred data type
"""
df = self.root
columns = parse_columns(df, columns)
# Infer the data type from every element in a Series.
sample = df.cols.select(columns).rows.limit(INFER_PROFILER_ROWS)
rows_count = sample.rows.count()
sample_dtypes = sample.to_optimus_pandas().cols.infer_dtypes().cols.frequency()
_unique_counts = df.cols.count_uniques()
cols_and_inferred_dtype = {}
for col_name in columns:
infer_value_counts = sample_dtypes["frequency"][col_name]["values"]
# Common datatype in a column
dtype = infer_value_counts[0]["value"]
second_dtype = infer_value_counts[1]["value"] if len(infer_value_counts) > 1 else None
if dtype == ProfilerDataTypes.MISSING.value and second_dtype:
_dtype = second_dtype
elif dtype != ProfilerDataTypes.NULL.value and dtype != ProfilerDataTypes.MISSING.value:
if dtype == ProfilerDataTypes.INT.value and second_dtype == ProfilerDataTypes.DECIMAL.value:
# In case we have integers and decimal values no matter if we have more integer we cast to decimal
_dtype = second_dtype
else:
_dtype = dtype
elif infer_value_counts[0]["count"] < len(sample_dtypes):
_dtype = second_dtype
else:
_dtype = ProfilerDataTypes.OBJECT.value
_unique_counts = df[col_name].cols.count_uniques()
if not (any(x in [word.lower() for word in wordninja.split(col_name)] for x in ["zip", "zc"])) \
and _dtype == ProfilerDataTypes.ZIP_CODE.value \
and _unique_counts / rows_count < ZIPCODE_THRESHOLD:
_dtype = ProfilerDataTypes.INT.value
# Is the column categorical?. Try to infer the datatype using the column name
is_categorical = False
# if any(x in [word.lower() for word in wordninja.split(col_name)] for x in ["id", "type"]):
# is_categorical = False
if _dtype in PROFILER_CATEGORICAL_DTYPES \
or _unique_counts / rows_count < CATEGORICAL_THRESHOLD \
or any(x in [word.lower() for word in wordninja.split(col_name)] for x in ["id", "type"]):
is_categorical = True
cols_and_inferred_dtype[col_name] = {"dtype": _dtype, "categorical": is_categorical}
if dtype == ProfilerDataTypes.DATETIME.value:
# pydatainfer do not accepts None value so we must filter them
filtered_dates = [i for i in sample[col_name].to_list() if i]
cols_and_inferred_dtype[col_name].update({"format": pydateinfer.infer_dtypes(filtered_dates)})
return cols_and_inferred_dtype
# def match(self, input_cols, regex):
# dfd = self.root.data
#
# return self.root.new(dfd[input_cols].str.match(regex).to_frame())
    def _series_to_dict(self, series):
        """Convert a Series-like object to a plain {index: value} dict via its .to_dict()."""
        return series.to_dict()
    def frequency(self, columns="*", n=MAX_BUCKETS, percentage=False, total_rows=None, count_uniques=False,
                  compute=True, tidy=False):
        """
        Top-n value counts per column, assembled lazily as delayed tasks.

        :param columns: columns to process ("*" means all).
        :param n: number of most frequent values kept per column.
        :param percentage: if True, add a "percentage" field to every value.
        :param total_rows: unused here; percentages use a delayed len(df).
        :param count_uniques: if True, also report each column's unique count.
        :param compute: if True, trigger the computation and return the plain
            dict; otherwise return the delayed object.
        :param tidy: currently unused (formatting hook is commented out).
        :return: {"frequency": {col: {"values": [{"value": v, "count": c}, ...]}}}
        """
        df = self.root
        columns = parse_columns(df, columns)
        @self.F.delayed
        def series_to_dict(_series, _total_freq_count=None):
            # Shape one column's value counts as a list of {"value", "count"}.
            _result = [{"value": i, "count": j} for i, j in self._series_to_dict(_series).items()]
            if _total_freq_count is None:
                _result = {_series.name: {"values": _result}}
            else:
                _result = {_series.name: {"values": _result, "count_uniques": int(_total_freq_count)}}
            return _result
        @self.F.delayed
        def flat_dict(top_n):
            # Merge the per-column dicts into one "frequency" mapping.
            return {"frequency": {key: value for ele in top_n for key, value in ele.items()}}
        @self.F.delayed
        def freq_percentage(_value_counts, _total_rows):
            # Annotate every value with its share of the total row count.
            for i, j in _value_counts.items():
                for x in list(j.values())[0]:
                    x["percentage"] = round((x["count"] * 100 / _total_rows), 2)
            return _value_counts
        value_counts = [df.data[col_name].value_counts() for col_name in columns]
        n_largest = [_value_counts.nlargest(n) for _value_counts in value_counts]
        if count_uniques is True:
            # Rebind count_uniques to the per-column unique counts.
            count_uniques = [_value_counts.count() for _value_counts in value_counts]
            b = [series_to_dict(_n_largest, _count) for _n_largest, _count in zip(n_largest, count_uniques)]
        else:
            b = [series_to_dict(_n_largest) for _n_largest in n_largest]
        c = flat_dict(b)
        if percentage:
            c = freq_percentage(c, df.delayed(len)(df))
        if compute is True:
            result = dd.compute(c)[0]
        else:
            result = c
        # if tidy is True:
        #     format_dict(result)
        return result
    @staticmethod
    @abstractmethod
    def correlation(input_cols, method="pearson", output="json"):
        """Abstract: pairwise column correlation; implemented by each engine."""
        pass
def boxplot(self, columns):
"""
Output the boxplot in python dict format
:param columns: Columns to be processed
:return:
"""
df = self.root
columns = parse_columns(df, columns)
stats = {}
for col_name in columns:
iqr = df.cols.iqr(col_name, more=True)
lb = iqr["q1"] - (iqr["iqr"] * 1.5)
ub = iqr["q3"] + (iqr["iqr"] * 1.5)
_mean = | |
return
ik_step = Pose()
ik_step.position.x = d * ik_delta.position.x + target_pose.position.x
ik_step.position.y = d * ik_delta.position.y + target_pose.position.y
ik_step.position.z = d * ik_delta.position.z + target_pose.position.z
ik_step.orientation.x = d * ik_delta.orientation.x + target_pose.orientation.x
ik_step.orientation.y = d * ik_delta.orientation.y + target_pose.orientation.y
ik_step.orientation.z = d * ik_delta.orientation.z + target_pose.orientation.z
ik_step.orientation.w = d * ik_delta.orientation.w + target_pose.orientation.w
joint_angles = self.sawyer_robot._limb.ik_request(ik_step, self.sawyer_robot._tip_name)
if joint_angles:
self.sawyer_robot._limb.set_joint_positions(joint_angles)
else:
rospy.logerr("No Joint Angles provided for move_to_joint_positions. Staying put.")
r.sleep()
r.sleep()
    @tasync("APPROACH HOVER")
    def approach_hover(self, pose, time, hover_distance, approach_speed=0.001):
        """
        Move linearly to a point hover_distance above the requested pose.

        :param pose: target Pose; the motion goes above it, not onto it.
        :param time: duration handed to the linear motion task.
        :param hover_distance: vertical (z) offset added to pose.position.z.
        :param approach_speed: unused here — kept for interface compatibility.
        :return:
        """
        approach_pose = copy.deepcopy(pose)
        rospy.logwarn("approach pose:" + str(approach_pose))
        rospy.logwarn("hover distance:" + str(hover_distance))
        # approach with a pose the hover-distance above the requested pose
        rospy.logwarn("approach prev z :" + str(approach_pose.position.z))
        approach_pose.position.z = approach_pose.position.z + hover_distance
        rospy.logwarn("approach pos z :" + str(approach_pose.position.z))
        # joint_angles = self._limb.ik_request(approach, self._tip_name)
        # self._limb.set_joint_position_speed(0.0001)
        # self._guarded_move_to_joint_position(joint_angles)
        success = self.create_linear_motion_task(approach_pose, time=time).result()
        if not success:
            # On failure, block forever so the task pipeline does not proceed.
            self.create_wait_forever_task().result()
        rospy.sleep(0.1)
        # self._limb.set_joint_position_speed(0.0001)
    @tasync("PLACE")
    def create_place_task(self, target_pose, approach_speed, approach_time, meet_time, retract_time):
        """
        Place the held object at target_pose: move above it, descend
        linearly, open the gripper, then retract back up.

        :param target_pose: final Pose for the object.
        :param approach_speed: unused here (approach code is commented out).
        :param approach_time: unused here (approach code is commented out).
        :param meet_time: unused here (meet motion is commented out).
        :param retract_time: unused here (retract uses a fixed 1.0 s time).
        :return:
        """
        rospy.logwarn("\nPlacing task..." + str(target_pose))
        hover_distance = self.place_hover_distance
        self.sawyer_robot._limb.set_joint_position_speed(1.0)
        # final_joints = self.sawyer_robot._limb.ik_request(target_pose, self.sawyer_robot._tip_name)
        final_joints = self.sawyer_robot._limb.ik_request(target_pose, self.sawyer_robot._tip_name,
                                                          joint_seed=self.starting_joint_angles,
                                                          nullspace_goal=self.starting_joint_angles)
        hover_pose = copy.deepcopy(target_pose)
        hover_pose.position.z += hover_distance
        # rospy.logwarn("SEED pick: "+ str(jntsseed))
        # Seed the hover IK with the final joints so both solutions are close.
        approach_joints = self.sawyer_robot._limb.ik_request(hover_pose, self.sawyer_robot._tip_name,
                                                            joint_seed=final_joints)
        if rospy.is_shutdown():
            return
        # servo above pose
        # self.approach_hover(target_pose, time=approach_time, approach_speed=approach_speed,
        #                     hover_distance=self._hover_distance).result()
        self.safe_goto_joint_position(approach_joints).result()
        rospy.sleep(0.1)
        rospy.logwarn("APPROACHING DOWN")
        # Drop table2's collision plane and lock orientation while descending.
        self.trajectory_planner.table2_z = -1.0
        self.trajectory_planner.enable_orientation_constraint = True
        self.create_linear_motion_task(target_pose, time=1.0).result()
        # self.safe_goto_joint_position(final_joints).result()
        rospy.sleep(0.5)
        # self.create_linear_motion_task(target_pose, time=meet_time).result()
        rospy.sleep(0.1)
        if rospy.is_shutdown():
            return
        # open the gripper
        self.sawyer_robot.gripper_open()
        rospy.sleep(0.1)
        self.sawyer_robot._gripper.set_object_weight(0)
        rospy.sleep(0.1)
        rospy.logwarn("RETRACTING")
        # retract to clear object
        # self.safe_goto_joint_position(approach_joints).result()
        self.create_linear_motion_task(hover_pose, time=1.0).result()
        # Restore planner state for subsequent motions.
        self.trajectory_planner.enable_orientation_constraint = False
        self.trajectory_planner.set_default_tables_z()
@tasync("SELECT BLOCK&TRAY")
def create_decision_select_block_and_tray(self, blocks, target_block_index):
"""
:return:
"""
# An orientation for gripper fingers to be overhead and parallel to the obj
rospy.logwarn("NEW TARGET BLOCK INDEX: %d" % target_block_index)
target_block = None
if blocks is not None and len(blocks) > 0:
target_block = blocks[target_block_index] # access first item , pose field
else:
rospy.logwarn("No block to pick from table!!")
return False
target_tray = self.environment_estimation.get_tray_by_color(target_block.get_color())
target_tray.gazebo_pose = self.compute_tray_pick_offset_transform(target_tray.gazebo_pose)
rospy.logwarn("TARGET TRAY POSE: " + str(target_tray))
return target_block, target_tray
def compute_grasp_pose_offset(self, pose):
"""
:param pose:
:return:
"""
yrot = tf.transformations.quaternion_from_euler(0, math.pi, 0)
cubeorientation = [pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]
# oorient = [overhead_orientation.x,overhead_orientation.y,overhead_orientation.z,overhead_orientation.w]
# resultingorient = tf.transformations.quaternion_multiply(cubeorientation, tf.transformations.quaternion_conjugate(oorient))
resultingorient = tf.transformations.quaternion_multiply(cubeorientation, yrot)
# resultingorient = cubeorientation
pose.orientation = Quaternion(x=resultingorient[0], y=resultingorient[1], z=resultingorient[2],
w=resultingorient[3])
pose.position.x += 0
pose.position.y += 0
pose.position.z = demo_constants.TABLE_HEIGHT + demo_constants.CUBE_EDGE_LENGTH
return pose
def compute_tray_pick_offset_transform(self, pose):
"""
:param pose:
:return:
"""
overhead_orientation = Quaternion(
x=-0.00142460053167,
y=0.999994209902,
z=-0.00177030764765,
w=0.00253311793936)
pose.orientation = overhead_orientation
return pose
@tasync("SLEEP")
def delay_task(self, secs):
"""
:param secs:
:return:
"""
if not rospy.is_shutdown():
rospy.sleep(secs)
    @tasync("MOVEIT TRAY PLACE")
    def moveit_tray_place(self, target_block, target_tray):
        """
        Plan and execute placing target_block on target_tray with the
        trajectory planner, retrying until it reports success.

        :param target_block: block currently held by the gripper.
        :param target_tray: destination tray.
        """
        result = False
        # place_block returns falsy/negative on failure — keep retrying.
        while not result or result < 0:
            self.scheduler_yield()
            self.trajectory_planner.set_default_tables_z()
            self.trajectory_planner.table2_z = demo_constants.TABLE_HEIGHT
            self.trajectory_planner.update_table2_collision()
            self.trajectory_planner.update_table1_collision()
            target_block.tray = target_tray
            target_block.tray_place_pose = self.compute_grasp_pose_offset(target_tray.get_tray_place_block_pose())
            target_block.place_pose = target_block.tray_place_pose
            result = self.trajectory_planner.place_block(target_block)
            rospy.logwarn("place result: " + str(result))
        target_tray.notify_place_block(target_block, self.gripper_state)
    @tasync("MOVEIT TABLETOP PICK")
    def moveit_tabletop_pick(self, target_block):
        """
        Plan and execute picking target_block from the table with the
        trajectory planner, retrying until it reports success.

        :param target_block: block with a tabletop_arm_view_estimated_pose.
        """
        # self.sawyer_robot.gripper_open()
        self.trajectory_planner.ceilheight = 0.7
        self.trajectory_planner.register_box(target_block)
        result = False
        # pick_block returns falsy/negative on failure — keep retrying.
        while not result or result < 0:
            self.scheduler_yield()
            rospy.logwarn("target block: " + str(target_block))
            # Recompute the grasp pose each attempt from the latest estimate.
            target_block.grasp_pose = copy.deepcopy(
                self.compute_grasp_pose_offset(target_block.tabletop_arm_view_estimated_pose))
            rospy.logwarn("target block pose : " + str(target_block.grasp_pose))
            self.trajectory_planner.set_default_tables_z()
            self.trajectory_planner.table1_z = demo_constants.TABLE_HEIGHT
            self.trajectory_planner.update_table1_collision()
            result = self.trajectory_planner.pick_block(target_block, "table1")
            rospy.logwarn("pick result: " + str(result))
        self.environment_estimation.table.notify_gripper_pick(target_block, self.gripper_state)
    @tasync("MOVEIT TABLETOP PLACE")
    def moveit_tabletop_place(self, target_block):
        """
        Plan and execute placing target_block back on the table with the
        trajectory planner, retrying until it reports success.

        :param target_block: block to place at its tabletop estimated pose.
        """
        result = False
        # place_block returns falsy/negative on failure — keep retrying.
        while not result or result < 0:
            self.scheduler_yield()
            self.trajectory_planner.set_default_tables_z()
            self.trajectory_planner.table1_z = demo_constants.TABLE_HEIGHT
            self.trajectory_planner.update_table2_collision()
            self.trajectory_planner.update_table1_collision()
            target_block.table_place_pose = self.compute_grasp_pose_offset(
                target_block.tabletop_arm_view_estimated_pose)
            target_block.place_pose = target_block.table_place_pose
            result = self.trajectory_planner.place_block(target_block)
            rospy.logwarn("place result: " + str(result))
        self.environment_estimation.table.notify_gripper_place(target_block,self.gripper_state)
    @tasync("MOVEIT TRAYTOP PICK")
    def moveit_traytop_pick(self, target_block):
        """
        Plan and execute picking target_block from its tray with the
        trajectory planner, retrying until it reports success.

        :param target_block: block with a traytop_arm_view_estimated_pose and
            its .tray attribute set.
        """
        # self.sawyer_robot.gripper_open()
        result = False
        # pick_block returns falsy/negative on failure — keep retrying.
        while not result or result < 0:
            self.scheduler_yield()
            rospy.logwarn("target block: " + str(target_block))
            target_block.grasp_pose = self.compute_grasp_pose_offset(target_block.traytop_arm_view_estimated_pose)
            rospy.logwarn("target block pose : " + str(target_block.grasp_pose))
            self.trajectory_planner.set_default_tables_z()
            self.trajectory_planner.table2_z = demo_constants.TABLE_HEIGHT
            self.trajectory_planner.update_table2_collision()
            result = self.trajectory_planner.pick_block(target_block, "table2")
            rospy.logwarn("pick result: " + str(result))
        target_block.tray.notify_pick_block(target_block,self.gripper_state)
@tasync("PICK BLOCK FROM TABLE AND MOVE TO TRAY")
def pick_block_from_table_and_place_to_tray(self, target_block, target_tray):
"""
:param original_block_poses:
:return: None if picking failed (probably grasping failed)
"""
try:
self.moveit_tabletop_pick(target_block).result()
rospy.sleep(0.1)
if self.sawyer_robot._gripper.get_position() < 0.01:
rospy.logerr("LOOKS LIKE THE GRASPING FAILED")
self.trajectory_planner.clear_attached_object(target_block)
self.gripper_state.holding_block = None
return None
rospy.sleep(0.5)
self.moveit_tray_place(target_block, target_tray).result()
rospy.logwarn("pick and place finished. table blocks: " + str(self.environment_estimation.table.blocks))
rospy.logwarn("pick and place finished. target tray blocks: " + str(target_tray.blocks))
except Exception as ex:
rospy.logerr("Some exception on pick and place: "+ str(ex.message))
self.create_wait_forever_task().result()
# self.create_wait_forever_task().result()
# self.trajectory_planner.ceilheight = 0.8
# self.create_pick_task(grasp_block_pose, approach_speed=0.01, approach_time=1.0,
# meet_time=1.0,
# retract_time=0.5,
# hover_distance=None).result()
# self.create_place_task(
# copy.deepcopy(self.compute_block_pick_offset_transform(target_tray.get_tray_place_block_pose())),
# approach_speed=0.0001,
# approach_time=2.0,
# meet_time=3.0,
# retract_time=1.0).result()
self.trajectory_planner.ceilheight = 0.75
self.trajectory_planner.set_default_tables_z()
return target_block.grasp_pose
    @tasync("BLOCK FROM TRAY TO TABLE")
    def pick_all_pieces_from_tray_and_put_on_table(self):
        """
        Move every block from every tray back onto the table.

        For each block the pose is re-estimated first (by the arm camera, or
        simulated from the gazebo pose), then the block is picked from the
        tray and placed on the table.
        :return:
        """
        self.environment_estimation.update()
        for target_tray in self.environment_estimation.get_trays():
            for target_block in reversed(
                    target_tray.blocks):  # you have to pop in the inverse order since adding object is pushing back the block queue
                if not demo_constants.SIMULATE_TRAY_BLOCK_DETECTION:
                    detected = self.create_move_top_block_view_and_detect(target_block, "tray_place_pose",
                                                                          additional_z_offset=0.1,
                                                                          CUBE_SIZE=95).result()
                else:
                    rospy.sleep(1.0)
                    self.environment_estimation.update()
                    # Simulated detection: take the gazebo pose, reset the
                    # orientation to identity, then rotate pi/2 around z.
                    target_block.traytop_arm_view_estimated_pose = target_block.gazebo_pose
                    target_block.traytop_arm_view_estimated_pose.orientation.x = 0
                    target_block.traytop_arm_view_estimated_pose.orientation.y = 0
                    target_block.traytop_arm_view_estimated_pose.orientation.z = 0
                    target_block.traytop_arm_view_estimated_pose.orientation.w = 1.0
                    homopose = utils.mathutils.get_homo_matrix_from_pose_msg(
                        target_block.traytop_arm_view_estimated_pose)
                    rotz = utils.mathutils.rot_z(math.pi / 2)
                    rotatedhomopose = utils.mathutils.composition(homopose, rotz)
                    target_block.traytop_arm_view_estimated_pose = utils.mathutils.homotransform_to_pose_msg(
                        rotatedhomopose)
                self.moveit_traytop_pick(target_block).result()
                self.moveit_tabletop_place(target_block).result()
        # target_block, target_tray = self.create_detect_block_poses_task(blocks, target_block_index) \
        #     .result()
        # self.create_pick_task(copy.deepcopy(target_block.gazebo_pose),
        #                       approach_speed=0.0001,
        #                       approach_time=2.0,
        #                       meet_time=3.0,
        #                       retract_time=1.0,
        #                       hover_distance=None).result()
        # place_pose = self.compute_grasp_pose_offset(target_block.grasp_pose)
        # place_pose = target_block.grasp_pose
        # rospy.logerr("place vs: "+ str(target_block.gazebo_pose) +"\n"+ str(place_pose))
        # self.create_place_task(copy.deepcopy(place_pose),
        #                       approach_speed=0.0001,
        #                       approach_time=2.0,
        #                       meet_time=3.0,
        #                       retract_time=1.0).result()
        # target_block_index += 1
    @tasync("HEAD VISION PROCESSING")
    def create_head_vision_processing_on_table(self):
        """
        Refresh the environment estimation and compute table block poses
        from the head camera.

        :return: the list of blocks currently estimated on the table.
        """
        self.environment_estimation.update()
        self.environment_estimation.compute_block_pose_estimations_from_head_camera()
        return self.environment_estimation.table.blocks
    @tasync("LOCATE ARMVIEW TO BLOCK ESTIMATION")
    def create_move_top_block_view_and_detect(self, block, source="headview_pose_estimation", additional_z_offset=0.0,
                                              CUBE_SIZE=150):
        """
        Move the arm camera over a block and re-estimate its grasp pose.

        :param block: block whose pose should be refined.
        :param source: which stored pose to hover over:
            "headview_pose_estimation" (table) or "tray_place_pose" (tray).
        :param additional_z_offset: extra height over the standard view z.
        :param CUBE_SIZE: passed through to the arm-camera estimator —
            presumably the expected cube size in pixels; TODO confirm.
        :return: True when a pose was estimated and a grasp is available,
            False otherwise.
        """
        tabletop = True
        if source == "headview_pose_estimation":
            tabletop = True
        elif source == "tray_place_pose":
            tabletop = False
        if tabletop:
            rospy.loginfo("trying to estimate pose of block: " + str(block))
            top_view_pose = copy.deepcopy(block.headview_pose_estimation)
        else:
            rospy.loginfo("trying to estimate pose of block: " + str(block))
            top_view_pose = copy.deepcopy(block.tray_place_pose)
        # Identity orientation: look straight along the frame's default axis.
        top_view_pose.orientation.x = 0
        top_view_pose.orientation.y = 0
        top_view_pose.orientation.z = 0
        top_view_pose.orientation.w = 1
        # chose z plane
        top_view_pose.position.z = demo_constants.ARM_TOP_VIEW_Z_OFFSET + additional_z_offset
        poseaux = top_view_pose  # Pose(position=Point(x=0.5 + ki*0.1, y=0.0, z=0.2),orientation=Quaternion(x=0, y=0, z=0, w=1))
        # Compose a pi/2 rotation around y — presumably to point the camera
        # down at the block; TODO confirm against create_move_XY.
        topview_homo_pose = utils.mathutils.get_homo_matrix_from_pose_msg(poseaux)
        topview_homo_pose = utils.mathutils.composition(topview_homo_pose, utils.mathutils.rot_y(math.pi / 2.0))
        poseaux = utils.mathutils.homotransform_to_pose_msg(topview_homo_pose)
        self.create_move_XY(poseaux).result()
        # individual processing algorithm
        estimated_cube_grasping_pose, available_grasp = self.environment_estimation.compute_block_pose_estimation_from_arm_camera(
            CUBE_SIZE=CUBE_SIZE)
        if estimated_cube_grasping_pose is None:
            rospy.logerr("cube on table not detected")
            return False
        else:
            rospy.logwarn("CUBE POSE DETECTED")
        if not available_grasp:
            rospy.logerr("there is no available grasping for this piece")
            return False
        self.environment_estimation.update()
        # Store the refined pose on the field that matches where the block is.
        if tabletop:
            block.tabletop_arm_view_estimated_pose = estimated_cube_grasping_pose
        else:
            block.traytop_arm_view_estimated_pose = estimated_cube_grasping_pose
        rospy.logerr("Cube properly detected with convinient grasp")
        return True
@tasync("OBSERVE ALL CUBES")
def create_visit_all_cubes_armview(self, iterations_count=None):
""""
the robot camera locates on top of each block iteratively and in a loop
"""
blocks = self.create_head_vision_processing_on_table().result()
iteration = 0
while iterations_count is None or iteration < iterations_count:
self.scheduler_yield()
for block in blocks:
detected = self.create_move_top_block_view_and_detect(block).result()
iteration += 1
    @tasync("WAIT FOREVER")
    def create_wait_forever_task(self):
        """
        locks the taskplanner forever
        :return:
        """
        # Loop 10-second sleeps until ROS shuts down (or the scheduler stops
        # yielding control back to this task).
        while not rospy.is_shutdown() and self.scheduler_yield():
            self.delay_task(10).result()
@tasync("PLACE TO TRAY REQUEST")
def place_block_to_tray(self,tray_index):
tray_index = int(tray_index)
trayslen = len(self.environment_estimation.trays)
rospy.logwarn("request place to tray: %d", tray_index)
if self.gripper_state.holding_block is not None:
rospy.logwarn("gripper holding block:"+ str(self.gripper_state.holding_block.get_state()))
if tray_index <trayslen and tray_index >= 0:
target_tray = self.environment_estimation.trays[tray_index]
self.moveit_tray_place(self.gripper_state.holding_block, target_tray).result()
else:
rospy.logwarn(
"Cannot place, incorrect tray id")
else:
rospy.logwarn(
"Cannot place block, robot is not holding any block")
@tasync("PICK BY COLOR REQUEST")
def pick_block_from_table_by_color(self, color):
"""
:param color:
:return:
"""
self.reset_and_recompute_head_vision_table_state_estimation()
blocks = self.environment_estimation.table.get_blocks()
filtered = [b for i, b in enumerate(blocks) if b.is_color(color)]
btarget =None
if len(filtered)>0:
btarget = filtered[0]
else:
return None
res = self.create_move_top_block_view_and_detect(btarget).result()
if res:
self.moveit_tabletop_pick(btarget).result()
return btarget
    @tasync("PICK BY COLOR AND PUT TRAY")
    def put_block_into_tray_task(self, color, trayid):
        """
        Pick a block of the given color from the table and place it on the
        requested tray.

        :param color: color of the block to pick.
        :param trayid: index of the destination tray.
        :return:
        """
        # self.reset_cycle()
        # decide and select block and pick it
        # NOTE(review): target_block is unused; place_block_to_tray works off
        # gripper_state.holding_block instead.
        target_block = self.pick_block_from_table_by_color(color).result()
        self.place_block_to_tray(trayid).result()
    @tasync("REQUEST PUT ALL CONTENTS ON TABLE")
    def put_all_contents_on_table(self):
        """
        Move every block found on the trays back onto the table.
        :return:
        """
        self.environment_estimation.update()
        # NOTE(review): the original poses are only fetched and logged here;
        # the actual motion is delegated to the task below.
        original_blocks_poses = self.environment_estimation.get_original_block_poses()
        rospy.logwarn(original_blocks_poses)
        self.pick_all_pieces_from_tray_and_put_on_table().result()
@tasync("DETECT BLOCK POSE")
def create_detect_block_poses_task(self, blocks, target_block_index):
"""
:param target_block_index:
:return:
"""
target_block = None
target_tray = None
while target_block is None:
target_block, target_tray = self.create_decision_select_block_and_tray(blocks, target_block_index).result()
self.delay_task(0.1).result()
return | |
<gh_stars>0
import numpy as np
from numpy.random import randn
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import merge
@pytest.fixture
def left():
    """left dataframe (not multi-indexed) for multi-index join tests"""
    # A small, relevant example including a key with no right match ("snap")
    # and NAs introduced later by some tests.
    pairs = [
        ("bar", "two"), ("bar", "one"), ("bar", "three"),
        ("foo", "one"), ("foo", "two"), ("baz", "one"),
        ("baz", "two"), ("qux", "two"), ("qux", "three"),
        ("snap", "one"),
    ]
    key1 = [a for a, _ in pairs]
    key2 = [b for _, b in pairs]
    return DataFrame(
        {"key1": key1, "key2": key2, "data": np.random.randn(len(pairs))}
    )
@pytest.fixture
def right():
    """right dataframe (multi-indexed) for multi-index join tests"""
    idx = MultiIndex(
        levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
        codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
        names=["key1", "key2"],
    )
    cols = ["j_one", "j_two", "j_three"]
    return DataFrame(np.random.randn(10, 3), index=idx, columns=cols)
@pytest.fixture
def left_multi():
    """trips dataframe indexed by Origin/Destination/Period/TripPurp"""
    frame = DataFrame(
        {
            "Origin": ["A", "A", "B", "B", "C"],
            "Destination": ["A", "B", "A", "C", "A"],
            "Period": ["AM", "AM", "IP", "AM", "OP"],
            "TripPurp": ["hbw", "nhb", "hbo", "nhb", "hbw"],
            "Trips": [1987, 3647, 2470, 4296, 4444],
        },
        columns=["Origin", "Destination", "Period", "TripPurp", "Trips"],
    )
    return frame.set_index(["Origin", "Destination", "Period", "TripPurp"])
@pytest.fixture
def right_multi():
    """distances dataframe indexed by Origin/Destination/Period/LinkType"""
    frame = DataFrame(
        {
            "Origin": ["A", "A", "B", "B", "C", "C", "E"],
            "Destination": ["A", "B", "A", "B", "A", "B", "F"],
            "Period": ["AM", "AM", "IP", "AM", "OP", "IP", "AM"],
            "LinkType": ["a", "b", "c", "b", "a", "b", "a"],
            "Distance": [100, 80, 90, 80, 75, 35, 55],
        },
        columns=["Origin", "Destination", "Period", "LinkType", "Distance"],
    )
    return frame.set_index(["Origin", "Destination", "Period", "LinkType"])
@pytest.fixture
def on_cols_multi():
    """index columns common to left_multi and right_multi (the merge keys)"""
    return ["Origin", "Destination", "Period"]
@pytest.fixture
def idx_cols_multi():
    """union of the left_multi and right_multi index columns"""
    return ["Origin", "Destination", "Period", "TripPurp", "LinkType"]
class TestMergeMulti:
    def setup_method(self):
        """Build the shared multi-index frame and the NA-bearing key frame."""
        self.index = MultiIndex(
            levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
            codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
            names=["first", "second"],
        )
        self.to_join = DataFrame(
            np.random.randn(10, 3),
            index=self.index,
            columns=["j_one", "j_two", "j_three"],
        )
        # a little relevant example with NAs
        key1 = ["bar", "bar", "bar", "foo", "foo", "baz", "baz", "qux", "qux", "snap"]
        key2 = [
            "two",
            "one",
            "three",
            "one",
            "two",
            "one",
            "two",
            "two",
            "three",
            "one",
        ]
        data = np.random.randn(len(key1))
        self.data = DataFrame({"key1": key1, "key2": key2, "data": data})
def test_merge_on_multikey(self, left, right, join_type):
on_cols = ["key1", "key2"]
result = left.join(right, on=on_cols, how=join_type).reset_index(drop=True)
expected = pd.merge(left, right.reset_index(), on=on_cols, how=join_type)
tm.assert_frame_equal(result, expected)
result = left.join(right, on=on_cols, how=join_type, sort=True).reset_index(
drop=True
)
expected = pd.merge(
left, right.reset_index(), on=on_cols, how=join_type, sort=True
)
tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize("sort", [False, True])
    def test_left_join_multi_index(self, left, right, sort):
        """Randomized left join on three columns, with and without NAs."""
        icols = ["1st", "2nd", "3rd"]
        def bind_cols(df):
            # Encode the three key columns into one number so each joined
            # row's expected value can be recomputed after the join.
            iord = lambda a: 0 if a != a else ord(a)
            f = lambda ts: ts.map(iord) - ord("a")
            return f(df["1st"]) + f(df["3rd"]) * 1e2 + df["2nd"].fillna(0) * 1e4
        def run_asserts(left, right, sort):
            res = left.join(right, on=icols, how="left", sort=sort)
            assert len(left) < len(res) + 1
            assert not res["4th"].isna().any()
            assert not res["5th"].isna().any()
            tm.assert_series_equal(res["4th"], -res["5th"], check_names=False)
            result = bind_cols(res.iloc[:, :-2])
            tm.assert_series_equal(res["4th"], result, check_names=False)
            assert result.name is None
            if sort:
                tm.assert_frame_equal(res, res.sort_values(icols, kind="mergesort"))
            out = merge(left, right.reset_index(), on=icols, sort=sort, how="left")
            res.index = np.arange(len(res))
            tm.assert_frame_equal(out, res)
        # Build a 5000-row random frame; right is a shuffled copy indexed by
        # the key columns, carrying the negated encoded value.
        lc = list(map(chr, np.arange(ord("a"), ord("z") + 1)))
        left = DataFrame(np.random.choice(lc, (5000, 2)), columns=["1st", "3rd"])
        left.insert(1, "2nd", np.random.randint(0, 1000, len(left)))
        i = np.random.permutation(len(left))
        right = left.iloc[i].copy()
        left["4th"] = bind_cols(left)
        right["5th"] = -bind_cols(right)
        right.set_index(icols, inplace=True)
        run_asserts(left, right, sort)
        # inject some nulls
        left.loc[1::23, "1st"] = np.nan
        left.loc[2::37, "2nd"] = np.nan
        left.loc[3::43, "3rd"] = np.nan
        left["4th"] = bind_cols(left)
        i = np.random.permutation(len(left))
        right = left.iloc[i, :-1]
        right["5th"] = -bind_cols(right)
        right.set_index(icols, inplace=True)
        run_asserts(left, right, sort)
@pytest.mark.parametrize("sort", [False, True])
def test_merge_right_vs_left(self, left, right, sort):
# compare left vs right merge with multikey
on_cols = ["key1", "key2"]
merged_left_right = left.merge(
right, left_on=on_cols, right_index=True, how="left", sort=sort
)
merge_right_left = right.merge(
left, right_on=on_cols, left_index=True, how="right", sort=sort
)
# Reorder columns
merge_right_left = merge_right_left[merged_left_right.columns]
tm.assert_frame_equal(merged_left_right, merge_right_left)
def test_merge_multiple_cols_with_mixed_cols_index(self):
# GH29522
s = pd.Series(
range(6),
pd.MultiIndex.from_product([["A", "B"], [1, 2, 3]], names=["lev1", "lev2"]),
name="Amount",
)
df = pd.DataFrame(
{"lev1": list("AAABBB"), "lev2": [1, 2, 3, 1, 2, 3], "col": 0}
)
result = pd.merge(df, s.reset_index(), on=["lev1", "lev2"])
expected = pd.DataFrame(
{
"lev1": list("AAABBB"),
"lev2": [1, 2, 3, 1, 2, 3],
"col": [0] * 6,
"Amount": range(6),
}
)
tm.assert_frame_equal(result, expected)
def test_compress_group_combinations(self):
# ~ 40000000 possible unique groups
key1 = tm.rands_array(10, 10000)
key1 = np.tile(key1, 2)
key2 = key1[::-1]
df = DataFrame({"key1": key1, "key2": key2, "value1": np.random.randn(20000)})
df2 = DataFrame(
{"key1": key1[::2], "key2": key2[::2], "value2": np.random.randn(10000)}
)
# just to hit the label compression code path
merge(df, df2, how="outer")
    def test_left_join_index_preserve_order(self):
        """Left join on a two-level index must keep the left frame's order."""
        on_cols = ["k1", "k2"]
        left = DataFrame(
            {
                "k1": [0, 1, 2] * 8,
                "k2": ["foo", "bar"] * 12,
                "v": np.array(np.arange(24), dtype=np.int64),
            }
        )
        index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")])
        right = DataFrame({"v2": [5, 7]}, index=index)
        result = left.join(right, on=on_cols)
        expected = left.copy()
        expected["v2"] = np.nan
        expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5
        expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7
        tm.assert_frame_equal(result, expected)
        # Sorted join must agree with sorting the unsorted result.
        result.sort_values(on_cols, kind="mergesort", inplace=True)
        expected = left.join(right, on=on_cols, sort=True)
        tm.assert_frame_equal(result, expected)
        # test join with multi dtypes blocks
        left = DataFrame(
            {
                "k1": [0, 1, 2] * 8,
                "k2": ["foo", "bar"] * 12,
                "k3": np.array([0, 1, 2] * 8, dtype=np.float32),
                "v": np.array(np.arange(24), dtype=np.int32),
            }
        )
        index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")])
        right = DataFrame({"v2": [5, 7]}, index=index)
        result = left.join(right, on=on_cols)
        expected = left.copy()
        expected["v2"] = np.nan
        expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5
        expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7
        tm.assert_frame_equal(result, expected)
        result = result.sort_values(on_cols, kind="mergesort")
        expected = left.join(right, on=on_cols, sort=True)
        tm.assert_frame_equal(result, expected)
    def test_left_join_index_multi_match_multiindex(self):
        """Left join where one left row matches several right index rows."""
        left = DataFrame(
            [
                ["X", "Y", "C", "a"],
                ["W", "Y", "C", "e"],
                ["V", "Q", "A", "h"],
                ["V", "R", "D", "i"],
                ["X", "Y", "D", "b"],
                ["X", "Y", "A", "c"],
                ["W", "Q", "B", "f"],
                ["W", "R", "C", "g"],
                ["V", "Y", "C", "j"],
                ["X", "Y", "B", "d"],
            ],
            columns=["cola", "colb", "colc", "tag"],
            index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8],
        )
        right = DataFrame(
            [
                ["W", "R", "C", 0],
                ["W", "Q", "B", 3],
                ["W", "Q", "B", 8],
                ["X", "Y", "A", 1],
                ["X", "Y", "A", 4],
                ["X", "Y", "B", 5],
                ["X", "Y", "C", 6],
                ["X", "Y", "C", 9],
                ["X", "Q", "C", -6],
                ["X", "R", "C", -9],
                ["V", "Y", "C", 7],
                ["V", "R", "D", 2],
                ["V", "R", "D", -1],
                ["V", "Q", "A", -3],
            ],
            columns=["col1", "col2", "col3", "val"],
        ).set_index(["col1", "col2", "col3"])
        result = left.join(right, on=["cola", "colb", "colc"], how="left")
        # Multi-matched left rows are duplicated; unmatched ones get NaN.
        expected = DataFrame(
            [
                ["X", "Y", "C", "a", 6],
                ["X", "Y", "C", "a", 9],
                ["W", "Y", "C", "e", np.nan],
                ["V", "Q", "A", "h", -3],
                ["V", "R", "D", "i", 2],
                ["V", "R", "D", "i", -1],
                ["X", "Y", "D", "b", np.nan],
                ["X", "Y", "A", "c", 1],
                ["X", "Y", "A", "c", 4],
                ["W", "Q", "B", "f", 3],
                ["W", "Q", "B", "f", 8],
                ["W", "R", "C", "g", 0],
                ["V", "Y", "C", "j", 7],
                ["X", "Y", "B", "d", 5],
            ],
            columns=["cola", "colb", "colc", "tag", "val"],
            index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8],
        )
        tm.assert_frame_equal(result, expected)
        result = left.join(right, on=["cola", "colb", "colc"], how="left", sort=True)
        expected = expected.sort_values(["cola", "colb", "colc"], kind="mergesort")
        tm.assert_frame_equal(result, expected)
    def test_left_join_index_multi_match(self):
        """Left join on a single non-unique index key duplicates left rows."""
        left = DataFrame(
            [["c", 0], ["b", 1], ["a", 2], ["b", 3]],
            columns=["tag", "val"],
            index=[2, 0, 1, 3],
        )
        right = DataFrame(
            [
                ["a", "v"],
                ["c", "w"],
                ["c", "x"],
                ["d", "y"],
                ["a", "z"],
                ["c", "r"],
                ["e", "q"],
                ["c", "s"],
            ],
            columns=["tag", "char"],
        ).set_index("tag")
        result = left.join(right, on="tag", how="left")
        expected = DataFrame(
            [
                ["c", 0, "w"],
                ["c", 0, "x"],
                ["c", 0, "r"],
                ["c", 0, "s"],
                ["b", 1, np.nan],
                ["a", 2, "v"],
                ["a", 2, "z"],
                ["b", 3, np.nan],
            ],
            columns=["tag", "val", "char"],
            index=[2, 2, 2, 2, 0, 1, 1, 3],
        )
        tm.assert_frame_equal(result, expected)
        result = left.join(right, on="tag", how="left", sort=True)
        expected2 = expected.sort_values("tag", kind="mergesort")
        tm.assert_frame_equal(result, expected2)
        # GH7331 - maintain left frame order in left merge
        result = merge(left, right.reset_index(), how="left", on="tag")
        expected.index = np.arange(len(expected))
        tm.assert_frame_equal(result, expected)
def test_left_merge_na_buglet(self):
left = DataFrame(
{
"id": list("abcde"),
"v1": randn(5),
"v2": randn(5),
"dummy": list("abcde"),
"v3": randn(5),
},
columns=["id", "v1", "v2", "dummy", "v3"],
)
right = DataFrame(
{
"id": ["a", "b", np.nan, np.nan, np.nan],
"sv3": [1.234, 5.678, np.nan, np.nan, np.nan],
}
)
result = merge(left, right, on="id", how="left")
rdf = right.drop(["id"], axis=1)
expected = left.join(rdf)
tm.assert_frame_equal(result, expected)
def | |
<filename>dace/codegen/targets/rtl.py
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import itertools
from typing import List, Tuple, Dict
from dace import dtypes, config, registry, symbolic, nodes, sdfg
from dace.sdfg import graph, state, find_input_arraynode, find_output_arraynode
from dace.codegen import codeobject, dispatcher, prettycode
from dace.codegen.targets import target, framecode
from dace.codegen.targets.common import sym2cpp
@registry.autoregister_params(name='rtl')
class RTLCodeGen(target.TargetCodeGenerator):
""" RTL Code Generator (SystemVerilog) """
title = 'RTL'
target_name = 'rtl'
languages = [dtypes.Language.SystemVerilog]
    def __init__(self, frame_codegen: framecode.DaCeCodeGenerator,
                 sdfg: sdfg.SDFG):
        """Register this RTL backend with the frame code generator.

        Registers (a) a node dispatcher whose predicate claims only
        SystemVerilog tasklets, and (b) copy dispatchers for every pair of
        storage types whose source or destination node is a SystemVerilog
        tasklet.
        """
        # store reference to sdfg
        self.sdfg: sdfg.SDFG = sdfg
        # store reference to frame code generator
        self.frame: framecode.DaCeCodeGenerator = frame_codegen
        # get dispatcher to register callbacks for allocation/nodes/.. code generators
        self.dispatcher: dispatcher.TargetDispatcher = frame_codegen.dispatcher
        # register node dispatcher -> generate_node(), predicate: process tasklets only
        self.dispatcher.register_node_dispatcher(
            self, lambda sdfg, node: isinstance(node, nodes.Tasklet) and node.
            language == dtypes.Language.SystemVerilog)
        # register all storage types that connect from/to an RTL tasklet
        for src_storage, dst_storage in itertools.product(
                dtypes.StorageType, dtypes.StorageType):
            self.dispatcher.register_copy_dispatcher(
                src_storage, dst_storage, None, self,
                lambda sdfg, dfg, src_node, dest_node:
                (isinstance(src_node, nodes.Tasklet) and src_node.language ==
                 dtypes.Language.SystemVerilog) or
                (isinstance(dest_node, nodes.Tasklet) and dest_node.language ==
                 dtypes.Language.SystemVerilog))
        # local variables
        # whether the generated C++ simulation code should print debug output
        self.verilator_debug: bool = config.Config.get_bool(
            "compiler", "rtl", "verilator_enable_debug")
        # CodeObjects produced by unparse_tasklet(), one per RTL tasklet
        self.code_objects: List[codeobject.CodeObject] = list()
        # guard so the general C++ header is appended to the SDFG only once
        self.cpp_general_header_added: bool = False
def generate_node(self, sdfg: sdfg.SDFG, dfg: state.StateSubgraphView,
state_id: int, node: nodes.Node,
function_stream: prettycode.CodeIOStream,
callsite_stream: prettycode.CodeIOStream):
# check instance type
if isinstance(node, nodes.Tasklet):
"""
handle Tasklet:
(1) generate in->tasklet
(2) generate tasklet->out
(3) generate tasklet
"""
# generate code to handle data input to the tasklet
for edge in dfg.in_edges(node):
# find input array
src_node = find_input_arraynode(dfg, edge)
# dispatch code gen (copy_memory)
self.dispatcher.dispatch_copy(src_node, node, edge, sdfg, dfg,
state_id, function_stream,
callsite_stream)
# generate code to handle data output from the tasklet
for edge in dfg.out_edges(node):
# find output array
dst_node = find_output_arraynode(dfg, edge)
# dispatch code gen (define_out_memlet)
self.dispatcher.dispatch_output_definition(
node, dst_node, edge, sdfg, dfg, state_id, function_stream,
callsite_stream)
# generate tasklet code
self.unparse_tasklet(sdfg, dfg, state_id, node, function_stream,
callsite_stream)
else:
raise RuntimeError(
"Only tasklets are handled here, not {}. This should have been filtered by the predicate"
.format(type(node)))
def copy_memory(self, sdfg: sdfg.SDFG, dfg: state.StateSubgraphView,
state_id: int, src_node: nodes.Node, dst_node: nodes.Node,
edge: graph.MultiConnectorEdge,
function_stream: prettycode.CodeIOStream,
callsite_stream: prettycode.CodeIOStream):
"""
Generate input/output memory copies from the array references to local variables (i.e. for the tasklet code).
"""
if isinstance(edge.src, nodes.AccessNode) and isinstance(
edge.dst, nodes.Tasklet): # handle AccessNode->Tasklet
if isinstance(dst_node.in_connectors[edge.dst_conn],
dtypes.pointer): # pointer accessor
line: str = "{} {} = &{}[0];".format(
dst_node.in_connectors[edge.dst_conn].ctype, edge.dst_conn,
edge.src.data)
elif isinstance(dst_node.in_connectors[edge.dst_conn],
dtypes.vector): # vector accessor
line: str = "{} {} = *({} *)(&{}[0]);".format(
dst_node.in_connectors[edge.dst_conn].ctype, edge.dst_conn,
dst_node.in_connectors[edge.dst_conn].ctype, edge.src.data)
else: # scalar accessor
line: str = "{}* {} = &{}[0];".format(
dst_node.in_connectors[edge.dst_conn].ctype, edge.dst_conn,
edge.src.data)
else:
raise RuntimeError(
"Not handling copy_memory case of type {} -> {}.".format(
type(edge.src), type(edge.dst)))
# write accessor to file
callsite_stream.write(line)
def define_out_memlet(self, sdfg: sdfg.SDFG, dfg: state.StateSubgraphView,
state_id: int, src_node: nodes.Node,
dst_node: nodes.Node, edge: graph.MultiConnectorEdge,
function_stream: prettycode.CodeIOStream,
callsite_stream: prettycode.CodeIOStream):
"""
Generate output copy code (handled within the rtl tasklet code).
"""
if isinstance(edge.src, nodes.Tasklet) and isinstance(
edge.dst, nodes.AccessNode):
if isinstance(src_node.out_connectors[edge.src_conn],
dtypes.pointer): # pointer accessor
line: str = "{} {} = &{}[0];".format(
src_node.out_connectors[edge.src_conn].ctype, edge.src_conn,
edge.dst.data)
elif isinstance(src_node.out_connectors[edge.src_conn],
dtypes.vector): # vector accessor
line: str = "{} {} = *({} *)(&{}[0]);".format(
src_node.out_connectors[edge.src_conn].ctype, edge.src_conn,
src_node.out_connectors[edge.src_conn].ctype, edge.dst.data)
else: # scalar accessor
line: str = "{}* {} = &{}[0];".format(
src_node.out_connectors[edge.src_conn].ctype, edge.src_conn,
edge.dst.data)
else:
raise RuntimeError(
"Not handling define_out_memlet case of type {} -> {}.".format(
type(edge.src), type(edge.dst)))
# write accessor to file
callsite_stream.write(line)
    def get_generated_codeobjects(self):
        """
        Return list of code objects (that are later generating code files).

        One CodeObject per RTL tasklet is appended by unparse_tasklet().
        """
        return self.code_objects
    @property
    def has_initializer(self):
        """
        Disable initializer method generation.

        Always False: this backend needs no per-target initialization call.
        """
        return False
    @property
    def has_finalizer(self):
        """
        Disable exit/finalizer method generation.

        Always False: this backend needs no per-target finalization call.
        """
        return False
@staticmethod
def cmake_options():
"""
Process variables to be exposed to the CMakeList.txt script.
"""
# get flags from config
verbose = config.Config.get_bool("compiler", "rtl", "verbose")
verilator_flags = config.Config.get("compiler", "rtl",
"verilator_flags")
verilator_lint_warnings = config.Config.get_bool(
"compiler", "rtl", "verilator_lint_warnings")
# create options list
options = [
"-DDACE_RTL_VERBOSE=\"{}\"".format(verbose),
"-DDACE_RTL_VERILATOR_FLAGS=\"{}\"".format(verilator_flags),
"-DDACE_RTL_VERILATOR_LINT_WARNINGS=\"{}\"".format(
verilator_lint_warnings)
]
return options
def generate_rtl_parameters(self, constants):
# construct parameters module header
if len(constants) == 0:
return str()
else:
return "#(\n{}\n)".format(" " + "\n".join([
"{} parameter {} = {}".format("," if i > 0 else "", key,
sym2cpp(constants[key]))
for i, key in enumerate(constants)
]))
def generate_rtl_inputs_outputs(self, sdfg, tasklet):
# construct input / output module header
inputs = list()
for inp in tasklet.in_connectors:
# add vector index
idx_str = ""
# catch symbolic (compile time variables)
check_issymbolic([
tasklet.in_connectors[inp].veclen,
tasklet.in_connectors[inp].bytes
], sdfg)
# extract parameters
vec_len = int(
symbolic.evaluate(tasklet.in_connectors[inp].veclen,
sdfg.constants))
total_size = int(
symbolic.evaluate(tasklet.in_connectors[inp].bytes,
sdfg.constants))
# generate vector representation
if vec_len > 1:
idx_str = "[{}:0]".format(vec_len - 1)
# add element index
idx_str += "[{}:0]".format(int(total_size / vec_len) * 8 - 1)
# generate padded string and add to list
inputs.append(", input{padding}{idx_str} {name}".format(
padding=" " * (17 - len(idx_str)), idx_str=idx_str, name=inp))
outputs = list()
for inp in tasklet.out_connectors:
# add vector index
idx_str = ""
# catch symbolic (compile time variables)
check_issymbolic([
tasklet.out_connectors[inp].veclen,
tasklet.out_connectors[inp].bytes
], sdfg)
# extract parameters
vec_len = int(
symbolic.evaluate(tasklet.out_connectors[inp].veclen,
sdfg.constants))
total_size = int(
symbolic.evaluate(tasklet.out_connectors[inp].bytes,
sdfg.constants))
# generate vector representation
if vec_len > 1:
idx_str = "[{}:0]".format(vec_len - 1)
# add element index
idx_str += "[{}:0]".format(int(total_size / vec_len) * 8 - 1)
# generate padded string and add to list
outputs.append(", output reg{padding}{idx_str} {name}".format(
padding=" " * (12 - len(idx_str)), idx_str=idx_str, name=inp))
return inputs, outputs
    def generate_cpp_inputs_outputs(self, tasklet):
        # generate cpp input reading/output writing code
        """
        Generate the C++ statements that move data between the host arrays
        and the verilated model's ports. Per connector type:

        pointer: streamed one element per call via in_ptr/out_ptr, e.g.
            model->a = a[in_ptr++];   /   b[out_ptr++] = (int)model->b;
        vector: copied element-wise, e.g.
            for(int i = 0; i < veclen; i++){ model->a[i] = a[i]; }
        scalar: same streaming accessor as pointer, e.g.
            model->a = a[in_ptr++];   /   b[out_ptr++] = (int)model->b;

        :return: (input_read_string, output_write_string) C++ snippets.
        """
        # NOTE: pointer and scalar connectors currently generate identical
        # accessors (both index through in_ptr/out_ptr).
        input_read_string = "\n".join([
            "model->{name} = {name}[in_ptr++];".format(
                name=var_name) if isinstance(tasklet.in_connectors[var_name],
                                             dtypes.pointer) else """\
    for(int i = 0; i < {veclen}; i++){{
        model->{name}[i] = {name}[i];
    }}\
    """.format(veclen=tasklet.in_connectors[var_name].veclen, name=var_name)
            if isinstance(tasklet.in_connectors[var_name], dtypes.vector) else
            "model->{name} = {name}[in_ptr++];".format(name=var_name)
            for var_name in tasklet.in_connectors
        ])
        output_read_string = "\n".join([
            "{name}[out_ptr++] = (int)model->{name};".format(
                name=var_name) if isinstance(tasklet.out_connectors[var_name],
                                             dtypes.pointer) else """\
    for(int i = 0; i < {veclen}; i++){{
        {name}[i] = (int)model->{name}[i];
    }}\
    """.format(veclen=tasklet.out_connectors[var_name].veclen, name=var_name)
            if isinstance(tasklet.out_connectors[var_name], dtypes.vector) else
            "{name}[out_ptr++] = (int)model->{name};".format(name=var_name)
            for var_name in tasklet.out_connectors
        ])
        # return generated strings
        return input_read_string, output_read_string
def generate_cpp_vector_init(self, tasklet):
init_vector_string = "\n".join([
"""\
for(int i = 0; i < {veclen}; i++){{
model->{name}[i] = 0;
}}\
""".format(veclen=tasklet.in_connectors[var_name].veclen, name=var_name)
if isinstance(tasklet.in_connectors[var_name], dtypes.vector) else
"" for var_name in tasklet.in_connectors
])
return "// initialize vector\n" if len(
init_vector_string) > 0 else "" + init_vector_string
def generate_cpp_num_elements(self):
# TODO: compute num_elements=#elements that enter/leave the pipeline, for now we assume in_elem=out_elem (i.e. no reduction)
return "int num_elements = {};".format(1)
def generate_cpp_internal_state(self, tasklet):
internal_state_str = " ".join([
"{}=0x%x".format(var_name) for var_name in {
**tasklet.in_connectors,
**tasklet.out_connectors
}
])
internal_state_var = ", ".join([
"model->{}".format(var_name) for var_name in {
**tasklet.in_connectors,
**tasklet.out_connectors
}
])
return internal_state_str, internal_state_var
def unparse_tasklet(self, sdfg: sdfg.SDFG, dfg: state.StateSubgraphView,
state_id: int, node: nodes.Node,
function_stream: prettycode.CodeIOStream,
callsite_stream: prettycode.CodeIOStream):
# extract data
state = sdfg.nodes()[state_id]
tasklet = node
# construct variables paths
unique_name: str = "top_{}_{}_{}".format(sdfg.sdfg_id,
sdfg.node_id(state),
state.node_id(tasklet))
# generate system verilog module components
parameter_string: str = self.generate_rtl_parameters(sdfg.constants)
inputs, outputs = self.generate_rtl_inputs_outputs(sdfg, tasklet)
# create rtl code object (that is later written to file)
self.code_objects.append(
codeobject.CodeObject(
name="{}".format(unique_name),
code=RTLCodeGen.RTL_HEADER.format(name=unique_name,
parameters=parameter_string,
inputs="\n".join(inputs),
outputs="\n".join(outputs)) +
tasklet.code.code + RTLCodeGen.RTL_FOOTER,
language="sv",
target=RTLCodeGen,
title="rtl",
target_type="",
additional_compiler_kwargs="",
linkable=True,
environments=None))
# generate verilator simulation cpp code components
inputs, outputs = self.generate_cpp_inputs_outputs(tasklet)
vector_init = self.generate_cpp_vector_init(tasklet)
num_elements = self.generate_cpp_num_elements()
internal_state_str, internal_state_var = self.generate_cpp_internal_state(
tasklet)
# add header code to stream
if not self.cpp_general_header_added:
sdfg.append_global_code(
cpp_code=RTLCodeGen.CPP_GENERAL_HEADER_TEMPLATE.format(
debug_include="// generic includes\n#include <iostream>"
if self.verilator_debug else ""))
self.cpp_general_header_added = True
sdfg.append_global_code(
cpp_code=RTLCodeGen.CPP_MODEL_HEADER_TEMPLATE.format(
name=unique_name))
# add main cpp code to stream
callsite_stream.write(contents=RTLCodeGen.CPP_MAIN_TEMPLATE.format(
name=unique_name,
inputs=inputs,
outputs=outputs,
num_elements=num_elements,
vector_init=vector_init,
internal_state_str=internal_state_str,
internal_state_var=internal_state_var,
debug_sim_start="std::cout << \"SIM {name} START\" << std::endl;"
if self.verilator_debug else "",
debug_feed_element="std::cout << \"feed new element\" << std::endl;"
if self.verilator_debug else "",
debug_export_element="std::cout << \"export element\" << std::endl;"
if self.verilator_debug else "",
debug_internal_state="""
// report internal state
VL_PRINTF("[t=%lu] clk_i=%u rst_i=%u valid_i=%u ready_i=%u valid_o=%u ready_o=%u \\n", main_time, model->clk_i, model->rst_i, model->valid_i, model->ready_i, model->valid_o, model->ready_o);
VL_PRINTF("{internal_state_str}\\n", {internal_state_var});
std::cout << std::flush;
""".format(internal_state_str=internal_state_str,
internal_state_var=internal_state_var)
if self.verilator_debug else "",
debug_read_input_hs=
"std::cout << \"remove | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sys import version_info
from base64 import b64encode, b64decode
from binascii import hexlify, unhexlify
__all__ = ['encrypt_ecb', 'decrypt_ecb',
           'encrypt_cbc', 'decrypt_cbc',
           'encrypt', 'decrypt']
# Python 2 / Python 3 compatibility shims: normalize the names used below
# (range iterator, string/text/binary types) across both major versions.
if version_info[0] == 2:
    # python2
    PY2 = True
    PY3 = False
else:
    # python3
    PY2 = False
    PY3 = True
if PY2:
    _range = xrange
    string_types = (basestring,)
    text_type = unicode
    binary_type = str
else:
    _range = range
    string_types = (str,)
    text_type = str
    binary_type = bytes
# text encoding used when converting between text and bytes
E_FMT = 'UTF8'
# S-box: the SM4 nonlinear byte-substitution table (one byte in, one byte out)
S_BOX = {
    0X00: 0XD6, 0X01: 0X90, 0X02: 0XE9, 0X03: 0XFE, 0X04: 0XCC, 0X05: 0XE1, 0X06: 0X3D, 0X07: 0XB7,
    0X08: 0X16, 0X09: 0XB6, 0X0A: 0X14, 0X0B: 0XC2, 0X0C: 0X28, 0X0D: 0XFB, 0X0E: 0X2C, 0X0F: 0X05,
    0X10: 0X2B, 0X11: 0X67, 0X12: 0X9A, 0X13: 0X76, 0X14: 0X2A, 0X15: 0XBE, 0X16: 0X04, 0X17: 0XC3,
    0X18: 0XAA, 0X19: 0X44, 0X1A: 0X13, 0X1B: 0X26, 0X1C: 0X49, 0X1D: 0X86, 0X1E: 0X06, 0X1F: 0X99,
    0X20: 0X9C, 0X21: 0X42, 0X22: 0X50, 0X23: 0XF4, 0X24: 0X91, 0X25: 0XEF, 0X26: 0X98, 0X27: 0X7A,
    0X28: 0X33, 0X29: 0X54, 0X2A: 0X0B, 0X2B: 0X43, 0X2C: 0XED, 0X2D: 0XCF, 0X2E: 0XAC, 0X2F: 0X62,
    0X30: 0XE4, 0X31: 0XB3, 0X32: 0X1C, 0X33: 0XA9, 0X34: 0XC9, 0X35: 0X08, 0X36: 0XE8, 0X37: 0X95,
    0X38: 0X80, 0X39: 0XDF, 0X3A: 0X94, 0X3B: 0XFA, 0X3C: 0X75, 0X3D: 0X8F, 0X3E: 0X3F, 0X3F: 0XA6,
    0X40: 0X47, 0X41: 0X07, 0X42: 0XA7, 0X43: 0XFC, 0X44: 0XF3, 0X45: 0X73, 0X46: 0X17, 0X47: 0XBA,
    0X48: 0X83, 0X49: 0X59, 0X4A: 0X3C, 0X4B: 0X19, 0X4C: 0XE6, 0X4D: 0X85, 0X4E: 0X4F, 0X4F: 0XA8,
    0X50: 0X68, 0X51: 0X6B, 0X52: 0X81, 0X53: 0XB2, 0X54: 0X71, 0X55: 0X64, 0X56: 0XDA, 0X57: 0X8B,
    0X58: 0XF8, 0X59: 0XEB, 0X5A: 0X0F, 0X5B: 0X4B, 0X5C: 0X70, 0X5D: 0X56, 0X5E: 0X9D, 0X5F: 0X35,
    0X60: 0X1E, 0X61: 0X24, 0X62: 0X0E, 0X63: 0X5E, 0X64: 0X63, 0X65: 0X58, 0X66: 0XD1, 0X67: 0XA2,
    0X68: 0X25, 0X69: 0X22, 0X6A: 0X7C, 0X6B: 0X3B, 0X6C: 0X01, 0X6D: 0X21, 0X6E: 0X78, 0X6F: 0X87,
    0X70: 0XD4, 0X71: 0X00, 0X72: 0X46, 0X73: 0X57, 0X74: 0X9F, 0X75: 0XD3, 0X76: 0X27, 0X77: 0X52,
    0X78: 0X4C, 0X79: 0X36, 0X7A: 0X02, 0X7B: 0XE7, 0X7C: 0XA0, 0X7D: 0XC4, 0X7E: 0XC8, 0X7F: 0X9E,
    0X80: 0XEA, 0X81: 0XBF, 0X82: 0X8A, 0X83: 0XD2, 0X84: 0X40, 0X85: 0XC7, 0X86: 0X38, 0X87: 0XB5,
    0X88: 0XA3, 0X89: 0XF7, 0X8A: 0XF2, 0X8B: 0XCE, 0X8C: 0XF9, 0X8D: 0X61, 0X8E: 0X15, 0X8F: 0XA1,
    0X90: 0XE0, 0X91: 0XAE, 0X92: 0X5D, 0X93: 0XA4, 0X94: 0X9B, 0X95: 0X34, 0X96: 0X1A, 0X97: 0X55,
    0X98: 0XAD, 0X99: 0X93, 0X9A: 0X32, 0X9B: 0X30, 0X9C: 0XF5, 0X9D: 0X8C, 0X9E: 0XB1, 0X9F: 0XE3,
    0XA0: 0X1D, 0XA1: 0XF6, 0XA2: 0XE2, 0XA3: 0X2E, 0XA4: 0X82, 0XA5: 0X66, 0XA6: 0XCA, 0XA7: 0X60,
    0XA8: 0XC0, 0XA9: 0X29, 0XAA: 0X23, 0XAB: 0XAB, 0XAC: 0X0D, 0XAD: 0X53, 0XAE: 0X4E, 0XAF: 0X6F,
    0XB0: 0XD5, 0XB1: 0XDB, 0XB2: 0X37, 0XB3: 0X45, 0XB4: 0XDE, 0XB5: 0XFD, 0XB6: 0X8E, 0XB7: 0X2F,
    0XB8: 0X03, 0XB9: 0XFF, 0XBA: 0X6A, 0XBB: 0X72, 0XBC: 0X6D, 0XBD: 0X6C, 0XBE: 0X5B, 0XBF: 0X51,
    0XC0: 0X8D, 0XC1: 0X1B, 0XC2: 0XAF, 0XC3: 0X92, 0XC4: 0XBB, 0XC5: 0XDD, 0XC6: 0XBC, 0XC7: 0X7F,
    0XC8: 0X11, 0XC9: 0XD9, 0XCA: 0X5C, 0XCB: 0X41, 0XCC: 0X1F, 0XCD: 0X10, 0XCE: 0X5A, 0XCF: 0XD8,
    0XD0: 0X0A, 0XD1: 0XC1, 0XD2: 0X31, 0XD3: 0X88, 0XD4: 0XA5, 0XD5: 0XCD, 0XD6: 0X7B, 0XD7: 0XBD,
    0XD8: 0X2D, 0XD9: 0X74, 0XDA: 0XD0, 0XDB: 0X12, 0XDC: 0XB8, 0XDD: 0XE5, 0XDE: 0XB4, 0XDF: 0XB0,
    0XE0: 0X89, 0XE1: 0X69, 0XE2: 0X97, 0XE3: 0X4A, 0XE4: 0X0C, 0XE5: 0X96, 0XE6: 0X77, 0XE7: 0X7E,
    0XE8: 0X65, 0XE9: 0XB9, 0XEA: 0XF1, 0XEB: 0X09, 0XEC: 0XC5, 0XED: 0X6E, 0XEE: 0XC6, 0XEF: 0X84,
    0XF0: 0X18, 0XF1: 0XF0, 0XF2: 0X7D, 0XF3: 0XEC, 0XF4: 0X3A, 0XF5: 0XDC, 0XF6: 0X4D, 0XF7: 0X20,
    0XF8: 0X79, 0XF9: 0XEE, 0XFA: 0X5F, 0XFB: 0X3E, 0XFC: 0XD7, 0XFD: 0XCB, 0XFE: 0X39, 0XFF: 0X48
}
# System parameter FK (whitening constants used in the key schedule)
FK = (0XA3B1BAC6, 0X56AA3350, 0X677D9197, 0XB27022DC)
# Fixed parameter CK (the 32 round constants of the key-expansion algorithm)
CK = (0X00070E15, 0X1C232A31, 0X383F464D, 0X545B6269,
      0X70777E85, 0X8C939AA1, 0XA8AFB6BD, 0XC4CBD2D9,
      0XE0E7EEF5, 0XFC030A11, 0X181F262D, 0X343B4249,
      0X50575E65, 0X6C737A81, 0X888F969D, 0XA4ABB2B9,
      0XC0C7CED5, 0XDCE3EAF1, 0XF8FF060D, 0X141B2229,
      0X30373E45, 0X4C535A61, 0X686F767D, 0X848B9299,
      0XA0A7AEB5, 0XBCC3CAD1, 0XD8DFE6ED, 0XF4FB0209,
      0X10171E25, 0X2C333A41, 0X484F565D, 0X646B7279)
# Cache of expanded round keys, keyed by the 128-bit master key
_rk_cache = {}
# Encrypt mode (round keys in forward order)
SM4_ENCRYPT = 1
# Decrypt mode (round keys in reverse order)
SM4_DECRYPT = 0
# Block size in bytes
BLOCK_BYTE = 16
# Block size in hex digits
BLOCK_HEX = BLOCK_BYTE * 2
def num2hex(num, width=1):
    """Format an integer as a lowercase hex string, left-padded with zeros.

    >>> num2hex(1000, width=4)
    '03e8'

    :param num: integer value to format
    :param width: minimum length of the hex string (default 1)
    :return: str
    """
    digits = hex(num)[2:].replace('L', '')  # strip the py2 long suffix
    return '{:0>{width}}'.format(digits, width=width)
def _byte_unpack(num, byte_n=4):
    """Split a byte_n-byte integer into a 4-tuple of equally sized words.

    Each of the four resulting words covers byte_n / 4 bytes of *num*,
    most significant word first.
    """
    parts = 4
    # width of one word, in hex digits
    step = (byte_n // parts) * 2
    hex_str = num2hex(num=num, width=byte_n * 2)
    return tuple(int(hex_str[pos:pos + step], base=16)
                 for pos in _range(0, len(hex_str), step))
def _byte_pack(byte_array, byte_n=4):
    """Inverse of _byte_unpack: join a 4-item word tuple into one integer.

    :raises ValueError: if byte_array does not contain exactly 4 items.
    """
    parts = 4
    # hex width contributed by each word
    width = (byte_n // parts) * 2
    if len(byte_array) != parts:
        raise ValueError('byte_array length must be 4.')
    joined = ''.join(num2hex(num=word, width=width) for word in byte_array)
    return int(joined, 16)
def _s_box(byte):
    # Substitute one byte through the SM4 S-box; returns None for values
    # outside 0..255 (valid cipher inputs always hit the table).
    return S_BOX.get(byte)
def _non_linear_map(byte_array):
    """
    Nonlinear transform tau: with input A=(a0, a1, a2, a3), produce
    (b0, b1, b2, b3) = (Sbox(a0), Sbox(a1), Sbox(a2), Sbox(a3)).
    """
    return tuple(_s_box(byte_array[pos]) for pos in _range(4))
def _linear_map(byte4):
    """
    Linear transform L (used in the round function):
    L(B) = B xor (B <<< 2) xor (B <<< 10) xor (B <<< 18) xor (B <<< 24)
    """
    result = byte4
    for offset in (2, 10, 18, 24):
        result ^= loop_left_shift(byte4, offset)
    return result
def _linear_map_s(byte4):
    """
    Linear transform L' (used in the key schedule):
    L'(B) = B xor (B <<< 13) xor (B <<< 23)
    """
    result = byte4
    for offset in (13, 23):
        result ^= loop_left_shift(byte4, offset)
    return result
def loop_left_shift(num, offset, base=32):
    """Rotate a non-negative integer left by *offset* bits in a *base*-bit word.

    e.g. loop_left_shift(0b11010000, 3, base=8) -> 0b10000110
    """
    padded = bin(num)[2:].zfill(base)
    rot = offset % base
    return int(padded[rot:] + padded[:rot], 2)
def _rep_t(byte4):
    """Composite permutation T = L o tau: S-box pass, then linear transform L."""
    return _linear_map(_byte_pack(_non_linear_map(_byte_unpack(byte4))))
def _rep_t_s(byte4):
    """
    Composite permutation T' = L' o tau: S-box pass, then linear transform L'
    (used only in the key-expansion algorithm).
    """
    return _linear_map_s(_byte_pack(_non_linear_map(_byte_unpack(byte4))))
def _round_keys(mk):
    """Expand the 128-bit master key into the 32 SM4 round keys.

    (K0, K1, K2, K3) = (MK0^FK0, MK1^FK1, MK2^FK2, MK3^FK3)
    rki = K{i+4} = Ki xor T'(K{i+1} xor K{i+2} xor K{i+3} xor CKi), i=0..31

    Results are memoized in _rk_cache, keyed by the master key.
    :param mk: encryption key, 16 bytes / 128 bit
    :return: list of 32 round keys
    """
    # serve from the round-key cache when possible
    cached = _rk_cache.get(mk)
    if cached is not None:
        return cached
    # otherwise run the key-expansion algorithm
    k = list(_byte_unpack(mk, byte_n=16))
    for idx in _range(4):
        k[idx] ^= FK[idx]
    for i in _range(32):
        k.append(k[i] ^ _rep_t_s(k[i + 1] ^ k[i + 2] ^ k[i + 3] ^ CK[i]))
    round_keys = k[4:]
    _rk_cache[mk] = round_keys
    return round_keys
def _round_f(byte4_array, rk):
    """
    Round function F(X0, X1, X2, X3, rk) = X0 xor T(X1 xor X2 xor X3 xor rk).

    :param byte4_array: (X0, X1, X2, X3), 4 bytes / 32 bit each
    :param rk: round key, 4 bytes / 32 bit
    """
    first, second, third, fourth = byte4_array
    return first ^ _rep_t(second ^ third ^ fourth ^ rk)
def _crypt(num, mk, mode=SM4_ENCRYPT):
    """Run the 32-round SM4 transform on a single 16-byte block.

    Encryption and decryption share the structure; decryption simply uses
    the round keys in reverse order.

    :param num: plaintext or ciphertext, 16 bytes
    :param mk: key, 16 bytes
    :param mode: SM4_ENCRYPT or SM4_DECRYPT (round-key order)
    """
    words = list(_byte_unpack(num, byte_n=16))
    schedule = _round_keys(mk)
    if mode == SM4_DECRYPT:
        schedule = schedule[::-1]
    for rnd in _range(32):
        words.append(_round_f(words[rnd:rnd + 4], schedule[rnd]))
    # reverse substitution R: output block is (X35, X34, X33, X32)
    return _byte_pack(words[-4:][::-1], byte_n=16)
def encrypt(clear_num, mk):
    """
    SM4 encryption: 32 rounds of iteration followed by one reverse
    substitution R.
    Plaintext input is (X0, X1, X2, X3) and ciphertext output is
    (Y0, Y1, Y2, Y3), 4 bytes per word. With round keys rki (4 bytes each):
    1). 32 rounds: X{i+4} = F(Xi, X{i+1}, X{i+2}, X{i+3}, rki), i=0,1,...,31
    2). reverse substitution: (Y0, Y1, Y2, Y3) = (X35, X34, X33, X32)
    :param clear_num: plaintext, 16 bytes
    :param mk: key, 16 bytes
    """
    return _crypt(num=clear_num, mk=mk)
def decrypt(cipher_num, mk):
    """
    SM4 decryption: structurally identical to encryption, differing only in
    the order the round keys are applied: (rk31, rk30, ..., rk0).
    :param cipher_num: ciphertext, 16 bytes
    :param mk: key, 16 bytes
    """
    return _crypt(num=cipher_num, mk=mk, mode=SM4_DECRYPT)
def _padding(text, mode=SM4_ENCRYPT):
    """
    PKCS#7-style padding: pad before encryption, strip after decryption.

    Returns None when *text* is not a str/bytes value.
    """
    # python2 is (basestring, )
    # python3 is (str, bytes)
    _str_or_bytes = string_types if PY2 else (string_types + (binary_type,))
    if text is None or not isinstance(text, _str_or_bytes):
        return
    # unicode text is encoded to bytes first
    if isinstance(text, text_type):
        text = text.encode(encoding=E_FMT)
    if mode == SM4_ENCRYPT:
        # pad: append p_num copies of the byte value p_num (1..BLOCK_BYTE)
        p_num = BLOCK_BYTE - (len(text) % BLOCK_BYTE)
        space = '' if PY2 else b''
        pad_s = (chr(p_num) * p_num) if PY2 else (chr(p_num).encode(E_FMT) * p_num)
        res = space.join([text, pad_s])
    else:
        # unpad: the last byte encodes how many padding bytes to strip
        p_num = ord(text[-1]) if PY2 else text[-1]
        res = text[:-p_num]
    return res
def _key_iv_check(key_iv):
"""
密钥或初始化向量检测
"""
# 密钥
if key_iv is None or not isinstance(key_iv, string_types):
raise TypeError('Parameter key or iv:{} not a basestring'.format(key_iv))
if isinstance(key_iv, text_type):
key_iv = key_iv.encode(encoding=E_FMT)
if len(key_iv) > BLOCK_BYTE:
raise ValueError('Parameter key or iv:{} byte greater than {}'.format(key_iv.decode(E_FMT),
BLOCK_BYTE))
| |
not None:
pulumi.set(__self__, "delay_evaluation", delay_evaluation)
if evaluation_interval is not None:
pulumi.set(__self__, "evaluation_interval", evaluation_interval)
if truncation_percentage is not None:
pulumi.set(__self__, "truncation_percentage", truncation_percentage)
@property
@pulumi.getter(name="policyType")
def policy_type(self) -> str:
"""
Expected value is 'TruncationSelection'.
"""
return pulumi.get(self, "policy_type")
@property
@pulumi.getter(name="delayEvaluation")
def delay_evaluation(self) -> Optional[int]:
"""
Number of intervals by which to delay the first evaluation.
"""
return pulumi.get(self, "delay_evaluation")
@property
@pulumi.getter(name="evaluationInterval")
def evaluation_interval(self) -> Optional[int]:
"""
Interval (number of runs) between policy evaluations.
"""
return pulumi.get(self, "evaluation_interval")
@property
@pulumi.getter(name="truncationPercentage")
def truncation_percentage(self) -> Optional[int]:
"""
The percentage of runs to cancel at each evaluation interval.
"""
return pulumi.get(self, "truncation_percentage")
@pulumi.output_type
class UserAccountCredentialsResponse(dict):
    """
    Settings for user account that gets created on each on the nodes of a compute.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property names.
        suggest = None
        if key == "adminUserName":
            suggest = "admin_user_name"
        elif key == "adminUserPassword":
            # BUGFIX: this suggestion string was corrupted to
            # "<PASSWORD>_user_password"; restore the property name that the
            # adminUserPassword getter below actually exposes.
            suggest = "admin_user_password"
        elif key == "adminUserSshPublicKey":
            suggest = "admin_user_ssh_public_key"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in UserAccountCredentialsResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        UserAccountCredentialsResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        UserAccountCredentialsResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 admin_user_name: str,
                 admin_user_password: Optional[str] = None,
                 admin_user_ssh_public_key: Optional[str] = None):
        """
        Settings for user account that gets created on each on the nodes of a compute.
        :param str admin_user_name: Name of the administrator user account which can be used to SSH to nodes.
        :param str admin_user_password: Password of the administrator user account.
        :param str admin_user_ssh_public_key: SSH public key of the administrator user account.
        """
        pulumi.set(__self__, "admin_user_name", admin_user_name)
        if admin_user_password is not None:
            pulumi.set(__self__, "admin_user_password", admin_user_password)
        if admin_user_ssh_public_key is not None:
            pulumi.set(__self__, "admin_user_ssh_public_key", admin_user_ssh_public_key)

    @property
    @pulumi.getter(name="adminUserName")
    def admin_user_name(self) -> str:
        """
        Name of the administrator user account which can be used to SSH to nodes.
        """
        return pulumi.get(self, "admin_user_name")

    @property
    @pulumi.getter(name="adminUserPassword")
    def admin_user_password(self) -> Optional[str]:
        """
        Password of the administrator user account.
        """
        return pulumi.get(self, "admin_user_password")

    @property
    @pulumi.getter(name="adminUserSshPublicKey")
    def admin_user_ssh_public_key(self) -> Optional[str]:
        """
        SSH public key of the administrator user account.
        """
        return pulumi.get(self, "admin_user_ssh_public_key")
@pulumi.output_type
class UserAssignedIdentityMetaResponse(dict):
    """
    User assigned identities associated with a resource.
    """
    @staticmethod
    def __key_warning(key: str):
        # translate known camelCase wire keys to snake_case property names
        suggest = {
            "clientId": "client_id",
            "principalId": "principal_id",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in UserAssignedIdentityMetaResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        UserAssignedIdentityMetaResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        UserAssignedIdentityMetaResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 client_id: Optional[str] = None,
                 principal_id: Optional[str] = None):
        """
        User assigned identities associated with a resource.
        :param str client_id: Aka application ID, a unique identifier generated by Azure AD that is tied to an application and service principal during its initial provisioning.
        :param str principal_id: The object ID of the service principal object for your managed identity that is used to grant role-based access to an Azure resource.
        """
        # only persist properties that were actually provided
        for prop, value in (("client_id", client_id),
                            ("principal_id", principal_id)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[str]:
        """
        Aka application ID, a unique identifier generated by Azure AD that is tied to an application and service principal during its initial provisioning.
        """
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> Optional[str]:
        """
        The object ID of the service principal object for your managed identity that is used to grant role-based access to an Azure resource.
        """
        return pulumi.get(self, "principal_id")
@pulumi.output_type
class UserAssignedIdentityResponse(dict):
    """
    User Assigned Identity
    """
    @staticmethod
    def __key_warning(key: str):
        # translate known camelCase wire keys to snake_case property names
        suggest = {
            "clientId": "client_id",
            "principalId": "principal_id",
            "tenantId": "tenant_id",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in UserAssignedIdentityResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        UserAssignedIdentityResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        UserAssignedIdentityResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 client_id: str,
                 principal_id: str,
                 tenant_id: str):
        """
        User Assigned Identity
        :param str client_id: The clientId(aka appId) of the user assigned identity.
        :param str principal_id: The principal ID of the user assigned identity.
        :param str tenant_id: The tenant ID of the user assigned identity.
        """
        # all three properties are required
        for prop, value in (("client_id", client_id),
                            ("principal_id", principal_id),
                            ("tenant_id", tenant_id)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        """
        The clientId(aka appId) of the user assigned identity.
        """
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> str:
        """
        The principal ID of the user assigned identity.
        """
        return pulumi.get(self, "principal_id")

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> str:
        """
        The tenant ID of the user assigned identity.
        """
        return pulumi.get(self, "tenant_id")
@pulumi.output_type
class UserInfoResponse(dict):
"""
User who created.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "userAltSecId":
suggest = "user_alt_sec_id"
elif key == "userIdp":
suggest = "user_idp"
elif key == "userIss":
suggest = "user_iss"
elif key == "userName":
suggest = "user_name"
elif key == "userObjectId":
suggest = "user_object_id"
elif key == "userPuId":
suggest = "user_pu_id"
elif key == "userTenantId":
suggest = "user_tenant_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserInfoResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserInfoResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserInfoResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
user_alt_sec_id: Optional[str] = None,
user_idp: Optional[str] = None,
user_iss: Optional[str] = None,
user_name: Optional[str] = None,
user_object_id: Optional[str] = None,
user_pu_id: Optional[str] = None,
user_tenant_id: Optional[str] = None):
"""
User who created.
:param str user_alt_sec_id: A user alternate sec id. This represents the user in a different identity provider system Eg.1:live.com:puid
:param str user_idp: A user identity provider. Eg live.com
:param str user_iss: The issuer which issued the token for this user.
:param str user_name: A user's full name or a service principal's app ID.
:param str user_object_id: A user or service principal's object ID..
:param str user_pu_id: A user or service principal's PuID.
:param str user_tenant_id: A user or service principal's tenant ID.
"""
if user_alt_sec_id is not None:
pulumi.set(__self__, "user_alt_sec_id", user_alt_sec_id)
if user_idp is not None:
pulumi.set(__self__, "user_idp", user_idp)
if user_iss is not None:
pulumi.set(__self__, "user_iss", user_iss)
if user_name is not None:
pulumi.set(__self__, "user_name", user_name)
if user_object_id is not None:
pulumi.set(__self__, "user_object_id", user_object_id)
if user_pu_id is not None:
pulumi.set(__self__, "user_pu_id", user_pu_id)
if user_tenant_id is not None:
pulumi.set(__self__, "user_tenant_id", user_tenant_id)
    # Each getter below exposes one snake_case property backed by a camelCase
    # wire key (declared via @pulumi.getter(name=...)); values are read from
    # the dict-backed output object with pulumi.get.
    @property
    @pulumi.getter(name="userAltSecId")
    def user_alt_sec_id(self) -> Optional[str]:
        """
        A user alternate sec id. This represents the user in a different identity provider system Eg.1:live.com:puid
        """
        return pulumi.get(self, "user_alt_sec_id")
    @property
    @pulumi.getter(name="userIdp")
    def user_idp(self) -> Optional[str]:
        """
        A user identity provider. Eg live.com
        """
        return pulumi.get(self, "user_idp")
    @property
    @pulumi.getter(name="userIss")
    def user_iss(self) -> Optional[str]:
        """
        The issuer which issued the token for this user.
        """
        return pulumi.get(self, "user_iss")
    @property
    @pulumi.getter(name="userName")
    def user_name(self) -> Optional[str]:
        """
        A user's full name or a service principal's app ID.
        """
        return pulumi.get(self, "user_name")
    @property
    @pulumi.getter(name="userObjectId")
    def user_object_id(self) -> Optional[str]:
        """
        A user or service principal's object ID..
        """
        return pulumi.get(self, "user_object_id")
    @property
    @pulumi.getter(name="userPuId")
    def user_pu_id(self) -> Optional[str]:
        """
        A user or service principal's PuID.
        """
        return pulumi.get(self, "user_pu_id")
    @property
    @pulumi.getter(name="userTenantId")
    def user_tenant_id(self) -> Optional[str]:
        """
        A user or service principal's tenant ID.
        """
        return pulumi.get(self, "user_tenant_id")
@pulumi.output_type
class VirtualMachineImageResponse(dict):
    """
    Virtual Machine image for Windows AML Compute
    """
    def __init__(__self__, *,
                 id: str):
        """
        Virtual Machine image for Windows AML Compute
        :param str id: Virtual Machine image path
        """
        # NOTE: `id` intentionally shadows the builtin here — this is
        # auto-generated provider code and the name is part of the API.
        pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Virtual Machine image path
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class VirtualMachineResponse(dict):
"""
A Machine Learning compute based on Azure Virtual Machines.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "computeType":
suggest = "compute_type"
elif key == "isAttachedCompute":
suggest = "is_attached_compute"
elif key == "provisioningErrors":
suggest = "provisioning_errors"
elif key == "provisioningState":
suggest = "provisioning_state"
elif key == "computeLocation":
suggest = "compute_location"
elif key == "resourceId":
suggest = "resource_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in VirtualMachineResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: | |
<reponame>aguirguis/python-swiftclient<filename>test/unit/utils.py
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import sys
from requests import RequestException
from requests.structures import CaseInsensitiveDict
from time import sleep
import unittest
import mock
import six
import os
from six.moves import reload_module
from six.moves.urllib.parse import urlparse, ParseResult
from swiftclient import client as c
from swiftclient import shell as s
from swiftclient.utils import EMPTY_ETAG
def fake_get_auth_keystone(expected_os_options=None, exc=None,
                           storage_url='http://url/', token='<PASSWORD>',
                           **kwargs):
    """Build a stand-in for swiftclient's keystone auth function.

    The returned callable validates the os_options it receives against
    ``expected_os_options`` and any ``kwargs['required_kwargs']``, simulates
    the TLS failure modes encoded in the ``auth_url`` suffix, and on success
    returns ``(storage_url, token)``. If ``exc`` is given, it is raised
    immediately with the message ``'test'``.
    """
    def fake_get_auth_keystone(auth_url,
                               user,
                               key,
                               actual_os_options, **actual_kwargs):
        if exc:
            raise exc('test')
        # TODO: some way to require auth_url, user and key?
        if expected_os_options:
            # BUGFIX: the loop variable used to be named ``key``, silently
            # clobbering the ``key`` credential parameter above.
            for opt_name, opt_value in actual_os_options.items():
                if opt_value and opt_value != expected_os_options.get(opt_name):
                    return "", None
        if 'required_kwargs' in kwargs:
            for k, v in kwargs['required_kwargs'].items():
                if v != actual_kwargs.get(k):
                    return "", None
        # The auth_url suffix selects which certificate failure to simulate.
        if auth_url.startswith("https") and \
                auth_url.endswith("invalid-certificate") and \
                not actual_kwargs['insecure']:
            from swiftclient import client as c
            raise c.ClientException("invalid-certificate")
        if auth_url.startswith("https") and \
                auth_url.endswith("self-signed-certificate") and \
                not actual_kwargs['insecure'] and \
                actual_kwargs['cacert'] is None:
            from swiftclient import client as c
            raise c.ClientException("unverified-certificate")
        if auth_url.startswith("https") and \
                auth_url.endswith("client-certificate") and \
                not (actual_kwargs['cert'] and actual_kwargs['cert_key']):
            from swiftclient import client as c
            raise c.ClientException("noclient-certificate")
        return storage_url, token
    return fake_get_auth_keystone
class StubResponse(object):
    """
    Placeholder structure for use with fake_http_connect's code_iter to modify
    response attributes (status, body, headers) on a per-request basis.
    """

    def __init__(self, status=200, body='', headers=None):
        # ``headers or {}`` keeps instances from sharing a mutable default.
        self.status = status
        self.body = body
        self.headers = headers or {}

    def __repr__(self):
        # Mirror the constructor so a logged stub can be reconstructed.
        cls_name = self.__class__.__name__
        return '%s(%r, %r, %r)' % (cls_name, self.status, self.body,
                                   self.headers)
def fake_http_connect(*code_iter, **kwargs):
    """
    Generate a callable which yields a series of stubbed responses. Because
    swiftclient will reuse an HTTP connection across pipelined requests it is
    not always the case that this fake is used strictly for mocking an HTTP
    connection, but rather each HTTP response (i.e. each call to requests
    get_response).

    Each item of ``code_iter`` is either an int status or a StubResponse.
    Recognized kwargs (all optional): raise_exc, slow, headers, auth_v1,
    timestamps, etags, missing_container, body, give_content_type,
    give_connect.
    """
    # FakeConn closes over ``kwargs`` and ``container_ts_iter`` (bound below,
    # before any instance method runs) to shape per-response behavior.
    class FakeConn(object):
        def __init__(self, status, etag=None, body='', timestamp='1',
                     headers=None):
            self.status_code = self.status = status
            self.reason = 'Fake'
            self.scheme = 'http'
            self.host = '1.2.3.4'
            self.port = '1234'
            self.sent = 0
            self.received = 0
            self.etag = etag
            self.content = self.body = body
            self.timestamp = timestamp
            self.headers = headers or {}
            self.request = None
            self._closed = False
        def getresponse(self):
            # 'raise_exc' simulates a transport-level failure.
            if kwargs.get('raise_exc'):
                raise Exception('test')
            return self
        def getheaders(self):
            # Explicit headers win; otherwise synthesize a plausible default
            # Swift response header set.
            if self.headers:
                return self.headers.items()
            headers = {'content-length': str(len(self.body)),
                       'content-type': 'x-application/test',
                       'x-timestamp': self.timestamp,
                       'last-modified': self.timestamp,
                       'x-object-meta-test': 'testing',
                       'etag':
                       self.etag or '"%s"' % EMPTY_ETAG,
                       'x-works': 'yes',
                       'x-account-container-count': '12345'}
            if not self.timestamp:
                del headers['x-timestamp']
            try:
                # missing_container entries of False get a container timestamp.
                if next(container_ts_iter) is False:
                    headers['x-container-timestamp'] = '1'
            except StopIteration:
                pass
            if 'slow' in kwargs:
                headers['content-length'] = '4'
            if 'headers' in kwargs:
                headers.update(kwargs['headers'])
            if 'auth_v1' in kwargs:
                headers.update(
                    {'x-storage-url': 'storageURL',
                     'x-auth-token': '<PASSWORD>'})
            return headers.items()
        def read(self, amt=None):
            # 'slow' mode dribbles out 4 single-space bytes with 0.1s sleeps
            # before serving the real body, to exercise timeout handling.
            if 'slow' in kwargs:
                if self.sent < 4:
                    self.sent += 1
                    sleep(0.1)
                    return ' '
            rv = self.body[:amt]
            if amt is not None:
                self.body = self.body[amt:]
            else:
                self.body = ''
            return rv
        def send(self, amt=None):
            if 'slow' in kwargs:
                if self.received < 4:
                    self.received += 1
                    sleep(0.1)
        def getheader(self, name, default=None):
            return dict(self.getheaders()).get(name.lower(), default)
        def close(self):
            self._closed = True
    # Per-response attribute streams, one entry per stubbed status code.
    timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
    etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
    x = kwargs.get('missing_container', [False] * len(code_iter))
    if not isinstance(x, (tuple, list)):
        x = [x] * len(code_iter)
    container_ts_iter = iter(x)
    code_iter = iter(code_iter)
    def connect(*args, **ckwargs):
        # Optional hooks for observing what the client sent.
        if 'give_content_type' in kwargs:
            if len(args) >= 7 and 'Content-Type' in args[6]:
                kwargs['give_content_type'](args[6]['Content-Type'])
            else:
                kwargs['give_content_type']('')
        if 'give_connect' in kwargs:
            kwargs['give_connect'](*args, **ckwargs)
        status = next(code_iter)
        if isinstance(status, StubResponse):
            fake_conn = FakeConn(status.status, body=status.body,
                                 headers=status.headers)
        else:
            etag = next(etag_iter)
            timestamp = next(timestamps_iter)
            fake_conn = FakeConn(status, etag, body=kwargs.get('body', ''),
                                 timestamp=timestamp)
        # Non-positive status codes model a connection-level failure.
        if fake_conn.status <= 0:
            raise RequestException()
        return fake_conn
    # Expose the remaining responses so tests can verify all were consumed.
    connect.code_iter = code_iter
    return connect
class MockHttpTest(unittest.TestCase):
    # Base TestCase that wires fake_http_connect into swiftclient's
    # http_connection, records every request in self.request_log, and checks
    # at tearDown that every stubbed response was consumed.
    def setUp(self):
        super(MockHttpTest, self).setUp()
        self.fake_connect = None
        self.request_log = []
        # Capture output, since the test-runner stdout/stderr monkey-patching
        # won't cover the references to sys.stdout/sys.stderr in
        # swiftclient.multithreading
        # NOTE(review): CaptureOutput is defined elsewhere in this module.
        self.capture_output = CaptureOutput()
        if 'SWIFTCLIENT_DEBUG' not in os.environ:
            self.capture_output.__enter__()
            self.addCleanup(self.capture_output.__exit__)
        # since we're going to steal all stderr output globally; we should
        # give the developer an escape hatch or risk scorn
        def blowup_but_with_the_helpful(*args, **kwargs):
            raise Exception(
                "You tried to enter a debugger while stderr is "
                "patched, you need to set SWIFTCLIENT_DEBUG=1 "
                "and try again")
        import pdb
        pdb.set_trace = blowup_but_with_the_helpful
        # Factory a test calls to mock.patch swiftclient's http_connection;
        # each call resets the request log and installs a fresh response
        # stream built by fake_http_connect.
        def fake_http_connection(*args, **kwargs):
            self.validateMockedRequestsConsumed()
            self.request_log = []
            self.fake_connect = fake_http_connect(*args, **kwargs)
            _orig_http_connection = c.http_connection
            query_string = kwargs.get('query_string')
            storage_url = kwargs.get('storage_url')
            auth_token = kwargs.get('auth_token')
            exc = kwargs.get('exc')
            on_request = kwargs.get('on_request')
            def wrapper(url, proxy=None, cacert=None, insecure=False,
                        cert=None, cert_key=None,
                        ssl_compression=True, timeout=None):
                if storage_url:
                    self.assertEqual(storage_url, url)
                # Reuse the real parser so ``parsed`` matches production code.
                parsed, _conn = _orig_http_connection(url, proxy=proxy)
                class RequestsWrapper(object):
                    def close(self):
                        if hasattr(self, 'resp'):
                            self.resp.close()
                conn = RequestsWrapper()
                def request(method, path, *args, **kwargs):
                    try:
                        conn.resp = self.fake_connect()
                    except StopIteration:
                        self.fail('Unexpected %s request for %s' % (
                            method, path))
                    self.request_log.append((parsed, method, path, args,
                                             kwargs, conn.resp))
                    conn.host = conn.resp.host
                    conn.resp.request = RequestsWrapper()
                    conn.resp.request.url = '%s://%s%s' % (
                        conn.resp.scheme, conn.resp.host, path)
                    conn.resp.has_been_read = False
                    # Wrap read() so tests can check the body was consumed.
                    _orig_read = conn.resp.read
                    def read(*args, **kwargs):
                        conn.resp.has_been_read = True
                        return _orig_read(*args, **kwargs)
                    conn.resp.read = read
                    if on_request:
                        status = on_request(method, path, *args, **kwargs)
                        conn.resp.status = status
                    if auth_token:
                        headers = args[1]
                        self.assertEqual(auth_token,
                                         headers.get('X-Auth-Token'))
                    if query_string:
                        self.assertTrue(path.endswith('?' + query_string))
                    if path.endswith('invalid_cert') and not insecure:
                        from swiftclient import client as c
                        raise c.ClientException("invalid_certificate")
                    if exc:
                        raise exc
                    return conn.resp
                def putrequest(path, data=None, headers=None, **kwargs):
                    request('PUT', path, data, headers, **kwargs)
                conn.request = request
                conn.putrequest = putrequest
                def getresponse():
                    return conn.resp
                conn.getresponse = getresponse
                return parsed, conn
            return wrapper
        self.fake_http_connection = fake_http_connection
    def iter_request_log(self):
        # Yield one normalized dict per logged request (method, paths,
        # case-insensitive headers, response and status).
        for parsed, method, path, args, kwargs, resp in self.request_log:
            parts = parsed._asdict()
            parts['path'] = path
            full_path = ParseResult(**parts).geturl()
            args = list(args)
            log = dict(zip(('body', 'headers'), args))
            log.update({
                'method': method,
                'full_path': full_path,
                'parsed_path': urlparse(full_path),
                'path': path,
                'headers': CaseInsensitiveDict(log.get('headers')),
                'resp': resp,
                'status': resp.status,
            })
            yield log
    # Keep the unpatched assertEqual so the comparisons below still work even
    # if a subclass overrides assertEqual.
    orig_assertEqual = unittest.TestCase.assertEqual
    def assert_request_equal(self, expected, real_request):
        # ``expected`` is (method, path[, body[, headers]]); absolute expected
        # paths match against the full URL, relative ones against the path.
        method, path = expected[:2]
        if urlparse(path).scheme:
            match_path = real_request['full_path']
        else:
            match_path = real_request['path']
        self.assertEqual((method, path), (real_request['method'],
                                          match_path))
        if len(expected) > 2:
            body = expected[2]
            real_request['expected'] = body
            err_msg = 'Body mismatch for %(method)s %(path)s, ' \
                'expected %(expected)r, and got %(body)r' % real_request
            self.orig_assertEqual(body, real_request['body'], err_msg)
        if len(expected) > 3:
            headers = CaseInsensitiveDict(expected[3])
            for key, value in headers.items():
                real_request['key'] = key
                real_request['expected_value'] = value
                real_request['value'] = real_request['headers'].get(key)
                err_msg = (
                    'Header mismatch on %(key)r, '
                    'expected %(expected_value)r and got %(value)r '
                    'for %(method)s %(path)s %(headers)r' % real_request)
                self.orig_assertEqual(value, real_request['value'],
                                      err_msg)
            # Any header sent but not expected is a failure too.
            real_request['extra_headers'] = dict(
                (key, value) for key, value in real_request['headers'].items()
                if key not in headers)
            if real_request['extra_headers']:
                self.fail('Received unexpected headers for %(method)s '
                          '%(path)s, got %(extra_headers)r' % real_request)
    def assertRequests(self, expected_requests):
        """
        Make sure some requests were made like you expected, provide a list of
        expected requests, typically in the form of [(method, path), ...]
        or [(method, path, body, headers), ...]
        """
        real_requests = self.iter_request_log()
        for expected in expected_requests:
            real_request = next(real_requests)
            self.assert_request_equal(expected, real_request)
        # Exact match required: any leftover logged request is a failure.
        try:
            real_request = next(real_requests)
        except StopIteration:
            pass
        else:
            self.fail('At least one extra request received: %r' %
                      real_request)
    def assert_request(self, expected_request):
        """
        Make sure a request was made as expected. Provide the
        expected request in the form of [(method, path), ...]
        """
        real_requests = self.iter_request_log()
        for real_request in real_requests:
            try:
                self.assert_request_equal(expected_request, real_request)
                break
            except AssertionError:
                pass
        else:
            raise AssertionError(
                "Expected request %s not found in actual requests %s"
                % (expected_request, self.request_log)
            )
    def validateMockedRequestsConsumed(self):
        # Fail if the test stubbed more responses than it actually used.
        if not self.fake_connect:
            return
        unused_responses = list(self.fake_connect.code_iter)
        if unused_responses:
            self.fail('Unused responses %r' % (unused_responses,))
    def tearDown(self):
        self.validateMockedRequestsConsumed()
        super(MockHttpTest, self).tearDown()
        # TODO: this nuke from orbit clean up seems to be encouraging
        # un-hygienic mocking on the swiftclient.client module; which may lead
        # to some unfortunate test order dependency bugs by way of the broken
        # window theory if any other modules are similarly patched
        reload_module(c)
class CaptureStreamPrinter(object):
"""
CaptureStreamPrinter is used for testing unicode writing for PY3. Anything
written here is encoded as | |
return G.internal_scaling_dimension(node)
    def __connectivity_func(self, G, node):
        # Thin adapter: per-node connectivity dimension of graph G.
        return G.connectivity_dimension(node)
    def __kcoreness_func(self, G, node, pre_dic):
        # Adapter over a precomputed {node: coreness} dict.
        return pre_dic[node]
    def __triangles(self, big_graph, node):
        # Triangles through `node`, recovered from the clustering coefficient:
        # clustering = triangles / (deg * (deg - 1) / 2).
        #if 'triangles' in self.__params and len(self.__params['triangles']) == self.number_of_nodes():
        # return self.__params['triangles'][node]
        deg = self.degree(node)
        clust = nx.clustering(big_graph, node)
        return int( clust * deg * (deg-1) / 2 )
def show(self, mode=None):
if not mode:
nx.draw(self)
elif mode == 'random':
nx.draw_random(self)
elif mode == 'circular':
nx.draw_circular(self)
elif mode == 'spectral':
nx.draw_spectral(self)
plt.show()
    def random_edges(self, k=1, data=False):
        """Choose k random edges uniformly from graph.
        For undirected graphs this might produce duplicates
        since each edge is considered twice, once for each
        representation u-v and v-u. Duplicates can be removed by
        using set(random_edges()).
        Extracted from Eric Hagberg post:
        http://groups.google.com/group/networkx-discuss/browse_thread/thread/a87dd6ca7063a778?pli=1
        """
        # random_items is a module-level sampling helper defined elsewhere
        # in this file.
        return random_items(self.edges(data=data), k=k)
def random_nodes(self, k=1):
"""Choose k random nodes uniformly from graph.
"""
ret = []
use_random = True
for node, _ in self.get_parameter_cache_iter('degree', random=use_random):
ret.append( node )
return ret
#return random_items(self.nodes_iter(), k=k)
def lookahead_edges(self, nbunch, lookahead):
nbunch = [n for n in nbunch if n in self.nodes()]
edge_bunch_list = [self.edges(nbunch)]
for _ in range(lookahead - 1):
new_nodes = [d for _, d in edge_bunch_list[-1]]
edge_bunch_list.append(self.edges(new_nodes))
ret = set([])
for edge_set in edge_bunch_list:
ret = ret.union(edge_set)
return ret
    def diameter(self):
        # Graph diameter via networkx; this file is Python 2 (print
        # statements), so the prints are statements, not calls.
        if self.debug:
            print 'INFO: computing graph diameter...'
        dia = nx.diameter(self)
        if self.debug:
            print 'INFO: done computing graph diameter.'
        return dia
    def internal_scaling_dimension_iter(self, node, diameter=100):
        # Lazy variant: yields dimension estimates per radius.
        return self.__dimension_iter(node, self.internal_scaling_growth_iter, diameter)
    def internal_scaling_dimension(self, node, diameter=100):
        # Eager variant: returns the full list of dimension estimates.
        return self.__dimension(node, self.internal_scaling_growth, diameter)
    def __dimension_iter(self, node, growth_func, diameter=None):
        # Dimension estimate at radius l: log(growth(l)) / log(l).
        # -1.0 is a sentinel for undefined values (zero growth or l <= 1).
        if not diameter:
            if not self.cached_diameter:
                self.cached_diameter = self.diameter()
            diameter = self.cached_diameter
        growth = growth_func( node, diameter )
        for g, l in izip(growth,range(diameter)):
            if g == 0 or l <= 1:
                yield -1.0
            else:
                yield log(g)/log(l)
    def __dimension(self, node, growth_func, diameter=None):
        # Eager twin of __dimension_iter: same math, returns a list.
        if not diameter:
            if not self.cached_diameter:
                self.cached_diameter = self.diameter()
            diameter = self.cached_diameter
        growth = growth_func( node, diameter )
        ret = []
        for g, l in izip(growth,range(diameter)):
            if g == 0 or l <= 1:
                ret.append( -1.0 )
            else:
                ret.append( log(g)/log(l) )
        return ret
def internal_scaling_growth_iter(self, node, diameter=None):
nodes = set([node])
visited_nodes = set([])
yield 1
if not diameter:
if not self.cached_diameter:
self.cached_diameter = self.diameter()
diameter = self.cached_diameter
prev = None
if diameter:
diameter -= 1
for _ in range( diameter ):
new_edges = self.edges(nodes)
visited_nodes.union( nodes )
new_nodes = set([])
for v, w in new_edges:
if not w in visited_nodes:
new_nodes.add( w )
if not v in visited_nodes:
new_nodes.add( v )
if not prev:
prev = len(visited_nodes) + len(new_nodes)
elif prev == len(visited_nodes) + len(new_nodes):
break
else:
prev = len(visited_nodes) + len(new_nodes)
if self.debug:
#print 'internal scaling growth (iter) : %d' % (len(visited_nodes) + len(new_nodes) )
pass
yield len(visited_nodes) + len(new_nodes)
nodes = new_nodes
def internal_scaling_growth(self, node, diameter=None):
nodes = set([node])
visited_nodes = set([])
ret = []
ret.append( 1 )
if not diameter:
if not self.cached_diameter:
self.cached_diameter = self.diameter()
diameter = self.cached_diameter
if (node,diameter) in self.cache_internal_growth:
if self.debug:
print 'INFO: using cached internal_growth'
return self.cache_internal_growth[(node,diameter)]
prev = None
for _ in range( diameter - 1 ):
new_edges = self.edges(nodes)
visited_nodes.union( nodes )
new_nodes = set([])
for v, w in new_edges:
if not w in visited_nodes:
new_nodes.add( w )
if not v in visited_nodes:
new_nodes.add( v )
if not prev:
prev = len(visited_nodes) + len(new_nodes)
elif prev == len(visited_nodes) + len(new_nodes):
break
else:
prev = len(visited_nodes) + len(new_nodes)
if self.debug:
#print 'internal scaling growth : %d' % (len(visited_nodes) + len(new_nodes) )
pass
ret.append( len(visited_nodes) + len(new_nodes) )
nodes = new_nodes
if self.debug:
print 'INFO: caching internal growth for node %s and diameter %d' % (str(node),diameter)
self.cache_internal_growth[(node,diameter)] = ret
return ret
    def connectivity_dimension_iter(self, node, diameter=100):
        # Lazy dimension estimate based on connectivity growth.
        return self.__dimension_iter(node, self.connectivity_growth_iter, diameter)
    def connectivity_dimension(self, node, diameter=100):
        # Eager dimension estimate based on connectivity growth.
        return self.__dimension(node, self.connectivity_growth, diameter)
def connectivity_growth_iter(self, node, diameter=None):
internal_growth = self.internal_scaling_growth_iter(node, diameter)
prev = None
for i in internal_growth:
if not prev:
prev = i
else:
yield i - prev
yield 0
def connectivity_growth(self, node, diameter=None):
internal_growth = self.internal_scaling_growth(node, diameter)
prev = None
ret = []
for i in internal_growth:
if not prev:
prev = i
else:
ret.append( i - prev )
ret.append( 0 )
return ret
    def compressed_by_degree_graph(self, use_big_alphabet=True):
        # Relabel nodes 0..N-1 by descending degree (smaller labels for hubs)
        # and optionally re-encode labels into a wide alphabet via Base().
        # Python 2 idioms throughout: zip returns a list, list.sort takes a
        # cmp function, and ``cmp``/print statements are py2-only.
        orig_max_nodes_analysis = self.max_nodes_analysis
        self.max_nodes_analysis = self.number_of_nodes()
        encoding = zip(self.nodes_iter(), self.degrees_iter())
        encoding.sort( lambda x, y: cmp(x[1],y[1]) )
        encoding.reverse()
        if use_big_alphabet:
            # Base / identity are helpers defined elsewhere in this module.
            base = Base()
            base_enc = base.num2base
        else:
            base_enc = identity
        # Map each node to its rank in the degree ordering.
        encoding = dict( zip( [t[0] for t in encoding], range(len(encoding)) ) )
        new_graph = Graph()
        if self.debug:
            print 'encoding nodes...'
        for node in self.nodes_iter():
            new_graph.add_node( base_enc( encoding[node] ) )
        if self.debug:
            print 'encoding edges...'
        for v, w in self.edges_iter():
            new_graph.add_edge( base_enc( encoding[v] ), base_enc( encoding[w] ), )
        self.max_nodes_analysis = orig_max_nodes_analysis
        return new_graph
    def save_compressed_graph(self, outfile, use_big_alphabet=True):
        # Serialize the degree-relabeled graph as an edge list, zlib-compress
        # it at max level, base64 it, and write it as a Python module that
        # defines ``compressed_graph``.
        g2 = self.compressed_by_degree_graph(use_big_alphabet)
        output = StringIO()
        g2.save_edgelist(output)
        cont = output.getvalue()
        output.close()
        comp_cont = zlib.compress( cont, 9 )
        enc_comp_cont = base64.b64encode( comp_cont )
        if outfile == str(outfile):
            outfile = open(outfile,'w')
        outfile.write( "compressed_graph = '''\n%s\n'''" % enc_comp_cont )
    def load_compressed_graph(self, module, use_big_alphabet=True, has_num=True):
        # Inverse of save_compressed_graph: ``module`` is an imported module
        # exposing the ``compressed_graph`` base64 string.
        enc_comp_cont = module.compressed_graph.strip()
        comp_cont = base64.b64decode( enc_comp_cont )
        cont = zlib.decompress(comp_cont)
        self.load_edgelist(StringIO(cont), has_num, use_big_alphabet)
    def bigger_component(self):
        # Return a new Graph holding the largest connected component.
        # NOTE(review): nx.connected_component_subgraphs was removed in
        # networkx 2.4 — this code targets an old (py2-era) networkx.
        #if nx.is_connected(self):
        # return self
        graph = Graph()
        graph.add_edges_from(nx.connected_component_subgraphs(self)[0].edges_iter())
        return graph
    def add_bigger_component_to(self, graph):
        # Copy the edges of this graph's largest component into ``graph``.
        #if nx.is_connected(self):
        # return self
        print 'nx.connected_components(self) ...'
        nx.connected_components(self)
        graph.add_edges_from(self.edges_iter(nx.connected_components(self)[0]))
        return graph
    def connected_components(self):
        # Thin delegation to networkx.
        return nx.connected_components(self)
    def save_bigger_component(self, filename):
        # Convenience: extract the largest component and dump its edge list.
        graph = self.bigger_component()
        graph.save_edgelist(filename)
# Indexed parameters methods.
def check_parameter_name(self, parameter_name):
# check that the parameter name is ok
findings = re.findall('[a-z_]+[a-z_0-9]*', parameter_name)
if len(findings)==0 or len(findings[0]) != len(parameter_name):
raise GraphException('Error: bad parameter name, only [a-z_]+ allowed!')
    def add_parameter_cache(self, parameter_name):
        # Create an empty per-node value table for ``parameter_name``.
        # ``self.__params`` is name-mangled private state of this class.
        self.check_parameter_name(parameter_name)
        if not parameter_name in self.__params:
            self.__params[parameter_name] = IndexedTable()
    def has_parameter_cache(self, parameter_name):
        return parameter_name in self.__params
    def remove_parameter_cache(self, parameter_name):
        self.check_parameter_name(parameter_name)
        del self.__params[parameter_name]
    def index_parameter_cache(self, parameter_name):
        # No-op placeholder: IndexedTable needs no explicit index here, but
        # the hook is kept for interface compatibility.
        self.check_parameter_name(parameter_name)
        pass
    def remove_index_parameter_cache(self, parameter_name):
        # No-op placeholder, see index_parameter_cache.
        self.check_parameter_name(parameter_name)
        pass
def check_float(self, value):
try:
return float(value)
except:
raise GraphException('Error: value %s is not a floating-point or equivalent number!' % str(value))
    def insert_parameter_cache(self, param_name, node, value):
        # Store a float value for ``node`` without membership validation.
        value = self.check_float(value)
        self.__params[param_name][node] = value
    def update_parameter_cache(self, param_name, node, value):
        '''
        Like insert_parameter_cache, but verifies the node belongs to this
        graph first.
        '''
        #self.check_parameter_name(param_name)
        #self.check_node(node)
        value = self.check_float(value)
        if not self.has_node(node):
            raise GraphException('Error: node %s not in BigGraph instance!' % str(node) )
        self.__params[param_name][node] = value
    def dec_parameter_cache(self, param_name, node):
        # Decrement, clamping at zero (values never go negative).
        old_val = self.__params[param_name][node]
        self.__params[param_name][node] = (old_val - 1 > 0) and (old_val - 1) or 0
    def inc_parameter_cache(self, param_name, node):
        old_val = self.__params[param_name][node]
        self.__params[param_name][node] = old_val + 1
    def get_parameter_cache(self, param_name, node):
        # None signals "no cached value" for this node.
        if node in self.__params[param_name]:
            return self.__params[param_name][node]
        else:
            return None
    def get_max_value_parameter_cache(self, param_name):
        # NOTE(review): the message typo ('implemmented') is a runtime string
        # and is left untouched here.
        raise GraphException('Not implemmented!')
    def get_sum_value_parameter_cache(self, param_name):
        # Sum of value * multiplicity over the distinct values of the table;
        # preimage_size(v) is the number of nodes mapped to v.
        print 'summing table ...'
        ret = 0
        indexed_table = self.__params[param_name]
        for item in indexed_table.items():
            #print 'item', item
            #print 'indexed_table.preimage_size(item)', indexed_table.preimage_size(item)
            ret += indexed_table.preimage_size(item) * item
            #print 'unseen_triangles', ret / 3
            #print 'total_triangles', self.total_triangles()
            #print '-'*50
        print 'end summing table ...'
        return ret
    def get_parameter_cache_inverse(self, param_name, value):
        # Generator over all nodes whose cached value equals ``value``.
        for node in self.__params[param_name].preimage(value):
            yield node
    def get_parameter_cache_inverse_between(self, param_name, lower, upper):
        raise GraphException('Not implemmented!')
    def get_parameter_cache_inverse_count(self, param_name, value):
        return self.__params[param_name].preimage_size(value)
    def get_parameter_cache_iter(self, param_name, random=False, ascending=False):
        # Yield (node, value) pairs; iteration order is controlled by the
        # IndexedTable (randomized or sorted by value).
        self.check_parameter_name(param_name)
        for node, value in self.__params[param_name].iterate(random, ascending):
            yield node, value
    def create_indices(self):
        # Hook for subclasses; no global indices needed here.
        pass
    def create_index_degree(self):
        # index_parameter_generic is defined elsewhere in this class.
        self.index_parameter_generic('degree', self.degrees_iter)
    def create_index_unseen_degree(self):
        self.index_parameter_generic('unseen_degree', self.degrees_iter)
    def remove_degree_cache(self):
        self.remove_parameter_cache('degree')
    def create_index_clustering(self):
        self.index_parameter_generic('clustering', self.clustering_indices_iter)
    def create_index_triangles(self):
        self.index_parameter_generic('triangles', self.triangles_iter)
    def create_index_knn(self):
        self.index_parameter_generic('knn', self.average_neighbor_degrees_iter)
    def create_index_kcores(self):
        self.index_parameter_generic('shell', self.kcoreness_iter)
def | |
<reponame>ellenjkr/LattesXML2PDF
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.shared import Pt
from docx.shared import RGBColor
class WordFile():
    def __init__(self, resume):
        """Build the whole .docx document from a parsed Lattes ``resume``.

        Copies every section of the resume onto the instance, then emits the
        sections in fixed order. Section headings are Brazilian-Portuguese
        runtime strings and must not be translated.
        """
        super(WordFile, self).__init__()
        self.presentation = resume.presentation
        self.abstract = resume.abstract
        self.identification = resume.identification
        self.address = resume.address
        self.academic_titles = resume.academic_titles
        self.complementary_courses = resume.complementary_courses
        self.professional_activities_list = resume.professional_activities_list
        self.lines_of_research = resume.lines_of_research
        self.projects_dict = resume.projects_dict
        self.other_professional_activities_dict = resume.other_professional_activities_dict
        self.areas_of_expertise = resume.areas_of_expertise
        self.languages = resume.languages
        self.awards = resume.awards
        self.bibliographic_productions_dict = resume.bibliographic_productions_dict
        self.technical_productions_dict = resume.technical_productions_dict
        self.orientations = resume.orientations
        self.document = Document()
        self.define_style()
        # Emit each top-level section; several add_* helpers are defined
        # further down in this class (or outside this view).
        self.document.add_heading("Apresentação", 0) # Add a section
        self.add_presentation()
        self.add_abstract()
        self.add_identification()
        self.add_address()
        self.add_academic_titles()
        self.add_complementary_courses()
        self.document.add_heading("Atuação Profissional", 0) # Add a section
        self.add_professional_activities()
        self.document.add_heading("Linhas de pesquisa", 0) # Add a section
        self.add_lines_of_research()
        self.add_projects()
        self.add_other_professional_activities()
        self.document.add_heading("Áreas de atuação", 0) # Add a section
        self.add_numbered_table(self.areas_of_expertise)
        self.document.add_heading("Idiomas", 0) # Add a section
        self.add_languages()
        self.document.add_heading("Prêmios e títulos", 0) # Add a section
        self.add_awards()
        self.document.add_heading("Produções", 0) # Add a section
        self.add_productions(self.bibliographic_productions_dict, "Produção bibliográfica")
        self.add_productions(self.technical_productions_dict, "Produção técnica")
        self.document.add_heading("Orientações", 0) # Add a section
        self.add_orientations()
def define_style(self):
style = self.document.styles['Normal']
font = style.font
font.name = 'Arial'
font.size = Pt(10)
    def add_presentation(self):
        """Render the presentation section: first item as the name heading,
        remaining items as tightly-spaced paragraphs."""
        self.document.add_heading(self.presentation[0], 1) # Add the name as a title
        for item in self.presentation[1:]: # Add the other items
            paragraph = self.document.add_paragraph(item)
            # Tighten vertical spacing around each presentation line.
            paragraph_format = paragraph.paragraph_format
            paragraph_format.space_before = Pt(4)
            paragraph_format.space_after = Pt(4)
def add_abstract(self):
self.document.add_heading("Resumo", 1) # Add "Resumo" as a title
self.abstract = self.abstract[0].upper() + self.abstract[1:] # First letter uppercased
paragraph = self.document.add_paragraph(self.abstract)
# Format paragraph
paragraph_format = paragraph.paragraph_format
paragraph_format.line_spacing = Pt(15)
paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
    def add_identification(self):
        """Render the identification section as a two-column key/value table."""
        self.document.add_heading("Identificação", 1) # Add "Identificação" as a title
        table = self.document.add_table(rows=0, cols=2)
        for key, value in self.identification.items():
            row_cells = table.add_row().cells
            row_cells[0].text = key
            row_cells[1].text = value
    def add_address(self):
        """Render the address section: label on the first row only, one
        address line per row in the second column."""
        self.document.add_heading("Endereço", 1) # Add "Endereço" as a title
        table = self.document.add_table(rows=0, cols=2)
        row_cells = table.add_row().cells
        row_cells[0].text = "Endereço Profissional"
        # NOTE(review): assumes self.address is non-empty — confirm upstream.
        row_cells[1].text = self.address[0]
        for value in self.address[1:]:
            row_cells = table.add_row().cells
            row_cells[1].text = value
    def add_academic_titles(self):
        """Render the academic degrees section: year range on the left,
        degree details (title, institution, optional project/advisor/
        scholarship/keywords) on the right."""
        self.document.add_heading("Formação acadêmica/titulação", 1) # Add "Formação acadêmica/titulação" as a title
        table = self.document.add_table(rows=0, cols=2) # Create table
        for pos, academic_title in enumerate(self.academic_titles['academic_title']):
            row_cells = table.add_row().cells # Get cells from row
            row_cells[0].width = Pt(80) # Make the first cell smaller
            paragraph = row_cells[0].paragraphs[0] # Get the paragraph
            paragraph.add_run(self.academic_titles['year_range'][pos]).bold = True # Add the year range and make it bold
            run = paragraph.runs[0]
            font = run.font
            font.color.rgb = RGBColor.from_string('0b306b')
            row_cells[1].width = Pt(480) # Make the second cell bigger
            academic_title_paragraph = row_cells[1].paragraphs[0] # Get the cell first paragraph
            academic_title_paragraph.text = academic_title # Add the academic title to the first paragraph
            institution_paragraph = row_cells[1].add_paragraph() # Add a second paragraph
            institution_paragraph.text = self.academic_titles['institution'][pos] # Add the institution to the paragraph
            if self.academic_titles['project_title'][pos] != "": # If it has a project title
                project_title_paragraph = row_cells[1].add_paragraph() # Add the project title paragraph
                project_title_paragraph.text = self.academic_titles['project_title'][pos] # Add the project title content
            if self.academic_titles['advisor'][pos] != "": # If it has an advisor
                advisor_paragraph = row_cells[1].add_paragraph() # Add the advisor paragraph
                advisor_paragraph.text = self.academic_titles['advisor'][pos] # Add the advisor content
            if self.academic_titles['scholarship'][pos] != "": # If it has a scholarship
                scholarship_paragraph = row_cells[1].add_paragraph() # Add the scholarship paragraph
                scholarship_paragraph.text = self.academic_titles['scholarship'][pos] # Add the scholarship content
            if self.academic_titles['key_words'][pos] != "": # If it has keywords
                keywords_paragraph = row_cells[1].add_paragraph() # Add the keywords paragraph
                keywords_paragraph.text = self.academic_titles['key_words'][pos] # Add the keywords content
    def add_complementary_courses(self):
        """Render complementary courses: year range on the left, course name
        and institution on the right."""
        self.document.add_heading("Formação Complementar", 1) # Add "Formação Complementar" as a title
        table = self.document.add_table(rows=0, cols=2) # Create table
        for pos, course in enumerate(self.complementary_courses['course_name']):
            row_cells = table.add_row().cells # Get cells from row
            row_cells[0].width = Pt(80) # Make the first cell smaller
            paragraph = row_cells[0].paragraphs[0] # Get the paragraph
            paragraph.add_run(self.complementary_courses['year_range'][pos]).bold = True # Add the year range and make it bold
            run = paragraph.runs[0]
            font = run.font
            font.color.rgb = RGBColor.from_string('0b306b')
            row_cells[1].width = Pt(480) # Make the second cell bigger
            course_paragraph = row_cells[1].paragraphs[0] # Get the cell first paragraph
            course_paragraph.text = course # Add the course name to the first paragraph
            institution_paragraph = row_cells[1].add_paragraph() # Add a second paragraph
            institution_paragraph.text = self.complementary_courses['institution'][pos] # Add the institution to the paragraph
    def set_cell_background(self, cell, fill, color=None, val=None):
        """Set the shading (background colour) of a table cell.

        Works by editing the cell's underlying WordprocessingML directly,
        since python-docx has no high-level API for cell shading.

        Parameters:
            cell: a python-docx table cell whose background is to be set.
            fill: hex RGB string (e.g. '0b306b') for the w:shd fill attribute.
            color, val: reserved for the w:color / w:val shading attributes;
                currently accepted but ignored (TODO below).
        """
        from docx.oxml.shared import qn  # feel free to move these out
        from docx.oxml.xmlchemy import OxmlElement
        cell_properties = cell._element.tcPr  # NOTE: relies on the private lxml element of the cell
        try:
            cell_shading = cell_properties.xpath('w:shd')[0]  # in case there's already shading
        except IndexError:
            cell_shading = OxmlElement('w:shd')  # add new w:shd element to it
        if fill:
            cell_shading.set(qn('w:fill'), fill)  # set fill property, respecting namespace
        if color:
            pass  # TODO
        if val:
            pass  # TODO
        cell_properties.append(cell_shading)  # finally extend cell props with shading element
def add_subsection(self, subsection):
table = self.document.add_table(rows=0, cols=1) # Create table row
row_cells = table.add_row().cells # Get cells from first row
paragraph = row_cells[0].paragraphs[0] # Get the paragraph
paragraph_format = paragraph.paragraph_format # Get the paragraph format
# Adjust space before and after
paragraph_format.space_before = Pt(3)
paragraph_format.space_after = Pt(3)
# Add a new run to the paragraph, make the text bold and white and change the size and the background color
paragraph.add_run(subsection).bold = True # Add a number for each publication and make it bold
run = paragraph.runs[0]
font = run.font
font.size = Pt(13)
font.color.rgb = RGBColor.from_string('FFFFFF')
self.set_cell_background(row_cells[0], '0b306b')
def add_professional_activities(self):
for item in self.professional_activities_list:
self.add_subsection(item['institution'])
bonds_paragraph = self.document.add_paragraph()
bonds_paragraph.add_run('Vínculo institucional').bold = True # Add the year range for each bond and make it bold
run = bonds_paragraph.runs[0]
font = run.font
font.size = Pt(12)
# Format paragraph
bonds_paragraph_format = bonds_paragraph.paragraph_format
bonds_paragraph_format.space_before = Pt(4)
bonds_paragraph_format.space_after = Pt(4)
table = self.document.add_table(rows=0, cols=2) # Create table
for bond in item['Vínculo institucional']:
row_cells = table.add_row().cells # Get cells from row
row_cells[0].width = Pt(120) # Make the first cell smaller
paragraph = row_cells[0].paragraphs[0] # Get the paragraph
paragraph.add_run(bond['year_range']).bold = True # Add the year range for each bond and make it bold
run = paragraph.runs[0]
font = run.font
font.color.rgb = RGBColor.from_string('0b306b')
row_cells[1].width = Pt(440) # Make the second cell bigger
bond_paragraph = row_cells[1].paragraphs[0] # Get the cell first paragraph
bond_paragraph.text = bond['content'] # Add the bond content to the paragraph
if len(bond['content']) >= 70:
bond_paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
if 'second_line' in bond.keys():
row_cells = table.add_row().cells # Get cells from row
row_cells[0].width = Pt(120) # Make the first cell smaller
paragraph = row_cells[0].paragraphs[0] # Get the paragraph
paragraph.add_run(bond['second_line']).bold = True # Add the year range for each bond and make it bold
run = paragraph.runs[0]
font = run.font
font.color.rgb = RGBColor.from_string('0b306b')
row_cells[1].width = Pt(440) # Make the second cell bigger
second_line_paragraph = row_cells[1].paragraphs[0] # Get the cell first paragraph
second_line_paragraph.text = bond['second_line_content'] # Add the bond content to the paragraph
if len(bond['second_line_content']) >= 70:
second_line_paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
if item['Atividades'] is not None:
activities_paragraph = self.document.add_paragraph()
activities_paragraph.add_run('Atividades').bold = True # Add the year range for each bond and make it bold
run = activities_paragraph.runs[0]
font = run.font
font.size = Pt(12)
activities_paragraph_format = activities_paragraph.paragraph_format
activities_paragraph_format.space_before = Pt(4)
activities_paragraph_format.space_after = Pt(4)
table2 = self.document.add_table(rows=0, cols=2) # Create table
for activity in item['Atividades']:
row_cells2 = table2.add_row().cells # Get cells from row
row_cells2[0].width = Pt(120) # Make the first cell smaller
paragraph2 = row_cells2[0].paragraphs[0] # Get the paragraph
paragraph2.add_run(activity['year_range']).bold = True # Add the year range for each activity and make it bold
run2 = paragraph2.runs[0]
font2 = run2.font
font2.color.rgb = RGBColor.from_string('0b306b')
row_cells2[1].width = Pt(440) # Make the second cell bigger
activity_paragraph = row_cells2[1].paragraphs[0] # Get the cell first paragraph
activity_paragraph.text = activity['content'] # Add the activity content to the paragraph
def add_lines_of_research(self):
table = self.document.add_table(rows=0, cols=2) # Create table
for pos, research in enumerate(self.lines_of_research['title']):
row_cells = table.add_row().cells # Get cells from row
row_cells[0].width = 5 # Make the first cell smaller
paragraph = row_cells[0].paragraphs[0] # Get the paragraph
paragraph.add_run(str(pos + 1)).bold = True # Add a number for each research and make it bold
run = paragraph.runs[0]
font = run.font
font.color.rgb = RGBColor.from_string('0b306b')
row_cells[1].width = Pt(500) # Make the second cell bigger
title_paragraph = row_cells[1].paragraphs[0] # Get the cell first paragraph
title_paragraph.text = research # Add the title to the first paragraph
goals_paragraph = row_cells[1].add_paragraph() # Add a second paragraph
goals_paragraph.text = self.lines_of_research['goals'][pos] # Add the goals to the paragraph
goals_paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY # Justify the paragraph
if self.lines_of_research['key_words'][pos] != "": # If it has keywords
keywords_paragraph = row_cells[1].add_paragraph() # Add the keywords paragraph
keywords_paragraph.text = self.lines_of_research['key_words'][pos] # Add the keywords to the paragraph
def add_projects(self):
for project_nature in self.projects_dict.keys():
self.document.add_heading(project_nature, 0) # Add a section
projects_list = self.projects_dict[project_nature]
table = self.document.add_table(rows=0, cols=2) # Create table
for pos, research in enumerate(projects_list['title']):
row_cells = table.add_row().cells # Get cells from row
row_cells[0].width = Pt(80) # Make the first cell smaller
paragraph = row_cells[0].paragraphs[0] # Get the paragraph
paragraph.add_run(projects_list['year_range'][pos]).bold = True # Add a number for each research and make it bold
run = paragraph.runs[0]
font = run.font
font.color.rgb = RGBColor.from_string('0b306b')
row_cells[1].width = Pt(480) # Make the second cell bigger
title_paragraph = row_cells[1].paragraphs[0] # Get the cell first paragraph
title_paragraph.text = research # Add the title to the first paragraph
description_paragraph = row_cells[1].add_paragraph() # Add a description paragraph
description_paragraph.text = projects_list['description'][pos] # Add the description paragraph content
description_paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY # Justify the paragraph
# Format description paragraph
paragraph_format = description_paragraph.paragraph_format
paragraph_format.space_after = Pt(2)
sit_nat_paragraph = row_cells[1].add_paragraph() # Add a situation/nature paragraph
sit_nat_paragraph.text = projects_list['situation/nature'][pos] # Add the situation/nature paragraph content
# Format situation/nature paragraph
paragraph_format = sit_nat_paragraph.paragraph_format
paragraph_format.space_before = Pt(0)
paragraph_format.space_after = Pt(2)
students_paragraph = row_cells[1].add_paragraph() # Add a students paragraph
students_paragraph.text = projects_list['students'][pos] # Add the students paragraph content
students_paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY # Justify the paragraph
members_paragraph = row_cells[1].add_paragraph() # Add a members paragraph
members_paragraph.text = projects_list['members'][pos] # Add the members paragraph content
if len(projects_list['members'][pos]) >= 70:
members_paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY # Justify the paragraph
# Format members paragraph
paragraph_format = members_paragraph.paragraph_format
paragraph_format.space_after = Pt(2)
if projects_list['financiers'][pos] is not None: # If there's financiers
financiers_paragraph = row_cells[1].add_paragraph() # Add a financiers paragraph
financiers_paragraph.text = | |
# a string consisting of characters that are valid identifiers in both
# Python 2 and Python 3
import string

# characters that are valid in identifiers in both Python 2 and Python 3
valid_ident = string.ascii_letters + string.digits + "_"


def str_to_identifier(s):
    """Convert a "bytes" to a valid (in Python 2 and 3) identifier.

    Periods are dropped entirely, any other invalid character becomes an
    underscore, and a leading underscore is prepended when the result would
    otherwise be empty or start with a digit.
    """
    # convert str/bytes to unicode string
    s = s.decode()

    def filter_chars(s):
        for c in s:
            # periods are used for abbreviations and look ugly when converted
            # to underscore, so filter them out completely
            if c == ".":
                yield ""
            elif c in valid_ident:
                # NOTE: "_" is already in valid_ident, so no extra check needed
                yield c
            else:
                yield "_"

    ident = ''.join(filter_chars(s))
    # FIX: check the *filtered* result rather than the raw input -- the raw
    # first character may have been dropped (e.g. b".9x" used to produce
    # "9x", an invalid identifier) -- and guard against empty input, which
    # previously raised IndexError.
    if not ident or ident[0] in string.digits:
        ident = "_" + ident
    return ident
class Param(object):
    """A UI parameter object.

    Represents a FAUST UI input and enforces the constraints given by its
    minimum, maximum and step size whenever a value is assigned.

    The descriptor protocol is implemented: reading works like a normal
    attribute, while assignment is redirected to the "zone" attribute.
    """

    def __init__(self, label, zone, init, min, max, step, param_type):
        """Initialise a Param object.

        Parameters:
        -----------
        label : str
            The full label as specified in the FAUST DSP file.
        zone : cffi.CData
            Points to the FAUSTFLOAT object inside the DSP C object.
        init : float
            The initialisation value.
        min : float
            The minimum allowed value.
        max : float
            The maximum allowed value.
        step : float
            The step size of the parameter.
        param_type : str
            The parameter type (e.g., HorizontalSlider)
        """
        self.label = label
        self.min = min
        self.max = max
        self.step = step
        self.type = param_type
        self.default = init
        self.metadata = {}
        # NOTE: _zone is a CData holding a float*
        self._zone = zone
        self._zone[0] = init
        self.__doc__ = "min=%s, max=%s, step=%s" % (min, max, step)

    def _read_zone(self):
        return self._zone[0]

    def _write_zone(self, value):
        # clamp to [min, max]; otherwise snap to the nearest multiple of step
        if value >= self.max:
            clamped = self.max
        elif value <= self.min:
            clamped = self.min
        else:
            clamped = self.min + round((value - self.min) / self.step) * self.step
        self._zone[0] = clamped

    zone = property(fget=_read_zone, fset=_write_zone,
                    doc="Pointer to the value of the parameter.")

    def __set__(self, obj, value):
        self.zone = value
class Box(object):
    """A labelled container of UI elements with a layout ("vertical",
    "horizontal" or "tab") and a meta-data dict."""

    def __init__(self, label, layout):
        self.label = label
        self.layout = layout
        self.metadata = {}

    def __setattr__(self, name, value):
        # Honour the descriptor protocol for *instance* attributes: if the
        # attribute already stored on this instance defines __set__ (as Param
        # does), delegate assignment to it instead of rebinding the name.
        current = self.__dict__.get(name)
        if current is not None and hasattr(current, "__set__"):
            current.__set__(self, value)
        else:
            object.__setattr__(self, name, value)
# TODO: implement the *Display() and *Bargraph() methods
class PythonUI(object):
    """
    Maps the UI elements of a FAUST DSP to attributes of another object,
    specifically a FAUST wrapper object.

    In FAUST, UI's are specified by the DSP object, which calls methods of a UI
    object to create them. The PythonUI class implements such a UI object. It
    creates C callbacks to its methods and stores them in a UI struct, which
    can then be passed to the buildUserInterface() function of a FAUST DSP
    object.

    The DSP object basically calls the methods of the PythonUI class from C via
    the callbacks in the UI struct and thus creates a hierarchical namespace of
    attributes which map back to the DSP's UI elements.

    Notes:
    ------
    Box and Param attributes are prefixed with "b_" and "p_", respectively, in
    order to differentiate them from each other and from regular attributes.
    Boxes and parameters without a label are given a default name of "anon<N>",
    where N is an integer (e.g., "p_anon1" for a label-less parameter).

    See also:
    ---------
    FAUSTPy.Param - wraps the UI input parameters.
    """
    def __init__(self, ffi, obj=None):
        """
        Initialise a PythonUI object.

        Creates one C callback per UIGlue entry point and stores them in a
        UIGlue struct (available via the "ui" property) so that a FAUST DSP's
        buildUserInterface() can call back into this object.

        Parameters:
        -----------
        ffi : cffi.FFI
            The CFFI instance that holds all the data type declarations.
        obj : object (optional)
            The Python object to which the UI elements are to be added. If
            None (the default) the PythonUI instance manipulates itself.
        """
        # stack of open boxes; the last element is the box new UI elements
        # get attached to
        if obj:
            self.__boxes = [obj]
        else:
            self.__boxes = [self]
        # per-box counters used to name label-less boxes/parameters
        self.__num_anon_boxes = [0]
        self.__num_anon_params = [0]
        # per-box parameter meta-data, keyed by zone pointer (see declare())
        self.__metadata = [{}]
        # group meta-data declared before the next openBox() (see declare())
        self.__group_metadata = {}
        # define C callbacks that know the global PythonUI object
        @ffi.callback("void(void*, FAUSTFLOAT*, char*, char*)")
        def declare(mInterface, zone, key, value):
            self.declare(zone, ffi.string(key), ffi.string(value))
        @ffi.callback("void(void*, char*)")
        def openVerticalBox(mInterface, label):
            self.openVerticalBox(ffi.string(label))
        @ffi.callback("void(void*, char*)")
        def openHorizontalBox(mInterface, label):
            self.openHorizontalBox(ffi.string(label))
        @ffi.callback("void(void*, char*)")
        def openTabBox(mInterface, label):
            self.openTabBox(ffi.string(label))
        @ffi.callback("void(void*)")
        def closeBox(mInterface):
            self.closeBox()
        @ffi.callback("void(void*, char*, FAUSTFLOAT*, FAUSTFLOAT, FAUSTFLOAT, FAUSTFLOAT, FAUSTFLOAT)")
        def addHorizontalSlider(ignore, c_label, zone, init, min, max, step):
            label = ffi.string(c_label)
            self.addHorizontalSlider(label, zone, init, min, max, step)
        @ffi.callback("void(void*, char*, FAUSTFLOAT*, FAUSTFLOAT, FAUSTFLOAT, FAUSTFLOAT, FAUSTFLOAT)")
        def addVerticalSlider(ignore, c_label, zone, init, min, max, step):
            label = ffi.string(c_label)
            self.addVerticalSlider(label, zone, init, min, max, step)
        @ffi.callback("void(void*, char*, FAUSTFLOAT*, FAUSTFLOAT, FAUSTFLOAT, FAUSTFLOAT, FAUSTFLOAT)")
        def addNumEntry(ignore, c_label, zone, init, min, max, step):
            label = ffi.string(c_label)
            self.addNumEntry(label, zone, init, min, max, step)
        @ffi.callback("void(void*, char*, FAUSTFLOAT*)")
        def addButton(ignore, c_label, zone):
            self.addButton(ffi.string(c_label), zone)
        @ffi.callback("void(void*, char*, FAUSTFLOAT*)")
        def addToggleButton(ignore, c_label, zone):
            self.addToggleButton(ffi.string(c_label), zone)
        @ffi.callback("void(void*, char*, FAUSTFLOAT*)")
        def addCheckButton(ignore, c_label, zone):
            self.addCheckButton(ffi.string(c_label), zone)
        @ffi.callback("void(void*, char*, FAUSTFLOAT*, int)")
        def addNumDisplay(ignore, c_label, zone, p):
            self.addNumDisplay(ffi.string(c_label), zone, p)
        @ffi.callback("void(void*, char*, FAUSTFLOAT*, char*[], FAUSTFLOAT, FAUSTFLOAT)")
        def addTextDisplay(ignore, c_label, zone, names, min, max):
            self.addTextDisplay(ffi.string(c_label), zone, names, min, max)
        @ffi.callback("void(void*, char*, FAUSTFLOAT*, FAUSTFLOAT, FAUSTFLOAT)")
        def addHorizontalBargraph(ignore, c_label, zone, min, max):
            label = ffi.string(c_label)
            self.addHorizontalBargraph(label, zone, min, max)
        @ffi.callback("void(void*, char*, FAUSTFLOAT*, FAUSTFLOAT, FAUSTFLOAT)")
        def addVerticalBargraph(ignore, c_label, zone, min, max):
            label = ffi.string(c_label)
            self.addVerticalBargraph(label, zone, min, max)
        # create a UI object and store the above callbacks as its function
        # pointers; also store the above functions in self so that they don't
        # get garbage collected
        ui = ffi.new("UIGlue*")
        ui.declare = self.__declare_c = declare
        ui.openVerticalBox = self.__openVerticalBox_c = openVerticalBox
        ui.openHorizontalBox = self.__openHorizontalBox_c = openHorizontalBox
        ui.openTabBox = self.__openTabBox_c = openTabBox
        ui.closeBox = self.__closeBox_c = closeBox
        ui.addHorizontalSlider = self.__addHorizontalSlider_c = addHorizontalSlider
        ui.addVerticalSlider = self.__addVerticalSlider_c = addVerticalSlider
        ui.addNumEntry = self.__addNumEntry_c = addNumEntry
        ui.addButton = self.__addButton_c = addButton
        ui.addToggleButton = self.__addToggleButton_c = addToggleButton
        ui.addCheckButton = self.__addCheckButton_c = addCheckButton
        ui.addNumDisplay = self.__addNumDisplay_c = addNumDisplay
        ui.addTextDisplay = self.__addTextDisplay_c = addTextDisplay
        ui.addHorizontalBargraph = self.__addHorizontalBargraph_c = addHorizontalBargraph
        ui.addVerticalBargraph = self.__addVerticalBargraph_c = addVerticalBargraph
        ui.uiInterface = ffi.NULL  # we don't use this anyway
        self.__ui = ui
        self.__ffi = ffi
    # read-only access to the C-side UIGlue struct built in __init__()
    ui = property(fget=lambda x: x.__ui,
                  doc="The UI struct that calls back to its parent object.")

    def declare(self, zone, key, value):
        """Store a key/value meta-data pair for a group or a parameter.

        A NULL *zone* means the pair belongs to the next box to be opened;
        otherwise it belongs to the parameter identified by the *zone*
        pointer and is attached to it in closeBox().
        """
        if zone == self.__ffi.NULL:
            # set group meta-data
            #
            # the group meta-data is stored temporarily here and is set during
            # the next openBox()
            self.__group_metadata[key] = value
        else:
            # store parameter meta-data
            #
            # since the only identifier we get is the zone (pointer to the
            # control value), we have to store this for now and assign it to
            # the corresponding parameter later in closeBox()
            if zone not in self.__metadata[-1]:
                self.__metadata[-1][zone] = {}
            self.__metadata[-1][zone][key] = value
##########################
# stuff to do with boxes
##########################
def openBox(self, label, layout):
# If the label is an empty string, don't do anything, just stay in the
# current Box
if label:
# special case the first box, which is always "0x00" (the ASCII
# Null character), so that it has a consistent name
if label.decode() == '0x00':
sane_label = "ui"
else:
sane_label = "b_"+str_to_identifier(label)
else:
# if the label is empty, create a default label
self.__num_anon_boxes[-1] += 1
sane_label = "b_anon" + str(self.__num_anon_boxes[-1])
# create a new sub-Box and make it a child of the current Box
box = Box(label, layout)
setattr(self.__boxes[-1], sane_label, box)
self.__boxes.append(box)
# store the group meta-data in the newly opened box and reset
# self.__group_metadata
self.__boxes[-1].metadata.update(self.__group_metadata)
self.__group_metadata = {}
self.__num_anon_boxes.append(0)
self.__num_anon_params.append(0)
self.__metadata.append({})
    def openVerticalBox(self, label):
        """Open a sub-box with vertical layout (see openBox())."""
        self.openBox(label, "vertical")

    def openHorizontalBox(self, label):
        """Open a sub-box with horizontal layout (see openBox())."""
        self.openBox(label, "horizontal")

    def openTabBox(self, label):
        """Open a sub-box with tab layout (see openBox())."""
        self.openBox(label, "tab")
def closeBox(self):
cur_metadata = self.__metadata.pop()
# iterate over the objects in the current box and assign the meta-data
# to the correct parameters
for p in self.__boxes[-1].__dict__.values():
# TODO: add the Display class (or whatever it will be called) to
# this list once *Display and *Bargraph are implemented
if type(p) not in (Param,):
continue
# iterate over the meta-data that has accumulated in the current
# box and assign it to its corresponding Param objects
for zone, mdata in cur_metadata.items():
if p._zone == zone:
p.metadata.update(mdata)
self.__num_anon_boxes.pop()
self.__num_anon_params.pop()
# now pop the box off the stack
self.__boxes.pop()
##########################
# stuff to do with inputs
##########################
def add_input(self, label, zone, | |
GLfloat, GLfloat) # GL/glext.h:5993
# Auto-generated ctypes bindings mirroring GL/glext.h: each GL_<ext> flag
# marks an extension as declared, each gl* name is linked lazily via
# _link_function(name, restype, argtypes, extension), and each PFN*PROC is
# the CFUNCTYPE prototype for the corresponding GL function pointer. The
# trailing comments give the originating glext.h line. Do not edit by hand.
PFNGLPROGRAMPARAMETER4FVNVPROC = CFUNCTYPE(None, GLenum, GLuint, POINTER(GLfloat)) # GL/glext.h:5994
PFNGLPROGRAMPARAMETERS4DVNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, POINTER(GLdouble)) # GL/glext.h:5995
PFNGLPROGRAMPARAMETERS4FVNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, POINTER(GLfloat)) # GL/glext.h:5996
PFNGLREQUESTRESIDENTPROGRAMSNVPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:5997
PFNGLTRACKMATRIXNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLenum, GLenum) # GL/glext.h:5998
PFNGLVERTEXATTRIBPOINTERNVPROC = CFUNCTYPE(None, GLuint, GLint, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:5999
PFNGLVERTEXATTRIB1DNVPROC = CFUNCTYPE(None, GLuint, GLdouble) # GL/glext.h:6000
PFNGLVERTEXATTRIB1DVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:6001
PFNGLVERTEXATTRIB1FNVPROC = CFUNCTYPE(None, GLuint, GLfloat) # GL/glext.h:6002
PFNGLVERTEXATTRIB1FVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:6003
PFNGLVERTEXATTRIB1SNVPROC = CFUNCTYPE(None, GLuint, GLshort) # GL/glext.h:6004
PFNGLVERTEXATTRIB1SVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:6005
PFNGLVERTEXATTRIB2DNVPROC = CFUNCTYPE(None, GLuint, GLdouble, GLdouble) # GL/glext.h:6006
PFNGLVERTEXATTRIB2DVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:6007
PFNGLVERTEXATTRIB2FNVPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat) # GL/glext.h:6008
PFNGLVERTEXATTRIB2FVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:6009
PFNGLVERTEXATTRIB2SNVPROC = CFUNCTYPE(None, GLuint, GLshort, GLshort) # GL/glext.h:6010
PFNGLVERTEXATTRIB2SVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:6011
PFNGLVERTEXATTRIB3DNVPROC = CFUNCTYPE(None, GLuint, GLdouble, GLdouble, GLdouble) # GL/glext.h:6012
PFNGLVERTEXATTRIB3DVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:6013
PFNGLVERTEXATTRIB3FNVPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat) # GL/glext.h:6014
PFNGLVERTEXATTRIB3FVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:6015
PFNGLVERTEXATTRIB3SNVPROC = CFUNCTYPE(None, GLuint, GLshort, GLshort, GLshort) # GL/glext.h:6016
PFNGLVERTEXATTRIB3SVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:6017
PFNGLVERTEXATTRIB4DNVPROC = CFUNCTYPE(None, GLuint, GLdouble, GLdouble, GLdouble, GLdouble) # GL/glext.h:6018
PFNGLVERTEXATTRIB4DVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:6019
PFNGLVERTEXATTRIB4FNVPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:6020
PFNGLVERTEXATTRIB4FVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:6021
PFNGLVERTEXATTRIB4SNVPROC = CFUNCTYPE(None, GLuint, GLshort, GLshort, GLshort, GLshort) # GL/glext.h:6022
PFNGLVERTEXATTRIB4SVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:6023
PFNGLVERTEXATTRIB4UBNVPROC = CFUNCTYPE(None, GLuint, GLubyte, GLubyte, GLubyte, GLubyte) # GL/glext.h:6024
PFNGLVERTEXATTRIB4UBVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLubyte)) # GL/glext.h:6025
PFNGLVERTEXATTRIBS1DVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLdouble)) # GL/glext.h:6026
PFNGLVERTEXATTRIBS1FVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLfloat)) # GL/glext.h:6027
PFNGLVERTEXATTRIBS1SVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLshort)) # GL/glext.h:6028
PFNGLVERTEXATTRIBS2DVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLdouble)) # GL/glext.h:6029
PFNGLVERTEXATTRIBS2FVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLfloat)) # GL/glext.h:6030
PFNGLVERTEXATTRIBS2SVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLshort)) # GL/glext.h:6031
PFNGLVERTEXATTRIBS3DVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLdouble)) # GL/glext.h:6032
PFNGLVERTEXATTRIBS3FVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLfloat)) # GL/glext.h:6033
PFNGLVERTEXATTRIBS3SVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLshort)) # GL/glext.h:6034
PFNGLVERTEXATTRIBS4DVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLdouble)) # GL/glext.h:6035
PFNGLVERTEXATTRIBS4FVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLfloat)) # GL/glext.h:6036
PFNGLVERTEXATTRIBS4SVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLshort)) # GL/glext.h:6037
PFNGLVERTEXATTRIBS4UBVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLubyte)) # GL/glext.h:6038
# SGIX_texture_coordinate_clamp (GL/glext.h:6041)
GL_SGIX_texture_coordinate_clamp = 1 # GL/glext.h:6042
# SGIX_scalebias_hint (GL/glext.h:6045)
GL_SGIX_scalebias_hint = 1 # GL/glext.h:6046
# OML_interlace (GL/glext.h:6049)
GL_OML_interlace = 1 # GL/glext.h:6050
# OML_subsample (GL/glext.h:6053)
GL_OML_subsample = 1 # GL/glext.h:6054
# OML_resample (GL/glext.h:6057)
GL_OML_resample = 1 # GL/glext.h:6058
# NV_copy_depth_to_color (GL/glext.h:6061)
GL_NV_copy_depth_to_color = 1 # GL/glext.h:6062
# ATI_envmap_bumpmap (GL/glext.h:6065)
GL_ATI_envmap_bumpmap = 1 # GL/glext.h:6066
# GL/glext.h:6068
glTexBumpParameterivATI = _link_function('glTexBumpParameterivATI', None, [GLenum, POINTER(GLint)], 'ATI_envmap_bumpmap')
# GL/glext.h:6069
glTexBumpParameterfvATI = _link_function('glTexBumpParameterfvATI', None, [GLenum, POINTER(GLfloat)], 'ATI_envmap_bumpmap')
# GL/glext.h:6070
glGetTexBumpParameterivATI = _link_function('glGetTexBumpParameterivATI', None, [GLenum, POINTER(GLint)], 'ATI_envmap_bumpmap')
# GL/glext.h:6071
glGetTexBumpParameterfvATI = _link_function('glGetTexBumpParameterfvATI', None, [GLenum, POINTER(GLfloat)], 'ATI_envmap_bumpmap')
PFNGLTEXBUMPPARAMETERIVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:6073
PFNGLTEXBUMPPARAMETERFVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:6074
PFNGLGETTEXBUMPPARAMETERIVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:6075
PFNGLGETTEXBUMPPARAMETERFVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:6076
# ATI_fragment_shader (GL/glext.h:6079)
GL_ATI_fragment_shader = 1 # GL/glext.h:6080
# GL/glext.h:6082
glGenFragmentShadersATI = _link_function('glGenFragmentShadersATI', GLuint, [GLuint], 'ATI_fragment_shader')
# GL/glext.h:6083
glBindFragmentShaderATI = _link_function('glBindFragmentShaderATI', None, [GLuint], 'ATI_fragment_shader')
# GL/glext.h:6084
glDeleteFragmentShaderATI = _link_function('glDeleteFragmentShaderATI', None, [GLuint], 'ATI_fragment_shader')
# GL/glext.h:6085
glBeginFragmentShaderATI = _link_function('glBeginFragmentShaderATI', None, [], 'ATI_fragment_shader')
# GL/glext.h:6086
glEndFragmentShaderATI = _link_function('glEndFragmentShaderATI', None, [], 'ATI_fragment_shader')
# GL/glext.h:6087
glPassTexCoordATI = _link_function('glPassTexCoordATI', None, [GLuint, GLuint, GLenum], 'ATI_fragment_shader')
# GL/glext.h:6088
glSampleMapATI = _link_function('glSampleMapATI', None, [GLuint, GLuint, GLenum], 'ATI_fragment_shader')
# GL/glext.h:6089
glColorFragmentOp1ATI = _link_function('glColorFragmentOp1ATI', None, [GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint], 'ATI_fragment_shader')
# GL/glext.h:6090
glColorFragmentOp2ATI = _link_function('glColorFragmentOp2ATI', None, [GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint], 'ATI_fragment_shader')
# GL/glext.h:6091
glColorFragmentOp3ATI = _link_function('glColorFragmentOp3ATI', None, [GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint], 'ATI_fragment_shader')
# GL/glext.h:6092
glAlphaFragmentOp1ATI = _link_function('glAlphaFragmentOp1ATI', None, [GLenum, GLuint, GLuint, GLuint, GLuint, GLuint], 'ATI_fragment_shader')
# GL/glext.h:6093
glAlphaFragmentOp2ATI = _link_function('glAlphaFragmentOp2ATI', None, [GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint], 'ATI_fragment_shader')
# GL/glext.h:6094
glAlphaFragmentOp3ATI = _link_function('glAlphaFragmentOp3ATI', None, [GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint], 'ATI_fragment_shader')
# GL/glext.h:6095
glSetFragmentShaderConstantATI = _link_function('glSetFragmentShaderConstantATI', None, [GLuint, POINTER(GLfloat)], 'ATI_fragment_shader')
PFNGLGENFRAGMENTSHADERSATIPROC = CFUNCTYPE(GLuint, GLuint) # GL/glext.h:6097
PFNGLBINDFRAGMENTSHADERATIPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:6098
PFNGLDELETEFRAGMENTSHADERATIPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:6099
PFNGLBEGINFRAGMENTSHADERATIPROC = CFUNCTYPE(None) # GL/glext.h:6100
PFNGLENDFRAGMENTSHADERATIPROC = CFUNCTYPE(None) # GL/glext.h:6101
PFNGLPASSTEXCOORDATIPROC = CFUNCTYPE(None, GLuint, GLuint, GLenum) # GL/glext.h:6102
PFNGLSAMPLEMAPATIPROC = CFUNCTYPE(None, GLuint, GLuint, GLenum) # GL/glext.h:6103
PFNGLCOLORFRAGMENTOP1ATIPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint) # GL/glext.h:6104
PFNGLCOLORFRAGMENTOP2ATIPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint) # GL/glext.h:6105
PFNGLCOLORFRAGMENTOP3ATIPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint) # GL/glext.h:6106
PFNGLALPHAFRAGMENTOP1ATIPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint, GLuint, GLuint) # GL/glext.h:6107
PFNGLALPHAFRAGMENTOP2ATIPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint) # GL/glext.h:6108
PFNGLALPHAFRAGMENTOP3ATIPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint) # GL/glext.h:6109
PFNGLSETFRAGMENTSHADERCONSTANTATIPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:6110
# ATI_pn_triangles (GL/glext.h:6113)
GL_ATI_pn_triangles = 1 # GL/glext.h:6114
# GL/glext.h:6116
glPNTrianglesiATI = _link_function('glPNTrianglesiATI', None, [GLenum, GLint], 'ATI_pn_triangles')
# GL/glext.h:6117
glPNTrianglesfATI = _link_function('glPNTrianglesfATI', None, [GLenum, GLfloat], 'ATI_pn_triangles')
PFNGLPNTRIANGLESIATIPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:6119
PFNGLPNTRIANGLESFATIPROC = CFUNCTYPE(None, GLenum, GLfloat) # GL/glext.h:6120
# ATI_vertex_array_object (GL/glext.h:6123)
GL_ATI_vertex_array_object = 1 # GL/glext.h:6124
# GL/glext.h:6126
glNewObjectBufferATI = _link_function('glNewObjectBufferATI', GLuint, [GLsizei, POINTER(GLvoid), GLenum], 'ATI_vertex_array_object')
# GL/glext.h:6127
glIsObjectBufferATI = _link_function('glIsObjectBufferATI', GLboolean, [GLuint], 'ATI_vertex_array_object')
# GL/glext.h:6128
glUpdateObjectBufferATI = _link_function('glUpdateObjectBufferATI', None, [GLuint, GLuint, GLsizei, POINTER(GLvoid), GLenum], 'ATI_vertex_array_object')
# GL/glext.h:6129
glGetObjectBufferfvATI = _link_function('glGetObjectBufferfvATI', None, [GLuint, GLenum, POINTER(GLfloat)], 'ATI_vertex_array_object')
# GL/glext.h:6130
glGetObjectBufferivATI = _link_function('glGetObjectBufferivATI', None, [GLuint, GLenum, POINTER(GLint)], 'ATI_vertex_array_object')
# GL/glext.h:6131
glFreeObjectBufferATI = _link_function('glFreeObjectBufferATI', None, [GLuint], 'ATI_vertex_array_object')
# GL/glext.h:6132
glArrayObjectATI = _link_function('glArrayObjectATI', None, [GLenum, GLint, GLenum, GLsizei, GLuint, GLuint], 'ATI_vertex_array_object')
# GL/glext.h:6133
glGetArrayObjectfvATI = _link_function('glGetArrayObjectfvATI', None, [GLenum, GLenum, POINTER(GLfloat)], 'ATI_vertex_array_object')
# GL/glext.h:6134
glGetArrayObjectivATI = _link_function('glGetArrayObjectivATI', None, [GLenum, GLenum, POINTER(GLint)], 'ATI_vertex_array_object')
# GL/glext.h:6135
glVariantArrayObjectATI = _link_function('glVariantArrayObjectATI', None, [GLuint, GLenum, GLsizei, GLuint, GLuint], 'ATI_vertex_array_object')
# GL/glext.h:6136
glGetVariantArrayObjectfvATI = _link_function('glGetVariantArrayObjectfvATI', None, [GLuint, GLenum, POINTER(GLfloat)], 'ATI_vertex_array_object')
# GL/glext.h:6137
glGetVariantArrayObjectivATI = _link_function('glGetVariantArrayObjectivATI', None, [GLuint, GLenum, POINTER(GLint)], 'ATI_vertex_array_object')
PFNGLNEWOBJECTBUFFERATIPROC = CFUNCTYPE(GLuint, GLsizei, POINTER(GLvoid), GLenum) # GL/glext.h:6139
PFNGLISOBJECTBUFFERATIPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:6140
PFNGLUPDATEOBJECTBUFFERATIPROC = CFUNCTYPE(None, GLuint, GLuint, GLsizei, POINTER(GLvoid), GLenum) # GL/glext.h:6141
PFNGLGETOBJECTBUFFERFVATIPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:6142
PFNGLGETOBJECTBUFFERIVATIPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:6143
PFNGLFREEOBJECTBUFFERATIPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:6144
PFNGLARRAYOBJECTATIPROC = CFUNCTYPE(None, GLenum, GLint, GLenum, GLsizei, GLuint, GLuint) # GL/glext.h:6145
PFNGLGETARRAYOBJECTFVATIPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:6146
PFNGLGETARRAYOBJECTIVATIPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:6147
PFNGLVARIANTARRAYOBJECTATIPROC = CFUNCTYPE(None, GLuint, GLenum, GLsizei, GLuint, GLuint) # GL/glext.h:6148
PFNGLGETVARIANTARRAYOBJECTFVATIPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:6149
PFNGLGETVARIANTARRAYOBJECTIVATIPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:6150
# EXT_vertex_shader (GL/glext.h:6153)
GL_EXT_vertex_shader = 1 # GL/glext.h:6154
# GL/glext.h:6156
glBeginVertexShaderEXT = _link_function('glBeginVertexShaderEXT', None, [], 'EXT_vertex_shader')
# GL/glext.h:6157
glEndVertexShaderEXT = _link_function('glEndVertexShaderEXT', None, [], 'EXT_vertex_shader')
# GL/glext.h:6158
glBindVertexShaderEXT = _link_function('glBindVertexShaderEXT', None, [GLuint], 'EXT_vertex_shader')
# GL/glext.h:6159
glGenVertexShadersEXT = _link_function('glGenVertexShadersEXT', GLuint, [GLuint], 'EXT_vertex_shader')
# GL/glext.h:6160
glDeleteVertexShaderEXT = _link_function('glDeleteVertexShaderEXT', None, [GLuint], 'EXT_vertex_shader')
# GL/glext.h:6161
glShaderOp1EXT = _link_function('glShaderOp1EXT', None, [GLenum, GLuint, GLuint], 'EXT_vertex_shader')
# GL/glext.h:6162
glShaderOp2EXT = _link_function('glShaderOp2EXT', None, [GLenum, GLuint, GLuint, GLuint], 'EXT_vertex_shader')
# GL/glext.h:6163
glShaderOp3EXT = _link_function('glShaderOp3EXT', None, [GLenum, GLuint, GLuint, GLuint, GLuint], 'EXT_vertex_shader')
# GL/glext.h:6164
glSwizzleEXT = _link_function('glSwizzleEXT', None, [GLuint, GLuint, GLenum, GLenum, GLenum, GLenum], 'EXT_vertex_shader')
# GL/glext.h:6165
glWriteMaskEXT = _link_function('glWriteMaskEXT', None, [GLuint, GLuint, GLenum, GLenum, GLenum, GLenum], 'EXT_vertex_shader')
# GL/glext.h:6166
glInsertComponentEXT = _link_function('glInsertComponentEXT', None, [GLuint, GLuint, GLuint], 'EXT_vertex_shader')
# GL/glext.h:6167
glExtractComponentEXT = _link_function('glExtractComponentEXT', None, [GLuint, GLuint, GLuint], 'EXT_vertex_shader')
# GL/glext.h:6168
glGenSymbolsEXT = _link_function('glGenSymbolsEXT', GLuint, [GLenum, GLenum, GLenum, GLuint], 'EXT_vertex_shader')
# GL/glext.h:6169
glSetInvariantEXT = _link_function('glSetInvariantEXT', None, [GLuint, GLenum, POINTER(GLvoid)], 'EXT_vertex_shader')
# GL/glext.h:6170
glSetLocalConstantEXT = _link_function('glSetLocalConstantEXT', None, [GLuint, GLenum, POINTER(GLvoid)], 'EXT_vertex_shader')
# GL/glext.h:6171
glVariantbvEXT = _link_function('glVariantbvEXT', None, [GLuint, POINTER(GLbyte)], 'EXT_vertex_shader')
# GL/glext.h:6172
glVariantsvEXT = _link_function('glVariantsvEXT', None, [GLuint, POINTER(GLshort)], 'EXT_vertex_shader')
# GL/glext.h:6173
glVariantivEXT = _link_function('glVariantivEXT', None, [GLuint, POINTER(GLint)], 'EXT_vertex_shader')
# GL/glext.h:6174
glVariantfvEXT = _link_function('glVariantfvEXT', None, [GLuint, POINTER(GLfloat)], 'EXT_vertex_shader')
# GL/glext.h:6175
glVariantdvEXT = _link_function('glVariantdvEXT', None, [GLuint, POINTER(GLdouble)], 'EXT_vertex_shader')
# GL/glext.h:6176
glVariantubvEXT = _link_function('glVariantubvEXT', None, [GLuint, POINTER(GLubyte)], 'EXT_vertex_shader')
# GL/glext.h:6177
glVariantusvEXT = _link_function('glVariantusvEXT', None, [GLuint, POINTER(GLushort)], 'EXT_vertex_shader')
# GL/glext.h:6178
glVariantuivEXT = _link_function('glVariantuivEXT', None, [GLuint, POINTER(GLuint)], 'EXT_vertex_shader')
# GL/glext.h:6179
glVariantPointerEXT = _link_function('glVariantPointerEXT', None, [GLuint, GLenum, GLuint, POINTER(GLvoid)], 'EXT_vertex_shader')
# GL/glext.h:6180
glEnableVariantClientStateEXT = _link_function('glEnableVariantClientStateEXT', None, [GLuint], 'EXT_vertex_shader')
# GL/glext.h:6181
glDisableVariantClientStateEXT = _link_function('glDisableVariantClientStateEXT', None, [GLuint], 'EXT_vertex_shader')
# GL/glext.h:6182
glBindLightParameterEXT = _link_function('glBindLightParameterEXT', GLuint, [GLenum, GLenum], 'EXT_vertex_shader')
# GL/glext.h:6183
glBindMaterialParameterEXT = _link_function('glBindMaterialParameterEXT', GLuint, [GLenum, GLenum], 'EXT_vertex_shader')
# GL/glext.h:6184
glBindTexGenParameterEXT = _link_function('glBindTexGenParameterEXT', GLuint, [GLenum, GLenum, GLenum], 'EXT_vertex_shader')
# GL/glext.h:6185
glBindTextureUnitParameterEXT = _link_function('glBindTextureUnitParameterEXT', GLuint, [GLenum, GLenum], 'EXT_vertex_shader')
# GL/glext.h:6186
glBindParameterEXT = _link_function('glBindParameterEXT', GLuint, [GLenum], 'EXT_vertex_shader')
# GL/glext.h:6187
glIsVariantEnabledEXT = _link_function('glIsVariantEnabledEXT', GLboolean, [GLuint, GLenum], 'EXT_vertex_shader')
# GL/glext.h:6188
glGetVariantBooleanvEXT = _link_function('glGetVariantBooleanvEXT', None, [GLuint, GLenum, POINTER(GLboolean)], 'EXT_vertex_shader')
# GL/glext.h:6189
glGetVariantIntegervEXT = _link_function('glGetVariantIntegervEXT', None, [GLuint, GLenum, POINTER(GLint)], 'EXT_vertex_shader')
# GL/glext.h:6190
glGetVariantFloatvEXT = _link_function('glGetVariantFloatvEXT', None, [GLuint, GLenum, POINTER(GLfloat)], 'EXT_vertex_shader')
# GL/glext.h:6191
glGetVariantPointervEXT = _link_function('glGetVariantPointervEXT', None, [GLuint, GLenum, POINTER(POINTER(GLvoid))], 'EXT_vertex_shader')
# GL/glext.h:6192
glGetInvariantBooleanvEXT = _link_function('glGetInvariantBooleanvEXT', None, [GLuint, GLenum, POINTER(GLboolean)], 'EXT_vertex_shader')
# GL/glext.h:6193
glGetInvariantIntegervEXT = _link_function('glGetInvariantIntegervEXT', None, [GLuint, GLenum, POINTER(GLint)], 'EXT_vertex_shader')
# GL/glext.h:6194
glGetInvariantFloatvEXT = _link_function('glGetInvariantFloatvEXT', None, [GLuint, GLenum, POINTER(GLfloat)], 'EXT_vertex_shader')
# GL/glext.h:6195
glGetLocalConstantBooleanvEXT = _link_function('glGetLocalConstantBooleanvEXT', None, [GLuint, GLenum, POINTER(GLboolean)], 'EXT_vertex_shader')
# GL/glext.h:6196
glGetLocalConstantIntegervEXT = _link_function('glGetLocalConstantIntegervEXT', None, [GLuint, GLenum, POINTER(GLint)], 'EXT_vertex_shader')
# GL/glext.h:6197
glGetLocalConstantFloatvEXT = _link_function('glGetLocalConstantFloatvEXT', None, [GLuint, GLenum, POINTER(GLfloat)], 'EXT_vertex_shader')
PFNGLBEGINVERTEXSHADEREXTPROC = CFUNCTYPE(None) # GL/glext.h:6199
PFNGLENDVERTEXSHADEREXTPROC = CFUNCTYPE(None) # GL/glext.h:6200
PFNGLBINDVERTEXSHADEREXTPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:6201
PFNGLGENVERTEXSHADERSEXTPROC = CFUNCTYPE(GLuint, GLuint) # GL/glext.h:6202
PFNGLDELETEVERTEXSHADEREXTPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:6203
PFNGLSHADEROP1EXTPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint) # GL/glext.h:6204
PFNGLSHADEROP2EXTPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint) # GL/glext.h:6205
PFNGLSHADEROP3EXTPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint, GLuint) # GL/glext.h:6206
PFNGLSWIZZLEEXTPROC = CFUNCTYPE(None, | |
<reponame>hasii2011/albow-python-3
"""
The resource module exports some utility functions for finding, loading and caching various types of
resources. By default, resource files are looked for in a directory named _Resources_ alongside the
.py file of the program's main module.
Resource names are specified in a platform-independent manner using a series of pathname components. Specific
resource types are looked for by default in subdirectories of the resources directory as follows:
| Types | | Location |
| ------- | ---- | ------------------: |
| Fonts | | *resources*/fonts |
| Sounds | | *resources*/sounds |
| Text | | *resources*/text |
| Cursors | | *resources*/cursors |
| Music | | *resources*/music |
The subdirectory can in some cases be overridden using the `prefix` parameter to the relevant resource-loading
function. Each type of resource has a cache. The first time a resource with a given name is requested, it is
loaded and placed in the cache. Subsequent requests for the same name will return the cached object.
"""
import os
import sys
import logging
import pygame
from pygame.locals import RLEACCEL
from pygame import Surface
from pygame import mixer
from albow.core.DummySound import DummySound
# Default subdirectory (under the resource directory) for each resource type.
DEFAULT_SOUND_DIRECTORY = "sounds"
DEFAULT_IMAGES_DIRECTORY = "images"
DEFAULT_FONTS_DIRECTORY = "fonts"
DEFAULT_TEXT_DIRECTORY = "text"
DEFAULT_CURSORS_DIRECTORY = "cursors"
# Candidate directory names searched (walking up from sys.path[0]) by
# ResourceUtility.find_resource_dir().
DEFAULT_RESOURCE_DIRECTORY_NAMES = ["Resources", "resources"]
optimize_images = True
"""
If `True`, images loaded with `get_image()` will have `convert_alpha()` called on them by default. Defaults to `True`.
"""
# If True, images loaded with get_image() are run-length encoded via
# set_alpha(255, RLEACCEL) to speed up blitting. Defaults to False.
run_length_encode = False
class ResourceUtility:
"""
Static class housing shortcut methods to quickly access system resources like
- sounds
- cursors
- fonts
- images
- resource directories
.. Note::
Make unit tests for sound and cursor APIs since they are not currently demo'ed
"""
dummy_sound = DummySound()
sound_cache = {}
image_cache = {}
cursor_cache = {}
font_cache = {}
text_cache = {}
ourLogger = logging.getLogger(__name__)
@staticmethod
def find_resource_dir():
directory = sys.path[0]
while True:
for name in DEFAULT_RESOURCE_DIRECTORY_NAMES:
path = os.path.join(directory, name)
if os.path.exists(path):
return path
parent = os.path.dirname(directory)
if parent == directory:
raise SystemError("albow: Unable to find Resources directory")
directory = parent
@staticmethod
def resource_exists(*names, **kwds) -> bool:
"""
Returns true if a resource exists with the given pathname components.
Args:
*names:
**kwds:
Returns: `True` if it does, else `False`
"""
return os.path.exists(ResourceUtility._resource_path("", names, **kwds))
@staticmethod
def get_image(*names, **kwds) -> Surface:
"""
Loads the specified image from the images directory or returns it from the cache.
.. WARNING::
For some of the options to work correctly, you must have initialized the PyGame screen before calling get_image().
Args:
*names:
**kwds:
Returns:
"""
prefix = kwds.pop('prefix', DEFAULT_IMAGES_DIRECTORY)
path = ResourceUtility._resource_path(prefix, names)
return ResourceUtility._get_image(path, **kwds)
@staticmethod
def get_font(size, *names, **kwds):
"""
Loads the specified font or returns it from the cache.
Args:
size: This size font to load
*names:
**kwds:
Returns: A pygame font
"""
path = ResourceUtility._resource_path("%s" % DEFAULT_FONTS_DIRECTORY, names, **kwds)
key = (path, size)
font = ResourceUtility.font_cache.get(key)
if not font:
try:
font = pygame.font.Font(path, size)
#
# Python 3 update
#
# except IOError, e:
except IOError as e:
raise e.__class__("%s: %s" % (e, path))
ResourceUtility.font_cache[key] = font
return font
@staticmethod
def get_text(*names, **kwds):
"""
Loads the contents of a text file as a string or returns it from the cache. The file is opened in
universal newlines mode.
Args:
*names:
**kwds:
Returns:
"""
path = ResourceUtility._resource_path("%s" % DEFAULT_TEXT_DIRECTORY, names, **kwds)
text = ResourceUtility.text_cache.get(path)
if text is None:
text = open(path, "rU").read()
ResourceUtility.text_cache[path] = text
return text
@staticmethod
def resource_path(*names, **kwds) -> str:
"""
Constructs a resource pathname from the given pathname components.
Args:
*names:
**kwds:
Returns: The resource path
"""
return ResourceUtility._resource_path("", names, **kwds)
@staticmethod
def get_sound(*names, **kwds):
"""
Loads the specified sound or returns it from the cache.
If the sound is unable to be loaded for any reason, a warning message is printed and a dummy sound object
with no-op methods is returned. This allows an application to continue without sound in an environment
where sound support is not available.
Args:
*names: Sound file name
**kwds:
Returns:
"""
path = ResourceUtility._resource_path("%s" % DEFAULT_SOUND_DIRECTORY, names, **kwds)
return ResourceUtility.load_sound(path)
@staticmethod
def load_sound(path) -> "mixer.Sound":
"""
Loads a sound from the file specified by path, or returns it from the cache. Like `get_sound()`,
returns a dummy sound object if the sound cannot be loaded.
Args:
path: Fully qualified path
Returns: A pygame sound object
"""
if ResourceUtility.sound_cache is None:
return ResourceUtility.dummy_sound
retSound = ResourceUtility.sound_cache.get(path)
if not retSound:
try:
from pygame.mixer import Sound
#
# Python 3 update
#
# except ImportError, e:
except ImportError as e:
ResourceUtility.no_sound(e)
return ResourceUtility.dummy_sound
try:
retSound = Sound(path)
#
# Python 3 update
#
# except pygame.error, e:
except pygame.error as e:
ResourceUtility.missing_sound(e, path)
return ResourceUtility.dummy_sound
ResourceUtility.sound_cache[path] = retSound
return retSound
@staticmethod
def no_sound(e):
"""
Clear the sound cache as a side-effect
:param e: Exception to log
:return:
"""
ResourceUtility.ourLogger.error(f"albow.resource.get_sound: {e}")
ResourceUtility.ourLogger.error("albow.resource.get_sound: Sound not available, continuing without it")
ResourceUtility.sound_cache = None
@staticmethod
def missing_sound(e, name):
"""
Log an error message on a missing sound
:param e: The exception
:param name: This name of the missing sound
:return:
"""
ResourceUtility.ourLogger.error("albow.resource.get_sound: %s: %s", name, e)
@staticmethod
def get_cursor(*names, **kwds):
"""
Get a cursor out of the cache, Else load it and cache it
:param names:
:param kwds:
:return:
"""
path = ResourceUtility._resource_path("%s" % DEFAULT_CURSORS_DIRECTORY, names, **kwds)
cursor = ResourceUtility.cursor_cache.get(path)
if cursor is None:
cursor = ResourceUtility.load_cursor(path)
ResourceUtility.cursor_cache[path] = cursor
return cursor
    @staticmethod
    def load_cursor(path):
        """
        Loads a cursor from an image file. The cursor is returned as a tuple of
        arguments suitable for passing to the PyGame function `set_cursor()`:
        ``((width, height), hotspot, data, mask)``. (Caching is handled by
        the caller, `get_cursor()`.)

        .. IMPORTANT::
            The image must be no larger than 16x16 pixels and should consist only of the colours black (0, 0, 0),
            white (255, 255, 255), blue (0, 0, 255) and cyan (0, 255, 255). Blue and cyan are used to indicate the
            position of the hotspot, with blue if the hotspot is over a black or transparent pixel, and cyan if it is
            over a white pixel. The hotspot defaults to the top left corner. If the image has an alpha channel, it
            should consist of fully opaque or fully transparent pixels.

        Args:
            path: A fully qualified path to the image file

        Returns: ``((8 * rowbytes, height), hot, data, mask)`` as expected by
            pygame.mouse.set_cursor()
        """
        image = ResourceUtility._get_image(path)
        width, height = image.get_size()
        hot = (0, 0)  # hotspot; defaults to top-left unless a blue/cyan pixel is found
        data = []     # 1-bit-per-pixel "colour" bitmap (1 = black)
        mask = []     # 1-bit-per-pixel opacity bitmap (1 = opaque)
        rowbytes = (width + 7) // 8  # bytes per row, rounded up to whole bytes
        #
        # Python 3 update
        #
        # xr = xrange(width)
        # yr = xrange(height)
        xr = range(width)
        yr = range(height)
        for y in yr:
            # Pack pixels MSB-first, 8 pixels per byte.
            bit = 0x80
            db = mb = 0
            for x in xr:
                r, g, b, a = image.get_at((x, y))
                if a >= 128:
                    # Opaque pixel: set the mask bit.
                    mb |= bit
                    if r + g + b < 383:
                        # Darker than mid-grey counts as black.
                        db |= bit
                    # Blue (0,0,255) or cyan (0,255,255) marks the hotspot.
                    if r == 0 and b == 255:
                        hot = (x, y)
                bit >>= 1
                if not bit:
                    # Byte full: flush it and start the next one.
                    data.append(db)
                    mask.append(mb)
                    db = mb = 0
                    bit = 0x80
            # Python 3 update
            # if bit <> 0x80:
            if bit != 0x80:
                # Flush the trailing partial byte of this row.
                data.append(db)
                mask.append(mb)
        return (8 * rowbytes, height), hot, data, mask
@staticmethod
def _resource_path(default_prefix, names, prefix="") -> str:
return os.path.join(ResourceUtility.find_resource_dir(), prefix or default_prefix, *names)
@staticmethod
def _get_image(path, border=0, optimize=optimize_images, noalpha=False, rle=run_length_encode) -> Surface:
"""
Loads the specified image from the images directory or returns it from the cache.
Args:
path:
border: If border is specified, a border of that number of pixels is stripped from around
the image (making it 2 * border pixels smaller in each direction).
optimize: If optimize is true, convert_alpha() is called on the image.
noalpha: If noalpha is true, any alpha channel is stripped from the image.
rle: If rle is true, the image is run-length encoded to improve blitting speed.
Returns: The specified image from the images directory or returns it from the cache
"""
image = ResourceUtility.image_cache.get(path)
if not image:
image = pygame.image.load(path)
if noalpha:
image = image.convert(24)
elif optimize:
image = image.convert_alpha()
if rle:
image.set_alpha(255, RLEACCEL)
if border:
w, h = image.get_size()
b = border
d = 2 * border
image = image.subsurface(b, b, w - d, h - d)
ResourceUtility.image_cache[path] = | |
: float, optional
exclude_atoms : tuple of ...
Returns
-------
int
"""
n_res = 0
resids = []
for contact in self.nearby_atoms:
if (contact.atom_name() in exclude_atoms):
continue
if (contact.distance() < distance):
labels = contact.atom.fetch_labels()
other_resname = contact.resname()
other_resid = labels.chain_id + labels.resid()
if ((ion_params.allowed_coordinating_residues is not None) and
(other_resname in ion_params.allowed_coordinating_residues) and
(not other_resid in resids)):
n_res += 1
resids.append(other_resid)
return n_res
def number_of_atoms_within_radius(self, distance_cutoff):
"""
Counts the number of coordinating atoms within a given radius.
Parameters
----------
float
Returns
-------
int
"""
n_atoms = 0
atom_ids = []
for contact in self.nearby_atoms:
other_id = contact.atom_id_no_altloc()
if (not other_id in atom_ids):
if (contact.distance() < distance_cutoff):
n_atoms += 1
atom_ids.append(other_id) # check for alt confs.
return n_atoms
def number_of_backbone_oxygens(self, distance_cutoff=3.0):
"""
Counts the number of backbone oxygens coordinating a site.
Parameters
----------
distance_cutoff : float, optional
Returns
-------
int
"""
n_bb_ox = 0
for contact in self.nearby_atoms :
if (contact.atom_name() == "O"):
if (contact.distance() <= distance_cutoff):
if (not contact.resname() in WATER_RES_NAMES):
n_bb_ox += 1
return n_bb_ox
# FIXME needs to be refactored and combined with check_fpp_ratio
def is_compatible_anomalous_scattering(self, ion_params):
# lighter elements should have effectively no anomalous scattering
if (ion_params.element.upper() in ["MG", "NA"]):
return ((self.fpp is None) and (self.peak_anom is not None) and
(self.peak_anom < 1.0))
else :
# XXX somewhat dangerous - we really need f'' for this to work reliably
if (self.fpp is None):
return (self.peak_anom is not None) and (self.peak_anom > 3.0)
identity = self.identity(ion = ion_params)
if (identity in self.fpp_ratios):
return (not self.BAD_FPP in self.inaccuracies[identity])
return False
# XXX obsolete, delete?
def atom_weight(self, manager):
"""
Evaluates whether factors indicate that the atom is lighter, heavier, or
isoelectric to what it is currently identified as.
Parameters
----------
manager : mmtbx.ions.identify.manager
Returns
-------
int
-1 if lighter, 0 if isoelectronic, and 1 if heavier.
"""
identity = "HOH" if self.resname in WATER_RES_NAMES else self.identity()
# Waters that don't have B-factors at least 1 stddev below the mean are
# presumed to be correct
if (identity == "HOH" and
(self.atom.b > manager.b_mean_hoh - manager.b_stddev_hoh)):
return 0
if self.is_correctly_identified(identity = identity):
return 0
# B-factors/occupancies?
if self.FOFC_PEAK in self.inaccuracies[identity] or self.atom.b < 1:
return 1
if self.FOFC_HOLE in self.inaccuracies[identity]:
return -1
return 0
  def check_ion_environment(self,
      ion_params,
      wavelength = None,
      require_valence = True):
    """
    Checks whether or not the specified ion satisfies the metal-coordination
    parameters, specified by ion_params, such as valence sum, geometry, etc.
    The criteria used here are quite strict, but many of the analyses are
    saved for later if we want to use looser critera.

    Results are accumulated as flags in self.inaccuracies[identity] (hard
    failures) and self.ignored[identity] (soft failures under relaxed rules);
    nothing is returned.

    Parameters
    ----------
    ion_params : mmtbx.ions.metal_parameters
    wavelength : float, optional
    require_valence : bool, optional
    """
    from iotbx.pdb import common_residue_names_get_class as get_class
    identity = self.identity(ion_params)
    inaccuracies = self.inaccuracies[identity] = set()
    self.expected_params[identity] = ion_params
    ignored = self.ignored[identity] = set()
    # if the atom is clearly not a water, optionally relax some rules. this
    # will be more sensitive for transition metals, without finding a lot of
    # spurious Mg/Na sites.
    strict_rules = require_valence or \
      self.is_correctly_identified(identity = "HOH") or \
      self.strict_valence or \
      ion_params.element in ["NA","MG"]
    # Check for all non-overlapping atoms within 3 A of the metal
    n_closest = 0
    coord_atoms = []
    for i_pair, contact1 in enumerate(self.nearby_atoms):
      distance = contact1.distance()
      if (distance < 3.0):
        # for-else: contact1 only counts as coordinating when no later
        # contact overlaps it (same atom or within 0.3 A).
        for contact2 in self.nearby_atoms[(i_pair+1):] :
          if ((contact1 == contact2) or
              (contact1.distance_from(contact2) <= 0.3)):
            break
        else :
          coord_atoms.append(contact1)
        if (distance < 2.7):
          n_closest += 1
    if len(coord_atoms) < ion_params.coord_num_lower:
      inaccuracies.add(self.TOO_FEW_COORD)
    if n_closest > ion_params.coord_num_upper:
      inaccuracies.add(self.TOO_MANY_COORD)
    # Coordinating atoms closer than 3.0 A are not positively charged
    n_non_water = 0
    self.bad_coords[identity] = []
    for contact in self.nearby_atoms:
      other_name = contact.atom_name()
      other_resname = contact.resname()
      other_element = contact.element
      if (not other_resname in WATER_RES_NAMES):
        n_non_water += 1
      else:
        # Everything can potentially be coordinated by water
        continue
      if (contact.distance() < 3.0):
        # XXX: So, we have a a fair number of rules restricting nitrogens and
        # nitrogen-containing residues from coordinating a number of cations.
        #
        # However, this rule is dependent on the protonation of the nitrogen,
        # if the pKa is low at the site, it is possible for a metal to
        # coordinate the residue fine.
        #
        # We want a complex rule that takes into account coordinating geometry,
        # density signal, and the presence of other coordinating atoms that
        # might drop the site's pKa enough to lose the hydrogen.
        if ((ion_params.allowed_coordinating_atoms is not None) and
            (other_element not in ion_params.allowed_coordinating_atoms)):
          self.bad_coords[identity].append(contact)
          inaccuracies.add(self.BAD_COORD_ATOM)
        if (get_class(other_resname) == "common_amino_acid"):
          # limit elements allowed to bind to backbone atoms (mainly carbonyl
          # oxygen)
          if ((other_name in ["C","N","O","CA","H","HA"]) and
              ((ion_params.allowed_backbone_atoms is None) or
               (not other_name in ion_params.allowed_backbone_atoms))):
            if (other_name == "O") and (contact.is_carboxy_terminus):
              pass # C-terminal carboxyl group is allowed
            else :
              self.bad_coords[identity].append(contact)
              inaccuracies.add(self.BAD_COORD_ATOM)
          # Check if atom is of an allowed residue type, if part of a sidechain
          if (ion_params.allowed_coordinating_residues is not None):
            allowed = ion_params.allowed_coordinating_residues
            if ((not other_resname in allowed) and
                (other_name not in ["C", "O", "N", "CA", "OXT"])):
              # XXX probably just O
              self.bad_coords[identity].append(contact)
              inaccuracies.add(self.BAD_COORD_RESIDUE)
        # NOTE(review): cmp() is a Python-2-only builtin; under Python 3 the
        # line below raises NameError. Confirm the intended interpreter, or
        # replace with an explicit sign comparison.
        elif (cmp(0, mmtbx.ions.server.get_charge(contact.atom)) ==
              cmp(0, ion_params.charge)):
          # Check if coordinating atom is of opposite charge
          self.bad_coords[identity].append(contact)
          inaccuracies.add(self.LIKE_COORD)
        elif (ion_params.charge > 0 and
              other_element in ["N"] and
              other_resname in ["LYS", "ARG", "ASN", "GLN"]):
          # Coordinating nitrogen most likely positive.
          #
          # Ignore nitrogens without a charge label that are on positively
          # charged amino acids.
          self.bad_coords[identity].append(contact)
          inaccuracies.add(self.LIKE_COORD)
    # Check the number of coordinating waters
    if (n_non_water < ion_params.min_coordinating_non_waters):
      inaccuracies.add(self.TOO_FEW_NON_WATERS)
    # Check the geometry of the coordinating atoms
    if ion_params.allowed_geometries and strict_rules:
      allowed = [i[0] in ion_params.allowed_geometries
                 for i in self.geometries]
      if "any" in ion_params.allowed_geometries:
        pass
      elif not self.geometries:
        if strict_rules:
          inaccuracies.add(self.NO_GEOMETRY)
      elif not any(allowed):
        inaccuracies.add(self.BAD_GEOMETRY)
    else:
      # No geometry restrictions (or already non-strict): relax later checks.
      strict_rules = False
    # If no distinct geometry, check that none of the coordinating have distinct
    # geometry, either
    if self.geometries == []:
      for contact in self.nearby_atoms:
        o_atom = contact.atom
        if o_atom.i_seq in self.manager.atoms_to_props:
          o_geometry = self.manager.atoms_to_props[o_atom.i_seq].geometries
          if o_geometry != []:
            inaccuracies.add(self.COORDING_GEOMETRY)
    # Check for reasonable vector/valence values
    vectors = mmtbx.ions.server.calculate_valences(ion_params,
      self.nearby_atoms)
    self.vectors[identity] = vectors
    self.valence_sum[identity] = sum([abs(i) for i in vectors])
    self.vector_sum[identity] = abs(sum(vectors, col((0, 0, 0))))
    if self.vector_sum[identity] > ion_params.vec_sum_cutoff:
      if (strict_rules):
        inaccuracies.add(self.BAD_VECTORS)
      else :
        ignored.add(self.BAD_VECTORS)
    # XXX I am not sure how low a valence sum we want to allow, but many
    # structures with non-physiological cation binding have partial and/or
    # irregular coordination shells
    if (self.valence_sum[identity] < ion_params.cvbs_expected * 0.25 or
        self.valence_sum[identity] > ion_params.cvbs_expected * 1.25):
      inaccuracies.add(self.VERY_BAD_VALENCES)
    else:
      if (self.valence_sum[identity] < ion_params.cvbs_lower or
          self.valence_sum[identity] > ion_params.cvbs_upper):
        if strict_rules:
          inaccuracies.add(self.BAD_VALENCES)
        else :
          ignored.add(self.BAD_VALENCES)
    # Lower score = closer agreement with the expected valence sum.
    self.score[identity] = abs(self.valence_sum[identity] -
      ion_params.cvbs_expected)
  # FIXME this really needs to be refactored and combined with the method
  # is_compatible_anomalous_scattering
  def check_fpp_ratio(self,
      ion_params,
      wavelength,
      fpp_ratio_min = 0.3,
      fpp_ratio_max = 1.05):
    """
    Compare the refined and theoretical f'' values if available.

    An out-of-range ratio adds the BAD_FPP flag to
    self.inaccuracies[identity] as a side effect.

    Parameters
    ----------
    ion_params : mmtbx.ions.metal_parameters
    wavelength : float
    fpp_ratio_min : float, optional
    fpp_ratio_max : float, optional

    Returns
    -------
    float
      f'' / f''_expected, or None if no ratio could be computed
    """
    identity = str(ion_params)
    inaccuracies = self.inaccuracies.get(identity, None)
    if (inaccuracies is None):
      inaccuracies = self.inaccuracies[identity] = set()
    if (ion_params.element.upper() in ["MG", "NA"]):
      # Light elements: any significant refined f'' or anomalous peak is
      # inconsistent with the assignment.
      if (self.fpp is not None) or (self.peak_anom > 1):
        inaccuracies.add(self.BAD_FPP)
    else :
      # XXX in theory the fpp_ratio should be no more than 1.0 unless we are
      # right on the peak wavelength. in practice Phaser can overshoot a little
      # bit, so we need to be more tolerant. picking the maximum f'' from the
      # Sasaki and Henke tables will also limit the ratio.
      if (wavelength is not None) and (self.anomalous_flag):
        # sasaki/henke are presumably cctbx.eltbx scattering tables imported
        # at module level — TODO confirm.
        fpp_expected_sasaki = sasaki.table(ion_params.element).at_angstrom(
          wavelength).fdp()
        fpp_expected_henke = henke.table(ion_params.element).at_angstrom(
          wavelength).fdp()
        self.fpp_expected[identity] = max(fpp_expected_sasaki,
          fpp_expected_henke)
        if (self.fpp is not None) and (self.fpp_expected[identity] != 0):
          self.fpp_ratios[identity] = self.fpp / self.fpp_expected[identity]
          if ((self.fpp_ratios[identity] > fpp_ratio_max) or
              ((self.fpp >= 0.2) and
               (self.fpp_ratios[identity] < fpp_ratio_min))):
            inaccuracies.add(self.BAD_FPP)
        elif (self.fpp_expected[identity] > 0.75) and (self.peak_anom < 2):
          # Strong expected signal but weak observed anomalous peak.
          inaccuracies.add(self.BAD_FPP)
    return self.fpp_ratios.get(identity)
def show_properties(self, identity, out = sys.stdout):
"""
Show atomic properties that are independent of the suspected identity.
Parameters
----------
identity : mmtbx.ions.metal_parameters
out : | |
# clip data to time window NOW
# dt = 1000.0 * acqr.sample_interval
dt_seconds = acqr.sample_interval
min_index = int(self.min_time / dt_seconds)
if self.max_time > 0.0:
max_index = int(self.max_time / dt_seconds)
else:
max_index = data.shape[1]
data = data[:, min_index:max_index]
time_base = acqr.time_base[min_index:max_index]
time_base = time_base - self.min_time
if not datanameposted and not check:
self.P.figure_handle.suptitle(f"{mouse:s}\n{str(mousedata):s}\n{self.cell_summary['genotype']:s}",
fontsize=8,
weight="normal",
)
datanameposted = True
# data = data * 1e12 # convert to pA
maxt = np.max(time_base)
tracelist, nused = self.analyze_protocol_traces(
mode=mode, data=data, time_base=time_base,
maxt=maxt, dt_seconds=dt_seconds,
mousedata=mousedata, ntr=ntr
)
ntr = ntr + len(tracelist) # - len(exclude_traces)
if nused == 0:
continue
if check:
return None # no processing
# summarize the event and amplitude distributions
# For the amplitude, the data are fit against normal,
# skewed normal and gamma distributions.
self.plot_hists()
# show to sceen or save plots to a file
if pdf is None:
mpl.show()
else:
pdf.savefig(dpi=300) # rasterized to 300 dpi is ok for documentation.
mpl.close()
self.plot_individual_events(
fit_err_limit=50.0,
title=f"{str(mousedata):s} {self.cell_summary['mouse']:s} {self.cell_summary['genotype']:s}",
pdf=pdf,
)
    def analyze_protocol_traces(
        self, mode:str='cb', data:Union[object, None]=None,
        time_base:Union[np.ndarray, None]=None,
        maxt:float=0., dt_seconds:Union[float, None] = None,
        mousedata:Union[dict, None] = None,
        ntr:int=0
    ):
        """
        Perform mini-event analysis on all the traces in one protocol.

        Detects events with either the Andrade-Jonas deconvolution ("aj") or
        Clements-Bekkers template-match ("cb") detector, accumulates the
        results into self.cell_summary, and plots each trace with its
        detected peaks on self.ax0.

        mode: str
            which event detector to use: "aj" or "cb"
        data: 2-D array of traces (trace index, sample)
        time_base: sample times for one trace
        maxt: maximum time of the (clipped) trace window
        dt_seconds: sample interval in seconds
        mousedata: dict of detection parameters ("rt", "decay", "thr", "lpf")
        ntr: running trace-count offset used for plot stacking

        Returns: (tracelist, nused)
        """
        assert data is not None
        assert time_base is not None
        assert dt_seconds is not None
        order = int(1e-3 / dt_seconds)  # ~1 ms expressed in samples
        ntraces = data.shape[0]
        tracelist = list(range(ntraces))
        self.ax0.set_xlim(-1.0, np.max(time_base))
        nused = 0
        # for all of the traces collected in this protocol that
        # are accepted (not excluded)
        tasks = []  # NOTE(review): built but never used below — leftover?
        for i in tracelist:
            tasks.append(i)
        print("*** Mode: ", mode)
        if mode == "aj":
            aj = minis.AndradeJonas()
            aj.setup(
                ntraces=ntraces,
                tau1=mousedata["rt"],
                tau2=mousedata["decay"],
                template_tmax=maxt,
                dt_seconds=dt_seconds,
                delay=0.0,
                sign=self.sign,
                risepower=1.0,
                min_event_amplitude=self.min_event_amplitude,
                threshold=float(mousedata["thr"]),
            )
        elif mode == "cb":
            cb = minis.ClementsBekkers()
            cb.setup(
                ntraces=ntraces,
                tau1=mousedata["rt"],
                tau2=mousedata["decay"],
                template_tmax=3.0 * mousedata["decay"],
                dt_seconds=dt_seconds,
                delay=0.0,
                sign=self.sign,
                risepower=1.0,
                min_event_amplitude=self.min_event_amplitude,
                threshold=float(mousedata["thr"]),
            )
            cb.set_cb_engine("cython")
        else:
            raise ValueError("Mode must be aj or cb for event detection")
        # First pass: baseline-correct, optionally filter, and run the
        # detector on every trace.
        for i in tracelist:
            yp = (ntr + i) * self.yspan
            # NOTE(review): "ntr * i" differs from the "ntr + i" used for yp
            # above — possibly a typo; confirm intended stacking offset.
            ypq = (ntr * i) * self.ypqspan
            linefit = np.polyfit(time_base, data[i], 1)
            refline = np.polyval(linefit, time_base)
            holding = self.measure_baseline(data[i])
            data[i] = data[i] - refline # linear correction
            self.ax0.text(
                -1.2, yp, "%03d %d" % (self.dprot, i), fontsize=8
            ) # label the trace
            # if i in exclude_traces: # plot the excluded traces, but do not analyze them
            #     print(" **** Trace {0:d} excluded in list".format(i))
            #     # self.ax0.plot(self.acqr.time_base*1000., odata + yp ,'y-',
            #     #     linewidth=0.25, alpha=0.25, rasterized=self.rasterize)
            #     continue
            if holding < self.dataplan_data["holding"]:
                # self.ax0.plot(self.acqr.time_base*1000., odata + yp ,'m-',
                #     linewidth=0.25, alpha=0.25, rasterized=self.rasterize)
                # NOTE(review): message says the trace is excluded, but there
                # is no `continue` here, so the trace IS still analyzed —
                # confirm whether exclusion was intended.
                print(
                    " >>>> Trace {0:d} excluded for holding {1:.3f}".format(
                        i, holding
                    )
                )
            nused = nused + 1
            if self.filter: # apply notch filter to traces
                # Remove line noise (60 Hz and harmonics) plus a few other
                # fixed interference frequencies.
                dfilt = DF.NotchFilter(
                    data[i],
                    [
                        60.0,
                        120.0,
                        180.0,
                        240.0,
                        300.0,
                        360.0,
                        420.0,
                        480.0,
                        660.0,
                        780.0,
                        1020.0,
                        1140.0,
                        1380.0,
                        1500.0,
                        4000.0,
                    ],
                    Q=20.0,
                    samplefreq=1.0 / dt_seconds,
                )
                if i == 0:
                    print('Notch filtering applied')
                data[i] = dfilt
            if mousedata['lpf'] is not None: # bessell is not recommended...
                dfilt = DF.SignalFilter_LPFButter(data[i], mousedata['lpf'], 1./dt_seconds, NPole=8)
                data[i] = dfilt
                if i == 0:
                    print('Bessel LPF iltering applied at ', mousedata['lpf'])
            else:
                CP("r", "NO Low Pass Filtering applied")
            if mode == "aj":
                aj.deconvolve(
                    data[i], itrace=i, llambda=10.0
                )  # , order=order) # threshold=float(mousedata['thr']),
            else:
                cb.cbTemplateMatch(
                    data[i], itrace=i,  # order=order, # threshold=float(mousedata["thr"]),
                )
        # Second stage: identify events across all traces and summarize.
        if mode == "aj":
            aj.identify_events(order=order)
            aj.summarize(np.array(data))
            # print(aj.Summary)
            method = aj
        elif mode == "cb":
            cb.identify_events(outlier_scale=3.0, order=101)
            cb.summarize(np.array(data))
            method = cb
        # print('aj onsets2: ', method.onsets)
        # Record the averaged event and its fit for this protocol.
        self.cell_summary['averaged'].extend([{'tb': method.Summary.average.avgeventtb,
                                               'avg': method.Summary.average.avgevent,
                                               'fit': {'amplitude': method.Amplitude,
                                                       'tau1': method.fitted_tau1,
                                                       'tau2': method.fitted_tau2,
                                                       'risepower': method.risepower},
                                               'best_fit': method.avg_best_fit,
                                               'risetenninety': method.Summary.average.risetenninety,
                                               'decaythirtyseven': method.Summary.average.decaythirtyseven,
                                               'Qtotal': method.Summary.Qtotal}])
        # Per-trace statistics. NOTE(review): `holding` here is the value
        # left over from the LAST trace of the loop above — confirm this is
        # intended for every appended entry.
        for i in tracelist:
            intervals = np.diff(method.timebase[method.onsets[i]])
            self.cell_summary['intervals'].extend(intervals)
            self.cell_summary['amplitudes'].extend(method.sign*data[i][method.Summary.smpkindex[i]]) # smoothed peak amplitudes
            self.cell_summary['protocols'].append((self.nprot, i))
            self.cell_summary['eventcounts'].append(len(intervals))
            self.cell_summary['holding'].append(holding)
            self.cell_summary['sign'].append(method.sign)
            self.cell_summary['threshold'].append(mousedata['thr'])
        # method.fit_individual_events() # fit_err_limit=2000., tau2_range=2.5) # on the data just analyzed
        # self.cell_summary['indiv_amp'].append(method.ev_amp)
        # self.cell_summary['indiv_fitamp'].append(method.ev_fitamp)
        # self.cell_summary['indiv_tau1'].append(method.ev_tau1)
        # self.cell_summary['indiv_tau2'].append(method.ev_tau2)
        # self.cell_summary['indiv_fiterr'].append(method.fiterr)
        # self.cell_summary['fitted_events'].append(method.fitted_events)
        # self.cell_summary['indiv_Qtotal'].append(method.ev_Qtotal)
        # self.cell_summary['indiv_evok'].append(method.events_ok)
        # self.cell_summary['indiv_notok'].append(method.events_notok)
        # self.cell_summary['allevents'].append(np.array(method.allevents))
        # self.cell_summary['best_fit'].append(np.array(method.best_fit))
        # self.cell_summary['best_decay_fit'].append(np.array(method.best_decay_fit))
        #
        #
        # for jev in range(len(method.allevents)):
        #     self.cell_summary['allevents'].append(method.Summary.allevents[jev])
        #     self.cell_summary['best_fit'].append(method.best_fit[jev])
        # self.cell_summary['indiv_tb'].append(aj.avgeventtb)
        # Plotting pass: traces in A, smoothed peaks in pA (scf converts to pA).
        scf = 1e12
        for i, a in enumerate(data):
            method.reset_filtering()
            method.prepare_data(data[i])
            yp = (ntr + i) * self.yspan
            # NOTE(review): same "ntr * i" vs "ntr + i" question as above.
            ypq = (ntr * i) * self.ypqspan
            linefit = np.polyfit(time_base, data[i], 1)
            refline = np.polyval(linefit, time_base)
            jtr = method.Summary.event_trace_list[
                i
            ]  # get trace and event number in trace
            if len(jtr) == 0:
                continue
            peaks = method.Summary.smpkindex[i]
            onsets = method.Summary.onsets[i]
            # print('pk, on, dt: ', pk, on, method.dt_seconds)
            onset_times = np.array(onsets) * method.dt_seconds
            peak_times = np.array(peaks) * method.dt_seconds
            # self.ax0.plot(method.timebase, data[i] + yp ,'c-',
            #     linewidth=0.25, alpha=0.25, rasterized=self.rasterize)
            self.ax0.plot(
                method.timebase, scf*method.data + yp, "k-", linewidth=0.25, rasterized=self.rasterize
            )
            # self.ax0.plot(method.timebase[pkindex], data[i][pkindex] + yp,
            #     'ro', markersize=1.75, rasterized=self.rasterize)
            # self.ax0.plot(aj.timebase[aj.smpkindex], data[i][aj.smpkindex] + yp,
            #     'ro', markersize=1.75, rasterized=self.rasterize)
            self.ax0.plot(
                peak_times,
                scf*method.data[peaks] + yp, # method.Summary.smoothed_peaks[jtr[0]][jtr[1]] + yp,
                "ro",
                markersize=1.,
                rasterized=self.rasterize,
                alpha=0.5,
            )
            # self.ax0.plot(
            #     onset_times,
            #     scf*data[i][onsets] + yp,
            #     "y^",
            #     markersize=1.5,
            #     rasterized=self.rasterize,
            # )
            # Optional deconvolution criterion panel; note this plots the
            # "aj" detector's data regardless of the selected mode.
            if "A1" in self.P.axdict.keys():
                self.axdec.plot(
                    aj.timebase[: aj.Crit.shape[0]], aj.Crit, label="Deconvolution"
                )
                self.axdec.plot(
                    [aj.timebase[0], aj.timebase[-1]],
                    [aj.sdthr, aj.sdthr],
                    "r--",
                    linewidth=0.75,
                    label="Threshold ({0:4.2f}) SD".format(aj.sdthr),
                )
                self.axdec.plot(
                    aj.timebase[aj.onsets] - aj.idelay,
                    ypq + aj.Crit[aj.onsets],
                    "y^",
                    label="Deconv. Peaks",
                )
            # axdec.plot(aj.timebase, aj.Crit+ypq, 'k', linewidth=0.5, rasterized=self.rasterize)
        # print("--- finished run %d/%d ---" % (i + 1, tot_runs))
        return tracelist, nused
    def plot_individual_events(
        self, fit_err_limit=1000.0, tau2_range=2.5, title="", pdf=None
    ):
        """Scatter per-event fit parameters and overlay event traces/fits.

        Builds a 3x3 panel grid (labels A-H via PH.regular_grid) from
        self.cell_summary: accepted events ("indiv_evok") appear as black
        parameter scatters (tau1/tau2/amp/fiterr/Qtotal) and as black
        data / red fit line collections in panel G; rejected events
        ("indiv_notok") appear in red in panel D and as blue/yellow line
        collections in panel E; panel F shows the averaged event(s) with
        their best fits.

        Args:
            fit_err_limit: NOTE(review) — unused in this body; TODO confirm.
            tau2_range: NOTE(review) — unused in this body; TODO confirm.
            title: figure suptitle.
            pdf: when given, save the figure to this PdfPages object;
                otherwise show interactively.
        """
        P = PH.regular_grid(
            3,
            3,
            order="columnsfirst",
            figsize=(8.0, 8.0),
            showgrid=False,
            verticalspacing=0.1,
            horizontalspacing=0.12,
            margins={
                "leftmargin": 0.12,
                "rightmargin": 0.12,
                "topmargin": 0.03,
                "bottommargin": 0.1,
            },
            labelposition=(-0.12, 0.95),
        )
        P.figure_handle.suptitle(title)
        all_evok = self.cell_summary[
            "indiv_evok"
        ]  # list of accepted events: one sub-list of event indices per trace
        all_notok = self.cell_summary["indiv_notok"]
        # print('all evok: ', all_evok)
        # print('len allevok: ', len(all_evok))
        #
        # # print('all_notok: ', all_notok)
        # # print('indiv tau1: ', self.cell_summary['indiv_tau1'])
        # exit(1)
        # Collect (time, value) segments so all traces/fits can be drawn
        # as single LineCollections instead of per-event plot calls.
        trdat = []
        trfit = []
        trdecfit = []
        for itr in range(len(all_evok)):  # for each trace
            for evok in all_evok[itr]:  # for each ok event in that trace
                P.axdict["A"].plot(
                    self.cell_summary["indiv_tau1"][itr][evok],
                    self.cell_summary["indiv_amp"][itr][evok],
                    "ko",
                    markersize=3,
                )
                P.axdict["B"].plot(
                    self.cell_summary["indiv_tau2"][itr][evok],
                    self.cell_summary["indiv_amp"][itr][evok],
                    "ko",
                    markersize=3,
                )
                P.axdict["C"].plot(
                    self.cell_summary["indiv_tau1"][itr][evok],
                    self.cell_summary["indiv_tau2"][itr][evok],
                    "ko",
                    markersize=3,
                )
                P.axdict["D"].plot(
                    self.cell_summary["indiv_amp"][itr][evok],
                    self.cell_summary["indiv_fiterr"][itr][evok],
                    "ko",
                    markersize=3,
                )
                P.axdict["H"].plot(
                    self.cell_summary["indiv_tau1"][itr][evok],
                    self.cell_summary["indiv_Qtotal"][itr][evok],
                    "ko",
                    markersize=3,
                )
                trdat.append(
                    np.column_stack(
                        [
                            self.cell_summary["indiv_tb"][itr],
                            self.cell_summary["allevents"][itr][evok],
                        ]
                    )
                )
                # idl = len(self.cell_summary['best_decay_fit'][itr][evok])
                # Fits are stored inverted relative to the data, hence the
                # leading minus signs below.
                trfit.append(
                    np.column_stack(
                        [
                            self.cell_summary["indiv_tb"][itr],
                            -self.cell_summary["best_fit"][itr][evok],
                        ]
                    )
                )
                trdecfit.append(
                    np.column_stack(
                        [
                            self.cell_summary["indiv_tb"][itr],
                            -self.cell_summary["best_decay_fit"][itr][evok],
                        ]
                    )
                )
        dat_coll = collections.LineCollection(trdat, colors="k", linewidths=0.5)
        fit_coll = collections.LineCollection(trfit, colors="r", linewidths=0.25)
        # decay_fit_coll = collections.LineCollection(trdecfit, colors='c', linewidths=0.3)
        P.axdict["G"].add_collection(dat_coll)
        P.axdict["G"].add_collection(fit_coll)
        # P.axdict['G'].add_collection(decay_fit_coll)
        # Rejected events: data (blue) and fits (yellow) go to panel E,
        # and their amp/fit-error points are shown in red on panel D.
        n_trdat = []
        n_trfit = []
        for itr in range(len(all_notok)):
            for notok in all_notok[itr]:
                n_trdat.append(
                    np.column_stack(
                        [
                            self.cell_summary["indiv_tb"][itr],
                            self.cell_summary["allevents"][itr][notok],
                        ]
                    )
                )
                n_trfit.append(
                    np.column_stack(
                        [
                            self.cell_summary["indiv_tb"][itr],
                            -self.cell_summary["best_fit"][itr][notok],
                        ]
                    )
                )
                P.axdict["D"].plot(
                    self.cell_summary["indiv_amp"][itr][notok],
                    self.cell_summary["indiv_fiterr"][itr][notok],
                    "ro",
                    markersize=3,
                )
        n_dat_coll = collections.LineCollection(n_trdat, colors="b", linewidths=0.35)
        n_fit_coll = collections.LineCollection(n_trfit, colors="y", linewidths=0.25)
        P.axdict["E"].add_collection(n_dat_coll)
        P.axdict["E"].add_collection(n_fit_coll)
        # Axis labels and fixed display windows for the trace panels.
        P.axdict["A"].set_xlabel(r"$tau_1$ (ms)")
        P.axdict["A"].set_ylabel(r"Amp (pA)")
        P.axdict["B"].set_xlabel(r"$tau_2$ (ms)")
        P.axdict["B"].set_ylabel(r"Amp (pA)")
        P.axdict["C"].set_xlabel(r"$\tau_1$ (ms)")
        P.axdict["C"].set_ylabel(r"$\tau_2$ (ms)")
        P.axdict["D"].set_xlabel(r"Amp (pA)")
        P.axdict["D"].set_ylabel(r"Fit Error (cost)")
        P.axdict["H"].set_xlabel(r"$\tau_1$ (ms)")
        P.axdict["H"].set_ylabel(r"Qtotal")
        P.axdict["G"].set_ylim((-100.0, 20.0))
        P.axdict["G"].set_xlim((-2.0, 25.0))
        P.axdict["E"].set_ylim((-100.0, 20.0))
        P.axdict["E"].set_xlim((-2.0, 25.0))
        # put in averaged event too
        # self.cell_summary['averaged'].extend([{'tb': aj.avgeventtb,
        #     'avg': aj.avgevent, 'fit': {'amplitude': aj.Amplitude,
        #     'tau1': aj.tau1, 'tau2': aj.tau2, 'risepower': aj.risepower}, 'best_fit': aj.avg_best_fit,
        #     'risetenninety': aj.risetenninety, 'decaythirtyseven': aj.decaythirtyseven}])
        aev = self.cell_summary["averaged"]
        for i in range(len(aev)):
            P.axdict["F"].plot(aev[i]["tb"], aev[i]["avg"], "k-", linewidth=0.8)
            P.axdict["F"].plot(aev[i]["tb"], aev[i]["best_fit"], "r--", linewidth=0.4)
        if pdf is None:
            mpl.show()
        else:
            pdf.savefig(dpi=300)
            mpl.close()
def plot_all_events_and_fits(self):
P3 = PH.regular_grid(
1,
5,
order="columns",
figsize=(12, 8.0),
showgrid=False,
verticalspacing=0.1,
horizontalspacing=0.02,
margins={
"leftmargin": 0.07,
"rightmargin": 0.05,
"topmargin": 0.03,
"bottommargin": 0.05,
},
labelposition=(-0.12, 0.95),
)
idx = [a for a in P3.axdict.keys()]
offset2 = 0.0
k = 0
all_evok = self.cell_summary[
"indiv_evok"
] # this is the list of ok events - a 2d list by
for itr in range(len(all_evok)): # for each trace
for evok in all_evok[itr]: # for each ok event in that trace
P3.axdict[idx[k]].plot(
[
self.cell_summary["indiv_tb"][itr][0],
self.cell_summary["indiv_tb"][itr][-1],
],
np.zeros(2) + offset2,
"b--",
linewidth=0.3,
)
P3.axdict[idx[k]].plot(
self.cell_summary["indiv_tb"][itr],
self.cell_summary["allevents"][itr][evok] + offset2,
"k--",
linewidth=0.5,
)
P3.axdict[idx[k]].plot(
self.cell_summary["indiv_tb"][itr],
-self.cell_summary["best_fit"][itr][evok] + offset2,
"r--",
linewidth=0.5,
)
if k | |
(dict{str: str}): Dictionary of pairs to find and
replace. ex: {'find': 'replace'}.
skip (list(str), optional): List of values to ignore when
replacing. Defaults to None.
startrow (int, optional): Starting row number where values
begin. Defaults to 1.
Returns:
self: Xlsx object.
"""
if not skip:
skip = []
for row, cell in enumerate(self.ws[col.upper()], 1):
if row >= startrow:
if cell.value and str(cell.value).lower() not in skip:
for find, replace in fndrplc.items():
if find in str(cell.value):
self.ws[
f'{col.upper()}{row}'
] = str(cell.value).replace(
find, replace)
return self
def move_values(self, scol: str, tcol: str,
vals: list, startrow: int = 1) -> object:
"""Search source column for passed list of values and
move them to target column.
Args:
scol (str): Source column letter to search for values.
ex: 'A'
tcol (str): Target column letter to move located values to.
ex: 'B'
vals (list(str)): List of str values to move.
ex: ('name', '20')
startrow (int, optional): Starting row number where values
begin. Defaults to 1.
Returns:
self: Xlsx object.
"""
for row, cell in enumerate(self.ws[scol.upper()], 1):
if cell.value and row >= startrow:
for item in vals:
if item in str(cell.value):
self.ws[f'{tcol.upper()}{row}'] = item
self.ws[
f'{scol.upper()}{row}'] = cell.value.replace(
item, '')
break
return self
def reverse_text(self, datacol: str = "A",
startrow: int = 1, separator: str = ",") -> object:
"""Get values from specified column, split them on specified separator,
reverse the value's order and write them back to the cell minus the
separator. ex: Last, First -> First Last
Args:
datacol (str, optional): Excel column with values.
Defaults to "A".
startrow (int, optional): Excel row where values begin.
Defaults to 1.
separator (str, optional): Text separator to split on.
Defaults to ",".
Returns:
self: Xlsx object
"""
for row, cell in enumerate(self.ws[datacol.upper()], 1):
if row < startrow or not cell.value or separator not in cell.value:
continue
# Swap info and write back to cell
split_value = str(cell.value).split(separator)
self.ws[
f"{datacol.upper()}{row}"
] = f"{split_value[1].strip()} {split_value[0].strip()}"
return self
def remove_non_numbers(self,
datacol: str, startrow: int = 1,
stoprow: int = None, skip: list = []) -> object:
"""Get values from a specified column that should contain only
numbers. Remove any characters that are non-numbers and write
the new values back to the cells (as a string value). If a list is
passed to skip, check this list first before processing and skip
the cell if it matches an entry in the list.
Args:
datacol (str): Excel column with values to clean.
startrow (int): Excel row where values begin.
stoprow (int, optional): Excel row to stop cleaning values.
Defaults to None.
skip (list, optional): List of string values to skip if
found in the specified cells. Defaults to an empty list.
Returns:
self: Xlsx object
"""
for row, cell in enumerate(self.ws[f"{datacol.upper()}"], 1):
if stoprow and row == stoprow:
break
if row < startrow or not cell.value:
continue
if str(cell.value).lower() in skip:
continue
# Read and clean the data leaving only numbers
new_value = cell.value
for char in str(cell.value):
if char.isnumeric():
continue
new_value = str(cell.value).replace(char, "")
# Replace cell value with new version
self.ws[f"{datacol.upper()}{row}"] = new_value
return self
def get_matching_value(self, srchcol: str, srchval: str,
retcol: str, startrow: int = 1) -> str:
"""Search column for a value and return the corresponding value
from another column in the same row.
Args:
srchcol (str): Column letter to search for a value. ex: 'A'
srchval (str): Value to search column for. ex: 'Total'
retcol (str): Column letter containing the corresponding
value to be returned. ex: 'B'
startrow (int, optional): Starting row number where values
begin. Defaults to 1.
Returns:
str: Value from corresponding cell in the same row as search
value. Returns False if value search value is not found.
"""
for row, cell in enumerate(self.ws[srchcol.upper()], 1):
if row >= startrow and cell.value:
if srchval in str(cell.value):
return self.ws[f'{retcol.upper()}{row}'].value
return False
    def search_matching_value(self, header_srch_value: str,
                              row_srch_value: str) -> str:
        """Searches cells by row for header search value and row search
        value, and returns corresponding cell value matching both as
        a string.

        Args:
            header_srch_value (str): Header name to search for.
            row_srch_value (str): Row name to search for.

        Returns:
            str: Matching (intersecting) value corresponding to the
                searched header and row value. Returns False if not found.
        """
        # search_column holds the 1-based column index where the header
        # text was found; search_row flips truthy once the row label is
        # seen (and stays set for the remaining rows).
        search_column, search_row = 0, 0
        for row in self.ws.iter_rows():
            for cell_number, cell_data in enumerate(row, 1):
                if cell_data.value == header_srch_value:
                    # NOTE(review): '+=' accumulates indices if the header
                    # text appears more than once; looks like '=' was
                    # intended — confirm against callers before changing.
                    search_column += cell_number
                if cell_data.value == row_srch_value:
                    search_row = True
                if search_row:
                    # Intersection: we are on (or past) the labeled row and
                    # at the header's column.
                    if cell_number == search_column:
                        return str(cell_data.value)
        # In case search isn't located.
        return False
def verify_length(self, col: str, length: int, fillcolor: str,
skip: list = None, startrow: int = 1,
stoprow: int = None) -> object:
"""Cycle through values in a column to verify their length marking
cells of an incorrect length with a background fill color.
Args:
col (str): Column to search for values. ex: 'B'
length (int): Total character length for correct values.
fillcolor (str): Background fill color selection from COLORS
dict.
skip (list(str), optional): List of string values to skip
when evaluating. Defaults to None.
startrow (int, optional): Starting row number where values
begin. Defaults to 1.
stoprow (int, optional): Ending row number where values end.
Defaults to None.
Returns:
self: Xlsx object.
"""
if not stoprow:
stoprow = self.ws.max_row
if not skip:
skip = []
if COLORS.get(fillcolor.lower()):
for row, cell in enumerate(self.ws[col.upper()], 1):
if startrow <= row <= stoprow:
if cell.value and str(cell.value).lower() not in skip:
if len(str(cell.value)) != length:
self.ws[
f'{col.upper()}{row}'].fill = COLORS.get(
fillcolor.lower())
else:
print(f" Color '{fillcolor}' not available.")
return self
def find_and_highlight_rows(self, col: str, srch: str,
fillcolor: str = 'red',
startrow: int = 1) -> object:
"""Search row for specified str value and fill entire row
with specified background fill color when found.
Args:
col (str): Column to search for value. ex: 'B'
srch (str): Str value to search cells for.
fillcolor (str): Background fill color selection from COLORS
dict.
startrow (int, optional): Starting row number where values
begin. Defaults to 1.
Returns:
self: Xlsx object.
"""
if COLORS.get(fillcolor.lower()):
for row, cell in enumerate(self.ws[col.upper()], 1):
if row >= startrow:
if cell.value and srch.lower() in str(
cell.value).lower():
for each in self.ws[f'{row}:{row}']:
each.fill = COLORS.get(fillcolor.lower())
else:
print(f" Color '{fillcolor}' not available.")
return self
def number_type_fix(self, col: str,
numtype: str, startrow: int = 1) -> object:
"""Quick fix for cells that contain numbers formatted as
text/str data. Cycle through cells replacing str formatted
values with int/float values.
Args:
col (str): Column containing data to convert
numtype (str): 'i' or 'f' indicating which type of number
values the column contains (int/float)
startrow (int, optional): Starting row number where values
begin. Defaults to 1.
Returns:
self: Xlsx object.
"""
for row, cell in enumerate(self.ws[col.upper()], 1):
if cell.value and row >= startrow:
if numtype.lower() == 'i':
self.ws[f'{col.upper()}{row}'] = int(cell.value)
if numtype.lower() == 'f':
self.ws[f'{col.upper()}{row}'] = float(cell.value)
return self
def format_date(self, col: str, startrow: int = 1) -> object:
"""Format str date value to (MM/DD/YYYY).
Args:
col (str): Column containing date values.
startrow (int, optional): Starting row number where values
begin. Defaults to 1.
Returns:
self: Xlsx object.
"""
for row, cell in enumerate(self.ws[col.upper()], 1):
if row >= startrow and cell.value:
self.ws[f'{col.upper()}{row}'] = cell.value.strftime(
'%m/%d/%Y')
return self
def format_currency(self, col: str,
startrow: int = 1, stoprow: int = None) -> object:
"""Format str currency value to ($0,000.00).
Args:
col (str): Column containing currency values to be formatted.
startrow (int, optional): Starting row number where values
begin. Defaults to 1.
stoprow (int, optional): Ending row where values stop.
Defaults to None.
Returns:
self: Xlsx object.
"""
if not stoprow:
stoprow = self.ws.max_row
for row, cell in enumerate(self.ws[col.upper()], 1):
if startrow <= row <= stoprow and cell.value:
cell.number_format = '$#,###.00'
return self
def | |
# (c) 2012, <NAME> <<EMAIL>>
# (c) 2012-2014, <NAME> <<EMAIL>> and others
# (c) 2017, <NAME> <<EMAIL>>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import glob
import imp
import os
import os.path
import sys
import warnings
from collections import defaultdict
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text
from ansible.parsing.utils.yaml import from_yaml
from ansible.plugins import get_plugin_class, MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE
from ansible.utils.plugin_docs import get_docstring
# Reuse the CLI entry point's shared display object when running under
# an ansible command; fall back to a private Display instance when this
# module is imported as a library.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
def get_all_plugin_loaders():
    """Return (name, loader) pairs for every PluginLoader defined at module level."""
    loaders = []
    for var_name, value in globals().items():
        if isinstance(value, PluginLoader):
            loaders.append((var_name, value))
    return loaders
class PluginLoader:
'''
PluginLoader loads plugins from the configured plugin directories.
It searches for plugins by iterating through the combined list of play basedirs, configured
paths, and the python path. The first match is used.
'''
def __init__(self, class_name, package, config, subdir, aliases=None, required_base_class=None):
aliases = {} if aliases is None else aliases
self.class_name = class_name
self.base_class = required_base_class
self.package = package
self.subdir = subdir
# FIXME: remove alias dict in favor of alias by symlink?
self.aliases = aliases
if config and not isinstance(config, list):
config = [config]
elif not config:
config = []
self.config = config
if class_name not in MODULE_CACHE:
MODULE_CACHE[class_name] = {}
if class_name not in PATH_CACHE:
PATH_CACHE[class_name] = None
if class_name not in PLUGIN_PATH_CACHE:
PLUGIN_PATH_CACHE[class_name] = defaultdict(dict)
self._module_cache = MODULE_CACHE[class_name]
self._paths = PATH_CACHE[class_name]
self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name]
self._extra_dirs = []
self._searched_paths = set()
def __setstate__(self, data):
'''
Deserializer.
'''
class_name = data.get('class_name')
package = data.get('package')
config = data.get('config')
subdir = data.get('subdir')
aliases = data.get('aliases')
base_class = data.get('base_class')
PATH_CACHE[class_name] = data.get('PATH_CACHE')
PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE')
self.__init__(class_name, package, config, subdir, aliases, base_class)
self._extra_dirs = data.get('_extra_dirs', [])
self._searched_paths = data.get('_searched_paths', set())
def __getstate__(self):
'''
Serializer.
'''
return dict(
class_name=self.class_name,
base_class=self.base_class,
package=self.package,
config=self.config,
subdir=self.subdir,
aliases=self.aliases,
_extra_dirs=self._extra_dirs,
_searched_paths=self._searched_paths,
PATH_CACHE=PATH_CACHE[self.class_name],
PLUGIN_PATH_CACHE=PLUGIN_PATH_CACHE[self.class_name],
)
def format_paths(self, paths):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in paths:
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def print_paths(self):
return self.format_paths(self._get_paths(subdirs=False))
def _all_directories(self, dir):
results = []
results.append(dir)
for root, subdirs, files in os.walk(dir, followlinks=True):
if '__init__.py' in files:
for x in subdirs:
results.append(os.path.join(root, x))
return results
def _get_package_paths(self, subdirs=True):
''' Gets the path of a Python package '''
if not self.package:
return []
if not hasattr(self, 'package_path'):
m = __import__(self.package)
parts = self.package.split('.')[1:]
for parent_mod in parts:
m = getattr(m, parent_mod)
self.package_path = os.path.dirname(m.__file__)
if subdirs:
return self._all_directories(self.package_path)
return [self.package_path]
    def _get_paths(self, subdirs=True):
        ''' Return a list of paths to search for plugins in.

        Search precedence (the resulting order is the search order):
        extra dirs added via add_directory(), then configured paths (with
        one level of subcategory dirs), then the package tree, with any
        "windows" directories moved to the very end.  The computed list
        is cached on the instance/class cache.
        '''
        # FIXME: This is potentially buggy if subdirs is sometimes True and sometimes False.
        # In current usage, everything calls this with subdirs=True except for module_utils_loader and ansible-doc
        # which always calls it with subdirs=False.  So there currently isn't a problem with this caching.
        if self._paths is not None:
            return self._paths

        ret = self._extra_dirs[:]

        # look in any configured plugin paths, allow one level deep for subcategories
        if self.config is not None:
            for path in self.config:
                path = os.path.realpath(os.path.expanduser(path))
                if subdirs:
                    contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path)
                    for c in contents:
                        if os.path.isdir(c) and c not in ret:
                            ret.append(c)
                if path not in ret:
                    ret.append(path)

        # look for any plugins installed in the package subtree
        # Note package path always gets added last so that every other type of
        # path is searched before it.
        ret.extend(self._get_package_paths(subdirs=subdirs))

        # HACK: because powershell modules are in the same directory
        # hierarchy as other modules we have to process them last.  This is
        # because powershell only works on windows but the other modules work
        # anywhere (possibly including windows if the correct language
        # interpreter is installed).  the non-powershell modules can have any
        # file extension and thus powershell modules are picked up in that.
        # The non-hack way to fix this is to have powershell modules be
        # a different PluginLoader/ModuleLoader.  But that requires changing
        # other things too (known thing to change would be PATHS_CACHE,
        # PLUGIN_PATHS_CACHE, and MODULE_CACHE.  Since those three dicts key
        # on the class_name and neither regular modules nor powershell modules
        # would have class_names, they would not work as written.
        # Partition: every path ending in 'windows' is pushed to the back.
        reordered_paths = []
        win_dirs = []

        for path in ret:
            if path.endswith('windows'):
                win_dirs.append(path)
            else:
                reordered_paths.append(path)
        reordered_paths.extend(win_dirs)

        # cache and return the result
        self._paths = reordered_paths
        return reordered_paths
def _load_config_defs(self, name, path):
''' Reads plugin docs to find configuration setting definitions, to push to config manager for later use '''
# plugins w/o class name don't support config
if self.class_name:
type_name = get_plugin_class(self.class_name)
# if type name != 'module_doc_fragment':
if type_name in C.CONFIGURABLE_PLUGINS:
dstring = get_docstring(path, fragment_loader, verbose=False, ignore_errors=True)[0]
if dstring and 'options' in dstring and isinstance(dstring['options'], dict):
C.config.initialize_plugin_configuration_definitions(type_name, name, dstring['options'])
display.debug('Loaded config def from plugin (%s/%s)' % (type_name, name))
def add_directory(self, directory, with_subdir=False):
''' Adds an additional directory to the search path '''
directory = os.path.realpath(directory)
if directory is not None:
if with_subdir:
directory = os.path.join(directory, self.subdir)
if directory not in self._extra_dirs:
# append the directory and invalidate the path cache
self._extra_dirs.append(directory)
self._paths = None
display.debug('Added %s to loader search path' % (directory))
    def _find_plugin(self, name, mod_type='', ignore_deprecated=False, check_aliases=False):
        ''' Find a plugin named name.

        Returns the full path of the plugin file, or None when the plugin
        is filtered out or cannot be located.  Results are memoized in the
        per-suffix plugin path cache; directories are scanned at most once.
        '''
        global _PLUGIN_FILTERS
        # Rejected by the configured plugin filter list for this package.
        if name in _PLUGIN_FILTERS[self.package]:
            return None

        if mod_type:
            suffix = mod_type
        elif self.class_name:
            # Ansible plugins that run in the controller process (most plugins)
            suffix = '.py'
        else:
            # Only Ansible Modules.  Ansible modules can be any executable so
            # they can have any suffix
            suffix = ''

        if check_aliases:
            name = self.aliases.get(name, name)

        # The particular cache to look for modules within.  This matches the
        # requested mod_type
        pull_cache = self._plugin_path_cache[suffix]
        try:
            return pull_cache[name]
        except KeyError:
            # Cache miss.  Now let's find the plugin
            pass

        # TODO: Instead of using the self._paths cache (PATH_CACHE) and
        #       self._searched_paths we could use an iterator.  Before enabling that
        #       we need to make sure we don't want to add additional directories
        #       (add_directory()) once we start using the iterator.  Currently, it
        #       looks like _get_paths() never forces a cache refresh so if we expect
        #       additional directories to be added later, it is buggy.
        # Scan only directories not already indexed on a previous call.
        for path in (p for p in self._get_paths() if p not in self._searched_paths and os.path.isdir(p)):
            try:
                full_paths = (os.path.join(path, f) for f in os.listdir(path))
            except OSError as e:
                # NOTE(review): on a listdir failure full_paths keeps its
                # value from the previous loop iteration (or is unbound on
                # the first one) — confirm whether a 'continue' was intended.
                display.warning("Error accessing plugin paths: %s" % to_text(e))

            for full_path in (f for f in full_paths if os.path.isfile(f) and not f.endswith('__init__.py')):
                full_name = os.path.basename(full_path)

                # HACK: We have no way of executing python byte compiled files as ansible modules so specifically exclude them
                # FIXME: I believe this is only correct for modules and module_utils.
                # For all other plugins we want .pyc and .pyo should be valid
                if full_path.endswith(('.pyc', '.pyo')):
                    continue

                splitname = os.path.splitext(full_name)
                base_name = splitname[0]
                try:
                    extension = splitname[1]
                except IndexError:
                    extension = ''

                # Module found, now enter it into the caches that match this file.
                # Both the bare name and the full file name are indexed, under
                # the catch-all '' suffix and under the file's own extension.
                if base_name not in self._plugin_path_cache['']:
                    self._plugin_path_cache[''][base_name] = full_path

                if full_name not in self._plugin_path_cache['']:
                    self._plugin_path_cache[''][full_name] = full_path

                if base_name not in self._plugin_path_cache[extension]:
                    self._plugin_path_cache[extension][base_name] = full_path

                if full_name not in self._plugin_path_cache[extension]:
                    self._plugin_path_cache[extension][full_name] = full_path

            self._searched_paths.add(path)
            try:
                return pull_cache[name]
            except KeyError:
                # Didn't find the plugin in this directory.  Load modules from the next one
                pass

        # if nothing is found, try finding alias/deprecated
        if not name.startswith('_'):
            alias_name = '_' + name
            # We've already cached all the paths at this point
            if alias_name in pull_cache:
                # Symlinked plugins are plain aliases; only real files carry a
                # deprecation warning.
                if not ignore_deprecated and not os.path.islink(pull_cache[alias_name]):
                    # FIXME: this is not always the case, some are just aliases
                    display.deprecated('%s is kept for backwards compatibility but usage is discouraged. '
                                       'The module documentation details page may explain more about this rationale.' % name.lstrip('_'))
                return pull_cache[alias_name]

        return None
def find_plugin(self, name, mod_type='', ignore_deprecated=False, check_aliases=False):
''' Find a plugin named name '''
# Import here to avoid circular import
from | |
(seqName.split("&")[1]).split("_")[0] == "SIZE":
name = seqName.split("&")[0]
size = (seqName.split("&")[1]).split("_")[1]
addx = (seqName.split("&")[2]).split("_")[1]
else:
name = seqName.split("&")[0]
size = "x"
addx = ""
pegoFa = 0
else:
seq1 = line.split()[0]
if z == 2:
add = seq1[-2:]
seq = seq1[:-2]
elif z == 1:
add = seq1[-1]
seq = seq1[:-1]
pegoFa = 1
complete = name + "&SIZE_" + size + "&M3_" + add + addx + "\n" + seq + "\n"
output1.write(complete)
def _5sRNAaddfa(readFile, output, z):
output1 = open(output, "w")
yeseq = r'A|a|T|t|C|c|G|g'
pegoFa = 0
for line in open(readFile):
line = line.split("\n")[0]
if line.startswith(">"):
if pegoFa == 0:
seqName = line+"&"
if (seqName.split("&")[1]).split("_")[0] == "SIZE":
name = seqName.split("&")[0]
size = (seqName.split("&")[1]).split("_")[1]
addx = (seqName.split("&")[2]).split("_")[1]
else:
name = seqName.split("&")[0]
size = "x"
addx = ""
elif pegoFa == 1:
complete = name + "&SIZE_" + size + "&M5_" + addx + add + "\n" + seq + "\n"
output1.write(complete)
seqName = line+"&"
if (seqName.split("&")[1]).split("_")[0] == "SIZE":
name = seqName.split("&")[0]
size = (seqName.split("&")[1]).split("_")[1]
addx = (seqName.split("&")[2]).split("_")[1]
else:
name = seqName.split("&")[0]
size = "x"
addx = ""
pegoFa = 0
else:
seq1 = line.split()[0]
add = seq1[:z]
seq = seq1[z:]
pegoFa = 1
complete = name + "&SIZE_" + size + "&M5_" + addx + add + "\n" + seq + "\n"
output1.write(complete)
def _appCutoff(input, name, cutoff, dir):
inputA = open(input)
cut = int(cutoff)
out = dir+"Cutoff/"+name+"-Cutoff"+cutoff+".txt"
output = open(out, "w")
for line in inputA:
if line.startswith("OriginalSeq"):
output.write(line)
else:
sum = 0
line1 = line.split("\t")[4:]
n = len(line1)
for i in line1:
sum += int(i)
if sum >= (cut):
output.write(line)
return out
def _RevComp(sequence):
complement = {'A':'T', 'C':'G', 'G':'C', 'T':'A','a':'t', 'c':'g', 'g':'c', 't':'a'}
return "".join([complement.get(nt, 'N') for nt in sequence[::-1]])
def _Rev(sequence):
return sequence[::-1]
def _Comp(sequence):
complement = {'A':'T', 'C':'G', 'G':'C', 'T':'A','a':'t', 'c':'g', 'g':'c', 't':'a'}
return "".join([complement.get(nt, 'N') for nt in sequence])
def _freqSAM(input1, n, R, output, nadd):
    """Build a tab-separated frequency table from per-library SAM files.

    input1 maps library name -> SAM file path; n is the number of
    libraries; R labels the mapping round; nadd selects how trimmed
    additions are re-attached to the mapped read (3 = 3' add, 5 = 5' add,
    0/1 = none).  One output row per unique re-assembled sequence:
    Seq, Round, Variation, Length, reference hits, then one frequency
    column per library (written to *output*).
    """
    yesSeqNT = r'A|a|T|t|C|c|G|g'  # NOTE(review): unused in this function
    out = open(output, 'w')
    s = 5  # number of fixed metadata slots per dict entry (before freqs)
    nlib = 4  # column cursor: library frequencies start after 4 fixed slots
    dic = dict()
    n = int(n)
    x = n+s
    cab = ""
    Seq = ""
    mod = ""
    startall = datetime.datetime.now()
    for line in input1:
        start = datetime.datetime.now()
        libName = line
        print "%s..."%(libName)
        lib2 = input1[line]
        nlib += 1
        cab += "\t" + libName
        samFile = open(lib2)
        for line1 in samFile.readlines():
            mod = "no"
            if line1.startswith("@"):
                # SAM header line.  'next' is a bare no-op expression here
                # (not 'continue'); harmless since the else covers the rest.
                next
            else:
                # Read name carries piggy-backed metadata:
                # 'id:freq&SIZE_<n>&M?_<add>' when an addition was trimmed.
                tmpName = line1.split("\t")[0]
                tmpName = tmpName+"&"
                if (tmpName.split("&")[1]).split("_")[0] == "SIZE":
                    rawname = (tmpName.split("&")[0]).split(":")[0]
                    Freq = int((tmpName.split("&")[0]).split(":")[1])
                    Size = (tmpName.split("&")[1]).split("_")[1]
                    Add = (tmpName.split("&")[2]).split("_")[1]
                    mod = Add
                    Add1 = (tmpName.split("&")[2]).split("_")[0]
                else:
                    rawname = (tmpName.split("&")[0]).split(":")[0]
                    Freq = int((tmpName.split("&")[0]).split(":")[1])
                    Size = ""
                    Add = ""
                    Add1 = "no"
                # FLAG field: '0' means forward strand; otherwise the read
                # is reported reverse-complemented, so undo that.
                Test_Sense = line1.split("\t")[1]
                if Test_Sense == "0":
                    Sense = ""
                    Seqmap = line1.split("\t")[9]
                else:
                    Seqmap = _RevComp(line1.split("\t")[9])
                    Sense = "-"
                # Re-attach the trimmed addition on the requested end.
                if nadd == 3:
                    Seq = Seqmap+Add
                    mod += ","+str(len(Seq)) + ":M3"
                if nadd == 5:
                    Seq = Add+Seqmap
                    mod += ",1" + ":M5"
                if nadd == 0:
                    Seq = Seqmap
                    mod += ":r0"
                if nadd == 1:
                    Seq = Seqmap
                    mod += ":r1"
                # MD tag (assumed at column 13): a bare match length means
                # a perfect match; otherwise parse the single mismatch.
                mm = (line1.split("\t")[12]).split(":")[-1]
                SNP = ""
                if mm == str(len(Seqmap)): # MD:Z:22
                    # NOTE(review): 'next,' builds a throwaway tuple — a
                    # no-op, presumably meant as 'continue'/'pass'.
                    next,
                if mm != str(len(Seq)): # MD:Z:0C21 | MD:Z:21A0 | MD:Z:16A5 | MD:Z:4C17
                    try:
                        # One- or two-digit mismatch offset followed by the
                        # reference base, e.g. '4C17' or '16A5'.
                        if mm[1]=="A" or mm[1]=="T" or mm[1]=="C" or mm[1]=="G":
                            SNP = Seq[int(mm[0])]
                            mod = mm[1] + ">" + SNP + "," + str(int(mm[0])+1)
                            if mm[0] == "0":
                                Add1 = "M5"
                                mod += ":" + Add1
                            else:
                                Add1 = "MM"
                                mod += ":" + Add1
                        elif mm[2]=="A" or mm[2]=="T" or mm[2]=="C" or mm[2]=="G":
                            SNP = Seq[int(mm[0:2])]
                            mod = mm[2] + ">" + SNP + "," + str(int(mm[0:2])+1)
                            if mm[0:2] == str(len(Seqmap)-1):
                                Add1 = "M3"
                                mod += ":" + Add1
                            else:
                                Add1 = "MM"
                            	mod += ":" + Add1
                    except:
                        # NOTE(review): bare except silently swallows any
                        # parse error; 'next;' is again a no-op.
                        next;
                Size = str(len(Seq))
                RefSeq = line1.split("\t")[2]+":"+Sense+line1.split("\t")[3]+":"+mod
                if SNP != "N":
                    if Seq in dic:
                        dic[Seq][nlib] = Freq
                        if rawname not in dic[Seq][0]:
                            dic[Seq][0].append(rawname)
                            if RefSeq not in dic[Seq][4]:
                                dic[Seq][4].append(RefSeq)
                        elif rawname in dic[Seq][0]:
                            if RefSeq not in dic[Seq][4]:
                                dic[Seq][4].append(RefSeq)
                    elif Seq not in dic:
                        # New sequence: slots 0-4 are metadata (name list,
                        # Seq, round, size, ref list), then n zero counts.
                        dic[Seq] = []
                        for i in range(s):
                            if i == 0 or i == 4:
                                dic[Seq].append([])
                            else:
                                dic[Seq].append("")
                        for i in range(n):
                            dic[Seq].append(0)
                        dic[Seq][0].append(rawname)
                        dic[Seq][1] = Seq
                        dic[Seq][2] = R
                        dic[Seq][3] = Size
                        dic[Seq][4].append(RefSeq)
                        dic[Seq][nlib] = Freq
        end = datetime.datetime.now() - start
        print "\t%s is done\t(time: %s)"%(libName,end)
    cab = "OriginalSeq"+"\t"+"Round"+"\t"+"Variation"+"\t"+"Length"+"\t"+"preMIRref"+ cab + "\n"
    out.write(cab)
    print "\tcreating Frequences Tab"
    for Seq in dic:
        tab = ""
        for i in range(x):
            if i != 0:
                if i < (x-1):
                    if i == 4:
                        dic[Seq][4].sort()
                        # NOTE(review): join result is discarded — the raw
                        # list's str() is written instead; confirm intent.
                        ",".join(dic[Seq][4])
                    tmp1 = dic[Seq][i]
                    tab += str(tmp1)+"\t"
                else:
                    tmp1 = dic[Seq][i]
                    tab += str(tmp1)
        tmp2 = tab + "\n"
        out.write(tmp2)
    total = datetime.datetime.now() - startall
    print "\ttotal time read libs: %s"%(total)
def _cleanSAM(inputA, output):
out = open(output, "wb")
cab = ""
dic = dict()
for line in open(inputA):
if line.startswith("@"):
out.write(line)
if line.startswith("@SQ"):
locus = (line.split("@SQ\tSN:")[1]).split("\t")[0]
dic[locus] = [1]
else:
seq = line.split("\t")[2]
if seq in dic:
out.write(line)
else:
next
def _Round_mir(bowtie, refpath, mir_ref, root0):
    """Map mature miRNA sequences onto their precursors with bowtie.

    Runs a forward-strand, zero-mismatch bowtie alignment of *mir_ref*
    (FASTA) against the *refpath* index, writes SAM output under
    '<root0>SAM/', strips alignments to unknown references via
    _cleanSAM, and removes the raw SAM file.
    """
    print "\n*** Mapping mature miRNA on precursor"
    inp = "f"  # bowtie '-f': input is FASTA
    os.system("%s --norc -n 0 -v 0 -a -l 6 -t %s -%s %s --sam %sSAM/mature.sam" % (bowtie, refpath, inp, mir_ref, root0))
    inputA = "%sSAM/mature.sam" %(root0)
    inputB = "%sSAM/mature-clean.sam" %(root0)
    _cleanSAM(inputA, inputB)
    os.remove("%sSAM/mature.sam" %(root0))
def _Round0(bowtie, refpath, libname, libpath, format, filter, filterpath, root0):
    """Round-0 bowtie mapping of a read library against the reference.

    Perfect-match (-n 0 -v 0), forward-strand mapping; mapped and
    unmapped reads are split into '<root0>mapped/' and
    '<root0>unmapped/', the SAM output is cleaned with _cleanSAM and the
    raw SAM removed.  With filter == 'yes', reads that miss the reference
    get a second pass against the filter index.

    :param bowtie: path to the bowtie executable
    :param refpath: bowtie index of the reference
    :param libname: short library name used in output file names
    :param libpath: path to the library reads (fa/fq)
    :param format: 'fa' or 'fq' input format
    :param filter: 'yes' to run the second filtering pass, else 'no'
    :param filterpath: bowtie index used when filter == 'yes'
    :param root0: output directory prefix (contains SAM/, mapped/, unmapped/)
    """
    print "\n*** Mapping Round 0...%s" %(libname)
    if format == "fa":
        inp = "f"
    else:
        inp = "q"
    if filter == "yes":
        os.system("%s --norc -n 0 -v 0 -a -l 6 -t %s -%s %s --sam %sSAM/r0-%s.sam --un %sunmapped/unr0tmp-%s.%s --al %smapped/r0-%s.%s" %(bowtie, refpath, inp, libpath, root0, libname, root0, libname, format, root0, libname, format))
        os.system("%s --norc -n 0 -v 0 -a -l 6 -t %s -%s %sunmapped/unr0tmp-%s.%s --sam %sSAM/r0tmp-%s.sam --un %sunmapped/unr0-%s.%s --al %smapped/r0-%s.%s" %(bowtie, filterpath, inp, root0, libname, format, root0, libname, root0, libname, format, root0, libname, format))
        # Intermediate unmapped/SAM files from the filter pass are discarded.
        os.remove("%sunmapped/unr0tmp-%s.%s" %(root0, libname, format))
        os.remove("%sSAM/r0tmp-%s.sam" %(root0, libname))
    elif filter == "no":
        os.system("%s --norc -n 0 -v 0 -a -l 6 -t %s -%s %s --sam %sSAM/r0-%s.sam --un %sunmapped/unr0-%s.%s --al %smapped/r0-%s.%s" %(bowtie, refpath, inp, libpath, root0, libname, root0, libname, format, root0, libname, format))
    inputA = "%sSAM/r0-%s.sam" %(root0, libname)
    inputB = "%sSAM/r0-%s-clean.sam" %(root0, libname)
    _cleanSAM(inputA, inputB)
    os.remove("%sSAM/r0-%s.sam" %(root0, libname))
def _Round1(bowtie, refpath, libname, libpath, format, n, rootN, root1):
    """Round-1 bowtie mapping: remap round-0 leftovers allowing 1 mismatch.

    Takes the unmapped reads from round 0 ('<root0>unmapped/unr0-...'),
    maps them with -n 1 -v 1 (one mismatch tolerated), splits
    mapped/unmapped outputs under *root1*, cleans the SAM with _cleanSAM
    and removes the raw SAM file.

    NOTE(review): the round-0 directory parameter is named 'root1' and
    the output directory 'rootN' relative to the other _Round* helpers;
    also 'libpath' and 'n' are unused here and the input is always
    treated as FASTA ('-f') regardless of *format* — confirm intent.
    """
    add = "M"
    inp = "f"
    print "*** Mapping %sadd Round %s...%s" %(add, n, libname)
    os.system("%s --norc -n 1 -v 1 -a -l 6 -t %s -%s %sunmapped/unr0-%s.%s --sam %sSAM/r1-%s.sam --un %sunmapped/unr1-%s.%s --al %smapped/r1-%s.%s" %(bowtie, refpath, inp, root0, libname, format, root1, libname, root1, libname, format, root1, libname, format))
    inputA = "%sSAM/r1-%s.sam" %(root1, libname)
    inputB = "%sSAM/r1-%s-clean.sam" %(root1, libname)
    _cleanSAM(inputA, inputB)
    os.remove("%sSAM/r1-%s.sam" %(root1, libname))
def _RoundN(add, bowtie, refpath, libname, libpath, format, n, rootN, root1):
y=n-1
if format == "fa":
inp = "f"
else:
inp = "q"
if n == 2:
print "*** Mapping M%s Round %s...%s" %(add, n, libname)
inputA = "%sunmapped/unr1-%s.%s" %(root1, libname, format)
inputB = "%sunmapped/un%sr1-%s.%s" %(root1, add, libname, format)
print "\t*** Creating un%sr1-%s.%s" %(add, libname, format)
if add == "3":
_3sRNAaddfa(inputA, inputB, 2)
if add == "5":
_5sRNAaddfa(inputA, inputB, 2)
os.system("%s --norc -n 0 -v 0 -a -l 6 -t %s -%s %sunmapped/un%sr1-%s.%s --sam %sSAM/r2-%s.sam --un %sunmapped/unr2-%s.%s --al %smapped/r2-%s.%s" %(bowtie, refpath, inp, root1, add, libname, format, rootN, libname, rootN, libname, format, rootN, libname, format))
inputA = "%sSAM/r%s-%s.sam" %(rootN, n, libname)
inputB = "%sSAM/r%s-%s-clean.sam" %(rootN, n, libname)
_cleanSAM(inputA, inputB)
os.remove("%sSAM/r%s-%s.sam" %(rootN, n, libname))
elif n > 2:
print "\n*** Mapping M%s Round %s...%s" %(add, n, libname)
inputA = "%sunmapped/unr%s-%s.%s" %(rootN, y, libname, format)
inputB = "%sunmapped/un%sr%s-%s.%s" %(rootN, add, y, libname, format)
print "\t*** Creating un%sr%s-%s.%s" %(add, y, libname, format)
if add == "3":
if format == "fq":
_3sRNAaddfq(inputA, inputB, 1)
elif format == "fa":
_3sRNAaddfa(inputA, inputB, 1)
if add == "5":
if format == "fq":
_5sRNAaddfq(inputA, inputB, 1)
elif format == "fa":
_5sRNAaddfa(inputA, inputB, 1)
os.system("%s --norc -n 0 -v 0 -a -l 6 -t %s -%s %sunmapped/un%sr%s-%s.%s --sam | |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, List, Optional, Tuple, Type, Union
import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.error_report import BMGError, ErrorReport
from beanmachine.ppl.compiler.typer_base import TyperBase
# A "node fixer" is a partial function on nodes; it is similar to a "rule". (See rules.py)
# What distinguishes a node fixer from a rule?
#
# * A node fixer is not an instance of a Rule class; it's just a function.
#
# * A node fixer returns:
# 1. None or Inapplicable if the fixer did not know how to fix the problem
# TODO: Eliminate use of None as a special return value from a node fixer.
# Node fixers should return Inapplicable, Fatal, or a node.
# 2. The same node as the input, if the node does not actually need fixing.
# 3. A new node, if the fixer did know how to fix the problem
# 4. Fatal, if the node definitely cannot be fixed, so compilation should cease.
#
# Note the subtle difference between (1) and (2). Suppose we compose a set of n
# fixers together, as in the first_match combinator below. If the first fixer
# returns Inapplicable, then we try the second fixer. If the first fixer returns the
# input, then that fixer is saying that the node is already correct, and we
# should not try the second fixer.
#
# * A node fixer mutates an existing graph by adding a new node to it; a Rule just
# returns a success code containing a new value.
#
# * Rules may be combined together with combinators that apply sub-rules to
# various branches in a large tree, and the result of such a combination is
# itself a Rule. Node fixers are combined together to form more complex fixers,
# but they still just operate on individual nodes. The work of applying node fixers
# over an entire graph is done by a GraphFixer.
class NodeFixerError:
    """Marker type for the non-node results a node fixer may return."""

    pass


# Sentinel singletons: Inapplicable means "this fixer does not handle this
# node"; Fatal means "this node can never be fixed, stop compilation".
Inapplicable = NodeFixerError()
Fatal = NodeFixerError()

# A node fixer returns a (possibly new) node, None/Inapplicable, or Fatal.
NodeFixerResult = Union[bn.BMGNode, None, NodeFixerError]
NodeFixer = Callable[[bn.BMGNode], NodeFixerResult]
def node_fixer_first_match(fixers: List[NodeFixer]) -> NodeFixer:
    """Combine *fixers* into a single fixer that applies the first one
    producing a result other than None/Inapplicable; if no fixer applies,
    the combined fixer returns Inapplicable."""

    def first_match(node: bn.BMGNode) -> NodeFixerResult:
        for candidate in fixers:
            outcome = candidate(node)
            handled = outcome is not None and outcome is not Inapplicable
            if handled:
                return outcome
        return Inapplicable

    return first_match
def type_guard(t: Type, fixer: Callable) -> NodeFixer:
    """Wrap *fixer* so that it only runs on nodes of type *t*; any other
    node yields None (i.e. "not applicable")."""

    def guarded(node: bn.BMGNode) -> Optional[bn.BMGNode]:
        if not isinstance(node, t):
            return None
        return fixer(node)

    return guarded
# A GraphFixer is a function that takes no arguments and returns (1) a bool indicating
# whether the graph fixer made any change or not, and (2) an error report. If the
# error report is non-empty then further processing should stop and the error should
# be reported to the user.
GraphFixerResult = Tuple[bool, ErrorReport]
GraphFixer = Callable[[], GraphFixerResult]

# The identity graph fixer never makes a change or produces an error.
# It serves as the no-op arm for combinators such as conditional_graph_fixer.
identity_graph_fixer: GraphFixer = lambda: (False, ErrorReport())
def conditional_graph_fixer(condition: bool, fixer: GraphFixer) -> GraphFixer:
    """Return *fixer* when *condition* holds; otherwise the no-op identity fixer."""
    if condition:
        return fixer
    return identity_graph_fixer
def ancestors_first_graph_fixer(  # noqa
    bmg: BMGraphBuilder,
    typer: TyperBase,
    node_fixer: NodeFixer,
    get_error: Optional[Callable[[bn.BMGNode, int], Optional[BMGError]]] = None,
) -> GraphFixer:
    """Build a GraphFixer that applies *node_fixer* edge-by-edge over every
    ancestor of a sample, query, or observation node in *bmg*."""
    # Applies the node fixer to each node in the graph builder that is an ancestor,
    # of any sample, query, or observation, starting with ancestors and working
    # towards descendants. Fixes are done one *edge* at a time. That is, when
    # we enumerate a node, we check all its input edges to see if the input node
    # needs to be fixed, and if so, then we update that edge to point from
    # the fixed node to its new output.
    #
    # We enumerate each output node once, but because we then examine each of its
    # input edges, we will possibly encounter the same input node more than once.
    #
    # Rather than rewriting it again, we memoize the result and reuse it.
    # If a fixer indicates a fatally unfixable node then we attempt to report an error
    # describing the problem with the edge. However, we will continue to run fixers
    # on other nodes, hoping that we might report more errors.
    #
    # A typer associates type information with each node in the graph. We have some
    # problems though:
    #
    # * We frequently need to accurately know the type of a node when checking to
    #   see if it needs fixing.
    # * Computing the type of a node requires computing the types of all of its
    #   *ancestor* nodes, which can be quite expensive.
    # * If a mutation changes an input of a node, that node's type might change,
    #   which could then change the types of all of its *descendant* nodes.
    #
    # We solve this performance problem by (1) computing types of nodes on demand
    # and caching the result, (2) being smart about recomputing the type of a node
    # and its descendants when the graph is mutated. We therefore tell the typer
    # that it needs to re-type a node and its descendants only when a node changes.
    #
    # CONSIDER: Could we use a simpler algorithm here? That is: for each node,
    # try to fix the node. If successful, remove all the output edges of the old
    # node and add output edges to the new node. The problem with this approach
    # is that we might end up reporting an error on an edge that is NOT in the
    # subgraph of ancestors of samples, queries and observations, which would be
    # a bad user experience.
    def ancestors_first() -> Tuple[bool, ErrorReport]:
        errors = ErrorReport()
        # Memoized original-input -> replacement mapping (see comment above).
        replacements = {}
        # Input nodes we have already reported a fatal error for.
        reported = set()
        nodes = bmg.all_ancestor_nodes()
        made_progress = False
        for node in nodes:
            node_was_updated = False
            for i in range(len(node.inputs)):
                c = node.inputs[i]
                # Have we already reported an error on this node? Skip it.
                if c in reported:
                    continue
                # Have we already replaced this input with something?
                # If so, no need to compute the replacement again.
                if c in replacements:
                    if node.inputs[i] is not replacements[c]:
                        node.inputs[i] = replacements[c]
                        node_was_updated = True
                    continue
                replacement = node_fixer(c)
                if isinstance(replacement, bn.BMGNode):
                    replacements[c] = replacement
                    if node.inputs[i] is not replacement:
                        node.inputs[i] = replacement
                        node_was_updated = True
                        made_progress = True
                elif replacement is Fatal:
                    reported.add(c)
                    if get_error is not None:
                        error = get_error(node, i)
                        if error is not None:
                            errors.add_error(error)
            if node_was_updated:
                # Re-type only nodes whose inputs actually changed.
                typer.update_type(node)
        return made_progress, errors

    return ancestors_first
def edge_error_pass(
    bmg: BMGraphBuilder, get_error: Callable[[bn.BMGNode, int], Optional[BMGError]]
) -> GraphFixer:
    """Given a function that takes an edge in the graph and returns an optional
    error, build a pass which checks every edge that is an ancestor of a query,
    observation, or sample for errors. The edge is given as the descendant node
    and the index of the parent node. At most one error is reported per parent
    node. The pass never mutates the graph, so it always reports no progress."""

    def error_pass() -> Tuple[bool, ErrorReport]:
        report = ErrorReport()
        already_reported = set()
        for child in bmg.all_ancestor_nodes():
            for parent_index in range(len(child.inputs)):
                parent = child.inputs[parent_index]
                # We might find errors on many edges, but we only report
                # one error per parent node.
                if parent in already_reported:
                    continue
                problem = get_error(child, parent_index)
                if problem is None:
                    continue
                report.add_error(problem)
                already_reported.add(parent)
        return False, report

    return error_pass
def node_error_pass(
    bmg: BMGraphBuilder, get_error: Callable[[bn.BMGNode], Optional[BMGError]]
) -> GraphFixer:
    """Given a function that takes a node in the graph and returns an optional
    error, build a pass which checks every node that is an ancestor of a query,
    observation, or sample for errors. The pass never mutates the graph, so it
    always reports no progress."""

    def error_pass() -> Tuple[bool, ErrorReport]:
        report = ErrorReport()
        for candidate in bmg.all_ancestor_nodes():
            problem = get_error(candidate)
            if problem is not None:
                report.add_error(problem)
        return False, report

    return error_pass
def sequential_graph_fixer(fixers: List[GraphFixer]) -> GraphFixer:
    """Compose *fixers* into one GraphFixer that runs each exactly once, in
    order, stopping early as soon as any of them reports an error."""

    def sequential() -> GraphFixerResult:
        progress = False
        report = ErrorReport()
        for fix in fixers:
            step_progress, report = fix()
            if step_progress:
                progress = True
            if report.any():
                break
        return progress, report

    return sequential
def | |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 Samsung Electronics Co., Ltd All Rights Reserved
#
# Contact: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""
Python3 bindings for YACA.
Usage is almost the same as in the C API. All the functions that made
sense in Python were implemented. Memory allocations and functions for
getting length of the buffers were omitted as all those things are
handled automatically for both input and output.
All the parameters for strings/data expect python's bytes type. All
the parameters are named the same as in the C API and their meaning is
exactly the same.
The major exception being encrypt/decrypt update where second
parameter can have 2 meanings. This is only used for CCM_AAD. See
examples.
Some parameters now have default values for ease of use.
For details please refer to the C API doxygen documentation.
For examples see tests/examples in yaca.tests module.
"""
import enum as _enum
import ctypes as _ctypes
import yaca.library
from yaca.error import InvalidParameterError
del yaca.error
# Initialization
_lib = yaca.library.get_yaca()
del yaca.library
# Helpers
def _get_char_param_nullify_if_zero(param):
return None if not param else param
def _context_get_output_length(ctx, input_length):
    """Ask the library how large an output buffer *ctx* needs for
    *input_length* bytes of input."""
    needed = _ctypes.c_size_t()
    _lib.yaca_context_get_output_length(
        ctx, input_length, _ctypes.byref(needed))
    return needed.value
# Types
class Context():
    """Thin RAII-style wrapper around a native yaca context pointer.

    Destroys the underlying context when the Python object is finalized.
    """

    def __init__(self, ptr):
        # Only accept a raw ctypes void pointer produced by the C library.
        if not isinstance(ptr, _ctypes.c_void_p):
            raise TypeError('Invalid type')
        self._as_parameter_ = ptr

    def __del__(self):
        # Guard against a half-constructed instance: if __init__ raised
        # TypeError, _as_parameter_ was never set and the unguarded code
        # emitted a spurious (ignored) AttributeError at finalization.
        if hasattr(self, '_as_parameter_'):
            _lib.yaca_context_destroy(self._as_parameter_)
class Key():
    """Thin RAII-style wrapper around a native yaca key pointer.

    Destroys the underlying key when the Python object is finalized.
    """

    def __init__(self, ptr):
        # Only accept a raw ctypes void pointer produced by the C library.
        if not isinstance(ptr, _ctypes.c_void_p):
            raise TypeError('Invalid type')
        self._as_parameter_ = ptr

    def __del__(self):
        # Guard against a half-constructed instance: if __init__ raised
        # TypeError, _as_parameter_ was never set and the unguarded code
        # emitted a spurious (ignored) AttributeError at finalization.
        if hasattr(self, '_as_parameter_'):
            _lib.yaca_key_destroy(self._as_parameter_)

    def __repr__(self):
        # A NULL pointer is the KEY_NULL sentinel; querying its type or
        # length through the C API would be invalid.
        if self._as_parameter_.value is None:
            return '<yaca.Key: KEY_NULL>'
        return '<yaca.Key: ' + str(self.get_type()) + ', ' + \
            str(self.get_bit_length()) + ' bits at ' + str(hex(id(self))) + '>'

    def get_type(self):
        """Return the KEY_TYPE of this key."""
        return key_get_type(self)

    def get_bit_length(self):
        """Return the key length in bits."""
        return key_get_bit_length(self)
# Sentinel for "no key": a Key wrapping a NULL pointer (used e.g. as the
# default IV for ciphers that do not need one).
KEY_NULL = Key(_ctypes.c_void_p())
# Enums
@_enum.unique
class KEY_FORMAT(_enum.Enum):
    # Format of a key export/import container.
    DEFAULT = 0
    PKCS8 = 1
@_enum.unique
class KEY_FILE_FORMAT(_enum.Enum):
    # On-disk encoding of an exported key.
    RAW = 0
    BASE64 = 1
    PEM = 2
    DER = 3
@_enum.unique
class KEY_TYPE(_enum.Enum):
    # Kind of key object held by a Key wrapper.
    SYMMETRIC = 0
    DES = 1
    IV = 2
    RSA_PUB = 3
    RSA_PRIV = 4
    DSA_PUB = 5
    DSA_PRIV = 6
    DH_PUB = 7
    DH_PRIV = 8
    EC_PUB = 9
    EC_PRIV = 10
    DSA_PARAMS = 11
    DH_PARAMS = 12
    EC_PARAMS = 13
# No @_enum.unique here: 64 and 128 intentionally appear twice
# (IV_* and UNSAFE_* are aliases for the same bit counts).
class KEY_BIT_LENGTH(_enum.IntEnum):
    IV_64BIT = 64
    IV_128BIT = 128
    UNSAFE_8BIT = 8
    UNSAFE_40BIT = 40
    UNSAFE_64BIT = 64
    UNSAFE_80BIT = 80
    UNSAFE_128BIT = 128
    L192BIT = 192
    L256BIT = 256
    L512BIT = 512
    L1024BIT = 1024
    L2048BIT = 2048
    L3072BIT = 3072
    L4096BIT = 4096
@_enum.unique
class KEY_BIT_LENGTH_EC(_enum.IntEnum):
    # Encoded curve identifiers (not literal bit counts).
    PRIME192V1 = 0x300000C0
    PRIME256V1 = 0x30000100
    SECP256K1 = 0x31200100
    SECP384R1 = 0x31100180
    SECP521R1 = 0x31100209
# Flag constants (plain ints, not enum members) selecting the DH generator.
KEY_LENGTH_DH_GENERATOR_2 = 0x10000000
KEY_LENGTH_DH_GENERATOR_5 = 0x11000000
@_enum.unique
class KEY_BIT_LENGTH_DH_RFC(_enum.IntEnum):
    # RFC 5114 standardized DH parameter sets (encoded identifiers).
    L1024_160 = 0x20000400
    L2048_224 = 0x21000800
    L2048_256 = 0x22000800
@_enum.unique
class DIGEST_ALGORITHM(_enum.Enum):
    MD5 = 0
    SHA1 = 1
    SHA224 = 2
    SHA256 = 3
    SHA384 = 4
    SHA512 = 5
@_enum.unique
class ENCRYPT_ALGORITHM(_enum.Enum):
    AES = 0
    UNSAFE_DES = 1
    UNSAFE_TRIPLE_DES_2TDEA = 2
    TRIPLE_DES_3TDEA = 3
    UNSAFE_RC2 = 4
    UNSAFE_RC4 = 5
    CAST5 = 6
@_enum.unique
class BLOCK_CIPHER_MODE(_enum.Enum):
    NONE = 0
    ECB = 1
    CTR = 2
    CBC = 3
    GCM = 4
    CFB = 5
    CFB1 = 6
    CFB8 = 7
    OFB = 8
    CCM = 9
    WRAP = 10
@_enum.unique
class PROPERTY(_enum.Enum):
    # Non-standard context properties; see context_set_property /
    # context_get_property for how each kind is marshalled.
    PADDING = 0
    GCM_AAD = 1
    GCM_TAG = 2
    GCM_TAG_LEN = 3
    CCM_AAD = 4
    CCM_TAG = 5
    CCM_TAG_LEN = 6
    RC2_EFFECTIVE_KEY_BITS = 7
@_enum.unique
class PADDING(_enum.Enum):
    NONE = 0
    X931 = 1
    PKCS1 = 2
    PKCS1_PSS = 3
    PKCS1_OAEP = 4
    PKCS1_SSLV23 = 5
    PKCS7 = 6
@_enum.unique
class KDF(_enum.Enum):
    # Key-derivation functions for key_derive_kdf.
    X942 = 0
    X962 = 1
# Implementation crypto
def initialize():
    """Initializes the library. Must be called before any other crypto
    function. Should be called once in each thread that uses yaca."""
    _lib.yaca_initialize()
def cleanup():
    """Cleans up the library.
    Must be called before exiting the thread that called yaca_initialize()."""
    _lib.yaca_cleanup()
def memcmp(first, second, length):
    """Safely compares first length bytes of two buffers.

    Returns the C API's comparison status code (not a Python bool) --
    presumably a YACA error/success code; TODO confirm against the C docs.
    """
    length = _ctypes.c_size_t(length)
    return _lib.yaca_memcmp(first, second, length)
def random_bytes(length):
    """Generates random data.

    Returns *length* random bytes produced by yaca_randomize_bytes.
    """
    # create_string_buffer(int) allocates a zero-filled buffer of exactly
    # *length* bytes which the C call fills in place.
    data = _ctypes.create_string_buffer(length)
    _lib.yaca_randomize_bytes(data, length)
    return bytes(data)
def context_set_property(ctx, prop, prop_val):
    """Sets the non-standard context properties.
    Can only be called on an initialized context.

    How *prop_val* is marshalled depends on the property kind: PADDING is a
    C int holding the enum value, the AAD/TAG properties are raw byte
    buffers, and the *_LEN / RC2 properties are C size_t values.
    Raises InvalidParameterError for any other property.
    """
    if prop == PROPERTY.PADDING:
        # Enum property: pass the enum's integer value as a C int.
        value = _ctypes.c_int(prop_val.value)
        value_length = _ctypes.sizeof(value)
        _lib.yaca_context_set_property(ctx,
                                       prop.value,
                                       _ctypes.byref(value),
                                       value_length)
    elif (prop == PROPERTY.GCM_AAD) or (prop == PROPERTY.CCM_AAD) or \
            (prop == PROPERTY.GCM_TAG) or (prop == PROPERTY.CCM_TAG):
        # Buffer property: pass the bytes object directly.
        value = prop_val
        value_length = len(prop_val)
        _lib.yaca_context_set_property(ctx, prop.value,
                                      value, value_length)
    elif (prop == PROPERTY.GCM_TAG_LEN) or (prop == PROPERTY.CCM_TAG_LEN) or \
            (prop == PROPERTY.RC2_EFFECTIVE_KEY_BITS):
        # Size property: pass as a C size_t.
        value = _ctypes.c_size_t(prop_val)
        value_length = _ctypes.sizeof(value)
        _lib.yaca_context_set_property(
            ctx, prop.value, _ctypes.byref(value), value_length)
    else:
        raise InvalidParameterError('Wrong property passed')
def context_get_property(ctx, prop):
    """Returns the non-standard context properties.
    Can only be called on an initialized context.

    The library allocates the returned buffer; it is cast to the Python
    type matching the property kind (int, bytes, or size) and then freed.
    Raises InvalidParameterError for an unsupported property.
    """
    value = _ctypes.c_void_p()
    value_length = _ctypes.c_size_t()
    _lib.yaca_context_get_property(ctx, prop.value, _ctypes.byref(value),
                                   _ctypes.byref(value_length))
    if prop == PROPERTY.PADDING:
        # Enum property: stored as a single C int.
        value_cast = _ctypes.cast(value, _ctypes.POINTER(_ctypes.c_int))
        value_proper = value_cast.contents.value
        assert value_length.value == _ctypes.sizeof(value_cast.contents)
    elif (prop == PROPERTY.GCM_AAD) or (prop == PROPERTY.CCM_AAD) or \
            (prop == PROPERTY.GCM_TAG) or (prop == PROPERTY.CCM_TAG):
        # Buffer property: copy out value_length raw bytes.
        value_cast = _ctypes.cast(value, _ctypes.POINTER(_ctypes.c_char))
        value_proper = value_cast[:value_length.value]
        assert value_length.value == len(value_proper)
    elif (prop == PROPERTY.GCM_TAG_LEN) or \
            (prop == PROPERTY.CCM_TAG_LEN) or \
            (prop == PROPERTY.RC2_EFFECTIVE_KEY_BITS):
        # Size property: stored as a single C size_t.
        value_cast = _ctypes.cast(value, _ctypes.POINTER(_ctypes.c_size_t))
        value_proper = value_cast.contents.value
        assert value_length.value == _ctypes.sizeof(value_cast.contents)
    else:
        raise InvalidParameterError('Wrong property passed')
    # The buffer was allocated by the library; release it before returning.
    _lib.yaca_free(value)
    return value_proper
# Implementation key
def key_get_type(key):
    """Gets key's type as a KEY_TYPE enum member."""
    key_type = _ctypes.c_int()
    _lib.yaca_key_get_type(key, _ctypes.byref(key_type))
    return KEY_TYPE(key_type.value)
def key_get_bit_length(key):
    """Gets key's length (in bits) as a plain int."""
    key_bit_length = _ctypes.c_size_t()
    _lib.yaca_key_get_bit_length(key, _ctypes.byref(key_bit_length))
    return key_bit_length.value
def key_import(data, key_type=KEY_TYPE.SYMMETRIC, password=b''):
    """Imports a key or key generation parameters.

    *data* and *password* are bytes; returns a new Key wrapper that owns
    the imported native key.
    """
    key = _ctypes.c_void_p()
    _lib.yaca_key_import(key_type.value, _ctypes.c_char_p(password),
                         data, len(data), _ctypes.byref(key))
    return Key(key)
def key_export(key, key_file_fmt=KEY_FILE_FORMAT.BASE64,
               key_fmt=KEY_FORMAT.DEFAULT, password=b''):
    """Exports a key or key generation parameters to arbitrary format.

    Returns the exported data as bytes; the library-allocated buffer is
    copied and freed before returning.
    """
    data = _ctypes.POINTER(_ctypes.c_char)()
    data_length = _ctypes.c_size_t()
    _lib.yaca_key_export(key, key_fmt.value, key_file_fmt.value,
                         _ctypes.c_char_p(password), _ctypes.byref(data),
                         _ctypes.byref(data_length))
    data_bytes = data[:data_length.value]
    _lib.yaca_free(data)
    return data_bytes
def key_generate(key_type=KEY_TYPE.SYMMETRIC,
                 key_bit_length=KEY_BIT_LENGTH.L256BIT):
    """Generates a secure key or key generation parameters
    (or an Initialization Vector).

    key_bit_length may be a KEY_BIT_LENGTH* member (IntEnum, so ctypes
    accepts it directly) or a plain int.
    """
    key = _ctypes.c_void_p()
    _lib.yaca_key_generate(key_type.value, key_bit_length,
                           _ctypes.byref(key))
    return Key(key)
def key_generate_from_parameters(params):
    """Generates a secure private asymmetric key from parameters.

    *params* is a Key holding *_PARAMS material; returns the private Key.
    """
    prv_key = _ctypes.c_void_p()
    _lib.yaca_key_generate_from_parameters(params,
                                           _ctypes.byref(prv_key))
    return Key(prv_key)
def key_extract_public(prv_key):
    """Extracts public key from a private one; returns a new Key."""
    pub_key = _ctypes.c_void_p()
    _lib.yaca_key_extract_public(prv_key, _ctypes.byref(pub_key))
    return Key(pub_key)
def key_extract_parameters(key):
    """Extracts parameters from a private or a public key; returns a new Key."""
    params = _ctypes.c_void_p()
    _lib.yaca_key_extract_parameters(key, _ctypes.byref(params))
    return Key(params)
def key_derive_dh(prv_key, pub_key):
    """Derives a shared secret using Diffie-Hellman or EC Diffie-Hellman
    key exchange protocol.

    Returns the raw shared secret as bytes; the library-allocated buffer
    is copied and freed before returning.
    """
    secret = _ctypes.POINTER(_ctypes.c_char)()
    secret_length = _ctypes.c_size_t()
    _lib.yaca_key_derive_dh(prv_key, pub_key, _ctypes.byref(secret),
                            _ctypes.byref(secret_length))
    secret_bytes = secret[:secret_length.value]
    _lib.yaca_free(secret)
    return secret_bytes
def key_derive_kdf(secret, key_material_length, info=b'',
                   kdf=KDF.X942, digest_algo=DIGEST_ALGORITHM.SHA256):
    """Derives a key material from shared secret.

    An empty *info* is passed to C as NULL (with length 0). Returns
    *key_material_length* bytes of derived material.
    """
    info_param = _get_char_param_nullify_if_zero(info)
    key_material = _ctypes.POINTER(_ctypes.c_char)()
    _lib.yaca_key_derive_kdf(kdf.value, digest_algo.value,
                             secret, len(secret),
                             info_param, len(info), key_material_length,
                             _ctypes.byref(key_material))
    key_material_bytes = key_material[:key_material_length]
    _lib.yaca_free(key_material)
    return key_material_bytes
def key_derive_pbkdf2(password, key_bit_length=KEY_BIT_LENGTH.L256BIT,
                      salt=b'', digest_algo=DIGEST_ALGORITHM.SHA256,
                      iterations=50000):
    """Derives a key from user password (PKCS #5 a.k.a. pbkdf2 algorithm).

    An empty *salt* is passed to C as NULL (with length 0). Returns a Key.
    """
    salt_param = _get_char_param_nullify_if_zero(salt)
    key = _ctypes.c_void_p()
    _lib.yaca_key_derive_pbkdf2(_ctypes.c_char_p(password), salt_param,
                                len(salt), iterations, digest_algo.value,
                                key_bit_length, _ctypes.byref(key))
    return Key(key)
# Implementation simple
def simple_encrypt(sym_key, plaintext, encrypt_algo=ENCRYPT_ALGORITHM.AES,
                   bcm=BLOCK_CIPHER_MODE.ECB, iv=KEY_NULL):
    """Encrypts data using a symmetric cipher.

    Empty *plaintext* is passed to C as NULL (with length 0). Returns the
    ciphertext as bytes.
    """
    plaintext_param = _get_char_param_nullify_if_zero(plaintext)
    ciphertext = _ctypes.POINTER(_ctypes.c_char)()
    ciphertext_length = _ctypes.c_size_t()
    _lib.yaca_simple_encrypt(encrypt_algo.value, bcm.value, sym_key, iv,
                             plaintext_param, len(plaintext),
                             _ctypes.byref(ciphertext),
                             _ctypes.byref(ciphertext_length))
    ciphertext_bytes = ciphertext[:ciphertext_length.value]
    _lib.yaca_free(ciphertext)
    return ciphertext_bytes
def simple_decrypt(sym_key, ciphertext, encrypt_algo=ENCRYPT_ALGORITHM.AES,
                   bcm=BLOCK_CIPHER_MODE.ECB, iv=KEY_NULL):
    """Decrypts data using a symmetric cipher.

    Empty *ciphertext* is passed to C as NULL (with length 0). Returns the
    plaintext as bytes.
    """
    ciphertext_param = _get_char_param_nullify_if_zero(ciphertext)
    plaintext = _ctypes.POINTER(_ctypes.c_char)()
    plaintext_length = _ctypes.c_size_t()
    _lib.yaca_simple_decrypt(encrypt_algo.value, bcm.value, sym_key, iv,
                             ciphertext_param, len(ciphertext),
                             _ctypes.byref(plaintext),
                             _ctypes.byref(plaintext_length))
    plaintext_bytes = plaintext[:plaintext_length.value]
    _lib.yaca_free(plaintext)
    return plaintext_bytes
def simple_calculate_digest(message, digest_algo=DIGEST_ALGORITHM.SHA256):
    """Calculates a digest of a message; returns the digest as bytes."""
    # Empty message is passed to C as NULL (with length 0).
    message_param = _get_char_param_nullify_if_zero(message)
    digest = _ctypes.POINTER(_ctypes.c_char)()
    digest_length = _ctypes.c_size_t()
    _lib.yaca_simple_calculate_digest(digest_algo.value, message_param,
                                      len(message),
                                      _ctypes.byref(digest),
                                      _ctypes.byref(digest_length))
    digest_bytes = digest[:digest_length.value]
    _lib.yaca_free(digest)
    return digest_bytes
def simple_calculate_signature(prv_key, message,
                               digest_algo=DIGEST_ALGORITHM.SHA256):
    """Creates a signature using asymmetric private key.

    Returns the signature as bytes.
    """
    # Empty message is passed to C as NULL (with length 0).
    message_param = _get_char_param_nullify_if_zero(message)
    signature = _ctypes.POINTER(_ctypes.c_char)()
    signature_length = _ctypes.c_size_t()
    _lib.yaca_simple_calculate_signature(digest_algo.value, prv_key,
                                         message_param, len(message),
                                         _ctypes.byref(signature),
                                         _ctypes.byref(signature_length))
    signature_bytes = signature[:signature_length.value]
    _lib.yaca_free(signature)
    return signature_bytes
def simple_verify_signature(pub_key, message, signature,
                            digest_algo=DIGEST_ALGORITHM.SHA256):
    """Verifies a signature using asymmetric public key.

    Returns the C API's status result -- presumably truthy on success;
    TODO confirm the exact return convention against the C docs.
    """
    return _lib.yaca_simple_verify_signature(digest_algo.value, pub_key,
                                             message, len(message),
                                             signature, len(signature))
def simple_calculate_hmac(sym_key, message,
                          digest_algo=DIGEST_ALGORITHM.SHA256):
    """Calculates a HMAC of given message using symmetric key.

    Returns the MAC as bytes.
    """
    # Empty message is passed to C as NULL (with length 0).
    message_param = _get_char_param_nullify_if_zero(message)
    mac = _ctypes.POINTER(_ctypes.c_char)()
    mac_length = _ctypes.c_size_t()
    _lib.yaca_simple_calculate_hmac(digest_algo.value, sym_key,
                                    message_param, len(message),
                                    _ctypes.byref(mac),
                                    _ctypes.byref(mac_length))
    mac_bytes = mac[:mac_length.value]
    _lib.yaca_free(mac)
    return mac_bytes
def simple_calculate_cmac(sym_key, message,
encrypt_algo=ENCRYPT_ALGORITHM.AES):
"""Calculates a CMAC of given message using symmetric key."""
message_param = _get_char_param_nullify_if_zero(message)
mac = _ctypes.POINTER(_ctypes.c_char)()
mac_length = _ctypes.c_size_t()
_lib.yaca_simple_calculate_cmac(encrypt_algo.value, sym_key,
message_param, len(message),
| |
<filename>ancom.py
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy import stats
from itertools import product
from mytstats import tstatistic
from skbio.stats import composition
from skbio.stats.composition import clr, multiplicative_replacement
__all__ = ['otuLogRatios',
'ancom',
'globalLRPermTest',
'LRPermTest',
'ratios2otumat',
'loadAbundance',
'_dmeanStat',
'_sumDmeanStat',
'_maxDmeanStat',
'_tStat',
'_sumTStat',
'_maxTStat']
def _dmeanStat(mat, boolInd, axis=0):
return mat[boolInd,:].mean(axis=axis) - mat[~boolInd,:].mean(axis=axis)
def _sumDmeanStat(mat, boolInd):
    """Global statistic: sum of squared group-mean differences over columns."""
    d = _dmeanStat(mat, boolInd)
    return np.sum(d ** 2)
def _maxDmeanStat(mat, boolInd):
    """Global statistic: maximum squared group-mean difference over columns."""
    d = _dmeanStat(mat, boolInd)
    return np.max(d ** 2)
def _tStat(mat, boolInd, axis=0):
    """Two-sample equal-variance t statistic per column (group vs rest)."""
    grp = mat[boolInd, :]
    rest = mat[~boolInd, :]
    return tstatistic(grp, rest, axis=axis, equal_var=True)
def _sumTStat(mat, boolInd, axis=0):
    """Global statistic: sum of absolute t statistics over columns.

    *axis* is accepted for signature compatibility; _tStat is invoked with
    its default axis, matching the original behavior.
    """
    return np.sum(np.abs(_tStat(mat, boolInd)))
def _maxTStat(mat, boolInd, axis=0):
    """Global statistic: maximum absolute t statistic over columns.

    *axis* is accepted for signature compatibility; _tStat is invoked with
    its default axis, matching the original behavior.
    """
    return np.max(np.abs(_tStat(mat, boolInd)))
def _rhoStat(mat, x, axis=0):
assert mat.shape[axis] == x.shape[0]
if axis == 0:
r = [
stats.spearmanr(x, mat[:, i]).correlation
for i in range(mat.shape[1 - axis])
]
else:
r = [
stats.spearmanr(x, mat[i, :]).correlation
for i in range(mat.shape[1 - axis])
]
r = np.array(r)
assert r.shape[0] == mat.shape[1 - axis], (r.shape[0], mat.shape[1 - axis])
return r
def _sumRhoStat(mat, x):
    """Global statistic: sum of squared Spearman correlations over columns."""
    r = _rhoStat(mat, x)
    return np.sum(r ** 2)
def _maxRhoStat(mat, x):
    """Global statistic: maximum squared Spearman correlation over columns."""
    r = _rhoStat(mat, x)
    return np.max(r ** 2)
def loadAbundance(filename, compositionNorm=True, truncate=True):
    """Load OTU counts file (phylum, genus or species level)
    with OTUs along the rows and samples along the columns.

    Parameters
    ----------
    filename : str
        Excel file from QIIME pipeline.
        Contains OTUs along the rows and samples along the columns,
        with a few header rows.
    compositionNorm : bool
        Add delta count to zeros and normalize each sample by the
        total number of reads. (uses skbio.stats.composition.multiplicative_replacement)
    truncate : bool
        Discard taxa with less than 0.5% of total reads.
        Discard taxa that are not present in 25% of samples.

    Returns
    -------
    df : pd.DataFrame [index: samples, columns: OTUs]
    cols : list of OTU column names (excluding 'sid')
    """
    def _cleanCountDf(df):
        """Drop extra columns/headers and transpose so that
        samples are along rows and OTUs along columns.
        Returns
        -------
        outDf : pd.DataFrame [index: samples, columns: OTUs]"""
        df = df.drop(['tax_id', 'rank'], axis = 1)
        df = df.dropna(subset=['tax_name'], axis = 0)
        # NOTE(review): rename_axis with a mapping was removed in modern
        # pandas (use df.rename(columns=...)) -- confirm the pinned pandas
        # version before upgrading.
        df = df.rename_axis({'tax_name':'OTU'}, axis=1)
        df = df.set_index('OTU')
        df = df.drop(['specimen'], axis = 0)
        df = df.T
        df = df.dropna(subset=['label'], axis=0)
        # Normalize sample labels like "Sample-123" to "S123".
        df['sid'] = df.label.str.replace('Sample-', 'S')
        df = df.set_index('sid')
        df = df.drop('label', axis=1)
        df = df.astype(float)
        return df
    def _discardLow(df, thresh=0.005):
        """Discard taxa/columns with less than 0.5% of reads"""
        totReads = df.values.sum()
        keepInd1 = (df.sum(axis=0)/totReads) > thresh
        """Also discard taxa that are not present in 25% of samples"""
        keepInd2 = (df>0).sum(axis=0)/df.shape[0] > 0.25
        return df.loc[:, keepInd1 & keepInd2]
    df = pd.read_excel(filename)
    df = _cleanCountDf(df)
    if truncate:
        df = _discardLow(df)
    if compositionNorm:
        # Replace zeros with small pseudo-counts and renormalize rows.
        values = composition.multiplicative_replacement(df.values)
        df = pd.DataFrame(values, columns=df.columns, index=df.index)
    cols = [c for c in df.columns if not c in ['sid']]
    print('Abundance data: %s samples, %s taxa' % (df.shape[0], len(cols)))
    return df, cols
def ratios2otumat(otuDf, lrvec):
    """Reshape a vector of log-ratios back into a symmetric matrix of
    OTU x OTU using columns in otuDf

    Example
    -------
    qbyOTU = ratios2otumat(otuDf, qvalues)

    Parameters
    ----------
    otuDf : pd.DataFrame [samples x OTUs]
        Contains relative abundance [0-1] for all samples (rows) and OTUs (colums)
    lrvec : pd.Series [index: (OTU1, OTU2) tuples]
        One value per OTU pair, e.g. q-values for each log-ratio.

    Returns:
    --------
    mat : pd.DataFrame [index: OTUs, columns: OTUs]"""
    nSamples, nOTUs = otuDf.shape
    otuMat = pd.DataFrame(np.zeros((nOTUs, nOTUs)), columns=otuDf.columns, index=otuDf.columns)
    # Fill both (i, j) and (j, i) so the result is symmetric.
    for ind in lrvec.index:
        i = np.where(otuDf.columns == ind[0])[0]
        j = np.where(otuDf.columns == ind[1])[0]
        otuMat.values[i, j] = lrvec[ind]
        otuMat.values[j, i] = lrvec[ind]
    return otuMat
def otuLogRatios(otuDf):
    """Calculates pairwise log ratios between all OTUs for all samples.

    TODO: Use skbio.stats.composition.perturb_inv for simplicity and consistency
    (though I think the result will be identical)

    Parameters
    ----------
    otuDf : pd.DataFrame [samples x OTUs]
        Contains relative abundance [0-1] for all samples (rows) and OTUs (colums)

    Returns:
    --------
    logRatio : pd.DataFrame [samples x (OTU1,OTU2) column per log-ratio]
        log(OTUj / OTUi) for every pair i < j, per sample"""
    nSamples, nOTUs = otuDf.shape
    """Define minimum OTU abundance to avoid log(0)
    multiplicative_replacement takes matrix [samples x OTUs]"""
    assert otuDf.min().min() > 0, "Cannot input 0 values to otuLogRatios (min value {})".format(otuDf.min().min())
    logOTU = np.log(otuDf).values
    nRatios = int(nOTUs * (nOTUs - 1) / 2)
    logRatio = np.zeros((nSamples, nRatios))
    """List of tuples of two indices for each ratio [nRatios]"""
    ratioIndices = [(otui, otuj) for otui in range(nOTUs - 1) for otuj in range(otui + 1, nOTUs)]
    # (Removed the unused `otuIndices` table the original built here: it cost
    # O(nOTUs * nRatios) ~ O(nOTUs^3) and was never read.)
    ratioCount = 0
    for otui in range(nOTUs - 1):
        # Vectorized fill: all ratios pairing otui with the OTUs after it.
        tmpCount = int(nOTUs - (otui + 1))
        logRatio[:, ratioCount:(ratioCount + tmpCount)] = logOTU[:, otui + 1:] - logOTU[:, otui][:, None]
        ratioCount += tmpCount
    cols = [(otuDf.columns[ratioIndices[r][0]], otuDf.columns[ratioIndices[r][1]]) for r in range(nRatios)]
    logRatio = pd.DataFrame(logRatio, index=otuDf.index, columns=cols)
    return logRatio
def globalCLRPermTest(otuDf, labels, statfunc=_sumRhoStat, nperms=999, seed=110820, binary=False):
    """Calculates centered-log-ratios (CLR) for each sample and performs global
    permutation tests to determine if there is a significant correlation
    over all log-median-ratios, with respect to the label variable of interest.

    Parameters
    ----------
    otuDf : pd.DataFrame [samples x OTUs]
        Contains relative abundance [0-1] for all samples (rows) and OTUs (colums)
    labels: pd.Series (float)
        Contains binary variable indicating membership into one of two categories
        (e.g. treatment conditions). Must share index with otuDf.
    statfunc : function
        Takes a np.ndarray [n x k] and float index [n] as parameters and
        returns a float summarizing over k.
    nperms : int
        Number of iterations for the permutation test.
    seed :int
        Seed for random permutation generation.
    binary : bool
        Cast labels to bool instead of float before computing the statistic.

    Returns:
    --------
    pvalue : float
        Global p-value for a significant association of OTU log-median-ratios
        with label, based on the summary statistic.
    obs : float
        Statistic summarizing the label difference."""
    nSamples, nOTUs = otuDf.shape
    if binary:
        labelValues = labels.values.astype(bool)
    else:
        labelValues = labels.values.astype(float)
    # Make proportions
    # NOTE(review): otuDf.sum() sums over axis=0 (down each OTU column),
    # i.e. this normalizes each OTU across samples rather than each
    # sample's composition -- confirm this is intended given samples are rows.
    otuDf = otuDf / otuDf.sum()
    # Apply multiplicative replacement for zero values
    otuMR = multiplicative_replacement(otuDf.values)
    # Calculate the CLR
    otuCLR = clr(otuMR)
    # Make into a DataFrame
    otuCLR = pd.DataFrame(otuCLR, index=otuDf.index, columns=otuDf.columns)
    np.random.seed(seed)
    obs = statfunc(otuCLR.values, labelValues)
    # Null distribution: the same statistic under permuted labels.
    samples = np.array([
        statfunc(otuCLR.values, labelValues[np.random.permutation(nSamples)])
        for permi in range(nperms)
    ])
    """Since test is based on the abs statistic it is inherently two-sided"""
    pvalue = ((np.abs(samples) >= np.abs(obs)).sum() + 1) / (nperms + 1)
    return pvalue, obs
def CLRPermTest(otuDf, labels, statfunc=_rhoStat, nperms=999, adjMethod='fdr_bh', seed=110820, binary=False):
    """Per-OTU permutation test on centered-log-ratio (CLR) transformed data.

    Parameters
    ----------
    otuDf : pd.DataFrame [samples x OTUs]
        Relative abundances [0-1] for all samples (rows) and OTUs (columns).
    labels : pd.Series (float)
        Binary variable indicating membership into one of two categories.
        Must share index with otuDf.
    statfunc : function
        Takes a np.array [n x k] and float index [n], returns a 1-D array of
        the statistic [k].
    nperms : int
        Number of permutation iterations.
    adjMethod : string or None
        Passed to sm.stats.multipletests for multiplicity adjustment;
        None (or 'none') skips adjustment.
    seed : int
        Seed for the permutation generator.
    binary : bool
        If True, cast labels to bool instead of float.

    Returns
    -------
    qvalues : pd.Series [index: OTU]
        Adjusted (or raw, if adjMethod is None) p-values per OTU.
    observed : pd.Series [index: OTU]
        Observed statistic per OTU.
    """
    nSamples, nOTUs = otuDf.shape
    dtype = bool if binary else float
    labelValues = labels.values.astype(dtype)
    # Proportions -> zero replacement -> centered log-ratio.
    # NOTE(review): otuDf.sum() sums columns (per-OTU); confirm per-sample
    # normalization was not intended — behavior kept identical to original.
    comp = otuDf / otuDf.sum()
    otuCLR = pd.DataFrame(
        clr(multiplicative_replacement(comp.values)),
        index=comp.index,
        columns=comp.columns
    )
    obs = statfunc(otuCLR.values, labelValues)
    np.random.seed(seed)
    permuted = np.zeros((nperms, nOTUs))
    for permi in range(nperms):
        shuffled = labelValues[np.random.permutation(nSamples)]
        permuted[permi, :] = statfunc(otuCLR.values, shuffled)
    # Two-sided p-values with the +1 correction for the observed statistic
    pvalues = ((np.abs(permuted) >= np.abs(obs[None, :])).sum(axis=0) + 1) / (nperms + 1)
    if adjMethod is None or adjMethod.lower() == 'none':
        qvalues = pvalues
    else:
        qvalues = _pvalueAdjust(pvalues, method=adjMethod)
    qvalues = pd.Series(qvalues, index=comp.columns)
    observed = pd.Series(obs, index=comp.columns)
    return qvalues, observed
def globalLRPermTest(otuDf, labels, statfunc=_sumTStat, nperms=999, seed=110820):
"""Calculates pairwise log ratios between all OTUs and performs global
permutation tests to determine if there is a significant difference
over all log-ratios, with respect to the label variable of interest.
Parameters
----------
otuDf : pd.DataFrame [samples x OTUs]
Contains relative abundance [0-1] for all samples (rows) and OTUs (colums)
labels: pd.Series (bool or int)
Contains binary variable indicating membership into one of two categories
(e.g. treatment conditions). Must share index with otuDf.
statfunc : function
Takes a np.ndarray [n x k] and boolean index [n] as parameters and
returns a float summarizing | |
1:
# Single 2D data
Y, X = data.shape
T, Z = 1, 1
data.shape = T, Z, 1, Y, X, 1 # imageJ format should always have TZCYXS data shape
if metadata is None:
metadata = {}
new_tif.save(data, metadata=metadata)
def getDefaultROI(self):
Y, X = self.img.image.shape
w, h = X, Y
xc, yc = int(round(X/2)), int(round(Y/2))
# yt, xl = int(round(xc-w/2)), int(round(yc-h/2))
yt, xl = 0, 0
# Add ROI Rectangle
cropROI = pg.ROI(
[xl, yt], [w, h],
rotatable=False,
removable=False,
pen=pg.mkPen(color='r'),
maxBounds=QRectF(QRect(0,0,X,Y))
)
return cropROI
def setROIprops(self, roi):
xl, yt = [int(round(c)) for c in roi.pos()]
roi.handleSize = 7
roi.label = pg.LabelItem('ROI', color='r', size=f'{self.pt}pt')
hLabel = roi.label.rect().bottom()
roi.label.setPos(xl, yt-hLabel)
## handles scaling horizontally around center
roi.addScaleHandle([1, 0.5], [0, 0.5])
roi.addScaleHandle([0, 0.5], [1, 0.5])
## handles scaling vertically from opposite edge
roi.addScaleHandle([0.5, 0], [0.5, 1])
roi.addScaleHandle([0.5, 1], [0.5, 0])
## handles scaling both vertically and horizontally
roi.addScaleHandle([1, 1], [0, 0])
roi.addScaleHandle([0, 0], [1, 1])
    def init_data(self, user_ch_file_paths, user_ch_name):
        """Load image data and side-files for every selected Position.

        For each file path (one per Position) this loads the image data plus
        the associated files (segmentation, acdc_df, alignment shifts,
        segmInfo, dataPrep/background ROIs, metadata), asks the user for
        missing metadata on the first Position only, propagates SizeT/SizeZ
        to the remaining Positions and stores the results in ``self.data``.

        Parameters
        ----------
        user_ch_file_paths : sequence of str
            Image file paths of the selected channel, one per Position.
        user_ch_name : str
            Name of the user-selected channel.

        Returns
        -------
        bool
            True on success, False if the user aborted or loading failed.
        """
        # Iterate pos and load_data
        data = []
        for f, file_path in enumerate(user_ch_file_paths):
            try:
                posData = load.loadData(file_path, user_ch_name, QParent=self)
                posData.getBasenameAndChNames()
                posData.buildPaths()
                posData.loadImgData()
                posData.loadOtherFiles(
                    load_segm_data=True,
                    load_acdc_df=True,
                    load_shifts=True,
                    loadSegmInfo=True,
                    load_dataPrep_ROIcoords=True,
                    load_delROIsInfo=False,
                    loadBkgrData=False,
                    loadBkgrROIs=True,
                    load_last_tracked_i=False,
                    load_metadata=True,
                    getTifPath=True
                )
                # If data was cropped then dataPrep_ROIcoords are useless
                if posData.dataPrep_ROIcoords is not None:
                    df = posData.dataPrep_ROIcoords
                    isROIactive = df.at['cropped', 'value'] == 0
                    if not isROIactive:
                        posData.dataPrep_ROIcoords = None
                posData.loadAllImgPaths()
                if f==0 and not self.metadataAlreadyAsked:
                    # Ask metadata only once, for the first Position
                    proceed = posData.askInputMetadata(
                        self.num_pos,
                        ask_SizeT=self.num_pos==1,
                        ask_TimeIncrement=False,
                        ask_PhysicalSizes=False,
                        save=True
                    )
                    self.SizeT = posData.SizeT
                    self.SizeZ = posData.SizeZ
                    if not proceed:
                        # User cancelled the metadata dialog
                        self.titleLabel.setText(
                            'File --> Open or Open recent to start the process',
                            color='w')
                        return False
                else:
                    # Propagate metadata from the first Position
                    posData.SizeT = self.SizeT
                    if self.SizeZ > 1:
                        # In case we know we are loading single 3D z-stacks
                        # we always use the third dimension as SizeZ because
                        # SizeZ in some positions might be different than
                        # first loaded pos
                        SizeZ = posData.img_data.shape[-3]
                        posData.SizeZ = SizeZ
                    else:
                        posData.SizeZ = 1
                    if self.SizeT > 1:
                        posData.SizeT = self.SizeT
                    else:
                        posData.SizeT = 1
                    posData.saveMetadata()
            except AttributeError:
                # Loading failed: log traceback and reset the title prompt
                print('')
                print('====================================')
                traceback.print_exc()
                print('====================================')
                print('')
                self.titleLabel.setText(
                    'File --> Open or Open recent to start the process',
                    color='w')
                return False
            if posData is None:
                self.titleLabel.setText(
                    'File --> Open or Open recent to start the process',
                    color='w')
                return False
            img_shape = posData.img_data.shape
            self.num_frames = posData.SizeT
            self.user_ch_name = user_ch_name
            SizeT = posData.SizeT
            SizeZ = posData.SizeZ
            if f==0:
                self.logger.info(f'Data shape = {img_shape}')
                self.logger.info(f'Number of frames = {SizeT}')
                self.logger.info(f'Number of z-slices per frame = {SizeZ}')
            data.append(posData)
            # Time-lapse data can only be analysed one Position at a time
            if SizeT>1 and self.num_pos>1:
                path = os.path.normpath(file_path)
                path_li = path.split(os.sep)
                rel_path = f'.../{"/".join(path_li[-3:])}'
                msg = QMessageBox()
                msg.critical(
                    self, 'Multiple Pos loading not allowed.',
                    f'The file {rel_path} has multiple frames over time.\n\n'
                    'Loading multiple positions that contain frames over time '
                    'is not allowed.\n\n'
                    'To analyse frames over time load one position at the time',
                    msg.Ok
                )
                self.titleLabel.setText(
                    'File --> Open or Open recent to start the process',
                    color='w')
                return False
        self.data = data
        self.init_segmInfo_df()
        self.init_attr()
        return True
    def init_segmInfo_df(self):
        """Initialize the per-frame z-slice/z-projection table of each Position.

        For every loaded Position with z-stacks (SizeZ > 1) lacking a
        segmInfo table (or whose filename is missing from it), create the
        default table and persist it to CSV. Then wire the z-slice scrollbar
        and z-projection combobox to the first Position.
        """
        # Start from the first Position and the first frame
        self.pos_i = 0
        self.frame_i = 0
        for posData in self.data:
            NO_segmInfo = (
                posData.segmInfo_df is None
                or posData.filename not in posData.segmInfo_df.index
            )
            if NO_segmInfo and posData.SizeZ > 1:
                filename = posData.filename
                df = myutils.getDefault_SegmInfo_df(posData, filename)
                if posData.segmInfo_df is None:
                    posData.segmInfo_df = df
                else:
                    # Keep existing rows; prepend the defaults for this file
                    posData.segmInfo_df = pd.concat([df, posData.segmInfo_df])
                posData.segmInfo_df.to_csv(posData.segmInfo_df_csv_path)
        posData = self.data[0]
        if posData.SizeZ > 1:
            self.zSliceScrollBar.setDisabled(False)
            self.zProjComboBox.setDisabled(False)
            self.zSliceScrollBar.setMaximum(posData.SizeZ-1)
            try:
                # Disconnect first to avoid duplicate signal connections
                # when this method runs more than once
                self.zSliceScrollBar.valueChanged.disconnect()
                self.zProjComboBox.currentTextChanged.disconnect()
            except Exception as e:
                pass
            self.zSliceScrollBar.valueChanged.connect(self.update_z_slice)
            self.zProjComboBox.currentTextChanged.connect(self.updateZproj)
            if posData.SizeT > 1:
                # Time-lapse: enable z interpolation/propagation actions
                self.interpAction.setEnabled(True)
                self.ZbackAction.setEnabled(True)
                self.ZforwAction.setEnabled(True)
            df = posData.segmInfo_df
            idx = (posData.filename, self.frame_i)
            how = posData.segmInfo_df.at[idx, 'which_z_proj']
            self.zProjComboBox.setCurrentText(how)
def update_z_slice(self, z):
if self.zProjComboBox.currentText() == 'single z-slice':
posData = self.data[self.pos_i]
df = posData.segmInfo_df
idx = (posData.filename, self.frame_i)
posData.segmInfo_df.at[idx, 'z_slice_used_dataPrep'] = z
posData.segmInfo_df.at[idx, 'z_slice_used_gui'] = z
self.update_img()
posData.segmInfo_df.to_csv(posData.segmInfo_df_csv_path)
def updateZproj(self, how):
posData = self.data[self.pos_i]
for frame_i in range(self.frame_i, posData.SizeT):
df = posData.segmInfo_df
idx = (posData.filename, self.frame_i)
posData.segmInfo_df.at[idx, 'which_z_proj'] = how
posData.segmInfo_df.at[idx, 'which_z_proj_gui'] = how
if how == 'single z-slice':
self.zSliceScrollBar.setDisabled(False)
self.z_label.setStyleSheet('color: black')
self.update_z_slice(self.zSliceScrollBar.sliderPosition())
else:
self.zSliceScrollBar.setDisabled(True)
self.z_label.setStyleSheet('color: gray')
self.update_img()
# Apply same z-proj to future pos
if posData.SizeT == 1:
for posData in self.data[self.pos_i+1:]:
idx = (posData.filename, self.frame_i)
posData.segmInfo_df.at[idx, 'which_z_proj'] = how
self.save_segmInfo_df_pos()
    def save_segmInfo_df_pos(self):
        """Save segmInfo tables of all Positions to CSV on a worker thread.

        Runs toCsvWorker on a QThread so the GUI stays responsive; worker and
        thread delete themselves when finished.
        """
        # Launch a separate thread to save to csv and keep gui responsive
        self.thread = QThread()
        self.worker = toCsvWorker()
        self.worker.setData(self.data)
        self.worker.moveToThread(self.thread)
        # Lifecycle wiring: start -> run; finished -> quit thread and
        # schedule both objects for deletion
        self.thread.started.connect(self.worker.run)
        self.worker.finished.connect(self.thread.quit)
        self.worker.finished.connect(self.worker.deleteLater)
        self.thread.finished.connect(self.thread.deleteLater)
        self.thread.start()
def useSameZ_fromHereBack(self, event):
how = self.zProjComboBox.currentText()
posData = self.data[self.pos_i]
df = posData.segmInfo_df
z = df.at[(posData.filename, self.frame_i), 'z_slice_used_dataPrep']
if posData.SizeT > 1:
for i in range(0, self.frame_i):
df.at[(posData.filename, i), 'z_slice_used_dataPrep'] = z
df.at[(posData.filename, i), 'z_slice_used_gui'] = z
df.at[(posData.filename, i), 'which_z_proj'] = how
posData.segmInfo_df.to_csv(posData.segmInfo_df_csv_path)
elif posData.SizeZ > 1:
for _posData in self.data[:self.pos_i]:
df = _posData.segmInfo_df
df.at[(_posData.filename, 0), 'z_slice_used_dataPrep'] = z
df.at[(_posData.filename, 0), 'z_slice_used_gui'] = z
df.at[(_posData.filename, 0), 'which_z_proj'] = how
self.save_segmInfo_df_pos()
def useSameZ_fromHereForw(self, event):
how = self.zProjComboBox.currentText()
posData = self.data[self.pos_i]
df = posData.segmInfo_df
z = df.at[(posData.filename, self.frame_i), 'z_slice_used_dataPrep']
if posData.SizeT > 1:
for i in range(self.frame_i, posData.SizeT):
df.at[(posData.filename, i), 'z_slice_used_dataPrep'] = z
df.at[(posData.filename, i), 'z_slice_used_gui'] = z
df.at[(posData.filename, i), 'which_z_proj'] = how
posData.segmInfo_df.to_csv(posData.segmInfo_df_csv_path)
elif posData.SizeZ > 1:
for _posData in self.data[self.pos_i:]:
df = _posData.segmInfo_df
df.at[(_posData.filename, 0), 'z_slice_used_dataPrep'] = z
df.at[(_posData.filename, 0), 'z_slice_used_gui'] = z
df.at[(_posData.filename, 0), 'which_z_proj'] = how
self.save_segmInfo_df_pos()
def interp_z(self, event):
posData = self.data[self.pos_i]
df = posData.segmInfo_df
x0, z0 = 0, df.at[(posData.filename, 0), 'z_slice_used_dataPrep']
x1 = self.frame_i
z1 = df.at[(posData.filename, x1), 'z_slice_used_dataPrep']
f = scipy.interpolate.interp1d([x0, x1], [z0, z1])
xx = np.arange(0, self.frame_i)
zz = np.round(f(xx)).astype(int)
for i in range(self.frame_i):
df.at[(posData.filename, i), 'z_slice_used_dataPrep'] = zz[i]
df.at[(posData.filename, i), 'z_slice_used_gui'] = zz[i]
df.at[(posData.filename, i), 'which_z_proj'] = 'single z-slice'
posData.segmInfo_df.to_csv(posData.segmInfo_df_csv_path)
    @myutils.exception_handler
    def prepData(self, event):
        """Run the data-prep step on every loaded Position.

        For each Position: optionally back up the Images folder as a zip when
        files from previous analysis runs (.npy/.npz or segmentation) are
        detected, convert .npy files to .npz, align the data, and save the
        segmInfo table (z-stacks only). If the loop completes without the
        user cancelling, add the crop/background ROIs and enable cropping.
        """
        self.titleLabel.setText(
            'Prepping data... (check progress in the terminal)',
            color='w')
        doZip = False
        for p, posData in enumerate(self.data):
            self.startAction.setDisabled(True)
            # True when files from a previous analysis run are present
            nonTifFound = (
                any([npz is not None for npz in posData.npz_paths]) or
                any([npy is not None for npy in posData.npy_paths]) or
                posData.segmFound
            )
            imagesPath = posData.images_path
            zipPath = f'{imagesPath}.zip'
            if nonTifFound and p==0:
                # Ask only once (first Position) whether to zip as a backup
                txt = (
                    'Additional <b>NON-tif files detected.</b><br><br>'
                    'The requested experiment folder <b>already contains .npy '
                    'or .npz files</b> '
                    'most likely from previous analysis runs.<br><br>'
                    'To <b>avoid data losses</b> we recommend zipping the '
                    '"Images" folder.<br><br>'
                    'If everything looks fine after prepping the data, '
                    'you can manually '
                    'delete the zip archive.<br><br>'
                    'Do you want to <b>automatically zip now?</b><br><br>'
                    'PS: Zip archive location:<br><br>'
                    f'{zipPath}'
                )
                txt = html_utils.paragraph(txt)
                msg = widgets.myMessageBox()
                _, yes, no = msg.warning(
                    self, 'NON-Tif data detected!', txt,
                    buttonsTexts=('Cancel', 'Yes', 'No')
                )
                if msg.cancel:
                    # User aborted: leave cropping enabled and stop prepping
                    self.cropAction.setEnabled(True)
                    self.titleLabel.setText('Process aborted', color='w')
                    break
                if yes == msg.clickedButton:
                    doZip = True
            if doZip:
                self.logger.info(f'Zipping Images folder: {zipPath}')
                shutil.make_archive(imagesPath, 'zip', imagesPath)
            self.npy_to_npz(posData)
            self.alignData(self.user_ch_name, posData)
            if posData.SizeZ>1:
                posData.segmInfo_df.to_csv(posData.segmInfo_df_csv_path)
        else:
            # for/else: runs only when the loop was NOT broken (no abort)
            self.update_img()
            self.logger.info('Done.')
            self.addROIs()
            self.saveROIcoords(False, self.data[self.pos_i])
            self.saveBkgrROIs(self.data[self.pos_i])
            self.cropAction.setEnabled(True)
            if posData.SizeZ>1:
                self.cropZaction.setEnabled(True)
            self.titleLabel.setText(
                'Data successfully prepped. You can now crop the images or '
                'close the program',
                color='w')
def setStandardRoiShape(self, text):
posData = self.data[self.pos_i]
Y, X = posData.img_data.shape[-2:]
m = re.findall(r'(\d+)x(\d+)', text)
w, h = int(m[0][0]), int(m[0][1])
# xc, yc = int(round(X/2)), int(round(Y/2))
# yt, xl = int(round(xc-w/2)), int(round(yc-h/2))
posData.cropROI.setPos([0, 0])
posData.cropROI.setSize([w, h])
def addROIs(self):
Y, X = self.img.image.shape
max_size = round(int(np.log2(min([Y, X])/16)))
items = [f'{16*(2**i)}x{16*(2**i)}' for i in range(1, max_size+1)]
items.append(f'{X}x{Y}')
self.ROIshapeComboBox.clear()
self.ROIshapeComboBox.addItems(items)
self.ROIshapeComboBox.setCurrentText(items[-1])
for posData in self.data:
if posData.dataPrep_ROIcoords is None:
cropROI = self.getDefaultROI()
else:
xl = posData.dataPrep_ROIcoords.at['x_left', 'value']
yt = posData.dataPrep_ROIcoords.at['y_top', 'value']
w = posData.dataPrep_ROIcoords.at['x_right', 'value'] - xl
h = posData.dataPrep_ROIcoords.at['y_bottom', 'value'] - yt
cropROI = pg.ROI(
[xl, yt], [w, h],
rotatable=False,
removable=False,
pen=pg.mkPen(color='r'),
maxBounds=QRectF(QRect(0,0,X,Y))
)
self.setROIprops(cropROI)
posData.cropROI = cropROI
self.updateROI()
try:
self.ROIshapeComboBox.currentTextChanged.disconnect()
except Exception as e:
self.ROIshapeComboBox.currentTextChanged.connect(
self.setStandardRoiShape)
self.addBkrgRoiActon.setDisabled(False)
for posData in self.data:
if not posData.bkgrROIs:
bkgrROI = self.getDefaultBkgrROI()
self.setBkgrROIprops(bkgrROI)
posData.bkgrROIs.append(bkgrROI)
else:
for bkgrROI in posData.bkgrROIs:
self.setBkgrROIprops(bkgrROI)
self.updateBkgrROIs()
def getDefaultBkgrROI(self):
Y, X = self.img.image.shape
xRange, yRange = self.ax1.viewRange()
xl, yt = abs(xRange[0]), abs(yRange[0])
w, h = int(X/8), int(Y/8)
bkgrROI = pg.ROI(
[xl, yt], [w, h],
rotatable=False,
removable=False,
pen=pg.mkPen(color=(150,150,150)),
maxBounds=QRectF(QRect(0,0,X,Y))
)
return bkgrROI
def setBkgrROIprops(self, bkgrROI):
bkgrROI.handleSize = 7
xl, yt = [int(round(c)) for c in bkgrROI.pos()]
bkgrROI.label = pg.LabelItem(
'Bkgr. ROI', color=(150,150,150), size=f'{self.pt}pt'
)
hLabel = bkgrROI.label.rect().bottom()
bkgrROI.label.setPos(xl, yt-hLabel)
## handles scaling horizontally around center
bkgrROI.addScaleHandle([1, 0.5], [0, 0.5])
| |
"""Filter out instances of Token, leaving only a list of strings.
Used instead of a more specific parsing method (e.g. splitting on commas)
when only strings are expected, so as to be a little lenient.
Apache does it this way and has some comments about broken clients which
forget commas (?), so I'm doing it the same way. It shouldn't
hurt anything, in any case.
"""
l = []
for x in seq:
if not isinstance(x, Token):
l.append(x)
return l
# parser utilities:
def checkSingleToken(tokens):
    """Return the sole element of *tokens*; raise ValueError otherwise."""
    if len(tokens) == 1:
        return tokens[0]
    raise ValueError("Expected single token, not %s." % (tokens,))
def parseKeyValue(val):
    """Parse a token sequence as ``key`` or ``key=value``.

    Returns (key, None) for a bare key, (key, value) for key=value;
    raises ValueError for anything else.
    """
    n = len(val)
    if n == 1:
        return val[0], None
    if n == 3 and val[1] == Token('='):
        return val[0], val[2]
    raise ValueError("Expected key or key=value, but got %s." % (val,))
def parseArgs(field):
    """Split a header field on ';' into its value and parameter list.

    @param field: tokenized header field
    @return: (value_tokens, [(key, value), ...]) where each parameter is
        parsed with parseKeyValue.
    """
    args = split(field, Token(';'))
    # Bug fix: args.next() is Python-2-only generator syntax; the next()
    # builtin works on Python 2.6+ and Python 3.
    val = next(args)
    args = [parseKeyValue(arg) for arg in args]
    return val, args
def listParser(fun):
    """Return a parser that applies *fun* to every non-empty field of a
    comma-separated token list."""
    def parseList(tokens):
        for field in split(tokens, Token(',')):
            if field:
                yield fun(field)
    return parseList
def last(seq):
    """Return the final element of *seq* (i.e. ``seq[-1]``)."""
    return seq[-1]
# Generation utilities
def quoteString(s):
    """
    Quote a string per the I{quoted-string} production of RFC 2616
    section 2.2: backslashes are doubled first, then double quotes are
    escaped, and the result is wrapped in double quotes.
    @type s: C{str}
    @rtype: C{str}
    """
    escaped = s.replace('\\', '\\\\').replace('"', '\\"')
    return '"' + escaped + '"'
def listGenerator(fun):
    """Return a generator function that maps *fun* over each element of a
    list and joins the results with generateList."""
    def generateListHelper(l):
        generated = [fun(element) for element in l]
        return generateList(generated)
    return generateListHelper
def generateList(seq):
    """Join generated header values with the standard ', ' list separator."""
    return ", ".join(seq)
def singleHeader(item):
    """Wrap one generated header value in a one-element list."""
    return [item]
# Pre-compiled pattern matching any HTTP separator character; used by
# generateKeyValues to decide when a parameter value needs quoting.
_seperators = re.compile('[' + re.escape(http_tokens) + ']')
def generateKeyValues(parameters):
    """
    Format an iterable of key/value pairs as ';'-joined parameters.

    Values containing HTTP separator characters are quoted; per RFC 2616
    section 19.2 note 2 many implementations fail on quoted values, so
    quoting is applied only when required. A C{None} value emits the bare
    key.
    @param parameters: iterable of C{tuple} of C{str} name and C{str} or
        C{None} value.
    @return: The formatted result.
    @rtype: C{str}
    """
    parts = []
    for name, value in parameters:
        if value is None:
            parts.append('%s' % name)
            continue
        if _seperators.search(value) is not None:
            value = quoteString(value)
        parts.append('%s=%s' % (name, value))
    return ";".join(parts)
class MimeType(object):
    """A parsed MIME type: media type, subtype and parameter dict.

    Instances compare equal when type, subtype and parameters all match;
    equal instances hash equally regardless of parameter insertion order.
    """

    @classmethod
    def fromString(cls, mimeTypeString):
        """Generate a MimeType object from the given string.
        @param mimeTypeString: The mimetype to parse
        @return: L{MimeType}
        """
        return DefaultHTTPHandler.parse('content-type', [mimeTypeString])

    def __init__(self, mediaType, mediaSubtype, params={}, **kwargs):
        """
        @type mediaType: C{str}
        @type mediaSubtype: C{str}
        @type params: C{dict}
        """
        self.mediaType = mediaType
        self.mediaSubtype = mediaSubtype
        # dict(params) copies, so the shared default {} is never mutated
        self.params = dict(params)
        if kwargs:
            self.params.update(kwargs)

    def __eq__(self, other):
        if not isinstance(other, MimeType):
            return NotImplemented
        return (self.mediaType == other.mediaType and
                self.mediaSubtype == other.mediaSubtype and
                self.params == other.params)

    def __ne__(self, other):
        # Bug fix: don't truth-test NotImplemented (deprecated); propagate
        # it so Python can try the reflected operation.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __repr__(self):
        return "MimeType(%r, %r, %r)" % (self.mediaType, self.mediaSubtype, self.params)

    def __hash__(self):
        # Bug fix: dict.iteritems() does not exist on Python 3, and hashing
        # a tuple of items made the hash depend on insertion order —
        # violating the eq/hash contract. frozenset is order-independent.
        return hash((self.mediaType, self.mediaSubtype,
                     frozenset(self.params.items())))
class MimeDisposition(object):
    """A parsed Content-Disposition value: disposition type plus parameters.

    Instances compare equal when type and parameters match; equal instances
    hash equally regardless of parameter insertion order.
    """

    @classmethod
    def fromString(cls, dispositionString):
        """Generate a MimeDisposition object from the given string.
        @param dispositionString: The disposition to parse
        @return: L{MimeDisposition}
        """
        return DefaultHTTPHandler.parse('content-disposition', [dispositionString])

    def __init__(self, dispositionType, params={}, **kwargs):
        """
        @type dispositionType: C{str}
        @type params: C{dict}
        """
        # (Doc fix: the original docstring described MimeType's parameters.)
        self.dispositionType = dispositionType
        # dict(params) copies, so the shared default {} is never mutated
        self.params = dict(params)
        if kwargs:
            self.params.update(kwargs)

    def __eq__(self, other):
        if not isinstance(other, MimeDisposition):
            return NotImplemented
        return (self.dispositionType == other.dispositionType and
                self.params == other.params)

    def __ne__(self, other):
        # Bug fix: don't truth-test NotImplemented (deprecated); propagate
        # it so Python can try the reflected operation.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __repr__(self):
        return "MimeDisposition(%r, %r)" % (self.dispositionType, self.params)

    def __hash__(self):
        # Bug fix: dict.iteritems() does not exist on Python 3, and hashing
        # an items tuple was insertion-order dependent; frozenset is not.
        return hash((self.dispositionType, frozenset(self.params.items())))
# Specific header parsers.
def parseAccept(field):
    """Parse a single Accept field into (MimeType, qvalue).

    Per the (screwy) spec, a 'q' parameter acts as the separator between
    MIME parameters and (as yet undefined) additional HTTP accept-extension
    parameters.
    """
    atype, args = parseArgs(field)
    if len(atype) != 3 or atype[1] != Token('/'):
        raise ValueError("MIME Type " + str(atype) + " invalid.")
    # Everything before the first 'q' parameter belongs to the MIME type;
    # 'q' and anything after it are accept parameters.
    for qIndex, arg in enumerate(args):
        if arg[0] == 'q':
            mimeparams = tuple(args[:qIndex])
            params = args[qIndex:]
            break
    else:
        mimeparams = tuple(args)
        params = []
    qval = 1.0  # default qvalue when none given
    for key, value in params:
        if key == 'q':
            qval = float(value)
        else:
            # Unknown accept-extension parameter: silently ignored
            pass
    return MimeType(atype[0], atype[2], mimeparams), qval
def parseAcceptQvalue(field):
    """Parse a field like ``gzip;q=0.5`` into (token, qvalue)."""
    atype, args = parseArgs(field)
    atype = checkSingleToken(atype)
    qvalue = 1.0  # default when no q parameter is present
    for key, value in args:
        if key == 'q':
            qvalue = float(value)
    return atype, qvalue
def addDefaultCharset(charsets):
    """Make ISO-8859-1 acceptable by default unless the client mentioned it
    (or a wildcard) explicitly. Mutates and returns *charsets*."""
    hasWildcard = charsets.get('*') is not None
    hasLatin1 = charsets.get('iso-8859-1') is not None
    if not hasWildcard and not hasLatin1:
        charsets['iso-8859-1'] = 1.0
    return charsets
def addDefaultEncoding(encodings):
    """Make the identity encoding minimally acceptable unless mentioned.

    The RFC doesn't specify a default qvalue for identity, only that it
    "is acceptable" if not mentioned — so it gets a very low qvalue.
    Mutates and returns *encodings*.
    """
    hasWildcard = encodings.get('*') is not None
    hasIdentity = encodings.get('identity') is not None
    if not hasWildcard and not hasIdentity:
        encodings['identity'] = .0001
    return encodings
def parseContentType(header):
    """Parse a tokenized Content-Type value into a MimeType.

    Case folding is disabled for this header because parameter values like
    ``multipart/form-data; boundary=CaSeFuLsTuFf`` are case-sensitive, so
    only the type/subtype and parameter names are lowered here — values are
    kept as-is.
    """
    ctype, args = parseArgs(header)
    if len(ctype) != 3 or ctype[1] != Token('/'):
        raise ValueError("MIME Type " + str(ctype) + " invalid.")
    loweredArgs = [(name.lower(), value) for name, value in args]
    return MimeType(ctype[0].lower(), ctype[2].lower(), tuple(loweredArgs))
def parseContentDisposition(header):
    """Parse a tokenized Content-Disposition value into a MimeDisposition.

    Case folding is disabled for this header (parameter values such as
    filenames are case-sensitive), so the type and parameter names are
    lowered explicitly while values are kept as-is.
    """
    dtype, args = parseArgs(header)
    if len(dtype) != 1:
        raise ValueError("Content-Disposition " + str(dtype) + " invalid.")
    loweredArgs = [(name.lower(), value) for name, value in args]
    return MimeDisposition(dtype[0].lower(), tuple(loweredArgs))
def parseContentMD5(header):
    """Decode a base64-encoded Content-MD5 header value to raw digest bytes.

    @raise ValueError: if the value cannot be decoded.
    """
    try:
        # Bug fixes: 'except Exception, e' is Python-2-only syntax, and
        # base64.decodestring() was removed in Python 3.9 — decodebytes is
        # the modern equivalent (it requires bytes, so encode str input).
        if isinstance(header, str):
            header = header.encode('ascii')
        return base64.decodebytes(header)
    except Exception as e:
        raise ValueError(e)
def parseContentRange(header):
    """Parse a content-range header into (kind, start, end, realLength).

    realLength might be None if real length is not known ('*').
    start and end might be None if start,end unspecified (for response
    code 416).

    @raise ValueError: for non-byte range units.
    """
    kind, other = header.strip().split()
    if kind.lower() != "bytes":
        # Bug fix: the original never applied '% kind', so the message
        # always showed a literal %r instead of the offending unit.
        raise ValueError("a range of type %r is not supported" % (kind,))
    startend, realLength = other.split("/")
    if startend.strip() == '*':
        # Unsatisfied range (e.g. 416 responses): bounds unknown
        start, end = None, None
    else:
        start, end = map(int, startend.split("-"))
    realLength = None if realLength == "*" else int(realLength)
    return (kind, start, end, realLength)
def parseExpect(field):
    """Parse an Expect field into (name, (value, param...)) where the first
    tuple element is the expectation's own value and the rest are its
    parsed parameters."""
    etype, args = parseArgs(field)
    name, value = parseKeyValue(etype)
    return (name, tuple([value] + list(args)))
def parseExpires(header):
    """Parse an Expires header value.

    HTTP/1.1 clients and caches MUST treat invalid date formats, especially
    the value "0", as already expired — hence the 0 fallback.
    """
    try:
        expires = parseDateTime(header)
    except ValueError:
        expires = 0
    return expires
def parseIfModifiedSince(header):
    """Parse If-Modified-Since, tolerating a trailing '; length=123'.

    Ancient Netscape and current MSIE send e.g.
    ``Thu, 05 Aug 2004 12:57:27 GMT; length=123`` — blatantly RFC-violating
    and undocumented — so everything after the first ';' is discarded.
    """
    datePart = header.split(';', 1)[0]
    return parseDateTime(datePart)
def parseIfRange(headers):
    """Parse If-Range as an entity tag, falling back to an HTTP date."""
    try:
        tag = ETag.parse(tokenize(headers))
    except ValueError:
        return parseDateTime(last(headers))
    return tag
def parseRange(crange):
    """Parse a tokenized Range header into (unit, [(start, end), ...]).

    Each spec may omit start ("-500": final 500 bytes) or end ("500-": from
    byte 500 onward); the missing bound is returned as None.
    @raise ValueError: for malformed specs or non-byte units.
    """
    tokens = list(crange)
    if len(tokens) < 3 or tokens[1] != Token('='):
        raise ValueError("Invalid range header format: %s" % (tokens,))
    unit = tokens[0]
    if unit != 'bytes':
        raise ValueError("Unknown range unit: %s." % (unit,))
    ranges = []
    for byterangespec in split(tokens[2:], Token(',')):
        if len(byterangespec) != 1:
            raise ValueError("Invalid range header format: %s" % (tokens,))
        startText, endText = byterangespec[0].split('-')
        if not startText and not endText:
            # "-" alone specifies nothing
            raise ValueError("Invalid range header format: %s" % (tokens,))
        start = int(startText) if startText else None
        end = int(endText) if endText else None
        if start and end and start > end:
            raise ValueError("Invalid range header, start > end: %s" % (tokens,))
        ranges.append((start, end))
    return unit, ranges
def parseRetryAfter(header):
    """Parse Retry-After as either delta-seconds (returned as an absolute
    epoch time) or an HTTP date."""
    try:
        delaySeconds = int(header)
    except ValueError:
        # Not an integer: must be an HTTP datetime
        return parseDateTime(header)
    return time.time() + delaySeconds
# WWW-Authenticate and Authorization
def parseWWWAuthenticate(tokenized):
headers = []
tokenList = list(tokenized)
while tokenList:
scheme = tokenList.pop(0)
challenge = {}
last = None
kvChallenge = False
while tokenList:
token = tokenList.pop(0)
if token == Token('='):
kvChallenge = True
challenge[last] = tokenList.pop(0)
last = None
elif token == Token(','):
if kvChallenge:
if len(tokenList) > 1 and tokenList[1] != Token('='):
break
else:
break
else:
last | |
<filename>qcodes/instrument_drivers/Keysight/KtM960xDefs.py
# KtM960x Definitions
#
# These have been copy/pasted out of KtM960x.h provided by Keysite
#
IVI_ATTR_BASE = 1000000
IVI_INHERENT_ATTR_BASE = (
IVI_ATTR_BASE + 50000
) # base for inherent capability attributes
# base for IVI-defined class attributes
IVI_CLASS_ATTR_BASE = IVI_ATTR_BASE + 250000
# base for IviLxiSync attributes
IVI_LXISYNC_ATTR_BASE = IVI_ATTR_BASE + 950000
IVI_SPECIFIC_ATTR_BASE = (
IVI_ATTR_BASE + 150000
) # base for attributes of specific drivers
# #===== IVI Inherent Instrument Attributes ==============================
# - Driver Identification
KTM960X_ATTR_SPECIFIC_DRIVER_DESCRIPTION = (
IVI_INHERENT_ATTR_BASE + 514
) # ViString, read-only
KTM960X_ATTR_SPECIFIC_DRIVER_PREFIX = (
IVI_INHERENT_ATTR_BASE + 302
) # ViString, read-only
KTM960X_ATTR_SPECIFIC_DRIVER_VENDOR = (
IVI_INHERENT_ATTR_BASE + 513
) # ViString, read-only
KTM960X_ATTR_SPECIFIC_DRIVER_REVISION = (
IVI_INHERENT_ATTR_BASE + 551
) # ViString, read-only
KTM960X_ATTR_SPECIFIC_DRIVER_CLASS_SPEC_MAJOR_VERSION = (
IVI_INHERENT_ATTR_BASE + 515
) # ViInt32, read-only
KTM960X_ATTR_SPECIFIC_DRIVER_CLASS_SPEC_MINOR_VERSION = (
IVI_INHERENT_ATTR_BASE + 516
) # ViInt32, read-only
# - User Options
# ViBoolean, read-write
KTM960X_ATTR_RANGE_CHECK = IVI_INHERENT_ATTR_BASE + 2
KTM960X_ATTR_QUERY_INSTRUMENT_STATUS = (
IVI_INHERENT_ATTR_BASE + 3
) # ViBoolean, read-write
# ViBoolean, read-write
KTM960X_ATTR_CACHE = IVI_INHERENT_ATTR_BASE + 4
# ViBoolean, read-write
KTM960X_ATTR_SIMULATE = IVI_INHERENT_ATTR_BASE + 5
# ViBoolean, read-write
KTM960X_ATTR_RECORD_COERCIONS = IVI_INHERENT_ATTR_BASE + 6
# ViBoolean, read-write
KTM960X_ATTR_INTERCHANGE_CHECK = IVI_INHERENT_ATTR_BASE + 21
# - Advanced Session Information
# ViString, read-only
KTM960X_ATTR_LOGICAL_NAME = IVI_INHERENT_ATTR_BASE + 305
KTM960X_ATTR_IO_RESOURCE_DESCRIPTOR = (
IVI_INHERENT_ATTR_BASE + 304
) # ViString, read-only
# ViString, read-only
KTM960X_ATTR_DRIVER_SETUP = IVI_INHERENT_ATTR_BASE + 7
# - Driver Capabilities
# ViString, read-only
KTM960X_ATTR_GROUP_CAPABILITIES = IVI_INHERENT_ATTR_BASE + 401
KTM960X_ATTR_SUPPORTED_INSTRUMENT_MODELS = (
IVI_INHERENT_ATTR_BASE + 327
) # ViString, read-only
# - Instrument Identification
KTM960X_ATTR_INSTRUMENT_FIRMWARE_REVISION = (
IVI_INHERENT_ATTR_BASE + 510
) # ViString, read-only
KTM960X_ATTR_INSTRUMENT_MANUFACTURER = (
IVI_INHERENT_ATTR_BASE + 511
) # ViString, read-only
# ViString, read-only
KTM960X_ATTR_INSTRUMENT_MODEL = IVI_INHERENT_ATTR_BASE + 512
# ===== Instrument-Specific Attributes ===========
# - System
# ViString, read-only
KTM960X_ATTR_SERIAL_NUMBER = IVI_SPECIFIC_ATTR_BASE + 3
# ViString, read-only
KTM960X_ATTR_SYSTEM_ABOUT = IVI_SPECIFIC_ATTR_BASE + 4
KTM960X_ATTR_SYSTEM_GC_TIMING_OPTIMIZATION_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 5
) # ViBoolean, read-write
KTM960X_ATTR_SYSTEM_IDENTIFY_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 6
) # ViBoolean, read-write
# ViInt32, read-only
KTM960X_ATTR_SYSTEM_INSTANCE_ID = IVI_SPECIFIC_ATTR_BASE + 7
# ViString, read-only
KTM960X_ATTR_SYSTEM_OPTIONS = IVI_SPECIFIC_ATTR_BASE + 8
KTM960X_ATTR_SYSTEM_AUTO_TIMER_RESET_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 435
) # ViBoolean, read-write
# ViString, read-only
KTM960X_ATTR_SYSTEM_DATE = IVI_SPECIFIC_ATTR_BASE + 436
KTM960X_ATTR_SYSTEM_INTERLOCK_THRESHOLD_VOLTAGE = (
IVI_SPECIFIC_ATTR_BASE + 437
) # ViReal64, read-write
KTM960X_ATTR_SYSTEM_INTERLOCK_TRIPPED = (
IVI_SPECIFIC_ATTR_BASE + 438
) # ViBoolean, read-only
# ViInt32, read-write
KTM960X_ATTR_SYSTEM_LINE_FREQUENCY = IVI_SPECIFIC_ATTR_BASE + 439
# ViReal64, read-only
KTM960X_ATTR_SYSTEM_TIMER_COUNT = IVI_SPECIFIC_ATTR_BASE + 440
# ViInt32, read-only
KTM960X_ATTR_SYSTEM_CHANNEL_COUNT = IVI_SPECIFIC_ATTR_BASE + 514
# ViInt32, read-only
KTM960X_ATTR_SYSTEM_MODULE_COUNT = IVI_SPECIFIC_ATTR_BASE + 518
# - Licensing
KTM960X_ATTR_LICENSING_HOST_IDENTIFIER = (
IVI_SPECIFIC_ATTR_BASE + 11
) # ViString, read-only
KTM960X_ATTR_LICENSING_INSTALLED_LICENSES = (
IVI_SPECIFIC_ATTR_BASE + 12
) # ViString, read-only
# - SFP
KTM960X_ATTR_SYSTEM_SFP_CONTROLS_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 15
) # ViBoolean, read-write
# - AutoRefresh
KTM960X_ATTR_SYSTEM_SFP_AUTOREFRESH_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 13
) # ViBoolean, read-write
KTM960X_ATTR_SYSTEM_SFP_AUTOREFRESH_PERIOD = (
IVI_SPECIFIC_ATTR_BASE + 14
) # ViReal64, read-write
# - Group
KTM960X_ATTR_SYSTEM_GROUP_SYNC_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 515
) # ViBoolean, read-write
KTM960X_ATTR_SYSTEM_GROUP_SYNC_MASTER_MODULE = (
IVI_SPECIFIC_ATTR_BASE + 516
) # ViInt32, read-write
KTM960X_ATTR_SYSTEM_GROUP_SYNC_TRIGGER_LINE = (
IVI_SPECIFIC_ATTR_BASE + 517
) # ViInt32, read-write
# -- Module --------------------------------------------------------------
KTM960X_ATTR_MODULE_COUNT = IVI_SPECIFIC_ATTR_BASE + 23  # ViInt32, read-only
KTM960X_ATTR_MODULE_INSTRUMENT_CAPABILITY = IVI_SPECIFIC_ATTR_BASE + 24  # ViString, read-only
KTM960X_ATTR_MODULE_MAXIMUM_RECORDED_TEMPERATURE = IVI_SPECIFIC_ATTR_BASE + 25  # ViReal64, read-only
KTM960X_ATTR_MODULE_OPTIONS = IVI_SPECIFIC_ATTR_BASE + 26  # ViString, read-only
KTM960X_ATTR_MODULE_SERIAL_NUMBER = IVI_SPECIFIC_ATTR_BASE + 27  # ViString, read-only
KTM960X_ATTR_MODULE_SLOT = IVI_SPECIFIC_ATTR_BASE + 28  # ViInt32, read-only
KTM960X_ATTR_MODULE_TEMPERATURE = IVI_SPECIFIC_ATTR_BASE + 29  # ViReal64, read-only
KTM960X_ATTR_MODULE_CHASSIS_NUMBER = IVI_SPECIFIC_ATTR_BASE + 490  # ViInt32, read-only
KTM960X_ATTR_MODULE_IDENTIFY_ENABLED = IVI_SPECIFIC_ATTR_BASE + 491  # ViBoolean, read-write
KTM960X_ATTR_MODULE_INSTALLED_LICENSES = IVI_SPECIFIC_ATTR_BASE + 492  # ViString, read-only
KTM960X_ATTR_MODULE_MANUFACTURING_NUMBER = IVI_SPECIFIC_ATTR_BASE + 508  # ViString, read-only
KTM960X_ATTR_MODULE_MODEL_NUMBER = IVI_SPECIFIC_ATTR_BASE + 509  # ViString, read-only
KTM960X_ATTR_MODULE_UPTIME = IVI_SPECIFIC_ATTR_BASE + 511  # ViInt64, read-only
KTM960X_ATTR_MODULE_CHANNEL_COUNT = IVI_SPECIFIC_ATTR_BASE + 526  # ViInt32, read-only
KTM960X_ATTR_MODULE_POWER_STATE = IVI_SPECIFIC_ATTR_BASE + 527  # ViBoolean, read-write
KTM960X_ATTR_MODULE_VENDOR = IVI_SPECIFIC_ATTR_BASE + 528  # ViString, read-only
KTM960X_ATTR_MODULE_INTERLOCK_TRIPPED = IVI_SPECIFIC_ATTR_BASE + 529  # ViBoolean, read-only

# -- Module: Calibration -------------------------------------------------
KTM960X_ATTR_MODULE_CALIBRATION_ADJUSTMENT_INFORMATION = IVI_SPECIFIC_ATTR_BASE + 30  # ViString, read-only
KTM960X_ATTR_MODULE_CALIBRATION_DUE_DATE = IVI_SPECIFIC_ATTR_BASE + 31  # ViString, read-only
KTM960X_ATTR_MODULE_CALIBRATION_STATUS = IVI_SPECIFIC_ATTR_BASE + 524  # ViInt32, read-only
KTM960X_ATTR_MODULE_CALIBRATION_VERIFICATION_INFORMATION = IVI_SPECIFIC_ATTR_BASE + 525  # ViString, read-only
# -- Nonvolatile ---------------------------------------------------------
KTM960X_ATTR_NONVOLATILE_ASSET_NUMBER = IVI_SPECIFIC_ATTR_BASE + 34  # ViString, read-write
KTM960X_ATTR_NONVOLATILE_CAL_DUE_REMINDER = IVI_SPECIFIC_ATTR_BASE + 35  # ViInt32, read-write
KTM960X_ATTR_NONVOLATILE_ENABLE_INSTRUMENT_CAL_WARNINGS = IVI_SPECIFIC_ATTR_BASE + 36  # ViBoolean, read-write
KTM960X_ATTR_NONVOLATILE_ENABLE_MODULE_CAL_WARNINGS = IVI_SPECIFIC_ATTR_BASE + 37  # ViBoolean, read-write
KTM960X_ATTR_NONVOLATILE_ENABLE_PERIODIC_CAL = IVI_SPECIFIC_ATTR_BASE + 38  # ViBoolean, read-write
KTM960X_ATTR_NONVOLATILE_INSTRUMENT_CAL_INTERVAL = IVI_SPECIFIC_ATTR_BASE + 39  # ViInt32, read-write
KTM960X_ATTR_NONVOLATILE_MODULE_CAL_INTERVAL = IVI_SPECIFIC_ATTR_BASE + 40  # ViInt32, read-write
KTM960X_ATTR_NONVOLATILE_PASSPHRASE = IVI_SPECIFIC_ATTR_BASE + 41  # ViString, read-write
KTM960X_ATTR_NONVOLATILE_SYSTEM_IDENTIFICATION = IVI_SPECIFIC_ATTR_BASE + 42  # ViString, read-write
# -- External trigger I/O ------------------------------------------------
KTM960X_ATTR_EXTERNAL_COUNT = IVI_SPECIFIC_ATTR_BASE + 493  # ViInt32, read-only
KTM960X_ATTR_MODULE_IO_EXTERNAL_EDGE_POSITION = IVI_SPECIFIC_ATTR_BASE + 496  # ViInt32, read-write
KTM960X_ATTR_MODULE_IO_EXTERNAL_EDGE_WIDTH = IVI_SPECIFIC_ATTR_BASE + 497  # ViReal64, read-write
KTM960X_ATTR_MODULE_IO_EXTERNAL_FUNCTION = IVI_SPECIFIC_ATTR_BASE + 498  # ViInt32, read-write
KTM960X_ATTR_MODULE_IO_EXTERNAL_LEVEL = IVI_SPECIFIC_ATTR_BASE + 499  # ViInt32, read-write
KTM960X_ATTR_MODULE_IO_EXTERNAL_POLARITY = IVI_SPECIFIC_ATTR_BASE + 500  # ViInt32, read-write
KTM960X_ATTR_MODULE_IO_EXTERNAL_TYPE = IVI_SPECIFIC_ATTR_BASE + 501  # ViInt32, read-write

# -- PXIe trigger I/O ----------------------------------------------------
KTM960X_ATTR_PXIE_COUNT = IVI_SPECIFIC_ATTR_BASE + 495  # ViInt32, read-only
KTM960X_ATTR_MODULE_IO_PXIE_EDGE_POSITION = IVI_SPECIFIC_ATTR_BASE + 503  # ViInt32, read-write
KTM960X_ATTR_MODULE_IO_PXIE_LEVEL = IVI_SPECIFIC_ATTR_BASE + 504  # ViInt32, read-write
KTM960X_ATTR_MODULE_IO_PXIE_TYPE = IVI_SPECIFIC_ATTR_BASE + 505  # ViInt32, read-write
KTM960X_ATTR_MODULE_IO_PXIE_EDGE_WIDTH = IVI_SPECIFIC_ATTR_BASE + 506  # ViReal64, read-write
KTM960X_ATTR_MODULE_IO_PXIE_FUNCTION = IVI_SPECIFIC_ATTR_BASE + 507  # ViInt32, read-write
KTM960X_ATTR_MODULE_IO_PXIE_POLARITY = IVI_SPECIFIC_ATTR_BASE + 530  # ViInt32, read-write
# -- Calibration (instrument-level) --------------------------------------
KTM960X_ATTR_CALIBRATION_ADJUSTMENT_INFORMATION = IVI_SPECIFIC_ATTR_BASE + 480  # ViString, read-only
KTM960X_ATTR_CALIBRATION_DUE_DATE = IVI_SPECIFIC_ATTR_BASE + 481  # ViString, read-only
KTM960X_ATTR_CALIBRATION_INSTRUMENT_IDENTIFIER = IVI_SPECIFIC_ATTR_BASE + 482  # ViString, read-only
KTM960X_ATTR_CALIBRATION_LAST_DATE = IVI_SPECIFIC_ATTR_BASE + 483  # ViString, read-only
KTM960X_ATTR_CALIBRATION_STATUS = IVI_SPECIFIC_ATTR_BASE + 484  # ViInt32, read-only
KTM960X_ATTR_CALIBRATION_VERIFICATION_INFORMATION = IVI_SPECIFIC_ATTR_BASE + 485  # ViString, read-only
# -- Measurement ---------------------------------------------------------
KTM960X_ATTR_MEASUREMENT_COUNT = IVI_SPECIFIC_ATTR_BASE + 327  # ViInt32, read-only
KTM960X_ATTR_MEASUREMENT_ACQUISITION_MODE = IVI_SPECIFIC_ATTR_BASE + 486  # ViInt32, read-write
KTM960X_ATTR_MEASUREMENT_TRIGGER_OUTPUT_ENABLED = IVI_SPECIFIC_ATTR_BASE + 489  # ViBoolean, read-write

# -- Measurement: Arm ----------------------------------------------------
KTM960X_ATTR_MEASUREMENT_ARM_BYPASS = IVI_SPECIFIC_ATTR_BASE + 328  # ViInt32, read-write
KTM960X_ATTR_MEASUREMENT_ARM_COUNT = IVI_SPECIFIC_ATTR_BASE + 329  # ViInt32, read-write
KTM960X_ATTR_MEASUREMENT_ARM_DELAY = IVI_SPECIFIC_ATTR_BASE + 330  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_ARM_SOURCE = IVI_SPECIFIC_ATTR_BASE + 331  # ViInt32, read-write
KTM960X_ATTR_MEASUREMENT_ARM_TIMER = IVI_SPECIFIC_ATTR_BASE + 332  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_ARM_TRIGGER_OUTPUT_ENABLED = IVI_SPECIFIC_ATTR_BASE + 333  # ViBoolean, read-write

# -- Measurement: Current ------------------------------------------------
KTM960X_ATTR_MEASUREMENT_CURRENT_APERTURE = IVI_SPECIFIC_ATTR_BASE + 334  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_CURRENT_APERTURE_AUTO_ENABLED = IVI_SPECIFIC_ATTR_BASE + 335  # ViBoolean, read-write
KTM960X_ATTR_MEASUREMENT_CURRENT_IS_COMPLIANCE = IVI_SPECIFIC_ATTR_BASE + 340  # ViBoolean, read-only
KTM960X_ATTR_MEASUREMENT_CURRENT_LIMIT = IVI_SPECIFIC_ATTR_BASE + 341  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_CURRENT_NEGATIVE_LIMIT = IVI_SPECIFIC_ATTR_BASE + 342  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_CURRENT_NPLC = IVI_SPECIFIC_ATTR_BASE + 343  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_CURRENT_NPLC_AUTO_ENABLED = IVI_SPECIFIC_ATTR_BASE + 344  # ViBoolean, read-write
KTM960X_ATTR_MEASUREMENT_CURRENT_POSITIVE_LIMIT = IVI_SPECIFIC_ATTR_BASE + 345  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_CURRENT_RANGE = IVI_SPECIFIC_ATTR_BASE + 346  # ViReal64, read-write

# -- Measurement: Function -----------------------------------------------
KTM960X_ATTR_MEASUREMENT_FUNCTION_DISABLE_COUNT = IVI_SPECIFIC_ATTR_BASE + 348  # ViInt32, read-only
KTM960X_ATTR_MEASUREMENT_FUNCTION_ENABLE_COUNT = IVI_SPECIFIC_ATTR_BASE + 349  # ViInt32, read-only

# -- Measurement: Trigger ------------------------------------------------
KTM960X_ATTR_MEASUREMENT_TRIGGER_BYPASS = IVI_SPECIFIC_ATTR_BASE + 366  # ViInt32, read-write
KTM960X_ATTR_MEASUREMENT_TRIGGER_COUNT = IVI_SPECIFIC_ATTR_BASE + 367  # ViInt32, read-write
KTM960X_ATTR_MEASUREMENT_TRIGGER_DELAY = IVI_SPECIFIC_ATTR_BASE + 368  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_TRIGGER_SOURCE = IVI_SPECIFIC_ATTR_BASE + 369  # ViInt32, read-write
KTM960X_ATTR_MEASUREMENT_TRIGGER_TIMER = IVI_SPECIFIC_ATTR_BASE + 370  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_TRIGGER_TRIGGER_OUTPUT_ENABLED = IVI_SPECIFIC_ATTR_BASE + 371  # ViBoolean, read-write

# -- Measurement: Voltage ------------------------------------------------
KTM960X_ATTR_MEASUREMENT_VOLTAGE_APERTURE = IVI_SPECIFIC_ATTR_BASE + 372  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_VOLTAGE_APERTURE_AUTO_ENABLED = IVI_SPECIFIC_ATTR_BASE + 373  # ViBoolean, read-write
KTM960X_ATTR_MEASUREMENT_VOLTAGE_IS_COMPLIANCE = IVI_SPECIFIC_ATTR_BASE + 378  # ViBoolean, read-only
KTM960X_ATTR_MEASUREMENT_VOLTAGE_LIMIT = IVI_SPECIFIC_ATTR_BASE + 379  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_VOLTAGE_NEGATIVE_LIMIT = IVI_SPECIFIC_ATTR_BASE + 380  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_VOLTAGE_NPLC = IVI_SPECIFIC_ATTR_BASE + 381  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_VOLTAGE_NPLC_AUTO_ENABLED = IVI_SPECIFIC_ATTR_BASE + 382  # ViBoolean, read-write
KTM960X_ATTR_MEASUREMENT_VOLTAGE_POSITIVE_LIMIT = IVI_SPECIFIC_ATTR_BASE + 383  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_VOLTAGE_RANGE = IVI_SPECIFIC_ATTR_BASE + 384  # ViReal64, read-write

# -- Measurement: WaitTime -----------------------------------------------
KTM960X_ATTR_MEASUREMENT_WAIT_TIME_AUTO_ENABLED = IVI_SPECIFIC_ATTR_BASE + 386  # ViBoolean, read-write
KTM960X_ATTR_MEASUREMENT_WAIT_TIME_ENABLED = IVI_SPECIFIC_ATTR_BASE + 387  # ViBoolean, read-write
KTM960X_ATTR_MEASUREMENT_WAIT_TIME_GAIN = IVI_SPECIFIC_ATTR_BASE + 388  # ViReal64, read-write
KTM960X_ATTR_MEASUREMENT_WAIT_TIME_OFFSET = IVI_SPECIFIC_ATTR_BASE + 389  # ViReal64, read-write

# -- Measurement: Sampling -----------------------------------------------
KTM960X_ATTR_MEASUREMENT_SAMPLING_POINTS = IVI_SPECIFIC_ATTR_BASE + 487  # ViInt32, read-write
KTM960X_ATTR_MEASUREMENT_SAMPLING_TOTAL_TIME = IVI_SPECIFIC_ATTR_BASE + 488  # ViReal64, read-write
# -- Output --------------------------------------------------------------
KTM960X_ATTR_OUTPUT_COUNT = IVI_SPECIFIC_ATTR_BASE + 390  # ViInt32, read-only
KTM960X_ATTR_OUTPUT_AUTO_OFF_ENABLED = IVI_SPECIFIC_ATTR_BASE + 391  # ViBoolean, read-write
KTM960X_ATTR_OUTPUT_AUTO_ON_ENABLED = IVI_SPECIFIC_ATTR_BASE + 392  # ViBoolean, read-write
KTM960X_ATTR_OUTPUT_ENABLED = IVI_SPECIFIC_ATTR_BASE + 403  # ViBoolean, read-write
KTM960X_ATTR_OUTPUT_OFF_CONDITION = IVI_SPECIFIC_ATTR_BASE + 410  # ViInt32, read-write
KTM960X_ATTR_OUTPUT_PRIORITY_MODE = IVI_SPECIFIC_ATTR_BASE + 411  # ViInt32, read-write
KTM960X_ATTR_OUTPUT_SHAPE = IVI_SPECIFIC_ATTR_BASE + 419  # ViInt32, read-write
KTM960X_ATTR_OUTPUT_OPERATION_MODE = IVI_SPECIFIC_ATTR_BASE + 479  # ViInt32, read-write

# -- Output: Current -----------------------------------------------------
KTM960X_ATTR_OUTPUT_CURRENT_AUTO_RANGE_ENABLED = IVI_SPECIFIC_ATTR_BASE + 393  # ViBoolean, read-write
KTM960X_ATTR_OUTPUT_CURRENT_BASE_LEVEL = IVI_SPECIFIC_ATTR_BASE + 394  # ViReal64, read-write
KTM960X_ATTR_OUTPUT_CURRENT_BASE_TYPE = IVI_SPECIFIC_ATTR_BASE + 395  # ViInt32, read-write
KTM960X_ATTR_OUTPUT_CURRENT_LEVEL = IVI_SPECIFIC_ATTR_BASE + 396  # ViReal64, read-write
KTM960X_ATTR_OUTPUT_CURRENT_POST_ENABLED = IVI_SPECIFIC_ATTR_BASE + 397  # ViBoolean, read-write
KTM960X_ATTR_OUTPUT_CURRENT_POST_LEVEL = IVI_SPECIFIC_ATTR_BASE + 398  # ViReal64, read-write
KTM960X_ATTR_OUTPUT_CURRENT_POST_TYPE = IVI_SPECIFIC_ATTR_BASE + 399  # ViInt32, read-write
KTM960X_ATTR_OUTPUT_CURRENT_RANGE = IVI_SPECIFIC_ATTR_BASE + 400  # ViReal64, read-write
KTM960X_ATTR_OUTPUT_CURRENT_RANGE_LOWER_LIMIT = IVI_SPECIFIC_ATTR_BASE + 401  # ViReal64, read-write
KTM960X_ATTR_OUTPUT_CURRENT_TRIGGERED_LEVEL = IVI_SPECIFIC_ATTR_BASE + 402  # ViReal64, read-write

# -- Output: Filter ------------------------------------------------------
KTM960X_ATTR_OUTPUT_FILTER_AUTO_ENABLED = IVI_SPECIFIC_ATTR_BASE + 404  # ViBoolean, read-write
KTM960X_ATTR_OUTPUT_FILTER_CUT_OFF_FREQUENCY = IVI_SPECIFIC_ATTR_BASE + 405  # ViReal64, read-write
KTM960X_ATTR_OUTPUT_FILTER_ENABLED = IVI_SPECIFIC_ATTR_BASE + 406  # ViBoolean, read-write
KTM960X_ATTR_OUTPUT_FILTER_TIME_CONSTANT = IVI_SPECIFIC_ATTR_BASE + 407  # ViReal64, read-write
# - Pulse
# ViReal64, read-write
KTM960X_ATTR_OUTPUT_PULSE_DELAY = IVI_SPECIFIC_ATTR_BASE + | |
discrete
selection. The data value _nearest_ the mouse cursor is added to the
selection. See the [nearest transform](nearest.html) documentation for more
information.
on : VgEventStream
A [Vega event stream](https://vega.github.io/vega/docs/event-streams/)
(object or selector) that triggers the selection. For interval selections,
the event stream must specify a [start and
end](https://vega.github.io/vega/docs/event-streams/#between-filters).
resolve : SelectionResolution
With layered and multi-view displays, a strategy that determines how
selections' data queries are resolved when applied in a filter transform,
conditional encoding rule, or scale domain.
toggle : anyOf(string, boolean)
Controls whether data values should be toggled or only ever inserted into
multi selections. Can be `true`, `false` (for insertion only), or a [Vega
expression](https://vega.github.io/vega/docs/expressions/). __Default
value:__ `true`, which corresponds to `event.shiftKey` (i.e., data values
are toggled when a user interacts with the shift-key pressed). See the
[toggle transform](toggle.html) documentation for more information.
type : string
"""
# JSON pointer to this wrapper's definition in the vega-lite schema, and the
# root schema used by SchemaBase when validating instances.
_schema = {'$ref': '#/definitions/MultiSelection'}
_rootschema = Root._schema
def __init__(self, type=Undefined, empty=Undefined, encodings=Undefined,
             fields=Undefined, nearest=Undefined, on=Undefined,
             resolve=Undefined, toggle=Undefined, **kwds):
    # Forward every argument unchanged to SchemaBase, which validates them
    # against the MultiSelection JSON schema.
    super(MultiSelection, self).__init__(
        type=type,
        empty=empty,
        encodings=encodings,
        fields=fields,
        nearest=nearest,
        on=on,
        resolve=resolve,
        toggle=toggle,
        **kwds
    )
class MultiSelectionConfig(SchemaBase):
    """MultiSelectionConfig schema wrapper.

    Default configuration applied to ``multi`` selections.

    Attributes
    ----------
    empty : string
        Whether an empty selection contains all data values (the default) or,
        when set to ``none``, no data values at all.
    encodings : list
        Encoding channels whose data-field values must match for a data tuple
        to fall within the selection.
    fields : list
        Field names whose values must match for a data tuple to fall within
        the selection.
    nearest : boolean
        When true, an invisible voronoi diagram accelerates discrete selection
        by adding the data value nearest the mouse cursor; see the nearest
        transform documentation for details.
    on : VgEventStream
        A Vega event stream (object or selector) that triggers the selection.
        Interval selections must specify a start/end (between) filter.
    resolve : SelectionResolution
        Strategy for resolving the data queries of layered and multi-view
        selections in filter transforms, conditional encoding rules, or scale
        domains.
    toggle : anyOf(string, boolean)
        Whether data values are toggled rather than only inserted: ``true``
        (default, corresponding to ``event.shiftKey``), ``false`` for
        insertion only, or an arbitrary Vega expression; see the toggle
        transform documentation.
    """

    _schema = {'$ref': '#/definitions/MultiSelectionConfig'}
    _rootschema = Root._schema

    def __init__(self, empty=Undefined, encodings=Undefined, fields=Undefined,
                 nearest=Undefined, on=Undefined, resolve=Undefined,
                 toggle=Undefined, **kwds):
        # All validation happens in SchemaBase; this is a pure pass-through.
        super(MultiSelectionConfig, self).__init__(
            empty=empty,
            encodings=encodings,
            fields=fields,
            nearest=nearest,
            on=on,
            resolve=resolve,
            toggle=toggle,
            **kwds
        )
class MultiTimeUnit(SchemaBase):
    """MultiTimeUnit schema wrapper.

    Enumeration-style wrapper; accepted values come from the
    ``MultiTimeUnit`` definition of the vega-lite schema.
    """

    _schema = {'$ref': '#/definitions/MultiTimeUnit'}
    _rootschema = Root._schema

    def __init__(self, *args, **kwds):
        super(MultiTimeUnit, self).__init__(*args, **kwds)
class NamedData(SchemaBase):
    """NamedData schema wrapper.

    Attributes
    ----------
    format : DataFormat
        An object that specifies the format for parsing the data.
    name : string
        Provide a placeholder name and bind data at runtime.
    """

    _schema = {'$ref': '#/definitions/NamedData'}
    _rootschema = Root._schema

    def __init__(self, name=Undefined, format=Undefined, **kwds):
        # Pure pass-through; SchemaBase validates against the NamedData schema.
        super(NamedData, self).__init__(
            name=name,
            format=format,
            **kwds
        )
class NiceTime(SchemaBase):
    """NiceTime schema wrapper.

    Enumeration-style wrapper; accepted values come from the ``NiceTime``
    definition of the vega-lite schema.
    """

    _schema = {'$ref': '#/definitions/NiceTime'}
    _rootschema = Root._schema

    def __init__(self, *args):
        super(NiceTime, self).__init__(*args)
class OrderFieldDef(SchemaBase):
    """OrderFieldDef schema wrapper.

    Field definition used by the ``order`` channel.

    Attributes
    ----------
    aggregate : Aggregate
        Aggregation function for the field (e.g. ``mean``, ``sum``,
        ``median``, ``min``, ``max``, ``count``). Default: ``undefined``
        (None).
    bin : anyOf(boolean, BinParams)
        Binning for a quantitative field: ``True`` applies the default binning
        parameters, an object supplies explicit ones. Default: ``False``.
    field : anyOf(string, RepeatRef)
        Required (unless ``aggregate`` is ``count``): the name of the data
        field to pull values from, or an object referencing the ``repeat``
        operator. Dots and brackets address nested objects (e.g.
        ``"foo.bar"``); escape them with ``\\`` when they are literal
        characters in a flat field name (see the field documentation).
    sort : SortOrder
        The sort order: ``"ascending"`` (default) or ``"descending"``.
    timeUnit : TimeUnit
        Time unit (e.g. ``year``, ``yearmonth``, ``month``, ``hours``) for a
        temporal field, or for a temporal field cast as ordinal. Default:
        ``undefined`` (None).
    type : Type
        The encoded field's type of measurement: ``"quantitative"``,
        ``"temporal"``, ``"ordinal"``, ``"nominal"``, or a geo type
        (``"latitude"``, ``"longitude"``, ``"geojson"``) under a geographic
        projection.
    """

    _schema = {'$ref': '#/definitions/OrderFieldDef'}
    _rootschema = Root._schema

    def __init__(self, type=Undefined, aggregate=Undefined, bin=Undefined,
                 field=Undefined, sort=Undefined, timeUnit=Undefined, **kwds):
        # Pure pass-through; SchemaBase validates the keyword arguments.
        super(OrderFieldDef, self).__init__(
            type=type,
            aggregate=aggregate,
            bin=bin,
            field=field,
            sort=sort,
            timeUnit=timeUnit,
            **kwds
        )
class Orient(SchemaBase):
    """Orient schema wrapper.

    Enumeration-style wrapper; accepted values come from the ``Orient``
    definition of the vega-lite schema.
    """

    _schema = {'$ref': '#/definitions/Orient'}
    _rootschema = Root._schema

    def __init__(self, *args):
        super(Orient, self).__init__(*args)
class Padding(SchemaBase):
    """Padding schema wrapper.

    Wraps the ``Padding`` definition of the vega-lite schema; arguments are
    validated by ``SchemaBase``.
    """

    _schema = {'$ref': '#/definitions/Padding'}
    _rootschema = Root._schema

    def __init__(self, *args, **kwds):
        super(Padding, self).__init__(*args, **kwds)
class PositionFieldDef(SchemaBase):
"""PositionFieldDef schema wrapper
Attributes
----------
aggregate : Aggregate
Aggregation function for the field (e.g., `mean`, `sum`, `median`, `min`,
`max`, `count`). __Default value:__ `undefined` (None)
axis : anyOf(Axis, None)
An object defining properties of axis's gridlines, ticks and labels. If
`null`, the axis for the encoding channel will be removed. __Default
value:__ If undefined, default [axis
properties](https://vega.github.io/vega-lite/docs/axis.html) are applied.
bin : anyOf(boolean, BinParams)
A flag for binning a `quantitative` field, or [an object defining binning
parameters](https://vega.github.io/vega-lite/docs/bin.html#params). If
`true`, default [binning
parameters](https://vega.github.io/vega-lite/docs/bin.html) will be applied.
__Default value:__ `false`
field : anyOf(string, RepeatRef)
__Required.__ A string defining the name of the field from which to pull a
data value or an object defining iterated values from the
[`repeat`](https://vega.github.io/vega-lite/docs/repeat.html) operator.
__Note:__ Dots (`.`) and brackets (`[` and `]`) can be used to access nested
objects (e.g., `"field": "foo.bar"` and `"field": "foo['bar']"`). If field
names contain dots or brackets but are not nested, you can use `\\` to
escape dots and brackets (e.g., `"a\\.b"` and `"a\\[0\\]"`). See more
details about escaping in the [field
documentation](https://vega.github.io/vega-lite/docs/field.html). __Note:__
`field` is not required if `aggregate` is `count`.
scale : Scale
An object defining properties of the channel's scale, which is the function
that transforms values in the data domain (numbers, dates, strings, etc) to
visual values (pixels, colors, sizes) of the encoding channels. __Default
value:__ If undefined, default [scale
properties](https://vega.github.io/vega-lite/docs/scale.html) are applied.
sort : anyOf(SortOrder, SortField, None)
Sort order for the encoded field. Supported `sort` values include
`"ascending"`, `"descending"` and `null` (no sorting). For fields with
discrete domains, `sort` can also be a [sort field definition
object](https://vega.github.io/vega-lite/docs/sort.html#sort-field).
__Default value:__ `"ascending"`
stack : anyOf(StackOffset, None)
Type of stacking offset if the field should be stacked. `stack` is only
applicable for `x` and `y` channels with continuous domains. For example,
`stack` of `y` can be used to customize stacking for a vertical bar chart.
`stack` can be one of the following values: - `"zero"`: stacking with
baseline offset at zero value of the scale (for creating typical stacked
[bar](https://vega.github.io/vega-lite/docs/stack.html#bar) and
[area](https://vega.github.io/vega-lite/docs/stack.html#area) chart). -
`"normalize"` - stacking with normalized domain (for creating [normalized
stacked bar and area
charts](https://vega.github.io/vega-lite/docs/stack.html#normalized). <br/>
-`"center"` - stacking with center baseline (for
[streamgraph](https://vega.github.io/vega-lite/docs/stack.html#streamgraph)).
- `null` - No-stacking. This will produce layered
[bar](https://vega.github.io/vega-lite/docs/stack.html#layered-bar-chart)
and area chart. __Default value:__ `zero` for plots with all of the
following conditions are true: (1) the mark is `bar` or `area`; (2) the
stacked measure channel (x or y) has a linear scale; (3) At least one of
non-position channels mapped to an unaggregated field that is different from
x and y. Otherwise, `null` by default.
timeUnit : TimeUnit
Time unit (e.g., `year`, `yearmonth`, `month`, `hours`) for a temporal
field. or [a temporal field that gets casted as
ordinal](https://vega.github.io/vega-lite/docs/type.html#cast). __Default
value:__ `undefined` (None)
type : Type
The encoded field's type of measurement (`"quantitative"`, `"temporal"`,
`"ordinal"`, or `"nominal"`). It can also be a geo type (`"latitude"`,
`"longitude"`, and `"geojson"`) when a [geographic
projection](https://vega.github.io/vega-lite/docs/projection.html) is
applied.
"""
_schema = {'$ref': '#/definitions/PositionFieldDef'}
_rootschema = Root._schema
def __init__(self, | |
4.83759520e-16, 9.49612632e-06,
# -1.06805612e-06, -1.53743221e-09, 3.63509506e-10], [-1.49006382e-07, -9.89413898e-13, -7.43139226e-15, 1.68328909e-11,
# -3.44796856e-11, -1.56880637e-16, 5.42517639e-16, 8.04661898e-06,
# -1.09156392e-06, -2.36771942e-09, 4.08420695e-10], [-1.47357294e-07, -1.08127351e-12, -5.75445470e-15, 2.50483222e-11,
# -3.83278831e-11, -1.45811529e-16, 4.62357926e-16, 9.49612632e-06,
# -1.06805612e-06, -1.89852663e-09, 3.63509506e-10], [-1.16881742e-07, -1.31596089e-12, -5.83314472e-15, 2.63563289e-11,
# -3.44796856e-11, -1.92134963e-16, 5.72951835e-16, 9.40197124e-06,
# -1.06805612e-06, -2.40163435e-09, 3.63509506e-10], [-1.49006382e-07, -9.89413898e-13, -9.15032280e-15, 1.19069693e-11,
# -4.43909835e-11, -1.49044805e-16, 6.54615891e-16, 8.04661898e-06,
# -8.01398133e-07, -2.36771942e-09, 3.92456238e-10], [-1.09118739e-07, -1.12770456e-12, -5.83314472e-15, 2.63563289e-11,
# -3.44796856e-11, -1.49044805e-16, 5.42517639e-16, 8.04661898e-06,
# -1.17577960e-06, -2.36771942e-09, 3.92456238e-10], [-1.39789441e-07, -1.08127351e-12, -5.43396839e-15, 2.63563289e-11,
# -3.44796856e-11, -1.49044805e-16, 5.42517639e-16, 8.04661898e-06,
# -1.09156392e-06, -2.26598464e-09, 3.92456238e-10], [-1.49006382e-07, -9.89413898e-13, -9.34884554e-15, 1.68328909e-11,
# -3.11488300e-11, -1.36751628e-16, 4.40199218e-16, 7.49536550e-06,
# -1.31517264e-06, -2.11772013e-09, 4.05643587e-10], [-2.61723578e-07, -1.13279121e-12, -5.59125710e-15, 1.92358198e-11,
# -4.30108100e-11, -1.63770904e-16, 4.62357926e-16, 9.49612632e-06,
# -1.06805612e-06, -1.89852663e-09, 4.09547711e-10]],
# [[-1.49006382e-07, -8.26962070e-13, -7.43139226e-15, 2.14331375e-11,
# -3.44796856e-11, -1.56880637e-16, 5.42517639e-16, 1.10793251e-05,
# -1.06805612e-06, -2.28289589e-09, 3.63509506e-10], [-1.47135609e-07, -1.08127351e-12, -4.47748997e-15, 2.50483222e-11,
# -3.83278831e-11, -1.45811529e-16, 4.62357926e-16, 8.07665288e-06,
# -1.09156392e-06, -2.36771942e-09, 4.08420695e-10], [-1.39789441e-07, -1.08127351e-12, -5.43396839e-15, 2.63563289e-11,
# -3.44796856e-11, -1.49044805e-16, 5.61767341e-16, 8.04661898e-06,
# -1.09156392e-06, -2.26598464e-09, 3.92456238e-10], [-1.39789441e-07, -1.08127351e-12, -5.43396839e-15, 2.63563289e-11,
# -3.44796856e-11, -1.49044805e-16, 5.42517639e-16, 9.32807092e-06,
# -1.09156392e-06, -2.26598464e-09, 4.56988338e-10], [-1.47357294e-07, -1.08127351e-12, -4.07713500e-15, 2.15392246e-11,
# -3.29951967e-11, -1.45811529e-16, 4.62357926e-16, 1.07031415e-05,
# -1.06805612e-06, -1.11821551e-09, 3.63509506e-10], [-2.58144072e-07, -1.13279121e-12, -5.59125710e-15, 1.68328909e-11,
# -4.54999965e-11, -1.96620173e-16, 4.83759520e-16, 9.49612632e-06,
# -8.57416602e-07, -1.89852663e-09, 3.63509506e-10], [-1.34883553e-07, -1.08127351e-12, -6.81923310e-15, 2.49042272e-11,
# -3.83278831e-11, -1.45811529e-16, 4.62357926e-16, 9.49612632e-06,
# -1.10587560e-06, -2.36771942e-09, 4.32627456e-10], [-1.49006382e-07, -9.89413898e-13, -7.43139226e-15, 1.68328909e-11,
# -3.44796856e-11, -1.56880637e-16, 5.42517639e-16, 8.65795907e-06,
# -1.06805612e-06, -1.89852663e-09, 3.63509506e-10], [-2.58144072e-07, -1.36642146e-12, -5.59125710e-15, 2.63563289e-11,
# -2.70009229e-11, -1.92134963e-16, 7.41652993e-16, 9.40197124e-06,
# -1.34828843e-06, -2.14328278e-09, 3.63509506e-10], [-1.16183492e-07, -1.66486715e-12, -5.83314472e-15, 1.68328909e-11,
# -5.72820328e-11, -2.18380106e-16, 4.83759520e-16, 7.13203944e-06,
# -8.01957400e-07, -1.84576033e-09, 3.63509506e-10]],
# [[-1.49006382e-07, -9.66353328e-13, -7.43139226e-15, 2.14331375e-11,
# -3.44796856e-11, -1.45551256e-16, 5.42517639e-16, 1.10793251e-05,
# -1.06805612e-06, -2.25826085e-09, 3.63509506e-10], [-1.49006382e-07, -8.26962070e-13, -7.43139226e-15, 2.14331375e-11,
# -3.44796856e-11, -1.56880637e-16, 5.42517639e-16, 1.10793251e-05,
# -8.07844666e-07, -2.28289589e-09, 3.13456546e-10], [-1.46198347e-07, -1.28188039e-12, -5.43396839e-15, 2.63563289e-11,
# -3.44796856e-11, -1.49044805e-16, 5.61767341e-16, 8.04661898e-06,
# -1.09156392e-06, -2.26598464e-09, 3.89638111e-10], [-1.39789441e-07, -8.29533557e-13, -4.55124360e-15, 2.63563289e-11,
# -3.44796856e-11, -1.49044805e-16, 5.61767341e-16, 8.04661898e-06,
# -1.09156392e-06, -2.26598464e-09, 3.48371204e-10], [-1.92533779e-07, -1.01028199e-12, -7.43139226e-15, 2.14331375e-11,
# -3.44796856e-11, -1.56880637e-16, 4.66189033e-16, 1.10793251e-05,
# -1.06805612e-06, -2.57902092e-09, 3.63509506e-10], [-1.49006382e-07, -8.86986059e-13, -7.43139226e-15, 1.68886230e-11,
# -3.44796856e-11, -1.56880637e-16, 5.42517639e-16, 1.10793251e-05,
# -1.06805612e-06, -2.28289589e-09, 3.63509506e-10], [-1.13573509e-07, -1.08127351e-12, -5.43396839e-15, 2.98241154e-11,
# -4.28349560e-11, -1.40069900e-16, 5.61767341e-16, 8.04661898e-06,
# -1.09156392e-06, -2.81380557e-09, 3.92456238e-10], [-1.39789441e-07, -1.08127351e-12, -5.43396839e-15, 2.63563289e-11,
# -3.44796856e-11, -1.52842289e-16, 5.61767341e-16, 6.21585616e-06,
# -1.16412928e-06, -2.26598464e-09, 3.92456238e-10], [-1.39789441e-07, -1.35245649e-12, -4.93320450e-15, 2.17190998e-11,
# -3.44796856e-11, -1.49044805e-16, 5.61767341e-16, 1.00564537e-05,
# -9.45328710e-07, -2.36513704e-09, 3.92456238e-10], [-1.02273335e-07, -1.08127351e-12, -5.43396839e-15, 2.63563289e-11,
# -3.44796856e-11, -1.30463444e-16, 4.72024221e-16, 8.04661898e-06,
# -1.09156392e-06, -1.79382555e-09, 3.92456238e-10]],
# [[-1.66274214e-07, -1.28188039e-12, -5.43396839e-15, 2.63563289e-11,
# -3.44796856e-11, -1.49044805e-16, 5.12837240e-16, 8.04661898e-06,
# -1.06805612e-06, -2.25826085e-09, 3.63509506e-10], [-1.49006382e-07, -9.66353328e-13, -7.43139226e-15, 2.14331375e-11,
# -3.44796856e-11, -1.52852697e-16, 5.42517639e-16, 1.10793251e-05,
# -8.64749718e-07, -2.26598464e-09, 5.03697164e-10], [-1.34176109e-07, -1.28188039e-12, -5.43396839e-15, 2.63563289e-11,
# -3.44796856e-11, -1.30463444e-16, 3.91572074e-16, 8.04661898e-06,
# -1.09156392e-06, -1.79382555e-09, 4.77279273e-10], [-1.02273335e-07, -1.08127351e-12, -6.95729739e-15, 2.63563289e-11,
# -3.44796856e-11, -1.80869896e-16, 6.06517282e-16, 1.03495077e-05,
# -1.09156392e-06, -2.26598464e-09, 3.89638111e-10], [-1.92533779e-07, -9.03160392e-13, -7.43139226e-15, 2.14331375e-11,
# -3.34630495e-11, -1.79614146e-16, 5.42517639e-16, 1.41732364e-05,
# -1.06805612e-06, -2.28289589e-09, 2.75403197e-10], [-1.49006382e-07, -8.59397331e-13, -7.43139226e-15, 1.26775300e-11,
# -2.66999483e-11, -1.56880637e-16, 4.66189033e-16, 1.10793251e-05,
# -1.06805612e-06, -2.31229589e-09, 3.63509506e-10], [-1.49006382e-07, -8.86986059e-13, -7.43139226e-15, 1.68886230e-11,
# -3.44796856e-11, -1.56880637e-16, 5.42517639e-16, 1.10793251e-05,
# -1.06805612e-06, -2.28289589e-09, 3.63509506e-10], [-1.49006382e-07, -8.86986059e-13, -7.43139226e-15, 1.68886230e-11,
# -3.44796856e-11, -1.56880637e-16, 5.42517639e-16, 1.34510045e-05,
# -1.06805612e-06, -2.28289589e-09, 4.09576453e-10], [-1.46198347e-07, -1.28188039e-12, -5.80126837e-15, 2.63563289e-11,
# -2.41603255e-11, -1.49044805e-16, 4.77702594e-16, 8.04661898e-06,
# -1.06805612e-06, -2.46202624e-09, 3.63509506e-10], [-1.49006382e-07, -8.86986059e-13, -7.43139226e-15, 1.68886230e-11,
# -3.44796856e-11, -1.56880637e-16, 5.42517639e-16, 1.10793251e-05,
# -1.09156392e-06, -2.26598464e-09, 3.87894467e-10]],
# [[-1.46198347e-07, -1.06032483e-12, -5.80126837e-15, 2.63563289e-11,
# -3.44796856e-11, -1.49044805e-16, 5.12837240e-16, 8.04661898e-06,
# -8.81953313e-07, -1.72316250e-09, 4.27994948e-10], [-1.66274214e-07, -1.28188039e-12, -5.43396839e-15, 2.63563289e-11,
# -2.41603255e-11, -1.87511417e-16, 6.00289888e-16, 6.37749726e-06,
# -1.06805612e-06, -2.46202624e-09, 4.26419515e-10], [-1.48007753e-07, -6.44013960e-13, -7.43139226e-15, 1.68886230e-11,
# -3.44796856e-11, -1.96713703e-16, 5.42517639e-16, 1.18426600e-05,
# -1.28529704e-06, -2.09834872e-09, 3.63509506e-10], [-1.49006382e-07, -7.04429041e-13, -7.43139226e-15, 1.47529730e-11,
# -2.66999483e-11, -1.56880637e-16, 3.57819429e-16, 1.10793251e-05,
# -1.06805612e-06, -2.28289589e-09, 2.84182343e-10], [-1.66274214e-07, -1.19239241e-12, -5.43396839e-15, 2.76811963e-11,
# -4.29554847e-11, -1.49044805e-16, 5.12837240e-16, 6.46385925e-06,
# -1.06805612e-06, -2.25826085e-09, 3.63509506e-10], [-1.49006382e-07, -8.59397331e-13, -7.43139226e-15, 1.26775300e-11,
# -2.66999483e-11, -1.82264286e-16, 4.66189033e-16, 1.10793251e-05,
# -9.51268990e-07, -2.78741590e-09, 3.24405619e-10], [-1.49006382e-07, -8.86986059e-13, -7.43139226e-15, 1.68886230e-11,
# -3.44796856e-11, -1.56880637e-16, 5.42517639e-16, 1.10793251e-05,
# -1.06805612e-06, -2.28289589e-09, 3.63509506e-10], [-1.46198347e-07, -1.25365553e-12, -5.80126837e-15, 2.90261419e-11,
# -2.41603255e-11, -1.49044805e-16, 4.77702594e-16, 8.04661898e-06,
# -1.06805612e-06, -2.66459562e-09, 3.63509506e-10], [-1.49006382e-07, -1.08127351e-12, -6.95729739e-15, 3.26749351e-11,
# -3.44796856e-11, -1.80869896e-16, 6.06517282e-16, 1.03495077e-05,
# -1.13400541e-06, -2.26598464e-09, 3.89638111e-10], [-1.02273335e-07, -8.86986059e-13, -6.53465438e-15, 1.68886230e-11,
# -2.99468141e-11, -1.09830333e-16, 5.42517639e-16, 1.10793251e-05,
# -1.09156392e-06, -2.26598464e-09, 3.87894467e-10]],
# [[-1.37147321e-07, -9.99818205e-13, -8.89043628e-15, 1.26775300e-11,
# -2.66999483e-11, -1.82264286e-16, 4.66189033e-16, 9.50484303e-06,
# -9.51268990e-07, -2.78741590e-09, 4.42709268e-10], [-1.33050373e-07, -8.86986059e-13, -7.43139226e-15, 2.10591317e-11,
# -3.44796856e-11, -1.56880637e-16, 5.42517639e-16, 1.19140883e-05,
# -1.06805612e-06, -1.83197372e-09, 2.33442100e-10], [-1.49006382e-07, -8.86986059e-13, -7.43139226e-15, 1.68886230e-11,
# -2.72955669e-11, -1.87511417e-16, 6.42987467e-16, 6.37749726e-06,
# -1.06805612e-06, -2.64976869e-09, 3.72973906e-10], [-1.66274214e-07, -1.61358779e-12, -5.97378168e-15, 2.63563289e-11,
# -2.72901704e-11, -1.56880637e-16, 5.42517639e-16, 1.10793251e-05,
# -1.06805612e-06, -2.57892284e-09, 3.63509506e-10], [-1.49500103e-07, -8.86986059e-13, -7.43139226e-15, 1.26775300e-11,
# -2.66999483e-11, -1.82264286e-16, 4.66189033e-16, 1.10793251e-05,
# -8.66031899e-07, -2.78741590e-09, 4.04815592e-10], [-1.37446215e-07, -8.59397331e-13, -7.43139226e-15, 1.27077041e-11,
# -3.44796856e-11, -1.56880637e-16, 3.95627611e-16, 1.13325576e-05,
# -1.06805612e-06, -2.28289589e-09, 3.63509506e-10], [-1.49006382e-07, -1.08127351e-12, -6.95729739e-15, 3.26749351e-11,
# -4.36637213e-11, -1.80869896e-16, 6.79484669e-16, 1.03495077e-05,
# -8.35848331e-07, -2.26598464e-09, 3.89638111e-10], [-1.49006382e-07, -1.08127351e-12, -5.95172356e-15, 3.26749351e-11,
# -3.44796856e-11, -1.47902701e-16, 6.06517282e-16, 7.36583774e-06,
# -1.13400541e-06, -2.26598464e-09, 3.89638111e-10], [-1.49006382e-07, -1.08127351e-12, -7.43139226e-15, 1.68886230e-11,
# -3.44796856e-11, -1.56880637e-16, 5.42517639e-16, 1.10793251e-05,
# -1.06805612e-06, -2.28289589e-09, 2.60681519e-10], [-1.49006382e-07, -8.86986059e-13, -6.95729739e-15, 3.26749351e-11,
# -2.75176488e-11, -1.80869896e-16, 4.25567999e-16, 1.30521566e-05,
# -1.13400541e-06, -1.85991496e-09, 3.49230969e-10]],
# [[-1.33050373e-07, -8.86986059e-13, -7.43139226e-15, 1.99651367e-11,
# -3.44796856e-11, -1.86288920e-16, 5.42517639e-16, 9.59954362e-06,
# -1.13400541e-06, -1.85991496e-09, 3.60512708e-10], [-1.93144729e-07, -9.51508233e-13, -5.30986881e-15, 3.26749351e-11,
# -2.75176488e-11, -1.80869896e-16, 4.25567999e-16, 1.19140883e-05,
# -1.06805612e-06, -2.36166429e-09, 2.33463214e-10], [-1.49006382e-07, -8.86986059e-13, -6.95729739e-15, 3.26749351e-11,
# -2.75176488e-11, -1.80869896e-16, 5.31712370e-16, 1.10793251e-05,
# -1.15247223e-06, -2.57892284e-09, 3.63509506e-10], [-1.66274214e-07, -1.75185859e-12, -5.97378168e-15, 2.63563289e-11,
# -2.72901704e-11, -1.56880637e-16, 3.58083689e-16, 1.15071247e-05,
# -1.43564176e-06, -1.85991496e-09, 3.49230969e-10], [-1.49006382e-07, -8.86986059e-13, -7.43139226e-15, 1.27077041e-11,
# -2.90723705e-11, -1.56880637e-16, 3.21858351e-16, 8.60384748e-06,
# -1.32124328e-06, -2.28289589e-09, 3.63509506e-10], [-1.10835144e-07, -8.59397331e-13, -6.95729739e-15, 4.04883598e-11,
# -2.75176488e-11, -1.80869896e-16, 3.25558628e-16, 1.30521566e-05,
# -1.13400541e-06, -1.85991496e-09, 3.49230969e-10], [-1.66274214e-07, -1.61358779e-12, -5.97378168e-15, 3.04684831e-11,
# -2.45894770e-11, -1.56880637e-16, 6.12838983e-16, 6.37749726e-06,
# -7.96968354e-07, -2.64976869e-09, 3.72973906e-10], [-1.28338017e-07, -8.86986059e-13, -7.43139226e-15, 1.68886230e-11,
# -2.72955669e-11, -1.87511417e-16, 5.42517639e-16, 1.10793251e-05,
# -1.06805612e-06, -2.52015001e-09, 3.36519403e-10], [-1.25368822e-07, -8.59397331e-13, -7.43139226e-15, 1.27077041e-11,
# -2.72901704e-11, -1.56880637e-16, 5.42517639e-16, 1.10793251e-05,
# -1.12018817e-06, -2.57892284e-09, 3.63509506e-10], [-2.08743008e-07, -1.61358779e-12, -5.97378168e-15, 2.63563289e-11,
# -3.44796856e-11, -1.56880637e-16, 2.81491560e-16, 1.03400665e-05,
# -1.06805612e-06, -2.28289589e-09, 3.63509506e-10]],
# [[-1.75775254e-07, -1.61358779e-12, -5.97378168e-15, 2.63563289e-11,
# -3.44796856e-11, -1.56880637e-16, 2.81491560e-16, 1.53290291e-05,
# -1.13400541e-06, -1.75911728e-09, 3.49230969e-10], [-1.10835144e-07, -8.59397331e-13, -6.09832152e-15, 4.04883598e-11,
# -2.75176488e-11, -1.80869896e-16, 3.25558628e-16, 1.03400665e-05,
# -1.06805612e-06, -2.28289589e-09, 3.30633612e-10], [-1.28338017e-07, -8.86986059e-13, -7.43139226e-15, 1.68886230e-11,
# -2.72955669e-11, -1.87511417e-16, 5.58757958e-16, 1.09654775e-05,
# -1.06805612e-06, -2.76089022e-09, 3.60512708e-10], [-1.33050373e-07, -8.86986059e-13, -7.43139226e-15, 1.99651367e-11,
# -3.44796856e-11, -1.86288920e-16, 4.26528135e-16, 9.59954362e-06,
# -1.13400541e-06, -1.54306690e-09, 3.36519403e-10], [-2.18989545e-07, -1.61358779e-12, -4.29117518e-15, 2.63563289e-11,
# -2.96763537e-11, -1.56880637e-16, 2.50794031e-16, 1.03400665e-05,
# -1.04565507e-06, -2.93735989e-09, 3.63509506e-10], [-2.08743008e-07, -1.61358779e-12, -5.97378168e-15, 2.63563289e-11,
# -3.44796856e-11, -1.56880637e-16, 2.81491560e-16, 1.03400665e-05,
# -1.06805612e-06, -2.28289589e-09, 2.57527616e-10], [-1.33050373e-07, -8.86986059e-13, -5.97378168e-15, 2.99764550e-11,
# -3.44796856e-11, -1.90670908e-16, 2.81491560e-16, 1.03400665e-05,
# -1.06805612e-06, -2.28289589e-09, 3.63509506e-10], [-2.08743008e-07, -1.61358779e-12, -7.43139226e-15, 2.08944138e-11,
# -2.43755159e-11, -1.86288920e-16, 5.42517639e-16, 9.59954362e-06,
# -1.41304892e-06, -1.85991496e-09, 3.60512708e-10], [-1.10835144e-07, -8.59397331e-13, -6.95729739e-15, 4.04883598e-11,
# -2.75176488e-11, -2.01971648e-16, 3.25558628e-16, 1.30521566e-05,
# -1.13400541e-06, -1.85991496e-09, 3.63509506e-10], [-1.41984362e-07, -8.59397331e-13, -8.34065593e-15, 1.23833503e-11,
# -2.72901704e-11, -1.56880637e-16, 4.93098816e-16, 1.10793251e-05,
# -1.38948803e-06, -2.57892284e-09, 3.49230969e-10]],
# [[-1.29857574e-07, -1.00929977e-12, -6.09832152e-15, 4.15244360e-11,
# -2.64060618e-11, -1.56880637e-16, 2.50794031e-16, 1.29540327e-05,
# -1.04909189e-06, -2.93735989e-09, 3.88599974e-10], [-2.18989545e-07, -1.61358779e-12, -4.29117518e-15, 2.97365289e-11,
# -2.75176488e-11, -1.80869896e-16, 3.25558628e-16, 1.03400665e-05,
# -1.34567505e-06, -2.28289589e-09, 3.30633612e-10], [-1.24152356e-07, -8.86986059e-13, -7.43139226e-15, 1.32118757e-11,
# -2.72955669e-11, -1.87511417e-16, 5.58757958e-16, 1.09654775e-05,
# -1.06805612e-06, -3.11143725e-09, 3.60512708e-10], [-1.28338017e-07, -8.86986059e-13, -5.99723618e-15, 1.49272613e-11,
# -2.72955669e-11, -1.87511417e-16, 5.58757958e-16, 1.09654775e-05,
# -1.26127590e-06, -2.76089022e-09, 3.60512708e-10], [-1.10835144e-07, -6.31356601e-13, -6.09832152e-15, 3.90847907e-11,
# -2.75176488e-11, -1.80869896e-16, 2.67095543e-16, 1.03400665e-05,
# -1.32872910e-06, -2.28289589e-09, 3.04363574e-10], [-1.33050373e-07, -6.89690996e-13, -5.97378168e-15, 3.60623442e-11,
# -3.44796856e-11, -2.33149877e-16, 2.81491560e-16, 8.15538860e-06,
# -1.06805612e-06, -2.28289589e-09, 2.57050658e-10], [-2.18989545e-07, -1.98206660e-12, -4.29117518e-15, 2.63563289e-11,
# -3.37705024e-11, -1.97633487e-16, 2.81491560e-16, 1.03400665e-05,
# -1.06805612e-06, -2.85312123e-09, 3.63509506e-10], [-1.33050373e-07, -8.43443781e-13, -4.41427883e-15, 2.99764550e-11,
# -4.19583511e-11, -2.46133135e-16, 2.50794031e-16, 1.03400665e-05,
# -1.23172012e-06, -2.93735989e-09, 3.70241391e-10], [-2.18989545e-07, -1.33489049e-12, -4.29117518e-15, 3.15567278e-11,
# -3.27388330e-11, -1.56880637e-16, 2.50794031e-16, 1.03400665e-05,
# -1.04565507e-06, -2.93735989e-09, 3.63509506e-10], [-2.18989545e-07, -1.66272656e-12, -4.29117518e-15, 2.63563289e-11,
# -3.61915301e-11, -1.56880637e-16, 2.50794031e-16, 1.03400665e-05,
# -8.86156280e-07, -2.93735989e-09, 3.63509506e-10]],
# [[-1.24152356e-07, -6.94922852e-13, -7.67773083e-15, 1.36780379e-11,
# -2.72955669e-11, -1.43284938e-16, 6.70711761e-16, 1.09654775e-05,
# -1.26365153e-06, -3.11143725e-09, 3.81150639e-10], [-2.09974744e-07, -1.60662950e-12, -3.92631039e-15, 2.63563289e-11,
# -3.37705024e-11, -1.97633487e-16, 2.81491560e-16, 1.03400665e-05,
# -1.06805612e-06, -2.85312123e-09, 3.60512708e-10], [-1.46360018e-07, -8.86986059e-13, -7.43139226e-15, 1.32118757e-11,
# -2.72955669e-11, -1.87511417e-16, 5.58757958e-16, 1.09654775e-05,
# -1.06805612e-06, -3.11143725e-09, 3.60512708e-10], [-1.24152356e-07, -7.96180691e-13, -8.36167635e-15, 1.32118757e-11,
# -3.21807368e-11, -2.22950035e-16, 5.58757958e-16, 8.22513926e-06,
# -1.06805612e-06, -3.35047125e-09, 3.60512708e-10], [-1.24152356e-07, -8.86986059e-13, -4.29117518e-15, 2.63563289e-11,
# -3.37705024e-11, -1.38660518e-16, 2.81491560e-16, 9.60374403e-06,
# -1.18797405e-06, -2.85312123e-09, 3.63509506e-10], [-2.78410004e-07, -1.98206660e-12, -7.43139226e-15, 1.32118757e-11,
# -2.72955669e-11, -2.06529601e-16, 5.58757958e-16, 1.09654775e-05,
# -1.20556956e-06, -3.11143725e-09, 3.60512708e-10], [-2.18989545e-07, -1.33489049e-12, -3.34054061e-15, 3.15567278e-11,
# -3.27388330e-11, -1.56880637e-16, 2.50794031e-16, 1.03400665e-05,
# -1.04565507e-06, -2.85312123e-09, 3.20990371e-10], [-2.18989545e-07, -1.98206660e-12, -4.29117518e-15, 2.08154936e-11,
# -3.79851079e-11, -1.97633487e-16, 2.81491560e-16, 1.03400665e-05,
# -1.25022892e-06, -2.93735989e-09, 3.63509506e-10], [-2.18989545e-07, -1.33489049e-12, -4.44813035e-15, 3.15567278e-11,
# -3.27388330e-11, -1.56880637e-16, 2.90712363e-16, 1.03400665e-05,
# -1.04565507e-06, -2.71432032e-09, 3.63509506e-10], [-2.18989545e-07, -1.33489049e-12, -4.29117518e-15, 3.15567278e-11,
# -3.27388330e-11, -1.95704352e-16, 2.50794031e-16, 1.03400665e-05,
# -1.04565507e-06, -2.93735989e-09, 3.86679058e-10]],
# [[-2.09974744e-07, -1.60662950e-12, -3.92631039e-15, 2.52491264e-11,
# -3.37705024e-11, -1.90686845e-16, 2.81491560e-16, 1.03400665e-05,
# -1.06805612e-06, -2.71432032e-09, 3.63509506e-10], [-2.18989545e-07, -1.33489049e-12, -4.70042812e-15, 3.15567278e-11,
# -3.27388330e-11, -1.56880637e-16, 2.90712363e-16, 1.03400665e-05,
# -8.28266240e-07, -2.85312123e-09, 3.60512708e-10], [-2.18989545e-07, -1.33489049e-12, -4.44813035e-15, 3.15567278e-11,
# -3.27388330e-11, -1.13761245e-16, 2.39927598e-16, 1.01002982e-05,
# -1.34063169e-06, -2.85312123e-09, 3.63509506e-10], [-1.24152356e-07, -8.86986059e-13, -3.49453903e-15, 2.63563289e-11,
# -3.37705024e-11, -1.38660518e-16, 2.81491560e-16, 1.03400665e-05,
# -1.04565507e-06, -2.71432032e-09, 2.76026214e-10], [-2.80604705e-07, -1.33489049e-12, -4.44813035e-15, 3.38846185e-11,
# -3.27388330e-11, -1.71862677e-16, 2.90712363e-16, 1.03400665e-05,
# -1.04565507e-06, -2.71432032e-09, 4.44659442e-10], [-2.69716633e-07, -1.60662950e-12, -3.92631039e-15, 2.08605905e-11,
# -3.37705024e-11, -1.97633487e-16, 2.81491560e-16, 1.03400665e-05,
# -9.17231762e-07, -2.85312123e-09, 3.63509506e-10], [-1.02453276e-07, -8.86986059e-13, -7.43139226e-15, 2.08154936e-11,
# -3.79851079e-11, -2.25241520e-16, 2.81491560e-16, 1.28523257e-05,
# -1.25022892e-06, -2.93735989e-09, 3.63509506e-10], [-1.85003842e-07, -2.37056104e-12, -4.41022717e-15, 1.32118757e-11,
# -2.03844980e-11, -1.87511417e-16, 5.58757958e-16, 1.28176927e-05,
# -8.29437427e-07, -3.84865948e-09, 3.60512708e-10], [-2.08073248e-07, -1.22732306e-12, -4.74138819e-15, 2.63563289e-11,
# -2.73396202e-11, -1.56880637e-16, 2.90712363e-16, 1.03400665e-05,
# -1.04565507e-06, -2.71432032e-09, 3.63509506e-10], [-2.18989545e-07, -1.33489049e-12, -4.44813035e-15, 3.15567278e-11,
# -3.37705024e-11, -1.97633487e-16, 2.81491560e-16, 9.77685398e-06,
# -1.06805612e-06, -2.85312123e-09, 3.85974088e-10]],
# [[-2.08073248e-07, -1.22732306e-12, -4.74138819e-15, 2.13452113e-11,
# -2.73396202e-11, -1.56880637e-16, 2.90712363e-16, 9.77685398e-06,
# -1.08986232e-06, -2.85312123e-09, 3.85974088e-10], [-2.18989545e-07, -1.33489049e-12, -4.44813035e-15, 3.52047668e-11,
# -2.52099329e-11, -1.97633487e-16, 2.81491560e-16, 1.03400665e-05,
# -1.04565507e-06, -3.04777728e-09, 4.46409100e-10], [-2.09974744e-07, -1.60662950e-12, -5.02181585e-15, 3.03646807e-11,
# -3.03866461e-11, -1.76810287e-16, 2.90712363e-16, 1.03400665e-05,
# -1.22990242e-06, -2.07713149e-09, 2.69364576e-10], [-2.26082942e-07, -9.55005795e-13, -4.74138819e-15, 2.88556583e-11,
# -3.16920914e-11, -1.90686845e-16, 3.20513001e-16, 1.12269459e-05,
# -1.22755448e-06, -3.28704343e-09, 4.21952355e-10], [-2.08073248e-07, -1.22732306e-12, -6.14069752e-15, 2.63563289e-11,
# -2.73396202e-11, -2.02204965e-16, 2.81491560e-16, 1.11076158e-05,
# -1.06805612e-06, -2.85312123e-09, 3.48161821e-10], [-2.45778297e-07, -1.33489049e-12, -4.44813035e-15, 3.15567278e-11,
# -3.37705024e-11, -1.97633487e-16, 3.28506021e-16, 1.03400665e-05,
# -1.04565507e-06, -2.71432032e-09, 3.94126054e-10], [-2.69716633e-07, -1.60662950e-12, -3.92631039e-15, 2.01391031e-11,
# -4.37760799e-11, -1.97633487e-16, 2.81491560e-16, 9.21894801e-06,
# -9.17231762e-07, -2.64076282e-09, 3.85974088e-10], [-2.18989545e-07, -1.31022191e-12, -5.40509635e-15, 3.15567278e-11,
# -3.37705024e-11, -1.97633487e-16, 2.81491560e-16, 9.77685398e-06,
# -1.06805612e-06, -2.85312123e-09, | |
<reponame>aerospike/aerospike-admin
# Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import locale
import logging
import sys
import time
from io import StringIO
from pydoc import pipepager
from lib.live_cluster.client.node import ASInfoError
from lib.health import constants as health_constants
from lib.health.util import print_dict
from lib.utils import file_size, constants, util
from lib.view import sheet, terminal, templates
from lib.view.sheet import SheetStyle
from lib.view.table import Orientation, Table, TitleFormats
# Layout constants for section headers in collectinfo/summary-style output:
# column offsets for level-1 and level-2 headings, and total header width.
H1_offset = 13
H2_offset = 15
H_width = 80
class CliView(object):
NO_PAGER, LESS, MORE, SCROLL = range(4)
pager = NO_PAGER
logger = logging.getLogger("asadm")
@staticmethod
def print_result(out):
if out is None or out == "":
return
if type(out) is not str:
out = str(out)
if CliView.pager == CliView.LESS:
pipepager(out, cmd="less -RSX")
elif CliView.pager == CliView.SCROLL:
for i in out.split("\n"):
print(i)
time.sleep(0.05)
else:
print(out)
@staticmethod
def print_pager():
if CliView.pager == CliView.LESS:
print("LESS")
elif CliView.pager == CliView.MORE:
print("MORE")
elif CliView.pager == CliView.SCROLL:
print("SCROLL")
else:
print("NO PAGER")
@staticmethod
def _get_timestamp_suffix(timestamp):
if not timestamp:
timestamp = time.strftime("%Y-%m-%d %H:%M:%S UTC", time.gmtime())
return " (" + str(timestamp) + ")"
@staticmethod
def info_network(
stats, cluster_names, versions, builds, cluster, timestamp="", **mods
):
prefixes = cluster.get_node_names(mods.get("with", []))
hosts = cluster.nodes
title_suffix = CliView._get_timestamp_suffix(timestamp)
title = "Network Information" + title_suffix
sources = dict(
cluster_names=cluster_names,
prefixes=prefixes,
node_ids=dict(
((k, cluster.get_node(k)[0].node_id) for k in prefixes.keys())
),
hosts=dict(((k, h.sock_name(use_fqdn=False)) for k, h in hosts.items())),
builds=builds,
versions=versions,
stats=stats,
)
common = dict(principal=cluster.get_expected_principal())
CliView.print_result(
sheet.render(templates.info_network_sheet, title, sources, common=common)
)
@staticmethod
def info_namespace_usage(stats, cluster, timestamp="", **mods):
prefixes = cluster.get_node_names(mods.get("with", []))
title_suffix = CliView._get_timestamp_suffix(timestamp)
title = "Namespace Usage Information" + title_suffix
sources = dict(
# TODO - collect cluster-name.
cluster_names=dict([(k, None) for k in stats.keys()]),
node_ids=dict(
((k, cluster.get_node(k)[0].node_id) for k in prefixes.keys())
),
prefixes=prefixes,
ns_stats=stats,
)
common = dict(principal=cluster.get_expected_principal())
CliView.print_result(
sheet.render(
templates.info_namespace_usage_sheet, title, sources, common=common
)
)
@staticmethod
def info_namespace_object(stats, cluster, timestamp="", **mods):
prefixes = cluster.get_node_names(mods.get("with", []))
title_suffix = CliView._get_timestamp_suffix(timestamp)
title = "Namespace Object Information" + title_suffix
sources = dict(
# TODO - collect cluster-name.
cluster_names=dict([(k, None) for k in stats.keys()]),
node_ids=dict(
((k, cluster.get_node(k)[0].node_id) for k in prefixes.keys())
),
prefixes=prefixes,
ns_stats=stats,
)
common = dict(principal=cluster.get_expected_principal())
CliView.print_result(
sheet.render(
templates.info_namespace_object_sheet, title, sources, common=common
)
)
@staticmethod
def info_set(stats, cluster, timestamp="", **mods):
prefixes = cluster.get_node_names(mods.get("with", []))
title_suffix = CliView._get_timestamp_suffix(timestamp)
title = "Set Information%s" + title_suffix
sources = dict(
# TODO - collect cluster-name.
cluster_names=dict([(k, None) for k in stats.keys()]),
node_ids=dict(
((k, cluster.get_node(k)[0].node_id) for k in prefixes.keys())
),
prefixes=prefixes,
set_stats=stats,
)
common = dict(principal=cluster.get_expected_principal())
CliView.print_result(
sheet.render(templates.info_set_sheet, title, sources, common=common)
)
# pre 5.0
@staticmethod
def info_dc(stats, cluster, timestamp="", **mods):
prefixes = cluster.get_node_names(mods.get("with", []))
title_suffix = CliView._get_timestamp_suffix(timestamp)
title = "DC Information%s" % (title_suffix)
sources = dict(
node_ids=dict(
((k, cluster.get_node(k)[0].node_id) for k in prefixes.keys())
),
prefixes=prefixes,
dc_stats=stats,
)
common = dict(principal=cluster.get_expected_principal())
CliView.print_result(
sheet.render(templates.info_dc_sheet, title, sources, common=common)
)
# pre 5.0
@staticmethod
def info_old_XDR(stats, builds, xdr_enable, cluster, timestamp="", **mods):
if not max(xdr_enable.values()):
return
prefixes = cluster.get_node_names(mods.get("with", []))
title_suffix = CliView._get_timestamp_suffix(timestamp)
title = "XDR Information" + title_suffix
sources = dict(
xdr_enable=xdr_enable,
node_ids=dict(
((k, cluster.get_node(k)[0].node_id) for k in prefixes.keys())
),
prefixes=prefixes,
builds=builds,
xdr_stats=stats,
)
common = dict(principal=cluster.get_expected_principal())
CliView.print_result(
sheet.render(templates.info_old_xdr_sheet, title, sources, common=common)
)
@staticmethod
def info_XDR(
stats, xdr_enable, cluster, timestamp="", title="XDR Information", **ignore
):
title_suffix = CliView._get_timestamp_suffix(timestamp)
title = title + title_suffix
prefixes = cluster.get_node_names()
node_ids = dict(((k, cluster.get_node(k)[0].node_id) for k in prefixes.keys()))
sources = dict(
xdr_enable=xdr_enable, node_ids=node_ids, prefixes=prefixes, xdr_stats=stats
)
common = dict(principal=cluster.get_expected_principal())
CliView.print_result(
sheet.render(templates.info_xdr_sheet, title, sources, common=common)
)
@staticmethod
def info_sindex(stats, cluster, timestamp="", **mods):
# return if sindex stats are empty.
if not stats:
return
# stats comes in {index:{node:{k:v}}}, needs to be {node:{index:{k:v}}}
sindex_stats = {}
for iname, nodes in stats.items():
for node, values in nodes.items():
sindex_stats[node] = node_stats = sindex_stats.get(node, {})
node_stats[iname] = values
prefixes = cluster.get_node_names(mods.get("with", []))
title_suffix = CliView._get_timestamp_suffix(timestamp)
title = "Secondary Index Information" + title_suffix
sources = dict(
node_ids=dict(
((k, cluster.get_node(k)[0].node_id) for k in prefixes.keys())
),
prefixes=prefixes,
sindex_stats=sindex_stats,
)
common = dict(principal=cluster.get_expected_principal())
CliView.print_result(
sheet.render(templates.info_sindex_sheet, title, sources, common=common)
)
@staticmethod
def show_distribution(
title, histogram, unit, hist, cluster, like=None, timestamp="", **mods
):
likes = util.compile_likes(like)
title_suffix = CliView._get_timestamp_suffix(timestamp)
description = "Percentage of records having {} less than or ".format(
hist
) + "equal to value measured in {}".format(unit)
namespaces = set(filter(likes.search, histogram.keys()))
for namespace, node_data in histogram.items():
if (
namespace not in namespaces
or not node_data
or isinstance(node_data, Exception)
):
continue
this_title = "{} - {} in {}{}".format(namespace, title, unit, title_suffix)
sources = dict(
prefixes=cluster.get_node_names(mods.get("with", [])),
histogram=dict((k, d["percentiles"]) for k, d in node_data.items()),
)
CliView.print_result(
sheet.render(
templates.show_distribution_sheet,
this_title,
sources,
description=description,
)
)
@staticmethod
def show_object_distribution(
title,
histogram,
unit,
hist,
bucket_count,
set_bucket_count,
cluster,
like=None,
timestamp="",
loganalyser_mode=False,
**mods
):
prefixes = cluster.get_node_names(mods.get("with", []))
likes = util.compile_likes(like)
title_suffix = CliView._get_timestamp_suffix(timestamp)
description = "Number of records having {} in the range ".format(
hist
) + "measured in {}".format(unit)
namespaces = set(filter(likes.search, histogram.keys()))
for namespace, node_data in histogram.items():
if namespace not in namespaces:
continue
ns_title = "{} - {} in {}{}".format(namespace, title, unit, title_suffix)
sources = dict(
prefixes=prefixes,
histogram={
h: d.get("data", {}) for h, d in node_data.items() if h != "columns"
},
)
CliView.print_result(
sheet.render(
templates.show_object_distribution_sheet,
ns_title,
sources,
description=description,
)
)
@staticmethod
def format_latency(orig_latency):
# XXX - eventually, node.py could return this format. Changing here
# because loganalyser also sends this format.
latency = {}
for hist, nodes_data in orig_latency.items():
for node, node_data in nodes_data.items():
node_latency = latency[node] = latency.get(node, {})
if "namespace" in node_data:
for ns, ns_data in node_data["namespace"].items():
for slice_id, values in enumerate(ns_data["values"]):
node_latency[(ns, hist, slice_id)] = dict(
zip(ns_data["columns"], values)
)
elif "total" in node_data:
# batch-index is not under 'namespace'
hist_data = node_data["total"]
for slice_id, values in enumerate(hist_data["values"]):
node_latency[
(templates.show_latency_sheet.no_entry, hist, slice_id)
] = dict(zip(hist_data["columns"], values))
return latency
@staticmethod
def show_latency(latency, cluster, like=None, timestamp="", **mods):
# TODO - May not need to converter now that dicts can be nested.
prefixes = cluster.get_node_names(mods.get("with", []))
likes = util.compile_likes(like)
title = "Latency " + CliView._get_timestamp_suffix(timestamp)
keys = set(filter(likes.search, latency.keys()))
latency = {k: v for k, v in latency.items() if k in keys}
latency = CliView.format_latency(latency)
sources = dict(prefixes=prefixes, histogram=latency)
CliView.print_result(sheet.render(templates.show_latency_sheet, title, sources))
@staticmethod
def show_config(
    title,
    service_configs,
    cluster,
    like=None,
    diff=False,
    show_total=False,
    title_every_nth=0,
    flip_output=False,
    timestamp="",
    **mods
):
    """Render one configuration sheet for every node in the cluster.

    `like` filters the displayed parameters, `diff` shows only values
    differing between nodes, and `flip_output` transposes the table.
    """
    full_title = title + CliView._get_timestamp_suffix(timestamp)
    prefixes = cluster.get_node_names(mods.get("with", []))
    node_ids = {key: cluster.get_node(key)[0].node_id for key in prefixes}
    rendered = sheet.render(
        templates.show_config_sheet,
        full_title,
        dict(prefixes=prefixes, data=service_configs, node_ids=node_ids),
        style=SheetStyle.columns if flip_output else None,
        selectors=like,
        title_repeat=title_every_nth != 0,
        # Aggregation (totals) rows are opt-in via show_total.
        disable_aggregations=not show_total,
        dynamic_diff=diff,
        common=dict(principal=cluster.get_expected_principal()),
    )
    CliView.print_result(rendered)
@staticmethod
def show_stats(*args, **kwargs):
    """Statistics are rendered exactly like configuration values."""
    return CliView.show_config(*args, **kwargs)
@staticmethod
def show_health(*args, **kwargs):
    """Health data is rendered exactly like configuration values."""
    return CliView.show_config(*args, **kwargs)
@staticmethod
def show_xdr5_config(
    title,
    service_configs,
    cluster,
    like=None,
    diff=None,
    title_every_nth=0,
    flip_output=False,
    timestamp="",
    **mods
):
    """Render XDR5 configuration: the top-level xdr config sheet, then
    one sheet per DC and one per namespace.

    `service_configs` must provide the keys 'xdr_configs', 'dc_configs'
    and 'ns_configs'. Other parameters mirror show_config.
    """
    title_suffix = CliView._get_timestamp_suffix(timestamp)
    prefixes = cluster.get_node_names(mods.get("with", []))
    node_ids = dict(((k, cluster.get_node(k)[0].node_id) for k in prefixes.keys()))
    common = dict(principal=cluster.get_expected_principal())
    style = SheetStyle.columns if flip_output else None

    def render_section(template, section_title, data, disable_aggregations=None):
        # All three sections were duplicated verbatim except for the
        # template, title, data and (sometimes) disable_aggregations.
        # disable_aggregations=None means "do not pass the keyword",
        # preserving sheet.render's own default for namespace sheets.
        kwargs = dict(
            selectors=like,
            style=style,
            title_repeat=title_every_nth != 0,
            dynamic_diff=diff,
            common=common,
        )
        if disable_aggregations is not None:
            kwargs["disable_aggregations"] = disable_aggregations
        sources = dict(prefixes=prefixes, node_ids=node_ids, data=data)
        CliView.print_result(
            sheet.render(template, section_title, sources, **kwargs)
        )

    render_section(
        templates.show_config_sheet,
        title + title_suffix,
        service_configs["xdr_configs"],
        disable_aggregations=True,
    )
    for dc in service_configs["dc_configs"]:
        render_section(
            templates.show_config_sheet,
            "DC Configuration for {}{}".format(dc, title_suffix),
            service_configs["dc_configs"][dc],
            disable_aggregations=True,
        )
    for dc in service_configs["ns_configs"]:
        render_section(
            templates.show_config_xdr_ns_sheet,
            "Namespace Configuration for {}{}".format(dc, title_suffix),
            service_configs["ns_configs"][dc],
        )
@staticmethod
def show_grep(title, summary):
if not summary or len(summary.strip()) == 0:
return
if title:
print("************************** %s **************************") % (title)
CliView.print_result(summary)
@staticmethod
def show_grep_count(title, grep_result, title_every_nth=0, **ignore):
# TODO - get rid of total row in grep_result and add column aggregations to sheets.
node_ids = {}
for node, res in grep_result.items():
# TODO - sheet should be able | |
VISWANATH_NATARAJAN_2, VISWANATH_NATARAJAN_2E,
VDI_TABULAR, LETSOU_STIEL, PRZEDZIECKI_SRIDHAR]
'''Default rankings of the low-pressure methods.'''
# Preference order for selecting a high-pressure method automatically.
ranked_methods_P = [COOLPROP, LUCAS]
'''Default rankings of the high-pressure methods.'''
# Names and types of the dependent property objects this correlation
# consumes (vapor pressure and liquid molar volume).
obj_references = pure_references = ('Psat', 'Vml')
obj_references_types = pure_reference_types = (VaporPressure, VolumeLiquid)
# Constructor arguments beyond CASRN that are stored as attributes.
custom_args = ('MW', 'Tm', 'Tc', 'Pc', 'Vc', 'omega', 'Psat', 'Vml')
def __init__(self, CASRN='', MW=None, Tm=None, Tc=None, Pc=None, Vc=None,
             omega=None, Psat=None, Vml=None, load_data=True,
             extrapolation='linear', poly_fit=None, method=None, method_P=None):
    """Store the pure-component constants and dependent-property objects,
    initialize the tabular-data stores, load coefficient data via
    :obj:`load_all_methods`, and select default low-pressure and
    high-pressure calculation methods.
    """
    self.CASRN = CASRN
    self.MW = MW
    self.Tm = Tm
    self.Tc = Tc
    self.Pc = Pc
    self.Vc = Vc
    self.omega = omega
    self.Psat = Psat
    self.Vml = Vml
    self.Tmin = None
    '''Minimum temperature at which no method can calculate the
    liquid viscosity under.'''
    self.Tmax = None
    '''Maximum temperature at which no method can calculate the
    liquid viscosity above.'''
    self.tabular_data = {}
    '''tabular_data, dict: Stored (Ts, properties) for any
    tabular data; indexed by provided or autogenerated name.'''
    self.tabular_data_interpolators = {}
    '''tabular_data_interpolators, dict: Stored (extrapolator,
    spline) tuples which are interp1d instances for each set of tabular
    data; indexed by tuple of (name, interpolation_T,
    interpolation_property, interpolation_property_inv) to ensure that
    if an interpolation transform is altered, the old interpolator which
    had been created is no longer used.'''
    self.tabular_data_P = {}
    '''tabular_data_P, dict: Stored (Ts, Ps, properties) for any
    tabular data; indexed by provided or autogenerated name.'''
    self.tabular_data_interpolators_P = {}
    '''tabular_data_interpolators_P, dict: Stored (extrapolator,
    spline) tuples which are interp2d instances for each set of tabular
    data; indexed by tuple of (name, interpolation_T, interpolation_P,
    interpolation_property, interpolation_property_inv) to ensure that
    if an interpolation transform is altered, the old interpolator which
    had been created is no longer used.'''
    self.all_methods_P = set()
    '''Set of all high-pressure methods available for a given CASRN and
    properties; filled by :obj:`load_all_methods`.'''
    # Populate coefficients, all_methods/all_methods_P and Tmin/Tmax.
    self.load_all_methods(load_data)
    self.extrapolation = extrapolation
    # Low-pressure method priority: explicit polynomial fit, then a
    # user-specified method, then the first valid ranked method.
    if poly_fit is not None:
        self._set_poly_fit(poly_fit)
    elif method is not None:
        self.method = method
    else:
        methods = self.valid_methods(T=None)
        if methods:
            self.method = methods[0]
    # High-pressure method: user choice wins, else best-ranked available.
    if method_P is not None:
        self.method_P = method_P
    else:
        for m in self.ranked_methods_P:
            if m in self.all_methods_P:
                self.method_P = m
                break
def load_all_methods(self, load_data=True):
    r'''Method which picks out coefficients for the specified chemical
    from the various dictionaries and DataFrames storing it. All data is
    stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
    :obj:`all_methods` and :obj:`all_methods_P` as a set of methods for
    which the data exists for.

    Called on initialization only. See the source code for the variables at
    which the coefficients are stored. The coefficients can safely be
    altered once the class is initialized. This method can be called again
    to reset the parameters.
    '''
    methods, methods_P = [], []
    Tmins, Tmaxs = [], []
    # Per-method (Tmin, Tmax) validity ranges, keyed by method name.
    self.T_limits = T_limits = {}
    if load_data:
        # Each branch below registers one data-backed method when the
        # chemical (by CASRN) is present in that data source.
        if has_CoolProp() and self.CASRN in coolprop_dict:
            CP_f = coolprop_fluids[self.CASRN]
            if CP_f.has_mu:
                self.CP_f = CP_f
                methods.append(COOLPROP); methods_P.append(COOLPROP)
                Tmins.append(self.CP_f.Tmin); Tmaxs.append(self.CP_f.Tc)
                T_limits[COOLPROP] = (self.CP_f.Tmin, self.CP_f.Tc)
        if self.CASRN in miscdata.VDI_saturation_dict:
            methods.append(VDI_TABULAR)
            Ts, props = lookup_VDI_tabular_data(self.CASRN, 'Mu (l)')
            self.VDI_Tmin = Ts[0]
            self.VDI_Tmax = Ts[-1]
            self.tabular_data[VDI_TABULAR] = (Ts, props)
            Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax)
            T_limits[VDI_TABULAR] = (self.VDI_Tmin, self.VDI_Tmax)
        if self.CASRN in viscosity.mu_data_Dutt_Prasad.index:
            methods.append(DUTT_PRASAD)
            A, B, C, self.DUTT_PRASAD_Tmin, self.DUTT_PRASAD_Tmax = viscosity.mu_values_Dutt_Prasad[viscosity.mu_data_Dutt_Prasad.index.get_loc(self.CASRN)].tolist()
            # A - 3.0: unit conversion baked into the stored coefficient.
            self.DUTT_PRASAD_coeffs = [A - 3.0, B, C]
            Tmins.append(self.DUTT_PRASAD_Tmin); Tmaxs.append(self.DUTT_PRASAD_Tmax)
            T_limits[DUTT_PRASAD] = (self.DUTT_PRASAD_Tmin, self.DUTT_PRASAD_Tmax)
        if self.CASRN in viscosity.mu_data_VN3.index:
            methods.append(VISWANATH_NATARAJAN_3)
            A, B, C, self.VISWANATH_NATARAJAN_3_Tmin, self.VISWANATH_NATARAJAN_3_Tmax = viscosity.mu_values_VN3[viscosity.mu_data_VN3.index.get_loc(self.CASRN)].tolist()
            self.VISWANATH_NATARAJAN_3_coeffs = [A - 3.0, B, C]
            Tmins.append(self.VISWANATH_NATARAJAN_3_Tmin); Tmaxs.append(self.VISWANATH_NATARAJAN_3_Tmax)
            T_limits[VISWANATH_NATARAJAN_3] = (self.VISWANATH_NATARAJAN_3_Tmin, self.VISWANATH_NATARAJAN_3_Tmax)
        if self.CASRN in viscosity.mu_data_VN2.index:
            methods.append(VISWANATH_NATARAJAN_2)
            A, B, self.VISWANATH_NATARAJAN_2_Tmin, self.VISWANATH_NATARAJAN_2_Tmax = viscosity.mu_values_VN2[viscosity.mu_data_VN2.index.get_loc(self.CASRN)].tolist()
            self.VISWANATH_NATARAJAN_2_coeffs = [A - 4.605170185988092, B] # log(100) = 4.605170185988092
            Tmins.append(self.VISWANATH_NATARAJAN_2_Tmin); Tmaxs.append(self.VISWANATH_NATARAJAN_2_Tmax)
            T_limits[VISWANATH_NATARAJAN_2] = (self.VISWANATH_NATARAJAN_2_Tmin, self.VISWANATH_NATARAJAN_2_Tmax)
        if self.CASRN in viscosity.mu_data_VN2E.index:
            methods.append(VISWANATH_NATARAJAN_2E)
            C, D, self.VISWANATH_NATARAJAN_2E_Tmin, self.VISWANATH_NATARAJAN_2E_Tmax = viscosity.mu_values_VN2E[viscosity.mu_data_VN2E.index.get_loc(self.CASRN)].tolist()
            self.VISWANATH_NATARAJAN_2E_coeffs = [C, D]
            Tmins.append(self.VISWANATH_NATARAJAN_2E_Tmin); Tmaxs.append(self.VISWANATH_NATARAJAN_2E_Tmax)
            T_limits[VISWANATH_NATARAJAN_2E] = (self.VISWANATH_NATARAJAN_2E_Tmin, self.VISWANATH_NATARAJAN_2E_Tmax)
        if self.CASRN in viscosity.mu_data_Perrys_8E_2_313.index:
            methods.append(DIPPR_PERRY_8E)
            C1, C2, C3, C4, C5, self.Perrys2_313_Tmin, self.Perrys2_313_Tmax = viscosity.mu_values_Perrys_8E_2_313[viscosity.mu_data_Perrys_8E_2_313.index.get_loc(self.CASRN)].tolist()
            self.Perrys2_313_coeffs = [C1, C2, C3, C4, C5]
            Tmins.append(self.Perrys2_313_Tmin); Tmaxs.append(self.Perrys2_313_Tmax)
            T_limits[DIPPR_PERRY_8E] = (self.Perrys2_313_Tmin, self.Perrys2_313_Tmax)
        if self.CASRN in viscosity.mu_data_VDI_PPDS_7.index:
            methods.append(VDI_PPDS)
            # No temperature limits - ideally could use critical point
            self.VDI_PPDS_coeffs = VDI_PPDS_coeffs = viscosity.mu_values_PPDS_7[viscosity.mu_data_VDI_PPDS_7.index.get_loc(self.CASRN)].tolist()
            # The PPDS9 equation has two characteristic temperatures in
            # coeffs[2] and coeffs[3]; the logic below probes the
            # derivative near them to choose a monotonic validity range.
            low = low_orig = min(self.VDI_PPDS_coeffs[2], self.VDI_PPDS_coeffs[3])# + 5.0
            high = high_orig = max(self.VDI_PPDS_coeffs[2], self.VDI_PPDS_coeffs[3])# - 5.0
            if low > 0.0:
                dmu_low_under, mu_low_under = dPPDS9_dT(low*0.9995, *VDI_PPDS_coeffs)
                dmu_low_above, mu_low_above = dPPDS9_dT(low*1.0005, *VDI_PPDS_coeffs)
            if high > 0.0:
                dmu_high_under, mu_high_under = dPPDS9_dT(high*0.9995, *VDI_PPDS_coeffs)
                dmu_high_above, mu_high_above = dPPDS9_dT(high*1.0005, *VDI_PPDS_coeffs)
            if self.Tm is not None:
                dmu_Tm, mu_Tm = dPPDS9_dT(self.Tm, *VDI_PPDS_coeffs)
            if self.Tc is not None:
                dmu_Tc_under, mu_Tc_under = dPPDS9_dT(self.Tc, *VDI_PPDS_coeffs)
            # NOTE(review): dmu_low_under/dmu_low_above are only bound
            # when low > 0.0. If low <= 0.0 and the short-circuit
            # `high > 0.0 and low < 0.0` is False (e.g. low == 0.0), the
            # isinf() checks below would raise NameError. Confirm the
            # data cannot produce that combination.
            if high > 0.0 and low < 0.0 or isinf(dmu_low_under) or isinf(dmu_low_above):
                # high + a few K as lower limit
                low = 0.1*high
                high = high-1.0
            else:
                low, high = low + 5.0, high + 5.0
            if self.Tm is not None:
                low = self.Tm
            if self.Tc is not None:
                # NOTE(review): dmu_Tc_under is only bound when
                # self.Tc is not None; guarded here, consistent.
                if dmu_Tc_under < 0.0:
                    high = self.Tc
            if self.Tm is not None and self.Tc is not None and low_orig < 0 and self.Tm < high_orig < self.Tc and dmu_Tc_under < 0.0:
                low = high_orig + 1.0
                if high == high_orig:
                    high -= 1.0
            dmu_low, mu_low = dPPDS9_dT(low, *VDI_PPDS_coeffs)
            dmu_high, mu_high = dPPDS9_dT(high, *VDI_PPDS_coeffs)
            # If the derivative changes sign inside (low, high), shrink
            # the range to the monotonic side of the extremum.
            if dmu_low*dmu_high < 0.0:
                def to_solve(T):
                    return dPPDS9_dT(T, *VDI_PPDS_coeffs)[0]
                T_switch = brenth(to_solve, low, high)
                if dmu_high > 0.0:
                    high = T_switch
                else:
                    low = T_switch
            T_limits[VDI_PPDS] = (low, high)
            Tmins.append(low); Tmaxs.append(high)
    # CSP methods below need only constants, not data files, so they are
    # registered regardless of load_data.
    if all((self.MW, self.Tc, self.Pc, self.omega)):
        methods.append(LETSOU_STIEL)
        Tmins.append(self.Tc/4); Tmaxs.append(self.Tc) # TODO: test model at low T
        T_limits[LETSOU_STIEL] = (self.Tc/4, self.Tc)
    if all((self.MW, self.Tm, self.Tc, self.Pc, self.Vc, self.omega, self.Vml)):
        methods.append(PRZEDZIECKI_SRIDHAR)
        Tmins.append(self.Tm); Tmaxs.append(self.Tc) # TODO: test model at Tm
        T_limits[PRZEDZIECKI_SRIDHAR] = (self.Tm, self.Tc)
    if all([self.Tc, self.Pc, self.omega]):
        methods_P.append(LUCAS)
    self.all_methods = set(methods)
    self.all_methods_P = set(methods_P)
    if Tmins and Tmaxs:
        self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)
@staticmethod
def _method_indexes():
    '''Return a dict mapping each data-backed method name to the index
    of CAS numbers it covers. Routing every lookup through this function
    keeps the data files unloaded until they are actually needed.
    '''
    indexes = {}
    indexes[COOLPROP] = [CAS for CAS in coolprop_dict
                         if (coolprop_fluids[CAS].has_mu
                             and CAS not in CoolProp_failing_PT_flashes)]
    indexes[VDI_TABULAR] = list(miscdata.VDI_saturation_dict.keys())
    indexes[DUTT_PRASAD] = viscosity.mu_data_Dutt_Prasad.index
    indexes[VISWANATH_NATARAJAN_3] = viscosity.mu_data_VN3.index
    indexes[VISWANATH_NATARAJAN_2] = viscosity.mu_data_VN2.index
    indexes[VISWANATH_NATARAJAN_2E] = viscosity.mu_data_VN2E.index
    indexes[DIPPR_PERRY_8E] = viscosity.mu_data_Perrys_8E_2_313.index
    indexes[VDI_PPDS] = viscosity.mu_data_VDI_PPDS_7.index
    return indexes
def calculate(self, T, method):
    r'''Method to calculate low-pressure liquid viscosity at tempearture
    `T` with a given method.

    This method has no exception handling; see :obj:`T_dependent_property <thermo.utils.TDependentProperty.T_dependent_property>`
    for that.

    Parameters
    ----------
    T : float
        Temperature at which to calculate viscosity, [K]
    method : str
        Name of the method to use

    Returns
    -------
    mu : float
        Viscosity of the liquid at T and a low pressure, [Pa*s]
    '''
    if method == DUTT_PRASAD:
        A, B, C = self.DUTT_PRASAD_coeffs
        mu = Viswanath_Natarajan_3(T, A, B, C)
    elif method == VISWANATH_NATARAJAN_3:
        A, B, C = self.VISWANATH_NATARAJAN_3_coeffs
        mu = Viswanath_Natarajan_3(T, A, B, C)
    elif method == VISWANATH_NATARAJAN_2:
        A, B = self.VISWANATH_NATARAJAN_2_coeffs
        mu = Viswanath_Natarajan_2(T, A, B)
    elif method == VISWANATH_NATARAJAN_2E:
        C, D = self.VISWANATH_NATARAJAN_2E_coeffs
        mu = Viswanath_Natarajan_2_exponential(T, C, D)
    elif method == DIPPR_PERRY_8E:
        mu = EQ101(T, *self.Perrys2_313_coeffs)
    elif method == COOLPROP:
        mu = CoolProp_T_dependent_property(T, self.CASRN, 'V', 'l')
    elif method == LETSOU_STIEL:
        mu = Letsou_Stiel(T, self.MW, self.Tc, self.Pc, self.omega)
    elif method == PRZEDZIECKI_SRIDHAR:
        # Vml may be either a constant molar volume or a callable of T.
        Vml = self.Vml(T) if hasattr(self.Vml, '__call__') else self.Vml
        mu = Przedziecki_Sridhar(T, self.Tm, self.Tc, self.Pc, self.Vc, Vml, self.omega, self.MW)
    elif method == VDI_PPDS:
        mu = PPDS9(T, *self.VDI_PPDS_coeffs)
    elif method == BESTFIT:
        # Linear extrapolation in log-space outside the fitted range,
        # Horner evaluation of the polynomial inside it.
        if T < self.poly_fit_Tmin:
            mu = (T - self.poly_fit_Tmin)*self.poly_fit_Tmin_slope + self.poly_fit_Tmin_value
        elif T > self.poly_fit_Tmax:
            mu = (T - self.poly_fit_Tmax)*self.poly_fit_Tmax_slope + self.poly_fit_Tmax_value
        else:
            mu = 0.0
            for coeff in self.poly_fit_coeffs:
                mu = mu*T + coeff
            mu = exp(mu)
    elif method in self.tabular_data:
        mu = self.interpolate(T, method)
    return mu
def test_method_validity(self, T, method):
r'''Method to check the validity of a method. Follows the given
ranges for all coefficient-based methods. For CSP methods, the models
are considered valid from 0 K to the critical point. For tabular data,
extrapolation outside of the range is used if
:obj:`tabular_extrapolation_permitted` is set; if it is, the
extrapolation is considered valid for all temperatures.
It is not guaranteed that a method will work or give an accurate
prediction simply because this method considers the method valid.
Parameters
----------
T : float
Temperature at which to test the method, [K]
method : str
Name of the method to test
Returns
-------
validity : bool
Whether or not a | |
#part of the code from openai
#https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
import numpy as np
import random
import operator
from numba import njit
class SegmentTree(object):
    def __init__(self, capacity, operation, neutral_element):
        """Build a Segment Tree data structure.

        https://en.wikipedia.org/wiki/Segment_tree

        Behaves like a regular array, with two important differences:

        a) setting an item's value is slightly slower: O(lg capacity)
           instead of O(1).
        b) an efficient `reduce` operation is available, which applies
           `operation` over a contiguous subsequence of items.

        Parameters
        ----------
        capacity: int
            Total size of the array - must be a power of two.
        operation: lambda obj, obj -> obj
            Operation for combining elements (eg. sum, max); must form a
            mathematical group together with the set of possible values
            for array elements.
        neutral_element: obj
            Neutral element for the operation above, eg. float('-inf')
            for max and 0 for sum.
        """
        assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
        self._capacity = capacity
        # Classic implicit binary tree: node i has children 2i and 2i+1;
        # leaves live at indices [capacity, 2*capacity).
        self._value = np.array([neutral_element for _ in range(2 * capacity)])
        self._operation = operation

    def _reduce_helper(self, start, end, node, node_start, node_end):
        # `start`/`end` are inclusive bounds of the query within the
        # inclusive range [node_start, node_end] covered by `node`.
        if start == node_start and end == node_end:
            return self._value[node]
        mid = (node_start + node_end) // 2
        left, right = 2 * node, 2 * node + 1
        if end <= mid:
            return self._reduce_helper(start, end, left, node_start, mid)
        if start > mid:
            return self._reduce_helper(start, end, right, mid + 1, node_end)
        # Query straddles both children: combine the two halves.
        return self._operation(
            self._reduce_helper(start, mid, left, node_start, mid),
            self._reduce_helper(mid + 1, end, right, mid + 1, node_end),
        )

    def reduce(self, start=0, end=None):
        """Return the result of applying `self._operation` over
        arr[start:end] (end exclusive; defaults to the full array,
        negative end counts from the back).

        Returns
        -------
        reduced: obj
            result of reducing self.operation over the specified range of array elements.
        """
        if end is None:
            end = self._capacity
        elif end < 0:
            end += self._capacity
        end -= 1  # convert exclusive bound to inclusive
        return self._reduce_helper(start, end, 1, 0, self._capacity - 1)

    def __setitem__(self, idx, val):
        # Write the leaf, then walk up to the root refreshing parents.
        node = idx + self._capacity
        self._value[node] = val
        node //= 2
        while node >= 1:
            self._value[node] = self._operation(
                self._value[2 * node],
                self._value[2 * node + 1],
            )
            node //= 2

    def __getitem__(self, idx):
        assert 0 <= idx < self._capacity
        return self._value[idx + self._capacity]
@njit(parallel=False)
def compiled_setitem_maxtree(idx, val, _value, _capacity):
    # Numba-compiled leaf update for a max segment tree: write the leaf,
    # then walk parents up to the root recomputing each as max(children).
    # Operates on the tree's raw `_value` array so it can run in nopython mode.
    idx += _capacity
    _value[idx] = val
    idx //= 2
    while idx >= 1:
        _value[idx] = max(_value[2 * idx], _value[2 * idx + 1])
        idx //= 2
class MaxSegmentTree(SegmentTree):
    # Segment tree specialized to max-reduction.
    # NOTE(review): compiled_setitem_maxtree above appears intended as a
    # fast __setitem__ for this class but is not wired in here — confirm
    # whether callers invoke it directly.
    def __init__(self, capacity):
        super(MaxSegmentTree, self).__init__(
            capacity=capacity,
            operation=max,
            neutral_element=0.  # we assume that all elements are larger than zero
        )
        # the maximum value can be accessed directly by "._value[1]"

    def max(self, start=0, end=None):
        """Returns max(arr[start], ..., arr[end])"""
        return super(MaxSegmentTree, self).reduce(start, end)
        #return self._value[1]
@njit(parallel=False)
def compiled_setitem_mintree(idx, val, _value, _capacity):
    # Numba-compiled leaf update for a min segment tree: write the leaf,
    # then refresh each ancestor as min(children) up to the root.
    idx += _capacity
    _value[idx] = val
    idx //= 2
    while idx >= 1:
        _value[idx] = min(_value[2 * idx], _value[2 * idx + 1])
        idx //= 2
class MinSegmentTree(SegmentTree):
    # Segment tree specialized to min-reduction. The neutral element is
    # overridable so callers can seed unused leaves with a finite value.
    def __init__(self, capacity, neutral_element=float("inf")):
        super(MinSegmentTree, self).__init__(
            capacity=capacity,
            operation=min,
            neutral_element=neutral_element
        )
@njit(parallel=False)
def compiled_setitem_sumtree(idx, val, _value, _capacity):
    # Numba-compiled leaf update for a sum segment tree: write the leaf,
    # then refresh each ancestor as the sum of its children.
    idx += _capacity
    _value[idx] = val
    idx //= 2
    while idx >= 1:
        _value[idx] = _value[2 * idx] + _value[2 * idx + 1]
        idx //= 2
@njit(parallel=False)
def compiled_setitem_min_sumtree(idx, min_val, _value, _capacity):
    # Raise the leaf to at least `min_val` (leave it unchanged when it is
    # already >= min_val), then refresh ancestor sums either way.
    # NOTE(review): parents are recomputed even when the leaf is
    # unchanged — presumably harmless, but confirm this is intentional.
    idx += _capacity
    if min_val > _value[idx]:
        _value[idx] = min_val
    idx //= 2
    while idx >= 1:
        _value[idx] = _value[2 * idx] + _value[2 * idx + 1]
        idx //= 2
class SumSegmentTree(SegmentTree):
    # Segment tree specialized to sum-reduction; supports sampling an
    # index proportionally to its stored value via find_prefixsum_idx.
    def __init__(self, capacity):
        super(SumSegmentTree, self).__init__(
            capacity=capacity,
            operation=operator.add,
            neutral_element=0.
        )
        # the total sum can be accessed directly by "._value[1]"

    def sum(self, start=0, end=None):
        """Returns arr[start] + ... + arr[end]"""
        return super(SumSegmentTree, self).reduce(start, end)
        #return self._value[1]

    def find_prefixsum_idx(self, prefixsum):
        """Find the highest index `i` in the array such that
            sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum

        if array values are probabilities, this function
        allows to sample indexes according to the discrete
        probability efficiently.

        Parameters
        ----------
        prefixsum: float
            upperbound on the sum of array prefix

        Returns
        -------
        idx: int
            highest index satisfying the prefixsum constraint
        """
        # Delegates to the numba-compiled tree descent for speed.
        return compiled_find_prefixsum_idx(prefixsum, self._capacity, self._value)
@njit(parallel=False)
def compiled_find_prefixsum_idx(prefixsum, _capacity, _value):
    # Descend from the root: go left when the left subtree's sum exceeds
    # the remaining prefixsum, otherwise subtract it and go right.
    idx = 1
    while idx < _capacity:  # while non-leaf
        if _value[2 * idx] > prefixsum:
            idx = 2 * idx
        else:
            prefixsum -= _value[2 * idx]
            idx = 2 * idx + 1
    # Convert the leaf's tree position back to an array index.
    return idx - _capacity
class ReplayBuffer(object):
    def __init__(self, size):
        """Create Replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped. The index of the next transition
            to store can be accessed by "self._next_idx".
        """
        self._storage = []
        self._maxsize = size
        self._next_idx = 0
        # Cache protocol: sample_next_state_and_cache_indices() stores
        # sampled indices here so a later sample() can return the batch
        # prepared by update_and_store_cached_data().
        self.cache = None
        self.cached_data = None
        # Indices overwritten by add() while a cache is pending; lets
        # consumers detect stale cached samples.
        self.indices_replaced_after_caching = []

    def __len__(self):
        return len(self._storage)

    def add(self, obs_t, action, reward, obs_tp1, done):
        """Append a transition, overwriting the oldest slot once full."""
        data = [obs_t, action, reward, obs_tp1, done]
        if self._next_idx >= len(self._storage):
            self._storage.append(data)
        else:
            assert len(self._storage) == self._maxsize
            self._storage[self._next_idx] = data
            if self.cache is not None:
                # Record overwrites that may invalidate cached samples.
                self.indices_replaced_after_caching.append(self._next_idx)
        self._next_idx = (self._next_idx + 1) % self._maxsize

    def _encode_sample(self, idxes):
        """Gather the transitions at `idxes` into batched numpy arrays.

        NOTE(review): assumes observations expose a `_frames` list of
        frame arrays (LazyFrames-style) — confirm against the env wrappers.
        Returns (stacked obs/next-obs array, actions, rewards, dones).
        """
        obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
        for i in idxes:
            #data = self._storage[i]
            obs_t, action, reward, obs_tp1, done = self._storage[i]
            obses_t.append(obs_t._frames)
            actions.append(action)
            rewards.append(reward)
            obses_tp1.append(obs_tp1._frames)
            dones.append(done)
        shp = obs_t._frames[0].shape
        # Stack current and next observations into one array so they can
        # be moved/processed together downstream.
        obses_t_obses_tp1 = np.array([obses_t, obses_tp1]).reshape(2, len(idxes), -1, shp[-2], shp[-1]) # their data types are np.uint8
        return obses_t_obses_tp1, np.array(actions, dtype=np.int64), np.array(rewards, dtype=np.float32), np.array(dones, dtype=np.float32)

    def sample(self, batch_size):
        """Sample a batch of experiences.

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.

        Returns
        -------
        obs_batch, next_obs_batch: np.array
            batch of observations, next set of observations seen after executing act_batch
        act_batch: np.array
            batch of actions executed given obs_batch
        rew_batch: np.array
            rewards received as results of executing act_batch
        done_mask: np.array
            done_mask[i] = 1 if executing act_batch[i] resulted in
            the end of an episode and 0 otherwise.
        """
        if self.cache is None:
            # python random.randint is different from np.random.randint; np.random.randint is the same as random.randrange
            idxes = np.random.randint(0, len(self._storage), size = batch_size)
            #idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
            return self._encode_sample(idxes) + (idxes,)
        else:
            # A prepared batch exists; hand it over instead of resampling.
            return self.retrieve_cache()

    def _encode_next_state_data(self, idxes):
        # Batch only the next-state observations for the given indices.
        obses_tp1 = []
        for i in idxes:
            obs_tp1 = self._storage[i][3]
            obses_tp1.append(obs_tp1._frames)
        obses_tp1 = np.array(obses_tp1)
        return obses_tp1

    def sample_next_state_and_cache_indices(self, batch_size):
        """Sample indices, cache them, and return their next-state batch."""
        idxes = np.random.randint(0, len(self._storage), size = batch_size)
        self.cache = (idxes, )
        return self._encode_next_state_data(idxes), idxes

    def update_and_store_cached_data(self):
        """Materialize the full batch for the cached indices."""
        assert self.cache is not None
        idxes = self.cache[-1]
        self.cached_data = self._encode_sample(idxes) + self.cache
        self.indices_replaced_after_caching.clear()

    def retrieve_cache(self):
        """Return the prepared batch and clear the cache state."""
        data = self.cached_data
        self.cache, self.cached_data = None, None
        return data
class PrioritizedReplayBuffer(ReplayBuffer):
def __init__(self, size, alpha, IS_weight_only_smaller, allowed_avg_min_ratio = 10):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(size)
assert alpha >= 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self.it_capacity = it_capacity
self._it_sum = SumSegmentTree(it_capacity)
self._it_max = MaxSegmentTree(it_capacity)
self._max_priority = 100.
self._max_priority = self._max_priority ** self._alpha
self.IS_weight_only_smaller = IS_weight_only_smaller
if IS_weight_only_smaller:
self._it_min = MinSegmentTree(it_capacity, neutral_element=self._max_priority)
self._min_priority = self._max_priority
assert allowed_avg_min_ratio > 1 or allowed_avg_min_ratio <= 0, "'allowed_avg_min_ratio' ({}) is not within the allowed range.".format(allowed_avg_min_ratio)
if allowed_avg_min_ratio <= 0: allowed_avg_min_ratio = float("inf")
self._allowed_avg_min_ratio = float(allowed_avg_min_ratio) # the maximum allowed relative difference between the min and the avg priorities
def add(self, *args, prio=None, **kwargs): # "prio" stands for priority
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
if prio is None:
prio = self._max_priority
else:
assert prio > 0.
prio = max(prio ** self._alpha, self._it_sum._value[1]/(len(self._storage)*self._allowed_avg_min_ratio))
compiled_setitem_sumtree(idx, prio, self._it_sum._value, self.it_capacity)
super(PrioritizedReplayBuffer, self).add(*args, **kwargs)
def _sample_proportional(self, batch_size, beta=1.):
weights, true_weights, idxes = compiled_sample_proportional(batch_size, self._it_sum._value, self._it_sum._capacity, len(self._storage), beta)
if self.IS_weight_only_smaller:
# divide the weights by the largest weight possible, which corresponds to the minimal priority
weights = weights / ( (self._it_sum._value[1]/len(self._storage)/self._min_priority)**beta )
else:
weights = np.minimum(weights, | |
<reponame>hidaruma/caty
#coding: utf-8
from caty.core.async import AsyncQueue
from caty.core.facility import Facility, AccessManager, FakeFacility, ReadOnlyFacility, EntityProxy, AbstractEntityProxy
from caty.util import cout, error_to_ustr, brutal_error_printer
from caty.util.path import join
from caty.jsontools.util import ManifestReader
from caty import mafs, storage, session
from caty.jsontools import xjson
from caty.util.collection import merge_dict
from caty.core.globalconf import *
from caty.core import script, casm
from caty.core.facility import (Facility,
FakeFacility,
FacilitySet,
AccessManager)
from caty.core.memory import AppMemory
from caty.core.std.command.authutil import (RequestToken,
CATY_USER_INFO_KEY,
CATY_USER_INFO_KEY)
from caty.core.customimporter import AppSpecLibraryImporter
from caty.core.exception import throw_caty_exception
from caty.util.collection import ImmutableDict
from caty.util import cout, error_to_ustr, brutal_error_printer
from caty.vcs import BaseClient
from caty.core.i18n import I18nMessage
from caty.core.action.dispatcher import resource_class_from_assocs, ResourceActionEntry
import caty.core.runtimeobject
import caty.core.logger as logger
from copy import deepcopy
from operator import truth as _
import locale
import codecs
import operator
import os
import platform
import sys
import time
import tempfile
# Path of the per-application startup script, run on every boot.
RC_SCRIPT = u'/rc.caty'
# Directory of one-shot startup scripts; each is moved under RC_DONE
# after it runs successfully (see exec_rc_once_script).
RC_ONCE_SCRIPT = u'rc-once'
RC_DONE = u'rc-done'
# Application names reserved by the framework.
RESERVED = set(['this', u'global', u'caty'])
ROOT = 'root'
USER = 'main'
# Log categories recognized by the logging subsystem.
LOG_TYPES = ['app', 'performance', 'exec']
class Application(object):
u"""Caty アプリケーションオブジェクト。
Caty アプリケーションは自身に特有なファイルシステム、スキーマ、コマンドなどを格納し、
外部からの入力に応答して出力を返すオブジェクトである(実際の主たる処理は interpreter に委譲する)。
Caty アプリケーションは他のアプリケーションの参照を行うため、
互いへの参照を内部的には持っている。
ただしそれはあくまでも特別なリソースアクセスのために行う事なので、
基本的に Caty アプリケーション同士は分離された状態で動作する。
"""
def __init__(self, name, no_ambient, group, system):
    u"""Initialization of a Caty application is driven by the bootstrap.
    Here we merely record the application name and initialize internal
    state via _initialize().
    """
    self._initialized = False
    self._no_ambient = no_ambient
    self._physical_path = join(group.path, name)
    self.importer = AppSpecLibraryImporter(os.path.abspath(self._physical_path))
    # Register the importer so {$apDir}/lib modules become importable.
    sys.meta_path.append(self.importer) # {$apDir}/libの読み込みに使う
    self._loaded = False
    self._initialize(name, group, system)
    self._initialized = True
def _initialize(self, name, group, system):
    """Set up all per-application subsystems (filesystems, schema,
    session, storage, VCS, logging, interpreter, actions) unless the
    application is disabled in its configuration."""
    self._name = unicode(name)
    self._system = system
    self._path = unicode(name)
    self._facility_classes = {}
    self._master_entities = {}
    self._facilities = {}
    self._group = group
    self._finished = False
    # Name -> Application map; starts with only this app registered.
    self._app_map = {name: self}
    self._global_config = group.global_config
    system.cout.writeln(system.i18n.get("Loading $name", name=self._path))
    self._configure()
    self.set_parent(system)
    # A disabled app is still fully initialized when it is the one
    # explicitly forced via system.force_app.
    if not self._disabled or system.force_app == name:
        self._init_filetype()
        self._init_mafs()
        self._init_msg()
        self._init_appdirs()
        self._init_schema()
        self._init_session()
        self._init_storage()
        self._init_vcs()
        self._init_memory()
        self._init_log()
        self._init_interpreter()
        self._init_action()
    else:
        self.cout.writeln(u'* ' + system.i18n.get("$name is set not to start", name=self._name))
    self.async_queue = AsyncQueue(self)
    self._lock_set = set()
def set_parent(self, system):
    """Attach this application under the system-wide global app, both
    for module importing and as the schema parent."""
    global_app = system._global_app
    global_app.importer.add_child(self)
    self.parent = global_app
def reload(self, module_name=None):
    """Fully re-initialize the application and re-run its rc script.

    NOTE(review): `module_name` is accepted but ignored — presumably a
    leftover from an older per-module reload; confirm callers.
    """
    self._no_ambient = False
    # Drop modules previously loaded from {$apDir}/lib before rebuilding.
    self.importer.discard()
    self._initialize(self._name, self._group, self._system)
    self.finish_setup()
    self.exec_rc_script()
def force_load(self, module_name):
    """Forcibly (re)load a single schema module.

    `module_name` is either a mafs path ('place@path', optionally with a
    'this:' prefix on the path part) compiled in-memory, or a plain
    module name loaded on demand. On failure the schema state is
    reloaded so the application stays consistent.
    """
    import traceback
    from caty.core.casm.module import InMemoryModule
    from caty.util.path import is_mafs_path
    self._system.casm._core.clear_namespace()
    self.parent._schema_module.clear_namespace()
    self._schema_module.clear_namespace()
    error = False
    try:
        if is_mafs_path(module_name):
            path = module_name
            if '@' in path:
                place, path = path.split('@', 1)
            else:
                place = u'data'
            if path.startswith('this:'):
                path = path.replace('this:', '')
            mafs = getattr(self, place)
            imm = InMemoryModule(self, self._schema_module)
            msg = self.i18n.get('Schema: $path', path=module_name)
            self.cout.write(u' * ' + msg)
            imm.compile(mafs.start().open(path).read())
            self.cout.writeln(u'OK')
            self._schema_module.sub_modules[imm.name] = imm
        else:
            self._schema_module.load_on_demand(module_name)
    except:
        if is_mafs_path(module_name):
            self.cout.writeln(u'NG')
        error = True
        # BUG FIX: previously wrote the traceback *module object* here
        # (self.cout.writeln(traceback)), printing its repr instead of
        # the actual stack trace.
        self.cout.writeln(traceback.format_exc())
    finally:
        if error:
            self.cout.writeln(self.i18n.get(u'Failed to force-load. Reloading system data'))
            try:
                self.reinit_schema()
                self._loaded = True
            except:
                # Same fix as above: emit the formatted traceback text.
                self.cout.writeln(traceback.format_exc())
                self.cout.writeln(self.i18n.get(u'Failed to force-load. Reloading system data'))
        if not is_mafs_path(module_name):
            self._schema_module.discard_module(module_name)
            self._system.casm._core.clear_namespace()
            self.parent._schema_module.clear_namespace()
            self._schema_module.clear_namespace()
            self.reinit_schema()
            self.reinit_interperter()
def reinit_schema(self):
    """Re-resolve this application's schema module, after asking the
    parent application (if any) to do the same first."""
    parent = self.parent
    if parent:
        parent.reinit_schema()
    message = self.i18n.get(u'Reconfiguring schema: $name', name=self.name)
    self.cout.writeln(' * ' + message)
    self._schema_module.resolve()
def reinit_interperter(self):
    # Rebuild the command interpreter, parent first.
    # NOTE(review): the name is misspelled ("interperter") but it is the
    # public name used by callers (e.g. force_load) and by the recursive
    # call below — renaming requires a coordinated change.
    if self.parent:
        self.parent.reinit_interperter()
    self._init_interpreter()
def exec_rc_script(self):
    # Run the per-boot startup script (RC_SCRIPT) in console mode, if it
    # exists. Errors are reported but do not abort application startup.
    if self._disabled:
        return
    if self._no_ambient:
        return
    scripts = self._scripts.start()
    if isinstance(scripts, FakeFacility):
        # No script storage backing this app; nothing to run.
        return
    if not scripts.open(RC_SCRIPT).exists:
        return
    self.cout.writeln(self.i18n.get("Running init script of $name", name=self.name))
    facilities = self.create_facilities()
    interpreter = self.interpreter.file_mode(facilities)
    modes = [unicode('console')]
    self.init_env(facilities, self._system.debug, modes, self._system, {'PATH_INFO': u'/'})
    try:
        interpreter.build(u'call ' + RC_SCRIPT)(None)
    except Exception, e:
        import traceback
        self.cout.writeln(traceback.format_exc())
        self.cout.write(error_to_ustr(e))
        # NOTE(review): "occuered" is misspelled, but this string is an
        # i18n message key — fixing it requires a catalog update too.
        self.i18n.write("An error occuered while running init script of $name", name=self.name)
    facilities._facilities.clear()
def exec_rc_once_script(self):
    # Run each one-shot script under RC_ONCE_SCRIPT exactly once: after a
    # successful run the script file is renamed into the rc-done
    # directory with a timestamp suffix. A failure re-raises and aborts.
    # NOTE(review): '/rc-done' is hard-coded below although the RC_DONE
    # constant exists at module level — confirm and unify.
    if self._disabled:
        return
    if self._no_ambient:
        return
    scripts = self._scripts.start()
    if isinstance(scripts, FakeFacility):
        return
    if not scripts.opendir('/' + RC_ONCE_SCRIPT).exists:
        return
    d = scripts.opendir('/' + RC_ONCE_SCRIPT)
    for f in d.read(True):
        # Only plain *.caty files (single dot) are eligible.
        if not f.path.endswith('.caty') or f.path.count('.') != 1:
            continue
        facilities = self.create_facilities()
        interpreter = self.interpreter.file_mode(facilities)
        modes = [unicode('console')]
        self.init_env(facilities, self._system.debug, modes, self._system, {'PATH_INFO': u'/'})
        try:
            self.i18n.write("Running $path of $name", path=f.path, name=self.name)
            interpreter.build('call ' + f.path)(None)
        except Exception, e:
            import traceback
            self.cout.writeln(traceback.format_exc())
            self.cout.write(error_to_ustr(e))
            self.i18n.write("An error occuered while running rc-once of $name", name=self.name)
            self._system.error_logger.error(self.i18n.get("An error occuered while running rc-once of $name", name=self.name) + ':' + traceback.format_exc())
            raise
        s = scripts.open(f.path)
        import time
        if not scripts.opendir('/rc-done').exists:
            scripts.opendir('/rc-done').create()
        # Archive the script so it will not run again.
        s.rename('/rc-done/%s.%s.caty' % (f.basename.rsplit('.', 1)[0], time.strftime('%Y%m%d%H%M%S')))
        facilities._facilities.clear()
    scripts.commit()
def add_app(self, app):
if app.name in app:
raise Exception(self.i18n.get("Application name conflicted: $name", name=app.name))
self._app_map[app.name] = app
def get_app(self, name):
if name == 'this':
return self
return self._system.get_app(name)
def release(self, key):
try:
import fcntl
with open(sys.argv[0]) as f:
fcntl.flock(lockFile.fileno(), fcntl.LOCK_EX)
self._release(key)
except:
fcntl = None
self._release(key)
    def _release(self, key):
        """Remove *key* from the lock set, ignoring all failures.

        The bare except is deliberate: releasing an unknown key (or a
        missing/odd lock-set object) must never raise.
        """
        try:
            self._lock_set.remove(key)
        except:
            pass
    def _configure(self):
        """Load, verify and cache the application manifest into attributes.

        Reads the manifest via :meth:`_read_config`, merges it with the
        defaults (:meth:`_verify_config`), then unpacks every setting into
        private attributes used throughout the application's lifetime.
        Warnings (unknown storage backends) are printed rather than raised.
        """
        cfg = self._verify_config(self._read_config())
        self._disabled = cfg['disabled']
        self._description = cfg['description']
        self._more_description = cfg.get('moreDescription', None)
        self._implLang = cfg['implLang']
        # NOTE(review): '_assgin' is a historical misspelling of 'assign';
        # other methods (_init_appdirs, _init_mafs) read it, so it stays.
        self._assgin = cfg['assign']
        self._indexfiles = cfg.get('indexFiles', ['index.html', 'index.htm', 'index.cgi', 'index.act'])
        self._filetypes = cfg.get('filetypes', {})
        # Storage backends default to sqlite + plain-file implementations.
        self._storage_confs = cfg.get('storage', {}).get('configs',
            {
                'sqlite': {'module': u'caty.storage.sqlite', 'conf': {'path': u'./storage.db'}},
                'file': {'module': u'caty.storage.file', 'conf': {'data_dir': u'./file_storage'}},
            })
        for k, v in self._storage_confs.items():
            # Warn when an app-level backend does not match the global config.
            if self._global_config.storage_conf.get(k, {}).get('module') != v.get('module'):
                self.cout.writeln('[Warning] Unknown storage backend: %s' % k)
        self._default_storage = cfg.get('storage', {}).get('defaultBackend', 'file')
        self._storage_conf = self._storage_confs.get(self._default_storage)
        if not self._storage_conf:
            self.cout.writeln('[Warning] Unknown storage backend: %s' % self._default_storage)
        self._encoding = cfg.get('encoding', 'utf-8')
        # Global (system-wide) tables shared by reference, not copied.
        self._mime_types = self._global_config.mime_types
        self._raw_associations = self._global_config.associations
        self._site_path = self._name
        self._hosturl = self._global_config.host_url
        self._server_module_name = self._global_config.server_module_name
        self._app_spec = AppConfig()
        self._app_spec.update(cfg.get('appProp', {}))
        self._annotations = cfg.get('anno', {})
        self._deprecated = cfg.get('deprecated', False)
        self._manifest = cfg
        self._lock_wait_limit = cfg.get('lockWaitLimit', 60)
        self._facilities_conf = cfg.get('facilities', {})
        self._backend_conf = cfg.get(u'facilityBackends', {})
def _read_config(self):
app_dir = self._group._make_super_root(join(self._group.path, self.name)).start()
try:
cfg = ManifestReader(app_dir.create(u'reads'), u'/app-manifest.xjson', u'/app-manifest', self.default_conf()).read()
except Exception, e:
self._system.i18n.write(u'Failed to parse JSON: $path\n$error', path=f.path, error=error_to_ustr(e))
raise
manifest_type = self._system._casm._core.schema_finder.get_type('AppManifest')
manifest_type.validate(cfg)
cfg = manifest_type.fill_default(cfg)
ft = app_dir.create(u'reads').open('/_filetypes.xjson')
if ft.exists:
try:
ft = xjson.loads(ft.read())
except Exception, e:
self.i18n.write(u'Failed to parse JSON: $path\n$error', path=ft.path, error=error_to_ustr(e))
else:
ft = {}
if 'filetypes' in cfg:
cfg['filetypes'] = merge_dict(cfg['filetypes'], ft)
else:
if ft:
cfg['filetypes'] = ft
if 'missingSlash' not in cfg:
cfg['missingSlash'] = u'redirect'
return cfg
    def _init_msg(self):
        """Install the app-local i18n catalogue, falling back to the system's.

        Any failure while loading app messages is swallowed on purpose: a
        broken message file must not prevent the application from starting.
        """
        try:
            msg = self._load_messages()
            self.i18n = self._system.i18n.extend(msg)
        except Exception, e:
            self.i18n = self._system.i18n
def _load_messages(self):
default_file = self._messages_fs.open('/default.xjson')
try:
base = xjson.loads(default_file.read()) if default_file.exists else []
except Exception, e:
self.i18n.write(u'Failed to parse JSON: $path\n$error', path='/default.xjson', error=error_to_ustr(e))
raise
msg = {}
for m in base:
msg[m] = {}
languages = set([])
for f in self._messages_fs.opendir('/').read():
if not (f.path.endswith('.xjson') and f.path != '/default.xjson'):
continue
try:
messages = xjson.loads(f.read())
except Exception, e:
self.i18n.write(u'Failed to parse JSON: $path\n$error', path=path, error=error_to_ustr(e))
raise
lang = f.path.split('/')[-1].split('.')[0]
languages.add(lang)
for e_msg, trans in messages.items():
if e_msg not in msg:
if self._system.debug:
print '[DEBUG]', self._system.i18n.get('this message is not contained default.xjson: $message($lang)', message=e_msg, lang=lang)
msg[e_msg] = {lang: trans}
else:
msg[e_msg][lang] = trans
for k, v in msg.items():
for l in languages:
if l not in v and self._system.debug:
print '[DEBUG]', self._system.i18n.get('this message is not translated: $message($lang)', message=k, lang=l)
return msg
    def default_conf(self):
        """Return the baseline manifest used when settings are absent.

        The 'assign' map names the on-disk directory for each logical
        application area (public files, actions, commands, schemata, ...).
        """
        return {
            'disabled': False,
            'description': u'%s' % self.name,
            'implLang': u"python",
            'assign': {
                'pub': u'pub',
                'include': u'include',
                'actions': u'actions',
                'commands': u'commands',
                'scripts': u'scripts',
                'schemata': u'schemata',
                'behaviors': u'behaviors',
                'data': u'data',
                'messages': u'messages',
            },
        }
    def _verify_config(self, obj):
        """Overlay *obj* on the default manifest ('pre' keeps obj's values)."""
        return merge_dict(obj, self.default_conf(), 'pre')
    def _init_appdirs(self):
        """Ensure every assigned application directory (plus 'lib') exists.

        Missing directories are auto-created and the creation is logged;
        the mafs transaction is committed once at the end.
        """
        app_mafs = self._group._make_super_root(join(self._group.path, self._path)).start()
        app_dir = app_mafs.create(u'uses')
        generated = []
        for k in self._assgin.values() + ['lib']:
            d = app_dir.opendir('/' + k)
            if not d.exists:
                self.i18n.write("$location does not exists in $app, auto-generating", location=k, app=self.name)
                d.create()
                generated.append(k)
        app_mafs.commit()
    def _init_filetype(self):
        """Register the app's filetype table into the MIME/association maps.

        Each key in ``self._filetypes`` may be a '|'-separated list of
        extensions.  Associations accumulate per extension: the first
        definition replaces the global entry, later ones merge into it.
        """
        defined_assoc = set()
        for k, v in self._filetypes.items():
            assoc = v.get('assoc', None)
            if assoc:
                if '|' in k:
                    patterns = k.split('|')
                else:
                    patterns = [k]
                for p in patterns:
                    if p in defined_assoc:
                        # Seen before in this app: merge rather than replace.
                        self._raw_associations[p].update(assoc)
                    else:
                        self._raw_associations[p] = assoc
                        defined_assoc.add(p)
            it = v.get('isText', True)
            # Content type defaults according to the text/binary flag.
            ct = v.get('contentType', u'text/plain' if it else u'application/octet-stream')
            ds = v.get('description', u'')
            t = {
                'isText': it,
                'contentType': ct,
                'description': ds,
            }
            keys = k.split('|')
            for a in keys:
                self._mime_types[a] = t
    def _init_mafs(self):
        """Mount a mafs filesystem for each assigned application area.

        Public-facing areas (pub, include, actions, data, behaviors) are kept
        as raw mafs objects; code areas (commands, schemata, lib, messages)
        are opened immediately in read mode.
        """
        root = join(self._group.path, self._path)
        assign = self._assgin
        mafs_init = lambda type, path: self._global_config.mafs_initializer(self, self._system, type)(root, path, self._mime_types, self._encoding)
        self._pub = mafs_init('pub', assign['pub'])
        self._include = mafs_init('include', assign['include'])
        self._actions = mafs_init('actions', assign['actions'])
        self._data = mafs_init('data', assign['data'])
        self._behaviors = mafs_init('behaviors', assign['behaviors'])
        self._command_fs = mafs_init('commands', assign['commands']).start().create(u'reads')
        self._schema_fs = mafs_init('schemata', assign['schemata']).start().create('reads')
        self._lib_fs = mafs_init('lib', 'lib').start().create('reads')
        self._messages_fs = mafs_init('messages', assign['messages']).start().create(u'reads')
        # Script files: fall back to a FakeFacility when the mount is absent.
        _scripts = mafs_init('scripts', assign['scripts'])
        if _scripts:
            self._scripts = _scripts
        else:
            self._scripts = FakeFacility()
def update_filetypes(self, filetypes):
if filetypes:
for k, v in filetypes.items():
self._mime_types[k] = v
    def _init_schema(self):
        """Create the schema module: blank when running without ambient state."""
        if self._no_ambient:
            self._schema_module = self._system.casm.make_blank_module(self)
        else:
            self._schema_module = self._system.casm.make_app_module(self)
        #self._schema_module.compile()
    def _init_interpreter(self):
        """Build the script interpreter bound to this app's schema module."""
        self._interpreter = script.initialize(self._schema_module, self, self._system)
    def _init_session(self):
        """Adopt the globally configured session storage."""
        self._session_storage = self._global_config.session.storage
    def _init_storage(self):
        """Initialize the storage backend chosen in _configure()."""
        self._storage = storage.initialize(self._storage_conf)
def _init_vcs(self):
if self._global_config.vcs_module:
self._vcs = self._global_config.vcs_module.VCSClient
else:
self._vcs = BaseClient
    def _init_memory(self):
        """Create the per-app memory store once; reinitialisation keeps it."""
        if not self._initialized:
            self._memory = AppMemory()
    def _init_log(self):
        """Initialise one logger per known log type, only on first startup."""
        if not self._initialized:
            for tp in LOG_TYPES:
                logger.init(self, tp)
def facility_name_conflicted(self, name):
if name in self._master_entities:
raise Exception(self.i18n.get("Facility | |
a, b = t
return a + b
with self.assertRaisesRegexWithHighlight(RuntimeError, "Provided tuple is not fully defined/refined", "t"):
s = torch.jit.script(fn)
    def test_augmented_assign(self):
        """Augmented tensor assignments (+=, -=, /=, *=) script correctly."""
        def foo(a, b):
            a += b
            a -= b
            a /= b
            a *= b
            return a, b
        self.checkScript(foo, (torch.rand(3), torch.rand(3)))
    def test_ignored_props(self):
        """Properties listed in __jit_ignored_attributes__ are not compiled.

        The 'ignored' property raises if evaluated, so scripting only
        succeeds when the compiler genuinely skips it; the ignored method
        then sees the raw property object rather than its value.
        """
        class A(nn.Module):
            __jit_ignored_attributes__ = ["ignored", "ignored_return_val"]
            def __init__(self):
                super().__init__()
            @property
            def ignored(self):
                raise ValueError("shouldn't be called")
            @property
            def ignored_return_val(self):
                return 1
            @torch.jit.ignore
            def call(self):
                return self.ignored_return_val
        f = torch.jit.script(A())
        # jank way to test if there is no error
        self.assertTrue(isinstance(f, torch.jit.ScriptModule))
        self.assertTrue(isinstance(f.call(), property))
    def test_pass(self):
        """'pass' is accepted in loop and in both branches of a conditional."""
        def foo(x):
            # type: (bool) -> int
            for _i in range(3):
                pass
            if x:
                pass
            else:
                pass
            return 3
        self.checkScript(foo, (True,))
    def test_lhs_indexing(self):
        """Assignment through tensor indexing (a[0] = b) scripts correctly."""
        def foo(a, b):
            a = a.clone()
            a[0] = b
            return a
        self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
    def test_lhs_advanced_indexing_assignment(self):
        """Boolean-mask assignment (a[mask] = y[mask]) scripts correctly."""
        def foo(x, y):
            a = torch.exp(x)
            b = x == 1
            a[b] = y[b]
            return a
        self.checkScript(foo, (torch.ones(4, 3), torch.ones(4, 3)))
    def test_lhs_advanced_indexing_augmented_assignment(self):
        """Boolean-mask augmented assignment (a[mask] += ...) scripts correctly."""
        def foo(x, y):
            a = torch.exp(x)
            b = x == 1
            a[b] += y[b]
            return a
        self.checkScript(foo, (torch.ones(4, 3), torch.ones(4, 3)))
    def test_lhs_indexing_list(self):
        """Assignment to a list element (ls[0] = b) scripts correctly."""
        def foo(a, b):
            ls = [a]
            ls[0] = b
            return ls
        self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
    def test_inplace_copy_script(self):
        """In-place Tensor.copy_() scripts correctly."""
        def foo(x):
            a = torch.rand(3, 4)
            a.copy_(x)
            return a
        self.checkScript(foo, (torch.rand(3, 4),))
    def test_lhs_indexing_increment(self):
        """Augmented assignment through indexing (a[0] += b) scripts correctly."""
        def foo(a, b):
            a[0] += b
            return a
        self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
    def test_lhs_indexing_increment_list(self):
        """Augmented assignment to a tensor held in a list scripts correctly."""
        def foo(a, b):
            a = a.clone()
            ls = [a, b]
            ls[0] += b
            return ls
        self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
    def test_lhs_indexing_increment_list_prim(self):
        """Augmented assignment to an int-list element scripts correctly."""
        def foo():
            ls = [1, 2, 3]
            ls[0] += 5
            return ls
        self.checkScript(foo, ())
    def test_lhs_indexing_multi(self):
        """Tuple unpacking where one target is an indexed tensor element."""
        def foo(a, b):
            a = a.clone()
            foo, a[0], bar = (1, b, 3)
            return foo, a, bar
        self.checkScript(foo, (torch.rand(2, 3), torch.rand(3)))
    def test_bool_dispatch(self):
        """max_pool1d's bool-dispatched overloads resolve correctly.

        return_indices=False yields a Tensor, =True a (Tensor, Tensor) pair;
        exercised via keyword, full-keyword, default, and positional calls.
        """
        with torch._jit_internal._disable_emit_hooks():  # TODO: Python print broadcasting list
            def kwarg_false(x):
                # type: (Tensor) -> Tensor
                return F.max_pool1d(x, 1, 1, return_indices=False)
            self.checkScript(kwarg_false, (torch.randn(3, 3, 3),))
            def kwarg_true(x):
                # type: (Tensor) -> Tuple[Tensor, Tensor]
                return F.max_pool1d(x, 1, 1, return_indices=True)
            self.checkScript(kwarg_true, (torch.randn(3, 3, 3),))
            def full_kwarg_false(x):
                # type: (Tensor) -> Tensor
                return F.max_pool1d(x, 1, 1, ceil_mode=False, return_indices=False)
            self.checkScript(full_kwarg_false, (torch.randn(3, 3, 3),))
            def full_kwarg_true(x):
                # type: (Tensor) -> Tuple[Tensor, Tensor]
                return F.max_pool1d(x, 1, 1, ceil_mode=False, return_indices=True)
            self.checkScript(full_kwarg_true, (torch.randn(3, 3, 3),))
            def use_default(x):
                # type: (Tensor) -> Tensor
                return F.max_pool1d(x, 1, 1)
            self.checkScript(use_default, (torch.randn(3, 3, 3),))
            def arg_false(x):
                # type: (Tensor) -> Tensor
                return F.max_pool1d(x, 1, 1, 0, 1, False, False)
            self.checkScript(arg_false, (torch.randn(3, 3, 3),))
            def arg_true(x):
                # type: (Tensor) -> Tuple[Tensor, Tensor]
                return F.max_pool1d(x, 1, 1, 0, 1, False, True)
            self.checkScript(arg_true, (torch.randn(3, 3, 3),))
    def test_infer_size(self):
        """torch._C._infer_size (broadcast shape inference) scripts correctly."""
        from torch._C import _infer_size
        def fn(x, y):
            # type: (Tensor, Tensor) -> List[int]
            return _infer_size(x.size(), y.size())
        self.checkScript(fn, (torch.ones(2, 4, 2), torch.ones(2, 4, 2)))
    def test_hash(self):
        """Scripted hash() agrees across equal values and differs otherwise.

        For each supported type (int, float, str), equal inputs must hash
        equal and distinct inputs must hash differently for these samples.
        """
        def tester(fn, inputs):
            for x in inputs:
                for y in inputs:
                    if x == y:
                        self.assertEqual(fn(x), fn(y))
                    else:
                        self.assertNotEqual(fn(x), fn(y))
        @torch.jit.script
        def int_hash(x):
            # type: (int) -> int
            return hash(x)
        @torch.jit.script
        def float_hash(x):
            # type: (float) -> int
            return hash(x)
        @torch.jit.script
        def str_hash(x):
            # type: (str) -> int
            return hash(x)
        tester(int_hash, (20, 21, 22))
        tester(float_hash, (20.0, 21.00001, 22.443))
        tester(str_hash, ("", "hello", "a"))
    def test_id(self):
        """id() is rejected for scalar values but works on class instances.

        Scripted id() on ints/None raises; on TorchScript class objects it
        yields distinct identities for distinct instances.
        """
        with self.assertRaisesRegex(RuntimeError, "Expected a value"):
            @torch.jit.script
            def test_id_scalars():
                return id(2) == id(None)
        @torch.jit.script
        class FooTest(object):
            def __init__(self, x):
                self.foo = x
            def getFooTest(self):
                return self.foo
        @torch.jit.script
        def test_id_class_types():
            obj1 = FooTest(torch.tensor(3))
            obj2 = FooTest(torch.tensor(2))
            assert obj1 is not obj2
            assert id(obj1) != id(obj2)
            assert id(obj1) != id(None)
            return True
        self.assertTrue(test_id_class_types())
    def test_mutable_dce(self):
        """Dead-code elimination removes an unused mutated local (b), keeps a.

        The FileCheck counts depend on the exact statements below, so the
        body must not be restructured.
        """
        @torch.jit.script
        def foo():
            a = torch.rand(2, 3)
            a += torch.rand(2, 3)
            b = torch.rand(2, 3)
            b += torch.rand(2, 3)
            # b should be cleaned up but not a
            return a
        FileCheck().check_count("aten::rand", 2, exactly=True) \
            .check_count("aten::add", 1, exactly=True).run(str(foo.graph))
    def test_mutable_dce_block(self):
        """DCE across an If block: the branch-unused value is eliminated."""
        @torch.jit.script
        def foo():
            a = torch.rand(2, 3)
            a += torch.rand(2, 3)
            b = torch.rand(2, 3)
            if bool(a > torch.zeros(2, 3)):
                b += torch.rand(2, 3)
                a += torch.rand(2, 3)
            # a should be cleaned up but not b
            return b
        FileCheck().check("prim::If").check_count("aten::rand", 1, exactly=True) \
            .run(str(foo.graph))
    def test_mutable_dce_graph_input(self):
        """Mutation of a graph input survives DCE even without a return."""
        @torch.jit.script
        def foo(a):
            a += torch.rand(2, 3)
            # shouldn't clean up `a` even though it's not used in the output
        FileCheck().check("aten::rand").check("aten::add").run(str(foo.graph))
    def test_mutable_dce_list(self):
        """A value aliased through a list (wildcard) is not dead-code removed."""
        @torch.jit.script
        def foo(a):
            l = []
            l.append(a)
            c = l[0]
            b = torch.rand(2, 3)
            c += torch.rand(2, 3)
            return b
        # c does not get cleaned up because there is a wildcard + mutation
        FileCheck().check_count("aten::rand", 2, exactly=True).run(str(foo.graph))
    def test_mutable_dce_loop(self):
        """Inside a loop, a truly dead value is removed but an aliased one stays."""
        @torch.jit.script
        def foo(a):
            l = []
            l.append(a)
            i = 0
            b = torch.rand(2, 3)
            while i < 1:
                dead = torch.rand(2, 3)
                c = l[0]
                c += torch.rand(2, 3)
                i += 1
            return b
        FileCheck().check("prim::Loop").check_not("aten::rand").check("aten::__getitem__") \
            .check_count("aten::rand", 1, exactly=True).run(str(foo.graph))
    def test_mutable_dce_indirect_wildcards(self):
        """A view reached through a list observes mutation of its base tensor."""
        def fn():
            x = torch.ones(2, 3)
            x_1 = x.view(-1)
            l = []
            l.append(x_1)
            x_view = l[0]
            x.add_(torch.ones(2, 3))
            return x_view
        self.checkScript(fn, ())
    def test_mutable_dce_indirect_wildcard_write(self):
        """An indexed write before list-append is preserved in the result."""
        def fn():
            indexes = torch.jit.annotate(List[Tensor], [])
            word_ids = torch.zeros(10, dtype=torch.int32)
            word_ids[1] = 1
            indexes.append(word_ids)
            return word_ids
        self.checkScript(fn, ())
    def test_mutable_dce_wildcards(self):
        """A tensor aliased via a list reflects a later in-place add."""
        def fn():
            x = torch.ones(2, 3)
            l = []
            l.append(x)
            x_view = l[0]
            x.add_(torch.ones(2, 3))
            return x_view
        self.checkScript(fn, (), profiling=ProfilingMode.SIMPLE)
    def test_cpp_function_tensor_str(self):
        """Printing a grad-requiring scripted result must not crash.

        Output is captured and discarded; the test only checks no error.
        """
        x = torch.randn(2, 2)
        scale = torch.randn(2, 2, requires_grad=True)
        shift = torch.randn(2, 2, requires_grad=True)
        @torch.jit.script
        def fn(x, scale, shift):
            return scale * x + shift
        with self.capture_stdout() as captured:
            print(fn(x, scale, shift))
    def test_string_index(self):
        """Positive and negative string indexing script correctly."""
        def fn(x):
            # type: (str)
            return x[2], x[-1]
        self.checkScript(fn, ("abcde",))
def test_ord(self):
def fn(x):
# type: (str) -> int
return ord(x)
self.checkScript(fn, ("h"))
self.checkScript(fn, ("y"))
def index_str_to_tensor(s):
# type: (str) -> Tensor
return torch.tensor(ord(s)) # noqa: T484
s = u'\u00a3'.encode('utf8')[:1]
self.checkScript(index_str_to_tensor, (s,))
    def test_chr(self):
        """chr() on an int scripts correctly."""
        def fn(x):
            # type: (int) -> str
            return chr(x)
        self.checkScript(fn, (1,))
        self.checkScript(fn, (97,))
    def test_round(self):
        """round() scripts correctly for both float and int inputs."""
        def round_float(x):
            # type: (float) -> float
            return round(x)
        def round_int(x):
            # type: (int) -> float
            return round(x)
        self.checkScript(round_float, (1.5,))
        self.checkScript(round_int, (2,))
    def test_convert_base(self):
        """hex()/oct()/bin() conversions script correctly across sign and zero."""
        def test_hex(x):
            # type: (int) -> str
            return hex(x)
        def test_oct(x):
            # type: (int) -> str
            return oct(x)
        def test_bin(x):
            # type: (int) -> str
            return bin(x)
        numbers = [-1000, -10, 0, 1, 10, 2343]
        for n in numbers:
            self.checkScript(test_bin, (n,))
            self.checkScript(test_oct, (n,))
            self.checkScript(test_hex, (n,))
    @unittest.skipIf(IS_WINDOWS or IS_SANDCASTLE, "NYI: TemporaryFileName support for Windows or Sandcastle")
    def test_get_set_state(self):
        """Custom __getstate__/__setstate__ round-trip through save/load.

        Each __setstate__ adds 10 to the saved buffers, so the loaded module
        must differ from the original by exactly 10 — including the nested
        submodule.  A second, plain nn.Module case checks @torch.jit.export
        state hooks with a scalar payload.
        """
        class Root(torch.jit.ScriptModule):
            __constants__ = ['number']
            def __init__(self, number):
                super(Root, self).__init__()
                self.register_buffer('buffer1', torch.ones(2, 2))
                self.register_buffer('buffer2', torch.ones(2, 2))
                self.number = number
            @torch.jit.script_method
            def __getstate__(self):
                return (self.buffer1, self.buffer2, 74, self.training)
            @torch.jit.script_method
            def __setstate__(self, state):
                self.buffer1 = state[0] + 10
                self.buffer2 = state[1] + 10
                self.training = state[3]
        class M(torch.jit.ScriptModule):
            __constants__ = ['number']
            def __init__(self, number, submodule):
                super(M, self).__init__()
                self.register_buffer('buffer1', torch.ones(2, 2))
                self.register_buffer('buffer2', torch.ones(2, 2))
                self.number = number
                self.submodule = submodule
            @torch.jit.script_method
            def __getstate__(self):
                return (self.buffer1, self.buffer2, 74, self.submodule, self.training)
            @torch.jit.script_method
            def __setstate__(self, state):
                self.buffer1 = state[0] + 10
                self.buffer2 = state[1] + 10
                self.submodule = state[3]
                self.training = state[4]
        with TemporaryFileName() as fname:
            m = M(23, submodule=Root(99))
            m.save(fname)
            loaded = torch.jit.load(fname)
        # Check original module
        self.assertEqual(m.buffer1, torch.ones(2, 2))
        self.assertEqual(m.buffer2, torch.ones(2, 2))
        # Check top level module
        self.assertEqual(loaded.buffer1, torch.ones(2, 2) + 10)
        self.assertEqual(loaded.buffer2, torch.ones(2, 2) + 10)
        # Check submodule
        self.assertEqual(loaded.submodule.buffer1, torch.ones(2, 2) + 10)
        self.assertEqual(loaded.submodule.buffer2, torch.ones(2, 2) + 10)
        # Check simpler module
        class NoArgState(torch.nn.Module):
            def __init__(self):
                super(NoArgState, self).__init__()
                self.register_buffer('buffer1', torch.ones(2, 2))
                self.register_buffer('buffer2', torch.ones(2, 2))
            def forward(self):
                pass
            @torch.jit.export
            def __getstate__(self):
                return 5, self.training
            @torch.jit.export
            def __setstate__(self, state):
                self.buffer1 = torch.ones(2, 2) + state[0]
                self.buffer2 = torch.ones(2, 2) + 10
                self.training = state[1]
        with TemporaryFileName() as fname:
            m = torch.jit.script(NoArgState())
            m.save(fname)
            loaded = torch.jit.load(fname)
            self.assertEqual(loaded.buffer1, torch.ones(2, 2) + 5)
            self.assertEqual(loaded.buffer2, torch.ones(2, 2) + 10)
    def test_string_slicing(self):
        """String slicing scripts correctly: normal, negative, empty, clamped."""
        def fn1(x):
            # type: (str) -> str
            return x[1:3]
        def fn2(x):
            # type: (str) -> str
            return x[-1:3]
        def fn3(x):
            # type: (str) -> str
            return x[3:1]
        def fn4(x):
            # type: (str) -> str
            return x[3:100]
        self.checkScript(fn1, ("abcdefghi",))
        self.checkScript(fn2, ("abcdefghi",))
        self.checkScript(fn3, ("abcdefghi",))
        self.checkScript(fn4, ("abcdefghi",))
def test_early_return_closure(self):
code = dedent('''
def tanh(self):
output = torch.tanh(self)
def backward(grad_output):
pass
return output, backward
''')
cu = torch.jit.CompilationUnit(code)
g = cu.tanh.graph
FileCheck().check_count("prim::Closure_0", 2).check("NoneType = prim::Constant") \
.check_next("return").run(g)
code = dedent('''
def tanh(self):
output = torch.tanh(self)
def backward(grad_output):
a = | |
from entity import *
# instruction centric modeling
# instruction centric checking
# parameterized model
"""
state
d w t : r
d w : L1 L1.fifo.head L1.fifo.tail lock (single)
d : L2 L2.fifo.head L2.fifo.tail lock (addr-> lock)
"""
# make sure you only read once
"""
state(d).L2.isL2()
L2fr x,entity(d) state(d).L2(x).fr
L2hy x,entity(d) state(d).L2(x).hy
L2val x,entity(d) state(d).L2(x).value
L2fifohead entity(d) state(d).L2.fifo.head
L2fifotail entity(d) state(d).L2.fifo.tail
lockTd x,entity(d) state(d).lockfile(x).ready(d,w,t) := lock(d,w,t)
lockTw x,entity(d)
lockTt x,entity(d)
locked x,entity(d)
L1fr x,entity(d,w) state(d,w).L1(x).fr
L1hy x,entity(d,w) state(d,w).L1(x).hy
L1val x, entity(d,w) state(d,w).L1(x).value __call__
L1fifohead entity(d,w) state(d,w).L1.fifo.head
L1fifotail entity(d,w) state(d,w).L1.fifo.tail
state(d,w).L1.isL1()
rmwTd entity(d,w)
rmwTw entity(d,w)
rmwTt entity(d,w)
rmwed entity(d,w)
r r,entity(d,w,t)
forallDev( lambda )
forallWg( d , lambda )
rlViewInfo =
( None, d, w , num ) : marker : L1 flush
( None, d, None, num ) : marker : L2 flush
( x , d, None, num ) : L2 x
( x , d, w , num ) : L1 x
"""
# Encodings for cache-line status bits used throughout the model.
DIRTY = True      # value of the "hy" (dirty) bit when the line is modified
CLEAN = False
VALID = True      # value of the "fr" (valid/fresh) bit when the line is usable
INVALID = False
FLUSH_MARKER = None  # sentinel enqueued into a FIFO to mark a flush point
def newVariable(name, varname): # it uses name, is it okay?
    """Create a z3 variable named *varname*, typed by the state *name*.

    Boolean-valued states become z3.Bool, bit/abstract-valued ones z3.Int.
    Asserts when the (entity-stripped) state name is not registered in
    either table.
    """
    stateName = removeEntity(name)
    if stateName in BoolValState:
        return z3.Bool(varname)
    if stateName in BVState:
        return z3.Int(varname)
    assert False
def L1Enqueue(state, inst):
    # Enqueue on the L1 FIFO: advance the head pointer by one.
    new_head = state('L1fifohead') + 1
    inst.setUpdateToState('L1fifohead', new_head)
def L1Dequeue(state, inst):
    # Dequeue from the L1 FIFO: advance the tail pointer by one.
    new_tail = state('L1fifotail') + 1
    inst.setUpdateToState('L1fifotail', new_tail)
def L2Enqueue(state, inst):
    # Enqueue on the L2 FIFO: advance the head pointer by one.
    new_head = state('L2fifohead') + 1
    inst.setUpdateToState('L2fifohead', new_head)
def L2Dequeue(state, inst):
    # Dequeue from the L2 FIFO: advance the tail pointer by one.
    new_tail = state('L2fifotail') + 1
    inst.setUpdateToState('L2fifotail', new_tail)
def lockRMW(inst,state, entity = None): # you can overwrite overWriteVA to write to a larger group!
    """Acquire the RMW lock for *entity*: set the flag and record the owner.

    Records the issuing instruction's (d, w, t) identity so readyRMW can
    later recognise the owner.  The ``entity`` keyword selects which
    entity's copy of the rmw state to write (None = the instruction's own).
    """
    inst.setUpdateToState( 'rmwed' , True , entity = entity)
    inst.setUpdateToState( 'rmwTd' , inst.Eid.d , entity = entity)
    inst.setUpdateToState( 'rmwTw' , inst.Eid.w , entity = entity)
    inst.setUpdateToState( 'rmwTt' , inst.Eid.t , entity = entity)
def unlockRMW(inst,state, entity = None):
    """Release the RMW lock for *entity* (owner fields are left stale)."""
    inst.setUpdateToState( 'rmwed' , False , entity = entity)
def readyRMW(state, fullEntity, entity=None):
    """z3 formula: the RMW lock is free, or already held by *fullEntity*."""
    # The querying entity must be fully concrete (no wildcards).
    assert not fullEntity.anyd and not fullEntity.anyw and not fullEntity.anyt
    unlocked = z3.Not(state('rmwed', entity=entity))
    owned_by_caller = z3.And([
        state('rmwTd', entity=entity) == fullEntity.d,
        state('rmwTw', entity=entity) == fullEntity.w,
        state('rmwTt', entity=entity) == fullEntity.t,
    ])
    return z3.Or(unlocked, owned_by_caller)
def locklockfile(inst,x,state):
    """Acquire the per-address lockfile at *x*: set flag, record owner (d,w,t)."""
    inst.setUpdateToState( 'locked' , True , addr = x)
    inst.setUpdateToState( 'lockTd' , inst.Eid.d , addr = x)
    inst.setUpdateToState( 'lockTw' , inst.Eid.w , addr = x)
    inst.setUpdateToState( 'lockTt' , inst.Eid.t , addr = x)
def unlocklockfile(inst,x,state):
    """Release the per-address lockfile at *x* (owner fields are left stale)."""
    inst.setUpdateToState( 'locked' , False , addr = x)
def readylockfile(x, state, fullEntity):
    """z3 formula: the lockfile at *x* is free, or already held by *fullEntity*."""
    # The querying entity must be fully concrete (no wildcards).
    assert not fullEntity.anyd and not fullEntity.anyw and not fullEntity.anyt
    unlocked = z3.Not(state('locked', addr=x))
    owned_by_caller = z3.And([
        state('lockTd', addr=x) == fullEntity.d,
        state('lockTw', addr=x) == fullEntity.w,
        state('lockTt', addr=x) == fullEntity.t,
    ])
    return z3.Or(unlocked, owned_by_caller)
def storeL1( x ,v , inst , state):
    """Write *v* into L1 line *x*: mark valid + dirty, enqueue on the L1 FIFO."""
    inst.setUpdateToState( 'L1val' , v , addr = x)
    inst.setUpdateToState( 'L1fr' , VALID , addr = x)
    inst.setUpdateToState( 'L1hy' , DIRTY , addr = x)
    L1Enqueue(state, inst)
def storeL2( x , v , inst, state ):
    """Write *v* into L2 line *x*: mark valid + dirty, enqueue on the L2 FIFO."""
    inst.setUpdateToState( 'L2val' , v , addr = x)
    inst.setUpdateToState( 'L2fr' , VALID , addr = x)
    inst.setUpdateToState( 'L2hy' , DIRTY , addr = x)
    L2Enqueue(state, inst)
# TODO: create dequeue child must need
# the record info must be read at the time of the instruction
# invalidate for all x
#------------------------------
# May have env trans
def inst_LD(x, r, Eid, state, **useless):
    """LD: load address *x* into register *r*; requires an L1 hit (valid line)."""
    # TODO: create rd ?
    # No : may not need
    inst = Instruction('LD', state('L1fr',addr = x) == VALID )
    inst.setEntity( Eid )
    inst.setUpdateToState( 'r' , state('L1val', addr = x), addr = r )
    inst.recordAddr( x )
    return inst
# has env trans
def inst_ST(x, r, Eid, state, **useless):
    """ST: store register *r* to address *x* via L1; always ready to issue."""
    inst = Instruction('ST', True )
    inst.setEntity( Eid )
    storeL1( x, state('r' , addr = r ) , inst , state = state) # TODO: create dequeue child must need
    #store( state(d,w).L1, x, state(d,w,t).r(r) , inst )
    inst.recordAddr( x )
    return inst
# has env trans
def inst_INC_L1(x, r , Eid,state, **useless):
    """INC_L1: atomically read L1[x] into *r* and store L1[x]+1 back.

    Ready only when the RMW lock is available to this entity and the L1
    line is valid.
    """
    inst = Instruction('INC_L1' , z3.And( readyRMW(state, Eid) , state('L1fr', addr = x) == VALID ) )
    inst.setEntity( Eid )
    inst.setUpdateToState( 'r', state('L1val', addr = x ), addr = r )
    storeL1( x, state('L1val', addr = x) + 1, inst , state = state)
    inst.recordAddr( x )
    return inst
# has env trans
def inst_INC_L2(x, r , Eid, state , num_dev, **useless): # will need to give a function
    """INC_L2: atomically read L2[x] into *r* and store L2[x]+1 back.

    Ready when the RMW lock and the per-address lockfile are available to
    this entity, our L1 copy is not dirty, and the L2 line is valid.
    Invalidates our L1 line and every *other* device's L2 line at *x*.

    Bug fix: the register write previously used ``addr = x`` (the memory
    address) instead of ``addr = r`` (the destination register), unlike the
    otherwise-parallel inst_INC_L1.
    """
    decode = z3.And([ \
        readyRMW(state, Eid),
        state('L1hy', addr = x) != DIRTY,
        readylockfile(x, state, Eid),
        state('L2fr', addr = x) == VALID
        ])
    inst = Instruction('INC_L2' , decode )
    inst.setEntity( Eid )
    inst.setUpdateToState( 'L1fr', INVALID, addr = x) #invalidate( state(d,w).L1, inst )
    inst.setUpdateToState( 'r', state('L2val', addr = x ) , addr = r )
    for devIdx in range(num_dev):
        if devIdx == Eid.d: continue
        inst.setUpdateToState( 'L2fr', INVALID, addr = x, entity = entity( devIdx ) )
    storeL2( x, state('L2val', addr = x) + 1 , inst , state = state) # contains L2fr
    inst.recordAddr( x )
    return inst
# add unflush block by axioms (?? or states??)
# has env trans
def inst_FLU_L1_WG(Eid, state, **useless):
    """FLU_L1_WG: enqueue an L1 flush marker for this work-group's FIFO."""
    inst = Instruction('FLU_L1_WG' , True )
    inst.setEntity( Eid )
    L1Enqueue(state,inst)
    return inst
def inst_FLU_L1_DV(Eid, state, num_wg, **useless):
    """FLU_L1_DV: enqueue an L1 flush marker for every work-group on this device."""
    inst = Instruction('FLU_L1_DV', True )
    inst.setEntity( Eid )
    for wgId in range(num_wg):
        inst.setUpdateToState('L1fifohead',
                state('L1fifohead', entity = entity(Eid.d,wgId) ) + 1 , entity = entity(Eid.d, wgId) )
    # for all wg in the same device, enqueue a flush marker
    return inst
def inst_FLU_L2_DV(Eid, state, **useless):
    """FLU_L2_DV: enqueue an L2 flush marker for this device's FIFO."""
    inst = Instruction('FLU_L2_DV', True )
    inst.setEntity( Eid )
    L2Enqueue(state, inst)
    return inst
# ------------------------------
# the above need additional
# ------------------------------
# purely axiomatic
def inst_INV_L1_WG(Eid, state, **useless):
    """INV_L1_WG: invalidate the whole L1 of this work-group (all addresses)."""
    inst = Instruction('INV_L1_WG', True)
    inst.setEntity( Eid )
    inst.setUpdateToState( 'L1fr' , INVALID , addr = None ) # will not set addr = x
    return inst
# purely axiomatic
def inst_INV_L1_DV(Eid, state, num_wg, **useless):
    """INV_L1_DV: invalidate the L1 of every work-group on this device."""
    inst = Instruction('INV_L1_DV', True)
    inst.setEntity( Eid )
    for wgId in range(num_wg):
        inst.setUpdateToState('L1fr', INVALID, addr = None , entity = entity(Eid.d, wgId) ) # will not set addr = x
    return inst
# same work group should be interpreted as w == w' and d == d'
"""
Axiom INV_L1_WG
forall inv:INV_L1_WG | forall l:LOAD | SameWg[inv,l] /\ HB[inv,l] => HB[inv , l.fetch ]
??? l.fetch ???
Axiom INV_L1_DV
forall inv:INV_L1_DV | forall l:LOAD | SameDv[inv,l] /\ HB[inv,l] => HB[inv , l.fetch]
??? l.fetch ???
"""
"""
def inst_INV_L1_SY(d,w,t, state):
inst = Instruction('INV_L1_SY', )
inst.setEntity( entity(d,w,t) )
return inst
"""
def inst_LK_L2(x, Eid, state, **useless):
    """LK_L2: take the per-address lockfile at *x*; ready when free or ours."""
    inst = Instruction('LK_L2', readylockfile(x, state , Eid) )
    inst.setEntity( Eid )
    locklockfile(inst,x,state)
    inst.recordAddr( x )
    return inst
def inst_UL_L2(x, Eid, state, **useless):
    """UL_L2: release the per-address lockfile at *x*; always ready."""
    inst = Instruction('UL_L2', True )
    inst.setEntity( Eid )
    unlocklockfile(inst,x,state)
    inst.recordAddr( x )
    return inst
def inst_LK_rmw_DV(Eid, state, num_wg, **useless):
    """LK_rmw_DV: take the RMW lock on every work-group of this device.

    Ready only when every per-work-group RMW lock is free or already ours.
    """
    decode = z3.And(
            [ readyRMW( state , Eid, entity = entity(Eid.d, wgId) ) \
                for wgId in range(num_wg) ] )
    inst = Instruction('LK_rmw_DV', decode )
    inst.setEntity( Eid )
    for wgId in range(num_wg):
        lockRMW(inst, state, entity = entity(Eid.d, wgId) ) # not including w
    #forallWg( d, lambda w2: inst.setUpdateToState( state(d,w2).rmw , lock(d,w,t) ) )
    return inst
def inst_UL_rmw_DV(Eid, state, num_wg, **useless):
    """UL_rmw_DV: release the RMW lock on every work-group of this device."""
    inst = Instruction('UL_rmw_DV', True )
    inst.setEntity( Eid )
    for wgId in range(num_wg):
        unlockRMW(inst, state, entity = entity(Eid.d, wgId) )
    return inst
# decode may need to chain
# instantiate the above instructions and give the state()
# need to take care of time and value. maybe register the pi_var?
# try not to model evict!
def ev_FLUSH_L1(x,Eid,state, num_dev, **useless):
    """Environment transition: write back dirty L1 line *x* into L2.

    Fires when the L1 line is dirty and the lockfile at *x* is available.
    The store into L2 invalidates every other device's L2 copy of *x*, and
    the L1 line is marked clean (its value stays valid).
    """
    inst = Instruction('FLUSH_L1', z3.And( state('L1hy', addr = x) == DIRTY, readylockfile(x, state, Eid) ) )
    inst.setEntity( Eid )
    for devIdx in range(num_dev):
        if devIdx == Eid.d: continue
        inst.setUpdateToState( 'L2fr' , INVALID ,addr = x, entity = entity(devIdx) ) # write to all device at x
    #forallDev(lambda d2: if d2 != d : inst.setUpdateToState( state(d2).L2(x).fr , INVALID ) )
    storeL2(x , state('L1val', addr = x) , inst , state = state)
    #store( state(d).L2, x, state(d,w).L1(x).value , inst ) # but one for L2fr
    inst.setUpdateToState( 'L1hy', CLEAN, addr = x )
    #inst.setUpdateToState( state(d,w).L1(x).hy , CLEAN ) #
    inst.recordAddr( x )
    inst.tp = 'gpuEnvTrans'
    return inst
"""
def ev_FLUSH_L2(x, d,w,t, state):
inst = Instruction('FLUSH_L2', z3.And( state(d).L2(x).hy == DIRTY , state(d).lockfile(x).ready(d,w,t) ) )
"""
# we need to make L2 all valid for the same dv?
def ev_FETCH_L1(x, Eid, state, **useless):
decode = z3.And( [ state('L1hy', addr = x) == | |
(using link MTU)
IP primary address route-preference: 0, tag: 0
IP unnumbered interface (loopback0)
IP proxy ARP : disabled
IP Local Proxy ARP : disabled
IP multicast routing: disabled
IP icmp redirects: disabled
IP directed-broadcast: disabled
IP Forwarding: disabled
IP icmp unreachables (except port): disabled
IP icmp port-unreachable: enabled
IP unicast reverse path forwarding: none
IP load sharing: none
IP interface statistics last reset: never
IP interface software stats: (sent/received/forwarded/originated/consumed)
Unicast packets : 0/0/0/0/0
Unicast bytes : 0/0/0/0/0
Multicast packets : 0/0/0/0/0
Multicast bytes : 0/0/0/0/0
Broadcast packets : 0/0/0/0/0
Broadcast bytes : 0/0/0/0/0
Labeled packets : 0/0/0/0/0
Labeled bytes : 0/0/0/0/0
WCCP Redirect outbound: disabled
WCCP Redirect inbound: disabled
WCCP Redirect exclude: disabled
'''
ShowIpInterfaceVrfAll_vrf1_eth2='''
IP Interface Status for VRF "VRF1"
Ethernet2/1, Interface status: protocol-up/link-up/admin-up, iod: 36,
IP address: 10.4.4.4, IP subnet: 10.4.4.0/24 secondary
IP address: 10.2.2.2, IP subnet: 10.2.2.0/24 secondary
IP address: 10.3.3.3, IP subnet: 10.3.3.0/24 secondary
IP broadcast address: 255.255.255.255
IP multicast groups locally joined:
172.16.58.3 172.16.31.10 172.16.17.32
IP MTU: 1600 bytes (using link MTU)
IP primary address route-preference: 0, tag: 0
IP unnumbered interface (loopback0)
IP proxy ARP : disabled
IP Local Proxy ARP : disabled
IP multicast routing: disabled
IP icmp redirects: disabled
IP directed-broadcast: disabled
IP Forwarding: disabled
IP icmp unreachables (except port): disabled
IP icmp port-unreachable: enabled
IP unicast reverse path forwarding: none
IP load sharing: none
IP interface statistics last reset: never
IP interface software stats: (sent/received/forwarded/originated/consumed)
Unicast packets : 0/0/0/0/0
Unicast bytes : 0/0/0/0/0
Multicast packets : 0/0/0/0/0
Multicast bytes : 0/0/0/0/0
Broadcast packets : 0/0/0/0/0
Broadcast bytes : 0/0/0/0/0
Labeled packets : 0/0/0/0/0
Labeled bytes : 0/0/0/0/0
WCCP Redirect outbound: disabled
WCCP Redirect inbound: disabled
WCCP Redirect exclude: disabled
'''
ShowVrfAllInterface = {
'Ethernet2/1': {'site_of_origin': '--', 'vrf': 'VRF1', 'vrf_id': 3},
'Ethernet2/1.10': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/1.20': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/10': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/11': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/12': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/13': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/14': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/15': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/16': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/17': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/18': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/19': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/20': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/21': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/22': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/23': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/24': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/25': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/26': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/27': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/28': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/29': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/30': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/31': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/32': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/33': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/34': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/35': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/36': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/37': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/38': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/39': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/4': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/40': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/41': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/42': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/43': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/44': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/45': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/46': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/47': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/48': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/5': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/6': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/7': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/8': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet2/9': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/1': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/10': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/11': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/12': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/13': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/14': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/15': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/16': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/17': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/18': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/19': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/2': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/20': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/21': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/22': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/23': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/24': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/25': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/26': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/27': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/28': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/29': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/3': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/30': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/31': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/32': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/33': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/34': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/35': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/36': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/37': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/38': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/39': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/4': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/40': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/41': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/42': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/43': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/44': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/45': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/46': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/47': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/48': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/5': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/6': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/7': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/8': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet3/9': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/1': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/10': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/11': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/12': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/13': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/14': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/15': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/16': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/17': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/18': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/19': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/2': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/20': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/21': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/22': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/23': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/24': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/25': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/26': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/27': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/28': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/29': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/3': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/30': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/31': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/32': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/33': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/34': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/35': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/36': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/37': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/38': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/39': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/4': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/40': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/41': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/42': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/43': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/44': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/45': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/46': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/47': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/48': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/5': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/6': {'site_of_origin': '--', 'vrf': 'default', 'vrf_id': 1},
'Ethernet4/7': {'site_of_origin': '--', | |
being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
if not all(len(dts) == len(cols[name]) for name in self.COL_NAMES):
raise BcolzMinuteWriterColumnMismatch(
"Length of dts={0} should match cols: {1}".format(
len(dts),
" ".join("{0}={1}".format(name, len(cols[name]))
for name in self.COL_NAMES)))
self._write_cols(sid, dts, cols, invalid_data_behavior)
    def _write_cols(self, sid, dts, cols, invalid_data_behavior):
        """
        Internal method for `write_cols` and `write`.

        Converts the OHLCV columns into packed native-int rows and writes
        one RocksDB record per market minute, keyed by the minute's epoch
        seconds packed big-endian.

        Parameters:
        -----------
        sid : int
            The asset identifier for the data being written.
        dts : datetime64 array
            The dts corresponding to values in cols.
        cols : dict of str -> np.array
            dict of market data with the following characteristics.
            keys are ('open', 'high', 'low', 'close', 'volume')
            open : float64
            high : float64
            low : float64
            close : float64
            volume : float64|int64
        """
        tds = self._session_labels
        input_first_day = self._calendar.minute_to_session_label(
            pd.Timestamp(dts[0]), direction='previous')
        last_date = self.last_date_in_output_for_sid(sid)  # NOTE(review): computed but unused below
        day_before_input = input_first_day - tds.freq
        # Pad out any missing sessions before the first input day.
        self.pad(sid, day_before_input)
        table = self._ensure_ctable(sid)
        # Get the number of minutes already recorded in this sid's ctable
        # NOTE(review): hardcoded to 0, so every call re-emits the window
        # starting from the first calendar minute -- confirm this is intended.
        num_rec_mins = 0
        all_minutes = self._minute_index
        # Get the latest minute we wish to write to the ctable
        last_minute_to_write = pd.Timestamp(dts[-1], tz='UTC')
        latest_min_count = all_minutes.get_loc(last_minute_to_write)
        # Get all the minutes we wish to write (all market minutes after the
        # latest currently written, up to and including last_minute_to_write)
        all_minutes_in_window = all_minutes[num_rec_mins:latest_min_count + 1]
        minutes_count = all_minutes_in_window.size
        # Minutes with no input data stay zero-filled.
        open_col = np.zeros(minutes_count, dtype=np.uint32)
        high_col = np.zeros(minutes_count, dtype=np.uint32)
        low_col = np.zeros(minutes_count, dtype=np.uint32)
        close_col = np.zeros(minutes_count, dtype=np.uint32)
        vol_col = np.zeros(minutes_count, dtype=np.uint32)
        # Positions of the input minutes within the full window.
        dt_ixs = np.searchsorted(all_minutes_in_window.values,
                                 dts.astype('datetime64[ns]'))
        ohlc_ratio = self.ohlc_ratio_for_sid(sid)
        (
            open_col[dt_ixs],
            high_col[dt_ixs],
            low_col[dt_ixs],
            close_col[dt_ixs],
            vol_col[dt_ixs],
        ) = convert_cols(cols, ohlc_ratio, sid, invalid_data_behavior)
        # Row layout is (open, close, high, low, volume) as native ints;
        # any reader must unpack slots in this same order.
        for i in range(0, minutes_count):
            bts = struct.pack("@iiiii", open_col[i], close_col[i], high_col[i], low_col[i], vol_col[i])
            table.put(b'default', struct.pack('>i',int(time.mktime(all_minutes_in_window[i].timetuple()))), bts)
        # Close and drop the handle so the file is flushed to disk.
        self.db.close()
        del self.db
        self.db = None
def data_len_for_day(self, day):
"""
Return the number of data points up to and including the
provided day.
"""
day_ix = self._session_labels.get_loc(day)
# Add one to the 0-indexed day_ix to get the number of days.
num_days = day_ix + 1
return num_days * self._minutes_per_day
    def truncate(self, date):
        """Truncate data beyond this date in all ctables.

        NOTE(review): this implementation only enumerates the per-sid
        ``*.db`` paths; ``truncate_slice_end`` and ``file_name`` are
        computed but never used, so no data is actually removed.  Looks
        unfinished -- confirm the intended truncation semantics before use.
        """
        truncate_slice_end = self.data_len_for_day(date)
        # One RocksDB directory per sid under the root.
        glob_path = os.path.join(self._rootdir, "*.db")
        sid_paths = sorted(glob(glob_path))
        for sid_path in sid_paths:
            file_name = os.path.basename(sid_path)
class RocksdbMinuteBarReader(BcolzMinuteBarReader):
    """
    Reader for per-sid minute OHLCV data stored in RocksDB tables,
    as written by the corresponding minute-bar writer.
    Parameters:
    -----------
    rootdir : string
        The root directory containing the metadata and per-asset
        database directories.
    See Also
    --------
    zipline.data.minute_bars.BcolzMinuteBarWriter
    """
    # Order matters: FIELD_MAP (built in __init__) maps each name to its
    # slot index within a packed row, matching the writer's
    # struct.pack("@iiiii", open, close, high, low, volume) layout.
    FIELDS = ('open', 'close', 'high', 'low', 'volume')
    # RocksDB column families; only b'default' is read by this class.
    COL_NAMES_BYTE_ALL = (b'default', b'open', b'high', b'low', b'close', b'volume')
    # Class-level placeholder; replaced by a per-instance dict in __init__.
    dbs = None
    FIELD_VAL_SIZE = 4 #int struct pack value size (recomputed in __init__)
    def __init__(self, rootdir, sid_cache_size=1000, realtime = False):
        # Root directory holding one RocksDB per sid plus the metadata.
        self._rootdir = rootdir
        # In realtime mode DB handles are opened/closed per call instead
        # of being cached in self.dbs (see _open_db/_close_db).
        self._realtime = realtime
        self.dbs = dict()
        metadata = self._get_metadata()
        # Recompute the packed int size in case "@i" is not 4 bytes here.
        size = len(struct.pack("@i", 1))
        if size != self.FIELD_VAL_SIZE:
            self.FIELD_VAL_SIZE = size
        # Map field name -> slot index inside a packed row.
        self.FIELD_MAP = dict()
        for i, val in enumerate(self.FIELDS):
            self.FIELD_MAP[val] = i
        self._start_session = metadata.start_session
        self._end_session = metadata.end_session
        self.calendar = metadata.calendar
        # NOTE(review): relies on the private tz._utcoffset attribute;
        # the offset is rounded down to a whole hour below.
        tz_offset = self.calendar.tz._utcoffset.seconds
        self.tz_utcoffset_seconds = tz_offset - (tz_offset % 3600) # hours
        # Restrict the calendar schedule to this file's session range.
        slicer = self.calendar.schedule.index.slice_indexer(
            self._start_session,
            self._end_session,
        )
        self._schedule = self.calendar.schedule[slicer]
        self._market_opens = self._schedule.market_open
        self._market_open_values = self._market_opens.values. \
            astype('datetime64[m]').astype(np.int64)
        self._market_closes = self._schedule.market_close
        self._market_close_values = self._market_closes.values. \
            astype('datetime64[m]').astype(np.int64)
        self._default_ohlc_inverse = 1.0 / metadata.default_ohlc_ratio
        ohlc_ratios = metadata.ohlc_ratios_per_sid
        if ohlc_ratios:
            # Pre-invert so lookups are a multiply, not a divide.
            self._ohlc_inverses_per_sid = (
                valmap(lambda x: 1.0 / x, ohlc_ratios))
        else:
            self._ohlc_inverses_per_sid = None
        self._minutes_per_day = metadata.minutes_per_day
        # Per-field LRU caches of per-sid frames (see _open_minute_file).
        self._carrays = {
            field: LRU(sid_cache_size)
            for field in self.FIELDS
        }
        self._last_get_value_dt_position = None
        self._last_get_value_dt_value = None
        # This is to avoid any bad data or other performance-killing situation
        # where there a consecutive streak of 0 (no volume) starting at an
        # asset's start date.
        # if asset 1 started on 2015-01-03 but its first trade is 2015-01-06
        # 10:31 AM US/Eastern, this dict would store {1: 23675971},
        # which is the minute epoch of that date.
        self._known_zero_volume_dict = {}
def __del__(self):
if self.dbs is not None and len(self.dbs) > 0:
for db in self.dbs.values():
db.close()
del db
self.dbs.clear()
def _ohlc_ratio_inverse_for_sid(self, sid):
if self._ohlc_inverses_per_sid is not None:
try:
return self._ohlc_inverses_per_sid[sid]
except KeyError:
pass
# If we can not get a sid-specific OHLC inverse for this sid,
# fallback to the default.
return self._default_ohlc_inverse
def _minutes_to_exclude(self):
"""
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns:
--------
List of DatetimeIndex representing the minutes to exclude because
of early closes.
"""
slicer = self.calendar.schedule.index.slice_indexer(
self._start_session,
self._end_session,
)
minutes_per_day = self.calendar._minutes_per_session[slicer]
early_indices = np.where(
minutes_per_day != self._minutes_per_day )[0]
early_opens = self._market_opens[early_indices]
early_closes = self._market_closes[early_indices]
minutes = [(market_open, early_close)
for market_open, early_close
in zip(early_opens, early_closes)]
return minutes
    @lazyval
    def _minute_exclusion_tree(self):
        """
        Build an interval tree keyed by the start and end of each range
        of positions should be dropped from windows. (These are the minutes
        between an early close and the minute which would be the close based
        on the regular period if there were no early close.)
        The value of each node is the same start and end position stored as
        a tuple.
        The data is stored as such in support of a fast answer to the question,
        does a given start and end position overlap any of the exclusion spans?
        Returns
        -------
        IntervalTree containing nodes which represent the minutes to exclude
        because of early closes.
        """
        itree = IntervalTree()
        for market_open, early_close in self._minutes_to_exclude():
            # First excluded minute is the one right after the early close.
            start_pos = self._find_position_of_minute(early_close) + 1
            # Last excluded minute is where the close would land on a
            # regular-length session.
            end_pos = (
                self._find_position_of_minute(market_open)
                +
                self._minutes_per_day
                -
                1
            )
            data = (start_pos, end_pos)
            # IntervalTree uses half-open intervals, hence the +1.
            itree[start_pos:end_pos + 1] = data
        return itree
def _exclusion_indices_for_range(self, start_idx, end_idx):
"""
Returns
-------
List of tuples of (start, stop) which represent the ranges of minutes
which should be excluded when a market minute window is requested.
"""
itree = self._minute_exclusion_tree
if itree.overlaps(start_idx, end_idx):
ranges = []
intervals = itree[start_idx:end_idx]
for interval in intervals:
ranges.append(interval.data)
return sorted(ranges)
else:
return None
def _get_field_value(self, sid, field):
sidpath = self.sidpath(sid)
db = self._open_db(sid, sidpath)
if field not in self.FIELD_MAP:
return None
it = db.iteritems(b'default')
it.seek_to_first()
dts = []
vals = []
local_tz = self.calendar.tz
for k, v in it:
#dts.append(datetime.datetime.fromtimestamp(struct.unpack(">i", k)[0]).replace(tzinfo=pytz.UTC).astimezone(local_tz))
dts.append(datetime.datetime.fromtimestamp(struct.unpack(">i", k)[0]))
vals.append(struct.unpack("@i", bytearray(v)[self.FIELD_MAP[field] * self.FIELD_VAL_SIZE:(self.FIELD_MAP[field]+1) * self.FIELD_VAL_SIZE]))
del it
self._close_db(sid, db)
items = {"dt": dts}
items[field] = vals
df = pd.DataFrame.from_dict(items)
df["dt"] = pd.to_datetime(df['dt'])
df.set_index(["dt"])
return df
def sidpath(self, sid):
"""
Parameters:
-----------
sid : int
Asset identifier.
Returns:
--------
out : string
Full path to the bcolz rootdir for the given sid.
"""
sid_subdir = _sid_subdir_path(sid)
return os.path.join(self._rootdir, sid_subdir)
def _open_db(self, sid, path):
if not self._realtime:
if sid in self.dbs:
return self.dbs[sid]
cols = self.COL_NAMES_BYTE_ALL
opt = rocksdb.Options(create_if_missing=True, write_buffer_size=512 * 1024 * 1024, max_write_buffer_number=5,
min_write_buffer_number_to_merge=2, compression=rocksdb.CompressionType.lz4_compression)
db = rocksdb.DB(path, opt, cols, read_only=True)
if not self._realtime:
self.dbs[sid] = db
return db
def _close_db(self, sid, db):
if self._realtime:
db.close()
def _open_minute_file(self, field, sid):
sid = int(sid)
try:
carray = self._carrays[field][sid]
except KeyError:
carray = self._carrays[field][sid] = self._get_field_value(sid, field)
return carray
def table_len(self, sid):
"""Returns the length of the underlying table for this sid."""
return len(self._open_minute_file('close', sid))
def get_value(self, sid, dt, field):
"""
Retrieve the pricing info for the given sid, dt, and field.
Parameters:
-----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns:
--------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.)
"""
dt_value = int(dt.value / 10 ** 9)
if int(dt.tz._utcoffset.seconds / 3600) != int(self.tz_utcoffset_seconds / 3600):
dt_value -= self.tz_utcoffset_seconds
key = struct.pack(">i", dt_value)
path = | |
4
self.postSynaptic['I3'][self.nextState] += 2
self.postSynaptic['I4'][self.nextState] += 6
self.postSynaptic['I5'][self.nextState] += 3
self.postSynaptic['I6'][self.nextState] += 1
self.postSynaptic['M1'][self.nextState] += 2
self.postSynaptic['M3L'][self.nextState] += 1
self.postSynaptic['MCL'][self.nextState] += 1
self.postSynaptic['MCR'][self.nextState] += 1
self.postSynaptic['MI'][self.nextState] += 2
self.postSynaptic['NSML'][self.nextState] += 2
self.postSynaptic['NSMR'][self.nextState] += 3
def M4(self):
self.postSynaptic['I3'][self.nextState] += 1
self.postSynaptic['I5'][self.nextState] += 13
self.postSynaptic['I6'][self.nextState] += 3
self.postSynaptic['M2L'][self.nextState] += 1
self.postSynaptic['M2R'][self.nextState] += 1
self.postSynaptic['M4'][self.nextState] += 6
self.postSynaptic['M5'][self.nextState] += 1
self.postSynaptic['NSML'][self.nextState] += 1
self.postSynaptic['NSMR'][self.nextState] += 1
def M5(self):
self.postSynaptic['I5'][self.nextState] += 3
self.postSynaptic['I5'][self.nextState] += 1
self.postSynaptic['I6'][self.nextState] += 1
self.postSynaptic['M1'][self.nextState] += 2
self.postSynaptic['M2L'][self.nextState] += 2
self.postSynaptic['M2R'][self.nextState] += 2
self.postSynaptic['M5'][self.nextState] += 4
def MCL(self):
self.postSynaptic['I1L'][self.nextState] += 3
self.postSynaptic['I1R'][self.nextState] += 3
self.postSynaptic['I2L'][self.nextState] += 1
self.postSynaptic['I2R'][self.nextState] += 1
self.postSynaptic['I3'][self.nextState] += 1
self.postSynaptic['M1'][self.nextState] += 2
self.postSynaptic['M2L'][self.nextState] += 2
self.postSynaptic['M2R'][self.nextState] += 2
def MCR(self):
self.postSynaptic['I1L'][self.nextState] += 3
self.postSynaptic['I1R'][self.nextState] += 3
self.postSynaptic['I3'][self.nextState] += 1
self.postSynaptic['M1'][self.nextState] += 2
self.postSynaptic['M2L'][self.nextState] += 2
self.postSynaptic['M2R'][self.nextState] += 2
def MI(self):
self.postSynaptic['I1L'][self.nextState] += 1
self.postSynaptic['I1R'][self.nextState] += 1
self.postSynaptic['I3'][self.nextState] += 1
self.postSynaptic['I4'][self.nextState] += 1
self.postSynaptic['I5'][self.nextState] += 2
self.postSynaptic['M1'][self.nextState] += 1
self.postSynaptic['M2L'][self.nextState] += 2
self.postSynaptic['M2R'][self.nextState] += 2
self.postSynaptic['M3L'][self.nextState] += 1
self.postSynaptic['M3R'][self.nextState] += 1
self.postSynaptic['MCL'][self.nextState] += 2
self.postSynaptic['MCR'][self.nextState] += 2
def NSML(self):
self.postSynaptic['I1L'][self.nextState] += 1
self.postSynaptic['I1R'][self.nextState] += 2
self.postSynaptic['I2L'][self.nextState] += 6
self.postSynaptic['I2R'][self.nextState] += 6
self.postSynaptic['I3'][self.nextState] += 2
self.postSynaptic['I4'][self.nextState] += 3
self.postSynaptic['I5'][self.nextState] += 2
self.postSynaptic['I6'][self.nextState] += 2
self.postSynaptic['M3L'][self.nextState] += 2
self.postSynaptic['M3R'][self.nextState] += 2
def NSMR(self):
self.postSynaptic['I1L'][self.nextState] += 2
self.postSynaptic['I1R'][self.nextState] += 2
self.postSynaptic['I2L'][self.nextState] += 6
self.postSynaptic['I2R'][self.nextState] += 6
self.postSynaptic['I3'][self.nextState] += 2
self.postSynaptic['I4'][self.nextState] += 3
self.postSynaptic['I5'][self.nextState] += 2
self.postSynaptic['I6'][self.nextState] += 2
self.postSynaptic['M3L'][self.nextState] += 2
self.postSynaptic['M3R'][self.nextState] += 2
def OLLL(self):
self.postSynaptic['AVER'][self.nextState] += 21
self.postSynaptic['CEPDL'][self.nextState] += 3
self.postSynaptic['CEPVL'][self.nextState] += 4
self.postSynaptic['IL1DL'][self.nextState] += 1
self.postSynaptic['IL1VL'][self.nextState] += 2
self.postSynaptic['OLLR'][self.nextState] += 2
self.postSynaptic['RIBL'][self.nextState] += 8
self.postSynaptic['RIGL'][self.nextState] += 1
self.postSynaptic['RMDDL'][self.nextState] += 7
self.postSynaptic['RMDL'][self.nextState] += 2
self.postSynaptic['RMDVL'][self.nextState] += 1
self.postSynaptic['RMEL'][self.nextState] += 2
self.postSynaptic['SMDDL'][self.nextState] += 3
self.postSynaptic['SMDDR'][self.nextState] += 4
self.postSynaptic['SMDVR'][self.nextState] += 4
self.postSynaptic['URYDL'][self.nextState] += 1
def OLLR(self):
self.postSynaptic['AVEL'][self.nextState] += 16
self.postSynaptic['CEPDR'][self.nextState] += 1
self.postSynaptic['CEPVR'][self.nextState] += 6
self.postSynaptic['IL1DR'][self.nextState] += 3
self.postSynaptic['IL1VR'][self.nextState] += 1
self.postSynaptic['IL2R'][self.nextState] += 1
self.postSynaptic['OLLL'][self.nextState] += 2
self.postSynaptic['RIBR'][self.nextState] += 10
self.postSynaptic['RIGR'][self.nextState] += 1
self.postSynaptic['RMDDR'][self.nextState] += 10
self.postSynaptic['RMDL'][self.nextState] += 3
self.postSynaptic['RMDVR'][self.nextState] += 3
self.postSynaptic['RMER'][self.nextState] += 2
self.postSynaptic['SMDDR'][self.nextState] += 1
self.postSynaptic['SMDVL'][self.nextState] += 4
self.postSynaptic['SMDVR'][self.nextState] += 3
def OLQDL(self):
self.postSynaptic['CEPDL'][self.nextState] += 1
self.postSynaptic['RIBL'][self.nextState] += 2
self.postSynaptic['RICR'][self.nextState] += 1
self.postSynaptic['RIGL'][self.nextState] += 1
self.postSynaptic['RMDDR'][self.nextState] += 4
self.postSynaptic['RMDVL'][self.nextState] += 1
self.postSynaptic['SIBVL'][self.nextState] += 3
self.postSynaptic['URBL'][self.nextState] += 1
def OLQDR(self):
self.postSynaptic['CEPDR'][self.nextState] += 2
self.postSynaptic['RIBR'][self.nextState] += 2
self.postSynaptic['RICL'][self.nextState] += 1
self.postSynaptic['RICR'][self.nextState] += 1
self.postSynaptic['RIGR'][self.nextState] += 1
self.postSynaptic['RIH'][self.nextState] += 1
self.postSynaptic['RMDDL'][self.nextState] += 3
self.postSynaptic['RMDVR'][self.nextState] += 1
self.postSynaptic['RMHR'][self.nextState] += 1
self.postSynaptic['SIBVR'][self.nextState] += 2
self.postSynaptic['URBR'][self.nextState] += 1
def OLQVL(self):
self.postSynaptic['ADLL'][self.nextState] += 1
self.postSynaptic['CEPVL'][self.nextState] += 1
self.postSynaptic['IL1VL'][self.nextState] += 1
self.postSynaptic['IL2VL'][self.nextState] += 1
self.postSynaptic['RIBL'][self.nextState] += 1
self.postSynaptic['RICL'][self.nextState] += 1
self.postSynaptic['RIGL'][self.nextState] += 1
self.postSynaptic['RIH'][self.nextState] += 1
self.postSynaptic['RIPL'][self.nextState] += 1
self.postSynaptic['RMDDL'][self.nextState] += 1
self.postSynaptic['RMDVR'][self.nextState] += 4
self.postSynaptic['SIBDL'][self.nextState] += 3
self.postSynaptic['URBL'][self.nextState] += 1
def OLQVR(self):
self.postSynaptic['CEPVR'][self.nextState] += 1
self.postSynaptic['IL1VR'][self.nextState] += 1
self.postSynaptic['RIBR'][self.nextState] += 1
self.postSynaptic['RICR'][self.nextState] += 1
self.postSynaptic['RIGR'][self.nextState] += 1
self.postSynaptic['RIH'][self.nextState] += 2
self.postSynaptic['RIPR'][self.nextState] += 2
self.postSynaptic['RMDDR'][self.nextState] += 1
self.postSynaptic['RMDVL'][self.nextState] += 4
self.postSynaptic['RMER'][self.nextState] += 1
self.postSynaptic['SIBDR'][self.nextState] += 4
self.postSynaptic['URBR'][self.nextState] += 1
def PDA(self):
self.postSynaptic['AS11'][self.nextState] += 1
self.postSynaptic['DA9'][self.nextState] += 1
self.postSynaptic['DD6'][self.nextState] += 1
self.postSynaptic['MDL21'][self.nextState] += 2
self.postSynaptic['PVNR'][self.nextState] += 1
self.postSynaptic['VD13'][self.nextState] += 3
def PDB(self):
self.postSynaptic['AS11'][self.nextState] += 2
self.postSynaptic['MVL22'][self.nextState] += 1
self.postSynaptic['MVR21'][self.nextState] += 1
self.postSynaptic['RID'][self.nextState] += 2
self.postSynaptic['VD13'][self.nextState] += 2
def PDEL(self):
self.postSynaptic['AVKL'][self.nextState] += 6
self.postSynaptic['DVA'][self.nextState] += 24
self.postSynaptic['PDER'][self.nextState] += 1
self.postSynaptic['PDER'][self.nextState] += 3
self.postSynaptic['PVCR'][self.nextState] += 1
self.postSynaptic['PVM'][self.nextState] += 2
self.postSynaptic['PVM'][self.nextState] += 1
self.postSynaptic['PVR'][self.nextState] += 2
self.postSynaptic['VA9'][self.nextState] += 1
self.postSynaptic['VD11'][self.nextState] += 1
def PDER(self):
self.postSynaptic['AVKL'][self.nextState] += 16
self.postSynaptic['DVA'][self.nextState] += 35
self.postSynaptic['PDEL'][self.nextState] += 3
self.postSynaptic['PVCL'][self.nextState] += 1
self.postSynaptic['PVCR'][self.nextState] += 1
self.postSynaptic['PVM'][self.nextState] += 1
self.postSynaptic['VA8'][self.nextState] += 1
self.postSynaptic['VD9'][self.nextState] += 1
def PHAL(self):
    """Distribute PHAL's synaptic output to its postsynaptic partners.

    PHAR appears twice, mirroring the source connectome table; the two
    increments simply accumulate.
    """
    for target, weight in (
        ('AVDR', 1), ('AVFL', 3), ('AVG', 5), ('AVHL', 1),
        ('AVHR', 1), ('DVA', 2), ('PHAR', 5), ('PHAR', 2),
        ('PHBL', 5), ('PHBR', 5), ('PVQL', 2),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PHAR(self):
    """Distribute PHAR's synaptic output to its postsynaptic partners.

    PHAL appears twice, mirroring the source connectome table.
    """
    for target, weight in (
        ('AVG', 3), ('AVHR', 1), ('DA8', 1), ('DVA', 1),
        ('PHAL', 6), ('PHAL', 2), ('PHBL', 1), ('PHBR', 5),
        ('PVPL', 3), ('PVQL', 2),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PHBL(self):
    """Distribute PHBL's synaptic output to its postsynaptic partners.

    PHBR appears twice, mirroring the source connectome table.
    """
    for target, weight in (
        ('AVAL', 9), ('AVAR', 6), ('AVDL', 1), ('PHBR', 1),
        ('PHBR', 3), ('PVCL', 13), ('VA12', 1),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PHBR(self):
    """Distribute PHBR's synaptic output to its postsynaptic partners.

    PHBL appears twice, mirroring the source connectome table.
    """
    for target, weight in (
        ('AVAL', 7), ('AVAR', 7), ('AVDL', 1), ('AVDR', 1),
        ('AVFL', 1), ('AVHL', 1), ('DA8', 1), ('PHBL', 1),
        ('PHBL', 3), ('PVCL', 6), ('PVCR', 3), ('VA12', 2),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PHCL(self):
    """Distribute PHCL's synaptic output to its postsynaptic partners.

    DA9 appears twice, mirroring the source connectome table.
    """
    for target, weight in (
        ('AVAL', 1), ('DA9', 7), ('DA9', 1), ('DVA', 6),
        ('LUAL', 1), ('PHCR', 1), ('PLML', 1), ('PVCL', 2),
        ('VA12', 3),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PHCR(self):
    """Distribute PHCR's synaptic output to its postsynaptic partners."""
    for target, weight in (
        ('AVHR', 1), ('DA9', 2), ('DVA', 8), ('LUAR', 1),
        ('PHCL', 2), ('PVCR', 9), ('VA12', 2),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PLML(self):
    """Distribute PLML's synaptic output to its postsynaptic partners."""
    for target, weight in (
        ('HSNL', 1), ('LUAL', 1), ('PHCL', 1), ('PVCL', 1),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PLMR(self):
    """Distribute PLMR's synaptic output to its postsynaptic partners."""
    for target, weight in (
        ('AS6', 1), ('AVAL', 4), ('AVAR', 1), ('AVDL', 1),
        ('AVDR', 4), ('DVA', 5), ('HSNR', 1), ('LUAR', 1),
        ('PDEL', 2), ('PDER', 3), ('PVCL', 2), ('PVCR', 1),
        ('PVR', 2),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PLNL(self):
    """Distribute PLNL's synaptic output to its postsynaptic partners."""
    for target, weight in (('SAADL', 5), ('SMBVL', 6)):
        self.postSynaptic[target][self.nextState] += weight
def PLNR(self):
    """Distribute PLNR's synaptic output to its postsynaptic partners."""
    for target, weight in (('SAADR', 4), ('SMBVR', 6)):
        self.postSynaptic[target][self.nextState] += weight
def PQR(self):
    """Distribute PQR's synaptic output to its postsynaptic partners."""
    for target, weight in (
        ('AVAL', 8), ('AVAR', 11), ('AVDL', 7), ('AVDR', 6),
        ('AVG', 1), ('LUAR', 1), ('PVNL', 1), ('PVPL', 4),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PVCL(self):
    """Distribute PVCL's synaptic output to its postsynaptic partners."""
    for target, weight in (
        ('AS1', 1), ('AVAL', 3), ('AVAR', 4), ('AVBL', 5),
        ('AVBR', 12), ('AVDL', 5), ('AVDR', 2), ('AVEL', 3),
        ('AVER', 1), ('AVJL', 4), ('AVJR', 2), ('DA2', 1),
        ('DA5', 1), ('DA6', 1), ('DB2', 3), ('DB3', 4),
        ('DB4', 3), ('DB5', 2), ('DB6', 2), ('DB7', 3),
        ('DVA', 5), ('PLML', 1), ('PVCR', 7), ('RID', 5),
        ('RIS', 2), ('SIBVL', 2), ('VB10', 3), ('VB11', 1),
        ('VB3', 1), ('VB4', 1), ('VB5', 1), ('VB6', 2),
        ('VB8', 1), ('VB9', 2),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PVCR(self):
    """Distribute PVCR's synaptic output to its postsynaptic partners."""
    for target, weight in (
        ('AQR', 1), ('AS2', 1), ('AVAL', 12), ('AVAR', 10),
        ('AVBL', 8), ('AVBR', 6), ('AVDL', 5), ('AVDR', 1),
        ('AVEL', 1), ('AVER', 1), ('AVJL', 3), ('AVL', 1),
        ('DA9', 1), ('DB2', 1), ('DB3', 3), ('DB4', 4),
        ('DB5', 1), ('DB6', 2), ('DB7', 1), ('FLPL', 1),
        ('LUAR', 1), ('PDEL', 2), ('PHCR', 1), ('PLMR', 1),
        ('PVCL', 8), ('PVDL', 1), ('PVR', 1), ('PVWL', 2),
        ('PVWR', 2), ('RID', 5), ('SIBVR', 2), ('VA8', 2),
        ('VA9', 1), ('VB10', 1), ('VB4', 3), ('VB6', 2),
        ('VB7', 3), ('VB8', 1),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PVDL(self):
    """Distribute PVDL's synaptic output to its postsynaptic partners."""
    for target, weight in (
        ('AVAL', 6), ('AVAR', 6), ('DD5', 1),
        ('PVCL', 1), ('PVCR', 6), ('VD10', 6),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PVDR(self):
    """Distribute PVDR's synaptic output to its postsynaptic partners."""
    for target, weight in (
        ('AVAL', 6), ('AVAR', 9), ('DVA', 3), ('PVCL', 13),
        ('PVCR', 10), ('PVDL', 1), ('VA9', 1),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PVM(self):
    """Distribute PVM's synaptic output to its postsynaptic partners.

    PDEL and PDER each appear twice, mirroring the source connectome
    table; the increments simply accumulate.
    """
    for target, weight in (
        ('AVKL', 11), ('AVL', 1), ('AVM', 1), ('DVA', 3),
        ('PDEL', 7), ('PDEL', 1), ('PDER', 8), ('PDER', 1),
        ('PVCL', 2), ('PVR', 1),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PVNL(self):
    """Distribute PVNL's synaptic output to its postsynaptic partners."""
    for target, weight in (
        ('AVAL', 2), ('AVBR', 3), ('AVDL', 3), ('AVDR', 3),
        ('AVEL', 1), ('AVFR', 1), ('AVG', 1), ('AVJL', 5),
        ('AVJR', 5), ('AVL', 2), ('BDUL', 1), ('BDUR', 2),
        ('DD1', 2), ('MVL09', 3), ('PQR', 1), ('PVCL', 1),
        ('PVNR', 5), ('PVQR', 1), ('PVT', 1), ('PVWL', 1),
        ('RIFL', 1),
    ):
        self.postSynaptic[target][self.nextState] += weight
def PVNR(self):
    """Distribute PVNR's synaptic output to its postsynaptic partners."""
    for target, weight in (
        ('AVAL', 2), ('AVBL', 1), ('AVBR', 2), ('AVDR', 1),
        ('AVEL', 3), ('AVJL', 4), ('AVJR', 1), ('AVL', 2),
        ('BDUL', 1), ('BDUR', 2), ('DD3', 1), ('HSNR', 2),
        ('MVL12', 1), ('MVL13', 2), ('PQR', 2), ('PVCL', 1),
        ('PVNL', 1), ('PVT', 2), ('PVWL', 2), ('VC2', 1),
        ('VC3', 1), ('VD12', 1), ('VD6', 1), ('VD7', 1),
    ):
        self.postSynaptic[target][self.nextState] += weight
| |
<reponame>WitnessNR/Updated_WiNR
from numba import njit
import numpy as np
import matplotlib.pyplot as plt
from solve import *
# from tensorflow.contrib.keras.api.keras.models import Sequential
# from tensorflow.contrib.keras.api.keras.layers import Dense, Dropout, Activation, Flatten, GlobalAveragePooling2D, Lambda
# from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, InputLayer, BatchNormalization, Reshape
# from tensorflow.contrib.keras.api.keras.models import load_model
# from tensorflow.contrib.keras.api.keras import backend as K
# from tensorflow.contrib.keras.api.keras.datasets import mnist, cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, GlobalAveragePooling2D, Lambda
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, InputLayer, BatchNormalization, Reshape
from tensorflow.keras.models import load_model
from tensorflow.keras.datasets import mnist, cifar10
import tensorflow as tf
from utils import generate_data_myself
import time
from activations import sigmoid_linear_bounds
from pgd_attack import *
linear_bounds = None
import random
def fn(correct, predicted):
    """Loss helper: softmax cross-entropy of `predicted` logits vs `correct` labels."""
    loss = tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                   logits=predicted)
    return loss
class CNNModel:
    """Flattens a Keras CNN into per-layer weight/bias/shape/stride/pad lists.

    Conv2D, pooling and Dense layers are all expressed in a convolutional
    form (weights stored as (out, kh, kw, in) after the final transpose);
    BatchNormalization is folded into the preceding layer's weights.

    Fix: the AveragePooling2D and MaxPooling2D branches previously tested a
    `padding` variable left over from an earlier Conv2D layer (a NameError
    if no Conv2D preceded them); each branch now reads the padding setting
    from its own layer config.
    """

    def __init__(self, model, inp_shape = (28,28,1)):
        print('-----------', inp_shape, '---------')
        temp_weights = [layer.get_weights() for layer in model.layers]
        self.weights = []
        self.biases = []
        self.shapes = []   # input shape of each stored layer (plus final output)
        self.pads = []     # (p_hl, p_hr, p_wl, p_wr) per stored layer
        self.strides = []  # (s_h, s_w) per stored layer
        self.model = model
        cur_shape = inp_shape
        self.shapes.append(cur_shape)
        for layer in model.layers:
            print(cur_shape)
            weights = layer.get_weights()
            if type(layer) == Conv2D:
                print('conv')
                if len(weights) == 1:
                    # Convolution without a bias term: substitute zeros.
                    W = weights[0].astype(np.float32)
                    b = np.zeros(W.shape[-1], dtype=np.float32)
                else:
                    W, b = weights
                    W = W.astype(np.float32)
                    b = b.astype(np.float32)
                padding = layer.get_config()['padding']
                stride = layer.get_config()['strides']
                pad = (0,0,0,0) #p_hl, p_hr, p_wl, p_wr
                if padding == 'same':
                    # Reproduce TF 'same' padding arithmetic.
                    desired_h = int(np.ceil(cur_shape[0]/stride[0]))
                    desired_w = int(np.ceil(cur_shape[0]/stride[1]))
                    total_padding_h = stride[0]*(desired_h-1)+W.shape[0]-cur_shape[0]
                    total_padding_w = stride[1]*(desired_w-1)+W.shape[1]-cur_shape[1]
                    pad = (int(np.floor(total_padding_h/2)),int(np.ceil(total_padding_h/2)),int(np.floor(total_padding_w/2)),int(np.ceil(total_padding_w/2)))
                cur_shape = (int((cur_shape[0]+pad[0]+pad[1]-W.shape[0])/stride[0])+1, int((cur_shape[1]+pad[2]+pad[3]-W.shape[1])/stride[1])+1, W.shape[-1])
                self.strides.append(stride)
                self.pads.append(pad)
                self.shapes.append(cur_shape)
                self.weights.append(W)
                self.biases.append(b)
            elif type(layer) == GlobalAveragePooling2D:
                print('global avg pool')
                # Express global average pooling as one conv averaging each
                # channel over the full spatial extent.
                b = np.zeros(cur_shape[-1], dtype=np.float32)
                W = np.zeros((cur_shape[0],cur_shape[1],cur_shape[2],cur_shape[2]), dtype=np.float32)
                for f in range(W.shape[2]):
                    W[:,:,f,f] = 1/(cur_shape[0]*cur_shape[1])
                pad = (0,0,0,0)
                stride = ((1,1))
                cur_shape = (1,1,cur_shape[2])
                self.strides.append(stride)
                self.pads.append(pad)
                self.shapes.append(cur_shape)
                self.weights.append(W)
                self.biases.append(b)
            elif type(layer) == AveragePooling2D:
                print('avg pool')
                # Average pooling as a per-channel averaging convolution.
                b = np.zeros(cur_shape[-1], dtype=np.float32)
                pool_size = layer.get_config()['pool_size']
                stride = layer.get_config()['strides']
                # FIX: read this layer's own padding instead of reusing the
                # value from a previously-seen Conv2D layer.
                padding = layer.get_config()['padding']
                W = np.zeros((pool_size[0],pool_size[1],cur_shape[2],cur_shape[2]), dtype=np.float32)
                for f in range(W.shape[2]):
                    W[:,:,f,f] = 1/(pool_size[0]*pool_size[1])
                pad = (0,0,0,0) #p_hl, p_hr, p_wl, p_wr
                if padding == 'same':
                    desired_h = int(np.ceil(cur_shape[0]/stride[0]))
                    desired_w = int(np.ceil(cur_shape[0]/stride[1]))
                    total_padding_h = stride[0]*(desired_h-1)+pool_size[0]-cur_shape[0]
                    total_padding_w = stride[1]*(desired_w-1)+pool_size[1]-cur_shape[1]
                    pad = (int(np.floor(total_padding_h/2)),int(np.ceil(total_padding_h/2)),int(np.floor(total_padding_w/2)),int(np.ceil(total_padding_w/2)))
                cur_shape = (int((cur_shape[0]+pad[0]+pad[1]-pool_size[0])/stride[0])+1, int((cur_shape[1]+pad[2]+pad[3]-pool_size[1])/stride[1])+1, cur_shape[2])
                self.strides.append(stride)
                self.pads.append(pad)
                self.shapes.append(cur_shape)
                self.weights.append(W)
                self.biases.append(b)
            elif type(layer) == Activation:
                print('activation')
            elif type(layer) == Lambda:
                print('lambda')
            elif type(layer) == InputLayer:
                print('input')
            elif type(layer) == BatchNormalization:
                print('batch normalization')
                # Fold BN into the previous layer: y = a*x + b.
                gamma, beta, mean, std = weights
                std = np.sqrt(std+0.001) #Avoids zero division
                a = gamma/std
                b = -gamma*mean/std+beta
                self.weights[-1] = a*self.weights[-1]
                self.biases[-1] = a*self.biases[-1]+b
            elif type(layer) == Dense:
                print('FC')
                # Dense as a 1x1-output convolution over the whole input.
                W, b = weights
                b = b.astype(np.float32)
                W = W.reshape(list(cur_shape)+[W.shape[-1]]).astype(np.float32)
                cur_shape = (1,1,W.shape[-1])
                self.strides.append((1,1))
                self.pads.append((0,0,0,0))
                self.shapes.append(cur_shape)
                self.weights.append(W)
                self.biases.append(b)
            elif type(layer) == Dropout:
                print('dropout')
            elif type(layer) == MaxPooling2D:
                print('pool')
                pool_size = layer.get_config()['pool_size']
                stride = layer.get_config()['strides']
                # FIX: read this layer's own padding instead of reusing the
                # value from a previously-seen Conv2D layer.
                padding = layer.get_config()['padding']
                pad = (0,0,0,0) #p_hl, p_hr, p_wl, p_wr
                if padding == 'same':
                    desired_h = int(np.ceil(cur_shape[0]/stride[0]))
                    desired_w = int(np.ceil(cur_shape[0]/stride[1]))
                    total_padding_h = stride[0]*(desired_h-1)+pool_size[0]-cur_shape[0]
                    total_padding_w = stride[1]*(desired_w-1)+pool_size[1]-cur_shape[1]
                    pad = (int(np.floor(total_padding_h/2)),int(np.ceil(total_padding_h/2)),int(np.floor(total_padding_w/2)),int(np.ceil(total_padding_w/2)))
                cur_shape = (int((cur_shape[0]+pad[0]+pad[1]-pool_size[0])/stride[0])+1, int((cur_shape[1]+pad[2]+pad[3]-pool_size[1])/stride[1])+1, cur_shape[2])
                self.strides.append(stride)
                self.pads.append(pad)
                self.shapes.append(cur_shape)
                # Max-pool has no real weights; NaN placeholders keep the
                # per-layer lists aligned.
                self.weights.append(np.full(pool_size+(1,1),np.nan,dtype=np.float32))
                self.biases.append(np.full(1,np.nan,dtype=np.float32))
            elif type(layer) == Flatten:
                print('flatten')
            elif type(layer) == Reshape:
                print('reshape')
            else:
                print(str(type(layer)))
                raise ValueError('Invalid Layer Type')
        print(cur_shape)
        # Reorder all weights to (out_channels, kh, kw, in_channels) and make
        # them contiguous for the numba kernels.
        for i in range(len(self.weights)):
            self.weights[i] = np.ascontiguousarray(self.weights[i].transpose((3,0,1,2)).astype(np.float32))
            self.biases[i] = np.ascontiguousarray(self.biases[i].astype(np.float32))

    def predict(self, data):
        """Run the wrapped Keras model on `data`."""
        return self.model(data)
@njit
def conv(W, x, pad, stride):
    """Explicitly-padded strided convolution of x with filter bank W.

    W: (out_channels, kh, kw, in_channels) — the layout produced by
    CNNModel's final transpose. x: (h, w, in_channels).
    pad: (p_hl, p_hr, p_wl, p_wr); stride: (s_h, s_w).
    Returns a float32 array (out_h, out_w, out_channels).
    """
    p_hl, p_hr, p_wl, p_wr = pad
    s_h, s_w = stride
    # Output spatial size from standard convolution arithmetic.
    y = np.zeros((int((x.shape[0]-W.shape[1]+p_hl+p_hr)/s_h)+1, int((x.shape[1]-W.shape[2]+p_wl+p_wr)/s_w)+1, W.shape[0]), dtype=np.float32)
    for a in range(y.shape[0]):
        for b in range(y.shape[1]):
            for c in range(y.shape[2]):
                for i in range(W.shape[1]):
                    for j in range(W.shape[2]):
                        for k in range(W.shape[3]):
                            # Skip filter taps that fall in the zero padding.
                            if 0<=s_h*a+i-p_hl<x.shape[0] and 0<=s_w*b+j-p_wl<x.shape[1]:
                                y[a,b,c] += W[c,i,j,k]*x[s_h*a+i-p_hl,s_w*b+j-p_wl,k]
    return y
@njit
def pool(pool_size, x0, pad, stride):
    """Max-pool x0 with the given window, padding and stride.

    x0: (h, w, channels); pad: (p_hl, p_hr, p_wl, p_wr); stride: (s_h, s_w).
    Returns a float32 array with the pooled spatial size.

    Fix: the window was previously cropped from an undefined global `LB`
    instead of the `x0` argument (which was never used), making the
    function uncompilable/incorrect; it now pools over `x0`.
    """
    p_hl, p_hr, p_wl, p_wr = pad
    s_h, s_w = stride
    y0 = np.zeros((int((x0.shape[0]+p_hl+p_hr-pool_size[0])/s_h)+1, int((x0.shape[1]+p_wl+p_wr-pool_size[1])/s_w)+1, x0.shape[2]), dtype=np.float32)
    for x in range(y0.shape[0]):
        for y in range(y0.shape[1]):
            for r in range(y0.shape[2]):
                # FIX: crop from x0 (was: undefined global LB).
                cropped = x0[s_h*x-p_hl:pool_size[0]+s_h*x-p_hl, s_w*y-p_wl:pool_size[1]+s_w*y-p_wl,r]
                y0[x,y,r] = cropped.max()
    return y0
@njit
def conv_bound(W, b, pad, stride, x0, eps, p_n):
    """Interval bounds of a conv layer's output over an eps-ball around x0.

    The ball is in the p-norm encoded by p_n (105 encodes p=infinity; 1 and
    2 are taken literally), and the bound uses the dual norm of each output
    filter: output = mid +/- eps * ||W_k||_q.
    Returns (LB, UB), both float32 and shaped like the conv output.
    NOTE(review): if p_n is not one of {105, 1, 2}, `dualnorm` is never
    assigned — callers are assumed to pass only these values.
    """
    y0 = conv(W, x0, pad, stride)
    UB = np.zeros(y0.shape, dtype=np.float32)
    LB = np.zeros(y0.shape, dtype=np.float32)
    for k in range(W.shape[0]):
        if p_n == 105: # p == "i", q = 1
            dualnorm = np.sum(np.abs(W[k,:,:,:]))
        elif p_n == 1: # p = 1, q = i
            dualnorm = np.max(np.abs(W[k,:,:,:]))
        elif p_n == 2: # p = 2, q = 2
            dualnorm = np.sqrt(np.sum(W[k,:,:,:]**2))
        mid = y0[:,:,k]+b[k]
        UB[:,:,k] = mid+eps*dualnorm
        LB[:,:,k] = mid-eps*dualnorm
    return LB, UB
@njit
def conv_full(A, x, pad, stride):
    """Apply a position-dependent linear map A to x.

    Unlike `conv`, the filter varies per output location:
    A[a,b,c,i,j,k] is the coefficient linking output (a,b,c) to input tap
    (i,j,k) of its receptive field. Returns float32 (A.shape[0..2]).
    """
    p_hl, p_hr, p_wl, p_wr = pad
    s_h, s_w = stride
    y = np.zeros((A.shape[0], A.shape[1], A.shape[2]), dtype=np.float32)
    for a in range(y.shape[0]):
        for b in range(y.shape[1]):
            for c in range(y.shape[2]):
                for i in range(A.shape[3]):
                    for j in range(A.shape[4]):
                        for k in range(A.shape[5]):
                            # Skip taps that fall in the zero padding.
                            if 0<=s_h*a+i-p_hl<x.shape[0] and 0<=s_w*b+j-p_wl<x.shape[1]:
                                y[a,b,c] += A[a,b,c,i,j,k]*x[s_h*a+i-p_hl,s_w*b+j-p_wl,k]
    return y
@njit
def conv_bound_full(A, B, pad, stride, x0, eps, p_n):
    """Interval bounds of the affine map (A, B) over an eps-ball around x0.

    Per-output-location analogue of `conv_bound`: for each output (a,b,c)
    the bound is mid +/- eps * ||A[a,b,c]||_q, with the dual norm q chosen
    by p_n (105 encodes p=infinity).
    Returns (LB, UB) as float32 arrays shaped like the map output.
    NOTE(review): p_n outside {105, 1, 2} leaves `dualnorm` unassigned.
    """
    y0 = conv_full(A, x0, pad, stride)
    UB = np.zeros(y0.shape, dtype=np.float32)
    LB = np.zeros(y0.shape, dtype=np.float32)
    for a in range(y0.shape[0]):
        for b in range(y0.shape[1]):
            for c in range(y0.shape[2]):
                if p_n == 105: # p == "i", q = 1
                    dualnorm = np.sum(np.abs(A[a,b,c,:,:,:]))
                elif p_n == 1: # p = 1, q = i
                    dualnorm = np.max(np.abs(A[a,b,c,:,:,:]))
                elif p_n == 2: # p = 2, q = 2
                    dualnorm = np.sqrt(np.sum(A[a,b,c,:,:,:]**2))
                mid = y0[a,b,c]+B[a,b,c]
                UB[a,b,c] = mid+eps*dualnorm
                LB[a,b,c] = mid-eps*dualnorm
    return LB, UB
@njit
def upper_bound_conv(A, B, pad, stride, W, b, inner_pad, inner_stride, inner_shape, LB, UB):
    """Back-propagate a linear UPPER bound through one conv + activation.

    Given an affine upper bound (A, B) on later layers, composes it with
    this layer's filter W, bias b and the activation's linear relaxation
    (alpha_u/alpha_l, beta_u/beta_l from the module-level `linear_bounds`,
    which must be set before calling) to obtain a new (A_new, B_new) in
    terms of this layer's input. Positive coefficients of A take the upper
    relaxation, negative take the lower.
    LB/UB are the pre-activation bounds of this layer's output.
    """
    A_new = np.zeros((A.shape[0], A.shape[1], A.shape[2], inner_stride[0]*(A.shape[3]-1)+W.shape[1], inner_stride[1]*(A.shape[4]-1)+W.shape[2], W.shape[3]), dtype=np.float32)
    B_new = np.zeros(B.shape, dtype=np.float32)
    A_plus = np.maximum(A, 0)
    A_minus = np.minimum(A, 0)
    alpha_u, alpha_l, beta_u, beta_l = linear_bounds(LB, UB)
    assert A.shape[5] == W.shape[0]
    for x in range(A_new.shape[0]):
        for y in range(A_new.shape[1]):
            for t in range(A_new.shape[3]):
                for u in range(A_new.shape[4]):
                    # Only positions that land inside the inner layer's input.
                    if 0<=t+stride[0]*inner_stride[0]*x-inner_stride[0]*pad[0]-inner_pad[0]<inner_shape[0] and 0<=u+stride[1]*inner_stride[1]*y-inner_stride[1]*pad[2]-inner_pad[2]<inner_shape[1]:
                        for p in range(A.shape[3]):
                            for q in range(A.shape[4]):
                                if 0<=t-inner_stride[0]*p<W.shape[1] and 0<=u-inner_stride[1]*q<W.shape[2] and 0<=p+stride[0]*x-pad[0]<alpha_u.shape[0] and 0<=q+stride[1]*y-pad[2]<alpha_u.shape[1]:
                                    for z in range(A_new.shape[2]):
                                        for v in range(A_new.shape[5]):
                                            for r in range(W.shape[0]):
                                                A_new[x,y,z,t,u,v] += W[r,t-inner_stride[0]*p,u-inner_stride[1]*q,v]*alpha_u[p+stride[0]*x-pad[0],q+stride[1]*y-pad[2],r]*A_plus[x,y,z,p,q,r]
                                                A_new[x,y,z,t,u,v] += W[r,t-inner_stride[0]*p,u-inner_stride[1]*q,v]*alpha_l[p+stride[0]*x-pad[0],q+stride[1]*y-pad[2],r]*A_minus[x,y,z,p,q,r]
    B_new = conv_full(A_plus,alpha_u*b+beta_u,pad,stride) + conv_full(A_minus,alpha_l*b+beta_l,pad,stride)+B
    return A_new, B_new
@njit
def lower_bound_conv(A, B, pad, stride, W, b, inner_pad, inner_stride, inner_shape, LB, UB):
    """Back-propagate a linear LOWER bound through one conv + activation.

    Mirror of `upper_bound_conv` with the relaxations swapped: positive
    coefficients of A take the lower relaxation (alpha_l/beta_l), negative
    take the upper. Requires the module-level `linear_bounds` to be set.
    """
    A_new = np.zeros((A.shape[0], A.shape[1], A.shape[2], inner_stride[0]*(A.shape[3]-1)+W.shape[1], inner_stride[1]*(A.shape[4]-1)+W.shape[2], W.shape[3]), dtype=np.float32)
    B_new = np.zeros(B.shape, dtype=np.float32)
    A_plus = np.maximum(A, 0)
    A_minus = np.minimum(A, 0)
    alpha_u, alpha_l, beta_u, beta_l = linear_bounds(LB, UB)
    assert A.shape[5] == W.shape[0]
    for x in range(A_new.shape[0]):
        for y in range(A_new.shape[1]):
            for t in range(A_new.shape[3]):
                for u in range(A_new.shape[4]):
                    # Only positions that land inside the inner layer's input.
                    if 0<=t+stride[0]*inner_stride[0]*x-inner_stride[0]*pad[0]-inner_pad[0]<inner_shape[0] and 0<=u+stride[1]*inner_stride[1]*y-inner_stride[1]*pad[2]-inner_pad[2]<inner_shape[1]:
                        for p in range(A.shape[3]):
                            for q in range(A.shape[4]):
                                if 0<=t-inner_stride[0]*p<W.shape[1] and 0<=u-inner_stride[1]*q<W.shape[2] and 0<=p+stride[0]*x-pad[0]<alpha_u.shape[0] and 0<=q+stride[1]*y-pad[2]<alpha_u.shape[1]:
                                    for z in range(A_new.shape[2]):
                                        for v in range(A_new.shape[5]):
                                            for r in range(W.shape[0]):
                                                A_new[x,y,z,t,u,v] += W[r,t-inner_stride[0]*p,u-inner_stride[1]*q,v]*alpha_l[p+stride[0]*x-pad[0],q+stride[1]*y-pad[2],r]*A_plus[x,y,z,p,q,r]
                                                A_new[x,y,z,t,u,v] += W[r,t-inner_stride[0]*p,u-inner_stride[1]*q,v]*alpha_u[p+stride[0]*x-pad[0],q+stride[1]*y-pad[2],r]*A_minus[x,y,z,p,q,r]
    B_new = conv_full(A_plus,alpha_l*b+beta_l,pad,stride) + conv_full(A_minus,alpha_u*b+beta_u,pad,stride)+B
    return A_new, B_new
@njit
def pool_linear_bounds(LB, UB, pad, stride, pool_size):
    """Linear relaxation of max-pooling given per-element bounds LB/UB.

    For each pooling window, returns coefficients (alpha_u, alpha_l) over
    the window taps and offsets (beta_u, beta_l) such that
    sum(alpha * window) + beta upper/lower-bounds the window max.
    alpha_* are shaped (kh, kw, out_h, out_w, channels); beta_* are
    (out_h, out_w, channels).
    """
    p_hl, p_hr, p_wl, p_wr = pad
    s_h, s_w = stride
    alpha_u = np.zeros((pool_size[0], pool_size[1], int((UB.shape[0]+p_hl+p_hr-pool_size[0])/s_h)+1, int((UB.shape[1]+p_wl+p_wr-pool_size[1])/s_w)+1, UB.shape[2]), dtype=np.float32)
    beta_u = np.zeros((int((UB.shape[0]+p_hl+p_hr-pool_size[0])/s_h)+1, int((UB.shape[1]+p_wl+p_wr-pool_size[1])/s_w)+1, UB.shape[2]), dtype=np.float32)
    alpha_l = np.zeros((pool_size[0], pool_size[1], int((LB.shape[0]+p_hl+p_hr-pool_size[0])/s_h)+1, int((LB.shape[1]+p_wl+p_wr-pool_size[1])/s_w)+1, LB.shape[2]), dtype=np.float32)
    beta_l = np.zeros((int((LB.shape[0]+p_hl+p_hr-pool_size[0])/s_h)+1, int((LB.shape[1]+p_wl+p_wr-pool_size[1])/s_w)+1, LB.shape[2]), dtype=np.float32)
    for x in range(alpha_u.shape[2]):
        for y in range(alpha_u.shape[3]):
            for r in range(alpha_u.shape[4]):
                cropped_LB = LB[s_h*x-p_hl:pool_size[0]+s_h*x-p_hl, s_w*y-p_wl:pool_size[1]+s_w*y-p_wl,r]
                cropped_UB = UB[s_h*x-p_hl:pool_size[0]+s_h*x-p_hl, s_w*y-p_wl:pool_size[1]+s_w*y-p_wl,r]
                max_LB = cropped_LB.max()
                # Only taps that could attain the max matter.
                idx = np.where(cropped_UB>=max_LB)
                u_s = np.zeros(len(idx[0]), dtype=np.float32)
                l_s = np.zeros(len(idx[0]), dtype=np.float32)
                gamma = np.inf
                for i in range(len(idx[0])):
                    l_s[i] = cropped_LB[idx[0][i],idx[1][i]]
                    u_s[i] = cropped_UB[idx[0][i],idx[1][i]]
                    if l_s[i] == u_s[i]:
                        gamma = l_s[i]
                if gamma == np.inf:
                    # No tap is fixed: pick gamma that equalizes slopes,
                    # clamped into [max(l_s), min(u_s)].
                    gamma = (np.sum(u_s/(u_s-l_s))-1)/np.sum(1/(u_s-l_s))
                    if gamma < np.max(l_s):
                        gamma = np.max(l_s)
                    elif gamma > np.min(u_s):
                        gamma = np.min(u_s)
                    weights = ((u_s-gamma)/(u_s-l_s)).astype(np.float32)
                else:
                    # Some tap has l == u: give zero-width taps the
                    # remaining weight split evenly, clamped to [0, 1].
                    weights = np.zeros(len(idx[0]), dtype=np.float32)
                    w_partial_sum = 0
                    num_equal = 0
                    for i in range(len(idx[0])):
                        if l_s[i] != u_s[i]:
                            weights[i] = (u_s[i]-gamma)/(u_s[i]-l_s[i])
                            w_partial_sum += weights[i]
                        else:
                            num_equal += 1
                    gap = (1-w_partial_sum)/num_equal
                    if gap < 0.0:
                        gap = 0.0
                    elif gap > 1.0:
                        gap = 1.0
                    for i in range(len(idx[0])):
                        if l_s[i] == u_s[i]:
                            weights[i] = gap
                # Scatter the per-tap weights back into the window layout.
                for i in range(len(idx[0])):
                    t = idx[0][i]
                    u = idx[1][i]
                    alpha_u[t,u,x,y,r] = weights[i]
                    alpha_l[t,u,x,y,r] = weights[i]
                beta_u[x,y,r] = gamma-np.dot(weights, l_s)
                growth_rate = np.sum(weights)
                if growth_rate <= 1:
                    beta_l[x,y,r] = np.min(l_s)*(1-growth_rate)
                else:
                    beta_l[x,y,r] = np.max(u_s)*(1-growth_rate)
    return alpha_u, alpha_l, beta_u, beta_l
@njit
def upper_bound_pool(A, B, pad, stride, pool_size, inner_pad, inner_stride, inner_shape, LB, UB):
A_new = np.zeros((A.shape[0], A.shape[1], A.shape[2], inner_stride[0]*(A.shape[3]-1)+pool_size[0], inner_stride[1]*(A.shape[4]-1)+pool_size[1], A.shape[5]), dtype=np.float32)
B_new = np.zeros(B.shape, dtype=np.float32)
A_plus = np.maximum(A, 0)
A_minus = np.minimum(A, 0)
alpha_u, alpha_l, beta_u, beta_l = pool_linear_bounds(LB, UB, inner_pad, inner_stride, pool_size)
for x in range(A_new.shape[0]):
for y in range(A_new.shape[1]):
for t in range(A_new.shape[3]):
for u in range(A_new.shape[4]):
inner_index_x = t+stride[0]*inner_stride[0]*x-inner_stride[0]*pad[0]-inner_pad[0]
inner_index_y = u+stride[1]*inner_stride[1]*y-inner_stride[1]*pad[2]-inner_pad[2]
if 0<=inner_index_x<inner_shape[0] and 0<=inner_index_y<inner_shape[1]:
for p in range(A.shape[3]):
for q in | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2015 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the 'License' );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import cherrypy
from jsonpath_rw import parse
from bson.objectid import ObjectId
from girder.api import access
from girder.api.describe import Description
from girder.constants import AccessType
from girder.api.docs import addModel
from girder.api.rest import RestException, getCurrentUser, getBodyJson
from girder.api.rest import loadmodel
from girder.utility.model_importer import ModelImporter
from .base import BaseResource
import cumulus
from cumulus.constants import VolumeType
from cumulus.constants import VolumeState
from cumulus.common.girder import get_task_token, _get_profile, \
send_status_notification
import cumulus.ansible.tasks.volume
from cumulus.ansible.tasks.providers import CloudProvider, InstanceState
class Volume(BaseResource):
def __init__(self):
    """Register the /volumes REST endpoints and load the cumulus volume model."""
    super(Volume, self).__init__()
    self.resourceName = 'volumes'
    route_table = (
        ('POST', (), self.create),
        ('GET', (':id', ), self.get),
        ('PATCH', (':id', ), self.patch),
        ('GET', (), self.find),
        ('GET', (':id', 'status'), self.get_status),
        ('POST', (':id', 'log'), self.append_to_log),
        ('GET', (':id', 'log'), self.log),
        ('PUT', (':id', 'clusters', ':clusterId', 'attach'), self.attach),
        ('PUT', (':id', 'clusters', ':clusterId', 'attach', 'complete'),
         self.attach_complete),
        ('PUT', (':id', 'detach'), self.detach),
        ('PUT', (':id', 'detach', 'complete'), self.detach_complete),
        ('DELETE', (':id', ), self.delete),
        ('PUT', (':id', 'delete', 'complete'), self.delete_complete),
    )
    for http_method, path, handler in route_table:
        self.route(http_method, path, handler)
    self._model = ModelImporter.model('volume', 'cumulus')
def _create_ebs(self, body, zone):
    """Create an EBS volume record for the current user.

    Reads name, size, optional fs and profileId from the request body and
    delegates to the volume model.
    """
    user = getCurrentUser()
    name = body['name']
    size = body['size']
    fs = body.get('fs', None)
    profile_id = body['profileId']
    return self._model.create_ebs(user, profile_id, name, zone, size, fs)
@access.user
@loadmodel(model='volume', plugin='cumulus', level=AccessType.WRITE)
def patch(self, volume, params):
    """Partially update a volume from the JSON request body.

    Merges the nested 'ec2' dict and copies the mutable top-level fields
    ('status', 'msg', 'path'), then persists via the volume model and
    returns the user-filtered document.
    """
    body = getBodyJson()

    if not volume:
        raise RestException('Volume not found.', code=404)

    # Merge (not replace) the nested ec2 properties.
    if 'ec2' in body:
        if 'ec2' not in volume:
            volume['ec2'] = {}
        volume['ec2'].update(body['ec2'])

    # Only these top-level fields may be patched.
    mutable = ['status', 'msg', 'path']
    for k in mutable:
        if k in body:
            volume[k] = body[k]

    user = getCurrentUser()
    volume = self._model.update_volume(user, volume)

    return self._model.filter(volume, user)

patch.description = (
    Description('Patch a volume')
    .param(
        'id',
        'The volume id.', paramType='path', required=True)
    .param(
        'body',
        'The properties to use to create the volume.',
        required=True, paramType='body'))
@access.user
def create(self, params):
    """Create a new volume (EBS) for the current user.

    Validates required fields and the volume type, resolves the AWS
    profile, chooses an availability zone (body override, else the
    profile's default), creates the volume and responds 201 with a
    Location header.
    """
    body = getBodyJson()
    self.requireParams(['name', 'type', 'size', 'profileId'], body)

    if not VolumeType.is_valid_type(body['type']):
        raise RestException('Invalid volume type.', code=400)

    # jsonpath lookup returns a list of matches (empty if absent).
    profile_id = parse('profileId').find(body)
    if not profile_id:
        raise RestException('A profile id must be provided', 400)

    profile_id = profile_id[0].value

    profile, secret_key = _get_profile(profile_id)

    if not profile:
        raise RestException('Invalid profile', 400)

    if 'zone' in body:
        zone = body['zone']
    else:
        zone = profile['availabilityZone']

    volume = self._create_ebs(body, zone)

    cherrypy.response.status = 201
    cherrypy.response.headers['Location'] = '/volumes/%s' % volume['_id']

    return self._model.filter(volume, getCurrentUser())

# Swagger model describing the expected request body.
addModel('VolumeParameters', {
    'id': 'VolumeParameters',
    'required': ['name', 'config', 'type', 'zone', 'size'],
    'properties': {
        'name': {'type': 'string',
                 'description': 'The name to give the cluster.'},
        'profileId': {'type': 'string',
                      'description': 'Id of profile to use'},
        'type': {'type': 'string',
                 'description': 'The type of volume to create ( currently '
                 'only esb )'},
        'zone': {'type': 'string',
                 'description': 'The availability region'},
        'size': {'type': 'integer',
                 'description': 'The size of the volume to create'}
    },
}, 'volumes')

create.description = (
    Description('Create a volume')
    .param(
        'body',
        'The properties to use to create the volume.',
        dataType='VolumeParameters',
        required=True, paramType='body'))
@access.user
@loadmodel(model='volume', plugin='cumulus', level=AccessType.READ)
def get(self, volume, params):
    """Return a single volume, filtered for the current user."""
    current_user = getCurrentUser()
    return self._model.filter(volume, current_user)

get.description = (
    Description('Get a volume')
    .param(
        'id',
        'The volume id.', paramType='path', required=True))
@access.user
def find(self, params):
    """List volumes the user can administer, optionally filtered by clusterId."""
    user = getCurrentUser()

    query = {}
    if 'clusterId' in params:
        query['clusterId'] = ObjectId(params['clusterId'])
    limit = int(params.get('limit', 50))

    matches = list(self._model.find(query=query))
    matches = self._model.filterResultsByPermission(
        matches, user, AccessType.ADMIN, limit=limit)

    return [self._model.filter(doc, user) for doc in matches]

find.description = (
    Description('Search for volumes')
    .param('limit', 'The max number of volumes to return',
           paramType='query', required=False, default=50))
@access.user
@loadmodel(map={'clusterId': 'cluster'}, model='cluster', plugin='cumulus',
           level=AccessType.ADMIN)
@loadmodel(model='volume', plugin='cumulus', level=AccessType.ADMIN)
def attach_complete(self, volume, cluster, params):
    """Finalize a volume attach: record the mount path on success.

    Expects 'path' either as a query param or in the JSON body. On
    success the volume is marked in-use, linked to the cluster and the
    cluster's volume list is updated (deduplicated); otherwise the
    volume is put into the error state with a message.
    """
    user = getCurrentUser()
    path = params.get('path', None)

    # Is path being passed in as apart of the body json?
    if path is None:
        path = getBodyJson().get('path', None)

    if path is not None:
        cluster.setdefault('volumes', [])
        cluster['volumes'].append(volume['_id'])
        # Deduplicate the cluster's volume list.
        cluster['volumes'] = list(set(cluster['volumes']))

        volume['status'] = VolumeState.INUSE
        volume['path'] = path

        # TODO: removing msg should be refactored into
        # a general purpose 'update_status' function
        # on the volume model.  This way msg only referes
        # to the current status.
        try:
            del volume['msg']
        except KeyError:
            pass

        # Add cluster id to volume
        volume['clusterId'] = cluster['_id']

        ModelImporter.model('cluster', 'cumulus').save(cluster)
        self._model.update_volume(user, volume)
    else:
        volume['status'] = VolumeState.ERROR
        volume['msg'] = 'Volume path was not communicated on complete'
        self._model.update_volume(user, volume)

attach_complete.description = None
@access.user
@loadmodel(map={'clusterId': 'cluster'}, model='cluster', plugin='cumulus',
           level=AccessType.ADMIN)
@loadmodel(model='volume', plugin='cumulus', level=AccessType.ADMIN)
def attach(self, volume, cluster, params):
    # Attach this volume to the cluster's master instance.  The actual
    # work is queued as an asynchronous ansible task; this endpoint only
    # validates preconditions and records the ATTACHING status.
    body = getBodyJson()
    self.requireParams(['path'], body)
    path = body['path']
    # Look up the cloud profile (credentials) referenced by the volume.
    profile_id = parse('profileId').find(volume)[0].value
    profile, secret_key = _get_profile(profile_id)
    # Connection details the task uses to report progress back to girder.
    girder_callback_info = {
        'girder_api_url': cumulus.config.girder.baseUrl,
        'girder_token': get_task_token()['_id']}
    log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                           volume['_id'])
    p = CloudProvider(dict(secretAccessKey=secret_key, **profile))
    aws_volume = p.get_volume(volume)
    # If volume exists it needs to be available to be attached. If
    # it doesn't exist it will be created as part of the attach
    # playbook.
    if aws_volume is not None and \
            aws_volume['state'] != VolumeState.AVAILABLE:
        raise RestException('This volume is not available to attach '
                            'to a cluster',
                            400)
    master = p.get_master_instance(cluster['_id'])
    if master['state'] != InstanceState.RUNNING:
        raise RestException('Master instance is not running!',
                            400)
    # Strip the passphrase before handing the cluster doc to the task.
    cluster = ModelImporter.model('cluster', 'cumulus').filter(
        cluster, getCurrentUser(), passphrase=False)
    # Queue the playbook; attach_complete() is invoked on completion.
    cumulus.ansible.tasks.volume.attach_volume\
        .delay(profile, cluster, master,
               self._model.filter(volume, getCurrentUser()), path,
               secret_key, log_write_url, girder_callback_info)
    volume['status'] = VolumeState.ATTACHING
    volume = self._model.update_volume(getCurrentUser(), volume)
    return self._model.filter(volume, getCurrentUser())

# Swagger/API-docs model describing the JSON body accepted by attach().
addModel('AttachParameters', {
    'id': 'AttachParameters',
    'required': ['path'],
    'properties': {
        'path': {'type': 'string',
                 'description': 'The path to mount the volume'}
    }
}, 'volumes')

attach.description = (
    Description('Attach a volume to a cluster')
    .param(
        'id',
        'The id of the volume to attach', required=True,
        paramType='path')
    .param(
        'clusterId',
        'The cluster to attach the volume to.', required=True,
        paramType='path')
    .param(
        'body',
        'The properties to template on submit.',
        dataType='AttachParameters',
        paramType='body'))
@access.user
@loadmodel(model='volume', plugin='cumulus', level=AccessType.ADMIN)
def detach(self, volume, params):
    # Detach this volume from its cluster.  Like attach(), the real work
    # runs in an asynchronous ansible task; this endpoint validates that
    # the volume really is attached and the master node is up.
    profile_id = parse('profileId').find(volume)[0].value
    profile, secret_key = _get_profile(profile_id)
    # Connection details the task uses to report progress back to girder.
    girder_callback_info = {
        'girder_api_url': cumulus.config.girder.baseUrl,
        'girder_token': get_task_token()['_id']}
    log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                           volume['_id'])
    p = CloudProvider(dict(secretAccessKey=secret_key, **profile))
    aws_volume = p.get_volume(volume)
    # EC2 must report the volume as in use, and our own record must know
    # which cluster and mount path it is attached to.
    if aws_volume is None or aws_volume['state'] != VolumeState.INUSE:
        raise RestException('This volume is not attached '
                            'to a cluster',
                            400)
    if 'clusterId' not in volume:
        raise RestException('clusterId is not set on this volume!', 400)
    try:
        volume['path']
    except KeyError:
        raise RestException('path is not set on this volume!', 400)
    cluster = ModelImporter.model('cluster', 'cumulus').load(volume['clusterId'],
                                                             user=getCurrentUser(),
                                                             level=AccessType.ADMIN)
    master = p.get_master_instance(cluster['_id'])
    if master['state'] != InstanceState.RUNNING:
        raise RestException('Master instance is not running!',
                            400)
    user = getCurrentUser()
    # Strip the passphrase before handing the cluster doc to the task.
    cluster = ModelImporter.model('cluster', 'cumulus').filter(
        cluster, user, passphrase=False)
    # Queue the playbook; detach_complete() is invoked on completion.
    cumulus.ansible.tasks.volume.detach_volume\
        .delay(profile, cluster, master,
               self._model.filter(volume, user),
               secret_key, log_write_url, girder_callback_info)
    volume['status'] = VolumeState.DETACHING
    volume = self._model.update_volume(user, volume)
    return self._model.filter(volume, user)

detach.description = (
    Description('Detach a volume from a cluster')
    .param(
        'id',
        'The id of the attached volume', required=True,
        paramType='path'))
@access.user
@loadmodel(model='volume', plugin='cumulus', level=AccessType.ADMIN)
def detach_complete(self, volume, params):
    """
    Callback fired when the detach playbook finishes: unlink the volume
    from its cluster and mark it AVAILABLE again.
    """
    # First remove from cluster
    user = getCurrentUser()
    cluster = ModelImporter.model('cluster', 'cumulus').load(volume['clusterId'],
                                                             user=user,
                                                             level=AccessType.ADMIN)
    # Tolerate the id already being gone so a repeated callback doesn't
    # blow up with ValueError (list.remove raises on a missing element).
    cluster_volumes = cluster.setdefault('volumes', [])
    if volume['_id'] in cluster_volumes:
        cluster_volumes.remove(volume['_id'])
    # Drop all attachment-related bookkeeping from the volume document.
    for attr in ['clusterId', 'path', 'msg']:
        volume.pop(attr, None)
    volume['status'] = VolumeState.AVAILABLE
    ModelImporter.model('cluster', 'cumulus').save(cluster)
    self._model.save(volume)
    send_status_notification('volume', volume)

detach_complete.description = None
@access.user
@loadmodel(model='volume', plugin='cumulus', level=AccessType.ADMIN)
def delete(self, volume, params):
    """
    Delete a volume.  Volumes still attached to a cluster are refused.
    Volumes that never reached EC2 are removed directly; otherwise an
    asynchronous ansible task deletes the EC2 volume and the girder
    document is removed later in delete_complete().
    """
    if 'clusterId' in volume:
        raise RestException('Unable to delete attached volume')
    # If the volume is in state created and it has no ec2 volume id
    # associated with it, we should be able to just delete it
    if volume['status'] in (VolumeState.CREATED, VolumeState.ERROR):
        if 'id' in volume['ec2'] and volume['ec2']['id'] is not None:
            raise RestException(
                'Unable to delete volume, it is '
                'associated with an ec2 volume %s' % volume['ec2']['id'])
        self._model.remove(volume)
        return None
    log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                           volume['_id'])
    # Call EC2 to delete volume
    profile_id = parse('profileId').find(volume)[0].value
    profile, secret_key = _get_profile(profile_id)
    girder_callback_info = {
        'girder_api_url': cumulus.config.girder.baseUrl,
        'girder_token': get_task_token()['_id']}
    p = CloudProvider(dict(secretAccessKey=secret_key, **profile))
    aws_volume = p.get_volume(volume)
    # get_volume() may return None when EC2 doesn't know the volume
    # (attach/detach already guard for this); without the None check the
    # subscript below raised TypeError instead of a clean 400.
    if aws_volume is None or aws_volume['state'] != VolumeState.AVAILABLE:
        raise RestException(
            'Volume must be in an "%s" status to be deleted'
            % VolumeState.AVAILABLE, 400)
    user = getCurrentUser()
    # Queue the playbook; delete_complete() is invoked on completion.
    cumulus.ansible.tasks.volume.delete_volume\
        .delay(profile, self._model.filter(volume, user),
               secret_key, log_write_url, girder_callback_info)
    volume['status'] = VolumeState.DELETING
    volume = self._model.update_volume(user, volume)
    return self._model.filter(volume, user)

delete.description = (
    Description('Delete a volume')
    .param('id', 'The volume id.', paramType='path', required=True))
@access.user
@loadmodel(model='volume', plugin='cumulus', level=AccessType.ADMIN)
def delete_complete(self, volume, params):
    # Callback from the delete task: the EC2 volume is gone, so drop the
    # girder document as well.
    self._model.remove(volume)
delete_complete.description = None
@access.user
@loadmodel(model='volume', plugin='cumulus', level=AccessType.ADMIN)
def get_status(self, volume, params):
    # Lightweight polling endpoint: expose only the status field.
    return {'status': volume['status']}
get_status.description = (
Description('Get the status | |
continue
if line.__contains__("Not owner") or \
line.__contains__(" Rewind of device 40") or \
line.__contains__("Stage failed, all retries exhausted") or \
line.__contains__("Request for locked or disabled device") or \
line.__contains__('Unexpected error in LTO Library') or \
line.__contains__('LTO I/O failure') or \
line.__contains__('"sendIOD"') or \
line.__contains__('hpss_RPCGetReply') or \
line.__contains__("gk_Cleanup") or \
line.__contains__("gk_Close failed") or \
line.__contains__('Invalid parameters passed to LTO Library') or \
line.__contains__('Open of device 4') or \
line.__contains__('pos failed on dev 4') or \
line.__contains__('Retrying stage from level') or \
line.__contains__('Cartridge not found in LTO library') or \
line.__contains__("Read of label on") or \
line.__contains__('Can not find the PVL') or \
line.__contains__('all retries exhausted') or \
line.__contains__('locked by non-HPSS') or \
line.__contains__('SCSI ') or \
line.__contains__('hpss_RPCSendRequest') or \
line.__contains__('Forward space file failed') or \
line.__contains__('Verification of label on dev') or \
line.__contains__('Open of device') or \
line.__contains__('No space left on device') or \
line.__contains__('Metadata manager error') or \
line.__contains__('Connection refused') or \
line.__contains__('Connection timed out') or \
line.__contains__('ACSLM spawned process') or \
line.__contains__('Cannot Establish Connection') or \
line.__contains__('VV metadata') or \
line.__contains__('Open of delog') or \
line.__contains__('database deadlock condition') or \
line.__contains__('to execute stateme') or \
line.__contains__('LOCKING the DRIVE') or \
line.__contains__('Returned, function') or\
line.__contains__('storage service start') or \
line.__contains__('Invalid session') or \
line.__contains__('repair to server') or \
line.__contains__('MM error'):
continue # minor
if line.__contains__(' WARN '):
# drive = "102100", arg = "XB063500"
#m = re_warning.match(line)
#if m:
#self.handle_warning(epoch, m.groups()[0], m.groups()[1][:6])
m2 = re_warn_drive.match(line)
if m2:
drive = m2.groups()[0]
self.handle_event(FSM_EVNT_FATALERROR_1, {'drive':drive,'epoch':epoch})
continue
m3 = re_warn_d1.match(line)
if m3:
cartridge_id = m3.groups()[1][:6]
drive = m3.groups()[0]
#self.handle_event(FSM_EVNT_FATALERROR_1, {'drive':drive, 'cid':cartridge_id, 'epoch':epoch})
continue
if line.__contains__("will retry in another drive,"):
m = re_crtanddrv.match(line)
if m:
crt = m.groups()[0]
drv = m.groups()[1]
self.handle_event(FSM_EVNT_D2DMV, {'drive':drv, 'epoch':epoch, 'cid':crt})
continue
if line.__contains__('"read_label"') or \
line.__contains__('are disabled,') or \
line.__contains__("LOCKING the DRIVE will exit the dismount loop") or \
line.__contains__(' no response from robot') or \
line.__contains__('rtm_GetRequestEntries') or \
line.__contains__('NOT exist in DriveTable') or \
line.__contains__('cartridge = "MP') or \
line.__contains__('label written') or \
line.__contains__('Client Cancels All Jobs') or \
line.__contains__('Job recovered, di') or \
line.__contains__('hardware defined in HPSS does not exist') or \
line.__contains__('Dismount reason') or \
line.__contains__('Job not found in queue') or \
line.__contains__(' PVR) are disabled, arg = "MA') or \
line.__contains__('Cache Overflow') or \
line.__contains__('Cartridge has not been checked in') or \
line.__contains__('No drives of this type in') or \
line.__contains__('not found in LTO') or \
line.__contains__('Not enough drives of this type') or \
line.__contains__('Drive Notify failed') or \
line.__contains__('= "eject_cart"') or \
line.__contains__('information request failed') or \
line.__contains__(' STATUS_') or \
line.__contains__('Address types'):
continue #warn
if line.__contains__(' Dismount reason') and line.__contains__('drive = "4'):
continue
if line.__contains__(' EVNT '):
m1 = re_evnt_drive_enabl .match(line)
if m1:
drive = m1.groups()[0]
self.handle_event(FSM_EVNT_RECOVER_FAT1, {'drive':drive, 'epoch':epoch})
continue
if line.__contains__("Client logged ") or \
line.__contains__('Total Drive Count') or \
line.__contains__(' logfiles ') or \
line.__contains__('Storage map state') or \
line.__contains__("CONAN") or \
line.__contains__("erver 'Mover") or \
line.__contains__("'STK PVR'") or \
line.__contains__("Connection table full") or \
line.__contains__("Open files on connection shutdown") or \
line.__contains__("Repack Completed SClassId") or \
line.__contains__('robot is offline, drive = "0') or \
line.__contains__("Deferred state change") or \
line.__contains__("End of media on ") or \
line.__contains__('Reclaim completed for storage') or \
line.__contains__("Request w/o client") or \
line.__contains__("Exporting cartridge") or \
line.__contains__("Export of ") or \
(line.__contains__(", Disabled") and line.__contains__("dmin drive change") )or\
line.__contains__("av_Initialize") or \
line.__contains__("Mount failed, no drives") or \
line.__contains__("Import of cartridge ") or \
line.__contains__("STK volume ejects are done asynchronously") or \
line.__contains__("could not be mounted, Condition") or \
line.__contains__('Job not found in queue') or \
line.__contains__("Core Server shutting") or \
line.__contains__('All disk storage maps') or \
line.__contains__('SSMS0115') or \
line.__contains__('Core Server Shutdown Complete') or \
line.__contains__('Running with restricted') or \
line.__contains__('No initialization is necessary') or \
line.__contains__('Reissuing ') or \
line.__contains__(' in PVR') or \
line.__contains__('mm_ReadPVR') or \
line.__contains__('Ejecting cartridge=') or \
line.__contains__('Core Server startup') or \
line.__contains__('Starting server') or \
line.__contains__('been shutdown') or \
line.__contains__('Delog complete') or \
line.__contains__('Startup of server') or \
line.__contains__('core_SignalThread') or \
line.__contains__('has been renamed') or \
line.__contains__('abel written') or \
line.__contains__('CHECK_DISK_') or \
line.__contains__('Core Server Admin'):
continue #evnt
if line.__contains__(" ALRM "):
if line.__contains__(" Write request failed") or \
line.__contains__(" Read request failed") or \
line.__contains__('Data copy operation failed') or \
line.__contains__("Cannot lock VV cache record") or \
line.__contains__("Connection timed out") or\
line.__contains__("Not owner") or \
line.__contains__('No such file or ') or \
line.__contains__("HPSS system failure") or \
line.__contains__(" request descriptor table") or \
line.__contains__('Error creating credentials') or \
line.__contains__('File too large') or \
line.__contains__('hpss_FilesetGetAttributes') or \
line.__contains__('request threads busy') or \
line.__contains__('DB connection has been busy') or \
line.__contains__('Failed to get RTM records') or \
line.__contains__("Retrying read from level") or \
line.__contains__(" Rewind of device") or \
line.__contains__('PVR reports mounting a cartridge in a drive which') or \
line.__contains__('Request queue full') or \
line.__contains__('No space left on device') or \
line.__contains__('Internal software error') or \
line.__contains__('Unable to obtain the Fileset') or \
line.__contains__(' CAP priorit') or \
line.__contains__('Restricted User list') or \
line.__contains__('sending RPC reply') or \
line.__contains__('Error sending data') or \
line.__contains__('Deferred state change') or \
line.__contains__('rtm_Reconnect') or \
line.__contains__(' SAN3P ') or \
line.__contains__('Resource locked') or \
line.__contains__('missing mover error ') :
continue #alrm
if line.__contains__('Cartridge reported IN_TRANSIT'):
m = re_alrm_a.match(line)
if m:
self.handle_event(FSM_EVNT_FATALERROR_1, {'epoch':epoch, 'cid':m.groups()[0]})
continue
if line.__contains__(" NUNN "):
m = re_nunn_importerr.match(line)
if m:
self.handle_event(FSM_EVNT_FATALERROR_1, {'epoch':epoch, 'cid':m.groups()[0]})
continue
if line.__contains__('Error no owner found') or \
line.__contains__('on write()') or \
line.__contains__('SS and BFS '):
continue
if line.__contains__(" MAJR "):
if line.__contains__("Gatekeeper Server") or \
line.__contains__("RPC reply") or \
line.__contains__('hpss_ConnMgrGrabConn') or \
line.__contains__('died on host') or \
line.__contains__('not initialize socket') or \
line.__contains__('Error receiving data') or \
line.__contains__('rror obtaining transmit'):
continue
if line.__contains__("ECFS "):
if line.__contains__("CORE") or \
line.__contains__('MPS'):
continue
if line.__contains__('MARS '):
if line.__contains__('CORE') or \
line.__contains__('MPS') :
continue
if line.__contains__('ROOT' ):
if line.__contains__(' MPS') or \
line.__contains__(' CORE'):
continue
if line.__contains__(' HPSS '):
continue
if line.__contains__('Checking out cartridge') or \
line.__contains__('Shutdown of server') or \
line.__contains__('Tape aggregation') or \
line.__contains__('itfile') or \
line.__contains__('PVR reestablished') or \
line.__contains__('eer uuid') or \
line.__contains__('hpss_RPC') or \
line.__contains__('RPC runtime error') or \
line.__contains__('pvr_PVRSetAttrs') or \
line.__contains__("Gatekeeper") or \
line.__contains__("GateKeeper") or \
line.__contains__('Authentication') or \
line.__contains__('Bad connection handle') or \
line.__contains__("PVR 'STK PVR") or \
line.__contains__(' log files ') or \
line.__contains__(' Mover ') or \
line.__contains__('passive side of') or \
line.__contains__('einitialization ') or \
line.__contains__('hpss_prod') or \
line.__contains__('136.156.') or \
line.__contains__(' TRAC ') or \
line.__contains__('-mvr1') or \
line.__contains__('USERSPACE1') or \
line.__contains__('pvr_Check') or \
line.__contains__('hdrv01'):
continue
if line.__contains__('Failure'):
if line.__contains__('querying') or \
line.__contains__:
continue
print "unparsed line", line,
else:
pass
#if len(line)> 16:
# print "time did not match", line
except:
print "unknown", line
raise
sys.exit()
#print " file done"
#sys.exit()
def correlation(self):
    """
    Compute Pearson correlation matrices over the per-hour cartridge
    counts CSV, one projection per uppercase column-name prefix
    (iterated Z..A), dumping each result to a JSON file next to the CSV.
    """
    #with Timer("Correlation-Finished:"):
    def _exec_(csvfile, p):
        # Run the correlation for the subset of CSV columns whose names
        # start with the single letter 'p'.
        c = corrPearson()
        print datetime.datetime.now()
        print csvfile
        c.read(csvfile)
        #while True:
        #p = q.get()
        if p in string.ascii_uppercase:
            # p = 'Y'
            # for p in reversed(string.ascii_uppercase):
            # print p
            fields = c.filter_fields(['%s.+'%p], [])
            # need at least two matching columns to correlate anything
            if len(fields) > 1:
                print "Run for argument ", p
                res = c.full_correlation_matrix(fields, "correlation_%s"%p)
                c.jsondump("%s_proj_%s.json"%(csvfile, p),res)
                #sorted_res = {}
                #for x in res.keys():
                #    for y,v in res[x].items():
                #        if not v in sorted_res.keys():
                #            sorted_res[v[0]]=[]
                #        sorted_res[v[0]].append((x,y))
                #for i in sorted(sorted_res.keys()):
                #    print i, sorted_res[i]
        # else:
        #     break
    a = 'cartridges_tmt_per_hour.csv'
    csvfile = os.path.join(self.outputdir,a)
    #for p in ['Z']:
    for p in reversed(string.ascii_uppercase):
        _exec_(csvfile,p)
        # correlation matrices are large; reclaim memory between letters
        gc.collect()
    print "done"
def highestlevelstats(self):
res = {}
res3d = []
for drv in sorted(self.drv.keys()):
curmax = max(DRV_INT_MAP.values())
DRV_INT_MAP[drv] = curmax+1
for hid in sorted(self.hm.keys()):
curmax = max(HID_INT_MAP.values())
HID_INT_MAP[hid] = curmax+1
for hid, obj in self.hm.items():
c = obj.cost_drive()
for k,v in c.items():
if v[1]< 600:
res3d.append([HID_INT_MAP[hid],DRV_INT_MAP[k],v[1], v[0]])
latency = round(v[1] ,0)
ent = res.setdefault(latency, 0)
res[latency] = ent+1
filtered = {}
for i in range(0,300):
v = res.setdefault(i, 0)
filtered[i]=v
figp = os.path.join(self.outputdir, "drive_home_latency.png")
plot_dict(filtered, figp)
costhd = os.path.join(self.outputdir, "home_drive_costs.csv")
with open(costhd , | |
<gh_stars>1-10
#! /usr/bin/env python
#! /opt/casa/packages/RHEL7/release/current/bin/python
#
# AAP = Admit After Pipeline
#
# Example python script (and module) that, for a given directory, finds all ALMA pbcor.fits files
# and runs a suite of predefined ADMIT recipes on them in a local directory named madmit_<YMD_HMS>.
# It also matches the corresponding pb.fits files, so ADMIT can work on noise-flat image cubes.
#
# Notes:
# 1. this is still for old-style admit, not ADMIT3, but should port to python3 when ready
# 2. this does not encode the different tree view that is encoded in the old do_aap5 or runa1
# 3. it handles *.pb.fits as well as *.pb.fits.gz files that should mirror the *.pbcor.fits files
#
# SCRIPT usage
# aap.py -d dir1 [-c] [-n] [-r] [-s] [-v]
# -c check files to see if there are orphans we may not have encoded for ADMIT processing
# -n dry-run, prints out the commands as they would run (old style ADMIT)
# -r remove all CASA images/tables after the ADMIT run
# -s single mode, only one default run per image/cube
# -v verbose
#
# To use as a script, your shell environment must have 'casa' and CASA's 'python' in the $PATH,
# this normally takes two modifications, e.g.
# export PATH=$CASAROOT/bin:$CASAROOT/lib/casa/bin:$PATH
#
# MODULE usage
# import aap
# madmitname = aap.compute_admit(dirname)
#
# @todo ...
#
_version = "9-sep-2020 PJT"
import os, sys
import argparse as ap
import glob
import datetime
# decipher the python environment (yuck)
# Probe the CASA python environment: the classic 'casa' module means
# old-style ADMIT, 'casatasks' means ADMIT3, neither means we can't run.
try:
    import casa
    print("Warning fake: still assuming classic ADMIT")
    is_admit3 = False
except ImportError:
    try:
        import casatasks  # pre-release now does this????
        is_admit3 = True
        print("Good fake news: running ADMIT3")
    except ImportError:
        print("Bad fake news: your python doesn't know casa or casatasks")
        # Fall back to the classic setting so later references to
        # is_admit3 don't raise NameError (previously it was left unset).
        is_admit3 = False
def version():
    """Print the AAP version banner (identify yourself)."""
    print("AAP Version " + _version)
def usage():
    """
    Command line helper: print usage and exit with status 0.
    """
    # The original printed the literal '%s' — the program name was never
    # substituted in.
    print("Usage: %s -d DIR(s)" % sys.argv[0])
    print("For one or more DIR's find the pbcor.fits files that are needed for 'runa1' and 'runa2' type recipes in ADMIT")
    sys.exit(0)
def splitall(path):
    """
    Split *path* into a list of all of its components.

    Taken from
    https://www.oreilly.com/library/view/python-cookbook/0596001673/ch04s16.html
    """
    components = []
    while True:
        head, tail = os.path.split(path)
        if head == path:
            # absolute-path sentinel: os.path.split('/') == ('/', '')
            components.insert(0, head)
            break
        if tail == path:
            # relative-path sentinel: nothing left to split off
            components.insert(0, tail)
            break
        path = head
        components.insert(0, tail)
    return components
def casa_cleanup(admitname):
    """
    Clean an admit directory of all CASA images/tables.

    CASA images are recognized by a 'table.info' file inside them; the
    containing directory is removed.  Uses shutil.rmtree instead of
    shelling out to 'rm -rf %s', which broke on paths containing spaces
    or shell metacharacters.
    """
    import shutil
    # @todo in Python3: from pathlib import Path
    # this is only 3 levels deep, works for now
    files = (glob.glob("%s/*/table.info" % admitname) +
             glob.glob("%s/*/*/table.info" % admitname) +
             glob.glob("%s/*/*/*/table.info" % admitname))
    for f in files:
        dat = os.path.dirname(f)
        print("CLEANUP: removing %s" % dat)
        shutil.rmtree(dat, ignore_errors=True)
def find_pbcor(dirname, mfs=False, cube=False, verbose=False):
    """
    Find the ALMA pbcor files in a directory... since everything starts
    with a pbcor file.  MFS/CONT are kept separate from CUBE, since they
    require different recipes: pass cube=True and/or mfs=True to select.
    """
    patterns = []
    if cube:
        patterns.append('%s/*.cube.*pbcor*fits' % dirname)
    if mfs:
        patterns.append('%s/*.mfs.*pbcor*fits' % dirname)
        patterns.append('%s/*.cont.*pbcor*fits' % dirname)
    matches = []
    for pattern in patterns:
        for hit in glob.glob(pattern):
            if verbose:
                print(hit)
            matches.append(hit)
    return matches
def runa1(pbcorname,pbname=None,label=None,apars=[],dryrun=False,cleanup=False):
    """
    Run the 'runa1' (spectral cube) ADMIT recipe on a pbcor fits file:
    $ADMIT/admit/test/admit1.py --pb <pb> --basename x [--apar <apar>]
        --out <out>.admit <pbcorname> > <out>.admit.log

    e.g. runa1(fname, pb, "native.5sigma", ["numsigma=5"])
         runa1(fname, pb, "binned16.3sigma", ["insmooth=-16","numsigma=5"])
    """
    stem = pbcorname.replace('.fits', '')
    if label == None:
        aparname = pbcorname + '.apar'
        outname = stem + '.admit'
    else:
        aparname = stem + '.%s.apar' % label
        outname = stem + '.%s.admit' % label
    pieces = ['$ADMIT/admit/test/admit1.py',
              '--pb %s' % pbname,
              '--basename x']
    if len(apars) > 0:
        # write the requested ADMIT parameters to an apar file
        if not dryrun:
            with open(aparname, "w") as fp:
                fp.write("# written by AAP\n")
                for apar in apars:
                    fp.write("%s\n" % apar)
        pieces.append('--apar %s' % aparname)
    pieces.append('--out %s' % outname)
    pieces.append(pbcorname)
    cmdline = ' '.join(pieces) + ' > %s.log 2>&1' % outname
    print(cmdline)
    if not dryrun:
        os.system(cmdline)
    if cleanup:
        casa_cleanup(outname)
def runa2(pbcorname,pbname=None,label=None,apars=[],dryrun=False,cleanup=False):
    """
    Run the 'runa2' (mfs/continuum) ADMIT recipe on a pbcor fits file:
    $ADMIT/admit/test/admit2.py --pb <pb> --basename x [--apar <apar>]
        --out <out>.admit <pbcorname> > <out>.admit.log

    Naming, with pbcorname = basename.pbcor.fits:
      outname  = basename.pbcor.admit      or basename.pbcor.<label>.admit
      aparname = basename.pbcor.fits.apar  or basename.pbcor.<label>.apar
    """
    stem = pbcorname.replace('.fits', '')
    if label == None:
        aparname = pbcorname + '.apar'
        outname = stem + '.admit'
    else:
        aparname = stem + '.%s.apar' % label
        outname = stem + '.%s.admit' % label
    pieces = ['$ADMIT/admit/test/admit2.py',
              '--pb %s' % pbname,
              '--basename x']
    if len(apars) > 0:
        # write the requested ADMIT parameters to an apar file
        if not dryrun:
            with open(aparname, "w") as fp:
                fp.write("# written by AAP\n")
                for apar in apars:
                    fp.write("%s\n" % apar)
        pieces.append('--apar %s' % aparname)
    pieces.append('--out %s' % outname)
    pieces.append(pbcorname)
    cmdline = ' '.join(pieces) + ' > %s.log 2>&1' % outname
    print(cmdline)
    if not dryrun:
        os.system(cmdline)
    if cleanup:
        casa_cleanup(outname)
def run_admit(recipe, pbcor, madmitname, dryrun=False, verbose=False, single=False, cleanup=False):
    """
    Run an ADMIT recipe on one pbcor FITS file inside the madmitname
    output directory.

    recipe      'runa1' (spectral cubes) or 'runa2' (mfs/cont maps)
    pbcor       pathname of the *.pbcor.fits file
    madmitname  output directory; created if needed, the fits files are
                symlinked into it and the recipe runs from there
    dryrun      only print the recipe commands; create nothing
    verbose     chatty progress reporting
    single      one default run per image/cube instead of the suite
    cleanup     remove CASA images/tables after each ADMIT run

    Prints a warning and returns when no matching *.pb.fits(.gz) file
    exists next to the pbcor file.
    """
    idx = pbcor.find('.pbcor.fits')
    # the matching pb may be plain or gzipped
    pb = glob.glob(pbcor[:idx] + '.pb.fits*')
    if len(pb) == 0:
        print("Warning: no matching pb found for %s" % pbcor)
        return
    pb = pb[0]
    if verbose:
        print(pbcor)
        print(pb)
    # pbcor and pb are filenames relative to the dirname
    # e.g. PID/S/G/M/product/member.uid___A001_XXX.spw22.cube.I.pbcor.fits
    pbname = splitall(pb)[-1]
    pbcorname = splitall(pbcor)[-1]
    pbcorpath = os.path.abspath(pbcor)
    pbpath = os.path.abspath(pb)
    adir = madmitname
    if verbose:
        print(adir)
    cwd = os.getcwd()
    # Bugfixes vs. the original: in dryrun mode the code skipped mkdir
    # but still chdir'ed into the (possibly nonexistent) output dir,
    # created symlinks, and never restored the cwd.  A dry run now has
    # no side effects, and the cwd is always restored.
    if not dryrun:
        os.system('mkdir -p %s' % adir)
        os.chdir(adir)
        os.system('ln -sf %s' % (pbcorpath))
        os.system('ln -sf %s' % (pbpath))
        os.system('listfitsa %s' % pbcorname)
    try:
        if recipe == 'runa2':
            if single:
                runa2(pbcorname, pbname, dryrun=dryrun, cleanup=cleanup)
            else:
                # @todo add some smoothing? go from 5ppx to 10ppx ?
                # @todo LGM's default is numsigma=6
                runa2(pbcorname, pbname, "5sigma", ["numsigma=5"], dryrun=dryrun, cleanup=cleanup)
                runa2(pbcorname, pbname, "3sigma", ["numsigma=3"], dryrun=dryrun, cleanup=cleanup)
        elif recipe == 'runa1':
            if single:
                runa1(pbcorname, pbname, dryrun=dryrun, cleanup=cleanup)
            else:
                # @todo LineID's default is numsigma=5
                runa1(pbcorname, pbname, "native.3sigma", ["numsigma=3"], dryrun=dryrun, cleanup=cleanup)
                runa1(pbcorname, pbname, "binned16.3sigma", ["insmooth=[-16]", "numsigma=3"], dryrun=dryrun, cleanup=cleanup)
    finally:
        # always return to the caller's working directory
        os.chdir(cwd)
def alma_names(dirname):
    """
    Debugging helper: search and destroy what we know.  For every
    *.pbcor.fits file check that its pb.fits.gz and mask companions are
    present, and report any fits files left over as orphans.
    """
    cwd = os.getcwd()
    os.chdir(dirname)
    files = glob.glob('*fits*')
    pbcors = sorted(glob.glob('*.pbcor.fits'))
    print("Found %d pbcor in %d fits files" % (len(pbcors), len(files)))
    for pbcor in pbcors:
        pb = pbcor.replace('.pbcor.fits', '.pb.fits.gz')
        if pb in files:
            files.remove(pbcor)
            files.remove(pb)
        else:
            print("missing %s" % pb)
        mask = pb.replace('.pb.', '.mask.')
        if mask in files:
            files.remove(mask)
        else:
            print("missing %s" % mask)
    for f in files:
        print("orphan %s" % f)
    if not files:
        print("Hurray, no orphan files")
    os.chdir(cwd)
def compute_admit(dirname, madmitname=None, verbose=False, dryrun=False, single=False, cleanup=False):
    """
    Top level driver: find all pbcor cubes/maps under dirname and run
    the matching ADMIT recipe on each.

    Returns the output directory name, or None when nothing was found.
    """
    # @todo if dirname contains the whole P/S/G/M name, store that too
    if madmitname is None:
        # try some unique name that name-completes but also parses fast
        # by the human eye and filebrowsers
        stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S.%f')
        prefix = [p for p in dirname.split('/') if p]
        if len(prefix) >= 2:
            madmitname = os.path.abspath(prefix[-1] + "_" + prefix[-2] + "_" + stamp)
        else:
            # not enough path components (e.g. dirname='.', the argparse
            # default) to build the descriptive name; previously this
            # raised IndexError on prefix[-2]
            madmitname = os.path.abspath('madmit_' + stamp)
    print("MADMIT: %s" % madmitname)
    # @todo only mfs and cube? what about cont ? or _ph and _pb
    p1 = find_pbcor(dirname, cube=True, verbose=verbose)
    print("Found %d cube pbcor fits files for ADMIT to process" % len(p1))
    p2 = find_pbcor(dirname, mfs=True, verbose=verbose)
    print("Found %d mfs pbcor fits files for ADMIT to process" % len(p2))
    if len(p1) + len(p2) == 0:
        return None
    # the cheap continuum maps first
    for p in p2:
        run_admit('runa2', p, madmitname, verbose=verbose, dryrun=dryrun, single=single, cleanup=cleanup)
    # then the expensive cubes
    for p in p1:
        run_admit('runa1', p, madmitname, verbose=verbose, dryrun=dryrun, single=single, cleanup=cleanup)
    return madmitname
if __name__ == "__main__":
parser = ap.ArgumentParser(description='AAP (ADMIT After Pipeline) processing - %s' % _version)
parser.add_argument('-d', '--dirname', nargs = 1, type = str, default = ['.'],
help = 'Name of the directory containing data')
parser.add_argument('-c', '--checknames', action="store_true", default = False,
help = 'Name Check on all fits files, report orphans')
parser.add_argument('-n', '--dryrun', action = "store_true", default = False,
help = 'Dryrun mode')
parser.add_argument('-r', '--cleanup', action = "store_true", default = False,
help | |
<filename>packages/Qpyl/qgeninp.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
"""
This module contains functions for generating FEP and
equilibration inputs for Qdyn.
Example of procedure files can be found in
qtools/template_examples/
"""
import sys
import os
import copy
import re
import time
import locale
import shutil
import tempfile
import random
import logging
from collections import OrderedDict as ODict
from Qpyl.core.qdyn import QDynInput, QDynInputError
from Qpyl.core.qstructure import QStruct, QStructError, find_placeholders
from Qpyl.common import __version__, raise_or_log
logger = logging.getLogger(__name__)
class QGenrelaxError(Exception):
    """Raised when generating relaxation (equilibration) inputs fails."""
class QGenfepsError(Exception):
    """Raised when generating FEP inputs fails."""
# TODO: break these two functions into something digestable
def genrelax(relax_proc_file, outdir, restraint,
top_file=None, fep_file=None, runscript_file=None,
pdb_file=None, cont_file=None, ignore_errors=False):
"""Generates inputs for an MD simulation with Q (Qdyn).
Arguments:
relax_proc_file (string): genrelax procedure file pathname
outdir (string): output directory
restraint (string): restraint coordinate (a)
Optional arguments (b)
top_file (string): Q topology pathname
fep_file (string): fep file pathname
runscript_file (string): slurm/sge run script
pdb_file (string): pdb pathname (used to convert placeholders)
cont_file (string): pathname of previous Qdyn input (continuation)
ignore_errors (boolean): passed to QStruct and QDynInp - write to\
logger instead of raising exceptions on\
non-critical things
(a) Restraint coordinate can be set to:
'top' - topology
'cont_inp' - whatever is defined in cont_file
'cont_final' - endpoint of previous simulation\
(final restart of cont_file)
(b) top_file and cont_file are mutually exclusive, one of them has to\
be provided
"""
# check if files exist
for k, v in locals().iteritems():
if k in ["pdb_file", "cont_file", "relax_proc_file",
"fep_file", "top_file", "runscript_file", "relax_input"]:
if v and not os.path.lexists(v):
raise QGenrelaxError("File '{}' doesn't exist.".format(v))
if restraint not in ["top", "cont_inp", "cont_final"]:
raise QGenrelaxError("Argument 'restraint' has to be either "
"'cont_inp', 'top' or 'cont_final'")
# constants
PREFIX = "relax_"
DIR = os.path.join(os.getcwd(), outdir)
if os.path.lexists(DIR):
raise QGenrelaxError("Directory '{}' exists. Please (re)move it "
"or set 'outdir'.".format(DIR))
TMPDIR = tempfile.mkdtemp()
header_comment = """\
# Generated with QTools, version {}
# Date: {}
# CWD: {}
# Cmdline: {}
""".format(__version__, time.ctime(), os.getcwd(), " ".join(sys.argv))
# find and replace placeholders. if not PDB was given to replace them, exit
relax_proc_str = open(relax_proc_file, 'r').read()
c = find_placeholders(relax_proc_str)
if c and not pdb_file:
raise QGenrelaxError("Found placeholders in proc.file, but no PDB "
"was given: {}".format(", ".join(c)))
elif c:
logger.info("These placeholders will be replaced with atom indices: {}"
"".format(", ".join(c)))
try:
qstruct = QStruct(pdb_file, "pdb", ignore_errors=ignore_errors)
relax_proc_str = qstruct.convert_placeholders(relax_proc_str)
except QStructError as err_msg:
raise QGenrelaxError("Failed to replace placeholders: "
"{}".format(err_msg))
# get topology and fep and others from previous input if given (--cont)
if cont_file:
if top_file:
raise QGenrelaxError("'top_file' and 'cont_file' don't like each "
"other. Difficult to continue with a "
"different topology...")
try:
c = QDynInput(open(cont_file, 'r').read(),
ignore_errors=ignore_errors)
except QDynInputError as err_msg:
raise QGenrelaxError("There is something wrong with the given "
"input file ({}): {}".format(cont_file,
err_msg))
cont_files = c.parameters["files"]
di = os.path.dirname(cont_file)
top_fn = cont_files["topology"]
cont_re_fn = cont_files["final"]
re_fn = "cont_{}".format(cont_re_fn)
shutil.copy2(os.path.join(di, top_fn), TMPDIR)
shutil.copy2(os.path.join(di, cont_re_fn),
os.path.join(TMPDIR, re_fn))
if restraint == "cont_inp" and "restraint" in cont_files:
cont_rest_fn = cont_files["restraint"]
rest_fn = "cont_{}".format(cont_rest_fn)
elif restraint == "cont_final":
cont_rest_fn = cont_re_fn
rest_fn = "cont_{}.rest".format(cont_rest_fn)
else:
rest_fn = None
if rest_fn:
shutil.copy2(os.path.join(di, cont_rest_fn),
os.path.join(TMPDIR, rest_fn))
if fep_file:
logger.warning("Using the fep file '{}', instead of the one "
"found in the input".format(fep_file))
fep_fn = os.path.basename(fep_file)
shutil.copy2(fep_file, TMPDIR)
else:
try:
fep_fn = cont_files["fep"]
shutil.copy2(os.path.join(di, fep_fn), TMPDIR)
except KeyError:
logger.info("No FEP file found in the input")
# or take the arguments
else:
if not top_file:
raise QGenrelaxError("Please specify the topology file or "
"a previous input for a continuation run.")
cont_files = None
top_fn = os.path.basename(top_file)
shutil.copy2(top_file, TMPDIR)
try:
fep_fn = os.path.basename(fep_file)
shutil.copy2(fep_file, TMPDIR)
except AttributeError:
logger.info("NOTE: No FEP file!")
if restraint in ["cont_inp", "cont_final"]:
raise QGenrelaxError("Can't restrain to '{}'. Specify 'cont_file'."
"".format(restraint))
else:
rest_fn = None
logger.info("Restraining to: '{}'".format(rest_fn or 'topology'))
try:
shutil.copy2(runscript_file, TMPDIR)
except AttributeError:
logger.info("No submission script was given.")
general_inp = []
steps_inps = [[],]
script_vars = {}
section = ""
for line in relax_proc_str.split("\n"):
# remove comments and strip whitespaces.
line = re.split("#|\!", line)[0].strip()
# empty lines are useless
if line == "":
continue
# found a section
if line[0] == "{":
section = line.strip("{}").lower()
continue
if not section:
raise QGenrelaxError("Failed to parse '{}'... this line - '{}' "
"is not inside any section:"
"".format(relax_proc_file, line))
if section == "script_vars":
c = line.split()
var = c[0]
value = " ".join(c[1:])
script_vars[var] = value
elif section == "general":
general_inp.append(line)
elif section == "steps":
if "__________" in line:
steps_inps.append([])
else:
steps_inps[-1].append(line)
if "fep_fn" in locals():
# find and replace atom placeholders in FEP file
# if no PDB was given to replace them, exit
fep_tmp = os.path.join(TMPDIR, fep_fn)
fep_file_str = open(fep_tmp, 'r').read()
c = find_placeholders(fep_file_str)
if c and not pdb_file:
raise QGenfepsError("Found placeholders in FEP file, but no "
"PDB was given: {}".format(", ".join(c)))
elif c:
logger.info("Replacing FEP file placeholders...")
try:
qstruct = QStruct(pdb_file, "pdb", ignore_errors=ignore_errors)
fep_file_str = qstruct.convert_placeholders(fep_file_str)
except QStructError as err_msg:
raise QGenfepsError("Failed to replace placeholders: {}"
"".format(err_msg))
else:
open(fep_tmp, 'w').write(fep_file_str)
# check for steps with no parameters
# (too many _________ lines)and remove them
for i in range(len(steps_inps)-1, -1, -1):
if not steps_inps[i]:
steps_inps.pop(i)
# join lists of lines to strings and replace the placeholders
gen_inp_s = "\n".join(general_inp)
for placeholder, value in script_vars.iteritems():
gen_inp_s = gen_inp_s.replace(placeholder, value)
step_inps_s = []
for i, step_inp in enumerate(steps_inps):
s = "\n".join(step_inp)
for placeholder, value in script_vars.iteritems():
s = s.replace(placeholder, value)
step_inps_s.append(s)
# make and save the inputs
steps = []
overridden_prms_all = []
step_n = 1
inp_fns = [] # to store the filenames and use the return value
for step_inp_s in step_inps_s:
# create the files section
final = "{}{:03d}.re".format(PREFIX, step_n)
dcd = "{}{:03d}.dcd".format(PREFIX, step_n)
files = {"final" : final,
"trajectory" : dcd,
"topology" : top_fn}
try:
files["fep"] = fep_fn
except NameError:
pass
if step_n != 1:
prev_step = step_n - 1
files["restart"] = "{}{:03d}.re".format(PREFIX, prev_step)
elif cont_files:
files["restart"] = re_fn
if rest_fn != None:
files["restraint"] = rest_fn
try:
# parse the general input
inp = QDynInput(gen_inp_s, ignore_errors=ignore_errors)
# update the general parameters with step input, printout the
# overriden parms, update the files section
overridden_prms = inp.update(step_inp_s)
if overridden_prms:
overridden_prms_all.append((step_n, ", ".join(
["{}:{}->{}".format(key, value_old, value_new) \
for key, (value_old, value_new) in \
overridden_prms.iteritems()])))
if "energy" in inp.parameters["intervals"]:
files["energy"] = "{}{:03d}.en".format(PREFIX, step_n)
inp.update(parameters={"files": files})
except QDynInputError as err_msg:
raise QGenrelaxError("Problem with step no. {}: {}"
"".format(step_n, err_msg))
# set the random seed
mdp = inp.parameters["md"]
if "random_seed" in mdp and int(mdp["random_seed"]) < 1:
rs = random.randint(1, 1000000)
inp.update(parameters={"md": {"random_seed": rs}})
logger.info("Generated random seed in step {}: {}"
"".format(step_n, rs))
# get the input string
try:
inpstr = inp.get_string()
except QDynInputError as err_msg:
raise QGenrelaxError("Error in step {}: {}"
"".format(step_n, err_msg))
inpfn = "{}{:03d}.inp".format(PREFIX, step_n)
inp_fns.append(os.path.join(DIR, inpfn))
s = header_comment + inpstr
open(os.path.join(TMPDIR, inpfn), 'w').write(s)
steps.append(inp)
step_n += 1
try:
shutil.copytree(TMPDIR, DIR)
except OSError:
raise QGenrelaxError("Cannot create directory '{}'.".format(DIR))
# remove temporary directory
shutil.rmtree(TMPDIR)
logger.info("Created inputs {}{:03d}.inp - {}{:03d}.inp"
"".format(PREFIX, 1, PREFIX, len(steps)))
# print some useful information
if overridden_prms_all:
logger.info("Overridden parameters:")
for step_n, op in overridden_prms_all:
logger.info("{}: {}".format(step_n, op))
summary = """
Quick summary
{0:<10} {1:>5} {2:>10} | |
the cached list of resolutions. In the unlikely
case that your device driver is misconfigured and there is no active
resolution, this returns the first resolution."""
for resolution in self._resolutions:
if resolution.is_active:
return resolution
print("No active resolution. Please report this bug to the libratbag developers", file=sys.stderr)
return self._resolutions[0]
@property
def buttons(self):
"""A list of RatbagdButton objects with this profile's button mappings.
Note that the list of buttons differs between profiles but the number
of buttons is identical across profiles."""
return self._buttons
@property
def leds(self):
"""A list of RatbagdLed objects with this profile's leds. Note that the
list of leds differs between profiles but the number of leds is
identical across profiles."""
return self._leds
@property
def is_active(self):
"""Returns True if the profile is currenly active, false otherwise."""
return libratbag.ratbag_profile_is_active(self._profile)
def set_active(self):
"""Set this profile to be the active profile."""
libratbag.ratbag_profile_set_active(self._profile)
class RatbagdResolution(metaclass=MetaRatbag):
    """A single resolution setting of a ratbagd profile."""

    _PREFIX = "RATBAG_RESOLUTION_"

    def __init__(self, profile, id):
        """Wrap resolution *id* of the given libratbag profile."""
        self._id = id
        self._res = libratbag.ratbag_profile_get_resolution(profile, id)
        self._capabilities = get_capabilities("resolution", self._res)

    def __exit__(self):
        libratbag.ratbag_resolution_unref(self._res)

    @property
    def index(self):
        """Index of this resolution within its profile."""
        return self._id

    @property
    def capabilities(self):
        """The capabilities of this resolution, as a list.

        Capabilities not present on the resolution are not in the list; test
        with e.g. ``RatbagdResolution.CAP_SEPARATE_XY_RESOLUTION in
        resolution.capabilities``.
        """
        return self._capabilities

    @property
    def resolution(self):
        """The (xres, yres) tuple, each resolution in DPI."""
        dpi_x = libratbag.ratbag_resolution_get_dpi_x(self._res)
        dpi_y = dpi_x
        if libratbag.RATBAG_RESOLUTION_CAP_SEPARATE_XY_RESOLUTION in self._capabilities:
            dpi_y = libratbag.ratbag_resolution_get_dpi_y(self._res)
        return (dpi_x, dpi_y)

    @resolution.setter
    def resolution(self, res):
        """Set the x- and y-resolution from the given (xres, yres) tuple.

        @param res The new resolution, as (int, int)
        """
        if libratbag.RATBAG_RESOLUTION_CAP_SEPARATE_XY_RESOLUTION not in self._capabilities:
            # No separate x/y support: only the x value is applied
            libratbag.ratbag_resolution_set_dpi(self._res, res[0])
        else:
            libratbag.ratbag_resolution_set_dpi_xy(self._res, *res)

    @property
    def report_rate(self):
        """The report rate in Hz."""
        return libratbag.ratbag_resolution_get_report_rate(self._res)

    @report_rate.setter
    def report_rate(self, rate):
        """Set the report rate in Hz.

        @param rate The new report rate, as int
        """
        libratbag.ratbag_resolution_set_report_rate(self._res, rate)

    @property
    def resolutions(self):
        """The list of supported DPI values."""
        buf = [0] * 300  # over-sized buffer; only the first `count` are valid
        count = libratbag.ratbag_resolution_get_dpi_list(self._res, buf)
        return buf[:count]

    @property
    def report_rates(self):
        """The list of supported report rates."""
        buf = [0] * 300  # over-sized buffer; only the first `count` are valid
        count = libratbag.ratbag_resolution_get_report_rate_list(self._res, buf)
        return buf[:count]

    @property
    def is_active(self):
        """True if this is the currently active resolution, False otherwise."""
        return libratbag.ratbag_resolution_is_active(self._res)

    @property
    def is_default(self):
        """True if this is the current default resolution, False otherwise."""
        return libratbag.ratbag_resolution_is_default(self._res)

    def set_default(self):
        """Make this resolution the default one."""
        return libratbag.ratbag_resolution_set_default(self._res)

    def set_active(self):
        """Make this resolution the active one."""
        return libratbag.ratbag_resolution_set_active(self._res)
class RatbagdButton(metaclass=MetaRatbag):
    """Represents a ratbagd button."""

    _PREFIX = "RATBAG_BUTTON_"

    # Macro event types, aliased from libratbag
    MACRO_KEY_PRESS = libratbag.RATBAG_MACRO_EVENT_KEY_PRESSED
    MACRO_KEY_RELEASE = libratbag.RATBAG_MACRO_EVENT_KEY_RELEASED
    MACRO_WAIT = libratbag.RATBAG_MACRO_EVENT_WAIT

    """A table mapping a button's index to its usual function as defined by X
    and the common desktop environments."""
    BUTTON_DESCRIPTION = {
        0: N_("Left mouse button click"),
        1: N_("Right mouse button click"),
        2: N_("Middle mouse button click"),
        3: N_("Backward"),
        4: N_("Forward"),
    }

    """A table mapping a special function to its human-readable description."""
    # Populated by __late_init__, once the ACTION_SPECIAL_* constants are
    # available on the class (presumably attached by MetaRatbag — confirm).
    SPECIAL_DESCRIPTION = {}

    @classmethod
    def __late_init__(cls):
        cls.SPECIAL_DESCRIPTION = {
            cls.ACTION_SPECIAL_UNKNOWN: N_("Unknown"),
            cls.ACTION_SPECIAL_DOUBLECLICK: N_("Doubleclick"),
            cls.ACTION_SPECIAL_WHEEL_LEFT: N_("Wheel Left"),
            cls.ACTION_SPECIAL_WHEEL_RIGHT: N_("Wheel Right"),
            cls.ACTION_SPECIAL_WHEEL_UP: N_("Wheel Up"),
            cls.ACTION_SPECIAL_WHEEL_DOWN: N_("Wheel Down"),
            cls.ACTION_SPECIAL_RATCHET_MODE_SWITCH: N_("Ratchet Mode"),
            cls.ACTION_SPECIAL_RESOLUTION_CYCLE_UP: N_("Cycle Resolution Up"),
            cls.ACTION_SPECIAL_RESOLUTION_CYCLE_DOWN: N_("Cycle Resolution Down"),
            cls.ACTION_SPECIAL_RESOLUTION_UP: N_("Resolution Up"),
            cls.ACTION_SPECIAL_RESOLUTION_DOWN: N_("Resolution Down"),
            cls.ACTION_SPECIAL_RESOLUTION_ALTERNATE: N_("Resolution Switch"),
            cls.ACTION_SPECIAL_RESOLUTION_DEFAULT: N_("Default Resolution"),
            cls.ACTION_SPECIAL_PROFILE_CYCLE_UP: N_("Cycle Profile Up"),
            cls.ACTION_SPECIAL_PROFILE_CYCLE_DOWN: N_("Cycle Profile Down"),
            cls.ACTION_SPECIAL_PROFILE_UP: N_("Profile Up"),
            cls.ACTION_SPECIAL_PROFILE_DOWN: N_("Profile Down"),
            cls.ACTION_SPECIAL_SECOND_MODE: N_("Second Mode"),
            cls.ACTION_SPECIAL_BATTERY_LEVEL: N_("Battery Level"),
        }

    def __init__(self, profile, id):
        """Wrap button *id* of the given libratbag profile."""
        self._id = id
        self._button = libratbag.ratbag_profile_get_button(profile, id)
        self._capabilities = get_capabilities("button", self._button)

    def __exit__(self):
        libratbag.ratbag_button_unref(self._button)

    @property
    def index(self):
        """The index of this button."""
        return self._id

    @property
    def type(self):
        """An enum describing this button's type."""
        return libratbag.ratbag_button_get_type(self._button)

    @property
    def mapping(self):
        """An integer of the current button mapping, if mapping to a button."""
        return libratbag.ratbag_button_get_button(self._button)

    @mapping.setter
    def mapping(self, button):
        """Set the button mapping to the given button.

        @param button The button to map to, as int
        """
        libratbag.ratbag_button_set_button(self._button, button)

    @property
    def macro(self):
        """A RatbagdMacro object representing the currently set macro."""
        return RatbagdMacro.from_ratbag(libratbag.ratbag_button_get_macro(self._button))

    @macro.setter
    def macro(self, macro):
        """Set the macro to the macro represented by the given RatbagdMacro
        object.

        @param macro A RatbagdMacro object representing the macro to apply to
                     the button, as RatbagdMacro.
        """
        macro_object = libratbag.ratbag_button_macro_new("macro")
        # Idiom fix: enumerate() replaces the manually maintained counter.
        for i, (type, value) in enumerate(macro.keys):
            libratbag.ratbag_button_macro_set_event(macro_object, i, type, value)
        libratbag.ratbag_button_set_macro(self._button, macro_object)
        libratbag.ratbag_button_macro_unref(macro_object)

    @property
    def special(self):
        """An enum describing the current special mapping, if mapped to special."""
        return libratbag.ratbag_button_get_special(self._button)

    @special.setter
    def special(self, special):
        """Set the button mapping to the given special entry.

        @param special The special entry, as one of RatbagdButton.ACTION_SPECIAL_*
        """
        libratbag.ratbag_button_set_special(self._button, special)

    @property
    def action_type(self):
        """An enum describing the action type of the button. One of
        ACTION_TYPE_NONE, ACTION_TYPE_BUTTON, ACTION_TYPE_SPECIAL,
        ACTION_TYPE_MACRO. This decides which *Mapping property has a value.
        """
        return libratbag.ratbag_button_get_action_type(self._button)

    @property
    def action_types(self):
        """An array of possible values for ActionType."""
        return [t for t in (RatbagdButton.ACTION_TYPE_BUTTON,
                            RatbagdButton.ACTION_TYPE_SPECIAL,
                            RatbagdButton.ACTION_TYPE_MACRO)
                if libratbag.ratbag_button_has_action_type(self._button, t)]

    def disable(self):
        """Disables this button."""
        return libratbag.ratbag_button_disable(self._button)
class RatbagdMacro(metaclass=MetaRatbag):
    """Represents a button macro.

    Note that it uses keycodes as defined by linux/input.h and not those used
    by X.Org or any other higher layer such as Gdk.
    """

    # All keys from ecodes.KEY have a KEY_ prefix. We strip it.
    _PREFIX_LEN = len("KEY_")

    # Pseudo event type (display only): a key press immediately followed by
    # the matching release.
    _MACRO_KEY = 1000

    _MACRO_DESCRIPTION = {
        RatbagdButton.MACRO_KEY_PRESS: lambda key:
            "↓{}".format(ecodes.KEY[key][RatbagdMacro._PREFIX_LEN:]),
        RatbagdButton.MACRO_KEY_RELEASE: lambda key:
            "↑{}".format(ecodes.KEY[key][RatbagdMacro._PREFIX_LEN:]),
        RatbagdButton.MACRO_WAIT: lambda val:
            "{}ms".format(val),
        _MACRO_KEY: lambda key:
            "↕{}".format(ecodes.KEY[key][RatbagdMacro._PREFIX_LEN:]),
    }

    def __init__(self):
        self._macro = []

    def __str__(self):
        """Human-readable rendering, collapsing press/release pairs."""
        if not self._macro:
            return "None"
        parts = []
        pos = 0
        count = len(self._macro)
        while pos < count:
            evtype, value = self._macro[pos]
            if evtype == RatbagdButton.MACRO_KEY_PRESS and pos + 1 < count:
                next_type, next_value = self._macro[pos + 1]
                if next_type == RatbagdButton.MACRO_KEY_RELEASE and next_value == value:
                    # Collapse the paired press/release into one symbol
                    evtype = self._MACRO_KEY
                    pos += 1
            parts.append(self._MACRO_DESCRIPTION[evtype](value))
            pos += 1
        return " ".join(parts)

    @property
    def keys(self):
        """A list of (RatbagdButton.MACRO_*, value) tuples representing the
        current macro."""
        return self._macro

    @staticmethod
    def from_ratbag(macro_object):
        """Instantiates a new RatbagdMacro from a macro in libratbag format.

        @param macro_object The macro in libratbag format, as
                            [(RatbagdButton.MACRO_*, value)].
        """
        ratbagd_macro = RatbagdMacro()
        count = libratbag.ratbag_button_macro_get_num_events(macro_object)
        for i in range(count):
            evtype = libratbag.ratbag_button_macro_get_event_type(macro_object, i)
            if evtype == RatbagdButton.MACRO_WAIT:
                value = libratbag.ratbag_button_macro_get_event_timeout(macro_object, i)
            else:
                value = libratbag.ratbag_button_macro_get_event_key(macro_object, i)
            ratbagd_macro.append(evtype, value)
        return ratbagd_macro

    def accept(self):
        """Applies the currently cached macro."""
        self.emit("macro-set")

    def append(self, type, value):
        """Appends the given event to the current macro.

        @param type The type of event, as one of RatbagdButton.MACRO_*.
        @param value If the type denotes a key event, the keycode of the
                     event, as int. Otherwise, the value of the timeout in
                     milliseconds, as int.
        """
        entry = (type, value)
        # Skip entries identical to the previous one, as we cannot e.g. have
        # two identical key presses in a row.
        if self._macro and entry == self._macro[-1]:
            return
        self._macro.append(entry)
        self.notify("keys")
class RatbagdLed(metaclass=MetaRatbag):
    """Represents a ratbagd led."""
    _PREFIX = "RATBAG_LED_"
    # LED mode constants, aliased from libratbag
    MODE_OFF = libratbag.RATBAG_LED_OFF
    MODE_ON = libratbag.RATBAG_LED_ON
    MODE_CYCLE = libratbag.RATBAG_LED_CYCLE
    MODE_BREATHING = libratbag.RATBAG_LED_BREATHING
    # Human-readable (translatable) description of each LED mode
    LED_DESCRIPTION = {
        # Translators: the LED is off.
        MODE_OFF: N_("Off"),
        # Translators: the LED has a single, solid color.
        MODE_ON: N_("Solid"),
        # Translators: the LED is cycling between red, green and blue.
        MODE_CYCLE: N_("Cycle"),
        # Translators: the LED's is pulsating a single color on different
        # brightnesses.
        MODE_BREATHING: N_("Breathing"),
    }
def __init__(self, profile, id):
self._id = id
self._led = libratbag.ratbag_profile_get_led(profile, id)
self._capabilities = get_capabilities("led", self._led)
    def __exit__(self):
        # Release the libratbag reference held since __init__.
        # NOTE(review): unusual __exit__ signature (no exc_type/exc_value/tb
        # arguments) — presumably called explicitly rather than via a `with`
        # block; confirm against callers.
        libratbag.ratbag_led_unref(self._led)
@property
def index(self):
"""The index of this led."""
return self._id
@property
def mode(self):
"""This led's mode, one of MODE_OFF, MODE_ON, MODE_CYCLE and
MODE_BREATHING."""
return libratbag.ratbag_led_get_mode(self._led)
@mode.setter
def mode(self, mode):
"""Set the led's mode to the given mode.
@param mode The new mode, as one of MODE_OFF, MODE_ON, MODE_CYCLE and
MODE_BREATHING.
"""
libratbag.ratbag_led_set_mode(self._led, mode)
@property
def type(self):
"""An enum describing this led's type, one of RatbagdLed.TYPE_UNKNOWN,
RatbagdLed.TYPE_LOGO or RatbagdLed.TYPE_SIDE."""
return libratbag.ratbag_led_get_type(self._led)
@property
def color(self):
"""An integer triple of the current LED color."""
c = libratbag.ratbag_led_get_color(self._led)
return (c.red, c.green, c.blue)
@color.setter
def color(self, color):
"""Set the led color to the given color.
@param | |
range implementation
if self.isVisible():
plot = self.getPlot()
if plot is not None:
plot._invalidateDataRange()
def getRgbaImageData(self, copy: bool = True):
"""Get the displayed RGB(A) image
:returns: Array of uint8 of shape (height, width, 4)
:rtype: numpy.ndarray
"""
return self.getColormap().applyToData(self)
def getData(self, copy: bool = True):
"""Returns the image data
:param bool copy: True (Default) to get a copy,
False to use internal representation (do not modify!)
"""
slicing = [slice(None)] * 3
slicing[self.getSliceAxis()] = self.getSliceIndex()
return numpy.array(self.getStackData(copy=False)[tuple(slicing)], copy=copy)
def setData(self, data, copy: bool = True):
data = numpy.array(data, copy=False)
if data.ndim == 2: # Make it a 3D stack
data.shape = (1,) + data.shape
self.setStackData(data, copy)
def getStackData(self, copy: bool = True):
"""Returns the image stack data
:param bool copy: True (Default) to get a copy,
False to use internal representation (do not modify!)
"""
return self.getScenePrimitive().getData(copy=copy)
def setStackData(self, data, copy: bool = True):
""" "Set the image stack data
:param numpy.ndarray data:
Data array with 3 dimensions (depth, height, width)
:param bool copy: True (Default) to make a copy,
False to use as is (do not modify!)
"""
data = numpy.array(data, copy=copy)
assert data.ndim == 3
if data.dtype.kind == "b":
_logger.warning("Converting boolean image to int8 to plot it.")
data = numpy.array(data, copy=False, dtype=numpy.int8)
elif numpy.iscomplexobj(data):
_logger.warning("Converting complex image to absolute value to plot it.")
data = numpy.absolute(data)
# TODO cast to valid type (u)int8|16 or float32
previousShape = self.getStackData(copy=False).shape
self.getScenePrimitive().setData(data, copy=False)
if previousShape != data.shape:
self._invalidateDataRange()
self._updated(items.ItemChangedType.DATA)
def __validSliceIndex(self, index: int, axis: int) -> int:
"""Returns a valid slice index for given axis and current data."""
length = self.getStackData(copy=False).shape[axis]
if index < 0: # Handle negative index
index += length
index = numpy.clip(index, 0, length - 1)
return index
def setSlice(self, index: int, axis: int) -> None:
"""Set both the slice index and dimension index at once.
:param int index: Slice index
:param int axis: Dimension index to slice
"""
assert 0 <= axis <= 2
index = self.__validSliceIndex(index, axis)
if index != self.__index or axis != self.__axis:
self.__index = index
if axis != self.__axis:
self.__axis = axis
self._invalidateDataRange()
self._updated(items.ItemChangedType.DATA)
def getSliceIndex(self) -> int:
"""Returns slice index.
:rtype: int
"""
return self.__index
def setSliceIndex(self, index: int) -> None:
"""Set the slice index.
Negative index are converted to positive ones.
Index is clipped to the stack shape.
:param int index: The index of the slice.
"""
index = self.__validSliceIndex(index, self.getSliceAxis())
if index != self.__index:
self.__index = index
self._updated(items.ItemChangedType.DATA)
def getSliceAxis(self) -> int:
"""Returns slice dimension index in [0, 2].
:rtype: int
"""
return self.__axis
def setSliceAxis(self, axis: int) -> None:
"""Set the slice dimension index in [0, 2].
:param int index: The index of the slice.
"""
assert 0 <= axis <= 2
if axis != self.__axis:
self.__axis = axis
self._invalidateDataRange()
self._updated(items.ItemChangedType.DATA)
class DataTexture(Texture):
    """Texture keeping a CPU memory copy of the data."""

    def __init__(
        self,
        internalFormat,
        data,
        format_=None,
        texUnit=0,
        minFilter=None,
        magFilter=None,
        wrap=None,
    ):
        # Keep a reference to the data so it can be read back without GL
        self.__data = numpy.array(data, copy=False)
        super().__init__(
            internalFormat,
            self.__data,
            format_,
            None,
            texUnit,
            minFilter,
            magFilter,
            wrap,
        )

    def getData(self, copy=True):
        """Returns the texture data

        :param bool copy: True (Default) to get a copy,
            False to use internal representation (do not modify!)
        """
        return numpy.array(self.__data, copy=copy)

    def update(self, format_, data, offset=(0, 0, 0), copy=True):
        """Update a sub-volume of both the CPU copy and the GL texture.

        :param data: 3D array (depth, height, width) to write
        :param offset: (z, y, x) offset of the destination corner
        """
        data = numpy.array(data, copy=False)
        # BUG FIX: the offset was unpacked as `oz, oy, oz`, assigning `oz`
        # twice and leaving `ox` undefined (NameError on the line below).
        oz, oy, ox = offset
        depth, height, width = data.shape
        self.__data[oz : oz + depth, oy : oy + height, ox : ox + width] = data
        super().update(format_, data, offset, copy)
class ColormapTexturedMesh3D(primitives.Geometry):
    """A 3D mesh with color from a 3D texture, no lighting."""

    # (vertex shader, fragment shader template) pair.
    # The vertex shader forwards the 3D texture coordinate; the fragment
    # shader samples the texture's red channel and maps the value through
    # the colormap, modulated by the `alpha` uniform.
    _shaders = (
        """
    attribute vec3 position;
    attribute vec3 texcoord;
    uniform mat4 matrix;
    uniform mat4 transformMat;
    varying vec4 vCameraPosition;
    varying vec3 vTexCoord;
    void main(void)
    {
        vCameraPosition = transformMat * vec4(position, 1.0);
        vTexCoord = texcoord;
        gl_Position = matrix * vec4(position, 1.0);
    }
    """,
        string.Template(
            """
    varying vec4 vCameraPosition;
    varying vec3 vTexCoord;
    uniform sampler3D data;
    uniform float alpha;
    $colormapDecl
    $sceneDecl
    void main(void)
    {
        $scenePreCall(vCameraPosition);
        float value = texture3D(data, vTexCoord).r;
        gl_FragColor = $colormapCall(value);
        gl_FragColor.a *= alpha;
        $scenePostCall(vCameraPosition);
    }
    """
        ),
    )

    def __init__(
        self, position, texcoord, texture, mode="triangles", indices=None, colormap=None
    ):
        """
        :param position: Vertex positions
        :param texcoord: 3D texture coordinates per vertex
        :param texture: Texture object storing the data, or None
        :param str mode: Triangle rendering mode
        :param indices: Optional primitive indices
        :param colormap: Colormap function; a default one is created when None
        """
        assert mode in self._TRIANGLE_MODES
        assert texture is None or isinstance(texture, Texture)
        self._alpha = 1.0
        self._colormap = colormap or function.Colormap()  # Default colormap
        self._colormap.addListener(self._cmapChanged)
        # Textures replaced through the `texture` setter; their GL resources
        # are released at the next prepareGL2 call.
        self._texturesToDiscard = []
        self._texture = texture
        super().__init__(mode, indices, position=position, texcoord=texcoord)

    @property
    def texture(self):
        """Texture storing the data"""
        return self._texture

    @texture.setter
    def texture(self, texture):
        # Defer GL cleanup of the previous texture to prepareGL2
        if self._texture is not None:
            self._texturesToDiscard.append(self._texture)
        self._texture = texture

    @property
    def alpha(self):
        """Transparency of the plane, float in [0, 1]"""
        return self._alpha

    @alpha.setter
    def alpha(self, alpha):
        self._alpha = float(alpha)
        self.notify()

    @property
    def colormap(self):
        """The colormap used by this primitive"""
        return self._colormap

    def _cmapChanged(self, source, *args, **kwargs):
        """Broadcast colormap changes"""
        self.notify(*args, **kwargs)

    def getData(self, copy: bool = True):
        """Returns the image data

        :param bool copy: True (Default) to get a copy,
            False to use internal representation (do not modify!)
        """
        return self.texture.getData(copy=copy)

    def prepareGL2(self, ctx):
        # Release GL resources of replaced textures before rendering
        while self._texturesToDiscard:
            self._texturesToDiscard.pop(0).discard()
        if self.texture is not None:
            self.texture.prepare()
        super().prepareGL2(ctx)

    def renderGL2(self, ctx):
        if self.texture is None:
            return  # No data: nothing to render
        # Build the fragment shader from the template with scene and
        # colormap-specific GLSL snippets
        fragment = self._shaders[1].substitute(
            sceneDecl=ctx.fragDecl,
            scenePreCall=ctx.fragCallPre,
            scenePostCall=ctx.fragCallPost,
            colormapDecl=self.colormap.decl,
            colormapCall=self.colormap.call,
        )
        program = ctx.glCtx.prog(self._shaders[0], fragment)
        program.use()
        self.colormap.setupProgram(ctx, program)
        program.setUniformMatrix("matrix", ctx.objectToNDC.matrix)
        program.setUniformMatrix("transformMat", ctx.objectToCamera.matrix, safe=True)
        gl.glUniform1f(program.uniforms["alpha"], self._alpha)
        gl.glUniform1i(program.uniforms["data"], self.texture.texUnit)
        ctx.setupProgram(program)
        with self.texture:
            self._draw(program)
class ImageStackSlice(ColormapTexturedMesh3D):
    """Display an image corresponding to a slice of a stack.

    The quad geometry and its texture coordinates are recomputed whenever the
    slice selection changes.
    """

    def __init__(self):
        self.__axis = 0  # Sliced dimension index, in [0, 2]
        self.__index = 0  # Slice index along the sliced dimension
        super().__init__(
            position=numpy.zeros((4, 3), dtype=numpy.float32),
            texcoord=numpy.zeros((4, 3), dtype=numpy.float32),
            texture=None,
            mode="triangle_strip",
        )

    def __updatePrimitive(self):
        """Update the quad vertices and texture coordinates."""
        if self.texture is None:
            return
        shape = self.getStackData(copy=False).shape
        axis = self.getSliceAxis()
        unitsquare = numpy.array(
            [(0.0, 0.0, 0.0), (0.0, 1.0, 0.0), (1.0, 0.0, 0.0), (1.0, 1.0, 0.0)],
            dtype=numpy.float32,
        )
        # In-plane size: shape reversed to (width, height, depth), with the
        # sliced dimension removed
        size = list(reversed(shape))
        size.pop(2 - axis)
        vertices = unitsquare[:, :2] * size
        self.setAttribute("position", vertices, copy=False)
        # Texture coordinate along the sliced axis points at the slice center
        texcoord = numpy.array(unitsquare, copy=True)
        texcoord[:, -1] = (self.getSliceIndex() + 0.5) / shape[axis]
        texcoord = numpy.roll(texcoord, axis=1, shift=-axis)
        self.setAttribute("texcoord", texcoord, copy=False)

    def getStackData(self, copy: bool = True):
        """Returns the stack data

        :param bool copy: True (Default) to get a copy,
            False to use internal representation (do not modify!)
        """
        return super().getData(copy=copy)

    def getData(self, copy: bool = True):
        """Returns the current slice as a 2D image.

        :param bool copy: True (Default) to get a copy,
            False to use internal representation (do not modify!)
        """
        slicing = [slice(None)] * 3
        slicing[self.getSliceAxis()] = self.getSliceIndex()
        return numpy.array(self.getStackData(copy=False)[tuple(slicing)], copy=copy)

    def getSliceIndex(self) -> int:
        """Returns slice index.

        :rtype: int
        """
        return self.__index

    def __validSliceIndex(self, index: int, axis: int) -> int:
        """Returns a valid slice index for given axis and current data."""
        # BUG FIX: clip against the 3D stack shape. The previous code used
        # getData(), which returns the already-sliced 2D image, so shape[axis]
        # gave a wrong length (and raised IndexError for axis == 2).
        length = self.getStackData(copy=False).shape[axis]
        if index < 0:  # Handle negative index
            index += length
        return numpy.clip(index, 0, length - 1)

    def setSliceIndex(self, index: int) -> None:
        """Set the slice index.

        Negative index are converted to positive ones.
        Index is clipped to the stack shape.

        :param int index: The index of the slice.
        """
        index = self.__validSliceIndex(index, self.getSliceAxis())
        if index != self.__index:
            self.__index = index
            self.__updatePrimitive()

    def getSliceAxis(self) -> int:
        """Returns slice dimension index in [0, 2].

        :rtype: int
        """
        return self.__axis

    def setSliceAxis(self, axis: int) -> None:
        """Set the slice dimension index in [0, 2].

        :param int axis: The dimension to slice.
        """
        assert 0 <= axis <= 2
        if axis != self.__axis:
            self.__axis = axis
            self.__updatePrimitive()
class GLImageStack(GLImage):
    """Data image PlotWidget item based on plot3d.scene"""

    # TODO origin and scale taking 3 values
    def __init__(self):
        GLImage.__init__(self)
        self.__index = 0  # Slice index along the sliced dimension
        self.__axis = 0  # Sliced dimension index, in [0, 2]
        self.__texture = None  # Texture holding the stack; None until assigned
def _initPrimitive(self):
return ColormapTexturedMesh3D(
position=numpy.zeros((4, 3), dtype=numpy.float32),
texcoord=numpy.zeros((4, 3), dtype=numpy.float32),
texture=None,
mode="triangle_strip",
)
def __updatePrimitive(self):
"""Update the vertices and tex coords"""
if self.__texture is None:
return
shape = self.__texture.getData(copy=False).shape
axis = self.getSliceAxis()
mesh = self.getScenePrimitive()
unitsquare = numpy.array(
[(0.0, 0.0, 0.0), (0.0, 1.0, 0.0), (1.0, 0.0, 0.0), (1.0, 1.0, 0.0)],
dtype=numpy.float32,
)
size = list(reversed(shape))
size.pop(2 - axis)
vertices = unitsquare[:, :2] * size
mesh.setAttribute("position", vertices, copy=False)
texcoord = numpy.array(unitsquare, copy=True)
texcoord[:, -1] = (self.getSliceIndex() + 0.5) / shape[axis]
texcoord = numpy.roll(texcoord, axis=1, shift=-axis)
mesh.setAttribute("texcoord", texcoord, copy=False)
def _updated(self, event=None, checkVisibility: bool = True):
if event == items.ItemChangedType.DATA:
self.__updatePrimitive()
self._setColormappedData(self.getData(copy=False), | |
# Source repository: ihgazni2/navegador5
import urllib.parse
import os
import re
from xdict import utils
import elist.elist as elel
def get_origin(url):
    """Return the origin ('scheme://netloc') of *url*."""
    parsed = urllib.parse.urlparse(url)
    return parsed.scheme + '://' + parsed.netloc
def get_base_url(url):
    """Return the base URL ('scheme://netloc') of *url*."""
    parsed = urllib.parse.urlparse(url)
    return ''.join((parsed.scheme, '://', parsed.netloc))
def trim_after_netloc(url):
    """Drop everything after the netloc; same result as get_base_url()."""
    parsed = urllib.parse.urlparse(url)
    return '{}://{}'.format(parsed.scheme, parsed.netloc)
def get_path_arr(url):
    '''
    Return the chain of ancestor URLs, one entry per path segment.

    url = "http://baidu.com/a/b/c/d.html;p1;p2?q=a#frag"
    get_path_arr(url) ->
        [
         'http://baidu.com/a',
         'http://baidu.com/a/b',
         'http://baidu.com/a/b/c',
         'http://baidu.com/a/b/c/d.html'
        ]
    '''
    parsed = urllib.parse.urlparse(url)
    prefix = ''.join((parsed.scheme, '://', parsed.netloc))
    # Strip a single leading '/' before splitting into segments.
    path = utils.str_lstrip(parsed.path, '/', 1)
    ancestors = []
    for segment in path.split('/'):
        prefix = prefix + '/' + segment
        ancestors.append(prefix)
    return ancestors
def trim_after_parent(url):
    '''
    Return the URL of the parent of the last path segment.

    url = "http://baidu.com/a/b/c/d.html;p1;p2?q=a#frag"
    '''
    ancestors = get_path_arr(url)
    return ancestors[-2]
def trim_after_ancestor(url, which):
    """Return the ancestor URL at position *which* (see get_path_arr)."""
    return get_path_arr(url)[which]
def trim_after_path(url):
    """Return *url* truncated right after its path component."""
    parsed = urllib.parse.urlparse(url)
    return ''.join((parsed.scheme, '://', parsed.netloc, parsed.path))
def trim_after_params(url):
    """Return *url* truncated right after its path parameters ('path;params')."""
    parsed = urllib.parse.urlparse(url)
    return ''.join(
        (parsed.scheme, '://', parsed.netloc, parsed.path, ';', parsed.params)
    )
def trim_after_query(url):
    """Return *url* truncated right after its query ('path;params?query')."""
    parsed = urllib.parse.urlparse(url)
    return ''.join(
        (parsed.scheme, '://', parsed.netloc, parsed.path,
         ';', parsed.params, '?', parsed.query)
    )
def url_to_dirpath(url):
    """Map *url* to a '<netloc><path>' directory path and create it if absent.

    Note: the directory is created relative to the current working directory.
    """
    parsed = urllib.parse.urlparse(url)
    dirpath = parsed.netloc + parsed.path
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)
    return dirpath
def url_to_fn(url):
    """Derive a flat '.html' file name from *url* ('/' in path becomes '_')."""
    parsed = urllib.parse.urlparse(url)
    flat_path = parsed.path.replace("/", "_")
    return ''.join((parsed.netloc, "__", flat_path, ".", "html"))
def parse_url_netloc(url_Netloc, default_Port):
    """Split 'host:port' into (host, port), falling back to *default_Port*.

    The explicit port is returned as a string; the default keeps whatever
    type it was given with.
    """
    m = re.search('(.*):(.*)', url_Netloc)
    if m is None:
        return (url_Netloc, default_Port)
    return (m.group(1), m.group(2))
def url_to_tuple(url):
    """Return the (scheme, netloc, path, params, query, fragment) tuple of *url*.

    Parses the URL once instead of calling urlparse per component as before.
    """
    parsed = urllib.parse.urlparse(url)
    return (parsed.scheme, parsed.netloc, parsed.path,
            parsed.params, parsed.query, parsed.fragment)
def url_to_dict(url, **kwargs):
    '''
    Parse *url* into a dict of components.

    url = "foo://example.com:8042/over/there?name=ferret#nose"
    url = "http://www.blah.com/some;param1=foo/crazy;param2=bar/path.html"
    url = "http://www.blah.com/some/crazy/path.html;param1=foo;param2=bar"

    Keyword args:
        http_default_port:  port used when scheme is 'http' and the netloc
                            carries no explicit port (default 80).
        https_default_port: port used for any other scheme (default 443).

    The 'port' entry is a string when explicit in the netloc, otherwise the
    default port value unchanged.
    '''
    http_default_port = kwargs.get('http_default_port', 80)
    https_default_port = kwargs.get('https_default_port', 443)
    # Parse once instead of calling urlparse per component as before.
    parsed = urllib.parse.urlparse(url)
    if parsed.scheme == 'http':
        default_port = http_default_port
    else:
        default_port = https_default_port
    host, port = parse_url_netloc(parsed.netloc, default_port)
    rslt = {}
    rslt['scheme'] = parsed.scheme
    rslt['netloc'] = parsed.netloc
    rslt['host'] = host
    rslt['port'] = port
    rslt['path'] = parsed.path
    rslt['params'] = parsed.params
    rslt['query'] = parsed.query
    rslt['fragment'] = parsed.fragment
    return rslt
def dict_to_url(url_dict):
    '''
    Build an URL string from a component dict.

    Recognized keys:
        'scheme' (required),
        'netloc' or 'host' (+ optional 'port': {'explicit': "..."}),
        'path',
        'params' or 'params_dict',
        'query' or 'query_dict',
        'fragment'/'fragment_dict' or 'hash'/'hash_dict',
    plus optional separator overrides: 'sp_scheme_host', 'sp_host_port',
    'sp_netloc_path', 'sp_path_params', 'sp_params_query',
    'sp_query_fragment'.

    The URL is assembled as scheme://netloc/path;params?query#fragment and
    construction stops at the first missing section (no 'path' -> stop after
    netloc; no query -> stop after params; no fragment/hash -> stop after
    query).
    '''
    tmpl = {
        'scheme': "",
        'sp_scheme_host': "://",
        'host': "",
        'sp_host_port': ":",
        'port': {
            'explicit': "",
            'implicit': ""
        },
        'netloc': "",
        'sp_netloc_path': "/",
        'path': "",
        'sp_path_params': ";",
        'params': "",
        'params_dict': {},
        'sp_params_query': "?",
        'query': "",
        'query_dict': {},
        'sp_query_fragment': "#",
        'fragment': "",
        'fragment_dict': {},
        'hash': "",
        'hash_dict': {}
    }
    tmpl['scheme'] = url_dict['scheme']
    # Apply separator overrides.
    for sep in ('sp_scheme_host', 'sp_host_port', 'sp_netloc_path',
                'sp_path_params', 'sp_params_query', 'sp_query_fragment'):
        if sep in url_dict:
            tmpl[sep] = url_dict[sep]
    # Authority: an explicit netloc wins over host[:port].
    if 'netloc' in url_dict:
        tmpl['netloc'] = url_dict['netloc']
    elif 'host' in url_dict:
        if 'port' in url_dict:
            tmpl['netloc'] = ''.join((url_dict['host'], tmpl['sp_host_port'],
                                      url_dict['port']['explicit']))
        else:
            # BUGFIX: previously read url_dict['port'] here, which raised
            # KeyError when only 'host' was given; the netloc is the host.
            tmpl['netloc'] = url_dict['host']
            if tmpl['scheme'] == 'https':
                tmpl['port']['implicit'] = "443"
            elif tmpl['scheme'] == 'http':
                tmpl['port']['implicit'] = "80"
    if 'path' not in url_dict:
        # Without a path the URL stops at the authority part.
        return ''.join((tmpl['scheme'], tmpl['sp_scheme_host'], tmpl['netloc']))
    tmpl['path'] = url_dict['path']
    sec_1 = ''.join((tmpl['scheme'], tmpl['sp_scheme_host'], tmpl['netloc'],
                     tmpl['sp_netloc_path'], url_dict['path']))
    # Path parameters (';params').
    if 'params' in url_dict:
        tmpl['params'] = url_dict['params']
    elif 'params_dict' in url_dict:
        tmpl['params_dict'] = url_dict['params_dict']
        tmpl['params'] = params_dict_urlencode(url_dict['params_dict'])
    if tmpl['params'] == "":
        sec_2 = sec_1
    else:
        # BUGFIX: previously joined as (separator, sec_1), which dropped the
        # params and prepended the separator; append ';params' to the path.
        sec_2 = ''.join((sec_1, tmpl['sp_path_params'], tmpl['params']))
    # Query ('?query').
    if 'query' in url_dict:
        tmpl['query'] = url_dict['query']
    elif 'query_dict' in url_dict:
        tmpl['query_dict'] = url_dict['query_dict']
        tmpl['query'] = params_dict_urlencode(url_dict['query_dict'], sp="&")
    else:
        return sec_2
    sec_3 = ''.join((sec_2, tmpl['sp_params_query'], tmpl['query']))
    # Fragment ('#fragment'); 'hash'/'hash_dict' are accepted as aliases.
    if 'fragment' in url_dict:
        tmpl['fragment'] = url_dict['fragment']
    elif 'fragment_dict' in url_dict:
        tmpl['fragment_dict'] = url_dict['fragment_dict']
        tmpl['fragment'] = params_dict_urlencode(url_dict['fragment_dict'], sp="&")
    elif 'hash' in url_dict:
        tmpl['hash'] = url_dict['hash']
        tmpl['fragment'] = url_dict['hash']
    elif 'hash_dict' in url_dict:
        tmpl['hash_dict'] = url_dict['hash_dict']
        tmpl['fragment'] = params_dict_urlencode(url_dict['hash_dict'], sp="&")
    else:
        return sec_3
    return ''.join((sec_3, tmpl['sp_query_fragment'], tmpl['fragment']))
def params_to_params_dict(params_str):
    """Decode ';'-separated path params into a dict.

    'k=v' pairs are percent-decoded; bare keys (no '=') map to {}.
    """
    decoded = {}
    for kv in params_str.split(";"):
        if "=" in kv:
            parts = kv.split("=")
            key = urllib.parse.unquote(parts[0])
            decoded[key] = urllib.parse.unquote(parts[1])
        else:
            decoded[urllib.parse.unquote(kv)] = {}
    return decoded
def params_dict_urlencode(decoded_dict, sp=";"):
    """Encode a params dict into 'k=v<sp>k2'.

    Keys whose value is a dict are emitted as bare keys (no '=').
    """
    plain = {}
    bare = ""
    for key, value in decoded_dict.items():
        if type(value) is dict:
            bare = ''.join((bare, sp, key))
        else:
            plain[key] = value
    encoded = ''.join((urllib.parse.urlencode(plain), bare))
    return encoded.lstrip(sp).replace("&", sp)
def urldecode(encoded_str, **kwargs):
    """Decode 'k=v<sp>k2=v2' into a dict.

    Bare keys (no '=') map to {}.  Keyword args:
        quote_plus: use unquote_plus (default True) instead of unquote.
        sp: pair separator (default '&').
    """
    quote_plus = kwargs.get('quote_plus', True)
    sp = kwargs.get('sp', "&")
    unq = urllib.parse.unquote_plus if quote_plus else urllib.parse.unquote
    # Non-greedy key match so values such as 'SourceUrl=//x/q.aspx?ID=1'
    # keep their embedded '=' characters.
    pair_regex = re.compile('(.*?)=(.*)')
    decoded = {}
    for kv in encoded_str.split(sp):
        if "=" in kv:
            m = pair_regex.search(kv)
            decoded[unq(m.group(1))] = unq(m.group(2))
        else:
            decoded[unq(kv)] = {}
    return decoded
def urldecode_half_ordered(encoded_str, **kwargs):
    """Decode 'k=v<sp>...' into an ordered list of one-pair dicts.

    Bare keys (no '=') map to "".  Keyword args:
        quote_plus: use unquote_plus (default True) instead of unquote.
        sp: pair separator (default '&').
    """
    quote_plus = kwargs.get('quote_plus', True)
    sp = kwargs.get('sp', "&")
    unq = urllib.parse.unquote_plus if quote_plus else urllib.parse.unquote
    # Non-greedy key match so values may themselves contain '='.
    pair_regex = re.compile('(.*?)=(.*)')
    pairs = []
    for kv in encoded_str.split(sp):
        if "=" in kv:
            m = pair_regex.search(kv)
            pairs.append({unq(m.group(1)): unq(m.group(2))})
        else:
            pairs.append({unq(kv): ""})
    return pairs
def urldecode_ordered(encoded_str, **kwargs):
    """Decode 'k=v<sp>...' into an ordered list of (key, value) tuples.

    Bare keys (no '=') map to "".  Keyword args:
        quote_plus: use unquote_plus (default True) instead of unquote.
        sp: pair separator (default '&').
    """
    quote_plus = kwargs.get('quote_plus', True)
    sp = kwargs.get('sp', "&")
    unq = urllib.parse.unquote_plus if quote_plus else urllib.parse.unquote
    # Non-greedy key match so values may themselves contain '='.
    pair_regex = re.compile('(.*?)=(.*)')
    pairs = []
    for kv in encoded_str.split(sp):
        if "=" in kv:
            m = pair_regex.search(kv)
            pairs.append((unq(m.group(1)), unq(m.group(2))))
        else:
            pairs.append((unq(kv), ""))
    return pairs
def urlencode(decoded_dict, **kwargs):
    """Encode a dict into 'k=v<sp>...'; inverse of urldecode().

    Keys whose value is a dict are emitted as bare keys.  Keyword args:
        quote_plus: keep '+' for spaces (default True); otherwise use '%20'.
        sp: pair separator (default '&').
    """
    quote_plus = kwargs.get('quote_plus', True)
    sp = kwargs.get('sp', "&")
    plain = {}
    bare = ""
    for key, value in decoded_dict.items():
        if type(value) is dict:
            bare = ''.join((bare, sp, key))
        else:
            plain[key] = value
    encoded = ''.join((urllib.parse.urlencode(plain), bare))
    encoded = encoded.lstrip(sp).replace("&", sp)
    if not quote_plus:
        encoded = encoded.replace("+", "%20")
    return encoded
def urlencode_half_ordered(decoded_dict_list, **kwargs):
    """Encode a list of one-pair dicts into 'k=v<sp>...', preserving order.

    Entries whose value is {} are emitted as bare keys.  Keyword args:
        quote_plus: keep '+' for spaces (default True); otherwise use '%20'.
        sp: pair separator (default '&').
    """
    quote_plus = kwargs.get('quote_plus', True)
    sp = kwargs.get('sp', "&")
    encoded = ""
    for entry in decoded_dict_list:
        key = list(entry.keys())[0]
        value = list(entry.values())[0]
        if value == {}:
            encoded = ''.join((encoded, sp, key))
        else:
            encoded = encoded + sp + urllib.parse.urlencode(entry)
    encoded = encoded.lstrip(sp).replace("&", sp)
    if not quote_plus:
        encoded = encoded.replace("+", "%20")
    return encoded
def urlencode_ordered(decoded_tuple_list, **kwargs):
    """Encode a list of (key, value) tuples into 'k=v<sp>...', preserving order.

    Entries whose value is {} are emitted as bare keys.  Keyword args:
        quote_plus: keep '+' for spaces (default True); otherwise use '%20'.
        sp: pair separator (default '&').
    """
    quote_plus = kwargs.get('quote_plus', True)
    sp = kwargs.get('sp', "&")
    encoded = ""
    for key, value in decoded_tuple_list:
        if value == {}:
            encoded = ''.join((encoded, sp, key))
        else:
            encoded = encoded + sp + urllib.parse.urlencode({key: value})
    encoded = encoded.lstrip(sp).replace("&", sp)
    if not quote_plus:
        encoded = encoded.replace("+", "%20")
    return encoded
######
######六元组
######(scheme, netloc, path, params, query, fragment)
#####
def six_tn():
    """Names of the six urlparse components, in positional order."""
    return "scheme netloc path params query fragment".split()
def six_md():
    """Bidirectional mapping between component names and their positions."""
    names = ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')
    md = {}
    for position, name in enumerate(names):
        md[name] = position
        md[position] = name
    return md
def six_u2d(url):
    '''
    Parse *url* into a dict keyed by the six component names.

    url = 'http://www.baidu.com/index.php;params?username=query#frag'
    '''
    # ParseResult is a namedtuple; _asdict gives the same name->value mapping.
    return dict(urllib.parse.urlparse(url)._asdict())
def six_u2t(url):
    '''
    Parse *url* into the (scheme, netloc, path, params, query, fragment) tuple.

    url = 'http://www.baidu.com/index.php;params?username=query#frag'
    '''
    # ParseResult is already a 6-tuple in exactly this order.
    return tuple(urllib.parse.urlparse(url))
def six_d2t(d):
    '''
    Convert a six-component dict to the positional tuple.

    d = {
        'path': '/index.php',
        'netloc': 'www.baidu.com',
        'fragment': 'frag',
        'params': 'params',
        'scheme': 'http',
        'query': 'username=query'
    }
    '''
    order = ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')
    return tuple(d[name] for name in order)
def six_d2u(d):
    '''
    Build an URL from a six-component dict via urlunparse.

    d = {
        'path': '/index.php',
        'netloc': 'www.baidu.com',
        'fragment': 'frag',
        'params': 'params',
        'scheme': 'http',
        'query': 'username=query'
    }
    six_d2u(d) -> 'http://www.baidu.com/index.php;params?username=query#frag'
    '''
    parts = (d['scheme'], d['netloc'], d['path'],
             d['params'], d['query'], d['fragment'])
    return urllib.parse.urlunparse(parts)
def six_t2d(t):
'''
t = ('http', 'www.baidu.com', '/index.php', 'params', 'username=query', 'frag')
pobj(six_t2d(t))
'''
d = {}
d['scheme'] = t[0]
d['netloc'] = | |
"""
- THIS FILE IS GENERATED -
CoveoInterfaces/CoveoInterfaces/IndexService.jid
"""
from attr import attrib, attrs
from datetime import datetime
from enum import auto
from typing import Dict, List, Optional as Opt
from .root import CASING, CoveoInterface, ExceptionBase, JidEnumFlag, JidType, MultiOut, api
from .indexer_config import (
CollaborativeRanking,
Collection,
Field,
HighlightTag,
PhysicalIndex,
QueryHighlighter,
Ranking,
ResultsPreviewer,
SearchCertificate,
Slice,
Source,
System,
TagField,
)
from .index_tracking import IndexStatus
from .document_definition import PermissionModel, PermissionSet
from .security import EffectivePermissionsListingOptions, PermissionModelInformation
from .security_provider import SID
@attrs(kw_only=True, auto_attribs=True)
class IndexException(ExceptionBase, hint="Coveo.IndexService.IndexException"):
    """Generated binding: base exception for the index service."""

    def __init__(self) -> None:
        # Typing stub; presumably replaced by the attrs-generated constructor.
        ...
@attrs(kw_only=True, auto_attribs=True)
class InvalidBinaryVersionException(IndexException, hint="Coveo.IndexService.InvalidBinaryVersionException"):
    """Generated binding: raised for an invalid binary version."""

    def __init__(self) -> None:
        # Typing stub; presumably replaced by the attrs-generated constructor.
        ...
@attrs(kw_only=True, auto_attribs=True)
class OutOfRangeException(ExceptionBase, hint="Coveo.IndexService.OutOfRangeException"):
    """Generated binding: raised for an out-of-range value."""

    def __init__(self) -> None:
        # Typing stub; presumably replaced by the attrs-generated constructor.
        ...
@attrs(kw_only=True, auto_attribs=True)
class InvalidDocumentKeyException(IndexException, hint="Coveo.IndexService.InvalidDocumentKeyException"):
    """Generated binding: raised for an invalid document key."""

    def __init__(self) -> None:
        # Typing stub; presumably replaced by the attrs-generated constructor.
        ...
@attrs(kw_only=True, auto_attribs=True)
class DocumentNotFoundException(IndexException, hint="Coveo.IndexService.DocumentNotFoundException"):
    """Generated binding: raised when a document cannot be found."""

    def __init__(self) -> None:
        # Typing stub; presumably replaced by the attrs-generated constructor.
        ...
class BladeState(JidEnumFlag):
    """Generated flag enum: lifecycle states of an indexer blade."""

    Created: int = auto()
    Initialized: int = auto()
    Starting: int = auto()
    Running: int = auto()
    WaitingForConfig: int = auto()
    OutOfSync: int = auto()
    ShuttingDown: int = auto()
    Synchronizing: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class IndexerConfig(JidType, hint="Coveo.IndexService.IndexerConfig"):
    """Some global configuration for the indexer blade

    Attributes:
        tagging_manager_uri: The URI to the tagging manager
        security_server_uri: The security server URI
        mirror_name: The mirror name of this index
        index_path: The physical path for the index
        realtime_indexing_path: The physical path for the realtime indexing files.
        slice_paths: The physical paths for each slice.
        index_identifier: The index identifier.
        config_cache_page_size: The b-tree page size for the config cache.
        config_cache_size: The b-tree cache size for the config cache.
    """

    # Generated binding: CASING metadata preserves wire-format field names.
    tagging_manager_uri: Opt[str] = attrib(default=None, metadata={CASING: "TaggingManagerURI"})
    security_server_uri: Opt[str] = attrib(default=None, metadata={CASING: "SecurityServerURI"})
    mirror_name: Opt[str] = None
    index_path: Opt[str] = None
    realtime_indexing_path: Opt[str] = None
    slice_paths: Opt[Dict[str, str]] = None
    index_identifier: Opt[str] = None
    config_cache_page_size: int = 1048576  # 1 MiB
    config_cache_size: int = 67108864  # 64 MiB

    def __init__(
        self,
        *,
        tagging_manager_uri: Opt[str] = attrib(default=None, metadata={CASING: "TaggingManagerURI"}),
        security_server_uri: Opt[str] = attrib(default=None, metadata={CASING: "SecurityServerURI"}),
        mirror_name: Opt[str] = None,
        index_path: Opt[str] = None,
        realtime_indexing_path: Opt[str] = None,
        slice_paths: Opt[Dict[str, str]] = None,
        index_identifier: Opt[str] = None,
        config_cache_page_size: int = 1048576,
        config_cache_size: int = 67108864,
    ) -> None:
        """
        Parameters:
            tagging_manager_uri: The URI to the tagging manager
            security_server_uri: The security server URI
            mirror_name: The mirror name of this index
            index_path: The physical path for the index
            realtime_indexing_path: The physical path for the realtime indexing files.
            slice_paths: The physical paths for each slice.
            index_identifier: The index identifier.
            config_cache_page_size: The b-tree page size for the config cache.
            config_cache_size: The b-tree cache size for the config cache.
        """
        # Typing stub; presumably replaced by the attrs-generated constructor.
@attrs(kw_only=True, auto_attribs=True)
class ElasticSearchConnection(JidType, hint="Coveo.IndexService.ElasticSearchConnection"):
    """Elasticsearch connection.

    Attributes:
        host: The URI of the elasticsearch host (like 'host.adomain.com').
        port: The port to use (default is 9200).
        username: The user name for http_auth.
        password: The password for http_auth.
        url_prefix: The URL prefix.
        use_ssl: Whether we use SSL or not.
        verify_certs: Whether we check the certificates or not.
        ca_certs: Optional path to CA bundle on disk.
        client_cert: Path to the file containing the private key and the certificate, or cert only if using client_key.
        client_key: Path to the file containing the private key if using separate cert and key files (client_cert will contain only the cert).
        aws_access_key: The AWS access key.
        aws_secret_key: The AWS secret key.
        aws_region: The AWS region.
        aws_service_name: The AWS service name.
    """

    # Generated binding: CASING metadata preserves wire-format field names.
    host: Opt[str] = None
    port: int = 9200
    username: Opt[str] = None
    password: Opt[str] = None
    url_prefix: Opt[str] = attrib(default=None, metadata={CASING: "URLPrefix"})
    use_ssl: Opt[bool] = attrib(default=None, metadata={CASING: "UseSSL"})
    verify_certs: Opt[bool] = None
    ca_certs: Opt[str] = attrib(default=None, metadata={CASING: "CACerts"})
    client_cert: Opt[str] = None
    client_key: Opt[str] = None
    aws_access_key: Opt[str] = attrib(default=None, metadata={CASING: "AWSAccessKey"})
    aws_secret_key: Opt[str] = attrib(default=None, metadata={CASING: "AWSSecretKey"})
    aws_region: Opt[str] = attrib(default=None, metadata={CASING: "AWSRegion"})
    aws_service_name: Opt[str] = attrib(default=None, metadata={CASING: "AWSServiceName"})

    def __init__(
        self,
        *,
        host: Opt[str] = None,
        port: int = 9200,
        username: Opt[str] = None,
        password: Opt[str] = None,
        url_prefix: Opt[str] = attrib(default=None, metadata={CASING: "URLPrefix"}),
        use_ssl: Opt[bool] = attrib(default=None, metadata={CASING: "UseSSL"}),
        verify_certs: Opt[bool] = None,
        ca_certs: Opt[str] = attrib(default=None, metadata={CASING: "CACerts"}),
        client_cert: Opt[str] = None,
        client_key: Opt[str] = None,
        aws_access_key: Opt[str] = attrib(default=None, metadata={CASING: "AWSAccessKey"}),
        aws_secret_key: Opt[str] = attrib(default=None, metadata={CASING: "AWSSecretKey"}),
        aws_region: Opt[str] = attrib(default=None, metadata={CASING: "AWSRegion"}),
        aws_service_name: Opt[str] = attrib(default=None, metadata={CASING: "AWSServiceName"}),
    ) -> None:
        """
        Parameters:
            host: The URI of the elasticsearch host (like 'host.adomain.com').
            port: The port to use (default is 9200).
            username: The user name for http_auth.
            password: The password for http_auth.
            url_prefix: The URL prefix.
            use_ssl: Whether we use SSL or not.
            verify_certs: Whether we check the certificates or not.
            ca_certs: Optional path to CA bundle on disk.
            client_cert: Path to the file containing the private key and the certificate, or cert only if using client_key.
            client_key: Path to the file containing the private key if using separate cert and key files (client_cert will contain only the cert).
            aws_access_key: The AWS access key.
            aws_secret_key: The AWS secret key.
            aws_region: The AWS region.
            aws_service_name: The AWS service name.
        """
        # Typing stub; presumably replaced by the attrs-generated constructor.
@attrs(kw_only=True, auto_attribs=True)
class ElasticSearchBladeConfig(JidType, hint="Coveo.IndexService.ElasticSearchBladeConfig"):
    """Some global configuration for the elasticsearch indexer blade

    Attributes:
        message_store: Optional folder where to keep a copy of the add_document messages.
        logger_config: Optional logger configuration. Format to be defined.
    """

    message_store: Opt[str] = None
    logger_config: Opt[str] = None

    def __init__(self, *, message_store: Opt[str] = None, logger_config: Opt[str] = None) -> None:
        """
        Parameters:
            message_store: Optional folder where to keep a copy of the add_document messages.
            logger_config: Optional logger configuration. Format to be defined.
        """
        # Typing stub; presumably replaced by the attrs-generated constructor.
@attrs(kw_only=True, auto_attribs=True)
class ElasticSearchConfig(JidType, hint="Coveo.IndexService.ElasticSearchConfig"):
    """Some global configuration for the elasticsearch indexer blade and search API.

    Attributes:
        connection_v: List of elasticsearch connections.
        indexer_config: Indexer blade config.
    """

    connection_v: Opt[List[ElasticSearchConnection]] = None
    indexer_config: Opt[ElasticSearchBladeConfig] = None

    def __init__(
        self,
        *,
        connection_v: Opt[List[ElasticSearchConnection]] = None,
        indexer_config: Opt[ElasticSearchBladeConfig] = None,
    ) -> None:
        """
        Parameters:
            connection_v: List of elasticsearch connections.
            indexer_config: Indexer blade config.
        """
        # Typing stub; presumably replaced by the attrs-generated constructor.
@attrs(kw_only=True, auto_attribs=True)
class IndexState(JidType, hint="Coveo.IndexService.IndexState"):
    """Internal state for the index.

    Attributes:
        inconsistent_index: Whether the index is in an inconsistent state.
        inconsistent_config: Whether the index config is in an inconsistent state.
    """

    inconsistent_index: Opt[bool] = None
    inconsistent_config: Opt[bool] = None

    def __init__(self, *, inconsistent_index: Opt[bool] = None, inconsistent_config: Opt[bool] = None) -> None:
        """
        Parameters:
            inconsistent_index: Whether the index is in an inconsistent state.
            inconsistent_config: Whether the index config is in an inconsistent state.
        """
        # Typing stub; presumably replaced by the attrs-generated constructor.
class IIndexAdmin(CoveoInterface):
"""Main interface used to control an index node."""
    @api("GET/ranking")  # Routed as GET /ranking by the generated binding.
    def get_ranking(self) -> Ranking:
        """Get ranking configuration for the index."""
    @api("PUT/ranking")  # Routed as PUT /ranking by the generated binding.
    def update_ranking(self, *, ranking: Ranking) -> None:
        """Update ranking configuration in the index.

        Parameters:
            ranking: The updated configuration for ranking.
        """
    @api("GET/system")  # Routed as GET /system by the generated binding.
    def get_system(self) -> System:
        """Get system configuration for the index."""
    @api("PUT/system", system="system")  # PUT /system; kwarg maps the wire name.
    def update_system(self, *, system: System) -> None:
        """Update system configuration in the index.

        Parameters:
            system: The updated configuration for system.
        """
    @api("GET/query_highlighter")  # Routed as GET /query_highlighter.
    def get_query_highlighter(self) -> QueryHighlighter:
        """Get query highlighter configuration for the index."""
    @api("PUT/query_highlighter")  # Routed as PUT /query_highlighter.
    def update_query_highlighter(self, *, query_highlighter: QueryHighlighter) -> None:
        """Update query highlighter configuration in the index.

        Parameters:
            query_highlighter: The updated configuration for query highlighter.
        """
    @api("GET/query_highlighter/highlight_tags")  # GET collection endpoint.
    def get_highlight_tags(self) -> List[HighlightTag]:
        """Get all the highlight tags in the index."""
    @api("GET/query_highlighter/highlight_tags/{highlight_tag_id}", highlight_tag_id="HighlightTagID")  # Path parameter mapped to wire casing.
    def get_highlight_tag(self, *, highlight_tag_id: int) -> HighlightTag:
        """Get a highlight tag from the index.

        Parameters:
            highlight_tag_id: The id of the highlight tag.
        """
    @api("POST/query_highlighter/highlight_tags")  # POST creation endpoint.
    def add_highlight_tag(self, *, highlight_tag: HighlightTag) -> None:
        """Add a highlight tag to the index.

        Parameters:
            highlight_tag: The new highlight tag.
        """
    @api("PUT/query_highlighter/highlight_tags/{highlight_tag_id}", highlight_tag_id="HighlightTagID")  # Path parameter mapped to wire casing.
    def update_highlight_tag(self, *, highlight_tag_id: int, highlight_tag: HighlightTag) -> None:
        """Update a highlight tag in the index.

        Parameters:
            highlight_tag_id: The id of the highlight tag.
            highlight_tag: The updated highlight tag.
        """
    @api("DELETE/query_highlighter/highlight_tags/{highlight_tag_id}", highlight_tag_id="HighlightTagID")  # Path parameter mapped to wire casing.
    def delete_highlight_tag(self, *, highlight_tag_id: int) -> None:
        """Delete a highlight tag contained in the index.

        Parameters:
            highlight_tag_id: The id of the highlight tag.
        """
    @api("GET/slices")  # GET collection endpoint.
    def get_slices(self) -> List[Slice]:
        """Get all the slices in the index."""
    @api("GET/slices/{slice_id}", slice_id="SliceID")  # Path parameter mapped to wire casing.
    def get_slice(self, *, slice_id: int) -> Slice:
        """Get a slice from the index.

        Parameters:
            slice_id: The id of the slice.
        """
    @api("POST/slices", slice_="Slice")  # 'slice_' avoids shadowing the builtin; mapped to wire name "Slice".
    def add_slice(self, *, slice_: Slice) -> None:
        """Add a slice to the index.

        Parameters:
            slice_: The new slice.
        """
    @api("PUT/slices/{slice_id}", slice_id="SliceID", slice_="Slice")  # Path and body parameters mapped to wire casing.
    def update_slice(self, *, slice_id: int, slice_: Slice) -> None:
        """Update a slice in the index.

        Parameters:
            slice_id: The id of the slice.
            slice_: The updated slice.
        """
@api("DELETE/slices/{slice_id}", slice_id="SliceID")
def | |
# Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Attention core modules for Flax."""
from collections.abc import Iterable # pylint: disable=g-importing-member
import functools
from typing import Any, Callable, Union
import warnings
from . import stochastic
from flax import jax_utils
from flax import struct
from flax.core import Scope
from flax.linen import initializers
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
from .linear import default_kernel_init
from .linear import dense_general
import numpy as np
def dot_product_attention(scope,
                          query,
                          key,
                          value,
                          dtype=jnp.float32,
                          bias=None,
                          axis=None,
                          broadcast_dropout=True,
                          dropout_rng=None,
                          dropout_rate=0.,
                          deterministic=False,
                          precision=None):
  """Computes dot-product attention given query, key, and value.

  This is the core function for applying attention based on
  https://arxiv.org/abs/1706.03762. It calculates the attention weights given
  query and key and combines the values using the attention weights. This
  function supports multi-dimensional inputs.

  Args:
    scope: Flax scope; only used to draw the 'dropout' PRNG key when dropout
      is enabled and no `dropout_rng` is provided.
    query: queries for calculating attention with shape of `[batch_size, dim1,
      dim2, ..., dimN, num_heads, mem_channels]`.
    key: keys for calculating attention with shape of `[batch_size, dim1, dim2,
      ..., dimN, num_heads, mem_channels]`.
    value: values to be used in attention with shape of `[batch_size, dim1,
      dim2,..., dimN, num_heads, value_channels]`.
    dtype: the dtype of the computation (default: float32)
    bias: bias for the attention weights. This can be used for incorporating
      autoregressive mask, padding mask, proximity bias.
    axis: axes over which the attention is applied.
    broadcast_dropout: bool: use a broadcasted dropout along batch dims.
    dropout_rng: JAX PRNGKey: to be used for dropout
    dropout_rate: dropout rate
    deterministic: bool, deterministic or not (to apply dropout)
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.

  Returns:
    Output of shape `[bs, dim1, dim2, ..., dimN, num_heads, value_channels]`.
  """
  assert key.shape[:-1] == value.shape[:-1]
  assert (query.shape[0:1] == key.shape[0:1] and
          query.shape[-1] == key.shape[-1])

  if axis is None:
    axis = tuple(range(1, key.ndim - 2))
  if not isinstance(axis, Iterable):
    axis = (axis,)
  assert key.ndim == query.ndim
  assert key.ndim == value.ndim
  for ax in axis:
    if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2):
      raise ValueError('Attention axis must be between the batch '
                       'axis and the last-two axes.')
  depth = query.shape[-1]
  n = key.ndim
  # batch_dims is <bs, <non-attention dims>, num_heads>
  batch_dims = tuple(np.delete(range(n), axis + (n - 1,)))
  # q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
  qk_perm = batch_dims + axis + (n - 1,)
  key = key.transpose(qk_perm)
  query = query.transpose(qk_perm)
  # v -> (bs, <non-attention dims>, num_heads, channels, <attention dims>)
  v_perm = batch_dims + (n - 1,) + axis
  value = value.transpose(v_perm)

  # Scale queries by 1/sqrt(depth) for stable softmax logits.
  query = query / jnp.sqrt(depth).astype(dtype)
  batch_dims_t = tuple(range(len(batch_dims)))
  attn_weights = lax.dot_general(
      query,
      key, (((n - 1,), (n - 1,)), (batch_dims_t, batch_dims_t)),
      precision=precision)

  # apply attention bias: masking, dropout, proximity bias, etc.
  if bias is not None:
    attn_weights = attn_weights + bias

  # normalize the attention weights: numerically stable softmax over the
  # attention dimensions.
  norm_dims = tuple(range(attn_weights.ndim - len(axis), attn_weights.ndim))
  attn_weights = lax.exp(
      attn_weights -
      jax.scipy.special.logsumexp(attn_weights, axis=norm_dims, keepdims=True))
  attn_weights = attn_weights.astype(dtype)

  # apply dropout
  if not deterministic and dropout_rate > 0.:
    if dropout_rng is None:
      dropout_rng = scope.make_rng('dropout')
    # BUGFIX: `jax.lax.tie_in` became a no-op with JAX omnistaging and has
    # since been removed from JAX; the plain constant is equivalent.
    keep_prob = 1.0 - dropout_rate
    if broadcast_dropout:
      # dropout is broadcast across the batch+head+non-attention dimension
      dropout_dims = attn_weights.shape[-(2 * len(axis)):]
      dropout_shape = (tuple([1] * len(batch_dims_t)) + dropout_dims)
      keep = random.bernoulli(dropout_rng, keep_prob, dropout_shape)
    else:
      keep = random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
    multiplier = (keep.astype(attn_weights.dtype) /
                  jnp.asarray(keep_prob, dtype=dtype))
    attn_weights = attn_weights * multiplier

  # compute the new values given the attention weights
  wv_contracting_dims = (norm_dims, range(value.ndim - len(axis), value.ndim))
  y = lax.dot_general(
      attn_weights,
      value, (wv_contracting_dims, (batch_dims_t, batch_dims_t)),
      precision=precision)

  # back to (bs, dim1, dim2, ..., dimN, num_heads, channels):
  # invert qk_perm inline (perm_inv[dst] = src).
  perm_inv = [0] * len(qk_perm)
  for src, dst in enumerate(qk_perm):
    perm_inv[dst] = src
  y = y.transpose(tuple(perm_inv))
  return y
def _invert_perm(perm):
perm_inv = [0] * len(perm)
for i, j in enumerate(perm):
perm_inv[j] = i
return tuple(perm_inv)
class CacheEntry(struct.PyTreeNode):
  """Per-layer cache for autoregressive decoding.

  Holds the full-length key/value tensors plus the next write index.
  NOTE(review): as a ``struct.PyTreeNode`` this flattens into a pytree, so it
  can flow through jax transformations; field order matters for flattening.
  """
  # Cached key projections, full decode-length shape (written into in place
  # via lax.dynamic_update_slice by the attention function above).
  key: np.ndarray
  # Cached value projections, same layout as `key`.
  value: np.ndarray
  # Scalar uint32 counter: number of positions written so far.
  i: np.ndarray
def multi_head_dot_product_attention(
scope: Scope,
inputs_q,
inputs_kv,
num_heads,
dtype=jnp.float32,
qkv_features=None,
out_features=None,
attention_axis=None,
causal_mask=False,
padding_mask=None,
key_padding_mask=None,
segmentation=None,
key_segmentation=None,
cache=False,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None,
kernel_init=default_kernel_init,
bias_init=initializers.zeros,
bias=True,
attention_fn=dot_product_attention):
"""Applies multi-head dot product attention on the input data.
Projects the inputs into multi-headed query, key, and value vectors,
applies dot-product attention and project the results to an output vector.
This can be used for encoder-decoder attention by specifying both `inputs_q`
  and `inputs_kv` or for self-attention by only specifying `inputs_q` and
setting `inputs_kv` to None.
Args:
inputs_q: input queries of shape `[bs, dim1, dim2, ..., dimN, features]`.
inputs_kv: key/values of shape `[bs, dim1, dim2, ..., dimN, features]`
      or None for self-attention, in which case key/values will be derived
from inputs_q.
num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
should be divisible by the number of heads.
dtype: the dtype of the computation (default: float32)
qkv_features: dimension of the key, query, and value.
out_features: dimension of the last projection
attention_axis: axes over which the attention is applied ( 'None' means
attention over all axes, but batch, heads, and features).
causal_mask: boolean specifying whether to apply a causal mask on the
attention weights. If True, the output at timestep `t` will not depend
on inputs at timesteps strictly greater than `t`.
padding_mask: boolean specifying query tokens that are pad token.
key_padding_mask: boolean specifying key-value tokens that are pad token.
segmentation: segment indices for packed inputs_q data.
key_segmentation: segment indices for packed inputs_kv data.
cache: an instance of `flax.deprecated.nn.attention.Cache` used for
efficient autoregressive decoding.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout
dropout_rate: dropout rate
deterministic: bool, deterministic or not (to apply dropout)
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
kernel_init: initializer for the kernel of the Dense layers.
bias_init: initializer for the bias of the Dense layers.
bias: bool: whether pointwise QKVO dense transforms use bias.
attention_fn: dot_product_attention or compatible function. Accepts
query, key, value, and returns output of shape
`[bs, dim1, dim2, ..., dimN,, num_heads, value_channels]``
Returns:
output of shape `[bs, dim1, dim2, ..., dimN, features]`.
"""
assert causal_mask or not cache, (
'Caching is only support for causal attention.')
if inputs_kv is None:
inputs_kv = inputs_q
if attention_axis is None:
attention_axis = tuple(range(1, inputs_q.ndim - 1))
features = out_features or inputs_q.shape[-1]
qkv_features = qkv_features or inputs_q.shape[-1]
assert qkv_features % num_heads == 0, (
'Memory dimension must be divisible by number of heads.')
head_dim = qkv_features // num_heads
dense = functools.partial(
dense_general,
axis=-1,
dtype=dtype,
features=(num_heads, head_dim),
kernel_init=kernel_init,
bias_init=bias_init,
bias=bias,
precision=precision)
# project inputs_q to multi-headed q/k/v
# dimensions are then [bs, dims..., n_heads, n_features_per_head]
query = scope.child(dense, 'query')(inputs_q)
key = scope.child(dense, 'key')(inputs_kv)
value = scope.child(dense, 'value')(inputs_kv)
if cache:
cache_entry: Union[Callable[[Any], CacheEntry], CacheEntry]
if not scope.has_variable('cache', 'entry'):
ndim, tail_shape = (key.ndim, key.shape[-2:])
def init_fn(shape, dtype=jnp.float32):
full_shape = shape + tail_shape
if len(full_shape) != ndim:
raise ValueError('Shape should be a tuple with the shape of the batch'
'and attention dims.')
return CacheEntry(
key=jnp.zeros(full_shape, dtype),
value=jnp.zeros(full_shape, dtype),
i=jnp.zeros((), jnp.uint32))
cache_entry = init_fn
else:
cache_entry = scope.get_variable('cache', 'entry')
if not isinstance(cache_entry, CacheEntry):
raise ValueError('Cache is not initialized.')
expected_shape = list(cache_entry.key.shape[:-2])
for attn_dim in attention_axis:
expected_shape[attn_dim] = 1
expected_shape = tuple(expected_shape) + inputs_q.shape[-1:]
if expected_shape != inputs_q.shape:
raise ValueError('Invalid shape provided, '
'expected shape %s instead got %s.' %
(expected_shape, inputs_q.shape))
cshape = cache_entry.key.shape
indices = [0] * len(cshape)
i = cache_entry.i
attn_size = np.prod(np.take(cshape, attention_axis))
for attn_dim in attention_axis:
attn_size //= cshape[attn_dim]
indices[attn_dim] = i // attn_size
i = i % attn_size
key = lax.dynamic_update_slice(cache_entry.key, key, indices)
value = lax.dynamic_update_slice(cache_entry.value, value, indices)
one = jnp.array(1, jnp.uint32)
cache_entry = cache_entry.replace(i=cache_entry.i + one,
key=key,
value=value)
# TODO(levskaya): verify this is still needed in translation decoding.
key_padding_mask = jnp.broadcast_to(
(jnp.arange(cshape[1]) < cache_entry.i), cshape[:2])
key_padding_mask = key_padding_mask.astype(jnp.float32)[..., None]
scope.put_variable('cache', 'entry', cache_entry)
# create attention masks
mask_components = []
if causal_mask:
| |
"""
Author: <NAME>
License: MIT
"""
import numpy as np
from xdgmm import XDGMM
class Empiricist(object):
"""
Worker object that can fit supernova and host galaxy parameters
given noisy inputs using an XDGMM model, and then predict new
supernovae based on this model and a set of new host galaxies.
Parameters
----------
model_file: string (optional)
Name of text file containing model being used (default=None).
fit_method: string (optional)
Name of XD fitting method to use (default='astroML'). Must be
either 'astroML' or 'Bovy'.
Notes
-----
The class can be initialized with a model or one can be loaded or
fit to data.
"""
def __init__(self, model_file=None, fit_method='astroML'):
self.XDGMM = XDGMM(n_components=7, method=fit_method)
self.fit_method = fit_method
if model_file is not None:
self.read_model(model_file)
def get_SN(self, X, Xerr=None, n_SN=1):
"""
Conditions the XDGMM model based on the data in X and returns
SN parameters sampled from the conditioned model.
Parameters
----------
X: array_like, shape = (n_samples, n_features)
Input data. First 3 entries (SN parameters) should be NaN.
Xerr: array_like, shape = (n_samples, n_features), optional
Error on input data. SN errors should be 0.0. If None,
errors are not used for the conditioning.
n_SN: int (optional)
Number of SNe to sample (default = 1).
Returns
-------
SN_data: array_like, shape = (n_SN, 3)
Sample of SN data taken from the conditioned model.
Notes
-----
Assumes that the first three parameters used when fitting
the model are the SN parameters.
"""
if self.model_file is None:
raise StandardError("Model parameters not set.")
if Xerr is None: cond_XDGMM = self.XDGMM.condition(X)
else: cond_XDGMM = self.XDGMM.condition(X, Xerr)
return np.atleast_2d(cond_XDGMM.sample(n_SN))
def fit_model(self, X, Xerr, filename='empiriciSN_model.fit',
n_components=6):
"""
Fits the XD model to data.
Parameters
----------
X: array_like, shape = (n_samples, n_features)
Input data.
Xerr: array_like, shape = (n_samples, n_features, n_features)
Error on input data.
filename: string (optional)
Filename for model fit to be saved to (default =
'empiriciSN_model.fit').
n_components: float (optional)
Number of Gaussian components to use (default = 6)
Notes
-----
The specified method and n_components Gaussian components will
be used (typical BIC-optimized numbers of components for ~100s
of training datapoints are 6 or 7).
The fit will be saved in the file with name defined by the
filename variable.
"""
self.XDGMM.n_components = n_components
self.XDGMM = self.XDGMM.fit(X, Xerr)
self.XDGMM.save_model(filename)
self.model_file = filename
return
def fit_from_files(self, filelist, filename='empiriciSN_model.fit',
n_components=7):
"""
Fits the XD model to data contained in the files provided.
Parameters
----------
filelist: array_like
Array of strings containing names of files containing data
to fit.
filename: string (optional)
Filename for model fit (default = 'empiriciSN_model.fit').
n_components: float (optional)
Number of Gaussian components to use (default = 7)
method: string (optional)
XD fitting method to use (default = 'astroML')
Notes
-----
The model is fitted using the data contained in the files
named in the `filelist` variable. This assumes that the data
files are in the same format as those provided with this code
and that only redshift, distance from host nucleus, host colors,
and local host surface brightness are being used for the fit.
"""
X, Xerr = self.get_data(filelist)
self.fit_model(X, Xerr, filename=filename,
n_components=n_components)
return
def read_model(self, filename):
"""
Reads the parameters of a model from a file.
Parameters
----------
filename: string
Name of the file to read from.
Notes
-----
Model parameters are stored in the self.XDGMM model object.
The model filename is stored self.model_file.
"""
self.XDGMM.read_model(filename)
self.model_file = filename
return
def component_test(self, X, Xerr, component_range, no_err=False):
"""
Test the performance of the model for a range of numbers of
Gaussian components.
Parameters
----------
X: array_like, shape = (n_samples, n_features)
Input data.
Xerr: array_like, shape = (n_samples, n_features, n_features)
Error on input data.
component_range: array_like
Range of n_components to test.
no_err: bool (optional)
Flag for whether to calculate the BIC with the errors
included or not. (default = False)
Returns
-------
bics: array_like, shape = (len(param_range),)
BIC for each value of n_components
optimal_n_comp: float
Number of components with lowest BIC score
lowest_bic: float
Lowest BIC from the scores computed.
Notes
-----
Uses the XDGMM.bic_test method to compute the BIC score for
each n_components in the component_range array.
"""
bics, optimal_n_comp, lowest_bic = \
self.XDGMM.bic_test(X, Xerr, component_range, no_err)
return bics, optimal_n_comp, lowest_bic
def get_logR(self,cond_indices, R_index, X, Xerr=None):
"""
Uses a subset of parameters in the given data to condition the
model and return a sample value for log(R/Re).
Parameters
----------
cond_indices: array_like
Array of indices indicating which parameters to use to
condition the model. Cannot contain [0, 1, 2] since these
are SN parameters.
R_index: int
Index of log(R/Re) in the list of parameters that were used
to fit the model.
X: array_like, shape = (n < n_features,)
Input data.
Xerr: array_like, shape = (X.shape,) (optional)
Error on input data. If none, no error used to condition.
Returns
-------
logR: float
Sample value of log(R/Re) taken from the conditioned model.
Notes
-----
The fit_params array specifies a list of indices to use to
condition the model. The model will be conditioned and then
a radius will be drawn from the conditioned model.
This is so that the radius can then be used to calculate local
surface brightness to fully condition the model to sample
likely SN parameters.
This does not make assumptions about what parameters are being
used in the model, but does assume that the model has been
fit already and that the first three parameters in the data
that were used to fit the model are the SN parameters.
"""
if self.model_file is None:
raise StandardError("Model parameters not set.")
if 0 in cond_indices or 1 in cond_indices or 2 in cond_indices:
raise ValueError("Cannot condition model on SN parameters.")
if R_index in cond_indices:
raise ValueError("Cannot condition model on log(R/Re).")
cond_data = np.array([])
if Xerr is not None: cond_err = np.array([])
R_cond_idx = R_index
n_features = self.XDGMM.mu.shape[1]
j = 0
for i in range(n_features):
if i in cond_indices:
cond_data = np.append(cond_data,X[j])
if Xerr is not None: cond_err = np.append(cond_err, Xerr[j])
j += 1
if i < R_index: R_cond_idx -= 1
else:
cond_data = np.append(cond_data,np.nan)
if Xerr is not None: cond_err = np.append(cond_err, 0.0)
if Xerr is not None:
cond_XDGMM = self.XDGMM.condition(cond_data, cond_err)
else: cond_XDGMM = self.XDGMM.condition(cond_data)
sample = cond_XDGMM.sample()
logR = sample[0][R_cond_idx]
return logR
def get_local_SB(self, SB_params, R ):
"""
Uses magnitudes, a surface brightness (SB) profile, and
a SN location to fit local surface brightnesses at the location
of the SN.
Parameters
----------
SB_params: array_like, shape = (21,)
Array of parameters needed for the SB fit. First entry
should be a sersic index of 1 or 4, indicating whether to
use an exponential or de Vaucouleurs profile. Following this
should be sets of
(magnitude, mag_unc, effective radius, rad_unc) data for
each of the 5 ugriz filters, giving a total array length of
21. These data are assumed to be known by the user.
R: float
Separation from host nucleus in units of log(R/Re).
It is assumed that the Re used here is the r-band Re, as is
output by the get_logR function.
Returns
-------
SBs: array_list, shape = (5,)
Local surface brightness at the location of the SN for each
of the 5 ugriz filters. Units = mag/arcsec^2
SB_errs: array_like, shape = (5,)
Uncertainties on the local surface brightnesses.
"""
if SB_params[0]!=1 and SB_params[0]!=4:
raise ValueError("Sersic index must be 1 or 4")
sep = (10**R) * SB_params[11] # separation in arcsec
SBs = np.array([])
SB_errs = np.array([])
for j in range(5):
halfmag = SB_params[j*4+1] + 0.75257
magerr = SB_params[j*4+2]
Re = SB_params[j*4+3]
Re_err = SB_params[j*4+4]
r = sep/Re
Ie = halfmag + 2.5 * np.log10(np.pi*Re**2)
Re2_unc = 2 * Re * Re_err * np.pi
log_unc = 2.5 * Re2_unc/(np.log10(np.pi*Re**2) * np.log(10))
Ie_unc = np.sqrt(magerr**2 + log_unc**2)
if SB_params[0] == 1:
Io = Ie-1.824
Io_unc = Ie_unc
sb = Io*np.exp(-1.68*(r))
exp_unc = np.exp(-1.68*(r))*1.68*sep*Re_err/(Re**2)
sb_unc = sb * np.sqrt((Io_unc/Io)**2 +
(exp_unc/np.exp(-1.68*(r)))**2)
if np.isnan(sb_unc): sb_unc = 0.0
| |
# Copyright (c) 2009-2016 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
R""" Deprecated initialization routines.
"""
from hoomd.deprecated import _deprecated;
import hoomd;
import math
import os
from hoomd import _hoomd
def read_xml(filename, restart = None, time_step = None, wrap_coordinates = False):
    R""" Read the initial system state from a HOOMD XML file.

    Args:
        filename (str): File to read.
        restart (str): If this file exists on disk, it is read instead of *filename*.
        time_step (int): When given, overrides the time step stored in the XML file.
        wrap_coordinates (bool): Wrap input coordinates back into the box instead of
            treating out-of-box coordinates as an error.

    .. deprecated:: 2.0
        GSD is the new default file format for HOOMD-blue. It can store everything an
        XML file can in an efficient binary format. See :py:class:`hoomd.init.read_gsd`.

    Examples::

        deprecated.init.read_xml(filename="data.xml")
        deprecated.init.read_xml(filename="init.xml", restart="restart.xml")
        system = deprecated.init.read_xml(filename="data.xml")

    All particles, bonds, etc. are read from the XML file, setting the initial
    condition of the simulation; afterwards other hoomd_script commands may run.
    For restartable jobs, give the initial condition as *filename* and the restart
    file as *restart*: the restart file is read when it exists, *filename* otherwise.
    All values are read in native units, see :ref:`page-units` for more information.
    """
    hoomd.util.print_status_line()
    hoomd.context._verify_init()

    # refuse to initialize twice
    if hoomd.init.is_initialized():
        hoomd.context.msg.error("Cannot initialize more than once\n")
        raise RuntimeError("Error reading XML file")

    # prefer the restart file whenever it is present on disk
    target = filename
    if restart is not None and os.path.isfile(restart):
        target = restart

    # parse the XML into a snapshot
    initializer = _deprecated.HOOMDInitializer(hoomd.context.exec_conf, target, wrap_coordinates)
    snapshot = initializer.getSnapshot()

    decomposition = hoomd.init._create_domain_decomposition(snapshot._global_box)
    if decomposition is not None:
        hoomd.context.current.system_definition = _hoomd.SystemDefinition(snapshot, hoomd.context.exec_conf, decomposition)
    else:
        hoomd.context.current.system_definition = _hoomd.SystemDefinition(snapshot, hoomd.context.exec_conf)

    # the XML file's time step is used unless the caller supplied one
    step = initializer.getTimeStep() if time_step is None else time_step
    hoomd.context.current.system = _hoomd.System(hoomd.context.current.system_definition, step)

    hoomd.init._perform_common_init_tasks()
    return hoomd.data.system_data(hoomd.context.current.system_definition)
def create_random(N, phi_p=None, name="A", min_dist=0.7, box=None, seed=1, dimensions=3):
    R""" Create *N* randomly placed particles of a single type.

    Args:
        N (int): Number of particles to create.
        phi_p (float): Packing fraction of particles in the simulation box
            (unitless); overrides *box* when given.
        name (str): Name of the particle type to create.
        min_dist (float): Minimum separation between particles (in distance units).
        box (:py:class:`hoomd.data.boxdim`): Simulation box dimensions.
        seed (int): Random seed.
        dimensions (int): The number of dimensions in the simulation (2 or 3).

    .. deprecated:: 2.0 Random initialization is best left to specific methods tailored by the user for their work.

    Either *phi_p* or *box* must be specified. When *phi_p* is set, the box edge
    length is chosen so the packing fraction equals *phi_p*; the number density
    :math:`n` relates to it via :math:`n = 2d/\pi \cdot \phi_P` with *d* the
    dimension, assuming particles of radius 0.5.

    Examples::

        init.create_random(N=2400, phi_p=0.20)
        init.create_random(N=2400, phi_p=0.40, min_dist=0.5)
        system = init.create_random(N=2400, box=data.boxdim(L=20))

    The returned system data can be saved and used to read and/or change
    particle properties later in the script; see :py:mod:`hoomd.data`.
    """
    hoomd.util.print_status_line()
    hoomd.context._verify_init()

    # refuse to initialize twice
    if hoomd.init.is_initialized():
        hoomd.context.msg.error("Cannot initialize more than once\n")
        raise RuntimeError("Error initializing")

    if dimensions not in (2,3):
        raise ValueError('dimensions must be 2 or 3')

    # phi_p takes precedence: derive the box edge from the packing fraction
    if phi_p is not None:
        edge = math.pow(math.pi/(2.0*dimensions)*N / phi_p, 1.0/dimensions)
        box = hoomd.data.boxdim(L=edge, dimensions=dimensions)

    if box is None:
        raise RuntimeError('box or phi_p must be specified')
    if not isinstance(box, hoomd.data.boxdim):
        hoomd.context.msg.error('box must be a data.boxdim object')
        raise TypeError('box must be a data.boxdim object')

    # repurpose the polymer generator to place single particles
    generator = _deprecated.RandomGenerator(hoomd.context.exec_conf, box._getBoxDim(), seed, box.dimensions)

    type_vector = _hoomd.std_vector_string()
    type_vector.append(name)

    # single particles need no bonds — pass empty bond lists
    bond_ab = _hoomd.std_vector_uint()
    bond_type = _hoomd.std_vector_string()

    generator.addGenerator(int(N), _deprecated.PolymerParticleGenerator(hoomd.context.exec_conf, 1.0, type_vector, bond_ab, bond_ab, bond_type, 100, box.dimensions))
    generator.setSeparationRadius(name, min_dist/2.0)
    generator.generate()

    snapshot = generator.getSnapshot()
    decomposition = hoomd.init._create_domain_decomposition(snapshot._global_box)
    if decomposition is not None:
        hoomd.context.current.system_definition = _hoomd.SystemDefinition(snapshot, hoomd.context.exec_conf, decomposition)
    else:
        hoomd.context.current.system_definition = _hoomd.SystemDefinition(snapshot, hoomd.context.exec_conf)

    hoomd.context.current.system = _hoomd.System(hoomd.context.current.system_definition, 0)

    hoomd.init._perform_common_init_tasks()
    return hoomd.data.system_data(hoomd.context.current.system_definition)
def create_random_polymers(box, polymers, separation, seed=1):
R""" Generates any number of randomly positioned polymers of configurable types.
Args:
box (:py:class:`hoomd.data.boxdim`): Simulation box dimensions
polymers (list): Specification for the different polymers to create (see below)
separation (dict): Separation radii for different particle types (see below)
seed (int): Random seed to use
.. deprecated:: 2.0 Random initialization is best left to specific methods tailored by the user for their work.
Any number of polymers can be generated, of the same or different types, as
specified in the argument *polymers*. Parameters for each polymer include
bond length, particle type list, bond list, and count.
The syntax is best shown by example. The below line specifies that 600 block copolymers
A6B7A6 with a bond length of 1.2 be generated::
polymer1 = dict(bond_len=1.2, type=['A']*6 + ['B']*7 + ['A']*6,
bond="linear", count=600)
Here is an example for a second polymer, specifying just 100 polymers made of 5 B beads
bonded in a branched pattern::
polymer2 = dict(bond_len=1.2, type=['B']*5,
bond=[(0, 1), (1,2), (1,3), (3,4)] , count=100)
The *polymers* argument can be given a list of any number of polymer types specified
as above. *count* randomly generated polymers of each type in the list will be
generated in the system.
In detail:
- bond_len defines the bond length of the generated polymers. This should
not necessarily be set to the equilibrium bond length! The generator is dumb and doesn't know
that bonded particles can be placed closer together than the separation (see below). Thus
bond_len must be at a minimum set at twice the value of the largest separation radius. An
error will be generated if this is not the case.
- type is a python list of strings. Each string names a particle type in the order that
they will be created in generating the polymer.
- bond can be specified as "linear" in which case the generator connects all particles together
with bonds to form a linear chain. bond can also be given a list if python tuples (see example
above).
- Each tuple in the form of \c (a,b) specifies that particle \c a of the polymer be bonded to
particle \c b. These bonds are given the default type name of 'polymer' to be used when specifying parameters to
bond forces such as bond.harmonic.
- A tuple with three elements (a,b,type) can be used as above, but with a custom name for the bond. For example,
a simple branched polymer with different bond types on each branch could be defined like so::
        bond=[(0,1), (1,2), (2,3,'branchA'), (3,4,'branchA'), (2,5,'branchB'), (5,6,'branchB')]
separation must contain one entry for each particle type specified in polymers
('A' and 'B' in the examples above). The value given is the separation radius of each
particle of that type. The generated polymer system will have no two overlapping
particles.
Examples::
init.create_random_polymers(box=data.boxdim(L=35),
polymers=[polymer1, polymer2],
separation=dict(A=0.35, B=0.35));
init.create_random_polymers(box=data.boxdim(L=31),
polymers=[polymer1],
separation=dict(A=0.35, B=0.35), seed=52);
# create polymers in an orthorhombic box
init.create_random_polymers(box=data.boxdim(Lx=18,Ly=10,Lz=25),
polymers=[polymer2],
separation=dict(A=0.35, B=0.35), seed=12345);
# create a triclinic box with tilt factors xy=0.1 xz=0.2 yz=0.3
init.create_random_polymers(box=data.boxdim(L=18, xy=0.1, xz=0.2, yz=0.3),
                                polymers=[polymer2],
separation=dict(A=0.35, B=0.35));
With all other parameters the same, create_random_polymers will always create the
same system if seed is the | |
i
for i in range(4):
enemy_piece_id_list[31 + i] = 12 + i
enemy_blue_piece_set = set({})
for index, piece_color in enumerate(enemy_pieces):
if piece_color == 1:
enemy_blue_piece_set.add(enemy_piece_id_list[index])
# enemy_blue_piece_setの値を反転させ、推測の際に扱いやすいように変換する
# (このままでは8~15の値をとるが、0~7の値に修正し扱う必要がある)
rev_enemy_blue_piece_set = set({})
for piece_coo in enemy_blue_piece_set:
rev_enemy_blue_piece_set.add(15 - piece_coo)
if through_num == 0:
ii_state = II_State(blue_piece_set, rev_enemy_blue_piece_set)
elif wrong_through_num == 0:
see_thorugh_id_set = create_see_through_piece(
rev_enemy_blue_piece_set, through_num
)
ii_state = II_State(
blue_piece_set, rev_enemy_blue_piece_set, see_thorugh_id_set
)
else:
correct_wrong_piece_set = create_wrong_and_see_through_piece(
blue_piece_set, through_num, wrong_through_num
)
ii_state = II_State(
blue_piece_set,
rev_enemy_blue_piece_set,
correct_wrong_piece_set[0],
correct_wrong_piece_set[1],
)
return ii_state
def create_state_from_ii_state(ii_state, blue_set):
    """Reconstruct a State (own viewpoint) from an II_State plus an assumed
    set of enemy blue-piece IDs.

    Board encoding: 1 = blue piece, 2 = red piece, 0 = empty. Enemy
    coordinates are mirrored (35 - coordinate); a coordinate >= 36 marks a
    captured piece and is skipped.
    """
    my_board = [0] * 36
    enemy_board = [0] * 36

    # piece IDs 0-7 belong to the enemy
    for piece_id, coo in enumerate(ii_state.all_piece[:8]):
        if coo < 36:
            enemy_board[35 - coo] = 1 if piece_id in blue_set else 2

    # piece IDs 8-15 are our own; colors come from the real blue set
    for offset, coo in enumerate(ii_state.all_piece[8:]):
        if coo < 36:
            my_board[coo] = 1 if offset + 8 in ii_state.real_my_piece_blue_set else 2

    return State(my_board, enemy_board)
### ガイスターAI大会のプロトコル周り
# プロトコルから相手の行動は送られず、更新されたボードが送られてくるそうなので、行動した駒の座標を求める
# これは相手の行動のみ検知可能
def enemy_coordinate_checker(before_board, now_board):
    """Locate the enemy move by diffing two protocol board strings.

    The protocol sends updated boards rather than moves, so the enemy's move
    is recovered by scanning the second half of the board string (the enemy's
    entries) for the first changed character. Each entry is 3 characters,
    e.g. "14R" (column digit, row digit, color), so the entry start is found
    by rounding the changed index down to a multiple of 3. A coordinate is
    column + row * 6, matching the representation used in game.py.

    Returns (coordinate before the move, coordinate after the move).
    """
    half = len(before_board) // 2
    changed = half
    for idx in range(half, len(before_board)):
        changed = idx
        if before_board[idx] != now_board[idx]:
            break

    entry_start = (changed // 3) * 3

    def _coord(board):
        return int(board[entry_start]) + int(board[entry_start + 1]) * 6

    return _coord(before_board), _coord(now_board)
# Convert an action number into (source position, direction).
def action_to_position(action_num):
    """Split an action number into its piece position and direction.

    Actions are encoded as position * 4 + direction (direction in 0..3).
    Uses divmod (floor division) instead of the original int(action_num / 4):
    true division goes through a float, which loses precision for very large
    ints and rounds toward zero for negatives.
    """
    return divmod(action_num, 4)  # (position, direction)
# Convert an action number into (coordinate before move, coordinate after move).
def action_to_coordinate(action_num):
    """Decode an action number into source and destination board coordinates.

    Directions: 0 = down (+6), 1 = left (-1), 2 = up (-6), 3 = right (+1).
    Moving up from square 0 or 5 is a goal move; the piece is left in place
    because the game is already decided at that point.
    """
    # inline of action_to_position: position * 4 + direction encoding
    src = int(action_num / 4)
    direction = action_num % 4
    if direction == 0:    # down
        dst = src + 6
    elif direction == 1:  # left
        dst = src - 1
    elif direction == 3:  # right
        dst = src + 1
    elif direction == 2:  # up
        # goal squares: keep the coordinate unchanged
        dst = src if src in (0, 5) else src - 6
    else:
        print("ERROR:action_to_coordinate(illegal action_num)")
    return src, dst
# Encode a source coordinate and direction number into an action number.
def position_to_action(position, direction):
    """Return the action number for moving from `position` in `direction`
    (inverse of action_to_position)."""
    return direction + position * 4
# Derive the enemy's action number from its move's before/after coordinates.
def calculate_enemy_action_number_from_coordinate(before_coordinate, now_coordinate):
    """Translate an observed enemy move into the enemy's own action number.

    Coordinates are mirrored into the enemy's frame (35 - coordinate); the
    coordinate delta then determines the direction (+6 down, +1 left,
    -6 up, -1 right in that frame). Returns -1 for an impossible delta.
    """
    origin = 35 - before_coordinate
    dest = 35 - now_coordinate
    delta = dest - origin

    direction_by_delta = {6: 0, 1: 1, -6: 2, -1: 3}
    if delta not in direction_by_delta:
        print("ERROR:find_enemy_action_number_from_coordinate(illegal move)")
        return -1
    # inline of position_to_action: position * 4 + direction
    return origin * 4 + direction_by_delta[delta]
###
# Update the Geister board after an enemy move (captured pieces are also
# handled here).
def update_II_state(ii_state, before_coordinate, now_coordinate):
    # A piece already sitting on the destination square means it was captured.
    kill = np.any(ii_state.all_piece == now_coordinate)
    # Process the captured piece (captured pieces are marked with coordinate 99).
    if kill:
        dead_piece_ID = np.where(ii_state.all_piece == now_coordinate)[0][0]
        # NOTE(review): if real_my_piece_blue_set is a plain Python set, the
        # `== dead_piece_ID` comparison is always False (set vs int), so
        # color_is_blue would always be False. This only behaves as intended
        # when it is a numpy array — TODO confirm the attribute's type.
        color_is_blue = np.any(ii_state.real_my_piece_blue_set == dead_piece_ID)
        # print(dead_piece_ID, color_is_blue)
        reduce_pattern(dead_piece_ID, color_is_blue, ii_state)
    # Move the piece: rewrite the source coordinate to the destination.
    ii_state.all_piece[
        np.where(ii_state.all_piece == before_coordinate)[0][0]
    ] = now_coordinate
# myの視点で状態を作成
def my_looking_create_state(ii_state, my_blue, my_red, enemy_blue, enemy_red):
# プレイヤー毎のデュアルネットワークの入力の2次元配列の取得
def pieces_array_of(blue_piece_list, red_piece_list):
table_list = []
blue_table = [0] * 36
table_list.append(blue_table) # ちなみにappendは参照渡し
# blue_piece_listは駒のIDの値なので、ii_state.all_pieceでそのIDを参照してあげると座標が取れる
for blue_piece in blue_piece_list:
if ii_state.all_piece[blue_piece] < 36: # 死駒を除外
blue_table[ii_state.all_piece[blue_piece]] = 1
red_table = [0] * 36
table_list.append(red_table)
for red_piece in red_piece_list:
if ii_state.all_piece[red_piece] < 36:
red_table[ii_state.all_piece[red_piece]] = 1
return table_list
# デュアルネットワークの入力の2次元配列の取得(自分と敵両方)
return [pieces_array_of(my_blue, my_red), pieces_array_of(enemy_blue, enemy_red)]
# # 入力の順序はcreate
# # enemyの視点から状態を作成
# def enemy_looking_create_state(ii_state, my_blue, my_red, enemy_blue, enemy_red):
# # プレイヤー毎のデュアルネットワークの入力の2次元配列の取得
# def pieces_array_of(blue_piece_list, red_piece_list):
# table_list = []
# blue_table = [0] * 36
# # blue_piece_listは駒のIDの値なので、ii_state.all_pieceでそのIDを参照してあげると座標が取れる
# for blue_piece in blue_piece_list:
# if ii_state.all_piece[blue_piece] < 36: # 死駒を除外
# blue_table[ii_state.all_piece[blue_piece]] = 1
# blue_table.reverse() # 逆視点にするために要素を反転
# table_list.append(blue_table)
# red_table = [0] * 36
# for red_piece in red_piece_list:
# if ii_state.all_piece[red_piece] < 36:
# red_table[ii_state.all_piece[red_piece]] = 1
# red_table.reverse() # 逆視点にするために要素を反転
# table_list.append(red_table)
# return table_list
# # デュアルネットワークの入力の2次元配列の取得(自分と敵両方)
# return [pieces_array_of(enemy_blue, enemy_red), pieces_array_of(my_blue, my_red)]
# Assemble a State from assumed piece colorings, seen from the enemy's side.
def create_state_from_enemy_looking(ii_state, my_blue, my_red, enemy_blue, enemy_red):
    """Build a State from the enemy's point of view.

    Boards encode 1 = blue, 2 = red, 0 = empty. Piece IDs are resolved to
    coordinates through ii_state.all_piece; coordinates >= 36 (captured) are
    skipped. The enemy-side board is reversed because its coordinates are
    mirrored relative to ours.
    """
    def fill(board, piece_ids, color):
        for pid in piece_ids:
            coo = ii_state.all_piece[pid]
            if coo < 36:
                board[coo] = color

    # our pieces
    my_table = [0] * 36
    fill(my_table, my_blue, 1)
    fill(my_table, my_red, 2)

    # enemy pieces
    enemy_table = [0] * 36
    fill(enemy_table, enemy_blue, 1)
    fill(enemy_table, enemy_red, 2)
    enemy_table.reverse()  # mirror back: enemy coordinates are reversed

    # State as seen by the enemy
    return State(enemy_table, my_table)
# enemy→各駒の推測値を保存。推測のために70パターン想定するが、足し合わせるだけ(各盤面について保存はしない)
# my→推測したい駒配置。
# 行動と推測盤面に対応した行動価値のリストを返す
def my_ii_predict(model_path, ii_state):
    """Sum, per hypothesis about our own blue pieces, the policies over all
    hypotheses about the enemy's blue pieces.

    For every entry in ii_state.my_estimated_num (a (weight, blue-piece-IDs)
    pair) the network policy is evaluated against every enemy hypothesis and
    accumulated, yielding one float32 vector per own-piece hypothesis, indexed
    like ii_state.legal_actions().

    Changes vs. the previous version: the unused `a, b, c = DN_INPUT_SHAPE`
    unpacking was removed, the accumulator is created with np.zeros instead of
    np.array([0] * n), and append replaces extend([x]).
    """
    my_piece_set = set(ii_state.my_piece_list)
    enemy_piece_set = set(ii_state.enemy_piece_list)
    legal_actions = list(ii_state.legal_actions())
    # Policy function backed by the HandyRL-trained network.
    convert_func = convert_func_use_in_guess(model_path)

    # policies_list[hypothesis index (0..~69)][action index (board dependent)]
    policies_list = []
    for num_and_my_blue in ii_state.my_estimated_num:
        sum_np_policies = np.zeros(len(legal_actions), dtype="f4")
        # Pieces that are not blue in this hypothesis must be red.
        my_red_set = my_piece_set - set(num_and_my_blue[1])
        for num_and_enemy_blue in ii_state.enemy_estimated_num:
            enemy_red_set = enemy_piece_set - set(num_and_enemy_blue[1])
            ii_pieces_array = my_looking_create_state(
                ii_state,
                num_and_my_blue[1],
                my_red_set,
                num_and_enemy_blue[1],
                enemy_red_set,
            )
            policies = convert_func(ii_pieces_array, legal_actions)
            # Accumulate: only the sum over enemy hypotheses matters.
            sum_np_policies += np.array(policies, dtype="f4")
        policies_list.append(sum_np_policies)
    return policies_list
# # 相手の行動前に、相手の目線で各パターンにおける各行動の価値を算出
# def enemy_ii_predict(model_path, ii_state):
# a, b, c = DN_INPUT_SHAPE # (6, 6, 4)
# my_piece_set = set(ii_state.my_piece_list)
# enemy_piece_set = set(ii_state.enemy_piece_list)
# policies_list = []
# enemy_legal_actions = sorted(list(ii_state.enemy_legal_actions()), key=lambda x: x)
# convert_func = convert_func_use_in_guess(model_path)
# for num_and_enemy_blue in ii_state.enemy_estimated_num: # enemyのパターンの確からしさを求めたい
# # 赤駒のインデックスをセット形式で獲得(my_blueはタプル)
# enemy_red_set = enemy_piece_set - set(num_and_enemy_blue[1])
# sum_np_policies = np.array([0] * len(enemy_legal_actions), dtype="f4")
# for num_and_my_blue in ii_state.my_estimated_num:
# my_red_set = my_piece_set - set(num_and_my_blue[1])
# # 要修正
# ii_pieces_array = enemy_looking_create_state(
# ii_state,
# num_and_my_blue[1],
# my_red_set,
# num_and_enemy_blue[1],
# enemy_red_set,
# )
# # HandyRLに適応
# policies = convert_func(ii_pieces_array, enemy_legal_actions)
# # 行列演算するためにndarrayに変換
# np_policies = np.array(policies, dtype="f4")
# # myのパターンは既存のpoliciesに足すだけ
# sum_np_policies = sum_np_policies + np_policies
# policies_list.extend([sum_np_policies])
# return policies_list
from test import get_policies
# 自分の駒配置を確定させて推測するパターン
# 相手の行動前に、相手の目線で各パターンにおける各行動の価値を算出
def enemy_ii_predict(model_path, ii_state):
    """Compute, for each hypothesis about the enemy's blue pieces, the policy
    the enemy would follow — assuming the enemy has seen through our real
    piece placement.

    Returns a list of float32 vectors, one per entry of
    ii_state.enemy_estimated_num, each sorted by action number so the indexing
    matches the sorted legal-action lists used by the update_* functions.

    Changes vs. the previous version: the dead local `enemy_legal_actions`
    (computed but never used) and the commented-out legacy path were removed;
    append replaces extend([x]).
    """
    my_piece_set = set(ii_state.my_piece_list)
    enemy_piece_set = set(ii_state.enemy_piece_list)
    # HandyRL-trained policy evaluator for a given state.
    get_policies_func = get_policies(model_path)

    policies_list = []
    # Estimate the plausibility of each enemy hypothesis.
    for num_and_enemy_blue in ii_state.enemy_estimated_num:
        # Non-blue enemy pieces in this hypothesis must be red.
        enemy_red_set = enemy_piece_set - set(num_and_enemy_blue[1])
        # Our own placement is taken as known to the enemy.
        my_blue_set = ii_state.real_my_piece_blue_set
        my_red_set = my_piece_set - my_blue_set
        # Build the state from the enemy's viewpoint and query the policy.
        enemy_looking_state = create_state_from_enemy_looking(
            ii_state, my_blue_set, my_red_set, num_and_enemy_blue[1], enemy_red_set,
        )
        # get_policies_func yields (action, probability) pairs; sorting by
        # action number aligns every hypothesis' vector the same way.
        ap_list = sorted(get_policies_func(enemy_looking_state), key=lambda x: x[0])
        policies = [prob for _, prob in ap_list]
        # Kept as ndarray (legacy of when hypotheses were summed with matrix ops).
        policies_list.append(np.array(policies, dtype="f4"))
    return policies_list
# 相手の行動から推測値を更新
# state, enemy_ii_predictで作成した推測値の行列, 敵の行動番号
def update_predict_num_all(
    ii_state, beforehand_estimated_num, enemy_action_num, gamma=default_gamma
):
    """Update every enemy-hypothesis weight from the opponent's actual move.

    beforehand_estimated_num[i] is the policy vector (from enemy_ii_predict)
    for the i-th pattern in ii_state.enemy_estimated_num; the value that the
    chosen action received under a pattern is added to that pattern's
    gamma-decayed weight, in place.
    """
    legal_moves = sorted(ii_state.enemy_legal_actions())
    chosen_index = legal_moves.index(enemy_action_num)
    for idx, pattern in enumerate(ii_state.enemy_estimated_num):
        pattern[0] = pattern[0] * gamma + beforehand_estimated_num[idx][chosen_index]
# Update the guess weights from the opponent's actual move.
# If the move taken was the highest-valued one available to the opponent,
# the corresponding board hypothesis is considered more likely.
# Highest-valued action: +1; otherwise: +0.
def update_predict_num_max_only(
    ii_state, beforehand_estimated_num, enemy_action_num, gamma=default_gamma
):
    """Reinforce only the hypotheses for which the opponent's move was optimal.

    If the action the opponent actually played has the highest value under a
    hypothesized board, that board is considered more likely: its weight gets
    +1 (after gamma decay). Otherwise the weight only decays.
    ii_state.enemy_estimated_num is updated in place.
    """
    enemy_legal_actions = sorted(list(ii_state.enemy_legal_actions()), key=lambda x: x)
    enemy_action_index = enemy_legal_actions.index(enemy_action_num)
    bf_estimated_num = beforehand_estimated_num
    # If an unknown piece sits on square 0 or 5, the goal actions (numbers 2
    # and 22) are legal on the real board but may be missing from a hypothesis
    # where that piece is red; placeholders must be inserted or the lookup
    # beforehand_estimated_num[index][enemy_action_index] would be misaligned.
    goal_actions = []
    if 2 in enemy_legal_actions:
        goal_actions.append(2)
    if 22 in enemy_legal_actions:
        goal_actions.append(22)
    # Only realign when a goal action is actually available.
    if goal_actions != []:
        legal_length = len(enemy_legal_actions)
        for goal in goal_actions:
            new_est_num = []
            for bf_index, bf_est_num in enumerate(bf_estimated_num):
                # NOTE: this cannot handle red pieces on both squares 0 and 5
                # at once, but that case is rare enough to accept the
                # resulting inaccuracy in the guess.
                if legal_length > len(bf_est_num):
                    insert_index = enemy_legal_actions.index(goal)
                    # -999.9 marks the goal move as effectively impossible
                    # under this hypothesis.
                    new_est_num.append(
                        np.insert(bf_estimated_num[bf_index], insert_index, -999.9)
                    )
                else:
                    new_est_num.append(bf_estimated_num[bf_index])
            bf_estimated_num = new_est_num
    enemy_estimated_num = ii_state.enemy_estimated_num
    for index, en_est_num in enumerate(enemy_estimated_num):
        action_value = bf_estimated_num[index][enemy_action_index]
        # np.partition(..., -1)[-1] is the maximum of the vector: reward the
        # hypothesis only when the played action was its best-rated move.
        if np.partition(bf_estimated_num[index].ravel(), -1)[-1] == action_value:
            en_est_num[0] = (en_est_num[0] * gamma) + 1
# Update the guess weights from the opponent's actual move.
# Normalize the policy values per hypothesized board before adding them.
def update_predict_num_normalize(
    ii_state, beforehand_estimated_num, enemy_action_num, gamma=default_gamma
):
    """Update hypothesis weights using per-board normalized policy values.

    Each hypothesis' policy vector is shifted to a minimum of 0 and scaled to
    sum to 1; the normalized value of the action the opponent actually played
    is then added to the hypothesis' gamma-decayed weight, in place.
    """
    enemy_legal_actions = sorted(list(ii_state.enemy_legal_actions()), key=lambda x: x)
    enemy_action_index = enemy_legal_actions.index(enemy_action_num)
    bf_estimated_num = beforehand_estimated_num
    # If an unknown piece sits on square 0 or 5, the goal actions (numbers 2
    # and 22) are legal on the real board but may be missing from a hypothesis
    # where that piece is red; placeholders must be inserted or the lookup
    # beforehand_estimated_num[index][enemy_action_index] would be misaligned.
    goal_actions = []
    if 2 in enemy_legal_actions:
        goal_actions.append(2)
    if 22 in enemy_legal_actions:
        goal_actions.append(22)
    # Only realign when a goal action is actually available.
    if goal_actions != []:
        legal_length = len(enemy_legal_actions)
        for goal in goal_actions:
            new_est_num = []
            for bf_index, bf_est_num in enumerate(bf_estimated_num):
                # NOTE: this cannot handle red pieces on both squares 0 and 5
                # at once, but that case is rare enough to accept the
                # resulting inaccuracy in the guess.
                if legal_length > len(bf_est_num):
                    # Find the minimum policy value of this hypothesis.
                    pol_min = np.amin(bf_est_num)
                    # The impossible goal move gets the same value as the
                    # minimum, so it never looks attractive.
                    insert_index = enemy_legal_actions.index(goal)
                    new_est_num.append(np.insert(bf_est_num, insert_index, pol_min))
                else:
                    new_est_num.append(bf_estimated_num[bf_index])
            bf_estimated_num = new_est_num
    # Shift every vector so its minimum becomes 0.
    for bf_est_num in bf_estimated_num:
        bf_est_num -= np.amin(bf_est_num)
    # Scale to minimum 0, sum 1.
    for bf_est_num in bf_estimated_num:
        if np.sum(bf_est_num) > 0.0:  # guard against division by zero (an all-zero vector is kept as is)
            bf_est_num /= np.sum(bf_est_num)
    for index, en_est_num in enumerate(ii_state.enemy_estimated_num):
        action_value = bf_estimated_num[index][enemy_action_index]
        # Add the (normalized) value of the action actually taken.
        en_est_num[0] = (en_est_num[0] * gamma) + action_value
# Remove hypotheses that have become impossible.
def shave_impossible_pattern(piece_ID: int, color_is_blue: bool, ii_state):
    """Drop estimated patterns that contradict a newly revealed piece color.

    piece_ID < 8 refers to an enemy piece, piece_ID >= 8 to one of our own
    (the ID layout used throughout this module). If the piece turned out to
    be blue, only hypotheses that list it among the blue pieces can still be
    correct; if it turned out red, only hypotheses that do NOT list it as
    blue survive. The relevant list on ii_state is filtered in place so
    existing references to it stay valid.

    Replaces the previous four-branch copy-iterate-remove implementation with
    a single in-place filter; the surviving patterns are identical.
    """
    patterns = ii_state.enemy_estimated_num if piece_ID < 8 else ii_state.my_estimated_num
    if color_is_blue:
        survivors = [pattern for pattern in patterns if piece_ID in pattern[1]]
    else:
        survivors = [pattern for pattern in patterns if piece_ID not in pattern[1]]
    # Slice assignment mutates the original list object, matching the old
    # behavior of repeated list.remove() calls.
    patterns[:] = survivors
# 駒が死ぬたびに推測値を全て作り直して、死んだ駒と残った推測駒から新しい推測値を作成する
def rebuilding_estimated_num(
ii_state, correct_estimate_piece: list, wrong_estimate_piece: list
):
# 生きてる駒のリストにcorrect_estimate_pieceとwrong_estimate_pieceが存在するかを確認
# enemy_piece_list = [0, 1, 2, 3, 4, 5, | |
<reponame>cmu-db/cmdbac
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import logging
import requests
import re
import copy
import traceback
import time
import datetime
import json
from library.models import *
from cmudbac.settings import *
import utils
import extract
import submit
import count
## =====================================================================
## LOGGING CONFIGURATION
## =====================================================================
LOG = logging.getLogger()
SUBMISSION_FORMS_TIMES = 5
EDIT_DISTANCE_THRESHOLD = 3
## =====================================================================
## BASE DRIVER
## =====================================================================
class BaseDriver(object):
def __init__(self, main_url, database, name, base_path = '/tmp', log_file = None):
    """Remember the deployment coordinates and reset the crawl caches.

    When no log_file is given, fall back to the configured default log
    location for the target database.
    """
    self.main_url = main_url
    self.database = database
    self.name = name
    self.base_path = base_path
    if log_file is None:
        self.log_file = LOG_FILE_LOCATION[database.name.lower()]
    else:
        self.log_file = log_file
    # Caches filled in by bootstrap()/get_forms()/get_urls().
    self.init_forms = []
    self.forms = []
    self.urls = []
def check_log(self, last_line_no = None):
    """Inspect the database log file.

    With no argument, return the current number of lines (a position marker).
    With last_line_no, return the list of lines appended since that marker.

    Fix: the file handle was previously never closed (leaked on every call);
    it is now managed with a `with` block.
    """
    with open(self.log_file, 'r') as sql_log_file:
        lines = sql_log_file.readlines()
    if last_line_no is None:
        return len(lines)
    return lines[last_line_no:]
def process_query(self, query, inputs, queries):
    """Append `query` to `queries`, highlighting any matched form inputs.

    Input values are substituted longest-first so a value that is a substring
    of another does not clobber the longer match; each matched value is
    replaced by its input name wrapped in a red <span>.

    Fix: the tuple-unpacking lambda `lambda (x, y): ...` is Python-2-only
    syntax; indexing the (name, value) pair keeps the sort key valid on
    Python 3 as well, with identical ordering.
    """
    matched = False
    matched_query = query
    if inputs is not None:
        for name, value in sorted(inputs.items(), key=lambda item: len(str(item[1])), reverse=True):
            if str(value) in matched_query:
                matched_query = matched_query.replace(str(value), '<span style="color:red">{}</span>'.format(name))
                matched = True
    queries.append({'content': matched_query, 'matched': matched, 'raw': query})
def process_logs(self, logs, inputs):
    """Parse raw database log lines into the list of executed SQL queries.

    `logs` is the list of new log lines (as returned by check_log) and
    `inputs` the dict of form inputs used to highlight matched values.
    Returns (queries, counter) where counter aggregates query statistics.

    Implemented as a small state machine: depending on the DBMS that wrote
    the log, a line either starts a new query or continues the current
    multi-line one.
    """
    queries = []
    current_query = None
    new_query = None
    for line in logs:
        line = line.strip()
        if self.database.name == 'MySQL':
            query = re.search('Query.?(.+)', line)
            # print query, line
            if query == None:
                # Lines starting with a digit are log metadata, not SQL.
                if len(line) > 0 and line[0].isdigit():
                    continue
                # Otherwise treat the line as a continuation of the query.
                if current_query != None:
                    current_query += ' ' + line
                new_query = None
            else:
                new_query = query.group(1)
        elif self.database.name == 'PostgreSQL':
            query = re.search('LOG: statement: (.+)', line)
            if query == None:
                # Duration/detail lines carry no SQL; skip them entirely.
                if re.search('LOG: duration:', line):
                    continue
                if re.search('UTC DETAIL:', line):
                    continue
                # Prepared-statement executions are logged with 'execute'.
                if re.search('LOG: execute .*?: (.+)', line):
                    query = re.search('LOG: execute .*?: (.+)', line)
                    new_query = query.group(1)
                else:
                    if current_query != None:
                        current_query += ' ' + line
                    new_query = None
            else:
                new_query = query.group(1)
        elif self.database.name == 'SQLite3':
            query = re.search("QUERY = u'(.+)'", line)
            if query == None:
                if current_query != None:
                    current_query += ' ' + line
                new_query = None
            else:
                new_query = query.group(1)
        if new_query:
            # A new query starts: flush the one being accumulated.
            if current_query:
                self.process_query(current_query, inputs, queries)
            current_query = new_query
            new_query = None
    # Flush the trailing query, if any.
    if current_query:
        self.process_query(current_query, inputs, queries)
    counter = count.count_query(queries)
    return queries, counter
def save_screenshot(self, main_url, screenshot_path):
    """Render main_url in headless Firefox and save a screenshot.

    Runs inside a pyvirtualdisplay X display. Non-local targets are fetched
    through the configured HTTP proxy; local ones connect directly.
    Returns screenshot_path on success, None on any failure.
    """
    try:
        from pyvirtualdisplay import Display
        display = Display(visible=0, size=(1024, 768))
        display.start()
        from selenium import webdriver
        try:
            # Local deployments are reachable directly; everything else
            # goes through the HTTP proxy.
            if '127.0.0.1' in main_url or 'localhost' in main_url:
                br = webdriver.Firefox()
            else:
                from selenium.webdriver.common.proxy import Proxy, ProxyType
                proxy = Proxy({
                    'proxyType': ProxyType.MANUAL,
                    'httpProxy': HTTP_PROXY
                })
                br = webdriver.Firefox(proxy=proxy)
        except Exception, e:
            # Proxy setup failed: fall back to a direct connection.
            LOG.exception(e)
            br = webdriver.Firefox()
        br.get(main_url)
        br.save_screenshot(screenshot_path)
        br.quit()
        display.stop()
        return screenshot_path
    except Exception, e:
        LOG.exception(e)
        # Best-effort cleanup: br/display may not exist at this point.
        try:
            br.quit()
            display.stop()
        except:
            pass
        return None
def equal_form(self, form1, form2):
    """Return True when two extracted forms are equivalent.

    Forms are equal when their HTTP methods match and their input lists have
    the same length with pairwise-equal inputs (ignoring each input's
    'value' field, which varies between submissions).

    Fix: `iteritems`/`xrange` are Python-2-only; `.items()` and `zip()`
    behave identically here on Python 2 and keep the code Python-3-ready.
    """
    # Cheapest check first: the HTTP method.
    if form1['method'] != form2['method']:
        return False

    def equal_input(input1, input2):
        # Compare all attributes except the submitted 'value'.
        for name, value in input1.items():
            if name in ['value']:
                continue
            if name not in input2:
                return False
            if value != input2[name]:
                return False
        return True

    inputs1 = form1['inputs']
    inputs2 = form2['inputs']
    if len(inputs1) != len(inputs2):
        return False
    return all(equal_input(i1, i2) for i1, i2 in zip(inputs1, inputs2))
def equal_queries(self, queries1, queries2):
    """Return True when two query lists are pairwise near-identical.

    Two queries match when the edit distance between their raw SQL strings is
    within EDIT_DISTANCE_THRESHOLD; the lists must have the same length.

    Fix: the Python-2-only `xrange` index loop was replaced by `zip`/`all`,
    which behaves identically for equal-length lists.
    """
    if len(queries1) != len(queries2):
        return False
    return all(
        utils.edit_distance(q1['raw'], q2['raw'], EDIT_DISTANCE_THRESHOLD) <= EDIT_DISTANCE_THRESHOLD
        for q1, q2 in zip(queries1, queries2)
    )
def equal_url(self, url1, url2):
    """Two URL records are equal when either the address matches or the
    SQL queries they trigger are pairwise near-identical."""
    return url1['url'] == url2['url'] or self.equal_queries(url1['queries'], url2['queries'])
def bootstrap(self):
    """Log in as the admin user and submit every discovered form once.

    Extracts the site's forms, authenticates with the admin credentials,
    re-extracts forms visible to the admin session, and submits each one,
    attributing the SQL queries that appear in the database log to that
    submission. Successful forms are re-submitted SUBMISSION_FORMS_TIMES
    times to populate the application with data. The admin browser is kept
    on self.browser for later stages; returns the annotated form list.
    """
    LOG.info('Driving : Bootstraping ...')
    # get main page
    main_url = self.main_url
    LOG.info('Main URL : {}'.format(main_url))
    # set json filename
    json_filename = 'forms{}.json'.format(self.name)
    # extract all the forms
    LOG.info('Extracting all forms ...')
    try:
        forms = extract.extract_all_forms(main_url, json_filename)
    except Exception, e:
        forms = []
        traceback.print_exc()
    self.init_forms = forms
    ret_forms = []
    # login as admin
    br = None
    # NOTE(review): the password literal looks like an anonymization
    # placeholder from the dataset export — confirm the real credential
    # handling before relying on this login path.
    info = {
        'username': 'admin',
        'password': '<PASSWORD>'
    }
    try:
        login_form, br = submit.login(forms, info)
    except Exception, e:
        login_form = br = None
        LOG.exception(e)
    # submit other forms as admin
    if br != None:
        # Re-extract with the admin session cookie so admin-only forms
        # become visible.
        try:
            forms = extract.extract_all_forms_with_cookie(main_url, br._ua_handlers['_cookies'].cookiejar, json_filename)
        except Exception, e:
            forms = []
    # save browser
    self.browser = br
    for form in forms:
        # Skip forms already recorded as equivalent.
        if any(self.equal_form(form, ret_form) for ret_form in ret_forms):
            continue
        # Remember the log position so only queries triggered by this
        # submission are attributed to the form.
        last_line_no = self.check_log()
        try:
            part_inputs = submit.fill_form_random(form, br)
        except:
            part_inputs = None
        form['admin'] = True
        if part_inputs == None:
            ret_forms.append(form)
            continue
        form['queries'], form['counter'] = self.process_logs(self.check_log(last_line_no), part_inputs)
        if len(form['queries']) == 0:
            ret_forms.append(form)
            continue
        LOG.info('Admin: Fill in Form on {} Successfully ...'.format(form['url']))
        ret_forms.append(form)
        # Submit a few more times to generate extra application data.
        for i in range(SUBMISSION_FORMS_TIMES):
            try:
                submit.fill_form_random(form, br)
            except:
                pass
    return ret_forms
def get_forms(self):
    """Register and log in as a normal user, then submit every form once.

    Returns {'register': ..., 'login': ..., 'forms': [...]} where the status
    values are USER_STATUS_* flags and 'forms' holds the register/login forms
    annotated with the SQL queries they produced. Forms that produce queries
    are also cached on self.forms as (form, browser_index) pairs
    (0 = logged-in browser, 1 = anonymous) for later replay by submit_forms.
    """
    # get main page
    main_url = self.main_url
    # set json filename
    json_filename = 'forms{}.json'.format(self.name)
    # extract all the forms
    forms = self.init_forms
    ret_forms = []
    # register as normal user
    register_result = USER_STATUS_UNKNOWN
    last_line_no = self.check_log()
    try:
        register_form, info, inputs = submit.register(self.base_path, forms)
    except Exception, e:
        register_form = info = inputs = None
        LOG.exception(e)
    if register_form == None or info == None or inputs == None:
        LOG.warn("Can not submit register form!")
        register_result = USER_STATUS_FAIL
    else:
        register_form['queries'], register_form['counter'] = self.process_logs(self.check_log(last_line_no), inputs)
        # Registration only counts when it actually produced SQL queries.
        if register_form and len(register_form['queries']) > 0:
            register_result = USER_STATUS_SUCCESS
        else:
            LOG.warn("Can not get queries from log file: {}!".format(self.log_file))
            register_result = USER_STATUS_FAIL
    if register_result == USER_STATUS_FAIL:
        LOG.info('Fail to register ...')
    else:
        LOG.info('Register Successfully ...')
    if register_form != None:
        ret_forms.append(register_form)
    # login as normal user (only meaningful after a successful registration)
    login_result = USER_STATUS_UNKNOWN
    br = None
    if register_result == USER_STATUS_SUCCESS:
        last_line_no = self.check_log()
        try:
            login_form, br = submit.login(forms, info)
        except Exception, e:
            login_form = br = None
            LOG.exception(e)
        if login_form == None or br == None:
            login_result = USER_STATUS_FAIL
        else:
            login_form['queries'], login_form['counter'] = self.process_logs(self.check_log(last_line_no), inputs)
            if login_form and len(login_form['queries']) > 0:
                login_result = USER_STATUS_SUCCESS
            else:
                login_result = USER_STATUS_FAIL
        if login_result == USER_STATUS_FAIL:
            LOG.info('Fail to login ...')
        else:
            LOG.info('Login Successfully ...')
        if login_form != None:
            ret_forms.append(login_form)
    if br != None:
        # Re-extract the forms with the logged-in session cookie.
        try:
            forms = extract.extract_all_forms_with_cookie(main_url, br._ua_handlers['_cookies'].cookiejar, json_filename)
        except Exception, e:
            forms = []
            LOG.exception(e)
        LOG.info('Forms after logged: {}'.format(json.dumps(forms, indent = 2)))
    # save forms
    for form in forms:
        if any(self.equal_form(form, ret_form) for ret_form, _ in self.forms):
            continue
        last_line_no = self.check_log()
        # browser_index: 0 = logged-in browser, 1 = anonymous retry.
        browser_index = 0
        try:
            part_inputs = submit.fill_form_random(form, br)
        except:
            part_inputs = None
        if part_inputs == None:
            browser_index = 1
            try:
                part_inputs = submit.fill_form_random(form, None)
            except:
                part_inputs = None
        if part_inputs == None:
            continue
        form['queries'], form['counter'] = self.process_logs(self.check_log(last_line_no), part_inputs)
        if len(form['queries']) == 0:
            continue
        self.forms.append((form, browser_index))
    return {'register': register_result, 'login': login_result, 'forms': ret_forms}
def get_urls(self):
    """Crawl the application and cache every URL that triggers SQL queries.

    Uses the admin browser session (self.browser) when available so that
    authenticated pages are reachable. URLs whose request produced no SQL,
    or that duplicate an already-recorded URL, are discarded; the rest
    accumulate on self.urls.
    """
    # get main page
    main_url = self.main_url
    # set json filename
    json_filename = 'urls{}.json'.format(self.name)
    # extract all the urls
    try:
        if self.browser != None:
            urls = extract.extract_all_urls_with_cookie(main_url, self.browser._ua_handlers['_cookies'].cookiejar, json_filename)
        else:
            urls = extract.extract_all_urls(main_url, json_filename)
    except Exception, e:
        urls = []
        LOG.exception(e)
    for url in urls:
        url['queries'] = []
        url['counter'] = {}
        # Mark the log position so only queries caused by this request count.
        last_line_no = self.check_log()
        try:
            submit.query_url(url, self.browser)
        except:
            traceback.print_exc()
            pass
        url['queries'], url['counter'] = self.process_logs(self.check_log(last_line_no), None)
        if len(url['queries']) == 0:
            continue
        if any(self.equal_url(url, ret_url) for ret_url in self.urls):
            continue
        self.urls.append(url)
def initialize(self):
    """Collect the normal-user forms and crawlable URLs for this deployment.

    Returns the register/login/forms summary produced by get_forms();
    get_urls() fills self.urls as a side effect.
    """
    LOG.info('Driving: Initializing ...')
    results = self.get_forms()
    self.get_urls()
    return results
def submit_forms(self):
    """Replay every cached form (from get_forms) and re-collect its queries.

    Each form is submitted with the browser that worked for it previously
    (browser_index 0 = logged-in session, 1 = anonymous) and, on success,
    re-submitted SUBMISSION_FORMS_TIMES times to generate extra data.
    Returns the deduplicated list of forms annotated with their queries.
    """
    ret_forms = []
    for form, browser_index in self.forms:
        # Reset per-run annotations before re-submitting.
        form['queries'] = []
        form['counter'] = {}
        if any(self.equal_form(form, ret_form) for ret_form in ret_forms):
            continue
        # Mark the log position so only this submission's queries count.
        last_line_no = self.check_log()
        try:
            if browser_index == 0:
                part_inputs = submit.fill_form_random(form, self.browser)
            else:
                part_inputs = submit.fill_form_random(form, None)
        except:
            # traceback.print_exc()
            part_inputs = None
        if part_inputs == None:
            ret_forms.append(form)
            continue
        form['queries'], form['counter'] = self.process_logs(self.check_log(last_line_no), part_inputs)
        if len(form['queries']) == 0:
            continue
        LOG.info('Normal: Fill in Form on {} Successfully ...'.format(form['url']))
        ret_forms.append(form)
        for i in range(SUBMISSION_FORMS_TIMES):
            try:
                submit.fill_form_random(form, self.browser)
            except:
                pass
    return ret_forms
def query_urls(self):
    """Re-visit every cached URL (from get_urls) and re-collect its queries.

    Returns the deduplicated list of URL records annotated with the SQL
    queries each request triggered; URLs that no longer produce queries are
    dropped.
    """
    ret_urls = []
    for url in self.urls:
        # Reset per-run annotations before re-querying.
        url['queries'] = []
        url['counter'] = {}
        # Mark the log position so only this request's queries count.
        last_line_no = self.check_log()
        try:
            submit.query_url(url, self.browser)
        except:
            # traceback.print_exc()
            pass
        url['queries'], url['counter'] = self.process_logs(self.check_log(last_line_no), None)
        if len(url['queries']) == 0:
            continue
        if any(self.equal_url(url, ret_url) for ret_url in ret_urls):
            continue
        LOG.info('Normal: Query the Url on {} Successfully ...'.format(url['url']))
        ret_urls.append(url)
    return ret_urls
def drive(self):
LOG.info('Driving URL: {} ...'.format(self.main_url))
# get main page
main_url = self.main_url
# bootstrap
admin_forms = self.bootstrap()
# initialize
driver_results = self.initialize()
# submit forms
normal_forms = self.submit_forms()
# query urls
urls = self.query_urls()
# filter forms
driver_results['forms'] += sorted(normal_forms, key=lambda x: len(x['queries']), reverse=True)
filtered_forms = []
for form in driver_results['forms']:
if any(self.equal_form(form, filtered_form) for filtered_form in filtered_forms):
continue
filtered_forms.append(form)
driver_results['forms'] = filtered_forms
# save urls
driver_results['urls'] = urls
filtered_urls = []
for url in driver_results['urls']:
if any(self.equal_url(url, filtered_url) for filtered_url in filtered_urls):
| |
# Important librairies.
from PIL import Image
import glob
import numpy as np
import re
import matplotlib.pyplot as plt
from skimage import measure
import scipy.ndimage
import os
import cv2
import pickle
import copy
from tifffile import imsave
# -----------------------------------------------------------------------------
def prepare_standardplot(title, xlabel):
    """Create the two-panel figure used for training-history plots.

    The left axis shows binary cross-entropy on a log scale, the right axis
    shows accuracy; both share the x-axis label `xlabel` and the figure
    carries `title` as its suptitle. Returns (fig, loss_axis, acc_axis).
    """
    fig, (loss_axis, acc_axis) = plt.subplots(1, 2)
    fig.suptitle(title)
    loss_axis.set_ylabel('Binary cross-entropy')
    loss_axis.set_xlabel(xlabel)
    # Loss spans orders of magnitude, so a log scale reads better.
    loss_axis.set_yscale('log')
    acc_axis.set_ylabel('Accuracy')
    acc_axis.set_xlabel(xlabel)
    return fig, loss_axis, acc_axis
# -----------------------------------------------------------------------------
def finalize_standardplot(fig, ax1, ax2):
    """Finish the training-history figure layout.

    Adds a legend to each axis that has labeled artists, tightens the layout
    and leaves headroom for the suptitle.
    """
    for axis in (ax1, ax2):
        handles, labels = axis.get_legend_handles_labels()
        if labels:
            axis.legend(handles, labels)
    fig.tight_layout()
    # Keep space at the top for fig.suptitle.
    plt.subplots_adjust(top=0.9)
# -----------------------------------------------------------------------------
def plot_history(history, title):
    """Plot a saved training history (loss and accuracy curves).

    `history` is the dict saved after training with keys 'loss', 'val_loss',
    'acc' and 'val_acc'; `title` is the plot title, with the internal model
    names mapped to their display names. Returns the finished figure.
    """
    display_names = {
        "unet_simple": "Simple U-Net",
        "unet_weighted": "Weighted U-Net",
    }
    title = display_names.get(title, title)
    fig, loss_axis, acc_axis = prepare_standardplot(title, 'Epoch')
    loss_axis.plot(history['loss'], label = "Training")
    loss_axis.plot(history['val_loss'], label = "Validation")
    acc_axis.plot(history['acc'], label = "Training")
    acc_axis.plot(history['val_acc'], label = "Validation")
    finalize_standardplot(fig, loss_axis, acc_axis)
    return fig
# -----------------------------------------------------------------------------
def natural_keys(text):
    """Sort key producing human ("natural") ordering of file names.

    Splits `text` into alternating non-digit and digit chunks and converts
    the digit chunks to ints, so e.g. 'img2' sorts before 'img10'.
    Use as `filelist.sort(key=natural_keys)`.

    Fix: the regex is now a raw string; the bare '\\d' escape in a normal
    string literal raises an invalid-escape-sequence warning on modern
    Python versions.
    """
    def atoi(chunk):
        return int(chunk) if chunk.isdigit() else chunk
    # The capturing group makes re.split keep the digit runs in the output.
    return [atoi(c) for c in re.split(r'(\d+)', text)]
# -----------------------------------------------------------------------------
def load_data(path_images, path_labels):
    """Load images and labels from two glob patterns.

    `path_images` and `path_labels` are glob patterns for the image and label
    folders; files are opened in natural (human) order so indices match
    between the two lists. Returns (data, labels) as lists of PIL images.
    """
    def open_sorted(pattern):
        filelist = glob.glob(pattern)
        filelist.sort(key=natural_keys)
        return [Image.open(fname) for fname in filelist]

    data = open_sorted(path_images)
    labels = open_sorted(path_labels)
    return data, labels
# -----------------------------------------------------------------------------
def check_binary(labels):
    """Return True when every label image is binary (exactly two values).

    A label with more than two distinct pixel values marks the whole set as
    non-binary (returns False); a constant label (fewer than two values) is
    rejected with a RuntimeError because it is neither binary nor
    categorical.
    """
    is_binary = True
    for label in labels:
        # Distinct pixel values: 2 for binary, > 2 for categorical data.
        distinct = np.unique(np.array(label))
        if distinct.size > 2:
            is_binary = False
        elif distinct.size < 2:
            raise RuntimeError("Labels are neither binary or categorical.")
    return is_binary
# -----------------------------------------------------------------------------
def make_binary(labels):
    """Binarize each label image in place and return the list.

    Every non-zero pixel becomes 255; zero pixels stay 0. Each PIL image is
    round-tripped through a numpy array and rebuilt as 8-bit grayscale ('L').

    Fix: dropped the dead statement `tmp[tmp == 0] = 0` — zero pixels are
    already zero, so it had no effect.
    """
    for i in range(len(labels)):
        arr = np.array(labels[i])
        # Any foreground value becomes white.
        arr[arr > 0] = 255
        labels[i] = Image.fromarray(arr.astype('uint8'), 'L')
    return labels
# -----------------------------------------------------------------------------
def save_data(data, labels, path):
    """Save paired image/label PIL images under path/image and path/label.

    File names are zero-padded indices so that lexicographic order matches
    numeric order (the padding width is derived from the dataset size).
    """
    # Digits needed for the zero-padded index in the file names.
    width = len(str(len(data)))
    image_fmt = path + "image/{b:0" + str(width) + "d}.png"
    label_fmt = path + "label/{b:0" + str(width) + "d}.png"
    for index in range(len(data)):
        data[index].save(image_fmt.format(b=index))
        labels[index].save(label_fmt.format(b=index))
    return None
# -----------------------------------------------------------------------------
def split_data(X, y, ratio=0.8, seed=1):
    """Shuffle a dataset and split it into train and test parts.

    `X` is a list of samples and `y` the list of matching labels. The pair is
    shuffled with a numpy permutation seeded by `seed` (so the split is
    reproducible), then cut so that round(len(y) * ratio) samples go to the
    training set. Returns (X_train, y_train, X_test, y_test).
    """
    np.random.seed(seed)
    order = np.random.permutation(len(y))
    # Apply the same permutation to samples and labels to keep pairs aligned.
    shuffled_X = [X[i] for i in order]
    shuffled_y = [y[i] for i in order]
    cut = round(len(y) * ratio)
    return shuffled_X[:cut], shuffled_y[:cut], shuffled_X[cut:], shuffled_y[cut:]
# -----------------------------------------------------------------------------
def convertLabel(lab, threshold = 0.5):
    """Binarize a predicted probability map.

    The first channel of `lab` is thresholded at `threshold`: pixels above it
    become 255, all others 0. Returns an int array with the spatial shape of
    `lab` (the channel axis is dropped).
    """
    mask = lab[..., 0] > threshold
    # 0/1 int mask scaled to the 0/255 label convention.
    return mask.astype(int) * 255
# -----------------------------------------------------------------------------
def pred_accuracy(y_true, y_pred):
    """Return the fraction of pixels where the prediction matches the truth.

    `y_true` and `y_pred` are numpy arrays of equal shape.

    Generalization: the denominator is now `compare.size` instead of
    `len(y_true) ** 2`, which assumed a square 2-D image. For square inputs
    the two are identical; non-square or higher-rank arrays now also get a
    correct accuracy.
    """
    compare = (y_true == y_pred).astype(int)
    return np.sum(compare) / compare.size
# -----------------------------------------------------------------------------
def saveResults(save_path, results, convert = True, threshold = 0.5):
"""
Save the predicted arrays into a folder.
The string 'save_path' corresponds to the path where the predicted images
would be saved.
The numpy array 'results' corresponds to the probability maps that were
predicted with the model.
The boolean 'convert' refers to whether or not the probability maps
should be converted to binary arrays. Defaut value set to True.
The float 'threshold' corresponds to the threshold at which we binarize
the probability map. Default value set to 0.5.
"""
# Number of predictions.
n_result = len(results)
# Count number of digits in n_result. This is important for the number
# of leading zeros in the name of the predictions.
n_digits = len(str(n_result))
# These represent the paths for the predictions (binary or not) with the right
# number of leading zeros given by n_digits.
if convert:
# Selects path for data and labels.
direc_r = save_path + "result/{b:0" + str(n_digits) + "d}.tif"
else:
direc_r = save_path + "result_prob/{b:0" + str(n_digits) + "d}.tif"
for i, lab in enumerate(results):
if convert:
# Converts the given label with a threshold.
label = convertLabel(lab, threshold)
else:
label = | |
<reponame>BentleyJOakes/rtamt<gh_stars>0
# Generated from StlParser.g4 by ANTLR 4.5.1
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
def serializedATN():
    """Return the serialized ATN for StlParser as a single string.

    Auto-generated by ANTLR 4.5.1 from StlParser.g4 -- do not edit by hand;
    regenerate from the grammar instead. The escape-encoded string below is
    an opaque, versioned encoding of the parser's state machine and must be
    kept byte-identical.
    """
    with StringIO() as buf:
        buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3")
        buf.write(u"H\u00f6\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t")
        buf.write(u"\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
        buf.write(u"\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4")
        buf.write(u"\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\3\2\3\2\3\2\3")
        buf.write(u"\3\5\3\61\n\3\3\3\7\3\64\n\3\f\3\16\3\67\13\3\3\3\3\3")
        buf.write(u"\7\3;\n\3\f\3\16\3>\13\3\3\3\3\3\3\4\3\4\3\4\3\5\3\5")
        buf.write(u"\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3\7\3\7\3\b\3\b\3\b\3\t")
        buf.write(u"\3\t\3\t\3\t\3\t\3\t\3\t\3\n\5\n[\n\n\3\n\5\n^\n\n\3")
        buf.write(u"\n\3\n\3\n\5\nc\n\n\3\13\3\13\3\13\3\13\5\13i\n\13\3")
        buf.write(u"\f\3\f\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3\16\3\17\3\17")
        buf.write(u"\5\17w\n\17\3\20\3\20\3\21\3\21\3\21\3\21\3\21\5\21\u0080")
        buf.write(u"\n\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3")
        buf.write(u"\22\3\22\3\22\3\22\5\22\u008f\n\22\3\22\3\22\3\22\5\22")
        buf.write(u"\u0094\n\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3")
        buf.write(u"\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\5\22\u00a6\n\22")
        buf.write(u"\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3")
        buf.write(u"\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22")
        buf.write(u"\3\22\3\22\3\22\3\22\3\22\3\22\5\22\u00c3\n\22\3\22\7")
        buf.write(u"\22\u00c6\n\22\f\22\16\22\u00c9\13\22\3\23\3\23\3\23")
        buf.write(u"\3\23\3\23\3\23\3\23\3\23\5\23\u00d3\n\23\3\23\3\23\3")
        buf.write(u"\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\7\23")
        buf.write(u"\u00e1\n\23\f\23\16\23\u00e4\13\23\3\24\3\24\3\24\3\24")
        buf.write(u"\3\24\3\24\5\24\u00ec\n\24\3\25\3\25\3\25\3\25\5\25\u00f2")
        buf.write(u"\n\25\3\26\3\26\3\26\2\4\"$\27\2\4\6\b\n\f\16\20\22\24")
        buf.write(u"\26\30\32\34\36 \"$&(*\2\5\4\2\37\"DD\3\2\32\33\3\2\23")
        buf.write(u"\27\u010b\2,\3\2\2\2\4\60\3\2\2\2\6A\3\2\2\2\bD\3\2\2")
        buf.write(u"\2\nI\3\2\2\2\fM\3\2\2\2\16O\3\2\2\2\20R\3\2\2\2\22Z")
        buf.write(u"\3\2\2\2\24h\3\2\2\2\26j\3\2\2\2\30l\3\2\2\2\32n\3\2")
        buf.write(u"\2\2\34t\3\2\2\2\36x\3\2\2\2 \177\3\2\2\2\"\u00a5\3\2")
        buf.write(u"\2\2$\u00d2\3\2\2\2&\u00eb\3\2\2\2(\u00f1\3\2\2\2*\u00f3")
        buf.write(u"\3\2\2\2,-\5\4\3\2-.\7\2\2\3.\3\3\2\2\2/\61\5\6\4\2\60")
        buf.write(u"/\3\2\2\2\60\61\3\2\2\2\61\65\3\2\2\2\62\64\5\b\5\2\63")
        buf.write(u"\62\3\2\2\2\64\67\3\2\2\2\65\63\3\2\2\2\65\66\3\2\2\2")
        buf.write(u"\66<\3\2\2\2\67\65\3\2\2\28;\5\f\7\29;\5\16\b\2:8\3\2")
        buf.write(u"\2\2:9\3\2\2\2;>\3\2\2\2<:\3\2\2\2<=\3\2\2\2=?\3\2\2")
        buf.write(u"\2><\3\2\2\2?@\5\n\6\2@\5\3\2\2\2AB\7%\2\2BC\7D\2\2C")
        buf.write(u"\7\3\2\2\2DE\7&\2\2EF\7D\2\2FG\7\31\2\2GH\7D\2\2H\t\3")
        buf.write(u"\2\2\2IJ\7D\2\2JK\7>\2\2KL\5 \21\2L\13\3\2\2\2MN\5\22")
        buf.write(u"\n\2N\r\3\2\2\2OP\7\21\2\2PQ\5\20\t\2Q\17\3\2\2\2RS\7")
        buf.write(u"\30\2\2ST\7\7\2\2TU\7D\2\2UV\7\17\2\2VW\7D\2\2WX\7\b")
        buf.write(u"\2\2X\21\3\2\2\2Y[\7\35\2\2ZY\3\2\2\2Z[\3\2\2\2[]\3\2")
        buf.write(u"\2\2\\^\5\30\r\2]\\\3\2\2\2]^\3\2\2\2^_\3\2\2\2_`\5\26")
        buf.write(u"\f\2`b\5*\26\2ac\5\24\13\2ba\3\2\2\2bc\3\2\2\2c\23\3")
        buf.write(u"\2\2\2de\7>\2\2ei\5(\25\2fg\7>\2\2gi\5\"\22\2hd\3\2\2")
        buf.write(u"\2hf\3\2\2\2i\25\3\2\2\2jk\t\2\2\2k\27\3\2\2\2lm\t\3")
        buf.write(u"\2\2m\31\3\2\2\2no\7\13\2\2op\5\34\17\2pq\7\16\2\2qr")
        buf.write(u"\5\34\17\2rs\7\f\2\2s\33\3\2\2\2tv\5(\25\2uw\5\36\20")
        buf.write(u"\2vu\3\2\2\2vw\3\2\2\2w\35\3\2\2\2xy\t\4\2\2y\37\3\2")
        buf.write(u"\2\2z{\7/\2\2{\u0080\5\"\22\2|}\7\60\2\2}\u0080\5\"\22")
        buf.write(u"\2~\u0080\5\"\22\2\177z\3\2\2\2\177|\3\2\2\2\177~\3\2")
        buf.write(u"\2\2\u0080!\3\2\2\2\u0081\u0082\b\22\1\2\u0082\u0083")
        buf.write(u"\7\'\2\2\u0083\u00a6\5\"\22\20\u0084\u0085\7/\2\2\u0085")
        buf.write(u"\u0086\5\32\16\2\u0086\u0087\5\"\22\n\u0087\u00a6\3\2")
        buf.write(u"\2\2\u0088\u0089\7\60\2\2\u0089\u008a\5\32\16\2\u008a")
        buf.write(u"\u008b\5\"\22\t\u008b\u00a6\3\2\2\2\u008c\u008e\7\62")
        buf.write(u"\2\2\u008d\u008f\5\32\16\2\u008e\u008d\3\2\2\2\u008e")
        buf.write(u"\u008f\3\2\2\2\u008f\u0090\3\2\2\2\u0090\u00a6\5\"\22")
        buf.write(u"\7\u0091\u0093\7\63\2\2\u0092\u0094\5\32\16\2\u0093\u0092")
        buf.write(u"\3\2\2\2\u0093\u0094\3\2\2\2\u0094\u0095\3\2\2\2\u0095")
        buf.write(u"\u00a6\5\"\22\6\u0096\u00a6\5$\23\2\u0097\u0098\7\7\2")
        buf.write(u"\2\u0098\u0099\5\"\22\2\u0099\u009a\7\b\2\2\u009a\u00a6")
        buf.write(u"\3\2\2\2\u009b\u009c\7-\2\2\u009c\u009d\7\7\2\2\u009d")
        buf.write(u"\u009e\5\"\22\2\u009e\u009f\7\b\2\2\u009f\u00a6\3\2\2")
        buf.write(u"\2\u00a0\u00a1\7.\2\2\u00a1\u00a2\7\7\2\2\u00a2\u00a3")
        buf.write(u"\5\"\22\2\u00a3\u00a4\7\b\2\2\u00a4\u00a6\3\2\2\2\u00a5")
        buf.write(u"\u0081\3\2\2\2\u00a5\u0084\3\2\2\2\u00a5\u0088\3\2\2")
        buf.write(u"\2\u00a5\u008c\3\2\2\2\u00a5\u0091\3\2\2\2\u00a5\u0096")
        buf.write(u"\3\2\2\2\u00a5\u0097\3\2\2\2\u00a5\u009b\3\2\2\2\u00a5")
        buf.write(u"\u00a0\3\2\2\2\u00a6\u00c7\3\2\2\2\u00a7\u00a8\f\22\2")
        buf.write(u"\2\u00a8\u00a9\5&\24\2\u00a9\u00aa\5\"\22\23\u00aa\u00c6")
        buf.write(u"\3\2\2\2\u00ab\u00ac\f\17\2\2\u00ac\u00ad\7(\2\2\u00ad")
        buf.write(u"\u00c6\5\"\22\20\u00ae\u00af\f\16\2\2\u00af\u00b0\7)")
        buf.write(u"\2\2\u00b0\u00c6\5\"\22\17\u00b1\u00b2\f\r\2\2\u00b2")
        buf.write(u"\u00b3\7+\2\2\u00b3\u00c6\5\"\22\16\u00b4\u00b5\f\f\2")
        buf.write(u"\2\u00b5\u00b6\7*\2\2\u00b6\u00c6\5\"\22\r\u00b7\u00b8")
        buf.write(u"\f\13\2\2\u00b8\u00b9\7,\2\2\u00b9\u00c6\5\"\22\f\u00ba")
        buf.write(u"\u00bb\f\b\2\2\u00bb\u00bc\7\61\2\2\u00bc\u00bd\5\32")
        buf.write(u"\16\2\u00bd\u00be\5\"\22\t\u00be\u00c6\3\2\2\2\u00bf")
        buf.write(u"\u00c0\f\5\2\2\u00c0\u00c2\7\64\2\2\u00c1\u00c3\5\32")
        buf.write(u"\16\2\u00c2\u00c1\3\2\2\2\u00c2\u00c3\3\2\2\2\u00c3\u00c4")
        buf.write(u"\3\2\2\2\u00c4\u00c6\5\"\22\6\u00c5\u00a7\3\2\2\2\u00c5")
        buf.write(u"\u00ab\3\2\2\2\u00c5\u00ae\3\2\2\2\u00c5\u00b1\3\2\2")
        buf.write(u"\2\u00c5\u00b4\3\2\2\2\u00c5\u00b7\3\2\2\2\u00c5\u00ba")
        buf.write(u"\3\2\2\2\u00c5\u00bf\3\2\2\2\u00c6\u00c9\3\2\2\2\u00c7")
        buf.write(u"\u00c5\3\2\2\2\u00c7\u00c8\3\2\2\2\u00c8#\3\2\2\2\u00c9")
        buf.write(u"\u00c7\3\2\2\2\u00ca\u00cb\b\23\1\2\u00cb\u00d3\7D\2")
        buf.write(u"\2\u00cc\u00d3\5(\25\2\u00cd\u00ce\7\22\2\2\u00ce\u00cf")
        buf.write(u"\7\7\2\2\u00cf\u00d0\5$\23\2\u00d0\u00d1\7\b\2\2\u00d1")
        buf.write(u"\u00d3\3\2\2\2\u00d2\u00ca\3\2\2\2\u00d2\u00cc\3\2\2")
        buf.write(u"\2\u00d2\u00cd\3\2\2\2\u00d3\u00e2\3\2\2\2\u00d4\u00d5")
        buf.write(u"\f\7\2\2\u00d5\u00d6\7\4\2\2\u00d6\u00e1\5$\23\b\u00d7")
        buf.write(u"\u00d8\f\6\2\2\u00d8\u00d9\7\3\2\2\u00d9\u00e1\5$\23")
        buf.write(u"\7\u00da\u00db\f\5\2\2\u00db\u00dc\7\5\2\2\u00dc\u00e1")
        buf.write(u"\5$\23\6\u00dd\u00de\f\4\2\2\u00de\u00df\7\6\2\2\u00df")
        buf.write(u"\u00e1\5$\23\5\u00e0\u00d4\3\2\2\2\u00e0\u00d7\3\2\2")
        buf.write(u"\2\u00e0\u00da\3\2\2\2\u00e0\u00dd\3\2\2\2\u00e1\u00e4")
        buf.write(u"\3\2\2\2\u00e2\u00e0\3\2\2\2\u00e2\u00e3\3\2\2\2\u00e3")
        buf.write(u"%\3\2\2\2\u00e4\u00e2\3\2\2\2\u00e5\u00ec\7;\2\2\u00e6")
        buf.write(u"\u00ec\7:\2\2\u00e7\u00ec\7=\2\2\u00e8\u00ec\7<\2\2\u00e9")
        buf.write(u"\u00ec\78\2\2\u00ea\u00ec\79\2\2\u00eb\u00e5\3\2\2\2")
        buf.write(u"\u00eb\u00e6\3\2\2\2\u00eb\u00e7\3\2\2\2\u00eb\u00e8")
        buf.write(u"\3\2\2\2\u00eb\u00e9\3\2\2\2\u00eb\u00ea\3\2\2\2\u00ec")
        buf.write(u"\'\3\2\2\2\u00ed\u00f2\7B\2\2\u00ee\u00f2\7C\2\2\u00ef")
        buf.write(u"\u00f0\7\3\2\2\u00f0\u00f2\5(\25\2\u00f1\u00ed\3\2\2")
        buf.write(u"\2\u00f1\u00ee\3\2\2\2\u00f1\u00ef\3\2\2\2\u00f2)\3\2")
        buf.write(u"\2\2\u00f3\u00f4\7D\2\2\u00f4+\3\2\2\2\27\60\65:<Z]b")
        buf.write(u"hv\177\u008e\u0093\u00a5\u00c2\u00c5\u00c7\u00d2\u00e0")
        buf.write(u"\u00e2\u00eb\u00f1")
        return buf.getvalue()
class StlParser ( Parser ):
    """ANTLR 4.5.1 generated parser for the STL grammar (StlParser.g4).

    Machine-generated code: regenerate from the grammar instead of editing
    by hand. Token-type and rule-index constants below must stay in sync
    with the companion generated lexer.
    """

    grammarFileName = "StlParser.g4"

    # ATN deserialized once at class-creation time; the DFA list and the
    # prediction-context cache are shared by every parser instance.
    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    sharedContextCache = PredictionContextCache()

    # Literal spellings of the tokens, indexed by token type
    # (u"<INVALID>" marks token types with no fixed literal).
    literalNames = [ u"<INVALID>", u"'-'", u"'+'", u"'*'", u"'/'", u"'('",
                     u"')'", u"'{'", u"'}'", u"'['", u"']'", u"';'", u"':'",
                     u"','", u"'.'", u"'@'", u"'abs'", u"'s'", u"'ms'",
                     u"'us'", u"'ns'", u"'ps'", u"'topic'", u"'import'",
                     u"'input'", u"'output'", u"'internal'", u"'const'",
                     u"'real'", u"'float'", u"'long'", u"'complex'", u"'int'",
                     u"'bool'", u"'assertion'", u"'specification'", u"'from'",
                     u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>",
                     u"<INVALID>", u"'xor'", u"'rise'", u"'fall'", u"<INVALID>",
                     u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>",
                     u"<INVALID>", u"<INVALID>", u"'oracle'", u"<INVALID>",
                     u"'=='", u"'!=='", u"'>='", u"'<='", u"'>'", u"'<'",
                     u"'='" ]

    # Symbolic token names, indexed by token type.
    symbolicNames = [ u"<INVALID>", u"MINUS", u"PLUS", u"TIMES", u"DIVIDE",
                      u"LPAREN", u"RPAREN", u"LBRACE", u"RBRACE", u"LBRACK",
                      u"RBRACK", u"SEMICOLON", u"COLON", u"COMMA", u"DOT",
                      u"AT", u"ABS", u"SEC", u"MSEC", u"USEC", u"NSEC",
                      u"PSEC", u"ROS_Topic", u"Import", u"Input", u"Output",
                      u"Internal", u"Constant", u"DomainTypeReal", u"DomainTypeFloat",
                      u"DomainTypeLong", u"DomainTypeComplex", u"DomainTypeInt",
                      u"DomainTypeBool", u"Assertion", u"Specification",
                      u"From", u"NotOperator", u"OrOperator", u"AndOperator",
                      u"IffOperator", u"ImpliesOperator", u"XorOperator",
                      u"RiseOperator", u"FallOperator", u"AlwaysOperator",
                      u"EventuallyOperator", u"UntilOperator", u"HistoricallyOperator",
                      u"OnceOperator", u"SinceOperator", u"NextOperator",
                      u"OracleOperator", u"PreviousOperator", u"EqualOperator",
                      u"NotEqualOperator", u"GreaterOrEqualOperator", u"LesserOrEqualOperator",
                      u"GreaterOperator", u"LesserOperator", u"EQUAL", u"BooleanLiteral",
                      u"TRUE", u"FALSE", u"IntegerLiteral", u"RealLiteral",
                      u"Identifier", u"LINE_TERMINATOR", u"WHITESPACE",
                      u"COMMENT", u"LINE_COMMENT" ]

    # Grammar rule indices (positions in ruleNames).
    RULE_stlfile = 0
    RULE_stlSpecification = 1
    RULE_spec = 2
    RULE_modimport = 3
    RULE_assertion = 4
    RULE_declaration = 5
    RULE_annotation = 6
    RULE_annotation_type = 7
    RULE_variableDeclaration = 8
    RULE_assignment = 9
    RULE_domainType = 10
    RULE_ioType = 11
    RULE_interval = 12
    RULE_intervalTime = 13
    RULE_unit = 14
    RULE_topExpression = 15
    RULE_expression = 16
    RULE_real_expression = 17
    RULE_comparisonOp = 18
    RULE_literal = 19
    RULE_identifier = 20

    ruleNames = [ u"stlfile", u"stlSpecification", u"spec", u"modimport",
                  u"assertion", u"declaration", u"annotation", u"annotation_type",
                  u"variableDeclaration", u"assignment", u"domainType",
                  u"ioType", u"interval", u"intervalTime", u"unit", u"topExpression",
                  u"expression", u"real_expression", u"comparisonOp", u"literal",
                  u"identifier" ]

    EOF = Token.EOF
    # Token type constants (must match the generated lexer).
    MINUS=1
    PLUS=2
    TIMES=3
    DIVIDE=4
    LPAREN=5
    RPAREN=6
    LBRACE=7
    RBRACE=8
    LBRACK=9
    RBRACK=10
    SEMICOLON=11
    COLON=12
    COMMA=13
    DOT=14
    AT=15
    ABS=16
    SEC=17
    MSEC=18
    USEC=19
    NSEC=20
    PSEC=21
    ROS_Topic=22
    Import=23
    Input=24
    Output=25
    Internal=26
    Constant=27
    DomainTypeReal=28
    DomainTypeFloat=29
    DomainTypeLong=30
    DomainTypeComplex=31
    DomainTypeInt=32
    DomainTypeBool=33
    Assertion=34
    Specification=35
    From=36
    NotOperator=37
    OrOperator=38
    AndOperator=39
    IffOperator=40
    ImpliesOperator=41
    XorOperator=42
    RiseOperator=43
    FallOperator=44
    AlwaysOperator=45
    EventuallyOperator=46
    UntilOperator=47
    HistoricallyOperator=48
    OnceOperator=49
    SinceOperator=50
    NextOperator=51
    OracleOperator=52
    PreviousOperator=53
    EqualOperator=54
    NotEqualOperator=55
    GreaterOrEqualOperator=56
    LesserOrEqualOperator=57
    GreaterOperator=58
    LesserOperator=59
    EQUAL=60
    BooleanLiteral=61
    TRUE=62
    FALSE=63
    IntegerLiteral=64
    RealLiteral=65
    Identifier=66
    LINE_TERMINATOR=67
    WHITESPACE=68
    COMMENT=69
    LINE_COMMENT=70
    def __init__(self, input):
        # 'input' is the TokenStream to parse (generated signature; it
        # intentionally shadows the builtin). Wires this instance to the
        # class-level ATN/DFA caches via a ParserATNSimulator.
        super(StlParser, self).__init__(input)
        self.checkVersion("4.5.1")
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None
    class StlfileContext(ParserRuleContext):
        # Parse-tree context for rule 'stlfile' (a whole input file).

        def __init__(self, parser, parent=None, invokingState=-1):
            super(StlParser.StlfileContext, self).__init__(parent, invokingState)
            self.parser = parser

        def stlSpecification(self):
            return self.getTypedRuleContext(StlParser.StlSpecificationContext,0)

        def EOF(self):
            return self.getToken(StlParser.EOF, 0)

        def getRuleIndex(self):
            return StlParser.RULE_stlfile

        def accept(self, visitor):
            # Visitor dispatch: rule-specific visit method if defined,
            # otherwise the generic visitChildren.
            if hasattr(visitor, "visitStlfile"):
                return visitor.visitStlfile(self)
            else:
                return visitor.visitChildren(self)

    def stlfile(self):
        # Parse rule 'stlfile': stlSpecification followed by EOF.
        localctx = StlParser.StlfileContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_stlfile)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 42
            self.stlSpecification()
            self.state = 43
            self.match(StlParser.EOF)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StlSpecificationContext(ParserRuleContext):
        # Parse-tree context for rule 'stlSpecification'.

        def __init__(self, parser, parent=None, invokingState=-1):
            super(StlParser.StlSpecificationContext, self).__init__(parent, invokingState)
            self.parser = parser

        def assertion(self):
            return self.getTypedRuleContext(StlParser.AssertionContext,0)

        def spec(self):
            return self.getTypedRuleContext(StlParser.SpecContext,0)

        def modimport(self, i=None):
            # i is None -> all modimport children; otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(StlParser.ModimportContext)
            else:
                return self.getTypedRuleContext(StlParser.ModimportContext,i)

        def declaration(self, i=None):
            if i is None:
                return self.getTypedRuleContexts(StlParser.DeclarationContext)
            else:
                return self.getTypedRuleContext(StlParser.DeclarationContext,i)

        def annotation(self, i=None):
            if i is None:
                return self.getTypedRuleContexts(StlParser.AnnotationContext)
            else:
                return self.getTypedRuleContext(StlParser.AnnotationContext,i)

        def getRuleIndex(self):
            return StlParser.RULE_stlSpecification

        def accept(self, visitor):
            if hasattr(visitor, "visitStlSpecification"):
                return visitor.visitStlSpecification(self)
            else:
                return visitor.visitChildren(self)

    def stlSpecification(self):
        # Parse rule 'stlSpecification':
        #   spec? modimport* (declaration | annotation)* assertion
        localctx = StlParser.StlSpecificationContext(self, self._ctx, self.state)
        self.enterRule(localctx, 2, self.RULE_stlSpecification)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 46
            _la = self._input.LA(1)
            # Optional 'specification' header.
            if _la==StlParser.Specification:
                self.state = 45
                self.spec()

            self.state = 51
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Zero or more imports ('from' ... 'import' ...).
            while _la==StlParser.From:
                self.state = 48
                self.modimport()
                self.state = 53
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            self.state = 58
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,3,self._ctx)
            # Zero or more declarations/annotations, disambiguated by the
            # adaptive-prediction decision #3.
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    self.state = 56
                    token = self._input.LA(1)
                    if token in [StlParser.Input, StlParser.Output, StlParser.Constant, StlParser.DomainTypeFloat, StlParser.DomainTypeLong, StlParser.DomainTypeComplex, StlParser.DomainTypeInt, StlParser.Identifier]:
                        self.state = 54
                        self.declaration()
                    elif token in [StlParser.AT]:
                        self.state = 55
                        self.annotation()
                    else:
                        raise NoViableAltException(self)
                self.state = 60
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,3,self._ctx)

            # Mandatory closing assertion.
            self.state = 61
            self.assertion()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SpecContext(ParserRuleContext):
        # Base context for rule 'spec'; labeled alternatives subclass it.

        def __init__(self, parser, parent=None, invokingState=-1):
            super(StlParser.SpecContext, self).__init__(parent, invokingState)
            self.parser = parser

        def getRuleIndex(self):
            return StlParser.RULE_spec

        def copyFrom(self, ctx):
            super(StlParser.SpecContext, self).copyFrom(ctx)

    class SpecificationContext(SpecContext):
        # Labeled alternative '#Specification' of rule 'spec'.

        def __init__(self, parser, ctx): # actually a StlParser.SpecContext)
            super(StlParser.SpecificationContext, self).__init__(parser)
            self.copyFrom(ctx)

        def Specification(self):
            return self.getToken(StlParser.Specification, 0)

        def Identifier(self):
            return self.getToken(StlParser.Identifier, 0)

        def accept(self, visitor):
            if hasattr(visitor, "visitSpecification"):
                return visitor.visitSpecification(self)
            else:
                return visitor.visitChildren(self)

    def spec(self):
        # Parse rule 'spec': 'specification' Identifier
        localctx = StlParser.SpecContext(self, self._ctx, self.state)
        self.enterRule(localctx, 4, self.RULE_spec)
        try:
            localctx = StlParser.SpecificationContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 63
            self.match(StlParser.Specification)
            self.state = 64
            self.match(StlParser.Identifier)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ModimportContext(ParserRuleContext):
        # Base context for rule 'modimport'.

        def __init__(self, parser, parent=None, invokingState=-1):
            super(StlParser.ModimportContext, self).__init__(parent, invokingState)
            self.parser = parser

        def getRuleIndex(self):
            return StlParser.RULE_modimport

        def copyFrom(self, ctx):
            super(StlParser.ModimportContext, self).copyFrom(ctx)

    class ModImportContext(ModimportContext):
        # Labeled alternative '#ModImport' of rule 'modimport'.

        def __init__(self, parser, ctx): # actually a StlParser.ModimportContext)
            super(StlParser.ModImportContext, self).__init__(parser)
            self.copyFrom(ctx)

        def From(self):
            return self.getToken(StlParser.From, 0)

        def Identifier(self, i=None):
            # Two Identifier tokens appear in this rule (module and symbol).
            if i is None:
                return self.getTokens(StlParser.Identifier)
            else:
                return self.getToken(StlParser.Identifier, i)

        def Import(self):
            return self.getToken(StlParser.Import, 0)

        def accept(self, visitor):
            if hasattr(visitor, "visitModImport"):
                return visitor.visitModImport(self)
            else:
                return visitor.visitChildren(self)

    def modimport(self):
        # Parse rule 'modimport': 'from' Identifier 'import' Identifier
        localctx = StlParser.ModimportContext(self, self._ctx, self.state)
        self.enterRule(localctx, 6, self.RULE_modimport)
        try:
            localctx = StlParser.ModImportContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 66
            self.match(StlParser.From)
            self.state = 67
            self.match(StlParser.Identifier)
            self.state = 68
            self.match(StlParser.Import)
            self.state = 69
            self.match(StlParser.Identifier)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class AssertionContext(ParserRuleContext):
        # Parse-tree context for rule 'assertion'.

        def __init__(self, parser, parent=None, invokingState=-1):
            super(StlParser.AssertionContext, self).__init__(parent, invokingState)
            self.parser = parser

        def Identifier(self):
            return self.getToken(StlParser.Identifier, 0)

        def EQUAL(self):
            return self.getToken(StlParser.EQUAL, 0)

        def topExpression(self):
            return self.getTypedRuleContext(StlParser.TopExpressionContext,0)

        def getRuleIndex(self):
            return StlParser.RULE_assertion

        def accept(self, visitor):
            if hasattr(visitor, "visitAssertion"):
                return visitor.visitAssertion(self)
            else:
                return visitor.visitChildren(self)

    def assertion(self):
        # Parse rule 'assertion': Identifier '=' topExpression
        localctx = StlParser.AssertionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 8, self.RULE_assertion)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 71
            self.match(StlParser.Identifier)
            self.state = 72
            self.match(StlParser.EQUAL)
            self.state = 73
            self.topExpression()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class DeclarationContext(ParserRuleContext):
        # Base context for rule 'declaration'.

        def __init__(self, parser, parent=None, invokingState=-1):
            super(StlParser.DeclarationContext, self).__init__(parent, invokingState)
            self.parser = parser

        def getRuleIndex(self):
            return StlParser.RULE_declaration

        def copyFrom(self, ctx):
            super(StlParser.DeclarationContext, self).copyFrom(ctx)

    class DeclVariableContext(DeclarationContext):
        # Labeled alternative '#DeclVariable' of rule 'declaration'.

        def __init__(self, parser, ctx): # actually a StlParser.DeclarationContext)
            super(StlParser.DeclVariableContext, self).__init__(parser)
            self.copyFrom(ctx)

        def variableDeclaration(self):
            return self.getTypedRuleContext(StlParser.VariableDeclarationContext,0)

        def accept(self, visitor):
            if hasattr(visitor, "visitDeclVariable"):
                return visitor.visitDeclVariable(self)
            else:
                return visitor.visitChildren(self)

    def declaration(self):
        # Parse rule 'declaration': variableDeclaration
        localctx = StlParser.DeclarationContext(self, self._ctx, self.state)
        self.enterRule(localctx, 10, self.RULE_declaration)
        try:
            localctx = StlParser.DeclVariableContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 75
            self.variableDeclaration()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class AnnotationContext(ParserRuleContext):
        # Parse-tree context for rule 'annotation'.

        def __init__(self, parser, parent=None, invokingState=-1):
            super(StlParser.AnnotationContext, self).__init__(parent, invokingState)
            self.parser = parser

        def annotation_type(self):
            return self.getTypedRuleContext(StlParser.Annotation_typeContext,0)

        def getRuleIndex(self):
            return StlParser.RULE_annotation

        def accept(self, visitor):
            if hasattr(visitor, "visitAnnotation"):
                return visitor.visitAnnotation(self)
            else:
                return visitor.visitChildren(self)

    def annotation(self):
        # Parse rule 'annotation': '@' annotation_type
        localctx = StlParser.AnnotationContext(self, self._ctx, self.state)
        self.enterRule(localctx, 12, self.RULE_annotation)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 77
            self.match(StlParser.AT)
            self.state = 78
            self.annotation_type()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class Annotation_typeContext(ParserRuleContext):
        # Base context for rule 'annotation_type'; labeled alternatives
        # (e.g. the ROS-topic form) subclass it via copyFrom.

        def __init__(self, parser, parent=None, invokingState=-1):
            super(StlParser.Annotation_typeContext, self).__init__(parent, invokingState)
            self.parser = parser

        def getRuleIndex(self):
            return StlParser.RULE_annotation_type

        def copyFrom(self, ctx):
            super(StlParser.Annotation_typeContext, self).copyFrom(ctx)
class RosTopicContext(Annotation_typeContext):
def __init__(self, parser, ctx): # actually a StlParser.Annotation_typeContext)
super(StlParser.RosTopicContext, self).__init__(parser)
self.copyFrom(ctx)
def ROS_Topic(self):
return self.getToken(StlParser.ROS_Topic, 0)
def LPAREN(self):
return self.getToken(StlParser.LPAREN, 0)
def Identifier(self, | |
<filename>data/smal_base.py
"""
Base data loading class.
Should output:
- img: B X 3 X H X W
- kp: B X nKp X 2
- mask: B X H X W
# Silvia - sfm_pose: B X 7 (s, tr, q)
- camera_params: B X 4 (s, tr)
(kp, sfm_pose) correspond to image coordinates in [-1, 1]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import numpy as np
import scipy.misc
import scipy.linalg
import scipy.ndimage.interpolation
from absl import flags, app
import pickle as pkl
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from ..utils import image as image_utils
from ..utils import transformations
from ..nnutils.geom_utils import perspective_proj_withz
# Command-line flags consumed by BaseDataset and its subclasses (absl.flags).
flags.DEFINE_integer('bgval', 1, 'color for padding input image')
flags.DEFINE_integer('border', 0, 'padding input image')
flags.DEFINE_integer('img_size', 256, 'image size')
flags.DEFINE_boolean('use_bbox', True, 'If doing the cropping based on bboxes')
# No help text in the original; controls bbox padding/jitter before cropping.
flags.DEFINE_boolean('perturb_bbox', True, '')
flags.DEFINE_boolean('online_training', False, 'If to change dataset')
flags.DEFINE_boolean('save_training', False, 'Save the cropped images')
# NOTE(review): "normaliation" typo lives in the flag help string; left
# as-is because help text is runtime-visible.
flags.DEFINE_boolean('update_vis', True, 'If to update visibility in the keypoint normaliation')
flags.DEFINE_float('padding_frac', 0.1, #0.05,
                   'bbox is increased by this fraction of max_dim')
flags.DEFINE_float('jitter_frac', 0.1, #0.05,
                   'bbox is jittered by this fraction of max_dim')
flags.DEFINE_enum('split', 'train', ['train', 'val', 'all', 'test'], 'eval split')
flags.DEFINE_integer('num_kps', 28, 'The dataloader should override these.')
flags.DEFINE_integer('n_data_workers', 4, 'Number of data loading workers')
# -------------- Dataset ------------- #
# ------------------------------------ #
class BaseDataset(Dataset):
    '''
    img, mask, kp, pose, texture_map data loader
    '''

    def __init__(self, opts, filter_key=None):
        """Store common options; optionally load SMAL blendshapes.

        Args:
            opts: parsed absl flags / options object; must provide img_size,
                jitter_frac, padding_frac and use_smal_betas (model_dir and
                model_name are additionally required when use_smal_betas is
                False).
            filter_key: optional key used by subclasses to filter samples.
        """
        # Child class should define/load:
        # self.kp_perm
        # self.img_dir
        # self.anno
        # self.anno_camera
        self.opts = opts
        self.img_size = opts.img_size
        self.jitter_frac = opts.jitter_frac
        self.padding_frac = opts.padding_frac
        self.filter_key = filter_key
        if not opts.use_smal_betas:
            # We need to load the blendshapes.
            model_path = osp.join(self.opts.model_dir, self.opts.model_name)
            # Pickle files are binary: open with 'rb' (the original text
            # mode 'r' makes pickle.load fail under Python 3), and use a
            # context manager so the handle is always closed.
            with open(model_path, 'rb') as f:
                dd = pkl.load(f)
            num_betas = dd['shapedirs'].shape[-1]
            self.shapedirs = np.reshape(dd['shapedirs'], [-1, num_betas]).T
    def forward_img(self, index):
        """Load and preprocess one training sample.

        Reads the annotation at `index`, loads image / mask / texture map /
        uv-flow (from cached arrays when present, from disk otherwise),
        crops and rescales around the (possibly perturbed) mask bbox, and
        normalizes keypoints to [-1, 1].

        Returns the tuple (img, kp_norm, mask, camera_params, texture_map,
        model_trans, model_pose, model_betas, model_delta_v, occ_map,
        img_path, uv_flow). img/mask are transposed to channel-first.

        NOTE(review): camera_params, kp and model_trans are mutated in
        place along the way; the statement order below is load-bearing.
        """
        if True:  # kept from the original; preserves historical indentation
            data = self.anno[index].copy()
            data_sfm = self.anno_camera[index].copy()
            img_path = data['img_path']
            # Prefer the cached in-memory image; fall back to disk.
            if 'img' in data.keys():
                img = data['img']
            else:
                img = scipy.misc.imread(img_path) / 255.0
            # camera_params = [focal length, principal point (filled below)].
            camera_params = [np.copy(data_sfm['flength']), np.zeros(2)]
            if 'texture_map' in data.keys():
                texture_map_path = data['texture_map']
                if 'texture_map_data' in data.keys():
                    # assumes cached texture_map_data is already
                    # channel-first -- TODO confirm against the caching code
                    texture_map = data['texture_map_data']
                else:
                    texture_map = scipy.misc.imread(texture_map_path) / 255.0
                    texture_map = np.transpose(texture_map, (2, 0, 1))
            else:
                texture_map = None
            if data['mask_path'] is not None:
                mask_path = data['mask_path']
                if 'mask' in data.keys():
                    mask = data['mask']
                else:
                    mask = scipy.misc.imread(mask_path) / 255.0
            else:
                mask = None
            if 'uv_flow_path' in data.keys():
                uv_flow_path = data['uv_flow_path']
                if 'uv_flow' in data.keys():
                    uv_flow = data['uv_flow']
                else:
                    # NOTE(review): pickle file opened in text mode and never
                    # closed -- should be `with open(uv_flow_path, 'rb')`.
                    uvdata = pkl.load(open(uv_flow_path))
                    uv_flow = uvdata['uv_flow'].astype(np.float32)
                    # Normalize uv coordinates by half the source image size.
                    uv_flow[:,:,0] = uv_flow[:,:,0] /(uvdata['img_h']/2.)
                    uv_flow[:,:,1] = uv_flow[:,:,1] /(uvdata['img_w']/2.)
            else:
                uv_flow = None
            occ_map = None
            kp = data['keypoints']
            # Optional SMAL model parameters (copied so that later in-place
            # edits do not corrupt the annotation cache).
            if 'trans' in data.keys():
                model_trans = data['trans'].copy()
            else:
                model_trans = None
            if 'pose' in data.keys():
                model_pose = data['pose'].copy()
            else:
                model_pose = None
            if 'betas' in data.keys():
                model_betas = data['betas'].copy()
            else:
                model_betas = None
            if 'delta_v' in data.keys():
                model_delta_v = data['delta_v'].copy()
                if not self.opts.use_smal_betas:
                    # Modify the deformation to include B*\betas
                    nBetas = len(model_betas)
                    model_delta_v = model_delta_v + np.reshape(np.matmul(model_betas, self.shapedirs[:nBetas,:]), [model_delta_v.shape[0], model_delta_v.shape[1]])
            else:
                model_delta_v = None
            # Perspective camera needs image center
            camera_params[1][0] = img.shape[1]/2.
            camera_params[1][1] = img.shape[0]/2.
            # Tight bbox from the mask foreground; whole image otherwise.
            if mask is not None:
                M = mask[:,:,0]
                xmin = np.min(np.where(M>0)[1])
                ymin = np.min(np.where(M>0)[0])
                xmax = np.max(np.where(M>0)[1])
                ymax = np.max(np.where(M>0)[0])
            else:
                xmin = 0
                ymin = 0
                xmax = img.shape[1]
                ymax = img.shape[0]
            # Compute bbox
            bbox = np.array([xmin, ymin, xmax, ymax], float)
            if self.opts.border > 0:
                assert(('trans' in data.keys())==False)
                assert(('pose' in data.keys())==False)
                assert(('kp' in data.keys())==False)
                # This has to be used only if there are no annotations for the refinement!
                scale_factor = float(self.opts.img_size-2*self.opts.border) / np.max(img.shape[:2])
                img, _ = image_utils.resize_img(img, scale_factor)
                if mask is not None:
                    mask, _ = image_utils.resize_img(mask, scale_factor)
                # Crop img_size x img_size from the center
                center = np.round(np.array(img.shape[:2]) / 2).astype(int)
                # img center in (x, y)
                center = center[::-1]
                bbox = np.hstack([center - self.opts.img_size / 2., center + self.opts.img_size / 2.])
            if kp is not None:
                vis = kp[:, 2] > 0
                # Shift visible keypoints from 1-based to 0-based pixel
                # coordinates (in place).
                kp[vis, :2] -= 1
            else:
                vis = None
            # Peturb bbox
            if self.opts.perturb_bbox and mask is not None:
                bbox = image_utils.peturb_bbox(bbox, pf=self.padding_frac, jf=self.jitter_frac)
            orig_bbox = bbox[:]
            bbox = image_utils.square_bbox(bbox)
            # crop image around bbox, translate kps
            #if self.opts.use_bbox and mask is not None:
            if not self.opts.is_optimization:
                img, mask, kp, camera_params, model_trans, occ_map, uv_flow = self.crop_image(img,
                    mask, bbox, kp, vis, camera_params, model_trans, occ_map, uv_flow)
                # scale image, and mask. And scale kps.
                img, mask, kp, camera_params, occ_map, uv_flow = self.scale_image(img, mask, kp, vis, camera_params,
                    occ_map, orig_bbox, uv_flow)
            # Normalize kp to be [-1, 1]
            img_h, img_w = img.shape[:2]
            if kp is not None:
                kp_norm = self.normalize_kp(kp, img_h, img_w, self.opts.update_vis)
            else:
                kp_norm = None
            # Rescale focal length (and depth) to the reference camera.
            if not self.opts.use_camera:
                focal_length_fix = self.opts.camera_ref
                f_scale = focal_length_fix/camera_params[0]
                camera_params[0] *= f_scale
                model_trans[2] *= f_scale
            if self.opts.save_training:
                # Dump the preprocessed crop next to the source image.
                scipy.misc.imsave(img_path+'.crop.png', img)
                scipy.misc.imsave(img_path+'.crop_mask.png', mask)
                data = {'kp':kp, 'sfm_pose':camera_params, 'model_trans':model_trans, 'model_pose':model_pose, 'model_betas':model_betas}
                pkl.dump(data, open(img_path+'.crop.pkl', 'wb'))
                if uv_flow is not None:
                    pkl.dump(uv_flow, open(img_path+'._uv_flow_crop.pkl', 'wb'))
                print('saved ' + img_path)
            # Finally transpose the image to 3xHxW
            img = np.transpose(img, (2, 0, 1))
            if mask is not None:
                mask = np.transpose(mask, (2, 0, 1))
            if self.opts.border > 0:
                # NOTE(review): img is channel-first here, so shape[1]/shape[0]
                # are H and C -- confirm this is intended for the border path.
                camera_params[1][0] = img.shape[1]/2.
                camera_params[1][1] = img.shape[0]/2.
            return img, kp_norm, mask, camera_params, texture_map, model_trans, model_pose, model_betas, model_delta_v, occ_map, img_path, uv_flow
def normalize_kp(self, kp, img_h, img_w, update_vis=False):
vis = kp[:, 2, None] > 0
new_kp = np.stack([2 * (kp[:, 0] / img_w) - 1,
2 * (kp[:, 1] / img_h) - 1,
kp[:, 2]]).T
if update_vis:
new_kp[np.where(new_kp[:,0] < -1),2] = 0
new_kp[np.where(new_kp[:,0] > 1),2] = 0
new_kp[np.where(new_kp[:,1] < -1),2] = 0
new_kp[np.where(new_kp[:,1] > 1),2] = 0
new_kp = vis * new_kp
return new_kp
def get_camera_projection_matrix(self, f, c):
P = np.hstack([np.eye(3), np.zeros((3,1))])
# Add camera matrix
K = np.zeros((3, 3))
K[0, 0] = f
K[1, 1] = f
K[2, 2] = 1
K[0, 2] = c[0]
K[1, 2] = c[1]
KP = np.array(np.matrix(K)*np.matrix(P))
return KP
def my_project_points(self, ptsw, P):
# Project world points ptsw(Nx3) into image points ptsi(Nx2) using the camera matrix P(3X4)
nPts = ptsw.shape[0]
ptswh = np.ones((nPts, 4))
ptswh[:, :-1] = ptsw
ptsih = np.dot(ptswh, P.T)
ptsi = np.divide(ptsih[:, :-1], ptsih[:, -1][:, np.newaxis])
return ptsi
def my_anti_project_points(self, ptsi, P):
nPts = ptsi.shape[0]
ptsih = np.ones((nPts, 3))
ptsih[:, :-1] = ptsi
ptswh = np.dot(ptsih, np.array(np.matrix(P.T).I))
nPts = ptswh.shape[0]
if P[-1,-1] == 0:
ptsw = ptswh[:, :-1]
else:
ptsw = np.divide(ptswh[:, :-1], ptswh[:, -1][:, np.newaxis])
return ptsw
def get_model_trans_for_cropped_image(self, trans, bbox, flength, img_w, img_h):
'''
trans: 3 model translation
bbox: 1 x 4 xmin, ymin, xmax, ymax
flength: 1
img_w: 1 width original image
img_h: 1 height original image
'''
# Location of the model in image frame (pixel coo)
P = self.get_camera_projection_matrix(flength, np.array([img_w/2., img_h/2.]))
Q = np.zeros((1,3))
Q[0,:] = trans
W = self.my_project_points(Q, P)
# Location of the model w.r.t. the center of the bbox (pixel coo)
E = np.zeros((1,2))
E[0,0] = W[0,0] - (bbox[0] + (bbox[2]-bbox[0])/2.)
E[0,1] = W[0,1] - (bbox[1] + (bbox[3]-bbox[1])/2.)
# Define the new camera for the bbox
# Center of the bbox in the bbox frame
c = np.array([bbox[2]-bbox[0], bbox[3]-bbox[1]])/2.
P = self.get_camera_projection_matrix(flength, c)
P[-1,-1] = trans[2]
# Location of the model in world space w.r.t. the new image and camera
D = self.my_anti_project_points(E, P)
trans[:] = np.array([D[0,0], D[0,1], trans[2]])
return trans
def crop_image(self, img, mask, bbox, kp, vis, camera_params, model_trans, occ_map, uv_flow):
img_orig_h, img_orig_w = img.shape[:2]
# crop image and mask and translate kps
img = image_utils.crop(img, bbox, bgval=self.opts.bgval)
if mask is not None:
mask = image_utils.crop(mask, bbox, bgval=0)
if occ_map is not None:
occ_map = image_utils.crop(occ_map, bbox, bgval=0)
if uv_flow is not None:
# uv_flow has image coordinates in the first 2 channels and a mask in the third channel
# image coordinates are normalized w.r.t the original size so their value is in [-1,1]
# un-normalize uv_flow coordinates
uv = uv_flow[:,:,:2]
uv[:,:,0] = uv[:,:,0]*(img_orig_h/2.)+img_orig_h/2.
uv[:,:,1] = uv[:,:,1]*(img_orig_w/2.)+img_orig_w/2.
# Change the values
uv[:,:,0] -= bbox[0]
uv[:,:,1] -= bbox[1]
img_h, img_w = img.shape[:2]
uv_flow[:,:,0] = (uv[:,:,0]-(img_h/2.))/(img_h/2.)
uv_flow[:,:,1] = (uv[:,:,1]-(img_w/2.))/(img_w/2.)
if kp is not None:
kp[vis, 0] -= bbox[0]
kp[vis, 1] -= bbox[1]
if camera_params[0]>0:
model_trans = self.get_model_trans_for_cropped_image(model_trans, bbox, camera_params[0], img_orig_w, img_orig_h)
camera_params[1][0] = img.shape[1]/2.
camera_params[1][1] = | |
<reponame>gneumann333/jumpscaleX_core<filename>JumpscaleCore/clients/tests/manual/test_ssh_client.py
import unittest
from Jumpscale import j
from random import randint
from testconfig import config
from base_test import BaseTest
from parameterized import parameterized
class SshClient(BaseTest):
    # Connection parameters for the remote machine under test, read from the
    # [ssh] section of the `testconfig` configuration dict.
    addr = config["ssh"]["addr"]
    port = config["ssh"]["port"]
    login = config["ssh"]["login"]
    passwd = config["ssh"]["passwd"]
@classmethod
def setUpClass(cls):
cls.info("create ssh client")
cls.SSH_CLIENT = j.clients.ssh.get(
name="SSH_{}".format(randint(1, 1000)), addr=cls.addr, port=cls.port, login=cls.login, passwd=cls.passwd
)
    @classmethod
    def tearDownClass(cls):
        # Dispose of the shared ssh client once all tests in the class have run.
        cls.info("delete ssh client")
        cls.SSH_CLIENT.delete()
def install_nginx(self):
self.info("install nginx on remote machine")
self.os_command(
'sshpass -p {} ssh root@{} -p {} "sudo apt install nginx -y "'.format(self.passwd, self.addr, self.port)
)
def check_nginx_install(self):
self.info("check that nginx is installed correctly")
self.install_nginx()
output, error = self.os_command(
'sshpass -p {} ssh root@{} -p {} "curl localhost"'.format(self.passwd, self.addr, self.port)
)
self.info("check that nginx is installed correctly on remote machine")
if "Welcome to nginx!" in output.decode():
return True
else:
return False
    def test001_addr_variable_properites(self):
        """
        TC 509
        Test case to test the addr variable property.
        **Test scenario**
        #. check the output from the addr variable property; it should equal the addr of the remote ssh machine.
        """
        self.info("check addr variable property")
        self.assertEqual(self.SSH_CLIENT.addr_variable, self.addr)
    def test002_port_variable_property(self):
        """
        TC 510
        Test case to test the port variable property.
        **Test scenario**
        #. check the output from the port variable property; it should equal the port of the remote ssh machine.
        """
        self.info("check port variable property")
        # Compare as strings: the config value may be str while the client
        # stores an int (or vice versa).
        self.assertEqual(str(self.SSH_CLIENT.port_variable), str(self.port))
    @unittest.skip("https://github.com/threefoldtech/jumpscaleX_core/issues/206")
    def test003_is_connected_property(self):
        """
        TC 511
        Test case to test the is_connected property.
        **Test scenario**
        #. check that the is_connected property is True.
        """
        self.info("check that is_connected property is True")
        # NOTE(review): the attribute is spelled `isconnected` here while the
        # docstring says `is_connected` -- confirm the real property name on
        # the ssh client when the linked issue is resolved.
        self.assertTrue(self.SSH_CLIENT.isconnected)
def test004_file_copy_valid_file_local_and_valid_file_remote(self):
"""
TC 485
Test case for file_copy method, valid local file and valid remote file, should pass
**Test scenario**
#. create test file, in local machine.
#. copy this file to remote machine.
#. make sure that the file is copied correctly
"""
self.info("create file locally")
with open("/tmp/ssh_test04.txt", "w") as f:
data = "test ssh client copy_file function\n"
f.write(data)
self.info("use copy_file to copy ssh_test04.txt from local machine to remote one")
self.SSH_CLIENT.file_copy("/tmp/ssh_test04.txt", "/tmp/ssh_test04.txt")
self.info("check that file is copy in the remote machine or not")
output, error = self.os_command(
'sshpass -p {} ssh {}@{} -p {} "cat /tmp/ssh_test04.txt"'.format(
self.passwd, self.login, self.addr, self.port
)
)
self.assertEqual("test ssh client copy_file function\n", output.decode())
def test005_file_copy_non_valid_file_local_and_valid_file_remote(self):
"""
TC 486
Test Case for file_copy method, non valid local file and valid remote file, should fail.
**Test scenario**
#. try to copy non valid local file, to remote file, should fail.
"""
self.info("try to copy non valid local file to remote valid file")
with self.assertRaises(Exception):
self.SSH_CLIENT.copy_file("/tmp/NOT_VALID", "/tmp/ssh")
def test006_file_copy_directory_local_and_valid_file_remote(self):
"""
TC 487
Test Case for file_copy method for directory in local machine, should fail
**Test scenario**
#. create a directory in local machine.
#. try to copy the directory to remote file it should fail.
"""
self.info("create a directory in local machine ")
output, error = self.os_command("mkdir /tmp/ssh_test06/")
self.assertFalse(error)
self.info("try to copy the directory to remote file it should fail.")
with self.assertRaises(Exception):
self.SSH_CLIENT.copy_file("/tmp/ssh_test06/", "/tmp/ssh")
def test007_file_copy_file_local_and_dir_remote(self):
"""
TC 488
Test Case for test copy_file method for valid local file with the same name as a destination directory.
**Test scenario**
#. create a directory in remote machine with name ssh_test07 in /tmp/
#. create a file with the same name in local machine.
#. try to use copy_file to copy this file from local machine to remote one, should fail.
"""
self.info("create a directory in remote machine with name ssh_test07 in /tmp/")
self.os_command(
'sshpass -p {} ssh {}@{} -p {} "mkdir /tmp/ssh_test07"'.format(
self.passwd, self.login, self.addr, self.port
)
)
self.info("create a file with name ssh_test_DIR in local machine")
self.os_command("touch /tmp/ssh_test07")
self.info("try to use copy_file to copy this file from local machine to remote one, should fail.")
with self.assertRaises(Exception):
self.SSH_CLIENT.copy_file("/tmp/ssh_test07", "/tmp/")
@unittest.skip("https://github.com/threefoldtech/jumpscaleX_core/issues/160")
def test008_download_with_valid_source_valid_dest_none_ignoredir_none_ignorefiles_recursive_True(self):
"""
TC 491
Test Case to test download method in ssh client
**Test scenario**
#. create a file in a directory in remote machine, with certain file name.
#. use download method to copy this directory in my local machine in /tmp/ssh_test/.
#. check if files is downloaded or not.
"""
self.info("create a file in a directory in remote machine, with certain file name.")
self.os_command(
'sshpass -p {} ssh {}@{} -p {} "mkdir /tmp/ssh_test08/"'.format(
self.passwd, self.login, self.addr, self.port
)
)
self.os_command(
'sshpass -p {} ssh {}@{} -p {} "touch /tmp/ssh_test08/test1 /tmp/ssh_test08/test2"'.format(
self.passwd, self.login, self.addr, self.port
)
)
self.info("use download method to copy this directory in my local machine in /tmp/ssh_test08/")
self.SSH_CLIENT.download(source="/tmp/ssh_test08/", dest="/tmp/ssh_test08/")
self.info("check if files is downloaded or not")
output, error = self.os_command("ls /tmp/ssh_test08/")
self.assertFalse(error)
self.assertEqual("test1\ntest2\n", output.decode())
@unittest.skip("https://github.com/threefoldtech/jumpscaleX_core/issues/160")
def test009_download_with_valid_source_valid_dest_none_ignoredir_none_ignorefiles_recursive_False(self):
"""
TC 502
Test Case to test download method in ssh client
**Test scenario**
#. create a files in a directory in remote machine, with certain file name.
#. use download method to copy this directory in my local machine in /tmp/test_ssh/.
#. check if files is downloaded or not.
"""
self.info("create a file in a directory in remote machine, with certain file name.")
self.os_command(
'sshpass -p {} ssh {}@{} -p {} "mkdir /tmp/ssh_test09/test1/test2 -p"'.format(
self.passwd, self.login, self.addr, self.port
)
)
self.os_command(
'sshpass -p {} ssh {}@{} -p {} "touch /tmp/ssh_test09/test09_1 /tmp/ssh_test09/test1/test2/test3"'.format(
self.passwd, self.login, self.addr, self.port
)
)
self.info("use download method to copy this directory in my local machine in /tmp/ssh_test09/")
self.SSH_CLIENT.download(source="/tmp/ssh_test09/", dest="/tmp/ssh_test09/", recursive=False)
self.info("check if files is downloaded or not")
output, error = self.os_command("ls /tmp/ssh_test09/")
self.assertFalse(error)
self.assertEqual("test09_1\n", output.decode())
@unittest.skip("https://github.com/threefoldtech/jumpscaleX_core/issues/160")
def test010_download_with_valid_source_valid_dest_with_ignoredir_with_ignorefiles_recursive_True(self):
"""
TC 503
Test Case to test download method in ssh client
**Test scenario**
#. create a files in a directory in remote machine, with certain file name.
#. use download method to copy this directory in my local machine in /tmp/ssh_test10/.
#. check if files is downloaded or not.
"""
self.info("create a file in remote directory in remote machine, with certain file name.")
self.os_command(
'sshpass -p {} ssh {}@{} -p {} "mkdir /tmp/ssh_test10/test1 /tmp/ssh_test10/test2 -p"'.format(
self.passwd, self.login, self.addr, self.port
)
)
self.os_command(
'sshpass -p {} ssh {}@{} -p {} "touch /tmp/ssh_test10/test10_1 /tmp/ssh_test10/test10_2"'.format(
self.passwd, self.login, self.addr, self.port
)
)
self.info("use download method to copy this directory in my local machine in /tmp/ssh_test10/")
self.SSH_CLIENT.download(
source="/tmp/ssh_test10/",
dest="/tmp/ssh_test10/",
recursive=True,
ignoredir=["test2"],
ignorefiles=["test10_2"],
)
self.info("check if files is downloaded or not")
output, error = self.os_command("ls /tmp/ssh_test10/")
self.assertFalse(error)
self.assertEqual("test1\ntest10_1\n", output.decode())
    def test011_download_with_non_valid_source_should_fail(self):
        """
        TC 503
        Test case for the download method with a non-valid source.
        **Test scenario**
        #. try to use the download method with a non-valid source; should fail.
        """
        # NOTE(review): the TC number duplicates test010's "TC 503" -- confirm
        # the intended test-case id in the test plan.
        self.info("try to use download method with non valid source should fail")
        with self.assertRaises(Exception):
            self.SSH_CLIENT.download(source="non-valid", dest="/tmp")
@unittest.skip("https://github.com/threefoldtech/jumpscaleX_core/issues/160")
def test012_upload_with_valid_source_valid_dest_none_ignoredir_none_ignorefiles_recursive_True(self):
"""
TC 491
Test Case to test upload method in ssh client
**Test scenario**
#. create a file in a directory in a local machine, with certain file name.
#. use upload method to copy this directory in my local machine in /tmp/ssh_test12/.
#. check if files is uploaded or not.
"""
self.info("create a file in a directory in local machine, with certain file name.")
self.os_command("mkdir /tmp/ssh_test12/")
self.os_command("touch /tmp/ssh_test12/test1 /tmp/ssh_test12/test2")
self.info("use upload method to copy this directory in my local machine in /tmp/test_ssh/")
self.SSH_CLIENT.upload(source="/tmp/ssh_test12/", dest="/tmp/ssh_test12/")
self.info("check if files is downloaded or not")
output, error = self.os_command(
'sshpass -p {} ssh {}@{} -p {} "ls /tmp/ssh_test12/"'.format(self.passwd, self.login, self.addr, self.port)
)
self.assertFalse(error)
self.assertEqual("test1\ntest2\n", output.decode())
@unittest.skip("https://github.com/threefoldtech/jumpscaleX_core/issues/160")
def test013_upload_with_valid_source_valid_dest_none_ignoredir_none_ignorefiles_recursive_False(self):
"""
TC 506
Test Case to test upload method in ssh client
**Test scenario**
#. create a files in a directory in local machine, with certain file name.
#. use upload method to copy this directory in my local machine in /tmp/ssh_test13/.
#. check if files is uploaded or not.
"""
self.info("create a file in a directory in local machine, with certain file name.")
self.os_command("mkdir /tmp/ssh_test13/test1/test2 -p")
self.os_command("touch /tmp/ssh_test13/test13_1 /tmp/ssh_test13/test1/test2/test3")
self.info("use upload method to copy this directory from my local machine in /tmp/ssh_test13/")
self.SSH_CLIENT.upload(source="/tmp/ssh_test13/", dest="/tmp/ssh_test13/", recursive=False)
self.info("check if files is uploaded or not")
output, error = self.os_command(
'sshpass -p {} ssh {}@{} -p {} "ls /tmp/ssh_test13/"'.format(self.passwd, self.login, self.addr, self.port)
)
self.assertFalse(error)
self.assertEqual("test13_1\n", output.decode())
@unittest.skip("https://github.com/threefoldtech/jumpscaleX_core/issues/160")
def test014_upload_with_valid_source_valid_dest_with_ignoredir_with_ignorefiles_recursive_True(self):
"""
TC 507
Test Case to test upload method in ssh client
**Test scenario**
#. create a files in a directory in local machine, | |
KiB need to add to Memory pool" %self.alloc_mem)
MemoryPool.instance().increase_memory(self.alloc_mem)
self._cleanup_phantom_devs(paths)
self._cleanupVm()
if ("transient" in self.info["other_config"] and \
bool(self.info["other_config"]["transient"])) or \
("change_home_server" in self.info and \
bool(self.info["change_home_server"])):
XendDomain.instance().domain_delete_by_dominfo(self)
    def resetDomain(self):
        """Destroy this domain and re-create it from its current config.

        The xend-private xenstore subtree ('xend/...') is preserved across
        the reset. Errors are logged, not propagated (best effort).
        """
        log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))
        old_domid = self.domid
        # Snapshot the xend-private xenstore subtree before destroying.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            self._unwatchVm()
            self.destroy()
            new_dom = None
            try:
                from xen.xend import XendDomain
                # Let the new domain be assigned a fresh domid.
                new_dom_info['domid'] = None
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                # Restore the preserved xend/ subtree into the new domain.
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
            except:
                # Roll back the half-created domain before re-raising.
                if new_dom:
                    new_dom.destroy()
                raise
        except:
            log.exception('Failed to reset domain %s.', str(old_domid))
    def resumeDomain(self):
        """Resume this domain if (and only if) it is currently suspended."""
        log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))
        # resume a suspended domain (e.g. after live checkpoint, or after
        # a later error during save or migrate); checks that the domain
        # is currently suspended first so safe to call from anywhere
        xeninfo = dom_get(self.domid)
        if xeninfo is None:
            return
        if not xeninfo['shutdown']:
            return
        reason = shutdown_reason(xeninfo['shutdown_reason'])
        if reason != 'suspend':
            return
        try:
            # could also fetch a parsed note from xenstore
            # SUSPEND_CANCEL in the kernel notes means the guest can resume
            # without its devices being torn down and re-created ("fast" path).
            fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
            if not fast:
                self._releaseDevices()
                self.testDeviceComplete()
                self.testvifsComplete()
                log.debug("XendDomainInfo.resumeDomain: devices released")
                self._resetChannels()
                self._removeDom('control/shutdown')
                self._removeDom('device-misc/vif/nextDeviceID')
                self._createChannels()
                self._introduceDomain()
                self._storeDomDetails()
                self._createDevices()
                log.debug("XendDomainInfo.resumeDomain: devices created")
            xc.domain_resume(self.domid, fast)
            ResumeDomain(self.domid)
        except:
            log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
        self.image.resumeDeviceModel()
        log.debug("XendDomainInfo.resumeDomain: completed")
#
# Channels for xenstore and console
#
def _createChannels(self):
"""Create the channels to the domain.
"""
self.store_port = self._createChannel()
self.console_port = self._createChannel()
def _createChannel(self):
"""Create an event channel to the domain.
"""
try:
if self.domid != None:
return xc.evtchn_alloc_unbound(domid = self.domid,
remote_dom = 0)
except:
log.exception("Exception in alloc_unbound(%s)", str(self.domid))
raise
def _resetChannels(self):
"""Reset all event channels in the domain.
"""
try:
if self.domid != None:
return xc.evtchn_reset(dom = self.domid)
except:
log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
raise
#
# Bootloader configuration
#
    def _configureBootloader(self):
        """Run the bootloader if we're configured to do so.

        On success the bootloader's output (kernel/ramdisk/args s-expression)
        is merged into self.info. Raises VmError if no bootable disk is found
        or the bootloader produces no configuration.
        """
        blexec = self.info['PV_bootloader']
        bootloader_args = self.info['PV_bootloader_args']
        kernel = self.info['PV_kernel']
        ramdisk = self.info['PV_ramdisk']
        args = self.info['PV_args']
        boot = self.info['HVM_boot_policy']
        if boot:
            # HVM booting.
            pass
        elif not blexec and kernel:
            # Boot from dom0. Nothing left to do -- the kernel and ramdisk
            # will be picked up by image.py.
            pass
        else:
            # Boot using bootloader
            if not blexec or blexec == 'pygrub':
                blexec = auxbin.pathTo('pygrub')
            blcfg = None
            # Only disks flagged 'bootable' are candidates for the bootloader.
            disks = [x for x in self.info['vbd_refs']
                     if self.info['devices'][x][1]['bootable']]
            if not disks:
                msg = "Had a bootloader specified, but no disks are bootable"
                log.error(msg)
                raise VmError(msg)
            devinfo = self.info['devices'][disks[0]]
            devtype = devinfo[0]
            disk = devinfo[1]['uname']
            (fn, types) = parse_uname(disk)
            def _shouldMount(types):
                # Decide whether the disk image must be surfaced as a block
                # device (via a dom0 vbd) before pygrub can read it.
                if types[0] in ('file', 'phy'):
                    return False
                if types[0] in ('tap', 'tap2'):
                    if types[1] in ('aio', 'sync'):
                        return False
                    else:
                        return True
                return os.access('/etc/xen/scripts/block-%s' % types[0], os.X_OK)
            mounted = _shouldMount(types)
            mounted_vbd_uuid = 0
            if mounted:
                # This is a file, not a device. pygrub can cope with a
                # file if it's raw, but if it's QCOW or other such formats
                # used through blktap, then we need to mount it first.
                log.info("Mounting %s on %s." %
                         (fn, BOOTLOADER_LOOPBACK_DEVICE))
                vbd = {
                    'mode': 'RO',
                    'device': BOOTLOADER_LOOPBACK_DEVICE,
                    }
                from xen.xend import XendDomain
                dom0 = XendDomain.instance().privilegedDomain()
                mounted_vbd_uuid = dom0.create_vbd(vbd, disk);
                dom0._waitForDeviceUUID(mounted_vbd_uuid)
                fn = BOOTLOADER_LOOPBACK_DEVICE
            try:
                blcfg = bootloader(blexec, fn, self, False,
                                   bootloader_args, kernel, ramdisk, args)
            finally:
                # Always tear the loopback vbd back down, even if the
                # bootloader failed.
                if mounted:
                    log.info("Unmounting %s from %s." %
                             (fn, BOOTLOADER_LOOPBACK_DEVICE))
                    _, vbd_info = dom0.info['devices'][mounted_vbd_uuid]
                    dom0.destroyDevice(dom0.getBlockDeviceClass(vbd_info['devid']),
                                       BOOTLOADER_LOOPBACK_DEVICE, force = True)
            if blcfg is None:
                msg = "Had a bootloader specified, but can't find disk"
                log.error(msg)
                raise VmError(msg)
            self.info.update_with_image_sxp(blcfg, True)
#
# VM Functions
#
    def _readVMDetails(self, params):
        """Read the specified parameters from the store.

        Returns the gathered values, or an empty list if any numeric entry in
        the store is malformed (logged as store corruption).
        """
        try:
            return self._gatherVm(*params)
        except ValueError:
            # One of the int/float entries in params has a corresponding store
            # entry that is invalid. We recover, because older versions of
            # Xend may have put the entry there (memory/target, for example),
            # but this is in general a bad situation to have reached.
            log.exception(
                "Store corrupted at %s! Domain %d's configuration may be "
                "affected.", self.vmpath, self.domid)
            return []
    def _cleanupVm(self):
        """Cleanup VM resources. Idempotent. Nothrow guarantee."""
        self._unwatchVm()
        try:
            self._removeVm()
        except:
            # Bare except is deliberate: this is a best-effort cleanup path
            # that must never raise (see "Nothrow guarantee" above).
            log.exception("Removing VM path failed.")
    def checkLiveMigrateMemory(self):
        """ Make sure there's enough memory to migrate this domain """
        overhead_kb = 0
        if arch.type == "x86":
            # 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than
            # the minimum that Xen would allocate if no value were given.
            overhead_kb = self.info['VCPUs_max'] * 1024 + \
                          (self.info['memory_static_max'] / 1024 / 1024) * 4
            # Round up to a whole number of MiB (Python 2 integer division).
            overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
            # The domain might already have some shadow memory
            overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
        if overhead_kb > 0:
            # Balloon dom0 down (if needed) to free the required headroom.
            balloon.free(overhead_kb, self)
    def _unwatchVm(self):
        """Remove the watch on the VM path, if any. Idempotent. Nothrow
        guarantee."""
        try:
            try:
                if self.vmWatch:
                    self.vmWatch.unwatch()
            finally:
                # Always drop the reference, even if unwatch() raised.
                self.vmWatch = None
        except:
            # Bare except is deliberate (nothrow guarantee).
            log.exception("Unwatching VM path failed.")
    def testDeviceComplete(self):
        """ For Block IO migration safety we must ensure that
        the device has shutdown correctly, i.e. all blocks are
        flushed to disk
        """
        start = time.time()
        while True:
            test = 0
            diff = time.time() - start
            # Poll all three block-device controller flavours.
            vbds = self.getDeviceController('vbd').deviceIDs()
            taps = self.getDeviceController('tap').deviceIDs()
            tap2s = self.getDeviceController('tap2').deviceIDs()
            for i in vbds + taps + tap2s:
                # Any remaining id means a device is still active; sleep a
                # little per still-active device before polling again.
                test = 1
                log.info("Dev %s still active, looping...", i)
                time.sleep(0.1)
            if test == 0:
                break
            if diff >= MIGRATE_TIMEOUT:
                # Give up waiting but proceed anyway (caller is migrating).
                log.info("Dev still active but hit max loop timeout")
                break
    def testvifsComplete(self):
        """ In case vifs are released and then created for the same
        domain, we need to wait the device shut down.
        """
        start = time.time()
        while True:
            test = 0
            diff = time.time() - start
            for i in self.getDeviceController('vif').deviceIDs():
                # A remaining id means a vif is still active; sleep a little
                # per still-active device before polling again.
                test = 1
                log.info("Dev %s still active, looping...", i)
                time.sleep(0.1)
            if test == 0:
                break
            if diff >= MIGRATE_TIMEOUT:
                # Give up waiting but proceed anyway.
                log.info("Dev still active but hit max loop timeout")
                break
    def _storeVmDetails(self):
        """Write the legacy VM parameters from self.info into xenstore."""
        to_store = {}
        for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
            # Map the legacy xenstore key to its XenAPI config name.
            info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
            if self._infoIsSet(info_key):
                to_store[key] = str(self.info[info_key])
        if self._infoIsSet("static_memory_min"):
            to_store["memory"] = str(self.info["static_memory_min"])
        if self._infoIsSet("static_memory_max"):
            to_store["maxmem"] = str(self.info["static_memory_max"])
        image_sxpr = self.info.image_sxpr()
        if image_sxpr:
            to_store['image'] = sxp.to_string(image_sxpr)
        # Initialise the restart counter only on the first store.
        if not self._readVm('xend/restart_count'):
            to_store['xend/restart_count'] = str(0)
        # Passwords are scrubbed from the log line only, not from the store.
        log.debug("Storing VM details: %s", scrub_password(to_store))
        self._writeVm(to_store)
        self._setVmPermissions()
    def _setVmPermissions(self):
        """Allow the guest domain to read its UUID. We don't allow it to
        access any other entry, for security."""
        # Read-only ACL for the guest on its own uuid node.
        xstransact.SetPermissions('%s/uuid' % self.vmpath,
                                  { 'dom' : self.domid,
                                    'read' : True,
                                    'write' : False })
#
# Utility functions
#
def __getattr__(self, name):
if name == "state":
log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
log.warn("".join(traceback.format_stack()))
return self._stateGet()
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if name == "state":
log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
log.warn("".join(traceback.format_stack()))
self._stateSet(value)
else:
self.__dict__[name] = value
    def _stateSet(self, state):
        """Record a power-state change: wake waiters and dispatch a XenAPI
        'power_state' modification event, but only if the state changed."""
        self.state_updated.acquire()
        try:
            # TODO Not sure this is correct...
            # _stateGet is live now. Why not fire event
            # even when it hasn't changed?
            if self._stateGet() != state:
                self.state_updated.notifyAll()
                import XendAPI
                XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
                                       'power_state')
        finally:
            self.state_updated.release()
    def _stateGet(self):
        """Derive the XenAPI power state live from the hypervisor (and the
        managed-checkpoint directory for the HALTED/SUSPENDED distinction)."""
        # Lets try and reconstitute the state from xc
        # first lets try and get the domain info
        # from xc - this will tell us if the domain
        # exists
        info = dom_get(self.getDomid())
        if info is None or info['shutdown']:
            # We are either HALTED or SUSPENDED
            # check saved image exists
            from xen.xend import XendDomain
            managed_config_path = \
                XendDomain.instance()._managed_check_point_path( \
                    self.get_uuid())
            if os.path.exists(managed_config_path):
                return XEN_API_VM_POWER_STATE_SUSPENDED
            else:
                return XEN_API_VM_POWER_STATE_HALTED
        elif info['crashed']:
            # Crashed
            return XEN_API_VM_POWER_STATE_CRASHED
        else:
            # We are either RUNNING or PAUSED
            if info['paused']:
                return XEN_API_VM_POWER_STATE_PAUSED
            else:
                return XEN_API_VM_POWER_STATE_RUNNING
def _infoIsSet(self, name):
return name in self.info and self.info[name] is not None
def _checkName(self, name):
"""Check if a vm name is valid. Valid names contain alphabetic
characters, digits, or characters in '_-.:+'.
The same name cannot be used for more than one vm at the same time.
@param name: name
@raise: VmError if invalid
"""
from xen.xend import XendDomain
if name is None or name == '':
raise VmError('Missing VM Name')
if not re.search(r'^[A-Za-z0-9_\-\.\:\+]+$', name):
raise VmError('Invalid VM Name')
dom = XendDomain.instance().domain_lookup_nr(name)
if dom and dom.info['uuid'] != self.info['uuid']:
raise VmError("VM name '%s' already exists%s" %
(name,
dom.domid is not None | |
"""
Client
======
"""
from collections import namedtuple, deque
from logging import getLogger
import functools
from blinker import Signal
import tornado.ioloop
import zmq
from zmq.eventloop.zmqstream import ZMQStream
from .common import EndpointType, ProtocolError, MessageType
from .common import make_msg, parse_msg
# Global logging object
log = getLogger(__name__)
class Client(object):
"""Client for a streaming kinect2 server.
Usually the client will be used with a ``with`` statement::
with Client(endpoint) as c:
# c is connected here
pass
# c is disconnected here
*control_endpoint* is the zeromq control endpoint for the server which
should be connected to.
If not *None*, *zmq_ctx* is the zeromq context to create sockets in. If
*zmq_ctx* is *None*, the global context returned by
:py:meth:`zmq.Context.instance` is used.
If not *None*, *io_loop* is the event loop to pass to
:py:class:`zmq.eventloop.zmqstream.ZMQStream` used to listen to responses
from the server. If *None* then global IO loop is used.
If *connect_immediately* is *True* then the client attempts to connect when
constructed. If *False* then :py:meth:`connect` must be used explicitly.
.. py:attribute:: server_name
A string giving a human-readable name for the server or *None* if the
server has not yet replied to our initial query.
.. py:attribute:: endpoints
A :py:class:`dict` of endpoint addresses keyed by
:py:class:`streamkinect2common.EndpointType`.
.. py:attribute:: is_connected
*True* if the client is connected. *False* otherwise.
The following attributes are mostly of use to the unit tests and advanced
users.
.. py:attribute:: heartbeat_period
The delay, in milliseconds, between "heartbeat" requests to the server.
These are used to ensure the server is still alive. Changes to this
attribute are ignored once :py:meth:`connect` has been called.
.. py:attribute:: response_timeout
The maximum wait time, in milliseconds, the client waits for the server
to reply before giving up.
"""
on_connect = Signal()
"""A signal which is emitted when the client connects to a server."""
on_disconnect = Signal()
"""A signal which is emitted when the client disconnects from a server."""
on_add_kinect = Signal()
"""A signal which is emitted when a new kinect device is available. Handlers
should accept a single keyword argument *kinect_id* which is the unique id
associated with the new device."""
on_remove_kinect = Signal()
"""A signal which is emitted when a kinect device is removed. Handlers
should accept a single keyword argument *kinect_id* which is the unique id
associated with the new device."""
on_depth_frame = Signal()
"""A signal which is emitted when a new depth frame is available. Handlers
should accept two keyword arguments: *depth_frame* which will be an
instance of an object with the same interface as :py:class:`DepthFrame` and
*kinect_id* which will be the unique id of the kinect device producing the
depth frame."""
    def __init__(self, control_endpoint, connect_immediately=False, zmq_ctx=None, io_loop=None):
        # See the class docstring for the meaning of all constructor arguments.
        self.is_connected = False
        self.server_name = None
        self.endpoints = {
            EndpointType.control: control_endpoint
        }
        # Default values for timeouts, periods, etc (both in milliseconds).
        self.heartbeat_period = 10000
        self.response_timeout = 5000
        if zmq_ctx is None:
            zmq_ctx = zmq.Context.instance()
        self._zmq_ctx = zmq_ctx
        self._io_loop = io_loop or tornado.ioloop.IOLoop.instance()
        # FIFO of per-request reply callbacks; the REQ socket guarantees
        # replies arrive in request order.
        self._response_handlers = deque()
        # Heartbeat callback
        self._heartbeat_callback = None
        # Dictionary of device records keyed by id
        self._kinect_records = {}
        # ZMQStream for control socket
        self._control_stream = None
        # Handle to timeout when waiting for a response
        self._response_timeout_handle = None
        if connect_immediately:
            self.connect()
@property
def kinect_ids(self):
return list(self._kinect_records.keys())
def ping(self, pong_cb=None):
"""Send a 'ping' request to the server. If *pong_cb* is not *None*, it
is a callable which is called with no arguments when the pong response
has been received.
"""
self._ensure_connected()
def pong(type, payload, pong_cb=pong_cb):
if pong_cb is not None:
pong_cb()
self._control_send(MessageType.ping, recv_cb=pong)
    def enable_depth_frames(self, kinect_id):
        """Enable streaming of depth frames. *kinect_id* is the id of the
        device which should have streaming enabled.
        :raises ValueError: if *kinect_id* does not correspond to a connected device
        """
        try:
            record = self._kinect_records[kinect_id]
        except KeyError:
            raise ValueError('Kinect id "{0}" does not correspond to a connected device'.format(
                kinect_id))
        # Create subscriber stream
        socket = self._zmq_ctx.socket(zmq.SUB)
        socket.connect(record.endpoints[EndpointType.depth])
        # Empty subscription topic == receive every published message.
        socket.setsockopt_string(zmq.SUBSCRIBE, u'')
        stream = ZMQStream(socket, self._io_loop)
        record.streams[EndpointType.depth] = stream
        # Fire signal on incoming depth frame
        def on_recv(msg, kinect_id=kinect_id):
            # TODO: decompress frame
            self.on_depth_frame.send(self, kinect_id=kinect_id, depth_frame=msg)
        # Wire up callback
        stream.on_recv(on_recv)
    def connect(self):
        """Explicitly connect the client."""
        if self.is_connected:
            log.warn('Client already connected')
            return
        # Create, connect and wire up control socket listener
        self._connect_control_endpoint()
        # We should not have any pending response timeouts
        assert self._response_timeout_handle is None
        self.is_connected = True
        # Kick off an initial "who-me" request
        self._who_me()
        # Create and start the heartbeat callback; heartbeat_period is in
        # milliseconds (PeriodicCallback's native unit).
        self._heartbeat_callback = tornado.ioloop.PeriodicCallback(
            self._who_me, self.heartbeat_period, self._io_loop)
        self._heartbeat_callback.start()
        # Finally, signal connection
        self.on_connect.send(self)
def disconnect(self):
"""Explicitly disconnect the client."""
if not self.is_connected:
log.warn('Client not connected')
return
# Cancel any pending response timeout
if self._response_timeout_handle is not None:
self._io_loop.remove_timeout(self._response_timeout_handle)
# Stop heartbeat callback
if self._heartbeat_callback is not None:
self._heartbeat_callback.stop()
self._heartbeat_callback = None
# TODO: check if disconnect() on the sockets is necessary
self._control_stream = None
self.is_connected = False
# Finally, signal disconnection
self.on_disconnect.send(self)
_KinectRecord = namedtuple('_KinectRecord', ['endpoints', 'streams'])
    def _who_me(self):
        """Request the list of endpoints from the server.

        Sends a 'who' request; the 'me' reply refreshes the server name, the
        server endpoint map and the per-kinect records, emitting
        on_add_kinect/on_remove_kinect for any device set changes. Also used
        as the periodic heartbeat.
        """
        # Handler function
        def got_me(type, payload):
            if type != MessageType.me:
                raise ProtocolError('Expected me list but got "{0}" instead'.format(type))
            log.info('Received "me" from server')
            if payload is None or 'version' not in payload or payload['version'] != 1:
                log.error('me had wrong or missing version')
                raise ProtocolError('unknown server protocol')
            # Fill in server information
            self.server_name = payload['name']
            log.info('Server identifies itself as "{0}"'.format(self.server_name))
            # Remember the old kinect ids
            old_kinect_ids = set(self._kinect_records.keys())
            # Extract kinects
            devices = payload['devices']
            new_records = {}
            for device in devices:
                # Fetch or create the record for this device
                try:
                    record = self._kinect_records[device['id']]
                except KeyError:
                    record = Client._KinectRecord(endpoints={}, streams={})
                new_records[device['id']] = record
                # Fill in endpoint and stream dictionaries for device
                for ep_type in EndpointType:
                    # See if this endpoint is in the payload
                    ep = None
                    try:
                        ep = device['endpoints'][ep_type.name]
                    except KeyError:
                        pass
                    if ep is None and ep_type in record.endpoints:
                        # Endpoint has gone away but was there
                        del record.endpoints[ep_type]
                        del record.streams[ep_type]
                    elif ep is not None:
                        # Is this a new or changed endpoint?
                        if ep_type not in record.endpoints or record.endpoints[ep_type] != ep:
                            # Record new/changed endpoint
                            record.endpoints[ep_type] = ep
                            # Initially there are no streams for any endpoint to avoid
                            # subscribing to services we do not need.
                            record.streams[ep_type] = None
            # Update kinect records
            self._kinect_records = new_records
            # Fill in our server endpoint list from payload
            endpoints = payload['endpoints']
            for endpoint_type in EndpointType:
                try:
                    self.endpoints[endpoint_type] = endpoints[endpoint_type.name]
                    log.info('Server added "{0.name}" endpoint at "{1}"'.format(
                        endpoint_type, endpoints[endpoint_type.name]))
                except KeyError:
                    # Skip endpoints we don't know about
                    pass
            # Send {add,remove}_kinect events...
            new_kinect_ids = set(self._kinect_records.keys())
            # ... for devices in new list and not in old
            for k_id in new_kinect_ids.difference(old_kinect_ids):
                self.on_add_kinect.send(self, kinect_id=k_id)
            # ... for devices in old list and not in new
            for k_id in old_kinect_ids.difference(new_kinect_ids):
                self.on_remove_kinect.send(self, kinect_id=k_id)
        # Send packet
        log.info('Requesting server identity')
        self._control_send(MessageType.who, recv_cb=got_me)
def _ensure_connected(self):
if not self.is_connected:
raise RuntimeError('Client is not connected')
    def __enter__(self):
        # Context-manager entry: connect and return the connected client.
        self.connect()
        return self
    def __exit__(self, type, value, traceback):
        # Context-manager exit: always disconnect; any exception propagates.
        self.disconnect()
    def _connect_control_endpoint(self):
        # (Re)create the REQ control socket and wire up the reply handler.
        control_endpoint = self.endpoints[EndpointType.control]
        # Disconnect any existing socket (or, rather, let GC do it)
        if self._control_stream is not None:
            self._control_stream = None
        # Create, connect and wire up control socket listener
        control_socket = self._zmq_ctx.socket(zmq.REQ)
        control_socket.connect(control_endpoint)
        self._control_stream = ZMQStream(control_socket, self._io_loop)
        self._control_stream.on_recv(self._control_recv)
def _control_send(self, type, payload=None, recv_cb=None):
"""Send *payload* formatted as a JSON object along the control socket.
If *recv_cb* is not *None*, it is a callable which is called with the
type and Python object representing the response payload from the
server. If there is no payload, None is passed.
"""
# (Re-)set response timeout
if self._response_timeout_handle is not None:
self._io_loop.remove_timeout(self._response_timeout_handle)
self._io_loop.call_later(self.response_timeout * 1e-3, self._response_timed_out)
# Add the response handler and send the message
self._response_handlers.append(recv_cb)
self._control_stream.send_multipart(make_msg(type, payload))
    def _control_recv(self, msg):
        """Called when there is something to be received on the control socket."""
        # If we're disconnected, then just drop the incoming packet.
        if not self.is_connected:
            return
        # Parse message into a (type, payload) pair; payload may be None.
        type, payload = parse_msg(msg)
        # Pop the handler queued by the matching _control_send call.  A REQ
        # socket strictly alternates send/recv, so a pending handler should
        # always exist here; an IndexError would indicate a protocol bug.
        handler = self._response_handlers.popleft()
        if handler is not None:
            handler(type, payload)
def _response_timed_out(self):
"""Called when the response timeout | |
# This file is part of the Indico plugins.
# Copyright (C) 2020 - 2021 CERN and ENEA
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from flask import flash, has_request_context, request, session
from markupsafe import escape
from requests.exceptions import HTTPError
from sqlalchemy.orm.attributes import flag_modified
from wtforms.fields import TextAreaField
from wtforms.fields.core import BooleanField
from wtforms.fields.html5 import URLField
from wtforms.fields.simple import StringField
from wtforms.validators import URL, DataRequired, Optional, ValidationError
from indico.core import signals
from indico.core.auth import multipass
from indico.core.errors import UserValueError
from indico.core.plugins import IndicoPlugin, render_plugin_template, url_for_plugin
from indico.modules.events.views import WPSimpleEventDisplay
from indico.modules.vc import VCPluginMixin, VCPluginSettingsFormBase
from indico.modules.vc.exceptions import VCRoomError, VCRoomNotFoundError
from indico.modules.vc.models.vc_rooms import VCRoom, VCRoomStatus
from indico.modules.vc.views import WPVCEventPage, WPVCManageEvent
from indico.util.user import principal_from_identifier
from indico.web.forms.fields import IndicoEnumSelectField, IndicoPasswordField, TextListField
from indico.web.forms.validators import HiddenUnless
from indico.web.forms.widgets import CKEditorWidget, SwitchWidget
from indico_vc_zoom import _
from indico_vc_zoom.api import ZoomIndicoClient
from indico_vc_zoom.blueprint import blueprint
from indico_vc_zoom.cli import cli
from indico_vc_zoom.forms import VCRoomAttachForm, VCRoomForm
from indico_vc_zoom.notifications import notify_host_start_url
from indico_vc_zoom.util import (UserLookupMode, ZoomMeetingType, fetch_zoom_meeting, find_enterprise_email,
gen_random_password, get_alt_host_emails, get_schedule_args, get_url_data_args,
process_alternative_hosts, update_zoom_meeting)
class PluginSettingsForm(VCPluginSettingsFormBase):
    """Admin settings form for the Zoom videoconference plugin."""

    # Layout of the settings page: (section title, [field names]).  Field
    # names not defined here (e.g. notification_emails, managers, acl) come
    # from VCPluginSettingsFormBase.
    _fieldsets = [
        (_('API Credentials'), ['api_key', 'api_secret', 'webhook_token']),
        (_('Zoom Account'), ['user_lookup_mode', 'email_domains', 'authenticators', 'enterprise_domain',
                             'allow_webinars', 'phone_link']),
        (_('Room Settings'), ['mute_audio', 'mute_host_video', 'mute_participant_video', 'join_before_host',
                              'waiting_room']),
        (_('Notifications'), ['creation_email_footer', 'send_host_url', 'notification_emails']),
        (_('Access'), ['managers', 'acl'])
    ]
    # --- API credentials ---
    api_key = StringField(_('API Key'), [DataRequired()])
    api_secret = IndicoPasswordField(_('API Secret'), [DataRequired()], toggle=True)
    webhook_token = IndicoPasswordField(_('Webhook Token'), toggle=True,
                                        description=_("Specify Zoom's webhook token if you want live updates"))
    # --- Indico user -> Zoom account mapping ---
    # email_domains / authenticators+enterprise_domain are mutually exclusive
    # strategies, toggled via HiddenUnless on user_lookup_mode.
    user_lookup_mode = IndicoEnumSelectField(_('User lookup mode'), [DataRequired()], enum=UserLookupMode,
                                             description=_('Specify how Indico should look up the zoom user that '
                                                           'corresponds to an Indico user.'))
    email_domains = TextListField(_('E-mail domains'),
                                  [HiddenUnless('user_lookup_mode', UserLookupMode.email_domains), DataRequired()],
                                  description=_('List of e-mail domains which can use the Zoom API. Indico attempts '
                                                'to find Zoom accounts using all email addresses of a user which use '
                                                'those domains.'))
    authenticators = TextListField(_('Indico identity providers'),
                                   [HiddenUnless('user_lookup_mode', UserLookupMode.authenticators), DataRequired()],
                                   description=_('Identity providers from which to get usernames. '
                                                 'Indico queries those providers using the email addresses of the user '
                                                 'and attempts to find Zoom accounts having an email address with the '
                                                 'format username@enterprise-domain.'))
    enterprise_domain = StringField(_('Enterprise domain'),
                                    [HiddenUnless('user_lookup_mode', UserLookupMode.authenticators), DataRequired()],
                                    description=_('The domain name used together with the usernames from the Indico '
                                                  'identity provider'))
    allow_webinars = BooleanField(_('Allow Webinars (Experimental)'),
                                  widget=SwitchWidget(),
                                  description=_('Allow webinars to be created through Indico. Use at your own risk.'))
    # --- Default room behaviour ---
    mute_audio = BooleanField(_('Mute audio'),
                              widget=SwitchWidget(),
                              description=_('Participants will join the VC room muted by default '))
    mute_host_video = BooleanField(_('Mute video (host)'),
                                   widget=SwitchWidget(),
                                   description=_('The host will join the VC room with video disabled'))
    mute_participant_video = BooleanField(_('Mute video (participants)'),
                                          widget=SwitchWidget(),
                                          description=_('Participants will join the VC room with video disabled'))
    join_before_host = BooleanField(_('Join Before Host'),
                                    widget=SwitchWidget(),
                                    description=_('Allow participants to join the meeting before the host starts the '
                                                  'meeting. Only used for scheduled or recurring meetings.'))
    waiting_room = BooleanField(_('Waiting room'),
                                widget=SwitchWidget(),
                                description=_('Participants may be kept in a waiting room by the host'))
    # --- Notifications ---
    creation_email_footer = TextAreaField(_('Creation email footer'), widget=CKEditorWidget(),
                                          description=_('Footer to append to emails sent upon creation of a VC room'))
    send_host_url = BooleanField(_('Send host URL'),
                                 widget=SwitchWidget(),
                                 description=_('Whether to send an e-mail with the Host URL to the meeting host upon '
                                               'creation of a meeting'))
    phone_link = URLField(_('Join via phone'), [Optional(), URL()],
                          description=_('Link to the list of VidyoVoice phone numbers'))
    def validate_authenticators(self, field):
        """Ensure every configured identity provider is known to multipass."""
        invalid = set(field.data) - set(multipass.identity_providers)
        if invalid:
            raise ValidationError(_('Invalid identity providers: {}').format(escape(', '.join(invalid))))
class ZoomPlugin(VCPluginMixin, IndicoPlugin):
    """Zoom
    Zoom Plugin for Indico."""
    # NOTE: the class docstring above is user-visible plugin metadata
    # (title/description in the admin UI); keep its wording stable.
    configurable = True  # exposes PluginSettingsForm in the admin area
    settings_form = PluginSettingsForm
    vc_room_form = VCRoomForm
    vc_room_attach_form = VCRoomAttachForm
    friendly_name = 'Zoom'
    # Plugin defaults merged on top of the generic VC plugin settings.
    default_settings = dict(VCPluginMixin.default_settings, **{
        # API credentials are intentionally empty until configured by an admin
        'api_key': '',
        'api_secret': '',
        'webhook_token': '',
        'user_lookup_mode': UserLookupMode.email_domains,
        'email_domains': [],
        'authenticators': [],
        'enterprise_domain': '',
        'allow_webinars': False,
        # Default per-room behaviour (see PluginSettingsForm for semantics)
        'mute_host_video': True,
        'mute_audio': True,
        'mute_participant_video': True,
        'join_before_host': True,
        'waiting_room': False,
        'creation_email_footer': None,
        'send_host_url': False,
        'phone_link': '',
    })
    def init(self):
        """Wire up signal handlers, template hooks and JS bundles."""
        super().init()
        self.connect(signals.plugin.cli, self._extend_indico_cli)
        self.connect(signals.event.times_changed, self._times_changed)
        self.connect(signals.event.metadata_postprocess, self._event_metadata_postprocess)
        self.template_hook('event-vc-room-list-item-labels', self._render_vc_room_labels)
        # Inject the plugin JS on every page type where Zoom rooms can appear.
        self.inject_bundle('main.js', WPSimpleEventDisplay)
        self.inject_bundle('main.js', WPVCEventPage)
        self.inject_bundle('main.js', WPVCManageEvent)
@property
def logo_url(self):
return url_for_plugin(self.name + '.static', filename='images/zoom_logo.png')
@property
def icon_url(self):
return url_for_plugin(self.name + '.static', filename='images/zoom_logo.png')
def create_form(self, event, existing_vc_room=None, existing_event_vc_room=None):
"""Override the default room form creation mechanism."""
if existing_vc_room and request.method != 'POST':
try:
self.refresh_room(existing_vc_room, event)
except VCRoomNotFoundError as exc:
raise UserValueError(str(exc))
except VCRoomError:
# maybe a temporary issue - we just keep going and fail when saving in
# case it's something more persistent
pass
form = super().create_form(
event,
existing_vc_room=existing_vc_room,
existing_event_vc_room=existing_event_vc_room
)
if existing_vc_room:
form.host_choice.render_kw = {'disabled': True}
form.host_user.render_kw = {'disabled': True}
if self.settings.get('allow_webinars'):
# if we're editing a VC room, we will not allow the meeting type to be changed
form.meeting_type.render_kw = {'disabled': True}
if form.data['meeting_type'] == 'webinar':
# webinar hosts cannot be changed through the API
form.host_choice.render_kw = {'disabled': True}
form.host_user.render_kw = {'disabled': True}
elif not form.is_submitted():
form.password.data = <PASSWORD>()
return form
def get_extra_delete_msg(self, vc_room, event_vc_room):
host = principal_from_identifier(vc_room.data['host'])
if host == session.user or len(vc_room.events) <= 1:
return ''
return render_plugin_template('vc_zoom:extra_delete_msg.html', host=host.full_name)
    def _extend_indico_cli(self, sender, **kwargs):
        # Handler for `signals.plugin.cli`: expose this plugin's click group.
        return cli
    def update_data_association(self, event, vc_room, room_assoc, data):
        """Sync the Zoom meeting when a room<->event association changes.

        Handles attaching an existing meeting to an additional event and
        re-linking a booking, adjusting the meeting's schedule accordingly.
        """
        # XXX: This feels slightly hacky. Maybe we should change the API on the core?
        association_is_new = room_assoc.vc_room is None
        old_link = room_assoc.link_object
        # in a new room, `meeting_type` comes in `data`, otherwise it's already in the VCRoom
        is_webinar = data.get('meeting_type', vc_room.data and vc_room.data.get('meeting_type')) == 'webinar'
        super().update_data_association(event, vc_room, room_assoc, data)
        if vc_room.data:
            try:
                # this is not a new room
                if association_is_new:
                    # this means we are updating an existing meeting with a new vc_room-event association
                    # -> detach it from any fixed schedule ("recurring, no fixed time")
                    update_zoom_meeting(vc_room.data['zoom_id'], {
                        'start_time': None,
                        'duration': None,
                        'type': (
                            ZoomMeetingType.recurring_webinar_no_time
                            if is_webinar
                            else ZoomMeetingType.recurring_meeting_no_time
                        )
                    })
                elif room_assoc.link_object != old_link:
                    # the booking should now be linked to something else
                    new_schedule_args = (get_schedule_args(room_assoc.link_object)
                                         if room_assoc.link_object.start_dt
                                         else {})
                    meeting = fetch_zoom_meeting(vc_room)
                    current_schedule_args = {k: meeting[k] for k in {'start_time', 'duration'} if k in meeting}
                    # check whether the start time / duration of the scheduled meeting differs
                    if new_schedule_args != current_schedule_args:
                        if new_schedule_args:
                            update_zoom_meeting(vc_room.data['zoom_id'], new_schedule_args)
                        else:
                            # new link object has no start date -> back to "no fixed time"
                            update_zoom_meeting(vc_room.data['zoom_id'], {
                                'start_time': None,
                                'duration': None,
                                'type': (
                                    ZoomMeetingType.recurring_webinar_no_time
                                    if is_webinar
                                    else ZoomMeetingType.recurring_meeting_no_time
                                )
                            })
            except VCRoomNotFoundError as exc:
                raise UserValueError(str(exc)) from exc
        # password_visibility is per-association, not per-room
        # NOTE(review): pop() without default assumes the form always supplies
        # `password_visibility` — confirm against VCRoomForm.
        room_assoc.data['password_visibility'] = data.pop('password_visibility')
        flag_modified(room_assoc, 'data')
    def update_data_vc_room(self, vc_room, data, is_new=False):
        """Move whitelisted form *data* entries into ``vc_room.data``.

        Webinars only allow a subset of fields to change; for them, host and
        meeting type are only writable on creation.
        """
        super().update_data_vc_room(vc_room, data)
        fields = {'description', 'password'}
        # we may end up not getting a meeting_type from the form
        # (i.e. webinars are disabled)
        data.setdefault('meeting_type', 'regular' if is_new else vc_room.data['meeting_type'])
        if data['meeting_type'] == 'webinar':
            fields |= {'mute_host_video'}
            if is_new:
                fields |= {'host', 'meeting_type'}
        else:
            fields |= {
                'meeting_type', 'host', 'mute_audio', 'mute_participant_video', 'mute_host_video', 'join_before_host',
                'waiting_room'
            }
        # consume the whitelisted keys from the form payload (pop mutates data)
        for key in fields:
            if key in data:
                vc_room.data[key] = data.pop(key)
        flag_modified(vc_room, 'data')
    def create_room(self, vc_room, event):
        """Create a new Zoom room for an event, given a VC room.
        In order to create the Zoom room, the function will try to get
        a valid e-mail address for the user in question, which can be
        used with the Zoom API.
        :param vc_room: the VC room from which to create the Zoom room
        :param event: the event the Zoom room will be attached to
        """
        client = ZoomIndicoClient()
        host = principal_from_identifier(vc_room.data['host'])
        host_email = find_enterprise_email(host)
        # get the object that this booking is linked to
        vc_room_assoc = vc_room.events[0]
        link_obj = vc_room_assoc.link_object
        is_webinar = vc_room.data.setdefault('meeting_type', 'regular') == 'webinar'
        # a link object with a start date gets a fixed schedule; otherwise the
        # meeting/webinar is created as "recurring with no fixed time"
        scheduling_args = get_schedule_args(link_obj) if link_obj.start_dt else {}
        try:
            settings = {
                'host_video': not vc_room.data['mute_host_video'],
            }
            kwargs = {}
            if is_webinar:
                kwargs['type'] = (ZoomMeetingType.webinar
                                  if scheduling_args
                                  else ZoomMeetingType.recurring_webinar_no_time)
                # webinars have no `schedule_for`; the host becomes an alternative host
                settings['alternative_hosts'] = host_email
            else:
                kwargs = {
                    'type': (
                        ZoomMeetingType.scheduled_meeting
                        if scheduling_args
                        else ZoomMeetingType.recurring_meeting_no_time
                    ),
                    'schedule_for': host_email
                }
                settings.update({
                    'mute_upon_entry': vc_room.data['mute_audio'],
                    'participant_video': not vc_room.data['mute_participant_video'],
                    'waiting_room': vc_room.data['waiting_room'],
                    'join_before_host': self.settings.get('join_before_host'),
                })
            kwargs.update({
                'topic': vc_room.name,
                'agenda': vc_room.data['description'],
                'password': vc_room.data['password'],
                'timezone': event.timezone,
                'settings': settings
            })
            kwargs.update(scheduling_args)
            if is_webinar:
                meeting_obj = client.create_webinar(host_email, **kwargs)
            else:
                meeting_obj = client.create_meeting(host_email, **kwargs)
        except HTTPError as e:
            self.logger.exception('Error creating Zoom Room: %s', e.response.content)
            raise VCRoomError(_('Could not create the room in Zoom. Please contact support if the error persists'))
        # persist the Zoom identifiers/URLs so later updates can find the meeting
        vc_room.data.update({
            'zoom_id': str(meeting_obj['id']),
            'start_url': meeting_obj['start_url'],
            'host': host.identifier,
            'alternative_hosts': process_alternative_hosts(meeting_obj['settings'].get('alternative_hosts', ''))
        })
        vc_room.data.update(get_url_data_args(meeting_obj['join_url']))
        flag_modified(vc_room, 'data')
        # e-mail Host URL to meeting host
        if self.settings.get('send_host_url'):
            notify_host_start_url(vc_room)
def update_room(self, vc_room, event):
client = ZoomIndicoClient()
is_webinar = vc_room.data['meeting_type'] == 'webinar'
zoom_meeting = fetch_zoom_meeting(vc_room, client=client, is_webinar=is_webinar)
changes = {}
if vc_room.name != zoom_meeting['topic']:
changes['topic'] = vc_room.name
if vc_room.data['description'] != zoom_meeting.get('agenda', ''):
changes['agenda'] = vc_room.data['description']
if vc_room.data['password'] != zoom_meeting['password']:
changes['password'] | |
attacks. The following resource gives a detailed insight on secure coding practices. https://wiki.sei.cmu.edu/confluence/display/seccode/Top+10+Secure+Coding+Practices"],
[39, "Hackers will be able to steal data from the backend and also they can authenticate themselves to the website and can impersonate as any user since they have total control over the backend. They can even wipe out the entire database. Attackers can also steal cookie information of an authenticated user and they can even redirect the target to any malicious address or totally deface the application.",
"Proper input validation has to be done prior to directly querying the database information. A developer should remember not to trust an end-user's input. By following a secure coding methodology attacks like SQLi, XSS and BSQLi. The following resource guides on how to implement secure coding methodology on application development. https://wiki.sei.cmu.edu/confluence/display/seccode/Top+10+Secure+Coding+Practices"],
[40, "Attackers exploit the vulnerability in BASH to perform remote code execution on the target. An experienced attacker can easily take over the target system and access the internal sources of the machine",
"This vulnerability can be mitigated by patching the version of BASH. The following resource gives an indepth analysis of the vulnerability and how to mitigate it. https://www.symantec.com/connect/blogs/shellshock-all-you-need-know-about-bash-bug-vulnerability https://www.digitalocean.com/community/tutorials/how-to-protect-your-server-against-the-shellshock-bash-vulnerability"],
[41, "Gives attacker an idea on how the address scheming is done internally on the organizational network. Discovering the private addresses used within an organization can help attackers in carrying out network-layer attacks aiming to penetrate the organization's internal infrastructure.",
"Restrict the banner information to the outside world from the disclosing service. More information on mitigating this vulnerability can be found here. https://portswigger.net/kb/issues/00600300_private-ip-addresses-disclosed"],
[42, "There are chances for an attacker to manipulate files on the webserver.",
"It is recommended to disable the HTTP PUT and DEL methods incase if you don't use any REST API Services. Following resources helps you how to disable these methods. http://www.techstacks.com/howto/disable-http-methods-in-tomcat.html https://docs.oracle.com/cd/E19857-01/820-5627/gghwc/index.html https://developer.ibm.com/answers/questions/321629/how-to-disable-http-methods-head-put-delete-option/"],
[43, "Attackers try to learn more about the target from the amount of information exposed in the headers. An attacker may know what type of tech stack a web application is emphasizing and many other information.",
"Banner Grabbing should be restricted and access to the services from outside would should be made minimum."],
[44, "An attacker who successfully exploited this vulnerability could read data, such as the view state, which was encrypted by the server. This vulnerability can also be used for data tampering, which, if successfully exploited, could be used to decrypt and tamper with the data encrypted by the server.",
"Microsoft has released a set of patches on their website to mitigate this issue. The information required to fix this vulnerability can be inferred from this resource. https://docs.microsoft.com/en-us/security-updates/securitybulletins/2010/ms10-070"],
[45, "Any outdated web server may contain multiple vulnerabilities as their support would've been ended. An attacker may make use of such an opportunity to leverage attacks.",
"It is highly recommended to upgrade the web server to the available latest version."],
[46, "Hackers will be able to manipulate the URLs easily through a GET/POST request. They will be able to inject multiple attack vectors in the URL with ease and able to monitor the response as well",
"By ensuring proper sanitization techniques and employing secure coding practices it will be impossible for the attacker to penetrate through. The following resource gives a detailed insight on secure coding practices. https://wiki.sei.cmu.edu/confluence/display/seccode/Top+10+Secure+Coding+Practices"],
[47, "Since the attacker has knowledge about the particular type of backend the target is running, they will be able to launch a targetted exploit for the particular version. They may also try to authenticate with default credentials to get themselves through.",
"Timely security patches for the backend has to be installed. Default credentials has to be changed. If possible, the banner information can be changed to mislead the attacker. The following resource gives more information on how to secure your backend. http://kb.bodhost.com/secure-database-server/"],
[48, "Attackers may launch remote exploits to either crash the service or tools like ncrack to try brute-forcing the password on the target.",
"It is recommended to block the service to outside world and made the service accessible only through the a set of allowed IPs only really neccessary. The following resource provides insights on the risks and as well as the steps to block the service. https://www.perspectiverisk.com/remote-desktop-service-vulnerabilities/"],
[49, "Hackers will be able to read community strings through the service and enumerate quite an information from the target. Also, there are multiple Remote Code Execution and Denial of Service vulnerabilities related to SNMP services.",
"Use a firewall to block the ports from the outside world. The following article gives wide insight on locking down SNMP service. https://www.techrepublic.com/article/lock-it-down-dont-allow-snmp-to-compromise-network-security/"],
[50, "Attackers will be able to find the logs and error information generated by the application. They will also be able to see the status codes that was generated on the application. By combining all these information, the attacker will be able to leverage an attack.",
"By restricting access to the logger application from the outside world will be more than enough to mitigate this weakness."],
[51, "Cyber Criminals mainly target this service as it is very easier for them to perform a remote attack by running exploits. WannaCry Ransomware is one such example.",
"Exposing SMB Service to the outside world is a bad idea, it is recommended to install latest patches for the service in order not to get compromised. The following resource provides a detailed information on SMB Hardening concepts. https://kb.iweb.com/hc/en-us/articles/115000274491-Securing-Windows-SMB-and-NetBios-NetBT-Services"]
]
#vul_remed_info('c',50)
#sys.exit(1)
# Tool Set
# NOTE(review): "golismero" and "davtest" appear twice, so their availability
# check runs twice — harmless but wasteful; confirm before deduplicating.
tools_precheck = [
    ["wapiti"], ["whatweb"], ["nmap"], ["golismero"], ["host"], ["wget"], ["uniscan"], ["wafw00f"], ["dirb"], ["davtest"], ["theharvester"], ["xsser"], ["dnsrecon"],["fierce"], ["dnswalk"], ["whois"], ["sslyze"], ["lbd"], ["golismero"], ["dnsenum"],["dmitry"], ["davtest"], ["nikto"], ["dnsmap"]
]
# Shuffling Scan Order (starts)
# The four parallel lists are zipped first so each tool keeps its matching
# command/response/status after the shuffle.
scan_shuffle = list(zip(tool_names, tool_cmd, tool_resp, tool_status))
random.shuffle(scan_shuffle)
tool_names, tool_cmd, tool_resp, tool_status = zip(*scan_shuffle)
# Cross verification in case the parallel lists get out of sync.
# NOTE(review): `/ 3` is float division under Python 3 — confirm downstream
# comparisons expect a float before changing to `// 3`.
tool_checks = (len(tool_names) + len(tool_resp) + len(tool_status)) / 3
# Shuffling Scan Order (ends)
# Tool Head Pointer: (can be increased but certain tools will be skipped)
tool = 0
# Run Test
runTest = 1
# For accessing list/dictionary elements
arg1 = 0
arg2 = 1
arg3 = 2
arg4 = 3
arg5 = 4
arg6 = 5
# Detected Vulnerabilities [will be dynamically populated]
rs_vul_list = list()
rs_vul_num = 0
rs_vul = 0
# Total Time Elapsed
rs_total_elapsed = 0
# Tool Pre Checker (index into tools_precheck during the availability loop)
rs_avail_tools = 0
# Checks Skipped
rs_skipped_checks = 0
if len(sys.argv) == 1 :
logo()
helper()
else:
target = sys.argv[1].lower()
if target == '--update' or target == '-u' or target == '--u':
logo()
print("Source is updating....Please wait.\n")
spinner.start()
# Checking internet connectivity first...
rs_internet_availability = check_internet()
if rs_internet_availability == 0:
print( "\t"+ bcolors.BG_ERR_TXT + "There seems to be some problem connecting to the internet. Please try again or later." +bcolors.ENDC)
spinner.stop()
sys.exit(1)
cmd = 'sha1sum Source.py | grep .... | cut -c 1-40'
oldversion_hash = subprocess.check_output(cmd, shell=True)
oldversion_hash = oldversion_hash.strip()
os.system('wget -N https://raw.githubusercontent.com/ScorchingShade/Vulnerous-web/master/Source.py -O Source.py > /dev/null 2>&1')
newversion_hash = subprocess.check_output(cmd, shell=True)
newversion_hash = newversion_hash.strip()
if oldversion_hash == newversion_hash :
clear()
print( "\t"+ bcolors.OKBLUE +"You already have the latest version of Source." + bcolors.ENDC)
else:
clear()
print( "\t"+ bcolors.OKGREEN +"Source successfully updated to the latest version." +bcolors.ENDC)
spinner.stop()
sys.exit(1)
elif target == '--help' or target == '-h' or target == '--h':
logo()
helper()
sys.exit(1)
else:
target = url_maker(target)
os.system('rm te* > /dev/null 2>&1') # Clearing previous scan files
os.system('clear')
os.system('setterm -cursor off')
logo()
print( bcolors.BG_HEAD_TXT+"[ Checking Available Security Scanning Tools Phase... Initiated. ]"+bcolors.ENDC)
unavail_tools = 0
unavail_tools_names = list()
while (rs_avail_tools < len(tools_precheck)):
precmd = str(tools_precheck[rs_avail_tools][arg1])
try:
p = subprocess.Popen([precmd], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
output, err = p.communicate()
val = output
val=val+err
except:
print( "\t"+bcolors.BG_ERR_TXT+"Source was terminated abruptly..."+bcolors.ENDC)
sys.exit(1)
if (b"not found" in val):
print( "\t"+bcolors.OKBLUE+tools_precheck[rs_avail_tools][arg1]+bcolors.ENDC+bcolors.BADFAIL+"...unavailable."+bcolors.ENDC)
for scanner_index, scanner_val in enumerate(tool_names):
if scanner_val[2] == tools_precheck[rs_avail_tools][arg1]:
scanner_val[3] = 0 # disabling scanner as it's not available.
unavail_tools_names.append(tools_precheck[rs_avail_tools][arg1])
unavail_tools = unavail_tools + 1
else:
print( "\t"+bcolors.OKBLUE+tools_precheck[rs_avail_tools][arg1]+bcolors.ENDC+bcolors.OKGREEN+"...available."+bcolors.ENDC)
rs_avail_tools = rs_avail_tools + 1
clear()
unavail_tools_names = list(set(unavail_tools_names))
if unavail_tools == 0:
print( "\t"+bcolors.OKGREEN+"All Scanning Tools are available. All vulnerability checks will be performed by Source."+bcolors.ENDC)
else:
print( "\t"+bcolors.WARNING+"Some of these tools "+bcolors.BADFAIL+str(unavail_tools_names)+bcolors.ENDC+bcolors.WARNING+" are unavailable. | |
Summary: 批量创建全局参数
"""
UtilClient.validate_model(request)
return deps_models.BatchcreateConfigGlobalResponse().from_map(
self.do_request('1.0', 'antcloud.deps.config.global.batchcreate', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def batchcreate_config_global_ex_async(
self,
request: deps_models.BatchcreateConfigGlobalRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.BatchcreateConfigGlobalResponse:
"""
Description: 批量创建全局参数
Summary: 批量创建全局参数
"""
UtilClient.validate_model(request)
return deps_models.BatchcreateConfigGlobalResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.config.global.batchcreate', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def batchcreate_config_app(
self,
request: deps_models.BatchcreateConfigAppRequest,
) -> deps_models.BatchcreateConfigAppResponse:
"""
Description: 批量创建应用参数
Summary: 批量创建应用参数
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.batchcreate_config_app_ex(request, headers, runtime)
async def batchcreate_config_app_async(
self,
request: deps_models.BatchcreateConfigAppRequest,
) -> deps_models.BatchcreateConfigAppResponse:
"""
Description: 批量创建应用参数
Summary: 批量创建应用参数
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.batchcreate_config_app_ex_async(request, headers, runtime)
def batchcreate_config_app_ex(
self,
request: deps_models.BatchcreateConfigAppRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.BatchcreateConfigAppResponse:
"""
Description: 批量创建应用参数
Summary: 批量创建应用参数
"""
UtilClient.validate_model(request)
return deps_models.BatchcreateConfigAppResponse().from_map(
self.do_request('1.0', 'antcloud.deps.config.app.batchcreate', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def batchcreate_config_app_ex_async(
self,
request: deps_models.BatchcreateConfigAppRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.BatchcreateConfigAppResponse:
"""
Description: 批量创建应用参数
Summary: 批量创建应用参数
"""
UtilClient.validate_model(request)
return deps_models.BatchcreateConfigAppResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.config.app.batchcreate', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def get_config_sitetree(
self,
request: deps_models.GetConfigSitetreeRequest,
) -> deps_models.GetConfigSitetreeResponse:
"""
Description: 获取当前租户下的站点管理员视角的树形结构:区域(region)=>机房(az)
Summary: 获取当前租户下的站点管理员视角的树形结构
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.get_config_sitetree_ex(request, headers, runtime)
async def get_config_sitetree_async(
self,
request: deps_models.GetConfigSitetreeRequest,
) -> deps_models.GetConfigSitetreeResponse:
"""
Description: 获取当前租户下的站点管理员视角的树形结构:区域(region)=>机房(az)
Summary: 获取当前租户下的站点管理员视角的树形结构
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.get_config_sitetree_ex_async(request, headers, runtime)
def get_config_sitetree_ex(
self,
request: deps_models.GetConfigSitetreeRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.GetConfigSitetreeResponse:
"""
Description: 获取当前租户下的站点管理员视角的树形结构:区域(region)=>机房(az)
Summary: 获取当前租户下的站点管理员视角的树形结构
"""
UtilClient.validate_model(request)
return deps_models.GetConfigSitetreeResponse().from_map(
self.do_request('1.0', 'antcloud.deps.config.sitetree.get', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def get_config_sitetree_ex_async(
self,
request: deps_models.GetConfigSitetreeRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.GetConfigSitetreeResponse:
"""
Description: 获取当前租户下的站点管理员视角的树形结构:区域(region)=>机房(az)
Summary: 获取当前租户下的站点管理员视角的树形结构
"""
UtilClient.validate_model(request)
return deps_models.GetConfigSitetreeResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.config.sitetree.get', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def get_config_tenanttree(
self,
request: deps_models.GetConfigTenanttreeRequest,
) -> deps_models.GetConfigTenanttreeResponse:
"""
Description: 获取当前租户下的租户管理员视角的树形结构:工作空间组(workspaceGroup)=>工作空间(workspace)=>部署单元(cell)
Summary: 获取当前租户下的租户管理员视角的树形结构
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.get_config_tenanttree_ex(request, headers, runtime)
async def get_config_tenanttree_async(
self,
request: deps_models.GetConfigTenanttreeRequest,
) -> deps_models.GetConfigTenanttreeResponse:
"""
Description: 获取当前租户下的租户管理员视角的树形结构:工作空间组(workspaceGroup)=>工作空间(workspace)=>部署单元(cell)
Summary: 获取当前租户下的租户管理员视角的树形结构
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.get_config_tenanttree_ex_async(request, headers, runtime)
def get_config_tenanttree_ex(
self,
request: deps_models.GetConfigTenanttreeRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.GetConfigTenanttreeResponse:
"""
Description: 获取当前租户下的租户管理员视角的树形结构:工作空间组(workspaceGroup)=>工作空间(workspace)=>部署单元(cell)
Summary: 获取当前租户下的租户管理员视角的树形结构
"""
UtilClient.validate_model(request)
return deps_models.GetConfigTenanttreeResponse().from_map(
self.do_request('1.0', 'antcloud.deps.config.tenanttree.get', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def get_config_tenanttree_ex_async(
self,
request: deps_models.GetConfigTenanttreeRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.GetConfigTenanttreeResponse:
"""
Description: 获取当前租户下的租户管理员视角的树形结构:工作空间组(workspaceGroup)=>工作空间(workspace)=>部署单元(cell)
Summary: 获取当前租户下的租户管理员视角的树形结构
"""
UtilClient.validate_model(request)
return deps_models.GetConfigTenanttreeResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.config.tenanttree.get', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def exist_config_app(
self,
request: deps_models.ExistConfigAppRequest,
) -> deps_models.ExistConfigAppResponse:
"""
Description: 检查应用参数是否已存在
Summary: 检查应用参数是否已存在
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.exist_config_app_ex(request, headers, runtime)
async def exist_config_app_async(
self,
request: deps_models.ExistConfigAppRequest,
) -> deps_models.ExistConfigAppResponse:
"""
Description: 检查应用参数是否已存在
Summary: 检查应用参数是否已存在
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.exist_config_app_ex_async(request, headers, runtime)
def exist_config_app_ex(
self,
request: deps_models.ExistConfigAppRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.ExistConfigAppResponse:
"""
Description: 检查应用参数是否已存在
Summary: 检查应用参数是否已存在
"""
UtilClient.validate_model(request)
return deps_models.ExistConfigAppResponse().from_map(
self.do_request('1.0', 'antcloud.deps.config.app.exist', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def exist_config_app_ex_async(
self,
request: deps_models.ExistConfigAppRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.ExistConfigAppResponse:
"""
Description: 检查应用参数是否已存在
Summary: 检查应用参数是否已存在
"""
UtilClient.validate_model(request)
return deps_models.ExistConfigAppResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.config.app.exist', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def exist_config_global(
self,
request: deps_models.ExistConfigGlobalRequest,
) -> deps_models.ExistConfigGlobalResponse:
"""
Description: 检查全局参数是否已存在
Summary: 检查全局参数是否已存在
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.exist_config_global_ex(request, headers, runtime)
async def exist_config_global_async(
self,
request: deps_models.ExistConfigGlobalRequest,
) -> deps_models.ExistConfigGlobalResponse:
"""
Description: 检查全局参数是否已存在
Summary: 检查全局参数是否已存在
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.exist_config_global_ex_async(request, headers, runtime)
def exist_config_global_ex(
self,
request: deps_models.ExistConfigGlobalRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.ExistConfigGlobalResponse:
"""
Description: 检查全局参数是否已存在
Summary: 检查全局参数是否已存在
"""
UtilClient.validate_model(request)
return deps_models.ExistConfigGlobalResponse().from_map(
self.do_request('1.0', 'antcloud.deps.config.global.exist', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def exist_config_global_ex_async(
self,
request: deps_models.ExistConfigGlobalRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.ExistConfigGlobalResponse:
"""
Description: 检查全局参数是否已存在
Summary: 检查全局参数是否已存在
"""
UtilClient.validate_model(request)
return deps_models.ExistConfigGlobalResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.config.global.exist', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def list_workspacegroup(
self,
request: deps_models.ListWorkspacegroupRequest,
) -> deps_models.ListWorkspacegroupResponse:
"""
Description: 列出指定租户下所有环境
Summary: 列出指定租户下所有环境
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.list_workspacegroup_ex(request, headers, runtime)
async def list_workspacegroup_async(
self,
request: deps_models.ListWorkspacegroupRequest,
) -> deps_models.ListWorkspacegroupResponse:
"""
Description: 列出指定租户下所有环境
Summary: 列出指定租户下所有环境
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.list_workspacegroup_ex_async(request, headers, runtime)
def list_workspacegroup_ex(
self,
request: deps_models.ListWorkspacegroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.ListWorkspacegroupResponse:
"""
Description: 列出指定租户下所有环境
Summary: 列出指定租户下所有环境
"""
UtilClient.validate_model(request)
return deps_models.ListWorkspacegroupResponse().from_map(
self.do_request('1.0', 'antcloud.deps.workspacegroup.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def list_workspacegroup_ex_async(
self,
request: deps_models.ListWorkspacegroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.ListWorkspacegroupResponse:
"""
Description: 列出指定租户下所有环境
Summary: 列出指定租户下所有环境
"""
UtilClient.validate_model(request)
return deps_models.ListWorkspacegroupResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.workspacegroup.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def get_workspacegroup(
self,
request: deps_models.GetWorkspacegroupRequest,
) -> deps_models.GetWorkspacegroupResponse:
"""
Description: 查询指定环境信息
Summary: 查询指定环境信息
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.get_workspacegroup_ex(request, headers, runtime)
async def get_workspacegroup_async(
self,
request: deps_models.GetWorkspacegroupRequest,
) -> deps_models.GetWorkspacegroupResponse:
"""
Description: 查询指定环境信息
Summary: 查询指定环境信息
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.get_workspacegroup_ex_async(request, headers, runtime)
def get_workspacegroup_ex(
self,
request: deps_models.GetWorkspacegroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.GetWorkspacegroupResponse:
"""
Description: 查询指定环境信息
Summary: 查询指定环境信息
"""
UtilClient.validate_model(request)
return deps_models.GetWorkspacegroupResponse().from_map(
self.do_request('1.0', 'antcloud.deps.workspacegroup.get', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def get_workspacegroup_ex_async(
self,
request: deps_models.GetWorkspacegroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.GetWorkspacegroupResponse:
"""
Description: 查询指定环境信息
Summary: 查询指定环境信息
"""
UtilClient.validate_model(request)
return deps_models.GetWorkspacegroupResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.workspacegroup.get', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def query_cell(
self,
request: deps_models.QueryCellRequest,
) -> deps_models.QueryCellResponse:
"""
Description: 查询部署单元列表
Summary: 查询部署单元列表
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_cell_ex(request, headers, runtime)
async def query_cell_async(
self,
request: deps_models.QueryCellRequest,
) -> deps_models.QueryCellResponse:
"""
Description: 查询部署单元列表
Summary: 查询部署单元列表
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_cell_ex_async(request, headers, runtime)
def query_cell_ex(
self,
request: deps_models.QueryCellRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryCellResponse:
"""
Description: 查询部署单元列表
Summary: 查询部署单元列表
"""
UtilClient.validate_model(request)
return deps_models.QueryCellResponse().from_map(
self.do_request('1.0', 'antcloud.deps.cell.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_cell_ex_async(
self,
request: deps_models.QueryCellRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryCellResponse:
"""
Description: 查询部署单元列表
Summary: 查询部署单元列表
"""
UtilClient.validate_model(request)
return deps_models.QueryCellResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.cell.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def query_workspace_delta(
self,
request: deps_models.QueryWorkspaceDeltaRequest,
) -> deps_models.QueryWorkspaceDeltaResponse:
"""
Description: 查询环境增量统计信息
Summary: 查询环境增量统计信息
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_workspace_delta_ex(request, headers, runtime)
async def query_workspace_delta_async(
self,
request: deps_models.QueryWorkspaceDeltaRequest,
) -> deps_models.QueryWorkspaceDeltaResponse:
"""
Description: 查询环境增量统计信息
Summary: 查询环境增量统计信息
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_workspace_delta_ex_async(request, headers, runtime)
def query_workspace_delta_ex(
self,
request: deps_models.QueryWorkspaceDeltaRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryWorkspaceDeltaResponse:
"""
Description: 查询环境增量统计信息
Summary: 查询环境增量统计信息
"""
UtilClient.validate_model(request)
return deps_models.QueryWorkspaceDeltaResponse().from_map(
self.do_request('1.0', 'antcloud.deps.workspace.delta.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_workspace_delta_ex_async(
self,
request: deps_models.QueryWorkspaceDeltaRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryWorkspaceDeltaResponse:
"""
Description: 查询环境增量统计信息
Summary: 查询环境增量统计信息
"""
UtilClient.validate_model(request)
return deps_models.QueryWorkspaceDeltaResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.workspace.delta.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def create_workspacegroup(
self,
request: deps_models.CreateWorkspacegroupRequest,
) -> deps_models.CreateWorkspacegroupResponse:
"""
Description: 创建工作空间组。
Summary: 创建工作空间组
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.create_workspacegroup_ex(request, headers, runtime)
async def create_workspacegroup_async(
self,
request: deps_models.CreateWorkspacegroupRequest,
) -> deps_models.CreateWorkspacegroupResponse:
"""
Description: 创建工作空间组。
Summary: 创建工作空间组
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.create_workspacegroup_ex_async(request, headers, runtime)
def create_workspacegroup_ex(
self,
request: deps_models.CreateWorkspacegroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.CreateWorkspacegroupResponse:
"""
Description: 创建工作空间组。
Summary: 创建工作空间组
"""
UtilClient.validate_model(request)
return deps_models.CreateWorkspacegroupResponse().from_map(
self.do_request('1.0', 'antcloud.deps.workspacegroup.create', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def create_workspacegroup_ex_async(
self,
request: deps_models.CreateWorkspacegroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.CreateWorkspacegroupResponse:
"""
Description: 创建工作空间组。
Summary: 创建工作空间组
"""
UtilClient.validate_model(request)
return deps_models.CreateWorkspacegroupResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.workspacegroup.create', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def query_workspacegroup(
self,
request: deps_models.QueryWorkspacegroupRequest,
) -> deps_models.QueryWorkspacegroupResponse:
"""
Description: 查询环境组详细信息
Summary: 查询环境组详细信息
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_workspacegroup_ex(request, headers, runtime)
async def query_workspacegroup_async(
self,
request: deps_models.QueryWorkspacegroupRequest,
) -> deps_models.QueryWorkspacegroupResponse:
"""
Description: 查询环境组详细信息
Summary: 查询环境组详细信息
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_workspacegroup_ex_async(request, headers, runtime)
def query_workspacegroup_ex(
self,
request: deps_models.QueryWorkspacegroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryWorkspacegroupResponse:
"""
Description: 查询环境组详细信息
Summary: 查询环境组详细信息
"""
UtilClient.validate_model(request)
return deps_models.QueryWorkspacegroupResponse().from_map(
self.do_request('1.0', 'antcloud.deps.workspacegroup.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_workspacegroup_ex_async(
self,
request: deps_models.QueryWorkspacegroupRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
| |
" + fastq + "\n")
os.remove(fastq)
def remap_gsnap_bam(bamfn, threads, fastaref, samtofastq, gsnaprefdir, gsnaprefname, mutid='null', paired=True):
    """ call gsnap and samtools to remap .bam

    Converts bamfn back to FASTQ, realigns it with gsnap, converts the SAM
    output to BAM, coordinate-sorts and indexes it, replacing bamfn in place.
    Raises ValueError if the realigned BAM holds fewer reads than the input
    FASTQ(s). Python 2 module (print statements).
    """
    assert os.path.exists(samtofastq)
    assert os.path.exists(gsnaprefdir)
    # refuse to realign an empty BAM
    assert bamreadcount(bamfn) > 0
    sam_out = bamfn + '.realign.sam'
    sort_out = bamfn + '.realign.sorted'
    print "INFO\t" + now() + "\t" + mutid + "\tconverting " + bamfn + " to fastq\n"
    # twofastq=True: paired mode yields two FASTQ filenames (one per mate)
    fastq = bamtofastq(bamfn, samtofastq, threads=threads, paired=paired, twofastq=True)
    sam_cmd = []
    if paired:
        sam_cmd = ['gsnap', '-D', gsnaprefdir, '-d', gsnaprefname, '-t', str(threads), '--quality-protocol=sanger',
                   '-M', '2', '-n', '10', '-B', '2', '-i', '1', '--pairmax-dna=1000', '--terminal-threshold=1000',
                   '--gmap-mode=none', '--clip-overlap', '-A', 'sam', '-a', 'paired', fastq[0], fastq[1]]
    else:
        # NOTE(review): fastq[0] is used here but the sanity check / cleanup below
        # use fastq directly — the two assume different return types from
        # bamtofastq in the unpaired case; verify bamtofastq's unpaired return.
        sam_cmd = ['gsnap', '-D', gsnaprefdir, '-d', gsnaprefname, '-t', str(threads), '--quality-protocol=sanger',
                   '-M', '2', '-n', '10', '-B', '2', '-i', '1', '--terminal-threshold=1000', '--gmap-mode=none',
                   '--clip-overlap', '-A', 'sam', fastq[0]]
    assert len(sam_cmd) > 0
    # samtools view: SAM -> BAM against the reference index; then sort + index
    bam_cmd = ['samtools', 'view', '-bt', fastaref + '.fai', '-o', bamfn, sam_out]
    sort_cmd = ['samtools', 'sort', '-@', str(threads), '-m', '10000000000', bamfn, sort_out]
    idx_cmd = ['samtools', 'index', bamfn]
    print "INFO\t" + now() + "\t" + mutid + "\taligning " + str(fastq) + " with gsnap\n"
    # stream gsnap's stdout straight into the SAM file
    with open(sam_out, 'w') as sam:
        p = subprocess.Popen(sam_cmd, stdout=subprocess.PIPE)
        for line in p.stdout:
            sam.write(line)
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\twriting " + sam_out + " to BAM...\n")
    subprocess.call(bam_cmd)
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tdeleting SAM: " + sam_out + "\n")
    os.remove(sam_out)
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tsorting output: " + ' '.join(sort_cmd) + "\n")
    subprocess.call(sort_cmd)
    # legacy samtools sort appends '.bam' to the requested output prefix
    sort_out += '.bam'
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tremove original bam:" + bamfn + "\n")
    os.remove(bamfn)
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\trename sorted bam: " + sort_out + " to original name: " + bamfn + "\n")
    move(sort_out, bamfn)
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tindexing: " + ' '.join(idx_cmd) + "\n")
    subprocess.call(idx_cmd)
    # check if BAM readcount looks sane
    if paired:
        if bamreadcount(bamfn) < fastqreadcount(fastq[0]) + fastqreadcount(fastq[1]):
            raise ValueError("ERROR\t" + now() + "\t" + mutid + "\tbam readcount < fastq readcount, alignment sanity check failed!\n")
    else:
        if bamreadcount(bamfn) < fastqreadcount(fastq):
            raise ValueError("ERROR\t" + now() + "\t" + mutid + "\tbam readcount < fastq readcount, alignment sanity check failed!\n")
    # clean up the intermediate FASTQ file(s)
    if paired:
        for fq in fastq:
            sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tremoving " + fq + "\n")
            os.remove(fq)
    else:
        sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tremoving " + fastq + "\n")
        os.remove(fastq)
def remap_bowtie2_bam(bamfn, threads, fastaref, samtofastq, bowtie2ref, mutid='null', paired=True):
    """ call bowtie2 and samtools to remap .bam

    Converts bamfn back to FASTQ, realigns it with bowtie2, converts the SAM
    output to BAM, coordinate-sorts and indexes it, replacing bamfn in place.
    Raises ValueError if the realigned BAM holds fewer reads than the input
    FASTQ(s). Python 2 module (print statements).
    """
    # refuse to realign an empty BAM
    assert bamreadcount(bamfn) > 0
    sam_out = bamfn + '.realign.sam'
    sort_out = bamfn + '.realign.sorted'
    print "INFO\t" + now() + "\t" + mutid + "\tconverting " + bamfn + " to fastq\n"
    # twofastq=True: paired mode yields two FASTQ filenames (one per mate)
    fastq = bamtofastq(bamfn, samtofastq, threads=threads, paired=paired, twofastq=True)
    sam_cmd = []
    if paired:
        sam_cmd = ['bowtie2', '-x', bowtie2ref, '-1', fastq[0], '-2', fastq[1], '-S', sam_out]
    else:
        # NOTE(review): fastq[0] is used here but the sanity check / cleanup below
        # use fastq directly — the two assume different return types from
        # bamtofastq in the unpaired case; verify bamtofastq's unpaired return.
        sam_cmd = ['bowtie2', '-x', bowtie2ref, '-U', fastq[0], '-S', sam_out]
    assert len(sam_cmd) > 0
    # samtools view: SAM -> BAM against the reference index; then sort + index
    bam_cmd = ['samtools', 'view', '-bt', fastaref + '.fai', '-o', bamfn, sam_out]
    sort_cmd = ['samtools', 'sort', '-@', str(threads), '-m', '10000000000', bamfn, sort_out]
    idx_cmd = ['samtools', 'index', bamfn]
    print "INFO\t" + now() + "\t" + mutid + "\taligning " + str(fastq) + " with bowtie2\n"
    # stream bowtie2's stdout into the SAM file (bowtie2 also writes via -S)
    with open(sam_out, 'w') as sam:
        p = subprocess.Popen(sam_cmd, stdout=subprocess.PIPE)
        for line in p.stdout:
            sam.write(line)
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\twriting " + sam_out + " to BAM...\n")
    subprocess.call(bam_cmd)
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tdeleting SAM: " + sam_out + "\n")
    os.remove(sam_out)
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tsorting output: " + ' '.join(sort_cmd) + "\n")
    subprocess.call(sort_cmd)
    # legacy samtools sort appends '.bam' to the requested output prefix
    sort_out += '.bam'
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tremove original bam:" + bamfn + "\n")
    os.remove(bamfn)
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\trename sorted bam: " + sort_out + " to original name: " + bamfn + "\n")
    move(sort_out, bamfn)
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tindexing: " + ' '.join(idx_cmd) + "\n")
    subprocess.call(idx_cmd)
    # check if BAM readcount looks sane
    if paired:
        if bamreadcount(bamfn) < fastqreadcount(fastq[0]) + fastqreadcount(fastq[1]):
            raise ValueError("ERROR\t" + now() + "\t" + mutid + "\tbam readcount < fastq readcount, alignment sanity check failed!\n")
    else:
        if bamreadcount(bamfn) < fastqreadcount(fastq):
            raise ValueError("ERROR\t" + now() + "\t" + mutid + "\tbam readcount < fastq readcount, alignment sanity check failed!\n")
    # clean up the intermediate FASTQ file(s)
    if paired:
        for fq in fastq:
            sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tremoving " + fq + "\n")
            os.remove(fq)
    else:
        sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tremoving " + fastq + "\n")
        os.remove(fastq)
#
# Remapping functions paired fastq --> BAM
#
def remap_fastq(name, fq1, fq2, fastaref, outbam, options, mutid='null', threads=1, deltmp=True):
    """Remap a FASTQ pair with the aligner selected by ``name``.

    ``options`` is a dict of aligner-specific required options (validated by
    checkoptions). Returns the chosen remapper's result, or None when ``name``
    does not match a supported aligner.
    """
    checkoptions(name, options, None, sv=True)
    if name == 'backtrack':
        return remap_backtrack_fastq(fq1, fq2, threads, fastaref, outbam, deltmp=deltmp, mutid=mutid)
    elif name == 'mem':
        return remap_bwamem_fastq(fq1, fq2, threads, fastaref, outbam, deltmp=deltmp, mutid=mutid)
    elif name == 'novoalign':
        return remap_novoalign_fastq(fq1, fq2, threads, fastaref, options['novoref'], outbam, deltmp=deltmp, mutid=mutid)
    return None
def remap_bwamem_fastq(fq1, fq2, threads, fastaref, outbam, deltmp=True, mutid='null'):
    """ call bwa mem and samtools to remap .bam

    Aligns fq1/fq2 with 'bwa mem', converts the SAM to BAM, coordinate-sorts
    and indexes outbam, optionally (deltmp) removing the intermediate SAM and
    the input FASTQs. Returns the read count of the final BAM.
    Python 2 module (print statements).
    """
    # unique temp prefix so concurrent invocations do not collide
    basefn = "bwatmp." + str(uuid4())
    sam_out = basefn + '.sam'
    sort_out = basefn + '.sorted'
    # -M: mark shorter split hits secondary; -Y: soft-clip supplementary alignments
    sam_cmd = ['bwa', 'mem', '-t', str(threads), '-M', '-Y', fastaref, fq1, fq2]
    bam_cmd = ['samtools', 'view', '-bt', fastaref + '.fai', '-o', outbam, sam_out]
    sort_cmd = ['samtools', 'sort', '-@', str(threads), '-m', '10000000000', outbam, sort_out]
    idx_cmd = ['samtools', 'index', outbam]
    print "INFO\t" + now() + "\t" + mutid + "\taligning " + fq1 + ',' + fq2 + " with bwa mem"
    # stream bwa's stdout straight into the SAM file
    with open(sam_out, 'w') as sam:
        p = subprocess.Popen(sam_cmd, stdout=subprocess.PIPE)
        for line in p.stdout:
            sam.write(line)
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\twriting " + sam_out + " to BAM...\n")
    subprocess.call(bam_cmd)
    if deltmp:
        sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tdeleting SAM: " + sam_out + "\n")
        os.remove(sam_out)
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tsorting output: " + ' '.join(sort_cmd) + "\n")
    subprocess.call(sort_cmd)
    # legacy samtools sort appends '.bam' to the requested output prefix
    sort_out += '.bam'
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tremove original bam:" + outbam + "\n")
    os.remove(outbam)
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\trename sorted bam: " + sort_out + " to original name: " + outbam + "\n")
    move(sort_out, outbam)
    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tindexing: " + ' '.join(idx_cmd) + "\n")
    subprocess.call(idx_cmd)
    if deltmp:
        sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tremoving " + fq1 + "\n")
        os.remove(fq1)
        sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tremoving " + fq2 + "\n")
        os.remove(fq2)
    return bamreadcount(outbam)
def remap_novoalign_fastq(fq1, fq2, threads, fastaref, novoref, outbam, deltmp=True, mutid='null'):
""" call novoalign and samtools to remap .bam
"""
basefn = "novotmp." + str(uuid4())
sam_out = basefn + '.sam'
sort_out = basefn + '.sorted'
sam_cmd = ['novoalign', '-F', 'STDFQ', '-f', fq1, fq2, '-r', 'Random', '-d', novoref, '-oSAM']
bam_cmd = ['samtools', 'view', '-bt', fastaref + '.fai', '-o', outbam, sam_out]
sort_cmd = ['samtools', 'sort', '-@', str(threads), '-m', '10000000000', outbam, sort_out]
idx_cmd = ['samtools', 'index', outbam]
print "INFO\t" + now() + "\t" + mutid + "\taligning " + fq1 + ',' + fq2 + " with novoalign"
with open(sam_out, 'w') as sam:
p = subprocess.Popen(sam_cmd, stdout=subprocess.PIPE)
for line in p.stdout:
sam.write(line)
sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\twriting " + sam_out + " to BAM...\n")
subprocess.call(bam_cmd)
if deltmp:
sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tdeleting SAM: " + sam_out + "\n")
os.remove(sam_out)
sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tsorting output: " + ' '.join(sort_cmd) + "\n")
subprocess.call(sort_cmd)
sort_out += '.bam'
sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tremove original bam:" + outbam + "\n")
os.remove(outbam)
sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\trename sorted bam: " + sort_out + " to original name: " + outbam + "\n")
move(sort_out, outbam)
sys.stdout.write("INFO\t" + now() + "\t" + mutid + | |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import math
import os
import sys
import time
import warnings
from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
import torch
from torch.distributed import rpc
import torch.multiprocessing as mp
import torch.nn as nn
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
import torchtext
from torchtext.data.utils import get_tokenizer
from fairscale.experimental.nn.ampnet_pipe import pipe
from fairscale.nn.model_parallel import initialize_model_parallel
from fairscale.nn.model_parallel.initialize import get_pipeline_parallel_group
from fairscale.nn.pipe import LazyModule
from fairscale.optim import GradScaler
from fairscale.utils.testing import dist_init, get_worker_map
try:
from fairscale.optim import Adam # type: ignore
can_benchmark = True
except ImportError:
from torch.optim import Adam # type: ignore
can_benchmark = False
def init_random_seed(seed: int):
    """Seed the torch (CPU and CUDA) and numpy RNGs so runs are reproducible."""
    import numpy

    for seed_fn in (torch.manual_seed, torch.cuda.manual_seed, numpy.random.seed):
        seed_fn(seed)
# Number of micro-batch chunks per pipelined batch (presumably passed to the
# Pipe wrapper's chunking — TODO confirm at the call site).
PIPE_CHUNKS = 2
# Module-level counter incremented on every TransformerDecoderLayer.forward call.
iteration_count = 0
class EmbeddingLayer(nn.Embedding):
    """Token embedding whose output is scaled by sqrt(embedding dim).

    Weights are initialized uniformly in [-initrange, initrange].
    """

    def __init__(self, ntoken, ninp, initrange):
        super().__init__(ntoken, ninp)
        self.ninp = ninp
        nn.init.uniform_(self.weight, -initrange, initrange)

    def forward(self, src):
        scale = math.sqrt(self.ninp)
        return super().forward(src) * scale
class PositionalEncodingLayer(nn.Module):
    """Adds fixed sinusoidal positional encodings, then applies dropout.

    The precomputed table ``pe`` has shape (max_len, 1, d_model) so it
    broadcasts over the batch dimension of a (seq, batch, d_model) input.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # registered as a buffer so it moves with the module but is not trained
        self.register_buffer("pe", table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        return self.dropout(x + self.pe[: x.size(0), :])
class TransformerDecoderLayer(nn.TransformerEncoderLayer):
    """Though this class inherits from torch.nn.TransformerEncoderLayer,
    it functions as a decoder in this model: a causal (square subsequent)
    attention mask is built lazily, cached, and rebuilt only when the
    input sequence length changes."""

    def __init__(self, ninp, nhead, nhid, droupout):
        super().__init__(ninp, nhead, nhid, droupout)
        self.src_mask = None

    def _generate_square_subsequent_mask(self, sz):
        allowed = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        # 0.0 where attention is allowed, -inf where it must be blocked
        return allowed.float().masked_fill(allowed == 0, float("-inf")).masked_fill(allowed == 1, 0.0)

    def forward(self, src):
        # bump the module-level counter on every invocation
        global iteration_count
        iteration_count += 1
        needs_new_mask = self.src_mask is None or self.src_mask.size(0) != len(src)
        if needs_new_mask:
            self.src_mask = self._generate_square_subsequent_mask(len(src)).to(src.device)
        return super().forward(src, self.src_mask)
class LinearLayer(nn.Linear):
    """Output projection with zeroed bias and uniformly initialized weights."""

    def __init__(self, ninp, ntoken, initrange):
        super().__init__(ninp, ntoken)
        nn.init.zeros_(self.bias)
        nn.init.uniform_(self.weight, -initrange, initrange)
class TransformerLMSequntial(nn.Sequential):
    """A small GPT-2-style language model expressed as nn.Sequential for
    compatibility with Pipe. (Class name typo preserved for API compatibility.)

    Layout: embedding -> positional encoding -> ndecoder decoder layers -> output projection.
    """

    def __init__(self, ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder):
        modules = [
            EmbeddingLayer(ntokens, ninp, initrange),
            PositionalEncodingLayer(ninp, dropout),
        ]
        modules.extend(TransformerDecoderLayer(ninp, nhead, nhid, dropout) for _ in range(ndecoder))
        modules.append(LinearLayer(ninp, ntokens, initrange))
        super().__init__(*modules)
class MySGD(Optimizer):
    r"""Minimal SGD optimizer (no momentum, no weight decay).

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate (required)
    """

    def __init__(self, params, lr):
        super().__init__(params, dict(lr=lr))

    def __setstate__(self, state):
        super().__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (callable, optional): a closure that re-evaluates the model
                and returns the loss.
        """
        loss = closure() if closure is not None else None
        for group in self.param_groups:
            step_size = group["lr"]
            for param in group["params"]:
                if param.grad is None:
                    continue
                # plain gradient descent: p <- p - lr * grad
                param.data.add_(param.grad.data, alpha=-step_size)
        return loss
class SpectrainSGDMomentum(Optimizer):
    r"""
    Implements a SGD with momentum optimizer with Spectrain based weight
    prediction. Please refer to the spectrain paper: https://arxiv.org/pdf/1809.02839.pdf
    for more details.

    Two weight copies are maintained: the live parameters (``cur_params``) and
    a reference snapshot (``reference_params``). During pipelined training the
    live weights are temporarily replaced by momentum-based predictions of
    their future (forward pass) or past (backward pass) values, and restored
    from the snapshot before the real update in ``step``.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate (required)
        momentum (float): momentum (default=0.9)
    """
    def __init__(self, params, lr, momentum=0.9):
        defaults = dict(lr=lr, momentum=momentum)
        # materialize: the iterable is consumed twice (Optimizer init + copies)
        params = list(params)
        super(SpectrainSGDMomentum, self).__init__(params, defaults)
        self.old_weights = None
        self.cur_params, self.reference_params = self.prep_param_copies(params)
        for group in self.param_groups:
            for p in group["params"]:
                if momentum != 0:
                    param_state = self.state[p]
                    # EMA of gradients; also used for weight prediction below
                    param_state["momentum_buffer"] = torch.zeros_like(p.data)
    def __setstate__(self, state):
        super(SpectrainSGDMomentum, self).__setstate__(state)
    def prep_param_copies(self, params):
        # Returns (trainable params, detached requires-grad clones of them).
        model_params = [param for param in params if param.requires_grad]
        reference_params = [param.clone().detach() for param in model_params]
        for param in reference_params:
            param.requires_grad = True
        return model_params, reference_params
    def copy_params(self, master_params, model_params):
        # In-place copy: data flows from master_params into model_params.
        for model, master in zip(model_params, master_params):
            model.data.copy_(master.data)
    def modify_reference_params_using_current_params(self):
        # snapshot: reference <- current
        self.copy_params(self.cur_params, self.reference_params)
    def modify_current_params_using_reference_params(self):
        # restore: current <- reference
        self.copy_params(self.reference_params, self.cur_params)
    # chunk_index and chunks parameters are unused for the spectrain use case
    def update_weight_using_future_predictions(self, model_index, num_gpus, chunk_index, chunks, forward):
        # Replaces the live weights with spectrain-predicted weights:
        # p <- p - lr * s * momentum_buffer, where s (the multiplier factor) is
        # presumably the number of optimizer steps between now and when this
        # micro-batch's gradient is applied — TODO confirm against the paper.
        if forward:
            # In forward pass:
            # 1. clone weights to self.old_weights
            # 2. predict new weights and modify
            self.modify_reference_params_using_current_params()
            for group in self.param_groups:
                multiplier = group["lr"] * (model_index // 2 + num_gpus - model_index - 1)
                for p in group["params"]:
                    param_state = self.state[p]
                    p.data.sub_(param_state["momentum_buffer"].data, alpha=multiplier)
        else:
            # In backward pass:
            # 1. load old weights
            # 2. predict new weights and modify
            self.modify_current_params_using_reference_params()
            for group in self.param_groups:
                multiplier = group["lr"] * (model_index // 2)
                for p in group["params"]:
                    param_state = self.state[p]
                    p.data.sub_(param_state["momentum_buffer"].data, alpha=multiplier)
    def step(self, weight_prediction=True, closure=None):
        """ Performs a single optimization step.
        Args:
            weight_prediction (bool, optional): Enable weight prediction based updates
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        if weight_prediction:
            # drop the predicted weights; update from the true (reference) weights
            self.modify_current_params_using_reference_params()
        for group in self.param_groups:
            momentum = group["momentum"]
            for p in group["params"]:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if momentum != 0:
                    param_state = self.state[p]
                    buf = param_state["momentum_buffer"]
                    # EMA update: buf <- momentum * buf + (1 - momentum) * grad
                    buf.data.mul_(momentum).add_(d_p, alpha=1 - momentum)
                    d_p = buf
                p.data.add_(d_p, alpha=-group["lr"])
        return loss
class XpipeAdam(Optimizer):
r"""Implements Xpipe approach on top of Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
The implementation of the L2 penalty follows changes proposed in
`Decoupled Weight Decay Regularization`_.
Xpipe details can be found here: https://arxiv.org/abs/1911.04610
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False):
        # Validate hyper-parameters up front, mirroring torch.optim.Adam.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        # materialize: the iterable is consumed several times below
        params = list(params)
        super(XpipeAdam, self).__init__(params, defaults)
        # Xpipe keeps three extra weight copies besides the live parameters:
        # the master copy (ground truth) plus staging copies used during the
        # forward and backward passes respectively.
        self.cur_params, self.master_params = self.prep_param_copies(params)
        _, self.forward_params = self.prep_param_copies(params)
        _, self.backward_params = self.prep_param_copies(params)
        for group in self.param_groups:
            for p in group["params"]:
                param_state = self.state[p]
                param_state["step"] = 0
                # Exponential moving average of gradient values
                param_state["exp_avg"] = torch.zeros_like(p.data)
                # Exponential moving average of squared gradient values
                param_state["exp_avg_sq"] = torch.zeros_like(p.data)
def __setstate__(self, state):
super(Adam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault("amsgrad", False)
def prep_param_copies(self, params):
model_params = [param for param in params if param.requires_grad]
reference_params = [param.clone().detach() for param in model_params]
for param in reference_params:
param.requires_grad = True
return model_params, reference_params
def copy_params(self, master_params, model_params):
for model, master in zip(model_params, master_params):
model.data.copy_(master.data)
def update_weight_using_future_predictions(
self, model_index, num_gpus, current_microbatch_index, microbatches_per_minibatch, forward
):
if forward:
# Forward pass overview:
# if bell-weather:
# 1. read from master copy
# 2. predict and modify
# 3. flush updates to forward copy
# else:
# 1. read from forward copy
if current_microbatch_index % microbatches_per_minibatch == 0:
# read from master copy
self.copy_params(self.master_params, self.cur_params)
microbatch_index = current_microbatch_index + 1
# predict and modify
for group in self.param_groups:
multiplier = group["lr"] * round(
(microbatch_index + num_gpus - model_index / 2 - 2) / microbatch_index
)
beta1, beta2 = group["betas"]
eps = group["eps"]
for p in group["params"]:
param_state = self.state[p]
temp1 = param_state["exp_avg"].data / (1 - beta1)
temp2 = ((param_state["exp_avg_sq"].data / (1 - beta2)) + eps).sqrt()
p.data.addcdiv_(temp1, temp2, value=-multiplier)
# flush updates to forward copy
self.copy_params(self.cur_params, self.forward_params)
else:
self.copy_params(self.forward_params, self.cur_params)
else:
# Backward pass overview:
# if bell-weather:
# 1. read from master copy
# 2. predict and modify
# 3. flush updates to backward copy
# else:
# 1. read from backward copy
if current_microbatch_index % microbatches_per_minibatch == 0:
# read from master copy
self.copy_params(self.master_params, self.cur_params)
microbatch_index = current_microbatch_index + 1
# predict and modify
for group in self.param_groups:
multiplier = group["lr"] * (microbatch_index + model_index // 2 - | |
<filename>configs/tupipa/replay_qemu_mem.py
# Copyright (c) 2015-2016 ARM Limited
# All rights reserved.
#
# modified by <NAME>, 2020-06-19
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from __future__ import absolute_import
import gzip
import six
import optparse
import os
import bz2 # bz2 for qemu trace
from PhysicalTraceRecord import *
import m5
from m5.objects import *
from m5.util import addToPath
from m5.stats import periodicStatDump
addToPath('../')
from common import ObjectList
from common import MemConfig
addToPath('../../util')
import protolib
if six.PY3:
    # Python 3 removed the py2 `long` builtin; alias it so the trace
    # conversion code below can call long(...) unconditionally.
    long = int
# Lele: this is written based on configs/dram/lat_mem_rd.py
#
# this script is helpful to observe the memory latency for various
# levels in a cache hierarchy, and various cache and memory
# configurations, in essence replicating the lmbench lat_mem_rd thrash
# behaviour
# import the packet proto definitions, and if they are not found,
# attempt to generate them automatically by invoking protoc on
# src/proto/packet.proto (requires the protobuf toolchain).
try:
    import packet_pb2
except:
    print("Did not find packet proto definitions, attempting to generate")
    from subprocess import call
    error = call(['protoc', '--python_out=configs/tupipa',
                  '--proto_path=src/proto', 'src/proto/packet.proto'])
    if not error:
        print("Generated packet proto definitions")
        try:
            import google.protobuf
        except:
            print("Please install the Python protobuf module")
            exit(-1)
        import packet_pb2
    else:
        print("Failed to import packet proto definitions")
        exit(-1)
# Command-line interface: memory model/size, trace generation vs. reuse,
# request sizing and per-address repeat counts, tag-cache behaviour, and the
# QEMU trace input selection.
parser = optparse.OptionParser()
parser.add_option("--mem-type", type="choice", default="DDR3_1600_8x8",
                  choices=ObjectList.mem_list.get_names(),
                  help = "type of memory to use")
parser.add_option("--mem-size", action="store", type="string",
                  default="16MB",
                  help="Specify the memory size")
parser.add_option("--reuse-trace", action="store_true",
                  help="Prevent generation of traces and reuse existing")
parser.add_option("--gem5-trace", action="store", type="string",
                  default="m5out/lat_mem_rd.trc.gz",
                  help="The Gem5 Trace file name to be generated or reused")
parser.add_option("--enable-shadow-tags", action="store_true",
                  help="Enable tag cache for shadow memory at L3 layer.")
# NOTE: --req-size defaults to None; create_trace_from_qemu() then takes the
# per-record size from the QEMU trace instead of a fixed value.
parser.add_option("--req-size", action="store", type="int",
                  default=None,
                  help="Specify the size of memory rw request")
parser.add_option("--read-reqs-per-addr", action="store", type="int",
                  default="8",
                  help="Specify the number of read requests per address")
parser.add_option("--write-reqs-per-addr", action="store", type="int",
                  default="8",
                  help="Specify the number of write requests per address")
parser.add_option("--tagcache-inclusive", action="store_true",
                  help="Set tag cache to be inclusive")
parser.add_option("--write-first", action="store_true",
                  help="do a write first for each address generated")
parser.add_option("--one-write-only", action="store_true",
                  help=("do one write only for each generated addr, "
                        "must be used with --write-first"))
parser.add_option("--single-addr", action="store_true",
                  help=("access one address only, for testing"))
parser.add_option("--qemu-trace", action="store", type="string",
                  default="qemu_trace/test.txt.bz2",
                  help="Specify the qemu memory trace file")
parser.add_option("--qemu-trace-is-txt", action="store_true",
                  help="QEMU trace is in txt format")
parser.add_option("--random-trace", action="store_true",
                  help="Use/generate random trace instead of QEMU trace")
(options, args) = parser.parse_args()
# The script is driven entirely by options; any positional argument is a
# usage error.
if args:
    print("Error: script doesn't take any positional arguments")
    # BUG FIX: the original called sys.exit(1), but `sys` is never imported
    # in this script, so the error path itself raised a NameError.  Raising
    # SystemExit directly is exactly what sys.exit() does.
    raise SystemExit(1)
# --one-write-only only refines --write-first, so it is invalid on its own.
if (options.one_write_only and not options.write_first):
    print("Error: --one-write-only must be used with --write-first")
    raise SystemExit(1)
# start by creating the system itself, using a multi-layer 2.0 GHz
# crossbar, delivering 64 bytes / 3 cycles (one header cycle) which
# amounts to 42.7 GByte/s per layer and thus per port
system = System(membus = SystemXBar(width = 32))
system.clk_domain = SrcClockDomain(clock = '2.0GHz',
                                   voltage_domain =
                                   VoltageDomain(voltage = '1V'))
mem_range = AddrRange(options.mem_size)
print("Mem Range: ", int(mem_range.end))
system.mem_ranges = [mem_range]
# do not worry about reserving space for the backing store
system.mmap_using_noreserve = True
# currently not exposed as command-line options, set here for now
# (MemConfig.config_mem reads these attributes off the options object)
options.mem_channels = 1
options.mem_ranks = 1
options.external_memory_system = 0
options.tlm_memory = 0
options.elastic_trace_en = 0
MemConfig.config_mem(options, system)
# there is no point slowing things down by saving any data
for ctrl in system.mem_ctrls:
    ctrl.null = True
    # the following assumes that we are using the native DRAM
    # controller, check to be sure
    if isinstance(ctrl, m5.objects.DRAMCtrl):
        # make the DRAM refresh interval sufficiently infinite to avoid
        # latency spikes
        ctrl.tREFI = '100s'
# set an appropriate burst length in bytes; also used as the cache line size
burst_size = 64
system.cache_line_size = burst_size
# lazy version to check if an integer is a power of two
def is_pow2(num):
    """Return True iff num is a (positive) power of two, via the n & (n-1) bit trick."""
    if num == 0:
        return False
    return (num & (num - 1)) == 0
# assume we start every range at 0
max_range = int(mem_range.end)
# start at a size of 4 kByte, and go up till we hit the max, increase
# the step every time we hit a power of two
# min_range = 4096
# Lele: keep only one range (the sweep below is intentionally disabled)
ranges = [max_range]
#step = 1024
#while ranges[-1] < max_range:
# new_range = ranges[-1] + step
# if is_pow2(new_range):
# step *= 2
# ranges.append(new_range)
# how many times to repeat the measurement for each data point
iterations = 2
# inter-transaction time in ticks (1 tick = 1 ps, see header.tick_freq);
# 150 ns in ticks, this is choosen to be high enough that transactions
# do not pile up in the system, adjust if needed
# itt = 150 * 1000
itt = 600 * 1000
# TODO: read trace from qemu-generated tracing data and write to
# traffic_gen compatitble format
def load_qemu_trace(qemu_trace_in):
    """Generator: yield a PhysicalTraceRecord for every non-empty line of the
    already-opened QEMU trace file."""
    for raw_line in qemu_trace_in:
        stripped = raw_line.strip()
        print("got a line: ", stripped)
        if (stripped == ''):
            continue
        record = PhysicalTraceRecord()
        record.init_with_str_record(stripped)
        yield record
def create_trace_from_qemu(filename, qemu_trace, max_addr, itt):
    """Convert a QEMU physical-memory trace into a gem5 protobuf packet trace.

    Writes a gzip protobuf trace to `filename` plus a human-readable twin to
    `filename`.txt, spacing requests `itt` ticks apart.  Every address from
    the QEMU trace is shifted up by max_addr/2.  Returns (addr_dict,
    total_reqs) where addr_dict maps shifted address -> access count.

    NOTE(review): this function appears Python-2 only as written — it writes
    `str` data to files opened in 'wb' mode and `max_addr / 2` would become a
    float under Python 3; confirm before porting.
    """
    half_max = max_addr / 2
    print("----------------------------------------")
    print("all addr in qemu trace are added by " + hex(half_max))
    print("----------------------------------------")
    filename_txt = filename + '.txt'
    # shifted address -> number of accesses seen in the trace
    addr_dict = {}
    try:
        print("Trying to open file ", filename)
        proto_out = gzip.open(filename, 'wb')
        txt_out = open(filename_txt, 'wb')
    except IOError:
        print("Failed to open ", filename, " for writing")
        exit(-1)
    # write the magic number in 4-byte Little Endian, similar to what
    # is done in src/proto/protoio.cc
    proto_out.write("gem5")
    txt_out.write("gem5\n")
    # add the packet header
    header = packet_pb2.PacketHeader()
    header.obj_id = "lat_mem_rd for range 0:" + str(max_addr)
    # assume the default tick rate (1 ps)
    header.tick_freq = 1000000000000
    protolib.encodeMessage(proto_out, header)
    tick = 0
    # create a packet we can re-use for all the addresses
    packet = packet_pb2.Packet()
    # ReadReq is 1 in src/mem/packet.hh Command enum
    # packet.cmd = 1
    #packet.size = int(burst_size)
    # use 8 bytes word
    # (overridden per record below when --req-size was not given)
    packet.size = options.req_size
    total_reqs = 0
    ######################################################
    # Loading request from QEMU trace
    # Parsing the request and convert into Gem5 Trace
    ######################################################
    print("loading qemu trace: ", qemu_trace)
    try:
        if options.qemu_trace_is_txt:
            qemu_trace_in = open(qemu_trace, 'r')
        else:
            qemu_trace_in = bz2.BZ2File(qemu_trace, 'r')
    except:
        print("Failed to open qemu trace file: ", qemu_trace, " for reading")
        exit(-1)
    print("qemu_trace file opened: ", qemu_trace)
    # exit(1)
    for qemu_record in load_qemu_trace(qemu_trace_in):
        # generate a request from qemu record
        # parse read or write
        # (command codes match src/mem/packet.hh: 1 = ReadReq, 4 = WriteReq)
        if (qemu_record.rw == 'r'):
            packet.cmd = 1
        elif (qemu_record.rw == 'w'):
            packet.cmd = 4
        # pass the addr and instcolor
        addr = long(qemu_record.paddr) + half_max
        packet.addr = addr
        if addr in addr_dict:
            addr_dict[addr] = addr_dict[addr] + 1
        else:
            addr_dict[addr] = 1
        packet.inst_color = long(qemu_record.instcolor)
        if (options.req_size == None):
            packet.size = long(qemu_record.size)
        packet.tick = long(tick)
        print("generating the ", str(total_reqs), " request (%s), addr: %s" % \
            ( qemu_record.rw, hex(addr)))
        protolib.encodeMessage(proto_out, packet)
        txt_out.write( str(tick) + ' ' + str(qemu_record) + '\n')
        tick = tick + itt
        total_reqs = total_reqs + 1
    print("Total number of requests in traces: ", str(total_reqs))
    proto_out.close()
    txt_out.close()
    qemu_trace_in.close()
    return (addr_dict, total_reqs)
#
# for every data point, we create a trace containing a random address
# sequence, so that we can play back the | |
import bs4 as bs
import os
import os.path
import requests
import base64
import json
from datetime import datetime, timezone
from requests.packages import urllib3
# rrdtool dump test.rrd > test.xml
# Configuration file name plus filesystem locations for the RRD databases
# and the generated web dashboard.
cfg_name = 'config.json'
db_path = '/home/lcladmin/cmstats/data/'
web_path = '/var/www/html/cmstats/'
def main():
    """Load the JSON config and dispatch to the HTTP or HTTPS modem reader."""
    with open(db_path + cfg_name) as f:
        config = json.load(f)
    conn_type = config['conn_type']
    if conn_type == 'http':
        read_http()
    elif conn_type == 'https':
        read_https(config['username'], config['password'])
    else:
        raise Exception('invalid conn_type')
def read_http():
    """Fetch the modem's status and software-info pages over plain HTTP and parse them."""
    status_html = requests.get('http://192.168.100.1/cmconnectionstatus.html').text
    info_html = requests.get('http://192.168.100.1/cmswinfo.html').text
    parse_all(status_html, info_html)
def read_https(username, password):
    """Log in to the modem over HTTPS (self-signed cert), fetch the status and
    software-info pages, then parse them.

    The modem uses a home-grown auth scheme: base64("user:pass") appended to
    the login URL, and the returned page body is a session token for
    subsequent requests.
    """
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    s = requests.Session()
    # Initial GET establishes session cookies before authenticating.
    s.get('https://192.168.100.1', verify=False)
    message = username + ':' + password
    cm_cred = base64.b64encode(message.encode('ascii')).decode('ascii')
    cm_head = {'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
               'Authorization': 'Basic ' + cm_cred}
    r1 = s.get('https://192.168.100.1/cmconnectionstatus.html?login_' + cm_cred,
               headers=cm_head, verify=False)
    cm_ct = r1.text  # session token returned by the login request
    r2a = s.get('https://192.168.100.1/cmconnectionstatus.html?ct_' + cm_ct, verify=False)
    r2b = s.get('https://192.168.100.1/cmswinfo.html?ct_' + cm_ct, verify=False)
    try:
        s.get('https://192.168.100.1/logout.html', verify=False)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt.  Logout remains best-effort: never let it mask
        # the data we already fetched.
        pass
    parse_all(r2a.text, r2b.text)
def parse_all(cm_status_page, cm_info_page):
    """Parse both modem pages and push the extracted values into the RRD databases."""
    update_rrd(parse_cm_status(cm_status_page), parse_cm_info(cm_info_page))
def parse_cm_status(source):
    """Scrape the downstream/upstream channel tables from the modem's
    connection-status HTML.

    Returns ``{'downstream': [...], 'upstream': [...]}`` where each entry is a
    dict of strings with the unit suffixes (' Hz', ' dBmV', ' dB') stripped.
    """
    soup = bs.BeautifulSoup(source, 'lxml')
    tables = soup.find_all('table', attrs={'class': 'simpleTable'})

    # Table 1 holds the downstream channels; data rows carry align="left".
    downstream = []
    for row in tables[1].find_all('tr', attrs={'align': 'left'}):
        cells = [td.text.strip() for td in row.find_all('td')]
        # cells: 0=channel id, 1=lock status, 2=modulation, 3=frequency,
        #        4=power, 5=snr, 6=corrected, 7=uncorrected
        downstream.append({
            'frequency': cells[3].replace(' Hz', ''),
            'power': cells[4].replace(' dBmV', ''),
            'snr': cells[5].replace(' dB', ''),
            'corrected': cells[6],
            'uncorrected': cells[7],
        })

    # Table 2 holds the upstream channels.
    upstream = []
    for row in tables[2].find_all('tr', attrs={'align': 'left'}):
        cells = [td.text.strip() for td in row.find_all('td')]
        # cells: 0=channel, 1=channel id, 2=lock status, 3=modulation,
        #        4=frequency, 5=width, 6=power
        upstream.append({
            'frequency': cells[4].replace(' Hz', ''),
            'width': cells[5].replace(' Hz', ''),
            'power': cells[6].replace(' dBmV', ''),
        })

    return {
        'downstream': downstream,
        'upstream': upstream
    }
def parse_cm_info(source):
    """Scrape model, version, MAC, serial and uptime details from the modem's
    software-info HTML page and return them as a dict of strings."""
    soup = bs.BeautifulSoup(source, 'lxml')

    def second_cell(row):
        # Each table row is label/value; the value sits in the second <td>.
        return row.find_all('td')[1].text.strip()

    # The model number lives in a dedicated <span> in the page header.
    model_number = soup.find_all('span', attrs={'id': 'thisModelNumberIs'})[0].text.strip()

    tables = soup.find_all('table', attrs={'class': 'simpleTable'})
    info_rows = tables[0].find_all('tr')     # product information table
    status_rows = tables[1].find_all('tr')   # status table (uptime)

    return {
        'model_number': model_number,
        'hw_ver': second_cell(info_rows[2]),
        'sw_ver': second_cell(info_rows[3]),
        'hfc_mac': second_cell(info_rows[4]),
        'ser_num': second_cell(info_rows[5]),
        'uptime': second_cell(status_rows[1])
    }
def get_frequency_value(elem):
    """Sort key for channel dicts: the 'frequency' field as an int."""
    freq = elem['frequency']
    return int(freq)
def update_rrd(channels, information):
# sort channels by frequency
channels['downstream'] = sorted(channels['downstream'], key=get_frequency_value)
channels['upstream'] = sorted(channels['upstream'], key=get_frequency_value)
db_ext = '.rrd'
img_ext = '.png'
current_time = datetime.now(timezone.utc).isoformat()
# **** DOWNSTREAM ****
ds_path = db_path + 'downstream/'
graph_path = web_path
index_contents = str(
'<html><head><title>' +
'Cable Modem Statistics (' +
'Model: ' + information['model_number'] + ', ' +
'MAC: ' + information['hfc_mac'] + ', ' +
'Serial: ' + information['ser_num'] +
')</title></head><body>' +
'<h2>Cable Modem Statistics</h2>' +
'<h3>Last Update</h3>' +
'<p>' + current_time + '</p>' +
'<h3>Modem Information</h3>' +
'<table border="1">' +
'<tr>' +
'<th align="left">Model Number</th>' +
'<td>' + information['model_number'] + '</td>' +
'</tr>' +
'<tr>' +
'<th align="left">Hardware Version</th>' +
'<td>' + information['hw_ver'] + '</td>' +
'</tr>' +
'<tr>' +
'<th align="left">Software Version</th>' +
'<td>' + information['sw_ver'] + '</td>' +
'</tr>' +
'<tr>' +
'<th align="left">HFC MAC Address</th>' +
'<td>' + information['hfc_mac'] + '</td>' +
'</tr>' +
'<tr>' +
'<th align="left">Serial Number</th>' +
'<td>' + information['ser_num'] + '</td>' +
'</tr>' +
'<tr>' +
'<th align="left">Uptime</th>' +
'<td>' + information['uptime'] + '</td>' +
'</tr>' +
'</table>'
)
index_page_ds_summary_contents = str(
'<h3>Downstream Channels Summary</h3>' +
'<table border="1">' +
'<tr>' +
'<th>Frequency (Hz)</th>' +
'<th>Power (dBm)</th>' +
'<th>SNR (dB)</th>' +
'<th>Corrected (Symbols)</th>' +
'<th>Uncorrected (Symbols)</th>' +
'</tr>'
)
# power
ds_power_all_path = graph_path + 'downstream_all_power' + img_ext
ds_power_all_cmd = str(
'rrdtool graph ' + ds_power_all_path + ' -a PNG ' +
'--width 800 --height 400 --title "Power" ' +
'--vertical-label "dBm" --disable-rrdtool-tag '
)
# snr
ds_snr_all_path = graph_path + 'downstream_all_snr' + img_ext
ds_snr_all_cmd = str(
'rrdtool graph ' + ds_snr_all_path + ' -a PNG ' +
'--width 800 --height 400 --title "SNR" ' +
'--vertical-label "dB" --disable-rrdtool-tag '
)
# corrected
ds_corrected_all_path = graph_path + 'downstream_all_corrected' + img_ext
ds_corrected_all_cmd = str(
'rrdtool graph ' + ds_corrected_all_path + ' -a PNG ' +
'--width 800 --height 400 --title "Corrected" ' +
'--vertical-label "Symbols" --upper-limit 1 --disable-rrdtool-tag '
)
# uncorrected
ds_uncorrected_all_path = graph_path + 'downstream_all_uncorrected' + img_ext
ds_uncorrected_all_cmd = str(
'rrdtool graph ' + ds_uncorrected_all_path + ' -a PNG ' +
'--width 800 --height 400 --title "Uncorrected" ' +
'--vertical-label "Symbols" --upper-limit 1 --disable-rrdtool-tag '
)
for ds_channel in channels['downstream']:
frequency = ds_channel['frequency']
power = ds_channel['power']
snr = ds_channel['snr']
corrected = ds_channel['corrected']
uncorrected = ds_channel['uncorrected']
ds_ch_path = ds_path + frequency + db_ext
if (not os.path.exists(ds_ch_path)):
os.system(
'rrdtool create ' + ds_ch_path + ' ' +
'--start N --step 300 ' +
'DS:power:GAUGE:600:U:U ' +
'DS:snr:GAUGE:600:U:U ' +
'DS:corrected:DERIVE:600:0:U ' +
'DS:uncorrected:DERIVE:600:0:U ' +
'RRA:AVERAGE:0.5:1:1440'
)
os.system(
'rrdtool update ' + ds_ch_path + ' ' +
'N:' + power + ':' + snr + ':' + corrected + ':' + uncorrected
)
# power
power_graph_path = graph_path + 'downstream_' + frequency + '_power' + img_ext
ds_power_ch_cmd = str(
'rrdtool graph ' + power_graph_path + ' -a PNG --title="' + frequency + ' Hz" ' +
'--vertical-label "dBm" --disable-rrdtool-tag ' +
'DEF:power=' + ds_ch_path + ':power:AVERAGE ' +
'LINE1:power#ff0000:Power'
)
os.system(ds_power_ch_cmd)
ds_power_all_cmd = ds_power_all_cmd + str(
'DEF:' + frequency + '=' + ds_ch_path + ':power:AVERAGE ' +
'LINE1:' + frequency + '#ff0000:' + frequency + 'Hz '
)
# snr
snr_graph_path = graph_path + 'downstream_' + frequency + '_snr' + img_ext
ds_snr_ch_cmd = str(
'rrdtool graph ' + snr_graph_path + ' -a PNG --title="' + frequency + ' Hz" ' +
'--vertical-label "dB" --disable-rrdtool-tag ' +
'DEF:snr=' + ds_ch_path + ':snr:AVERAGE ' +
'LINE1:snr#ff0000:SNR'
)
os.system(ds_snr_ch_cmd)
ds_snr_all_cmd = ds_snr_all_cmd + str(
'DEF:' + frequency + '=' + ds_ch_path + ':snr:AVERAGE ' +
'LINE1:' + frequency + '#ff0000:' + frequency + 'Hz '
)
# corrected
corrected_graph_path = graph_path + 'downstream_' + frequency + '_corrected' + img_ext
ds_corrected_ch_cmd = str(
'rrdtool graph ' + corrected_graph_path + ' -a PNG --title="' + frequency + ' Hz" ' +
'--vertical-label "Symbols" --upper-limit 1 --disable-rrdtool-tag ' +
'DEF:corrected=' + ds_ch_path + ':corrected:AVERAGE ' +
'LINE1:corrected#ff0000:Corrected'
)
os.system(ds_corrected_ch_cmd)
ds_corrected_all_cmd = ds_corrected_all_cmd + str(
'DEF:' + frequency + '=' + ds_ch_path + ':corrected:AVERAGE ' +
'LINE1:' + frequency + '#ff0000:' + frequency + 'Hz '
)
# uncorrected
uncorrected_graph_path = graph_path + | |
# This file was *autogenerated* from the file sidh_pohlig_hellman.sage
from sage.all_cmdline import * # import sage library
_sage_const_0 = Integer(0); _sage_const_2 = Integer(2); _sage_const_1 = Integer(1); _sage_const_6 = Integer(6); _sage_const_3 = Integer(3); _sage_const_5 = Integer(5); _sage_const_20 = Integer(20); _sage_const_21 = Integer(21); _sage_const_63 = Integer(63); _sage_const_36 = Integer(36); _sage_const_84 = Integer(84); _sage_const_4 = Integer(4); _sage_const_336 = Integer(336); _sage_const_366 = Integer(366); _sage_const_372 = Integer(372); _sage_const_12 = Integer(12); _sage_const_11 = Integer(11); _sage_const_15 = Integer(15); _sage_const_9 = Integer(9); _sage_const_45 = Integer(45); _sage_const_60 = Integer(60); _sage_const_56 = Integer(56); _sage_const_61 = Integer(61); _sage_const_183 = Integer(183); _sage_const_10 = Integer(10); _sage_const_239 = Integer(239)# Import Sage and other SIDH related modules
from sage.all import *
from sidh_field_arithmetic import *
from sidh_pairings import *
"""
Implements the Pohlig-Hellman algorithm to compute discrete logarithms in the
2- and 3-torsion subgroups, respectively. Different sub-routines reflect the
windowed Pohlig-Hellman approach.
"""
# Turn off arithmetic proof
# (skips Sage's provable-primality/correctness checks, which speeds up the
# large-field arithmetic used below)
proof.arithmetic(False)
# Two torsion
def phn1_2(q, LUT, a):
    """Base Pohlig-Hellman step in the 2-torsion: recover, bit by bit, the
    exponent of q relative to the generator encoded in LUT.

    Call sites pass q of order dividing 2^a — TODO confirm; autogenerated
    from sidh_pohlig_hellman.sage, so edit the .sage source instead.
    """
    u = q
    alpha_i = _sage_const_0
    for l in (ellipsis_range(_sage_const_0 ,Ellipsis,a-_sage_const_2 )):
        v = u
        # Repeated squaring projects v into the order-2 subgroup to test one bit.
        for h in (ellipsis_range(_sage_const_1 ,Ellipsis,a-_sage_const_1 -l)):
            v = sqr_fp2_cycl(v)
        if v != _sage_const_1 :
            alpha_i += _sage_const_2 **l
            tmp = LUT[_sage_const_6 -a+l]
            u = u * tmp
    # Top bit.
    if u != _sage_const_1 :
        alpha_i += _sage_const_2 **(a-_sage_const_1 )
    return alpha_i
def phn5_2(q, LUT, LUT_1):
    """Windowed Pohlig-Hellman in the 2-torsion: solve a 21-bit digit as four
    5-bit sub-digits (via phn1_2) plus one final bit."""
    u = q
    alpha_k = _sage_const_0
    for ii in (ellipsis_range(_sage_const_0 ,Ellipsis,_sage_const_3 )):
        v = u
        v = sqr_fp2_cycl(v)
        for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_5 *(_sage_const_3 -ii))):
            v = sqr_fp2_cycl(v)
        alpha_i = phn1_2(v, LUT, _sage_const_5 ) # u order 5
        alpha_k += alpha_i * (_sage_const_2 **(_sage_const_5 *ii))
        # Strip the recovered sub-digit from u before the next window.
        tmp = exp_fp2_cycl(LUT_1[ii], alpha_i)
        u *= tmp
    # Do the last part
    if u != _sage_const_1 : # u order 2
        alpha_k += _sage_const_2 **(_sage_const_20 )
    return alpha_k
def phn21_2(q, LUT, LUT_0, LUT_1):
    """Windowed Pohlig-Hellman in the 2-torsion: solve an 84-bit digit as four
    21-bit sub-digits via phn5_2."""
    u = q
    alpha_k = _sage_const_0
    for ii in (ellipsis_range(_sage_const_0 ,Ellipsis,_sage_const_2 )):
        v = u
        for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_21 *(_sage_const_3 -ii))):
            v = sqr_fp2_cycl(v)
        alpha_i = phn5_2(v, LUT, LUT_1) # u order 21
        alpha_k += alpha_i * (_sage_const_2 **(_sage_const_21 *ii))
        # Strip the recovered sub-digit from u before the next window.
        tmp = exp_fp2_cycl(LUT_0[ii], alpha_i)
        u *= tmp
    alpha_i = phn5_2(u, LUT, LUT_1) # u order 21
    alpha_k += alpha_i * (_sage_const_2 **_sage_const_63 )
    return alpha_k
def phn84_2(r, t_ori, LUT, LUT_0, LUT_1, LUT_3):
    """Top-level Pohlig-Hellman in the 2-torsion: recover the full 372-bit
    discrete log of r as four 84-bit digits (phn21_2) plus six 6-bit digits
    (phn1_2), using the tables from build_LUTs_2."""
    alpha = _sage_const_0
    t = r
    for k in (ellipsis_range(_sage_const_0 ,Ellipsis,_sage_const_3 )):
        u = t
        for ii in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_36 )):
            u = sqr_fp2_cycl(u)
        for ii in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_84 *(_sage_const_3 -k))):
            u = sqr_fp2_cycl(u)
        alpha_k = phn21_2(u, LUT, LUT_0, LUT_1) # q order 2^84
        alpha += _sage_const_2 **(_sage_const_84 *k) * alpha_k
        # Strip the recovered digit from t before the next window.
        tmp = exp_fp2_cycl(t_ori[k], alpha_k)
        t *= tmp
    # Do the last part
    for ii in (ellipsis_range(_sage_const_0 ,Ellipsis,_sage_const_4 )):
        u = t
        for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_6 *(_sage_const_5 -ii))):
            u = sqr_fp2_cycl(u)
        alpha_i = phn1_2(u, LUT, _sage_const_6 ) # u order 2^6
        alpha += alpha_i * (_sage_const_2 **(_sage_const_336 +_sage_const_6 *ii))
        tmp = exp_fp2_cycl(LUT_3[ii], alpha_i)
        t *= tmp
    alpha_i = phn1_2(t, LUT, _sage_const_6 ) # u order 2^6
    alpha += alpha_i * (_sage_const_2 **(_sage_const_366 ))
    return alpha
def build_LUTs_2(g):
    """Precompute the lookup tables used by phn84_2/phn21_2/phn5_2/phn1_2 from
    the pairing value g: successive 2-power powers of g^-1 at the window sizes
    of the n = [84,36,21,...] decomposition (orders noted per line)."""
    # Build (small) tables
    tmp = g
    tmp = inv_fp2_cycl(tmp)
    t_ori = [tmp] # order 2^372
    for ii in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_3 )):
        for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_84 )):
            tmp = sqr_fp2_cycl(tmp)
        t_ori.append(tmp) # order 2^288 & 2^204 & 2^120
    for ii in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_36 )):
        tmp = sqr_fp2_cycl(tmp)
    t_ori.append(tmp) # order 2^84
    LUT_0 = [tmp] # order 2^84
    for ii in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_2 )):
        for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_21 )):
            tmp = sqr_fp2_cycl(tmp)
        LUT_0.append(tmp) # order 2^63 & 2^42
    for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_6 )):
        tmp = sqr_fp2_cycl(tmp)
    LUT_3 = [tmp] # order 2^36
    for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_6 )):
        tmp = sqr_fp2_cycl(tmp)
    LUT_3.append(tmp) # order 2^30
    for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_6 )):
        tmp = sqr_fp2_cycl(tmp)
    LUT_3.append(tmp) # order 2^24
    for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_3 )):
        tmp = sqr_fp2_cycl(tmp)
    LUT_0.append(tmp) # order 2^21
    LUT_1 = [LUT_0[_sage_const_3 ]] # order 2^21
    for ii in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_3 )):
        tmp = sqr_fp2_cycl(tmp)
    LUT_3.append(tmp) # order 2^18
    for ii in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_2 )):
        tmp = sqr_fp2_cycl(tmp)
    LUT_1.append(tmp) # order 2^16
    for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_4 )):
        tmp = sqr_fp2_cycl(tmp)
    LUT_3.append(tmp) # order 2^12
    tmp = sqr_fp2_cycl(tmp)
    LUT_1.append(tmp) # order 2^11
    for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_5 )):
        tmp = sqr_fp2_cycl(tmp)
    LUT_1.append(tmp) # order 2^16 & 2^11 & 2^6
    LUT_3.append(tmp)
    LUT = [LUT_3[_sage_const_5 ]]
    for ii in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_4 )):
        LUT.append(sqr_fp2_cycl(LUT[ii-_sage_const_1 ])) # order 2^5 -- 2^1
    return t_ori, LUT, LUT_0, LUT_1, LUT_3
def ph_2(phiP, phiQ, PS, QS, A):
    """Solve the four 2-torsion discrete logs (a0, b0, a1, b1) expressing the
    image points in terms of the basis, via Tate pairings followed by
    Pohlig-Hellman (phn84_2)."""
    eqp, r0, t0, r1, t1 = tate_pairings_2_torsion(QS, PS, phiP, phiQ, A)
    # n = [84,36,21,0,5,1,0,0,6,0]
    t_ori, LUT, LUT_0, LUT_1, LUT_3 = build_LUTs_2(eqp)
    # Finish computation
    a0 = phn84_2(r0, t_ori, LUT, LUT_0, LUT_1, LUT_3)
    b0 = phn84_2(r1, t_ori, LUT, LUT_0, LUT_1, LUT_3)
    # Negate mod 2^372 (the group order of the 2-torsion component).
    b0 = _sage_const_2 **_sage_const_372 - b0
    a1 = phn84_2(t0, t_ori, LUT, LUT_0, LUT_1, LUT_3)
    b1 = phn84_2(t1, t_ori, LUT, LUT_0, LUT_1, LUT_3)
    b1 = _sage_const_2 **_sage_const_372 - b1
    return a0, b0, a1, b1
# Three torsion
def phn1_3(q, LUT, a):
    """Base Pohlig-Hellman step in the 3-torsion: recover the exponent of q
    digit by digit in base 3 (each trit is 0, 1 or 2, distinguished by
    comparing against LUT[3] and 1)."""
    u = q
    alpha_i = _sage_const_0
    for l in (ellipsis_range(_sage_const_0 ,Ellipsis,a-_sage_const_2 )):
        v = u
        # Repeated cubing projects v into the order-3 subgroup to test one trit.
        for h in (ellipsis_range(_sage_const_1 ,Ellipsis,a-_sage_const_1 -l)):
            v = cube_fp2_cycl(v)
        if v == LUT[_sage_const_3 ]:
            alpha_i += _sage_const_3 **l
            tmp = LUT[_sage_const_3 -a+l]
            u = u * tmp
        else:
            if not v == _sage_const_1 :
                alpha_i += _sage_const_2 *_sage_const_3 **l
                tmp = LUT[_sage_const_3 -a+l]**_sage_const_2
                u = u * tmp
    # Top trit.
    if u == LUT[_sage_const_3 ]:
        alpha_i += _sage_const_3 **(a-_sage_const_1 )
    else:
        if not u == _sage_const_1 :
            alpha_i += _sage_const_2 *_sage_const_3 **(a-_sage_const_1 )
    return alpha_i
def phn3_3(q, LUT, LUT_1):
    """Windowed Pohlig-Hellman in the 3-torsion: solve a 3^15 digit as five
    3^3 sub-digits via phn1_3."""
    u = q
    alpha = _sage_const_0
    for i in (ellipsis_range(_sage_const_0 ,Ellipsis,_sage_const_3 )):
        v = u
        for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_3 *(_sage_const_4 -i))):
            v = cube_fp2_cycl(v)
        alpha_i = phn1_3(v, LUT, _sage_const_3 ) # order 3^3
        alpha += alpha_i * (_sage_const_3 **(_sage_const_3 *i))
        # Strip the recovered sub-digit from u before the next window.
        tmp = exp_fp2_cycl(LUT_1[i], alpha_i)
        u *= tmp
    alpha_i = phn1_3(u, LUT, _sage_const_3 ) # q order 3^3
    alpha += alpha_i * (_sage_const_3 **_sage_const_12 )
    return alpha
def phn15_l_3(q, LUT, LUT_0, LUT_1):
    """Windowed Pohlig-Hellman in the 3-torsion for the last, shorter chunk:
    three 3^15 windows (phn3_3), then three 3^3 windows and a final 3^2
    window (phn1_3); LUT entries are re-cubed in place to match the reduced
    digit sizes."""
    u = q
    alpha = _sage_const_0
    for i in (ellipsis_range(_sage_const_0 ,Ellipsis,_sage_const_2 )):
        v = u
        for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_11 )):
            v = cube_fp2_cycl(v)
        for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_15 *(_sage_const_2 -i))):
            v = cube_fp2_cycl(v)
        alpha_i = phn3_3(v, LUT, LUT_1) # u order 3^15
        alpha += alpha_i * (_sage_const_3 **(_sage_const_15 *i))
        # Adjust the table entry to the shifted digit position, then strip it.
        v = LUT_0[i]
        for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_5 )):
            v = cube_fp2_cycl(v)
        tmp = exp_fp2_cycl(v, alpha_i)
        u *= tmp
    # Do the last part
    alpha_n = _sage_const_0
    for i in (ellipsis_range(_sage_const_0 ,Ellipsis,_sage_const_2 )):
        v = u
        for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_2 )):
            v = cube_fp2_cycl(v)
        for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_3 *(_sage_const_2 -i))):
            v = cube_fp2_cycl(v)
        alpha_i = phn1_3(v, LUT, _sage_const_3 ) # u order 3^3
        alpha_n += alpha_i * (_sage_const_3 **(_sage_const_3 *i))
        v = LUT_1[i]
        for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_4 )):
            v = cube_fp2_cycl(v)
        tmp = exp_fp2_cycl(v, alpha_i)
        u *= tmp
    # And the final part
    alpha_i = phn1_3(u, LUT, _sage_const_2 ) # q order 3^2
    alpha_n += alpha_i * (_sage_const_3 **_sage_const_9 )
    alpha += alpha_n * (_sage_const_3 **_sage_const_45 )
    return alpha
def phn15_3(q, LUT, LUT_0, LUT_1):
u = q
alpha = _sage_const_0
for i in (ellipsis_range(_sage_const_0 ,Ellipsis,_sage_const_2 )):
v = u
v = cube_fp2_cycl(v)
for j in (ellipsis_range(_sage_const_1 ,Ellipsis,_sage_const_15 *(_sage_const_3 -i))):
v = cube_fp2_cycl(v)
alpha_i = phn3_3(v, LUT, LUT_1) # u order 3^15
alpha += alpha_i * (_sage_const_3 **(_sage_const_15 *i))
tmp = exp_fp2_cycl(LUT_0[i], alpha_i)
u *= tmp
v = u
v = cube_fp2_cycl(v)
alpha_i = phn3_3(v, LUT, LUT_1) # u order 3^15
alpha += alpha_i * (_sage_const_3 **(_sage_const_45 ))
tmp = exp_fp2_cycl(LUT_0[_sage_const_3 ], alpha_i)
u *= tmp
# Do the last part
if u == LUT[_sage_const_3 ]:
alpha += _sage_const_3 **(_sage_const_60 )
else:
if not u == _sage_const_1 :
alpha += _sage_const_2 | |
<gh_stars>100-1000
from veros.core.operators import numpy as npx
from veros import veros_routine, veros_kernel, KernelOutput
from veros.distributed import global_sum
from veros.variables import allocate
from veros.core import advection, diffusion, isoneutral, density, utilities
from veros.core.operators import update, update_add, at
@veros_kernel
def advect_tracer(state, tr):
    """
    Calculate the time tendency of tracer *tr* due to advection.

    Computes horizontal and vertical flux divergences on the interior cells
    (the 2-cell halo is excluded horizontally) and returns the tendency array.
    """
    vs = state.variables
    settings = state.settings
    # Flux scheme: superbee-limited or plain second-order, per runtime setting.
    if settings.enable_superbee_advection:
        flux_east, flux_north, flux_top = advection.adv_flux_superbee(state, tr)
    else:
        flux_east, flux_north, flux_top = advection.adv_flux_2nd(state, tr)
    dtr = allocate(state.dimensions, ("xt", "yt", "zt"))
    # Horizontal flux divergence, masked to wet cells; cost accounts for
    # spherical geometry in the zonal/meridional metrics.
    dtr = update(
        dtr,
        at[2:-2, 2:-2, :],
        vs.maskT[2:-2, 2:-2, :]
        * (
            -(flux_east[2:-2, 2:-2, :] - flux_east[1:-3, 2:-2, :])
            / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dxt[2:-2, npx.newaxis, npx.newaxis])
            - (flux_north[2:-2, 2:-2, :] - flux_north[2:-2, 1:-3, :])
            / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dyt[npx.newaxis, 2:-2, npx.newaxis])
        ),
    )
    # Vertical flux divergence; the bottom level (index 0) only receives the
    # flux through its upper face.
    dtr = update_add(dtr, at[:, :, 0], -1 * vs.maskT[:, :, 0] * flux_top[:, :, 0] / vs.dzt[0])
    dtr = update_add(
        dtr, at[:, :, 1:], -1 * vs.maskT[:, :, 1:] * (flux_top[:, :, 1:] - flux_top[:, :, :-1]) / vs.dzt[1:]
    )
    return dtr
@veros_kernel
def advect_temperature(state):
    """Diagnose the advective tendency of temperature at time level ``tau``."""
    vs = state.variables
    # Store the tendency of the current-time temperature field into dtemp.
    vs.dtemp = update(
        vs.dtemp,
        at[..., vs.tau],
        advect_tracer(state, vs.temp[..., vs.tau]),
    )
    return KernelOutput(dtemp=vs.dtemp)
@veros_kernel
def advect_salinity(state):
    """Diagnose the advective tendency of salinity at time level ``tau``."""
    vs = state.variables
    # Store the tendency of the current-time salinity field into dsalt.
    vs.dsalt = update(
        vs.dsalt,
        at[..., vs.tau],
        advect_tracer(state, vs.salt[..., vs.tau]),
    )
    return KernelOutput(dsalt=vs.dsalt)
@veros_kernel
def calc_eq_of_state(state, n):
    """
    calculate density, stability frequency, dynamic enthalpy and derivatives
    for time level n from temperature and salinity
    """
    vs = state.variables
    settings = state.settings
    salt = vs.salt[..., n]
    temp = vs.temp[..., n]
    # Pressure is approximated by depth (zt is negative downward).
    press = npx.abs(vs.zt)
    """
    calculate new density
    """
    vs.rho = update(vs.rho, at[..., n], density.get_rho(state, salt, temp, press) * vs.maskT)
    """
    calculate new potential density
    """
    vs.prho = update(vs.prho, at[...], density.get_potential_rho(state, salt, temp) * vs.maskT)
    """
    calculate new dynamic enthalpy and derivatives
    """
    # Only needed for closed energy budgets.
    if settings.enable_conserve_energy:
        vs.Hd = update(vs.Hd, at[..., n], density.get_dyn_enthalpy(state, salt, temp, press) * vs.maskT)
        vs.int_drhodT = update(vs.int_drhodT, at[..., n], density.get_int_drhodT(state, salt, temp, press))
        vs.int_drhodS = update(vs.int_drhodS, at[..., n], density.get_int_drhodS(state, salt, temp, press))
    """
    new stability frequency
    """
    # N^2 from the vertical density difference on the W grid; the topmost
    # level copies the value below it.
    fxa = -settings.grav / settings.rho_0 / vs.dzw[npx.newaxis, npx.newaxis, :-1] * vs.maskW[:, :, :-1]
    vs.Nsqr = update(
        vs.Nsqr,
        at[:, :, :-1, n],
        fxa * (density.get_rho(state, salt[:, :, 1:], temp[:, :, 1:], press[:-1]) - vs.rho[:, :, :-1, n]),
    )
    vs.Nsqr = update(vs.Nsqr, at[:, :, -1, n], vs.Nsqr[:, :, -2, n])
    return KernelOutput(
        rho=vs.rho, prho=vs.prho, Hd=vs.Hd, int_drhodT=vs.int_drhodT, int_drhodS=vs.int_drhodS, Nsqr=vs.Nsqr
    )
@veros_kernel
def advect_temp_salt_enthalpy(state):
    """
    integrate temperature and salinity and diagnose sources of dynamic enthalpy
    """
    vs = state.variables
    settings = state.settings
    vs.dtemp = advect_temperature(state).dtemp
    vs.dsalt = advect_salinity(state).dsalt
    if settings.enable_conserve_energy:
        """
        advection of dynamic enthalpy
        """
        if settings.enable_superbee_advection:
            flux_east, flux_north, flux_top = advection.adv_flux_superbee(state, vs.Hd[:, :, :, vs.tau])
        else:
            flux_east, flux_north, flux_top = advection.adv_flux_2nd(state, vs.Hd[:, :, :, vs.tau])
        # Same flux-divergence stencil as advect_tracer, applied to Hd.
        vs.dHd = update(
            vs.dHd,
            at[2:-2, 2:-2, :, vs.tau],
            vs.maskT[2:-2, 2:-2, :]
            * (
                -(flux_east[2:-2, 2:-2, :] - flux_east[1:-3, 2:-2, :])
                / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dxt[2:-2, npx.newaxis, npx.newaxis])
                - (flux_north[2:-2, 2:-2, :] - flux_north[2:-2, 1:-3, :])
                / (vs.cost[npx.newaxis, 2:-2, npx.newaxis] * vs.dyt[npx.newaxis, 2:-2, npx.newaxis])
            ),
        )
        vs.dHd = update_add(vs.dHd, at[:, :, 0, vs.tau], -1 * vs.maskT[:, :, 0] * flux_top[:, :, 0] / vs.dzt[0])
        vs.dHd = update_add(
            vs.dHd,
            at[:, :, 1:, vs.tau],
            -1 * vs.maskT[:, :, 1:] * (flux_top[:, :, 1:] - flux_top[:, :, :-1]) / vs.dzt[npx.newaxis, npx.newaxis, 1:],
        )
        """
        changes in dyn. Enthalpy due to advection
        """
        # Mismatch between the enthalpy implied by T/S tendencies and the
        # directly advected enthalpy tendency = advective dissipation.
        diss = allocate(state.dimensions, ("xt", "yt", "zt"))
        diss = update(
            diss,
            at[2:-2, 2:-2, :],
            settings.grav
            / settings.rho_0
            * (
                -vs.int_drhodT[2:-2, 2:-2, :, vs.tau] * vs.dtemp[2:-2, 2:-2, :, vs.tau]
                - vs.int_drhodS[2:-2, 2:-2, :, vs.tau] * vs.dsalt[2:-2, 2:-2, :, vs.tau]
            )
            - vs.dHd[2:-2, 2:-2, :, vs.tau],
        )
        """
        contribution by vertical advection is - g rho w / rho0, substract this also
        """
        diss = update_add(
            diss,
            at[:, :, :-1],
            -0.25
            * settings.grav
            / settings.rho_0
            * vs.w[:, :, :-1, vs.tau]
            * (vs.rho[:, :, :-1, vs.tau] + vs.rho[:, :, 1:, vs.tau])
            * vs.dzw[npx.newaxis, npx.newaxis, :-1]
            / vs.dzt[npx.newaxis, npx.newaxis, :-1],
        )
        diss = update_add(
            diss,
            at[:, :, 1:],
            -0.25
            * settings.grav
            / settings.rho_0
            * vs.w[:, :, :-1, vs.tau]
            * (vs.rho[:, :, 1:, vs.tau] + vs.rho[:, :, :-1, vs.tau])
            * vs.dzw[npx.newaxis, npx.newaxis, :-1]
            / vs.dzt[npx.newaxis, npx.newaxis, 1:],
        )
    if settings.enable_conserve_energy and settings.enable_tke:
        """
        dissipation by advection interpolated on W-grid
        """
        vs.P_diss_adv = diffusion.dissipation_on_wgrid(state, diss, vs.kbot)
        """
        distribute P_diss_adv over domain, prevent draining of TKE
        """
        # fxa: volume integral of P_diss_adv; fxb: volume where TKE is positive.
        # Their ratio spreads the dissipation uniformly over TKE-positive cells.
        fxa = npx.sum(
            vs.area_t[2:-2, 2:-2, npx.newaxis]
            * vs.P_diss_adv[2:-2, 2:-2, :-1]
            * vs.dzw[npx.newaxis, npx.newaxis, :-1]
            * vs.maskW[2:-2, 2:-2, :-1]
        ) + npx.sum(0.5 * vs.area_t[2:-2, 2:-2] * vs.P_diss_adv[2:-2, 2:-2, -1] * vs.dzw[-1] * vs.maskW[2:-2, 2:-2, -1])
        tke_mask = vs.tke[2:-2, 2:-2, :-1, vs.tau] > 0.0
        fxb = npx.sum(
            vs.area_t[2:-2, 2:-2, npx.newaxis]
            * vs.dzw[npx.newaxis, npx.newaxis, :-1]
            * vs.maskW[2:-2, 2:-2, :-1]
            * tke_mask
        ) + npx.sum(0.5 * vs.area_t[2:-2, 2:-2] * vs.dzw[-1] * vs.maskW[2:-2, 2:-2, -1])
        # Reduce across MPI ranks before forming the global ratio.
        fxa = global_sum(fxa)
        fxb = global_sum(fxb)
        vs.P_diss_adv = update(vs.P_diss_adv, at[2:-2, 2:-2, :-1], fxa / fxb * tke_mask)
        vs.P_diss_adv = update(vs.P_diss_adv, at[2:-2, 2:-2, -1], fxa / fxb)
    """
    <NAME> time stepping for advection
    """
    # Two-level extrapolation of the tendencies with weights
    # (1.5 + AB_eps) and -(0.5 + AB_eps) at tau and taum1 respectively.
    vs.temp = update(
        vs.temp,
        at[:, :, :, vs.taup1],
        vs.temp[:, :, :, vs.tau]
        + settings.dt_tracer
        * ((1.5 + settings.AB_eps) * vs.dtemp[:, :, :, vs.tau] - (0.5 + settings.AB_eps) * vs.dtemp[:, :, :, vs.taum1])
        * vs.maskT,
    )
    vs.salt = update(
        vs.salt,
        at[:, :, :, vs.taup1],
        vs.salt[:, :, :, vs.tau]
        + settings.dt_tracer
        * ((1.5 + settings.AB_eps) * vs.dsalt[:, :, :, vs.tau] - (0.5 + settings.AB_eps) * vs.dsalt[:, :, :, vs.taum1])
        * vs.maskT,
    )
    return KernelOutput(
        temp=vs.temp, salt=vs.salt, dtemp=vs.dtemp, dsalt=vs.dsalt, dHd=vs.dHd, P_diss_adv=vs.P_diss_adv
    )
@veros_kernel
def vertmix_tempsalt(state):
    """
    vertical mixing of temperature and salinity
    """
    vs = state.variables
    settings = state.settings
    # Snapshot the pre-mixing fields so the mixing tendencies can be
    # diagnosed as (after - before) / dt at the end.
    vs.dtemp_vmix = update(vs.dtemp_vmix, at[...], vs.temp[:, :, :, vs.taup1])
    vs.dsalt_vmix = update(vs.dsalt_vmix, at[...], vs.salt[:, :, :, vs.taup1])
    # Tridiagonal coefficients for the implicit vertical diffusion solve
    # (interior cells only; the 2-cell halo is excluded).
    a_tri = allocate(state.dimensions, ("xt", "yt", "zt"))[2:-2, 2:-2]
    b_tri = allocate(state.dimensions, ("xt", "yt", "zt"))[2:-2, 2:-2]
    c_tri = allocate(state.dimensions, ("xt", "yt", "zt"))[2:-2, 2:-2]
    d_tri = allocate(state.dimensions, ("xt", "yt", "zt"))[2:-2, 2:-2]
    delta = allocate(state.dimensions, ("xt", "yt", "zt"))[2:-2, 2:-2]
    _, water_mask, edge_mask = utilities.create_water_masks(vs.kbot[2:-2, 2:-2], settings.nz)
    # delta = dt * kappaH / dzw, the implicit diffusion weight per interface.
    delta = update(
        delta, at[:, :, :-1], settings.dt_tracer / vs.dzw[npx.newaxis, npx.newaxis, :-1] * vs.kappaH[2:-2, 2:-2, :-1]
    )
    delta = update(delta, at[:, :, -1], 0.0)
    a_tri = update(a_tri, at[:, :, 1:], -delta[:, :, :-1] / vs.dzt[npx.newaxis, npx.newaxis, 1:])
    b_tri = update(b_tri, at[:, :, 1:], 1 + (delta[:, :, 1:] + delta[:, :, :-1]) / vs.dzt[npx.newaxis, npx.newaxis, 1:])
    b_tri_edge = 1 + delta / vs.dzt[npx.newaxis, npx.newaxis, :]
    c_tri = update(c_tri, at[:, :, :-1], -delta[:, :, :-1] / vs.dzt[npx.newaxis, npx.newaxis, :-1])
    # Temperature solve; surface heat forcing enters the top cell RHS.
    d_tri = vs.temp[2:-2, 2:-2, :, vs.taup1]
    d_tri = update_add(d_tri, at[:, :, -1], settings.dt_tracer * vs.forc_temp_surface[2:-2, 2:-2] / vs.dzt[-1])
    sol = utilities.solve_implicit(a_tri, b_tri, c_tri, d_tri, water_mask, b_edge=b_tri_edge, edge_mask=edge_mask)
    vs.temp = update(vs.temp, at[2:-2, 2:-2, :, vs.taup1], npx.where(water_mask, sol, vs.temp[2:-2, 2:-2, :, vs.taup1]))
    # Salinity solve; surface freshwater forcing enters the top cell RHS.
    d_tri = vs.salt[2:-2, 2:-2, :, vs.taup1]
    d_tri = update_add(d_tri, at[:, :, -1], settings.dt_tracer * vs.forc_salt_surface[2:-2, 2:-2] / vs.dzt[-1])
    sol = utilities.solve_implicit(a_tri, b_tri, c_tri, d_tri, water_mask, b_edge=b_tri_edge, edge_mask=edge_mask)
    vs.salt = update(vs.salt, at[2:-2, 2:-2, :, vs.taup1], npx.where(water_mask, sol, vs.salt[2:-2, 2:-2, :, vs.taup1]))
    # Mixing tendencies diagnosed from the change across the implicit solve.
    vs.dtemp_vmix = (vs.temp[:, :, :, vs.taup1] - vs.dtemp_vmix) / settings.dt_tracer
    vs.dsalt_vmix = (vs.salt[:, :, :, vs.taup1] - vs.dsalt_vmix) / settings.dt_tracer
    """
    boundary exchange
    """
    vs.temp = update(
        vs.temp, at[..., vs.taup1], utilities.enforce_boundaries(vs.temp[..., vs.taup1], settings.enable_cyclic_x)
    )
    vs.salt = update(
        vs.salt, at[..., vs.taup1], utilities.enforce_boundaries(vs.salt[..., vs.taup1], settings.enable_cyclic_x)
    )
    return KernelOutput(dtemp_vmix=vs.dtemp_vmix, temp=vs.temp, dsalt_vmix=vs.dsalt_vmix, salt=vs.salt)
@veros_kernel
def surf_densityf(state):
    """Diagnose the surface density flux from heat and freshwater forcing."""
    vs = state.variables
    surf_salt = vs.salt[:, :, -1, vs.taup1]
    surf_temp = vs.temp[:, :, -1, vs.taup1]
    surf_press = npx.abs(vs.zt[-1])
    # drho/dT * Q_T + drho/dS * Q_S, masked to wet surface cells.
    drho_dT = density.get_drhodT(state, surf_salt, surf_temp, surf_press)
    drho_dS = density.get_drhodS(state, surf_salt, surf_temp, surf_press)
    vs.forc_rho_surface = vs.maskT[:, :, -1] * (
        drho_dT * vs.forc_temp_surface
        + drho_dS * vs.forc_salt_surface
    )
    return KernelOutput(forc_rho_surface=vs.forc_rho_surface)
@veros_kernel
def diag_P_diss_v(state):
vs = state.variables
settings = state.settings
vs.P_diss_v = update(vs.P_diss_v, at[...], 0.0)
aloc = allocate(state.dimensions, ("xt", "yt", "zt"))
if settings.enable_conserve_energy:
"""
diagnose dissipation of dynamic enthalpy by vertical mixing
"""
fxa = (-vs.int_drhodT[2:-2, 2:-2, 1:, vs.taup1] + vs.int_drhodT[2:-2, 2:-2, :-1, vs.taup1]) / vs.dzw[
npx.newaxis, npx.newaxis, :-1
]
vs.P_diss_v = update_add(
vs.P_diss_v,
at[2:-2, 2:-2, :-1],
-settings.grav
/ settings.rho_0
* fxa
* vs.kappaH[2:-2, 2:-2, :-1]
* (vs.temp[2:-2, 2:-2, 1:, vs.taup1] - vs.temp[2:-2, 2:-2, :-1, vs.taup1])
/ vs.dzw[npx.newaxis, npx.newaxis, :-1]
* vs.maskW[2:-2, 2:-2, :-1],
)
fxa = (-vs.int_drhodS[2:-2, 2:-2, 1:, vs.taup1] + vs.int_drhodS[2:-2, 2:-2, :-1, vs.taup1]) / vs.dzw[
| |
<reponame>Eastsouthern/datascope<filename>experiments/scenarios/base.py
import collections.abc
import datetime
import logging
import logging.handlers
import numpy as np
import os
import pandas as pd
import random
import re
import string
import sys
import threading
import time
import traceback
import warnings
import yaml
from abc import ABC, abstractmethod
from enum import Enum
from glob import glob
from inspect import signature
from io import TextIOBase, StringIO, SEEK_END
from itertools import product
from logging import Logger
from matplotlib.figure import Figure
from numpy import ndarray
from pandas import DataFrame
from ray.util.multiprocessing import Pool
from ray.util.queue import Queue
from shutil import copyfileobj
from tqdm.auto import tqdm
from tqdm.contrib.logging import logging_redirect_tqdm
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
get_origin,
get_args,
overload,
Union,
)
class PropertyTag(property):
    """A ``property`` subclass that can carry a domain of admissible values."""

    # Optional set of allowed values for the tagged property.
    domain: Optional[Iterable] = None

    @classmethod
    def get_properties(cls: Type[property], target: object) -> Dict[str, property]:
        """Collect all members of *target* that are instances of *cls*."""
        found: Dict[str, property] = {}
        for attr_name in dir(target):
            if not hasattr(target, attr_name):
                continue
            candidate = getattr(target, attr_name)
            if isinstance(candidate, cls):
                found[attr_name] = candidate
        return found
class attribute(PropertyTag):
    """Scenario input marker: a tagged property with an optional value domain."""

    def __init__(
        self,
        fget: Optional[Callable[[Any], Any]] = None,
        fset: Optional[Callable[[Any, Any], None]] = None,
        fdel: Optional[Callable[[Any], None]] = None,
        doc: Optional[str] = None,
        domain: Optional[Iterable] = None,
    ) -> None:
        super().__init__(fget, fset, fdel, doc)
        self.domain = domain

    def __call__(
        self,
        fget: Optional[Callable[[Any], Any]] = None,
        fset: Optional[Callable[[Any, Any], None]] = None,
        fdel: Optional[Callable[[Any], None]] = None,
        doc: Optional[str] = None,
    ) -> "attribute":
        # Build a fresh instance, falling back to the stored accessors for any
        # omitted argument; the domain is always carried over.
        return type(self)(
            self.fget if fget is None else fget,
            self.fset if fset is None else fset,
            self.fdel if fdel is None else fdel,
            self.__doc__ if doc is None else doc,
            self.domain,
        )

    # Flag used to recognize scenario inputs.
    __isattribute__ = True
class result(PropertyTag):
    # Marker property type for scenario outputs; distinguished from inputs
    # (``attribute``) by this flag.
    __isresult__ = True
def extract_simpletype(target: object) -> type:
    # NOTE(review): stub — always returns None despite the ``type`` annotation.
    # Presumably a placeholder; confirm whether anything still calls it.
    pass
def extract_enumtype(target: object) -> Optional[Type[Enum]]:
    """Return the first Enum subclass found in *target* or its type arguments.

    *target* may be an Enum class itself or a typing construct such as
    ``Optional[SomeEnum]``; returns ``None`` when no Enum type is present.
    """
    if isinstance(target, type) and issubclass(target, Enum):
        return target
    if get_origin(target) is None:
        return None
    # Recurse into generic arguments (e.g. Optional[E] -> (E, NoneType)).
    for candidate in get_args(target):
        nested = extract_enumtype(candidate)
        if isinstance(nested, type) and issubclass(nested, Enum):
            return nested
    return None
def get_property_and_getter(target: object, name: str) -> Tuple[property, Callable]:
    """Return the property object *name* on *target* together with its getter.

    Raises ValueError when the member is not a property or has no getter.
    """
    prop = getattr(target, name)
    if not isinstance(prop, property):
        raise ValueError("The specified member '%s' is not a property." % name)
    getter = prop.fget
    if getter is None:
        raise ValueError("The specified member '%s' does not have a getter." % name)
    return prop, getter
def get_property_type(target: object, name: str) -> Optional[Type]:
    """Return the scalar type annotated on the getter of property *name*.

    Container annotations (Sequence/Iterable/list/set) are unwrapped to their
    element type, and Optional/Union annotations to their first non-None
    member. Returns None when no concrete type can be determined.
    """
    _, getter = get_property_and_getter(target, name)
    sign = signature(getter)
    a = sign.return_annotation
    # Unwrap e.g. Sequence[int] -> int.
    if get_origin(a) in [collections.abc.Sequence, collections.abc.Iterable, list, set] and len(get_args(a)) > 0:
        a = get_args(a)[0]
    if get_origin(a) is Union:
        # BUG FIX: the previous filter `x is not None` never excluded NoneType
        # (the class object is not the None singleton), so Union[None, X]
        # yielded NoneType. Compare against type(None) instead.
        a = next(x for x in get_args(a) if x is not type(None))
    return a if isinstance(a, type) else None
def get_property_domain(target: object, name: str) -> List[Any]:
    """Return the list of admissible values for property *name* on *target*.

    Precedence: an explicit PropertyTag domain, then ``[False, True]`` for a
    bool-annotated getter, then the members of an annotated Enum; otherwise
    ``[None]`` (unconstrained).
    """
    prop, getter = get_property_and_getter(target, name)
    annotation = signature(getter).return_annotation
    enum_type = extract_enumtype(annotation)
    if isinstance(prop, PropertyTag) and prop.domain is not None:
        return list(prop.domain)
    if annotation is bool:
        return [False, True]
    if enum_type is None:
        return [None]
    return list(enum_type.__members__.values())
def get_property_helpstring(target: object, name: str) -> Optional[str]:
    """Return the docstring of the getter of property *name*, if any."""
    getter = get_property_and_getter(target, name)[1]
    return getter.__doc__
def get_property_default(target: object, name: str) -> Optional[Any]:
    """Return the default of the ``__init__`` parameter named *name*.

    None is returned when the parameter does not exist or has no default.
    """
    init_sign = signature(getattr(target, "__init__"))
    param = init_sign.parameters.get(name)
    if param is None or param.default is param.empty:
        return None
    return param.default
def get_property_value(target: object, name: str) -> Any:
    """Return the value of member *name* on *target*.

    For an instance this is a plain attribute lookup. For a class, only
    properties with a getter are evaluated (the getter receives the class
    itself); any other class member yields None.
    """
    member = getattr(target, name)
    if not isinstance(target, type):
        return member
    if isinstance(member, property) and member.fget is not None:
        return member.fget(target)
    return None
def get_property_isiterable(target: object, name: str) -> bool:
    """True when the getter of property *name* is annotated as a collection."""
    getter = get_property_and_getter(target, name)[1]
    origin = get_origin(signature(getter).return_annotation)
    return origin in [collections.abc.Sequence, collections.abc.Iterable, list, set]
def has_attribute_value(target: object, name: str, value: Any, ignore_none: bool = True) -> bool:
    """Check whether attribute *name* of *target* matches *value*.

    *value* may be a single value or an iterable of candidates. With
    *ignore_none*, a None attribute value or a ``[None]`` candidate list is
    treated as a wildcard match.
    """
    current = get_property_value(target, name)
    candidates = value if isinstance(value, Iterable) else [value]
    if ignore_none and (current is None or candidates == [None]):
        return True
    return current in candidates
def save_dict(source: Dict[str, Any], dirpath: str, basename: str) -> None:
    """Persist *source* into *dirpath*, choosing a file format per value type.

    Scalars (int/float/bool/str) and Enum values are collected into a single
    ``<basename>.yaml``. Remaining entries are written one file each:
    ndarray -> ``.npy``, DataFrame -> ``.csv``, dict -> ``.yaml``, matplotlib
    Figure -> ``.pdf`` and ``.png``. None values are skipped; any other type
    raises ValueError.
    """
    basedict: Dict[str, Any] = dict((k, v) for (k, v) in source.items() if type(v) in [int, float, bool, str])
    basedict.update(dict((k, v.value) for (k, v) in source.items() if isinstance(v, Enum)))
    if len(basedict) > 0:
        with open(os.path.join(dirpath, ".".join([basename, "yaml"])), "w") as f:
            yaml.safe_dump(basedict, f)
    for name, data in source.items():
        if data is None:
            continue
        if name not in basedict:
            if isinstance(data, ndarray):
                filename = os.path.join(dirpath, ".".join([basename, name, "npy"]))
                np.save(filename, data)
            elif isinstance(data, DataFrame):
                filename = os.path.join(dirpath, ".".join([basename, name, "csv"]))
                data.to_csv(filename)
            elif isinstance(data, dict):
                filename = os.path.join(dirpath, ".".join([basename, name, "yaml"]))
                with open(filename, "w") as f:
                    yaml.safe_dump(data, f)
            elif isinstance(data, Figure):
                # Legends and free-floating texts must be passed as extra
                # artists so the tight bounding box includes them.
                extra_artists = tuple(data.legends) + tuple(data.texts)
                filename = os.path.join(dirpath, ".".join([basename, name, "pdf"]))
                data.savefig(fname=filename, bbox_extra_artists=extra_artists, bbox_inches="tight")
                filename = os.path.join(dirpath, ".".join([basename, name, "png"]))
                data.savefig(fname=filename, bbox_extra_artists=extra_artists, bbox_inches="tight")
            else:
                raise ValueError("Key '%s' has unsupported type '%s'." % (name, str(type(data))))
def load_dict(dirpath: str, basename: str) -> Dict[str, Any]:
    """Inverse of ``save_dict``: reassemble a dict from files in *dirpath*.

    Loads ``<basename>.yaml`` scalars first, then every
    ``<basename>.<name>.<ext>`` file by extension (.npy/.csv/.yaml); files
    with other extensions are skipped with a warning.
    """
    if not os.path.isdir(dirpath):
        raise ValueError("The provided path '%s' does not point to a directory." % dirpath)
    res: Dict[str, Any] = {}
    filename = os.path.join(dirpath, ".".join([basename, "yaml"]))
    if os.path.isfile(filename):
        with open(filename, "r") as f:
            res.update(yaml.safe_load(f))
    for path in glob(os.path.join(dirpath, basename) + "*"):
        filename = os.path.basename(path)
        base, ext = os.path.splitext(filename)
        # Strip "<basename>." to recover the original key name.
        name = base[len(basename) + 1 :]  # noqa: E203
        if name == "":
            continue
        if ext == ".npy":
            res[name] = np.load(path)
        elif ext == ".csv":
            res[name] = pd.read_csv(path)
        elif ext == ".yaml":
            with open(path) as f:
                res[name] = yaml.safe_load(f)
        else:
            warnings.warn("File '%s' with unsupported extension '%s' will be ignored." % (path, ext))
    return res
class Progress:
    """Progress reporting that also works across worker processes.

    Each reporter emits ``Event`` objects which are either handled in-process
    (no queue) or pushed onto a shared queue so a single consumer can drive
    the actual tqdm bars via ``handle``.
    """

    class Event:
        # A single progress notification, tagged with the reporter's id.
        class Type(Enum):
            START = "start"
            UPDATE = "update"
            CLOSE = "close"

        def __init__(self, type: "Progress.Event.Type", id: str, **kwargs: Any) -> None:
            self.type = type
            self.id = id
            self.kwargs = kwargs

    # Class-level registry of live tqdm bars keyed by reporter id
    # (shared across all Progress instances by design).
    pbars: Dict[str, tqdm] = {}

    def __init__(self, queue: Optional[Queue] = None, id: Optional[str] = None) -> None:
        self._queue = queue
        # Random id keeps bars from distinct reporters separate.
        self._id = id if id is not None else "".join(random.choices(string.ascii_lowercase + string.digits, k=10))

    def start(self, total: Optional[int] = None, desc: Optional[str] = None) -> None:
        self._submit(Progress.Event(Progress.Event.Type.START, self._id, total=total, desc=desc))

    def update(self, n: int = 1) -> None:
        self._submit(Progress.Event(Progress.Event.Type.UPDATE, self._id, n=n))

    def close(self) -> None:
        self._submit(Progress.Event(Progress.Event.Type.CLOSE, self._id))

    def _submit(self, event: "Progress.Event") -> None:
        # Without a queue we are in-process: handle the event immediately.
        if self._queue is None:
            self.handle(event)
        else:
            self._queue.put(event)

    def new(self, id: Optional[str] = None) -> "Progress":
        # Child reporter sharing the same queue (e.g. one per subtask).
        return Progress(self._queue, id)

    @property
    def queue(self) -> Optional[Queue]:
        return self._queue

    @queue.setter
    def queue(self, value: Optional[Queue]) -> None:
        self._queue = value

    @classmethod
    def refresh(cls: Type["Progress"]) -> None:
        for pbar in cls.pbars.values():
            pbar.refresh()

    @classmethod
    def handle(cls: Type["Progress"], event: "Progress.Event") -> None:
        # Consumer side: translate events into tqdm operations.
        if event.type == Progress.Event.Type.START:
            cls.pbars[event.id] = tqdm(desc=event.kwargs["desc"], total=event.kwargs["total"])
        elif event.type == Progress.Event.Type.UPDATE:
            cls.pbars[event.id].update(event.kwargs["n"])
        elif event.type == Progress.Event.Type.CLOSE:
            cls.pbars[event.id].close()
            del cls.pbars[event.id]
            # Ensure that bars get redrawn properly after they reshuffle due to closure of one of them.
            cls.refresh()
class Scenario(ABC):
scenarios: Dict[str, Type["Scenario"]] = {}
scenario_domains: Dict[str, Dict[str, Set[Any]]] = {}
attribute_domains: Dict[str, Set[Any]] = {}
attribute_helpstrings: Dict[str, Optional[str]] = {}
attribute_types: Dict[str, Optional[type]] = {}
attribute_defaults: Dict[str, Optional[Any]] = {}
attribute_isiterable: Dict[str, bool] = {}
_scenario: Optional[str] = None
def __init__(self, id: Optional[str] = None, logstream: Optional[TextIOBase] = None, **kwargs: Any) -> None:
super().__init__()
self._id = id if id is not None else "".join(random.choices(string.ascii_lowercase + string.digits, k=10))
self._logstream = logstream if logstream is not None else StringIO()
self._progress = Progress(id=self._id)
self._attributes: Optional[Dict[str, Any]] = None
def __init_subclass__(cls: Type["Scenario"], id: Optional[str] = None) -> None:
if id is None:
return
# Register scenario under the given name.
cls._scenario = id
Scenario.scenarios[id] = cls
assert isinstance(Scenario.scenario, PropertyTag)
assert isinstance(Scenario.scenario.domain, set)
Scenario.scenario.domain.add(id)
# Extract domain of scenario.
props = attribute.get_properties(cls)
domain = dict((name, set(get_property_domain(cls, name))) for name in props.keys())
Scenario.scenario_domains[id] = domain
# Include the new domain into the total domain.
for name, values in domain.items():
Scenario.attribute_domains.setdefault(name, set()).update(values)
# Extract types of the scenario attributes.
types = dict((name, get_property_type(cls, name)) for name in props.keys())
Scenario.attribute_types.update(types)
# Extract helpstrings of the scenario attributes.
helpstrings = dict((name, get_property_helpstring(cls, name)) for name in props.keys())
Scenario.attribute_helpstrings.update(helpstrings)
# Extract types of the scenario attributes.
defaults = dict((name, get_property_default(cls, name)) for name in props.keys())
Scenario.attribute_defaults.update(defaults)
# Set all attributes to be iterable when passed to get_instances.
isiterable = dict((k, True) for k in props.keys())
Scenario.attribute_isiterable.update(isiterable)
def run(self, progress_bar: bool = True, console_log: bool = True) -> None:
# Set up logging.
formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] %(message)s")
fh = logging.StreamHandler(self._logstream)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
ch: Optional[logging.Handler] = None
if console_log:
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
self.logger.addHandler(ch)
self.logger.info("Run started for scenario: %s" % str(self))
timestart = time.time()
self._run(progress_bar=progress_bar)
duration = datetime.timedelta(seconds=int(time.time() - | |
<gh_stars>10-100
#!/usr/bin/env python
'''
Script to change the grid to have different resolutions at different depths.
'''
from constants import *
import numpy as np
def regrid(self):
    '''
    Called in both firn_density_spin and firn_density_nospin
    There are 3 subgrids in the regrid module. Grid 1 is the high resolution grid near the surface. Grid 2 is the lower resolution grid at greater depths; a user-defined number of nodes (self.c['nodestocombine']; refer as NTC here) are combined occasionally (every NTC time steps) to make one new node within grid 2. Grid 3 is at the bottom and has split up one grid 2 node back into a high-resolution grid (1 node into NTC nodes), which can be removed at each time step to keep the model Lagrangian.
    the variable gridtrack keeps track of which subgrid each node is in.
    '''
    # NOTE(review): ind10 appears unused below (only ind1 is used) — confirm.
    ind10 = np.where(self.gridtrack==1)[0] # all of the nodes in subgrid 1.
    ind1 = np.where(self.gridtrack==1)[0][-1*self.c['nodestocombine']:] # the last NTC nodes of subgrid 1; will be combined into 1 node within subgrid 2.
    ind1a = ind1[0]
    ind1b = ind1[-1]
    ind0 = ind1[0] - 1 # new last node of grid 1
    ### create the properties of the new subgrid 2 node
    # Conserve mass and thickness exactly; temperature and bdot_mean use
    # mass-weighted averages, age a plain mean, LWC a sum.
    g2dz = np.array([np.sum(self.dz[ind1])])
    g2mass = np.sum(self.mass[ind1])
    g2rho = g2mass/g2dz
    g2Tz0 = np.sum(self.Tz[ind1]*self.mass[ind1])
    g2Tz = np.array([g2Tz0 / g2mass]) # Use a weighted average for temperature (effectively the enthalpy)
    g2gt = 2 #gridtrack
    g2age = np.mean(self.age[ind1])
    # g2bm = np.mean(self.bdot_mean[ind1])
    g2bm0 = np.sum(self.bdot_mean[ind1]*self.mass[ind1])
    g2bm = np.array([g2bm0 / g2mass])
    g2lwc = np.sum(self.LWC[ind1])
    ### split up the last node in grid 2 into NTC nodes. Each node retains the density, age, etc of the old subgrid 2 node.
    g3dz = self.dz[-1]/self.nodestocombine * np.ones(self.nodestocombine)
    g3rho = self.rho[-1] * np.ones(self.nodestocombine)
    g3mass = g3rho * g3dz
    g3gt = 3 * np.ones(self.nodestocombine)
    g3Tz = self.Tz[-1]* np.ones(self.nodestocombine)
    g3age = self.age[-1]*np.ones(self.nodestocombine)
    g3bm = self.bdot_mean[-1]*np.ones(self.nodestocombine)
    g3lwc = self.LWC[-1]/self.nodestocombine * np.ones(self.nodestocombine)
    ### combine the new and old nodes into the full grid.
    self.dz = np.concatenate((self.dz[0:ind1a],g2dz,self.dz[ind1b+1:-1],g3dz))
    self.z = self.dz.cumsum(axis=0)
    self.z = np.concatenate(([0], self.z[:-1]))
    self.rho = np.concatenate((self.rho[0:ind1a],g2rho,self.rho[ind1b+1:-1],g3rho))
    self.Tz = np.concatenate((self.Tz[0:ind1a],g2Tz,self.Tz[ind1b+1:-1],g3Tz))
    self.mass = np.concatenate((self.mass[0:ind1a],[g2mass],self.mass[ind1b+1:-1],g3mass))
    # Overburden stress: cumulative weight of the column above each node.
    self.sigma = self.mass * self.dx * GRAVITY
    self.sigma = self.sigma.cumsum(axis = 0)
    self.mass_sum = self.mass.cumsum(axis = 0)
    self.age = np.concatenate((self.age[0:ind1a],[g2age],self.age[ind1b+1:-1],g3age))
    self.bdot_mean = np.concatenate((self.bdot_mean[0:ind1a],g2bm,self.bdot_mean[ind1b+1:-1],g3bm))
    self.LWC = np.concatenate((self.LWC[0:ind1a],[g2lwc],self.LWC[ind1b+1:-1],g3lwc))
    self.gridtrack = np.concatenate((self.gridtrack[0:ind1a],[g2gt],self.gridtrack[ind1b+1:-1],g3gt))
    if self.c['physGrain']:
        #g2r2 = np.array([np.mean(self.r2)])
        g2r2 = np.mean(self.r2[ind1]) # VV added
        g3r2 = self.r2[-1]* np.ones(self.nodestocombine)
        self.r2 = np.concatenate((self.r2[0:ind1a],[g2r2],self.r2[ind1b+1:-1],g3r2))
    return self.dz, self.z, self.rho, self.Tz, self.mass, self.sigma, self.mass_sum, self.age, self.bdot_mean, self.LWC, self.gridtrack, self.r2
def init_regrid(self):
    '''
    Used in firn_density_spin for the initial regridding.

    Builds the initial 3-subgrid layout: grid 1 (high resolution) above
    self.c['grid1bottom'], grid 2 keeping only every NTC-th node below it,
    and grid 3 re-splitting the deepest NTC node thicknesses at the bottom.
    '''
    grid1b = self.c['grid1bottom']
    self.nodestocombine = self.c['nodestocombine']
    ind1 = np.where(self.z<grid1b)[0]
    ind2 = np.where(self.z>=grid1b)[0]
    grid1z = self.z[ind1]
    # Keep only every NTC-th depth in the coarse region.
    grid2z = self.z[ind2[0]::self.nodestocombine]
    self.z = np.concatenate((grid1z,grid2z))
    grid3z = self.z[-1] + np.cumsum(self.dz[-1*self.nodestocombine:])
    self.z = np.concatenate((self.z,grid3z))
    self.dz = np.diff(self.z)
    self.dz = np.append(self.dz, self.dz[-1])
    self.gridLen = len(self.z)
    self.dx = np.ones(self.gridLen)
    # Tag each node with its subgrid (1 near surface, 2 coarse, 3 bottom).
    self.gridtrack = 2 * np.ones(self.gridLen)
    self.gridtrack[ind1] = 1
    self.gridtrack[-1*self.nodestocombine:] = 3
    print('After regrid, grid length is', self.gridLen)
    return self.nodestocombine, self.z, self.dz, self.gridLen, self.dx, self.gridtrack
############### VV changes 09/12/2020 ###############
def regrid22(self):
    '''
    Called in both firn_density_spin and firn_density_nospin
    5 grids:
    grid1 -> high resolution determined by accumulation events
    grid2 -> low resolution by merging the batch of lowest nodestocombine layers of grid 1
    grid22 -> very low resolution by merging the batch of lowest multnodestocombine layers of grid 2
    grid23 -> low resolution by splitting the lowest layer of grid22 in multnodestocombine thinner layers
    New layer of grid23 is formed only when their stock is empty
    grid3 -> high resolution by splitting the lowest layer of grid23 in nodestocombine layers
    gridtrack keeps track of which grid each layer is in
    '''
    n1 = self.c['nodestocombine'] # nodes to combine from grid1 to grid2 and to split from grid23 to grid3
    n2 = self.c['multnodestocombine'] # nodes to combine from grid2 to grid22 and to split from grid22 to grid23
    # if self.c['multnodestocombine'] is set to 0 -> process of grid22 is turned off and regrid works as old regrid
    inds1 = np.where(self.gridtrack==1)[0] # layers in grid1
    inds2 = np.where(self.gridtrack==2)[0] # layers in grid2
    i1_2 = inds1[-1*n1:] # layers to transition from grid1 to grid2
    ind2a = i1_2[0] # index of the future upper layer of grid2
    ind2b = inds2[0] # index of old upper layer of grid2
    # Next grids
    inds22 = np.where(self.gridtrack==22)[0] # all nodes in grid22
    inds23 = np.where(self.gridtrack==23)[0] # all nodes in grid23
    ### properties of the new subgrid 2 node
    # Conserving merges: thickness/mass/LWC summed, temperature mass-weighted.
    g2dz = np.array([np.sum(self.dz[i1_2])]) # sum thickness
    g2mass = np.sum(self.mass[i1_2]) # sum mass
    g2rho = g2mass/g2dz
    g2Tz0 = np.sum(self.Tz[i1_2]*self.mass[i1_2])
    g2Tz = np.array([g2Tz0 / g2mass]) # Use a weighted average for temperature (effectively the enthalpy)
    g2gt = 2 #gridtrack
    g2age = np.mean(self.age[i1_2]) # mean age
    #g2age = np.sum(self.age[i1_2]*self.mass[i1_2])/g2mass #VV test weighted average for age -> change is imperceptible
    g2bdm = np.mean(self.bdot_mean[i1_2]) #mean bdot_mean
    g2lwc = np.sum(self.LWC[i1_2]) # sum for lwc
    if self.r2 is not None:
        g2r2 = np.mean(self.r2[i1_2]) # mean for r2
    if (len(inds23)==0 and n2>0): # No more layer in grid23 -> we have to split a layer from grid22
        ## First: merge n2 layers from grid2 ##
        i2_22 = inds2[-1*n2:] # layers to transition from grid2 to grid22
        ind22a = i2_22[0] # index of the future upper layer of grid22
        ind22b = inds22[0] # current upper node of grid22
        # Properties of the new grid22 layer
        g22dz = np.array([np.sum(self.dz[i2_22])]) # sum thickness
        g22mass = np.sum(self.mass[i2_22]) # sum mass
        g22rho = g22mass/g22dz
        g22Tz0 = np.sum(self.Tz[i2_22]*self.mass[i2_22])
        g22Tz = np.array([g22Tz0 / g22mass]) # Use a weighted average for temperature (the enthalpy)
        g22gt = 22 #gridtrack
        g22age = np.mean(self.age[i2_22])
        g22bdm = np.mean(self.bdot_mean[i2_22])
        g22lwc = np.sum(self.LWC[i2_22])
        if self.r2 is not None:
            g22r2 = np.mean(self.r2[i2_22])
        ## Second: split the last grid22 layer in n2 layers for grid23
        # Splits preserve intensive properties; extensive ones (dz, LWC) are
        # divided evenly among the new thinner layers.
        ind22c = inds22[-1] # current lower layer of grid22 -> to be split (is also the last layer of the column)
        g23dz = self.dz[ind22c]/n2 * np.ones(n2)
        g23rho = self.rho[ind22c] * np.ones(n2)
        g23mass = g23rho * g23dz
        g23gt = 23 * np.ones(n2) #gridtrack values
        g23Tz = self.Tz[ind22c]* np.ones(n2)
        g23age = self.age[ind22c]*np.ones(n2)
        g23bdm = self.bdot_mean[ind22c]*np.ones(n2)
        g23lwc = self.LWC[ind22c]/n2 * np.ones(n2)
        if self.r2 is not None:
            g23r2 = self.r2[ind22c]*np.ones(n2)
        ## Third: split the last layer of the new grid23 in n1 layers for grid3
        g3dz = g23dz[-1]/n1 * np.ones(n1)
        g3rho = g23rho[-1] * np.ones(n1)
        g3mass = g3rho * g3dz
        g3gt = 3 * np.ones(n1)
        g3Tz = g23Tz[-1]* np.ones(n1)
        g3age = g23age[-1]*np.ones(n1)
        g3bdm = g23bdm[-1]*np.ones(n1)
        g3lwc = g23lwc[-1]/n1 * np.ones(n1)
        if self.r2 is not None:
            g3r2 = g23r2[-1]*np.ones(n1)
        ## Fourth: concatenate everything together
        self.dz = np.concatenate((self.dz[0:ind2a],g2dz,self.dz[ind2b:ind22a],g22dz,self.dz[ind22b:ind22c],g23dz[0:-1],g3dz))
        self.z = self.dz.cumsum(axis=0)
        self.z = np.concatenate(([0], self.z[:-1]))
        self.rho = np.concatenate((self.rho[0:ind2a],g2rho,self.rho[ind2b:ind22a],g22rho,self.rho[ind22b:ind22c],g23rho[0:-1],g3rho))
        self.Tz = np.concatenate((self.Tz[0:ind2a],g2Tz,self.Tz[ind2b:ind22a],g22Tz,self.Tz[ind22b:ind22c],g23Tz[0:-1],g3Tz))
        self.mass = np.concatenate((self.mass[0:ind2a],[g2mass],self.mass[ind2b:ind22a],[g22mass],self.mass[ind22b:ind22c],g23mass[0:-1],g3mass))
        self.mass_sum = self.mass.cumsum(axis = 0)
        self.age = np.concatenate((self.age[0:ind2a],[g2age],self.age[ind2b:ind22a],[g22age],self.age[ind22b:ind22c],g23age[0:-1],g3age))
        self.bdot_mean = np.concatenate((self.bdot_mean[0:ind2a],[g2bdm],self.bdot_mean[ind2b:ind22a],[g22bdm],self.bdot_mean[ind22b:ind22c],g23bdm[0:-1],g3bdm))
        self.LWC = np.concatenate((self.LWC[0:ind2a],[g2lwc],self.LWC[ind2b:ind22a],[g22lwc],self.LWC[ind22b:ind22c],g23lwc[0:-1],g3lwc))
        # Overburden stress includes the weight of liquid water content.
        self.sigma = (self.mass+self.LWC*RHO_W_KGM)*self.dx*GRAVITY
        self.sigma = self.sigma.cumsum(axis = 0)
        self.gridtrack = np.concatenate((self.gridtrack[0:ind2a],[g2gt],self.gridtrack[ind2b:ind22a],[g22gt],self.gridtrack[ind22b:ind22c],g23gt[0:-1],g3gt))
        if self.r2 is not None:
            self.r2 = np.concatenate((self.r2[0:ind2a],[g2r2],self.r2[ind2b:ind22a],[g22r2],self.r2[ind22b:ind22c],g23r2[0:-1],g3r2))
    if (len(inds23)>0 or n2==0): # Still some layers in grid23 -> no need to split a layer from grid22
        ## Split the last layer of grid23 (layer [-1]) in n1 layers for grid3
        g3dz = self.dz[-1]/n1 * np.ones(n1)
        g3rho = self.rho[-1] * np.ones(n1)
        g3mass = g3rho * g3dz
        g3gt = 3 * np.ones(n1)
        g3Tz = self.Tz[-1]* np.ones(n1)
        g3age = self.age[-1]*np.ones(n1)
        g3bdm = self.bdot_mean[-1]*np.ones(n1)
        g3lwc = self.LWC[-1]/n1 * np.ones(n1)
        if self.r2 is not None:
            g3r2 = self.r2[-1]*np.ones(n1)
        ## Concatenate everything together
        self.dz = np.concatenate((self.dz[0:ind2a],g2dz,self.dz[ind2b:-1],g3dz))
        self.z = self.dz.cumsum(axis=0)
        self.z = np.concatenate(([0], self.z[:-1]))
        #print("self.z[-1]:",self.z[-1])
        self.rho = np.concatenate((self.rho[0:ind2a],g2rho,self.rho[ind2b:-1],g3rho))
        self.Tz = np.concatenate((self.Tz[0:ind2a],g2Tz,self.Tz[ind2b:-1],g3Tz))
        self.mass = np.concatenate((self.mass[0:ind2a],[g2mass],self.mass[ind2b:-1],g3mass))
        self.mass_sum = self.mass.cumsum(axis = 0)
        self.age = np.concatenate((self.age[0:ind2a],[g2age],self.age[ind2b:-1],g3age))
        self.bdot_mean = np.concatenate((self.bdot_mean[0:ind2a],[g2bdm],self.bdot_mean[ind2b:-1],g3bdm))
        self.LWC = np.concatenate((self.LWC[0:ind2a],[g2lwc],self.LWC[ind2b:-1],g3lwc))
        self.sigma = (self.mass+self.LWC*RHO_W_KGM)*self.dx*GRAVITY
        self.sigma = self.sigma.cumsum(axis = 0)
        self.gridtrack = np.concatenate((self.gridtrack[0:ind2a],[g2gt],self.gridtrack[ind2b:-1],g3gt))
        if self.r2 is not None:
            self.r2 = np.concatenate((self.r2[0:ind2a],[g2r2],self.r2[ind2b:-1],g3r2))
    #self.bdot_mean = (np.concatenate(([self.mass_sum[0] / (RHO_I * S_PER_YEAR)], self.mass_sum[1:] * self.t / (self.age[1:] * RHO_I))))*self.c['stpsPerYear']*S_PER_YEAR
    #print('sum(self.dz):',sum(self.dz))
    return self.dz, self.z, self.rho, self.Tz, self.mass, self.sigma, self.mass_sum, self.age, self.bdot_mean, self.LWC, self.gridtrack, self.r2
def regrid22_reciprocal(self):
'''
Reciprocal of regrid22: must be called if we accumulate too many | |
3: 3, 'a4': 4, 'a5': 5})
assert r.zrevrangebyscore('a', 4, 2) == ['a4', 3, 'a2']
# slicing with start/num
assert r.zrevrangebyscore('a', 4, 2, start=1, num=2) == \
[3, 'a2']
# withscores
assert r.zrevrangebyscore('a', 4, 2, withscores=True) == \
[('a4', 4.0), (3, 3.0), ('a2', 2.0)]
# custom score function
assert r.zrevrangebyscore('a', 4, 2, withscores=True,
score_cast_func=int) == \
[('a4', 4), (3, 3), ('a2', 2)]
# --- Sorted-set (ZSET) rank/score/union tests. `r` is the client fixture;
# member keys deliberately mix str and int to exercise (de)serialization.
def test_zrevrank(self, r):
    # Ranks are 0-based, counted from the highest score downwards.
    r.zadd('a', {1: 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5})
    assert r.zrevrank('a', 1) == 4
    assert r.zrevrank('a', 'a2') == 3
    # Unknown member yields None rather than raising.
    assert r.zrevrank('a', 'a6') is None

def test_zscore(self, r):
    r.zadd('a', {1: 1, 'a2': 2, 'a3': 3})
    # Scores always come back as floats.
    assert r.zscore('a', 1) == 1.0
    assert r.zscore('a', 'a2') == 2.0
    assert r.zscore('a', 'a4') is None

def test_zunionstore_sum(self, r):
    # Default aggregate is SUM of scores across the source sets.
    r.zadd('a', {1: 1, 'a2': 1, 'a3': 1})
    r.zadd('b', {1: 2, 'a2': 2, 'a3': 2})
    r.zadd('c', {1: 6, 'a3': 5, 'a4': 4})
    assert r.zunionstore('d', ['a', 'b', 'c']) == 4
    assert r.zrange('d', 0, -1, withscores=True) == \
        [('a2', 3), ('a4', 4), ('a3', 8), (1, 9)]

def test_zunionstore_max(self, r):
    r.zadd('a', {1: 1, 'a2': 1, 'a3': 1})
    r.zadd('b', {1: 2, 'a2': 2, 'a3': 2})
    r.zadd('c', {1: 6, 'a3': 5, 'a4': 4})
    assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MAX') == 4
    assert r.zrange('d', 0, -1, withscores=True) == \
        [('a2', 2), ('a4', 4), ('a3', 5), (1, 6)]

def test_zunionstore_min(self, r):
    r.zadd('a', {1: 1, 'a2': 2, 'a3': 3})
    r.zadd('b', {1: 2, 'a2': 2, 'a3': 4})
    r.zadd('c', {1: 6, 'a3': 5, 'a4': 4})
    assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MIN') == 4
    assert r.zrange('d', 0, -1, withscores=True) == \
        [(1, 1), ('a2', 2), ('a3', 3), ('a4', 4)]

def test_zunionstore_with_weight(self, r):
    # Passing a dict maps source key -> score multiplier (WEIGHTS).
    r.zadd('a', {1: 1, 'a2': 1, 'a3': 1})
    r.zadd('b', {1: 2, 'a2': 2, 'a3': 2})
    r.zadd('c', {1: 6, 'a3': 5, 'a4': 4})
    assert r.zunionstore('d', {'a': 1, 'b': 2, 'c': 3}) == 4
    assert r.zrange('d', 0, -1, withscores=True) == \
        [('a2', 5), ('a4', 12), ('a3', 20), (1, 23)]
# HYPERLOGLOG TESTS
# PF* commands require server >= 2.8.9; counts are exact at these tiny sizes.
@skip_if_server_version_lt('2.8.9')
def test_pfadd(self, r):
    members = set(['1', 2, '3'])
    # First add of new elements returns 1; re-adding the same set returns 0.
    assert r.pfadd('a', *members) == 1
    assert r.pfadd('a', *members) == 0
    assert r.pfcount('a') == len(members)

@skip_if_server_version_lt('2.8.9')
def test_pfcount(self, r):
    members = set(['1', 2, '3'])
    r.pfadd('a', *members)
    assert r.pfcount('a') == len(members)
    members_b = set([2, '3', '4'])
    r.pfadd('b', *members_b)
    assert r.pfcount('b') == len(members_b)
    # Multi-key PFCOUNT returns the cardinality of the union.
    assert r.pfcount('a', 'b') == len(members_b.union(members))

@skip_if_server_version_lt('2.8.9')
def test_pfmerge(self, r):
    mema = set(['1', 2, '3'])
    memb = set([2, '3', '4'])
    memc = set(['5', '6', '7'])
    r.pfadd('a', *mema)
    r.pfadd('b', *memb)
    r.pfadd('c', *memc)
    # Merging c+a into d: union {'1',2,'3','5','6','7'} has 6 elements.
    r.pfmerge('d', 'c', 'a')
    assert r.pfcount('d') == 6
    # Merging b on top adds only '4'.
    r.pfmerge('d', 'b')
    assert r.pfcount('d') == 7
# HASH COMMANDS
def test_hget_and_hset(self, r):
    r.hmset('a', {1: 1, '2': 2, '3': 3})
    # field is not serialized: '1' instead of 1
    assert r.hget('a', '1') == 1
    assert r.hget('a', '2') == 2
    assert r.hget('a', '3') == 3
    # field was updated, redis returns 0
    assert r.hset('a', '2', '5') == 0
    assert r.hget('a', '2') == '5'
    # field is new, redis returns 1
    assert r.hset('a', '4', 4) == 1
    assert r.hget('a', '4') == 4
    # key inside of hash that doesn't exist returns null value
    assert r.hget('a', 'b') is None

def test_hdel(self, r):
    r.hmset('a', {'1': 1, 2: 2, '3': 3})
    # HDEL returns the number of fields actually removed.
    assert r.hdel('a', '2') == 1
    assert r.hget('a', '2') is None
    assert r.hdel('a', '1', '3') == 2
    assert r.hlen('a') == 0

def test_hexists(self, r):
    r.hmset('a', {1: 1, '2': 2, '3': 3})
    assert r.hexists('a', '1')
    assert not r.hexists('a', '4')

def test_hgetall(self, r):
    h = {'1': '1', 'a2': '2', 'a3': 3}
    r.hmset('a', h)
    assert r.hgetall('a') == h

def test_hincrby(self, r):
    # Incrementing a missing field starts from 0; default amount is 1.
    assert r.hincrby('a', '1') == 1
    assert r.hincrby('a', '1', amount=2) == 3
    assert r.hincrby('a', '1', amount=-2) == 1

@skip_if_server_version_lt('2.6.0')
def test_hincrbyfloat(self, r):
    assert r.hincrbyfloat('a', '1') == 1.0
    assert r.hincrbyfloat('a', '1') == 2.0
    assert r.hincrbyfloat('a', '1', 1.2) == 3.2

def test_hkeys(self, r):
    h = {'a1': '1', 'a2': 2, 'a3': '3'}
    r.hmset('a', h)
    # Hash field order is unspecified, so compare sorted key lists.
    local_keys = list(h.keys())
    remote_keys = r.hkeys('a')
    assert (sorted(local_keys) == sorted(remote_keys))

def test_hlen(self, r):
    r.hmset('a', {'1': 1, '2': 2, '3': 3})
    assert r.hlen('a') == 3

def test_hmget(self, r):
    assert r.hmset('a', {'a': 1, 'b': 2, 'c': '3'})
    assert r.hmget('a', 'a', 'b', 'c') == [1, 2, '3']

def test_hmset(self, r):
    h = {'a': '1', 'b': 2, 'c': '3'}
    assert r.hmset('a', h)
    assert r.hgetall('a') == h

def test_hsetnx(self, r):
    # Initially set the hash field
    assert r.hsetnx('a', '1', 1)
    assert r.hget('a', '1') == 1
    # Second HSETNX on an existing field is a no-op and returns falsy.
    assert not r.hsetnx('a', '1', 2)
    assert r.hget('a', '1') == 1

def test_hvals(self, r):
    h = {'a1': '1', 'a2': '2', 'a3': '3'}
    r.hmset('a', h)
    local_vals = list(h.values())
    remote_vals = r.hvals('a')
    assert sorted(local_vals) == sorted(remote_vals)

@skip_if_server_version_lt('3.2.0')
def test_hstrlen(self, r):
    r.hmset('a', {'1': 22, '2': '333'})
    # Not Supported
    # assert r.hstrlen('a', '1') == 2
    # assert r.hstrlen('a', '2') == 3
# SORT
def test_sort_basic(self, r):
    r.rpush('a', 3, 2, 1, 4)
    assert r.sort('a') == [1, 2, 3, 4]

def test_sort_limited(self, r):
    # start/num map to the LIMIT offset/count of SORT.
    r.rpush('a', 3, 2, 1, 4)
    assert r.sort('a', start=1, num=2) == [2, 3]

def test_sort_by(self, r):
    r['score:1'] = 8
    r['score:2'] = 3
    r['score:3'] = 5
    # Only supported for integer values
    r.rpush('a', 3, 2, 1)
    # Elements are ordered by the external keys score:<element>.
    assert r.sort('a', by='score:*') == [2, 3, 1]

def test_sort_get(self, r):
    r['user:1'] = 'u1'
    r['user:2'] = 'u2'
    r['user:3'] = 'u3'
    r.rpush('a', 2, 3, 1)
    # Not Supported
    # assert r.sort('a', get='user:*') == ['u1', 'u2', 'u3']

def test_sort_get_multi(self, r):
    r['user:1'] = 'u1'
    r['user:2'] = 'u2'
    r['user:3'] = 'u3'
    r.rpush('a', '2', '3', '1')
    # Not Supported
    # assert r.sort('a', get=('user:*', '#')) == \
    #     [b('u1'), b('1'), b('u2'), b('2'), b('u3'), b('3')]

def test_sort_get_groups_two(self, r):
    r['user:1'] = 'u1'
    r['user:2'] = 'u2'
    r['user:3'] = 'u3'
    r.rpush('a', '2', '3', '1')
    # Not Supported
    # assert r.sort('a', get=('user:*', '#'), groups=True) == \
    #     [(b('u1'), b('1')), (b('u2'), b('2')), (b('u3'), b('3'))]

def test_sort_groups_string_get(self, r):
    # groups=True with a single string get pattern is invalid input.
    r['user:1'] = 'u1'
    r['user:2'] = 'u2'
    r['user:3'] = 'u3'
    r.rpush('a', '2', '3', '1')
    with pytest.raises(redis.DataError):
        r.sort('a', get='user:*', groups=True)

def test_sort_groups_just_one_get(self, r):
    # groups=True requires at least two get patterns.
    r['user:1'] = 'u1'
    r['user:2'] = 'u2'
    r['user:3'] = 'u3'
    r.rpush('a', '2', '3', '1')
    with pytest.raises(redis.DataError):
        r.sort('a', get=['user:*'], groups=True)

def test_sort_groups_no_get(self, r):
    # groups=True without any get pattern is also invalid input.
    r['user:1'] = 'u1'
    r['user:2'] = 'u2'
    r['user:3'] = 'u3'
    r.rpush('a', '2', '3', '1')
    with pytest.raises(redis.DataError):
        r.sort('a', groups=True)

def test_sort_groups_three_gets(self, r):
    r['user:1'] = 'u1'
    r['user:2'] = 'u2'
    r['user:3'] = 'u3'
    r['door:1'] = 'd1'
    r['door:2'] = 'd2'
    r['door:3'] = 'd3'
    r.rpush('a', '2', '3', '1')
    # Not Supported
    # assert r.sort('a', get=('user:*', 'door:*', '#'), groups=True) == \
    #     [
    #         ('u1', 'd1', '1'),
    #         ('u2', 'd2', '2'),
    #         ('u3', 'd3', '3')
    #     ]

def test_sort_desc(self, r):
    r.rpush('a', 2, 3, 1)
    assert r.sort('a', desc=True) == [3, 2, 1]

def test_sort_alpha(self, r):
    # alpha=True sorts lexicographically instead of numerically.
    r.rpush('a', 'e', 'c', 'b', 'd', 'a')
    assert r.sort('a', alpha=True) == \
        ['a', 'b', 'c', 'd', 'e']

def test_sort_store(self, r):
    # With store=..., SORT returns the number of stored elements.
    r.rpush('a', 2, 3, 1)
    assert r.sort('a', store='sorted_values') == 3
    assert r.lrange('sorted_values', 0, -1) == [1, 2, 3]

def test_sort_all_options(self, r):
    r['user:1:username'] = 'zeus'
    r['user:2:username'] = 'titan'
    r['user:3:username'] = 'hermes'
    r['user:4:username'] = 'hercules'
    r['user:5:username'] = 'apollo'
    r['user:6:username'] = 'athena'
    r['user:7:username'] = 'hades'
    r['user:8:username'] = 'dionysus'
    r['user:1:favorite_drink'] = 'yuengling'
    r['user:2:favorite_drink'] = 'rum'
    r['user:3:favorite_drink'] = 'vodka'
    r['user:4:favorite_drink'] = 'milk'
    r['user:5:favorite_drink'] = 'pinot noir'
    r['user:6:favorite_drink'] = 'water'
    r['user:7:favorite_drink'] = 'gin'
    r['user:8:favorite_drink'] = 'apple juice'
    r.rpush('gods', '5', '8', '3', '1', '2', '7', '6', '4')
    num = r.sort('gods', start=2, num=4, by='user:*:username',
                 get='user:*:favorite_drink', desc=True, alpha=True,
                 store='sorted')
    assert num == 4
    # Not Supported
    # assert r.lrange('sorted', 0, 10) == \
    #     [b('vodka'), b('milk'), b('gin'), b('apple juice')]
# --- CLUSTER command tests. The mock_cluster_resp_* fixtures return clients
# whose CLUSTER replies are canned, so only response parsing is exercised.
def test_cluster_addslots(self, mock_cluster_resp_ok):
    assert mock_cluster_resp_ok.cluster('ADDSLOTS', 1) is True

def test_cluster_count_failure_reports(self, mock_cluster_resp_int):
    assert isinstance(mock_cluster_resp_int.cluster(
        'COUNT-FAILURE-REPORTS', 'node'), int)

def test_cluster_countkeysinslot(self, mock_cluster_resp_int):
    assert isinstance(mock_cluster_resp_int.cluster(
        'COUNTKEYSINSLOT', 2), int)

def test_cluster_delslots(self, mock_cluster_resp_ok):
    assert mock_cluster_resp_ok.cluster('DELSLOTS', 1) is True

def test_cluster_failover(self, mock_cluster_resp_ok):
    assert mock_cluster_resp_ok.cluster('FAILOVER', 1) is True

def test_cluster_forget(self, mock_cluster_resp_ok):
    assert mock_cluster_resp_ok.cluster('FORGET', 1) is True

def test_cluster_info(self, mock_cluster_resp_info):
    # CLUSTER INFO is parsed into a dict of field -> value.
    assert isinstance(mock_cluster_resp_info.cluster('info'), dict)

def test_cluster_keyslot(self, mock_cluster_resp_int):
    assert isinstance(mock_cluster_resp_int.cluster(
        'keyslot', 'asdf'), int)

def test_cluster_meet(self, mock_cluster_resp_ok):
    assert mock_cluster_resp_ok.cluster('meet', 'ip', 'port', 1) is True

def test_cluster_nodes(self, mock_cluster_resp_nodes):
    # CLUSTER NODES is parsed into a dict keyed by node address.
    assert isinstance(mock_cluster_resp_nodes.cluster('nodes'), dict)

def test_cluster_replicate(self, mock_cluster_resp_ok):
    assert mock_cluster_resp_ok.cluster('replicate', 'nodeid') is True

def test_cluster_reset(self, mock_cluster_resp_ok):
    assert mock_cluster_resp_ok.cluster('reset', 'hard') is True

def test_cluster_saveconfig(self, mock_cluster_resp_ok):
    assert mock_cluster_resp_ok.cluster('saveconfig') is True

def test_cluster_setslot(self, mock_cluster_resp_ok):
    assert mock_cluster_resp_ok.cluster('setslot', 1,
                                        'IMPORTING', 'nodeid') is True

def test_cluster_slaves(self, mock_cluster_resp_slaves):
    assert isinstance(mock_cluster_resp_slaves.cluster(
        'slaves', 'nodeid'), dict)
# GEO COMMANDS
@skip_if_server_version_lt('3.2.0')
def test_geoadd(self, r):
values = (2.1909389952632, 41.433791470673, 'place1') + \
(2.1873744593677, 41.406342043777, 'place2')
assert r.geoadd('barcelona', | |
import torch
from torch.nn.parameter import Parameter
import numbers
import numpy as np
from scipy.special import factorial
from . import point_process
from . import distributions as dist
from . import base
class count_model(base._likelihood):
    """
    Count likelihood base class.

    Caches per-batch spike-count statistics (``set_Y``) and provides the
    MC/Gauss-Hermite objective plumbing shared by concrete count
    distributions, which are expected to implement ``nll``. An optional
    ``dispersion_mapping`` module supplies a per-bin dispersion parameter.
    """
    def __init__(self, tbin, neurons, dispersion_mapping, inv_link, tensor_type):
        super().__init__(tbin, neurons, neurons, inv_link, tensor_type)
        self.strict_likelihood = True
        if dispersion_mapping is not None:
            # Register as a submodule so its parameters are picked up by
            # the optimizer and KL_prior().
            self.add_module('dispersion_mapping', dispersion_mapping)
        else:
            self.dispersion_mapping = None

    def set_params(self, strict_likelihood=None):
        """
        :param bool strict_likelihood: flag for whether to compute the count probability (involves
                                       constants to be loaded into memory)
        """
        if strict_likelihood is not None:
            self.strict_likelihood = strict_likelihood

    def set_Y(self, spikes, batch_size, filter_len=1):
        """
        Get all the activity into batches useable format for quick log-likelihood evaluation
        Tensor shapes: self.spikes (neuron_dim, batch_dim)
        tfact is the log of time_bin times the spike count
        lfact is the log (spike count)!
        """
        super().set_Y(spikes, batch_size, filter_len=filter_len)
        self.lfact = []
        self.tfact = []
        self.totspik = []
        for b in range(self.batches):
            # Drop the first filter_len-1 bins (history segment).
            spikes = self.spikes[b][..., self.filter_len-1:]
            self.totspik.append(spikes.sum(-1))
            # NOTE(review): tbin is moved to CPU here while other buffers may
            # live on another device — confirm this is intentional.
            self.tfact.append(spikes*torch.log(self.tbin.cpu()))
            self.lfact.append(torch.lgamma(spikes+1.))

    def KL_prior(self):
        """
        KL divergence contribution of the dispersion mapping, if any.
        """
        if self.dispersion_mapping is not None:
            return self.dispersion_mapping.KL_prior()
        else:
            return 0

    def sample_helper(self, h, b, neuron, samples):
        """
        NLL helper function for sample evaluation. Note that spikes is batched including history
        when the model uses history couplings, hence we sample the spike batches without the
        history segments from this function.
        """
        rates = self.f(h) # watch out for underflow or overflow here
        spikes = self.spikes[b][:, neuron, self.filter_len-1:].to(self.tbin.device)
        if self.trials != 1 and samples > 1 and self.trials < h.shape[0]: # cannot rely on broadcasting
            spikes = spikes.repeat(samples, 1, 1) # MCxtrials
        if self.inv_link == 'exp': # spike count times log rate
            l_rates = (spikes*h)
        else:
            l_rates = (spikes*torch.log(rates+1e-12))
        return rates, l_rates, spikes

    def eval_dispersion_mapping(self, XZ, samples, neuron):
        """
        Posterior predictive mean of the dispersion model.
        """
        disp, disp_var = self.dispersion_mapping.compute_F(XZ)
        dh = self.mc_gen(disp, disp_var, samples, neuron)
        return self.dispersion_mapping.f(dh).mean(0) # watch out for underflow or overflow here

    def objective(self, F_mu, F_var, XZ, b, neuron, samples=10, mode='MC'):
        r"""
        Computes the terms for variational expectation :math:`\mathbb{E}_{q(f)q(z)}[]`, which
        can be used to compute different likelihood objectives.
        The returned tensor will have sample dimension as MC over :math:`q(z)`, depending
        on the evaluation mode will be MC or GH or exact over the likelihood samples. This
        is all combined in to the same dimension to be summed over. The weights :math:`w_s`
        are the quadrature weights or equal weights for MC, with appropriate normalization.

        :param int samples: number of MC samples or GH points (exact will ignore and give 1)
        :returns: negative likelihood term of shape (samples, timesteps), sample weights (samples, 1)
        :rtype: tuple of torch.tensors
        """
        if mode == 'MC':
            h = self.mc_gen(F_mu, F_var, samples, neuron) # h has only observed neurons
            rates, l_rates, spikes = self.sample_helper(h, b, neuron, samples)
            # Equal weights for plain Monte Carlo.
            ws = torch.tensor(1./rates.shape[0])
        elif mode == 'GH':
            h, ws = self.gh_gen(F_mu, F_var, samples, neuron)
            rates, l_rates, spikes = self.sample_helper(h, b, neuron, samples)
            ws = ws[:, None]
        else:
            raise NotImplementedError
        if self.dispersion_mapping is None:
            disper_param = None
        else: # MC sampling
            disper_param = self.eval_dispersion_mapping(XZ, samples, neuron)
        return self.nll(b, rates, l_rates, spikes, neuron, disper_param), ws
class renewal_model(base._likelihood):
    """
    Renewal model base class.

    Implements time-rescaling of spike trains (inter-spike intervals under a
    modulated renewal process); subclasses provide the ISI distribution via
    ``nll`` and ``ISI_dist``.
    """
    def __init__(self, tbin, neurons, inv_link, tensor_type, allow_duplicate, dequantize):
        super().__init__(tbin, neurons, neurons, inv_link, tensor_type)
        # allow_duplicate: permit more than one spike per time bin.
        self.allow_duplicate = allow_duplicate
        # dequant: randomly jitter spike times within a bin during rescaling.
        self.dequant = dequantize

    def train_to_ind(self, train):
        """
        Convert a binned spike train to sorted spike-bin indices.

        :returns: (indices, duplicate_flag) where the flag marks bins that
            contained more than one spike (index repeated accordingly).
        """
        if self.allow_duplicate:
            duplicate = False
            spike_ind = train.nonzero().flatten()
            # Bins with count > 1 contribute extra copies of their index.
            bigger = torch.where(train > 1)[0]
            add_on = (spike_ind,)
            for b in bigger:
                add_on += (b*torch.ones(int(train[b])-1, device=train.device, dtype=int),)
            if len(add_on) > 1:
                duplicate = True
            spike_ind = torch.cat(add_on)
            return torch.sort(spike_ind)[0], duplicate
        else:
            return torch.nonzero(train).flatten(), False

    def ind_to_train(self, ind, timesteps):
        # Inverse of train_to_ind: accumulate counts back into a binned train.
        train = torch.zeros((timesteps))
        train[ind] += 1
        return train

    def rate_rescale(self, neuron, spike_ind, rates, duplicate, minimum=1e-8):
        """
        Rate rescaling with option to dequantize, which will be random per sample.

        :param torch.tensor rates: input rates of shape (trials, neurons, timesteps)
        :returns: list of rescaled ISIs, list index over neurons, elements of shape (trials, ISIs)
        :rtype: list
        """
        # Rescaled time tau(t) = integral of the rate up to t.
        rtime = torch.cumsum(rates, dim=-1)*self.tbin
        samples = rtime.shape[0]
        rISI = []
        for tr in range(self.trials):
            isis = []
            for en, n in enumerate(neuron):
                if len(spike_ind[tr][n]) > 1:
                    if self.dequant:
                        # Subtract a uniform fraction of the bin's rescaled
                        # width to place the spike randomly within its bin.
                        deqn = torch.rand(
                            samples,
                            *spike_ind[tr][n].shape,
                            device=rates.device
                        )*rates[tr::self.trials, en, spike_ind[tr][n]]*self.tbin # assume spike at 0
                        tau = rtime[tr::self.trials, en, spike_ind[tr][n]] - deqn
                        if duplicate[n]: # re-order in case of duplicate spike_ind
                            tau = torch.sort(tau, dim=-1)[0]
                    else:
                        tau = rtime[tr::self.trials, en, spike_ind[tr][n]]
                    a = tau[:, 1:]-tau[:, :-1]
                    a[a < minimum] = minimum # don't allow near zero ISI
                    isis.append(a) # samples, order
                else:
                    # Fewer than two spikes -> no ISI for this neuron.
                    isis.append([])
            rISI.append(isis)
        return rISI

    def set_Y(self, spikes, batch_size, filter_len=1):
        """
        Get all the activity into batches useable format for quick log-likelihood evaluation
        Tensor shapes: self.act [neuron_dim, batch_dim]
        """
        if self.allow_duplicate is False and spikes.max() > 1: # only binary trains
            raise ValueError('Only binary spike trains are accepted in set_Y() here')
        super().set_Y(spikes, batch_size, filter_len=filter_len)
        self.spiketimes = []
        self.intervals = torch.empty((self.batches, self.trials, self.neurons))
        self.duplicate = np.empty((self.batches, self.trials, self.neurons), dtype=bool)
        for b, spk in enumerate(self.spikes):
            spiketimes = []
            for tr in range(self.trials):
                cont = []
                for k in range(self.neurons):
                    s, self.duplicate[b, tr, k] = self.train_to_ind(spk[tr, k])
                    cont.append(s)
                    # Number of complete ISIs for this train.
                    self.intervals[b, tr, k] = len(s)-1
                spiketimes.append(cont)
            self.spiketimes.append(spiketimes) # batch list of trial list of spike times list over neurons

    def sample_helper(self, h, b, neuron, scale, samples):
        """
        MC estimator for NLL function.

        :param torch.tensor scale: additional scaling of the rate rescaling to preserve the ISI mean
        :returns: tuple of rates, spikes*log(rates*scale), rescaled ISIs
        :rtype: tuple
        """
        scale = scale.expand(1, self.F_dims)[:, neuron, None] # rescale to get mean 1 in renewal distribution
        rates = self.f(h)*scale
        spikes = self.spikes[b][:, neuron, self.filter_len-1:].to(self.tbin.device)
        if self.trials != 1 and samples > 1 and self.trials < h.shape[0]: # cannot rely on broadcasting
            spikes = spikes.repeat(samples, 1, 1) # trial blocks are preserved, concatenated in first dim
        if self.inv_link == 'exp': # bit masking seems faster than integer indexing using spiketimes
            l_rates = (spikes*(h+torch.log(scale))).sum(-1)
        else:
            l_rates = (spikes*torch.log(rates+1e-12)).sum(-1) # rates include scaling
        spiketimes = [[s.to(self.tbin.device) for s in ss] for ss in self.spiketimes[b]]
        rISI = self.rate_rescale(neuron, spiketimes, rates, self.duplicate[b])
        return rates, l_rates, rISI

    def objective(self, F_mu, F_var, XZ, b, neuron, scale, samples=10, mode='MC'):
        """
        :param torch.tensor F_mu: model output F mean values of shape (samplesxtrials, neurons, time)
        :returns: negative likelihood term of shape (samples, timesteps), sample weights (samples, 1)
        :rtype: tuple of torch.tensors
        """
        if mode == 'MC':
            h = self.mc_gen(F_mu, F_var, samples, neuron)
            rates, l_rates, rISI = self.sample_helper(h, b, neuron, scale, samples)
            ws = torch.tensor(1./rates.shape[0])
        else:
            # Only MC evaluation is implemented for renewal models.
            raise NotImplementedError
        return self.nll(l_rates, rISI, neuron), ws

    def sample(self, rate, neuron=None, XZ=None):
        """
        Sample spike trains from the modulated renewal process.

        :param numpy.array rate: input rate of shape (trials, neuron, timestep)
        :returns: spike train of shape (trials, neuron, timesteps)
        :rtype: np.array
        """
        neuron = self._validate_neuron(neuron)
        spiketimes = point_process.gen_IRP(self.ISI_dist(neuron), rate[:, neuron, :], self.tbin.item())
        tr_t_spike = []
        for sp in spiketimes:
            tr_t_spike.append(self.ind_to_train(torch.tensor(sp), rate.shape[-1]).numpy())
        return np.array(tr_t_spike).reshape(rate.shape[0], -1, rate.shape[-1])
# Special cases
class Spike_phase(base._likelihood):
"""
Renewal model base class
"""
def __init__(self, tbin, neurons, inv_link, tensor_type, allow_duplicate, dequantize):
    # NOTE(review): `allow_duplicate` and `dequantize` are accepted but never
    # stored or used here (unlike renewal_model.__init__) — confirm whether
    # the signature is kept for API symmetry or the assignments are missing.
    super().__init__(tbin, neurons, neurons, inv_link, tensor_type)
def set_Y(self, spikes, batch_size, filter_len=1):
"""
Assumes at time zero, we start at global phase zero. At the end after the last spike, we do
not increment the phase here.
"""
assert spikes.max() < 2 # only binary trains
super().set_Y(spikes, batch_size, filter_len=filter_len)
phases = [] # list of spike phases
for spiketrain in self.all_spikes: # loop over trials
cont = []
dphase = torch.zeros(*spiketrain.shape, dtype=self.tensor_type)
for k in range(self.neurons):
locs = torch.nonzero(spiketrain).flatten()
dlocs = torch.cat((locs[0:1]+1, locs[1:]-locs[:-1]))
cur = 0
for dd in dlocs:
dphase[k, cur:cur+dd] = 1./dd
| |
for the
calculation of the current U*A-value. Thus this array is
UA : float, int, np.ndarray
Total heat transfer coefficient in [W/K].
T_inf : float, int, np.ndarray
Ambient temperature in [°C] or [K]. If given as array, it must be a
single cell!
"""
# get outer surface temperature, following WTP Formelsammlung Chapter 3.3
# with sigma = (T-T_inf) / (T_i - T_inf) instead of the constant heat
# production formula. This formula is only for steady state, thus an error
# will be incorporated. To get the outer layer temperature, the heatflow
# from the fluid through the pipe-wall (and insulation) to ambient is set
# equal with the heatflow from the outer surface (index o) to ambient:
# (T_s - T_inf) * alpha_inf * A_s = U * A_s * (T_i - T_inf)
# Since UA already incorporates the inner fluid-wall-contact-surface as
# reference area, alpha_inf needs to be adjusted by its area.
T_s[:] = T_inf + (T - T_inf) * UA / (alpha_inf * A_s)
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def surface_temp_steady_state(T, T_inf, A_s, alpha_inf, UA):
    """
    Return the steady-state outer surface temperature.

    Parameters:
    -----------
    T : float, int, np.ndarray
        Inner fluid temperature in [°C] or [K].
    A_s : float, int
        The outer surface area (air-contact-area) PER CELL. Calculated with:
        A_s = np.pi * r_s * 2 * grid_spacing
    alpha_inf : np.ndarray
        Heat transfer coefficient in [W / (m**2K)] between the outer layer and
        the ambient. The shape must equal the fluid temperature array shape or
        be a single array cell. This array is used to calculate the new outer
        surface temperature and to get the new alpha_inf value for the
        calculation of the current U*A-value.
    UA : float, int, np.ndarray
        Total heat transfer coefficient in [W/K].
    T_inf : float, int, np.ndarray
        Ambient temperature in [°C] or [K]. If given as array, it must be a
        single cell!
    """
    # get outer surface temperature, following WTP Formelsammlung Chapter 3.3
    # with sigma = (T-T_inf) / (T_i - T_inf) instead of the constant heat
    # production formula. This formula is only for steady state, thus an error
    # will be incorporated. To get the outer layer temperature, the heatflow
    # from the fluid through the pipe-wall (and insulation) to ambient is set
    # equal with the heatflow from the outer surface (index o) to ambient:
    # (T_s - T_inf) * alpha_inf * A_s = U * A_s * (T_i - T_inf)
    # Since UA already incorporates the inner fluid-wall-contact-surface as
    # reference area, alpha_inf needs to be adjusted by its area.
    return T_inf + (T - T_inf) * UA / (alpha_inf * A_s)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def series_circuit_UA(*args):
    """
    Calculates the total U*A-value for a series circuit of two or more U*A
    values.

    Parameters:
    -----------
    UA : float, int, np.ndarray
        U*A value (heat conductivity) in [W/K] for each part of the series
        circuit. If given as np.ndarray, all arrays have to be of the same
        shape.

    Returns:
    --------
    UA_series : float, np.ndarray
        Total U*A value (heat conductivity) in [W/K] of the series.
    """
    # Conductivities in series combine like resistances in parallel:
    # 1 / UA_total = sum_i (1 / UA_i).
    # Seed the accumulator with the first element so broadcasting with
    # array-valued arguments sets the correct result type from the start.
    inverse_sum = 1 / args[0]
    for idx, ua in enumerate(args):
        if idx == 0:
            continue  # first element already accounted for
        inverse_sum += 1 / ua
    return 1 / inverse_sum
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def parallel_circuit_UA(*args):
    """
    Calculates the total U*A-value for a parallel circuit of two or more U*A
    values.

    Parameters:
    -----------
    UA : float, int, np.ndarray
        U*A value (heat conductivity) in [W/K] for each part of the parallel
        circuit. If given as np.ndarray, all arrays have to be of the same
        shape.

    Returns:
    --------
    UA_parallel : float, np.ndarray
        Total U*A value (heat conductivity) in [W/K] of the parallel circuit.
    """
    # Conductivities in parallel simply add up. Use plain (rebinding)
    # addition instead of `+=`: the accumulator initially aliases args[0],
    # so an augmented assignment would mutate the caller's first array
    # in place when ndarrays are passed.
    UA_parallel = args[0]
    for idx, ua in enumerate(args):
        if idx == 0:
            continue  # first element already accounted for
        UA_parallel = UA_parallel + ua
    return UA_parallel
# ---> GENERAL FUNCTIONS:
# logarithmic mean temperature difference:
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def log_mean_temp_diff(T_A_one, T_A_two, T_B_one, T_B_two):
    """
    Calculate the logarithmic mean temperature difference (LMTD) of two fluid
    streams `one` and `two` of a heat exchanger with two ends `A` and `B`.

    Parameters:
    -----------
    T_A_one : float, int, np.array
        Fluid temperature of stream one at end A.
    T_A_two : float, int, np.array
        Fluid temperature of stream two at end A.
    T_B_one : float, int, np.array
        Fluid temperature of stream one at end B.
    T_B_two : float, int, np.array
        Fluid temperature of stream two at end B.
    """
    Delta_T_A = T_A_one - T_A_two
    Delta_T_B = T_B_one - T_B_two
    # NOTE(review): when Delta_T_A == Delta_T_B the log term is zero and this
    # divides by zero (the mathematical limit would be Delta_T_A itself);
    # likewise a sign change between the deltas produces log of a negative
    # number. Callers must guarantee both deltas are nonzero with equal sign.
    lmtd = (Delta_T_A - Delta_T_B) / (np.log(Delta_T_A / Delta_T_B))
    return lmtd
# get simple moving average of the array x and N cells:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def moving_avg(x, N):
    """Simple moving average of `x` over windows of `N` cells.

    Returns an array of length ``x.shape[0] - N + 1`` (valid windows only).
    """
    # Prepend a single zero so that differences of the cumulative sum give
    # exact window sums: sum(x[i:i+N]) == cs[i+N] - cs[i].
    padded = np.zeros(x.shape[0] + 1)
    padded[1:] = x
    cs = np.cumsum(padded)
    window_sums = cs[N:] - cs[:-N]
    return window_sums / float(N)
# fill the edges of x to new_length, so that input x is placed in the middle
# of the output array. if the number of new cells is not even, the array is
# shifted one cell to the end:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def fill_edges(x, new_length):
    """Center `x` in a zero-initialized array of `new_length` cells,
    padding the front with ``x[0]`` and the back with ``x[-1]``."""
    n_old = x.shape[0]
    n_pad = new_length - n_old  # total number of cells to pad
    # Ceil-divide the padding so an odd remainder goes to the front.
    front = n_pad // 2 + n_pad % 2
    out = np.zeros(new_length)
    out[front : front + n_old] = x  # original data in the middle
    out[:front] = x[0]              # replicate first value before start
    out[front + n_old :] = x[-1]    # replicate last value after end
    return out
# this function calls simple moving average on array x and N cells AND fills
# the edges with the last and first value:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def moving_avg_fill_edges(x, N):
    # Compose moving_avg (length x.shape[0] - N + 1) with fill_edges so the
    # result has the same length as the input x.
    return fill_edges(moving_avg(x, N), x.shape[0])
# get window weighted moving average over array x with window weight wght and
# the possibility to fill the edges with the last correct value to get an array
# of the same shape as x:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def weighted_moving_avg(x, wght, fill_edges=False):
    """Window-weighted moving average of `x` with weight vector `wght`.

    If `fill_edges` is True, the result has the same shape as `x`, with the
    leading/trailing cells padded by the first/last valid average; otherwise
    only the ``x.shape[0] - wght.size + 1`` valid windows are returned.
    (Note: the `fill_edges` flag shadows the module-level `fill_edges`
    function inside this body; the parameter name is kept for callers.)
    """
    # get number of cells to calculate average in each step and get total
    # average array length:
    N = wght.size
    length = x.shape[0] - N + 1
    # if edges shall be filled, create an array like the input array and calc.
    # new starting point where the "real" moving average is starting:
    if fill_edges:
        wa_len = x.shape[0]  # get length
        residual = wa_len - length  # calc. remaining edge points to be filled
        start = residual // 2 + residual % 2  # calc. starting point
        wa = np.zeros(wa_len)  # create result array
    else:
        start = 0  # start at 0
        wa = np.zeros(length)  # create result array
    # loop over array:
    for i in range(length):
        wa[i + start] = (x[i : i + N] * wght).sum()  # calc weighted mean
    # fill edges before start with first value and after end with last value.
    # Use explicit indices instead of relying on the loop variable `i`
    # leaking out of the loop (same value, but well-defined and readable):
    if fill_edges:
        wa[:start] = wa[start]
        wa[length + start :] = wa[length - 1 + start]
    return wa
@nb.njit(parallel=GLOB_PARALLEL)
def root_finder(poly_coeff, roots):
"""
Finds the roots of a polynome for an array of root values `roots`.
This means that a polynome, given by its polynome coefficient array
`poly_coeff`, is reversed at each value of `roots`. A polynome defining
the saturated water mass in air for a given temperature, this returns the
Taupunkt temperature for a given water mass.
Since the results have a shape of n-1 for a polynome of degree n, the
results have to be filtered. This may be done in the following way:
>>> # set all imaginary dominated values to zero:
>>> rts_arr[np.abs(rts_arr.imag) > 1e-12] = 0.
>>> # set values above an upper and lower boundary to zero:
>>> rts_arr[rts_arr > 85] = 0.
>>> rts_arr[rts_arr | |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2018 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
from random import random
from time import sleep
from warnings import warn
from neo4j.bolt.connection import RUN, PULL_ALL
from neo4j.bolt.response import Response
from neo4j.exceptions import ServiceUnavailable
from neo4j.compat import urlparse, ustr
from neo4j.exceptions import CypherError, TransientError
from neo4j.config import default_config
from neo4j.compat import perf_counter
from .exceptions import DriverError, SessionError, SessionExpired, TransactionError
_warned_about_transaction_bookmarks = False
READ_ACCESS = "READ"
WRITE_ACCESS = "WRITE"
INITIAL_RETRY_DELAY = 1.0
RETRY_DELAY_MULTIPLIER = 2.0
RETRY_DELAY_JITTER_FACTOR = 0.2
BOOKMARK_PREFIX = "neo4j:bookmark:v1:tx"
def last_bookmark(b0, b1):
    """ Return the latest of two bookmarks.

    A missing bookmark (None) is treated as older than any real bookmark,
    so the other value is returned unchanged; previously a None argument
    propagated a ValueError from :func:`_bookmark_value`.
    """
    if b0 is None:
        return b1
    if b1 is None:
        return b0
    return b0 if _bookmark_value(b0) > _bookmark_value(b1) else b1
def _bookmark_value(b):
    """ Extract and return the integer transaction counter encoded in
    bookmark `b` (format ``neo4j:bookmark:v1:tx<int>``).

    :raise ValueError: if `b` is None, has the wrong prefix, or carries a
        non-integer suffix.
    """
    if b is None or not b.startswith(BOOKMARK_PREFIX):
        raise ValueError("Invalid bookmark: {}".format(b))
    suffix = b[len(BOOKMARK_PREFIX):]
    try:
        return int(suffix)
    except ValueError:
        # Re-raise with a uniform message for malformed suffixes.
        raise ValueError("Invalid bookmark: {}".format(b))
def retry_delay_generator(initial_delay, multiplier, jitter_factor):
    """ Yield an unbounded sequence of retry delays.

    Each base delay grows geometrically by `multiplier`, and every yielded
    value is drawn uniformly from ``[delay - jitter, delay + jitter)`` where
    ``jitter = jitter_factor * delay``.
    """
    current = initial_delay
    while True:
        spread = jitter_factor * current
        low = current - spread
        yield low + (2 * spread * random())
        current = current * multiplier
def is_retriable_transient_error(error):
    """ Determine whether a transient error is worth retrying.

    Errors that result from deliberate client-side actions (a terminated
    transaction or a stopped lock client) are not retriable; all other
    transient errors are.

    :type error: TransientError
    """
    # `x not in t` is the idiomatic form of `not (x in t)`.
    return error.code not in ("Neo.TransientError.Transaction.Terminated",
                              "Neo.TransientError.Transaction.LockClientStopped")
class GraphDatabase(object):
    """ Accessor for :class:`.Driver` construction.
    """

    @classmethod
    def driver(cls, uri, **config):
        """ Create a :class:`.Driver` object. Calling this method provides
        identical functionality to constructing a :class:`.Driver` or
        :class:`.Driver` subclass instance directly.

        :param uri: URI for a graph database service; the scheme selects
            the concrete Driver subclass (see ``Driver.__new__``).
        :param config: driver configuration and authentication details
        """
        return Driver(uri, **config)
class Driver(object):
    """ Base class for all types of :class:`.Driver`, instances of which are
    used as the primary access point to Neo4j.

    :param uri: URI for a graph database service
    :param config: configuration and authentication details (valid keys are listed below)
    """
    #: Overridden by subclasses to specify the URI scheme owned by that
    #: class.
    uri_scheme = None
    #: Connection pool
    _pool = None
    #: Indicator of driver closure.
    _closed = False
    @classmethod
    def _check_uri(cls, uri):
        """ Check whether a URI is compatible with a :class:`.Driver`
        subclass. When called from a subclass, execution simply passes
        through if the URI scheme is valid for that class. If invalid,
        a `ValueError` is raised.

        :param uri: URI to check for compatibility
        :raise: `ValueError` if URI scheme is incompatible
        """
        parsed = urlparse(uri)
        if parsed.scheme != cls.uri_scheme:
            raise ValueError("%s objects require the %r URI scheme" % (cls.__name__, cls.uri_scheme))
    def __new__(cls, uri, **config):
        # Factory dispatch: instantiate whichever registered subclass has
        # claimed the URI's scheme via its `uri_scheme` attribute.
        parsed = urlparse(uri)
        for subclass in Driver.__subclasses__():
            if parsed.scheme == subclass.uri_scheme:
                return subclass(uri, **config)
        raise ValueError("URI scheme %r not supported" % parsed.scheme)
    def __del__(self):
        # Best-effort cleanup when the driver is garbage-collected.
        self.close()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def session(self, access_mode=None, **parameters):
        """ Create a new :class:`.Session` object based on this
        :class:`.Driver`.

        :param access_mode: default access mode (read or write) for
            transactions in this session
        :param parameters: custom session parameters (see
            :class:`.Session` for details)
        :returns: new :class:`.Session` object
        """
        # Base implementation only validates state; subclasses are expected
        # to construct and return the actual Session.
        if self.closed():
            raise DriverError("Driver closed")
    def close(self):
        """ Shut down, closing any open connections that were spawned by
        this :class:`.Driver`.
        """
        if not self._closed:
            self._closed = True
            if self._pool is not None:
                self._pool.close()
                self._pool = None
    def closed(self):
        # True once close() has been called.
        return self._closed
class Session(object):
""" A :class:`.Session` is a logical context for transactional units
of work. Connections are drawn from the :class:`.Driver` connection
pool as required.
Session creation is a lightweight operation and sessions are not thread
safe. Therefore a session should generally be short-lived, and not
span multiple threads.
In general, sessions will be created and destroyed within a `with`
context. For example::
with driver.session() as session:
result = session.run("MATCH (a:Person) RETURN a.name")
# do something with the result...
:param acquirer: callback function for acquiring new connections
with a given access mode
:param access_mode: default access mode (read or write) for
transactions in this session
:param parameters: custom session parameters, including:
`bookmark`
A single bookmark after which this session should begin.
(Deprecated, use `bookmarks` instead)
`bookmarks`
A collection of bookmarks after which this session should begin.
`max_retry_time`
The maximum time after which to stop attempting retries of failed
transactions.
"""
# The current connection.
_connection = None
# The current :class:`.Transaction` instance, if any.
_transaction = None
# The last result received.
_last_result = None
# The collection of bookmarks after which the next
# :class:`.Transaction` should be carried out.
_bookmarks = ()
# Default maximum time to keep retrying failed transactions.
_max_retry_time = default_config["max_retry_time"]
_closed = False
    def __init__(self, acquirer, access_mode, **parameters):
        # Callback used to borrow a connection from the pool for a given
        # access mode (READ_ACCESS / WRITE_ACCESS).
        self._acquirer = acquirer
        self._default_access_mode = access_mode
        for key, value in parameters.items():
            if key == "bookmark":
                # Deprecated singular form: normalise to a one-element list.
                self._bookmarks = [value] if value else []
            elif key == "bookmarks":
                self._bookmarks = value or []
            elif key == "max_retry_time":
                self._max_retry_time = value
            else:
                pass  # unknown keys ignored for forward compatibility
def __del__(self):
try:
self.close()
except (SessionError, ServiceUnavailable):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
    def _connect(self, access_mode=None):
        # Acquire a pooled connection for the requested access mode,
        # first syncing and releasing any connection currently held.
        if access_mode is None:
            access_mode = self._default_access_mode
        if self._connection:
            self._disconnect(sync=True)
        self._connection = self._acquirer(access_mode)
    def _disconnect(self, sync):
        """ Release the current connection (if any) back to the pool.

        :param sync: when True, flush outstanding requests/responses
            first; connectivity errors during the flush are ignored.
        """
        if self._connection:
            if sync:
                try:
                    self._connection.sync()
                except (SessionError, ServiceUnavailable):
                    pass
            # Re-check: sync() failure handling may already have detached.
            if self._connection:
                self._connection.in_use = False
                self._connection = None
    def close(self):
        """ Close the session. This will release any borrowed resources,
        such as connections, and will roll back any outstanding transactions.
        """
        try:
            if self.has_transaction():
                try:
                    self.rollback_transaction()
                except (CypherError, TransactionError, SessionError, ServiceUnavailable):
                    # Best-effort rollback during teardown: failures are
                    # deliberately swallowed.
                    pass
        finally:
            # Always mark closed and return the connection, even if the
            # rollback raised something unexpected.
            self._closed = True
            self._disconnect(sync=True)
def closed(self):
""" Indicator for whether or not this session has been closed.
:returns: :const:`True` if closed, :const:`False` otherwise.
"""
return self._closed
    def run(self, statement, parameters=None, **kwparameters):
        """ Run a Cypher statement within an auto-commit transaction.

        The statement is sent and the result header received
        immediately but the :class:`.StatementResult` content is
        fetched lazily as consumed by the client application.

        If a statement is executed before a previous
        :class:`.StatementResult` in the same :class:`.Session` has
        been fully consumed, the first result will be fully fetched
        and buffered. Note therefore that the generally recommended
        pattern of usage is to fully consume one result before
        executing a subsequent statement. If two results need to be
        consumed in parallel, multiple :class:`.Session` objects
        can be used as an alternative to result buffering.

        For more usage details, see :meth:`.Transaction.run`.

        :param statement: template Cypher statement
        :param parameters: dictionary of parameters
        :param kwparameters: additional keyword parameters
        :returns: :class:`.StatementResult` object
        :raise SessionError: if the session is closed
        :raise ValueError: if the statement is empty
        """
        if self.closed():
            raise SessionError("Session closed")
        if not statement:
            raise ValueError("Cannot run an empty statement")
        if not self.has_transaction():
            # Auto-commit mode: acquire a connection just for this statement.
            self._connect()
        # __run__ (name-mangled) is implemented outside this chunk.
        result = self.__run__(statement, dict(parameters or {}, **kwparameters))
        if not self.has_transaction():
            # Push the request and fetch the result header immediately so
            # errors surface here rather than on first record access.
            self._connection.send()
            self._connection.fetch()
        return result
def send(self):
""" Send all outstanding requests.
"""
if self._connection:
self._connection.send()
def fetch(self):
""" Attempt to fetch at least one more record.
:returns: number of records fetched
"""
if self._connection:
detail_count, _ = self._connection.fetch()
return detail_count
return 0
def sync(self):
""" Carry out a full send and receive.
:returns: number of records fetched
"""
if self._connection:
detail_count, _ = self._connection.sync()
return detail_count
return 0
    def detach(self, result):
        """ Detach a result from this session by fetching and buffering any
        remaining records.

        :param result: the result to detach
        :returns: number of records fetched
        """
        count = 0
        self.send()
        # Local alias saves an attribute lookup per iteration.
        fetch = self.fetch
        while result.attached():
            count += fetch()
        if self._last_result is result:
            self._last_result = None
        if not self.has_transaction():
            # Result fully buffered: the connection can be released
            # without a final sync.
            self._disconnect(sync=False)
        return count
    def last_bookmark(self):
        """ The bookmark returned by the last :class:`.Transaction`.

        Computed as the maximum (by transaction id) of the bookmarks
        currently held by this session; None if there are none.
        """
        last = None
        for bookmark in self._bookmarks:
            if last is None:
                last = bookmark
            else:
                # Module-level helper picks the higher-valued bookmark.
                last = last_bookmark(last, bookmark)
        return last
def has_transaction(self):
return bool(self._transaction)
def _create_transaction(self):
self._transaction = Transaction(self, on_close=self._destroy_transaction)
def _destroy_transaction(self):
self._transaction = None
def begin_transaction(self, bookmark=None):
""" Create a new :class:`.Transaction` within this session.
Calling this method with a bookmark is equivalent to
:param bookmark: a bookmark to which the server should
synchronise before beginning the transaction
:returns: new :class:`.Transaction` instance.
:raise: :class:`.TransactionError` if a transaction is already | |
value"),
input_type="tree",
output_type="tree",
)
def highlighted(channel, _, **kwargs):
"""
Set tree list to highlighted nodes
"""
return "tree", list(self.flat(highlighted=True))
        @self.console_command(
            "targeted",
            help=_("delegate commands to sub-focused value"),
            input_type="tree",
            output_type="tree",
        )
        def targeted(channel, _, **kwargs):
            """
            Set tree list to the targeted nodes.
            """
            return "tree", list(self.flat(targeted=True))
        @self.console_command(
            "delete",
            help=_("delete the given nodes"),
            input_type="tree",
            output_type="tree",
        )
        def delete(channel, _, data=None, **kwargs):
            """
            Delete nodes.
            Structural nodes such as root, elements branch, and operations branch are not able to be deleted
            """
            self.remove_nodes(data)
            # Notify listeners so the tree widget and the scene redraw.
            self.signal("tree_changed")
            self.signal("refresh_scene", 0)
            return "tree", [self._tree]
        @self.console_command(
            "delegate",
            help=_("delegate commands to focused value"),
            input_type="tree",
            output_type=("op", "elements"),
        )
        def delegate(channel, _, **kwargs):
            """
            Delegate to either ops or elements depending on the current node emphasis.
            """
            # First emphasized node decides: operations win over elements.
            # NOTE(review): output_type declares "op" but this returns "ops";
            # confirm which spelling the console dispatcher expects.
            for item in self.flat(emphasized=True):
                if item.type.startswith("op"):
                    return "ops", list(self.ops(emphasized=True))
                if item.type in elem_nodes or item.type in ("group", "file"):
                    return "elements", list(self.elems(emphasized=True))
# ==========
# CLIPBOARD COMMANDS
# ==========
        @self.console_option("name", "n", type=str)
        @self.console_command(
            "clipboard",
            help=_("clipboard"),
            input_type=(None, "elements"),
            output_type="clipboard",
        )
        def clipboard_base(data=None, name=None, **kwargs):
            """
            Clipboard commands. Applies to current selected elements to
            make a copy of those elements. Paste a copy of those elements
            or cut those elements. Clear clears the clipboard.

            The list command will list them but this is only for debug.
            """
            # The -n option switches which named clipboard slot subsequent
            # copy/paste/cut/clear commands operate on.
            if name is not None:
                self._clipboard_default = name
            if data is None:
                # No piped elements: operate on the current selection.
                return "clipboard", list(self.elems(emphasized=True))
            else:
                return "clipboard", data
        @self.console_command(
            "copy",
            help=_("clipboard copy"),
            input_type="clipboard",
            output_type="elements",
        )
        def clipboard_copy(data=None, **kwargs):
            """
            Store copies of the piped elements in the current clipboard
            slot, so later edits to the originals don't affect the slot.
            """
            destination = self._clipboard_default
            self._clipboard[destination] = [copy(e) for e in data]
            return "elements", self._clipboard[destination]
@self.console_option(
"dx", "x", help=_("paste offset x"), type=Length, default=0
)
@self.console_option(
"dy", "y", help=_("paste offset y"), type=Length, default=0
)
@self.console_command(
"paste",
help=_("clipboard paste"),
input_type="clipboard",
output_type="elements",
)
def clipboard_paste(command, channel, _, data=None, dx=None, dy=None, **kwargs):
destination = self._clipboard_default
try:
pasted = [copy(e) for e in self._clipboard[destination]]
except KeyError:
channel(_("Error: Clipboard Empty"))
return
if dx != 0 or dy != 0:
matrix = Matrix(
"translate({dx}, {dy})".format(dx=float(dx), dy=float(dy))
)
for node in pasted:
node.matrix *= matrix
group = self.elem_branch.add(type="group", label="Group")
for p in pasted:
group.add_node(copy(p))
self.set_emphasis([group])
return "elements", pasted
        @self.console_command(
            "cut",
            help=_("clipboard cut"),
            input_type="clipboard",
            output_type="elements",
        )
        def clipboard_cut(data=None, **kwargs):
            """
            Store copies of the piped elements in the current clipboard
            slot, then remove the originals from the tree.
            """
            destination = self._clipboard_default
            self._clipboard[destination] = [copy(e) for e in data]
            self.remove_elements(data)
            return "elements", self._clipboard[destination]
@self.console_command(
"clear",
help=_("clipboard clear"),
input_type="clipboard",
output_type="elements",
)
def clipboard_clear(data=None, **kwargs):
destination = self._clipboard_default
old = self._clipboard[destination]
self._clipboard[destination] = None
return "elements", old
        @self.console_command(
            "contents",
            help=_("clipboard contents"),
            input_type="clipboard",
            output_type="elements",
        )
        def clipboard_contents(**kwargs):
            """
            Return the contents of the current clipboard slot.

            NOTE(review): raises KeyError for a never-written slot and
            returns None after "clipboard clear" -- confirm downstream
            commands tolerate both.
            """
            destination = self._clipboard_default
            return "elements", self._clipboard[destination]
@self.console_command(
"list",
help=_("clipboard list"),
input_type="clipboard",
)
def clipboard_list(command, channel, _, **kwargs):
for v in self._clipboard:
k = self._clipboard[v]
channel("%s: %s" % (str(v).ljust(5), str(k)))
# ==========
# NOTES COMMANDS
# ==========
@self.console_option(
"append", "a", type=bool, action="store_true", default=False
)
@self.console_command("note", help=_("note <note>"))
def note(command, channel, _, append=False, remainder=None, **kwargs):
note = remainder
if note is None:
if self.note is None:
channel(_("No Note."))
else:
channel(str(self.note))
else:
if append:
self.note += "\n" + note
else:
self.note = note
channel(_("Note Set."))
channel(str(self.note))
# ==========
# TRACE OPERATIONS
# ==========
# Function to return the euclidean distance
# between two points
def dist(a, b):
return sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))
        # Function to check whether a point lies inside
        # or on the boundaries of the circle
        def is_inside(center, radius, p):
            """True if point *p* is within or on the circle (center, radius)."""
            return dist(center, p) <= radius
# The following two functions are used
# To find the equation of the circle when
# three points are given.
# Helper method to get a circle defined by 3 points
def get_circle_center(bx, by, cx, cy):
B = bx * bx + by * by
C = cx * cx + cy * cy
D = bx * cy - by * cx
return [(cy * B - by * C) / (2 * D), (bx * C - cx * B) / (2 * D)]
# Function to return the smallest circle
# that intersects 2 points
def circle_from1(A, B):
# Set the center to be the midpoint of A and B
C = [(A[0] + B[0]) / 2.0, (A[1] + B[1]) / 2.0]
# Set the radius to be half the distance AB
return C, dist(A, B) / 2.0
# Function to return a unique circle that
# intersects three points
def circle_from2(A, B, C):
if A == B:
I, radius = circle_from1(A, C)
return I, radius
elif A == C:
I, radius = circle_from1(A, B)
return I, radius
elif B == C:
I, radius = circle_from1(A, B)
return I, radius
else:
I = get_circle_center(
B[0] - A[0], B[1] - A[1], C[0] - A[0], C[1] - A[1]
)
I[0] += A[0]
I[1] += A[1]
radius = dist(I, A)
return I, radius
# Function to check whether a circle
# encloses the given points
def is_valid_circle(center, radius, P):
# Iterating through all the points
# to check whether the points
# lie inside the circle or not
for p in P:
if not is_inside(center, radius, p):
return False
return True
        # Function to return the minimum enclosing
        # circle for N <= 3
        def min_circle_trivial(P):
            """Minimum enclosing circle for at most three points.

            Returns (center, radius); ([0, 0], 0) for an empty set.
            """
            assert len(P) <= 3
            if not P:
                return [0, 0], 0
            elif len(P) == 1:
                return P[0], 0
            elif len(P) == 2:
                center, radius = circle_from1(P[0], P[1])
                return center, radius
            # To check if MEC can be determined
            # by 2 points only
            for i in range(3):
                for j in range(i + 1, 3):
                    center, radius = circle_from1(P[i], P[j])
                    if is_valid_circle(center, radius, P):
                        return center, radius
            center, radius = circle_from2(P[0], P[1], P[2])
            return center, radius
        # Returns the MEC using Welzl's algorithm
        # Takes a set of input points P and a set R
        # points on the circle boundary.
        # n represents the number of points in P
        # that are not yet processed.
        def welzl_helper(P, R, n):
            """Recursive core of Welzl's minimum-enclosing-circle algorithm."""
            # Base case when all points processed or |R| = 3
            if n == 0 or len(R) == 3:
                center, radius = min_circle_trivial(R)
                return center, radius
            # Pick one of the unprocessed points at random
            idx = randint(0, n - 1)
            p = P[idx]
            # Put the picked point at the end of P
            # since it's more efficient than
            # deleting from the middle of the vector
            P[idx], P[n - 1] = P[n - 1], P[idx]
            # Get the MEC circle d from the
            # set of points P - :p
            dcenter, dradius = welzl_helper(P, R.copy(), n - 1)
            # If d contains p, return d
            if is_inside(dcenter, dradius, p):
                return dcenter, dradius
            # Otherwise, p must be on the boundary of the MEC
            R.append(p)
            # Return the MEC for P - :p and R U :p
            dcenter, dradius = welzl_helper(P, R.copy(), n - 1)
            return dcenter, dradius
        def welzl(P):
            """Minimum enclosing circle of points *P* via Welzl's algorithm.

            Operates on a shuffled copy so the caller's list order is kept.
            """
            P_copy = P.copy()
            shuffle(P_copy)
            center, radius = welzl_helper(P_copy, [], len(P_copy))
            return center, radius
def generate_hull_shape(method, data, resolution=None):
if resolution is None:
DETAIL = 500 # How coarse / fine shall a subpath be split
else:
DETAIL = int(resolution)
pts = []
min_val = [float("inf"), float("inf")]
max_val = [-float("inf"), -float("inf")]
for node in data:
if method in ("hull", "segment", "circle"):
try:
path = node.as_path()
except AttributeError:
path = None
if not path is None:
p = path.first_point
pts += [(p.x, p.y)]
for segment in path:
p = segment.end
pts += [(p.x, p.y)]
else:
bounds = node.bounds
pts += [
(bounds[0], bounds[1]),
(bounds[0], bounds[3]),
(bounds[2], bounds[1]),
(bounds[2], bounds[3]),
]
elif method in ("complex"):
try:
path = node.as_path()
except AttributeError:
path = None
if not path is None:
for subpath in path.as_subpaths():
psp = Path(subpath)
p = psp.first_point
pts += [(p.x, p.y)]
positions = linspace(0, 1, num=DETAIL, endpoint=True)
subj = psp.npoint(positions)
# Not sure why we need to do that, its already rows x 2
# subj.reshape((2, DETAIL))
s = list(map(Point, subj))
for p in s:
pts += [(p.x, p.y)]
else:
bounds = node.bounds
pts += [
(bounds[0], bounds[1]),
(bounds[0], bounds[3]),
(bounds[2], bounds[1]),
(bounds[2], bounds[3]),
]
elif method == "quick":
bounds = node.bounds
min_val[0] = min(min_val[0], bounds[0])
min_val[1] = min(min_val[1], | |
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) <NAME> <<EMAIL>>
## This program is published under a GPLv2 license
## Copyright (C) 2014 <NAME> <<EMAIL>>
## OpenFlow is an open standard used in SDN deployments.
## Based on OpenFlow v1.0.1
## Specifications can be retrieved from https://www.opennetworking.org/
# scapy.contrib.description = openflow v1.0
# scapy.contrib.status = loads
import binascii
import struct
from scapy.all import *
### If prereq_autocomplete is True then match prerequisites will be
### automatically handled. See OFPMatch class.
prereq_autocomplete = False
#####################################################
################# Predefined values #################
#####################################################
ofp_port_no = { 0xfff8: "IN_PORT",
0xfff9: "TABLE",
0xfffa: "NORMAL",
0xfffb: "FLOOD",
0xfffc: "ALL",
0xfffd: "CONTROLLER",
0xfffe: "LOCAL",
0xffff: "NONE" }
ofp_table = { 0xff: "ALL" }
ofp_queue = { 0xffffffff: "ALL" }
ofp_buffer = { 0xffffffff: "NO_BUFFER" }
ofp_max_len = { 0xffff: "NO_BUFFER" }
#####################################################
################# Common structures #################
#####################################################
### The following structures will be used in different types
### of OpenFlow messages: ports, matches, actions, queues.
##################### Ports #####################
ofp_port_config = [ "PORT_DOWN",
"NO_STP",
"NO_RECV",
"NO_RECV_STP",
"NO_FLOOD",
"NO_FWD",
"NO_PACKET_IN" ]
ofp_port_state = [ "LINK_DOWN" ]
ofp_port_state_stp = { 0: "OFPPS_STP_LISTEN",
1: "OFPPS_STP_LEARN",
2: "OFPPS_STP_FORWARD",
3: "OFPPS_STP_BLOCK" }
ofp_port_features = [ "10MB_HD",
"10MB_FD",
"100MB_HD",
"100MB_FD",
"1GB_HD",
"1GB_FD",
"10GB_FD",
"COPPER",
"FIBER",
"AUTONEG",
"PAUSE",
"PAUSE_ASYM" ]
class OFPPhyPort(Packet):
    """Physical port description (ofp_phy_port)."""
    name = "OFP_PHY_PORT"
    fields_desc = [ ShortEnumField("port_no", 0, ofp_port_no),
                    MACField("hw_addr", "0"),
                    StrFixedLenField("port_name", "", 16),
                    FlagsField("config", 0, 32, ofp_port_config),
                    # Fix: the STP states are enumerated in the (otherwise
                    # unused) ofp_port_state_stp dict; ofp_port_state is
                    # the flags list for the "state" field below.
                    BitEnumField("stp_state", 0, 24, ofp_port_state_stp),
                    FlagsField("state", 0, 8, ofp_port_state),
                    FlagsField("curr", 0, 32, ofp_port_features),
                    FlagsField("advertised", 0, 32, ofp_port_features),
                    FlagsField("supported", 0, 32, ofp_port_features),
                    FlagsField("peer", 0, 32, ofp_port_features) ]
    def extract_padding(self, s):
        # The record is fixed-size; anything beyond it belongs to the parent.
        return "", s
class OFPMatch(Packet):
    """OpenFlow 1.0 flow match structure (ofp_match).

    Fields left as None are treated as wildcarded; post_build rebuilds
    the leading 32-bit wildcards word accordingly.
    """
    name = "OFP_MATCH"
    fields_desc= [ FlagsField("wildcards1", None, 12, [ "DL_VLAN_PCP",
                                                        "NW_TOS" ]),
                   BitField("nw_dst_mask", None, 6),
                   BitField("nw_src_mask", None, 6),
                   FlagsField("wildcards2", None, 8, [ "IN_PORT",
                                                       "DL_VLAN",
                                                       "DL_SRC",
                                                       "DL_DST",
                                                       "DL_TYPE",
                                                       "NW_PROTO",
                                                       "TP_SRC",
                                                       "TP_DST" ]),
                   ShortEnumField("in_port", None, ofp_port_no),
                   MACField("dl_src", None),
                   MACField("dl_dst", None),
                   ShortField("dl_vlan", None),
                   ByteField("dl_vlan_pcp", None),
                   XByteField("pad1", None),
                   ShortField("dl_type", None),
                   ByteField("nw_tos", None),
                   ByteField("nw_proto", None),
                   XShortField("pad2", None),
                   IPField("nw_src", "0"),
                   IPField("nw_dst", "0"),
                   ShortField("tp_src", None),
                   ShortField("tp_dst", None) ]
    def extract_padding(self, s):
        return "", s
    ### with post_build we create the wildcards field bit by bit
    def post_build(self, p, pay):
        # first 10 bits of an ofp_match are always set to 0
        l = ["0"*10]
        # when one field has not been declared, it is assumed to be wildcarded
        if self.wildcards1 is None:
            if self.nw_tos is None: l.append("1")
            else: l.append("0")
            if self.dl_vlan_pcp is None: l.append("1")
            else: l.append("0")
        else:
            w1 = bin(self.wildcards1)[2:]
            l.append("0"*(2-len(w1)))
            l.append(w1)
        # ip masks use 6 bits each
        # Fix: compare string values with ==/!= rather than "is"/"is not";
        # identity tests against str literals rely on CPython interning and
        # raise SyntaxWarning on modern interpreters.
        if self.nw_dst_mask is None:
            if self.nw_dst == "0": l.append("111111")
            # 0x100000 would be ok too (32-bit IP mask)
            else: l.append("0"*6)
        else:
            m1 = bin(self.nw_dst_mask)[2:]
            l.append("0"*(6-len(m1)))
            l.append(m1)
        if self.nw_src_mask is None:
            if self.nw_src == "0": l.append("111111")
            else: l.append("0"*6)
        else:
            m2 = bin(self.nw_src_mask)[2:]
            l.append("0"*(6-len(m2)))
            l.append(m2)
        # wildcards2 works the same way as wildcards1
        if self.wildcards2 is None:
            if self.tp_dst is None: l.append("1")
            else: l.append("0")
            if self.tp_src is None: l.append("1")
            else: l.append("0")
            if self.nw_proto is None: l.append("1")
            else: l.append("0")
            if self.dl_type is None: l.append("1")
            else: l.append("0")
            if self.dl_dst is None: l.append("1")
            else: l.append("0")
            if self.dl_src is None: l.append("1")
            else: l.append("0")
            if self.dl_vlan is None: l.append("1")
            else: l.append("0")
            if self.in_port is None: l.append("1")
            else: l.append("0")
        else:
            w2 = bin(self.wildcards2)[2:]
            l.append("0"*(8-len(w2)))
            l.append(w2)
        ### In order to write OFPMatch compliant with the specifications,
        ### if prereq_autocomplete has been set to True
        ### we assume ethertype=IP or nwproto=TCP when appropriate subfields are provided.
        if prereq_autocomplete:
            if self.dl_type is None:
                if self.nw_src != "0" or self.nw_dst != "0" or self.nw_proto is not None or self.nw_tos is not None:
                    p = p[:22] + struct.pack("!H", 0x0800) + p[24:]
                    l[-5] = "0"
            if self.nw_proto is None:
                if self.tp_src is not None or self.tp_dst is not None:
                    p = p[:22] + struct.pack("!H", 0x0800) + p[24:]
                    l[-5] = "0"
                    p = p[:25] + struct.pack("!B", 0x06) + p[26:]
                    l[-6] = "0"
        wild = "".join(l)
        pad = ""
        i = 0
        while i < 32 and wild[i:i+4] == "0000":
            pad += "0"
            i += 4
        ins = binascii.unhexlify(pad + "%x" % int(wild, 2))
        p = ins + p[4:]
        return p + pay
###################### Actions ######################
class _ofp_action_header(Packet):
    """Base class for OpenFlow actions: fills in the 16-bit length field
    (bytes 2-3 of the action) after building, if it was left unset."""
    name = "Dummy OpenFlow Action Header"
    def post_build(self, p, pay):
        # self.len is declared by each subclass's fields_desc.
        if self.len is None:
            l = len(p)+len(pay)
            p = p[:2] + struct.pack("!H", l) + p[4:]
        return p + pay
ofp_action_types = { 0: "OFPAT_OUTPUT",
1: "OFPAT_SET_VLAN_VID",
2: "OFPAT_SET_VLAN_PCP",
3: "OFPAT_STRIP_VLAN",
4: "OFPAT_SET_DL_SRC",
5: "OFPAT_SET_DL_DST",
6: "OFPAT_SET_NW_SRC",
7: "OFPAT_SET_NW_DST",
8: "OFPAT_SET_NW_TOS",
9: "OFPAT_SET_TP_SRC",
10: "OFPAT_SET_TP_DST",
11: "OFPAT_ENQUEUE",
65535: "OFPAT_VENDOR" }
class OFPATOutput(_ofp_action_header):
name = "OFPAT_OUTPUT"
fields_desc = [ ShortEnumField("type", 0, ofp_action_types),
ShortField("len", 8),
ShortEnumField("port", 0, ofp_port_no),
ShortEnumField("max_len", "NO_BUFFER", ofp_max_len) ]
class OFPATSetVLANVID(_ofp_action_header):
name = "OFPAT_SET_VLAN_VID"
fields_desc = [ ShortEnumField("type", 1, ofp_action_types),
ShortField("len", 8),
ShortField("vlan_vid", 0),
XShortField("pad", 0) ]
class OFPATSetVLANPCP(_ofp_action_header):
name = "OFPAT_SET_VLAN_PCP"
fields_desc = [ ShortEnumField("type", 2, ofp_action_types),
ShortField("len", 8),
ByteField("vlan_pcp", 0),
X3BytesField("pad", 0) ]
class OFPATStripVLAN(_ofp_action_header):
name = "OFPAT_STRIP_VLAN"
fields_desc = [ ShortEnumField("type", 3, ofp_action_types),
ShortField("len", 8),
XIntField("pad", 0) ]
class OFPATSetDlSrc(_ofp_action_header):
name = "OFPAT_SET_DL_SRC"
fields_desc = [ ShortEnumField("type", 4, ofp_action_types),
ShortField("len", 16),
MACField("dl_addr", "0"),
XBitField("pad", 0, 48) ]
class OFPATSetDlDst(_ofp_action_header):
name = "OFPAT_SET_DL_DST"
fields_desc = [ ShortEnumField("type", 5, ofp_action_types),
ShortField("len", 16),
MACField("dl_addr", "0"),
XBitField("pad", 0, 48) ]
class OFPATSetNwSrc(_ofp_action_header):
name = "OFPAT_SET_NW_SRC"
fields_desc = [ ShortEnumField("type", 6, ofp_action_types),
ShortField("len", 8),
IPField("nw_addr", "0") ]
class OFPATSetNwDst(_ofp_action_header):
name = "OFPAT_SET_NW_DST"
fields_desc = [ ShortEnumField("type", 7, ofp_action_types),
ShortField("len", 8),
IPField("nw_addr", "0") ]
class OFPATSetNwToS(_ofp_action_header):
    """OFPAT_SET_NW_TOS action: rewrite the IPv4 ToS byte."""
    # Fix: the display name previously read "OFPAT_SET_TP_TOS", matching
    # neither the class nor ofp_action_types[8] ("OFPAT_SET_NW_TOS").
    name = "OFPAT_SET_NW_TOS"
    fields_desc = [ ShortEnumField("type", 8, ofp_action_types),
                    ShortField("len", 8),
                    ByteField("nw_tos", 0),
                    X3BytesField("pad", 0) ]
class OFPATSetTpSrc(_ofp_action_header):
name = "OFPAT_SET_TP_SRC"
fields_desc = [ ShortEnumField("type", 9, ofp_action_types),
ShortField("len", 8),
ShortField("tp_port", 0),
XShortField("pad", 0) ]
class OFPATSetTpDst(_ofp_action_header):
name = "OFPAT_SET_TP_DST"
fields_desc = [ ShortEnumField("type", 10, ofp_action_types),
ShortField("len", 8),
ShortField("tp_port", 0),
XShortField("pad", 0) ]
class OFPATEnqueue(_ofp_action_header):
name = "OFPAT_ENQUEUE"
fields_desc = [ ShortEnumField("type", 11, ofp_action_types),
ShortField("len", 16),
ShortEnumField("port", 0, ofp_port_no),
XBitField("pad", 0, 48),
IntField("queue_id", 0) ]
class OFPATVendor(_ofp_action_header):
name = "OFPAT_VENDOR"
fields_desc = [ ShortEnumField("type", 65535, ofp_action_types),
ShortField("len", 8),
IntField("vendor", 0) ]
ofp_action_cls = { 0: OFPATOutput,
1: OFPATSetVLANVID,
2: OFPATSetVLANPCP,
3: OFPATStripVLAN,
4: OFPATSetDlSrc,
5: OFPATSetDlDst,
6: OFPATSetNwSrc,
7: OFPATSetNwDst,
8: OFPATSetNwToS,
9: OFPATSetTpSrc,
10: OFPATSetTpDst,
11: OFPATEnqueue,
65535: OFPATVendor }
class ActionPacketListField(PacketListField):
    """PacketListField dispatching each action to its concrete class via
    the 16-bit type/length read from the action header."""
    def m2i(self, pkt, s):
        # Action type is the first 16-bit field; unknown types become Raw.
        t = struct.unpack("!H", s[:2])[0]
        return ofp_action_cls.get(t, Raw)(s)
    @staticmethod
    def _get_action_length(s):
        # Action length is the second 16-bit field (covers the whole action).
        return struct.unpack("!H", s[2:4])[0]
    def getfield(self, pkt, s):
        lst = []
        remain = s
        # Need at least the 4-byte action header to read type and length.
        while len(remain) >= 4:
            l = ActionPacketListField._get_action_length(remain)
            if l < 4 or l > len(remain):
                # Malformed or truncated action: stop rather than looping
                # forever on a zero length or overrunning the buffer.
                break
            current = remain[:l]
            remain = remain[l:]
            p = self.m2i(pkt, current)
            lst.append(p)
        return remain, lst
####################### Queues ######################
class _ofp_queue_property_header(Packet):
    """Base class for queue properties: fills in the 16-bit length field
    at byte offset 2 after building, if it was left unset."""
    name = "Dummy OpenFlow Queue Property Header"
    def post_build(self, p, pay):
        # self.len is declared by each subclass's fields_desc.
        if self.len is None:
            l = len(p)+len(pay)
            p = p[:2] + struct.pack("!H", l) + p[4:]
        return p + pay
ofp_queue_property_types = { 0: "OFPQT_NONE",
1: "OFPQT_MIN_RATE" }
class OFPQTNone(_ofp_queue_property_header):
name = "OFPQT_NONE"
fields_desc = [ ShortEnumField("type", 0, ofp_queue_property_types),
ShortField("len", 8),
XIntField("pad", 0) ]
class OFPQTMinRate(_ofp_queue_property_header):
name = "OFPQT_MIN_RATE"
fields_desc = [ ShortEnumField("type", 1, ofp_queue_property_types),
ShortField("len", 16),
XIntField("pad", 0),
ShortField("rate", 0),
XBitField("pad2", 0, 48) ]
ofp_queue_property_cls = { 0: OFPQTNone,
1: OFPQTMinRate }
class QueuePropertyPacketListField(PacketListField):
    """PacketListField for queue properties, dispatching each entry to its
    concrete class via the leading 16-bit property type."""
    def m2i(self, pkt, s):
        t = struct.unpack("!H", s[:2])[0]
        return ofp_queue_property_cls.get(t, Raw)(s)
    @staticmethod
    def _get_queue_property_length(s):
        # Property length is the second 16-bit field of the property header.
        return struct.unpack("!H", s[2:4])[0]
    def getfield(self, pkt, s):
        lst = []
        remain = s
        # Need at least 4 bytes to read type and length.
        while len(remain) >= 4:
            l = QueuePropertyPacketListField._get_queue_property_length(remain)
            if l < 8 or l > len(remain):
                # The smallest legal property (OFPQT_NONE) is 8 bytes; stop
                # on malformed lengths instead of looping forever.
                break
            current = remain[:l]
            remain = remain[l:]
            p = self.m2i(pkt, current)
            lst.append(p)
        return remain, lst
class OFPPacketQueue(Packet):
    """OpenFlow packet queue description (ofp_packet_queue)."""
    def extract_padding(self, s):
        return "", s
    def post_build(self, p, pay):
        # A queue must carry at least one property; default to OFPQT_NONE
        # when none were supplied.
        # NOTE(review): str() of a packet is Python-2 era scapy; on
        # Python 3 this would need bytes(...) -- confirm target version.
        if self.properties == []:
            p += str(OFPQTNone())
        if self.len is None:
            l = len(p)+len(pay)
            # The len field sits at byte offset 4 of the queue structure.
            p = p[:4] + struct.pack("!H", l) + p[6:]
        return p + pay
    name = "OFP_PACKET_QUEUE"
    fields_desc = [ IntField("queue_id", 0),
                    ShortField("len", None),
                    XShortField("pad", 0),
                    QueuePropertyPacketListField("properties", [], Packet,
                                                 length_from=lambda pkt:pkt.len-8) ]
class QueuePacketListField(PacketListField):
    """PacketListField for ofp_packet_queue entries; each record's 16-bit
    length lives at byte offset 4."""
    @staticmethod
    def _get_queue_length(s):
        return struct.unpack("!H", s[4:6])[0]
    def getfield(self, pkt, s):
        lst = []
        remain = s
        # A queue record is at least 8 bytes (id + len + pad).
        while len(remain) >= 8:
            l = QueuePacketListField._get_queue_length(remain)
            if l < 8 or l > len(remain):
                # Malformed length: stop instead of looping forever or
                # overrunning the buffer.
                break
            current = remain[:l]
            remain = remain[l:]
            p = OFPPacketQueue(current)
            lst.append(p)
        return remain, lst
#####################################################
############## OpenFlow 1.0 Messages ################
#####################################################
class _ofp_header(Packet):
    """Base class for OpenFlow messages: fills in the 16-bit length field
    at byte offset 2 after building, if it was left unset."""
    name = "Dummy OpenFlow Header"
    def post_build(self, p, pay):
        if self.len is None:
            l = len(p)+len(pay)
            p = p[:2] + struct.pack("!H", l) + p[4:]
        return p + pay
ofp_version = { 0x01: "OpenFlow 1.0",
0x02: "OpenFlow 1.1",
0x03: "OpenFlow 1.2",
0x04: "OpenFlow 1.3",
0x05: "OpenFlow 1.4" }
ofp_type = { 0: "OFPT_HELLO",
| |
By nuking the directory,
# the next test run hopefully passes.
path = error.filename
# Be defensive -- only call rmtree if we're sure we aren't removing anything
# valuable.
if path.startswith(test_temp_dir + '/') and os.path.isdir(path):
shutil.rmtree(path)
raise
assert self.old_cwd is not None and self.tmpdir is not None, \
"test was not properly set up"
os.chdir(self.old_cwd)
try:
self.tmpdir.cleanup()
except OSError:
pass
self.old_cwd = None
self.tmpdir = None
    def reportinfo(self) -> Tuple[str, int, str]:
        # pytest hook: (file, line, test name) shown in failure headers.
        return self.file, self.line, self.name
    def repr_failure(self, excinfo: Any) -> str:
        """Format a test failure for pytest's report."""
        if excinfo.errisinstance(SystemExit):
            # We assume that before doing exit() (which raises SystemExit) we've printed
            # enough context about what happened so that a stack trace is not useful.
            # In particular, uncaught exceptions during semantic analysis or type checking
            # call exit() and they already print out a stack trace.
            excrepr = excinfo.exconly()
        else:
            self.parent._prunetraceback(excinfo)
            excrepr = excinfo.getrepr(style='short')
        return "data: {}:{}:\n{}".format(self.file, self.line, excrepr)
    def find_steps(self) -> List[List[FileOperation]]:
        """Return a list of descriptions of file operations for each incremental step.

        The first list item corresponds to the first incremental step, the second for the
        second step, etc. Each operation can either be a file modification/creation (UpdateFile)
        or deletion (DeleteFile).

        Defaults to having two steps if there aren't any operations.
        """
        steps = {}  # type: Dict[int, List[FileOperation]]
        for path, _ in self.files:
            # A file named "mod.py.N" holds the contents that mod.py should
            # take on at incremental step N (N >= 2).
            m = re.match(r'.*\.([0-9]+)$', path)
            if m:
                num = int(m.group(1))
                assert num >= 2
                target_path = re.sub(r'\.[0-9]+$', '', path)
                module = module_from_path(target_path)
                operation = UpdateFile(module, path, target_path)
                steps.setdefault(num, []).append(operation)
        for num, paths in self.deleted_paths.items():
            assert num >= 2
            for path in paths:
                module = module_from_path(path)
                steps.setdefault(num, []).append(DeleteFile(module, path))
        max_step = max(steps) if steps else 2
        return [steps.get(num, []) for num in range(2, max_step + 1)]
def module_from_path(path: str) -> str:
    """Map a file path under the test temp dir to a dotted module name."""
    stripped = re.sub(r'\.pyi?$', '', path)
    # Paths may mix Unix-style and Windows-style separators.
    parts = re.split(r'[/\\]', stripped)
    assert parts[0] == test_temp_dir
    module = '.'.join(parts[1:])
    return re.sub(r'\.__init__$', '', module)
class TestItem:
    """A single parsed test-case item.

    Items have the textual form::

        [id arg]
        .. data ..
    """
    # Class-level defaults, overwritten per instance in __init__.
    id = ''
    arg = ''  # type: Optional[str]
    # Text data, array of 8-bit strings
    data = None  # type: List[str]
    file = ''
    line = 0  # Line number in file
    def __init__(self, id: str, arg: Optional[str], data: List[str],
                 line: int) -> None:
        self.line = line
        self.data = data
        self.arg = arg
        self.id = id
def parse_test_data(raw_data: str, name: str) -> List[TestItem]:
    """Parse a list of lines that represent a sequence of test items.

    Each '[id arg]' header starts a new item; the lines up to the next header
    become its data (with '--' comment lines dropped, '[[' and '----' used as
    escapes for literal '[' and '--').
    """
    lines = ['', '[case ' + name + ']'] + raw_data.split('\n')
    ret = []  # type: List[TestItem]
    data = []  # type: List[str]
    id = None  # type: Optional[str]
    arg = None  # type: Optional[str]

    i = 0
    i0 = 0
    while i < len(lines):
        s = lines[i].strip()

        if lines[i].startswith('[') and s.endswith(']') and not s.startswith('[['):
            # Header line: flush the previous item, if any.
            if id:
                data = collapse_line_continuation(data)
                data = strip_list(data)
                # Fixed: data was redundantly passed through strip_list()
                # a second time here (unlike the final flush below).
                ret.append(TestItem(id, arg, data, i0 + 1))

            i0 = i
            id = s[1:-1]
            arg = None
            if ' ' in id:
                arg = id[id.index(' ') + 1:]
                id = id[:id.index(' ')]
            data = []
        elif lines[i].startswith('[['):
            # '[[' escapes a literal '[' at the start of a data line.
            data.append(lines[i][1:])
        elif not lines[i].startswith('--'):
            data.append(lines[i])
        elif lines[i].startswith('----'):
            # '----' escapes a literal '--' at the start of a data line.
            data.append(lines[i][2:])
        i += 1

    # Process the last item.
    if id:
        data = collapse_line_continuation(data)
        data = strip_list(data)
        ret.append(TestItem(id, arg, data, i0 + 1))

    return ret
def strip_list(l: List[str]) -> List[str]:
    """Return a stripped copy of l.

    Strip whitespace at the end of all lines, and strip all empty
    lines from the end of the array.
    """
    stripped = [re.sub(r'\s+$', '', line) for line in l]
    while stripped and stripped[-1] == '':
        stripped.pop()
    return stripped
def collapse_line_continuation(l: List[str]) -> List[str]:
    """Merge lines ending in a backslash with the following line.

    The trailing backslash is dropped and the continuation line's leading
    spaces are stripped before joining.
    """
    result = []  # type: List[str]
    pending = False
    for line in l:
        body = line[:-1] if line.endswith('\\') else line
        if pending:
            result[-1] += body.lstrip(' ')
        else:
            result.append(body)
        pending = line.endswith('\\')
    return result
def expand_variables(s: str) -> str:
    """Substitute the <ROOT> placeholder with the repository root directory."""
    return s.replace('<ROOT>', root_dir)
def expand_errors(input: List[str], output: List[str], fnam: str) -> None:
    """Transform comments such as '# E: message' or
    '# E:3: message' in input.

    The result is lines like 'fnam:line: error: message', appended to
    *output*. 'E' maps to error, 'N' to note and 'W' to warning.
    """
    severities = {'E': 'error', 'N': 'note', 'W': 'warning'}
    for i in range(len(input)):
        # The first chunk of the split is the code itself, not a comment.
        for possible_err_comment in input[i].split(' # ')[1:]:
            # Raw string: '\d' inside a plain literal is an invalid escape.
            m = re.search(
                r'^([ENW]):((?P<col>\d+):)? (?P<message>.*)$',
                possible_err_comment.strip())
            if m:
                severity = severities[m.group(1)]
                col = m.group('col')
                if col is None:
                    output.append(
                        '{}:{}: {}: {}'.format(fnam, i + 1, severity, m.group('message')))
                else:
                    output.append('{}:{}:{}: {}: {}'.format(
                        fnam, i + 1, col, severity, m.group('message')))
def fix_win_path(line: str) -> str:
    r"""Changes Windows paths to Linux paths in error messages.

    E.g. foo\bar.py -> foo/bar.py.
    """
    line = line.replace(root_dir, root_dir.replace('\\', '/'))
    m = re.match(r'^([\S/]+):(\d+:)?(\s+.*)', line)
    if m is None:
        return line
    filename, lineno, message = m.groups()
    return '{}:{}{}'.format(filename.replace('\\', '/'), lineno or '', message)
def fix_cobertura_filename(line: str) -> str:
    r"""Changes filename paths to Linux paths in Cobertura output files.

    E.g. filename="pkg\subpkg\a.py" -> filename="pkg/subpkg/a.py".
    """
    m = re.search(r'<class .* filename="(?P<filename>.*?)"', line)
    if not m:
        return line
    fixed = m.group('filename').replace('\\', '/')
    return line[:m.start(1)] + fixed + line[m.end(1):]
##
#
# pytest setup
#
##
# This function name is special to pytest. See
# https://docs.pytest.org/en/latest/reference.html#initialization-hooks
def pytest_addoption(parser: Any) -> None:
    """Register the mypy-specific command line options with pytest."""
    group = parser.getgroup('mypy')
    group.addoption(
        '--update-data',
        action='store_true',
        default=False,
        help='Update test data to reflect actual output (supported only for certain tests)',
    )
    group.addoption(
        '--save-failures-to',
        default=None,
        help='Copy the temp directories from failing tests to a target directory',
    )
    group.addoption(
        '--mypy-verbose',
        action='count',
        help='Set the verbose flag when creating mypy Options',
    )
# This function name is special to pytest. See
# http://doc.pytest.org/en/latest/writing_plugins.html#collection-hooks
def pytest_pycollect_makeitem(collector: Any, name: str,
                              obj: object) -> 'Optional[Any]':
    """Called by pytest on each object in modules configured in conftest.py files.

    collector is pytest.Collector, returns Optional[pytest.Class]
    """
    # Only classes derived from DataSuite contain test cases, not DataSuite itself.
    if not isinstance(obj, type):
        return None
    if not issubclass(obj, DataSuite) or obj is DataSuite:
        return None
    # Non-None result means this obj is a test case. The collect method of
    # the returned DataSuiteCollector instance will be called later, with
    # self.obj being obj.
    return DataSuiteCollector(name, parent=collector)
def split_test_cases(parent: 'DataSuiteCollector', suite: 'DataSuite',
                     path: str) -> Iterator['DataDrivenTestCase']:
    """Iterate over raw test cases in file, at collection time, ignoring sub items.

    The collection phase is slow, so any heavy processing should be deferred to after
    uninteresting tests are filtered (when using -k PATTERN switch).
    """
    with open(path, encoding='utf-8') as f:
        data = f.read()
    # Raw string literals: '\[' in a plain literal is an invalid escape.
    # The header pattern captures 4 groups, so re.split yields the preamble
    # followed by chunks of 5: (name, writescache, only_when, skip, body).
    cases = re.split(r'^\[case ([a-zA-Z_0-9]+)'
                     r'(-writescache)?'
                     r'(-only_when_cache|-only_when_nocache)?'
                     r'(-skip)?'
                     r'\][ \t]*$\n', data,
                     flags=re.DOTALL | re.MULTILINE)
    line_no = cases[0].count('\n') + 1
    for i in range(1, len(cases), 5):
        name, writescache, only_when, skip, data = cases[i:i + 5]
        yield DataDrivenTestCase(parent, suite, path,
                                 name=add_test_name_suffix(name, suite.test_name_suffix),
                                 writescache=bool(writescache),
                                 only_when=only_when,
                                 skip=bool(skip),
                                 data=data,
                                 line=line_no)
        # Advance past the case body plus its one-line header.
        line_no += data.count('\n') + 1
class DataSuiteCollector(pytest.Class):  # type: ignore  # inheriting from Any
    def collect(self) -> Iterator[pytest.Item]:  # type: ignore
        """Called by pytest on each of the object returned from pytest_pycollect_makeitem"""

        # obj is the object for which pytest_pycollect_makeitem returned self.
        suite = self.obj  # type: DataSuite
        for f in suite.files:
            # Each data file contributes many cases; yield them lazily so
            # filtering (-k) can skip heavy parsing of unneeded ones.
            yield from split_test_cases(self, suite, os.path.join(suite.data_prefix, f))
def add_test_name_suffix(name: str, suffix: str) -> str:
    """Insert *suffix* into a test name, keeping any magic '-foo' part last.

    Magic suffixes (used for things like '-skip') must remain the final
    component of the test case name since we are using endswith() checks.
    """
    m = re.search(r'-[-A-Za-z0-9]+$', name)
    if not m:
        return name + suffix
    split_at = m.start()
    return name[:split_at] + suffix + name[split_at:]
def is_incremental(testcase: DataDrivenTestCase) -> bool:
    """Whether the test case exercises incremental mode (by name or file)."""
    needle = 'incremental'
    return needle in testcase.name.lower() or needle in testcase.file
def has_stable_flags(testcase: DataDrivenTestCase) -> bool:
    """Whether the test keeps identical flags/config across incremental steps."""
    # Per-step flag overrides ('# flags2:', '# flags3:', ...) make flags unstable.
    if any(re.match(r'# flags[2-9]:', line) for line in testcase.input):
        return False
    # Per-step config files ('mypy.ini.2', ...) also make them unstable.
    return not any(os.path.basename(filename).startswith('mypy.ini.')
                   for filename, _ in testcase.files)
class DataSuite:
# option fields - class variables
files = None # type: List[str]
base_path = test_temp_dir
# Allow external users of the test code to override the data prefix
data_prefix = test_data_prefix
required_out_section = False
native_sep = False
# Name suffix automatically | |
# encoding: utf-8
import sys
import math
import itertools
import argparse
import networkx as nx
import logbook
from . import casedata as data
from . import casemaker
from . import similarity as sim
from . import pagerank as pr
from . import termexpand as tex
from . import syntaxscore as stx
def termmap(all_terms, logscale=False):
    """Factory: map a term set onto a distribution over *all_terms*
    (fill weight 1.0 per present term; see casemaker.as_dist_map)."""
    def _map(termset, label=None):
        distribution = casemaker.as_dist_map(
            termset, all_terms, fill=1.0,
            logscale=logscale,
        )
        return distribution, termset
    return _map
def fn_idfweight(nx_graph, allterms, interpole=True, preinterpole=False, sqrt=False):
    # Factory returning a mapper that weights every term in `allterms` by its
    # IDF value. Terms without an IDF entry optionally get the mean IDF of
    # their graph neighbours ("interpolation").
    if preinterpole:
        # Precompute interpolated IDF values over the whole graph up front.
        idfs = casemaker.idfmap_with_interpolation(nx_graph)
    else:
        idfs = data.idfmap()
    cache = {}  # label -> weight map; the map is label-independent otherwise
    # NOTE(review): the cache is keyed by `label` only — calls with
    # label=None (or a repeated label) all share one entry; confirm callers
    # always pass distinct labels.

    def _map(termset, label=None):
        if label in cache:
            return cache[label], termset
        wtmap = dict.fromkeys(allterms, 0.0)
        for key in wtmap:
            if key in idfs:
                idfval = idfs[key]
                if sqrt:
                    # Dampen large IDF values.
                    idfval = math.sqrt(idfval)
                wtmap[key] = idfval
            elif interpole:
                # No IDF known: average the IDF of the term's graph neighbours.
                nei_scores = [
                    idfs[node] for node
                    in nx.all_neighbors(nx_graph, key)
                    if node in idfs
                ]
                if sqrt:
                    nei_scores = [math.sqrt(v) for v in nei_scores]
                if nei_scores:
                    wtmap[key] = float(sum(nei_scores)) / float(len(nei_scores))
        cache[label] = wtmap
        return wtmap, termset
    return _map
def fn_centrality(nx_graph, preproc=lambda x: x, method=nx.betweenness_centrality):
    """Factory: score nodes by a graph centrality measure.

    The graph is copied and run through *preproc* once; each call of the
    returned mapper recomputes *method* on that prepared copy.
    """
    prepared = preproc(nx_graph.copy())

    def _distribution(featured_nodes, label=None):
        return method(prepared), featured_nodes
    return _distribution
def fn_multiply(mapper_a, mapper_b):
    """Compose two mappers by element-wise multiplication of their maps.

    mapper_b receives the term set already modified by mapper_a; both maps
    must cover exactly the same keys.
    """
    def _map(termset, label=None):
        map_a, modterms_a = mapper_a(termset, label=label)
        map_b, modterms_b = mapper_b(modterms_a, label=label)
        # Diagnostic on mismatch: show both one-sided key differences.
        assert set(map_a.keys()) == set(map_b.keys()), \
            (u','.join(set(map_a.keys()).difference(set(map_b.keys()))) +
             u'|' +
             u','.join(set(map_b.keys()).difference(set(map_a.keys())))).encode('utf-8')
        product = {key: map_a[key] * map_b[key] for key in map_a}
        return product, modterms_b
    return _map
def fn_inverse(mapper):
    """Wrap a mapper so every nonzero weight w becomes 1/w (zeros stay zero)."""
    def _map(termset, label=None):
        mapped, modterms = mapper(termset, label=label)
        inverted = {
            key: (value if value == 0.0 else 1.0 / value)
            for key, value in mapped.items()
        }
        return inverted, modterms
    return _map
def fn_expand(nx_graph, allterms, amplify=False, lower_expands=False):
    # Factory: expand a term set with related terms from the concept graph
    # (via termexpand.populate) and map the expanded set to a distribution.
    cache = {}  # sorted unique termset tuple -> (expanded terms, score map)

    def _expand(termset, key):
        termset = tuple(sorted(set(termset)))
        if termset not in cache:
            # Progress trace; expansion is the expensive step.
            print(u'term expanding: {}...'.format(key))
            expand, scoremap = tex.populate(
                termset, nx_graph,
                methods=tex.all_methods,
            )
            cache[termset] = expand, scoremap
        expand, scoremap = cache[termset]
        return expand, scoremap

    def _map(termset, label=None):
        expand, scoremap = _expand(termset, label)
        distmap = casemaker.as_dist_map(
            expand, allterms, fill=1.0,
        )
        if amplify:
            # Scale terms by their expansion score.
            for term in scoremap:
                distmap[term] *= scoremap[term]
        if lower_expands:
            # Halve the weight of the original (non-expanded) terms.
            for term in termset:
                distmap[term] *= 0.5
        return distmap, expand
    return _map
def fn_syntax(allterms, raw_titles, raw_sents):
    # Factory: weight terms by a syntax-based score computed from the raw
    # title and sentences of the document identified by `label`.
    def _map(termset, label=None):
        # Labels starting with 'q' denote questions; syntaxscore gets a
        # subject flag for them.
        if label.startswith(u'q'):
            subject=True
        else:
            subject=False
        stx_score = stx.syntaxscore(
            termset,
            raw_titles[label],
            raw_sents[label],
            subject=subject,
        )
        distmap = casemaker.as_dist_map(
            termset, allterms, fill=1.0,
        )
        # Multiply the uniform distribution by the per-term syntax scores.
        for term in stx_score:
            distmap[term] *= stx_score[term]
        return distmap, termset
    return _map
def labeled_weights(terms, mapped):
    """Return 'term/weight' display strings, one per term
    (weight 0.0 when a term is missing from *mapped*)."""
    return [u'{}/{}'.format(term, mapped.get(term, 0.0)) for term in terms]
def fn_display(mapper, raw_titles, raw_sents):
    """Pass-through wrapper around *mapper*.

    Historically this printed a per-document debug report (title, sentences
    and labeled term weights); the prints were commented out, which left a
    dead labeled_weights() computation — removed here. raw_titles and
    raw_sents are kept for interface compatibility.
    """
    def _map(termset, label=None):
        return mapper(termset, label=label)
    return _map
def fn_cutoff_right():
    """Relational mapper: scale the right-hand rank map by the left-hand one.

    Returns (rankmap_a, rankmap_b') where rankmap_b' is a copy of rankmap_b
    with each term's weight multiplied by that term's weight in rankmap_a;
    the inputs are not mutated.
    """
    def _relmap(key_a, rankmap_a, key_b, rankmap_b):
        rankmap_b = rankmap_b.copy()
        # items() instead of the Python-2-only iteritems(), which raises
        # AttributeError under Python 3.
        for term, weight_a in rankmap_a.items():
            rankmap_b[term] *= weight_a
        return rankmap_a, rankmap_b
    return _relmap
def fn_double(mapper):
    """Lift a single-document mapper into a joint (pairwise) mapper,
    caching per-key results so each document is mapped only once."""
    singlecache = {}

    def _single(key, termset):
        if key not in singlecache:
            singlecache[key] = mapper(termset, label=key)
        return singlecache[key]

    def _jmap(key_a, termset_a, key_b, termset_b):
        mapped_a, modterms_a = _single(key_a, termset_a)
        mapped_b, modterms_b = _single(key_b, termset_b)
        return mapped_a, modterms_a, mapped_b, modterms_b
    return _jmap
def fn_joint_multiply(mapper_x, mapper_y):
    """Compose two joint mappers by element-wise multiplication.

    mapper_y receives the term sets already modified by mapper_x; the
    resulting maps are the per-key products. Returns
    (map_a, terms_a, map_b, terms_b) for the pair.
    """
    def _jmap(key_a, termset_a, key_b, termset_b):
        map_xa, modterms_xa, map_xb, modterms_xb = mapper_x(key_a, termset_a, key_b, termset_b)
        map_ya, modterms_ya, map_yb, modterms_yb = mapper_y(key_a, modterms_xa, key_b, modterms_xb)
        assert set(map_xa.keys()) == set(map_ya.keys()), u','.join(set(map_xa.keys()).symmetric_difference(set(map_ya.keys()))).encode('utf-8')
        assert set(map_xb.keys()) == set(map_yb.keys())
        mulmap_a = {}
        mulmap_b = {}
        for key in map_xa:
            mulmap_a[key] = map_xa[key] * map_ya[key]
        for key in map_xb:
            mulmap_b[key] = map_xb[key] * map_yb[key]
        # BUG FIX: the 'a' slot previously returned modterms_yb (the b side's
        # term set) instead of modterms_ya.
        return mulmap_a, modterms_ya, mulmap_b, modterms_yb
    return _jmap
def fn_joint_cutoff_art(jmapper, by_multiplication=True, reverse=False, termsets=None):
    """Wrap a joint mapper so the article side of a (question, article) pair
    is filtered by the question side.

    When exactly one key of the pair is an article (key does not start with
    'q'), its map/terms are filtered against the other side: either
    multiplied by the other side's weights (by_multiplication=True) or
    zeroed wherever the other side's weight is zero. `reverse` flips which
    side counts as the article. `termsets` is unused (kept for interface
    compatibility with the removed debug trace).
    """
    answermap = data.answermap  # NOTE(review): only used by the removed debug trace

    def _filter(map_filtered, terms_filtered, map_base, terms_base):
        # Work on copies so the caller's maps/term lists are not mutated.
        map_filtered = map_filtered.copy()
        terms_filtered = list(terms_filtered[:])
        if by_multiplication:
            # items() instead of the Python-2-only iteritems().
            for term, weight_base in map_base.items():
                map_filtered[term] *= weight_base
        else:
            # Hard cutoff: zero out terms the base side does not support.
            for term in map_filtered.keys():
                if map_base[term] == 0.0:
                    map_filtered[term] = 0.0
            terms_filtered = [t for t in terms_filtered if t in terms_base]
        return map_filtered, tuple(terms_filtered)

    def _jmap(key_a, termset_a, key_b, termset_b):
        map_a, modterms_a, map_b, modterms_b = jmapper(key_a, termset_a, key_b, termset_b)
        a_is_art = not key_a.startswith(u'q')
        b_is_art = not key_b.startswith(u'q')
        if reverse:
            a_is_art = not a_is_art
            b_is_art = not b_is_art
        # Filter only when exactly one side is an article.
        if a_is_art and not b_is_art:
            map_a, modterms_a = _filter(map_a, modterms_a, map_b, modterms_b)
        elif b_is_art and not a_is_art:
            map_b, modterms_b = _filter(map_b, modterms_b, map_a, modterms_a)
        return map_a, modterms_a, map_b, modterms_b
    return _jmap
def fn_joint_idfonly(allterms, single_idfmapper, idfonly='a'):
    """Joint mapper that applies IDF weighting to only one side of a pair.

    `idfonly` selects which side gets IDF weights: 'q' (questions), 'a'
    (articles) or 'qa' (both); the other side keeps uniform 1.0 weights over
    its own terms.
    """
    if idfonly not in ('q', 'a', 'qa'):
        raise ValueError('pick from {q, a, qa}')
    idf_to_a = 'a' in idfonly
    idf_to_q = 'q' in idfonly

    def _jmap(key_a, termset_a, key_b, termset_b):
        a_is_art = not key_a.startswith(u'q')
        b_is_art = not key_b.startswith(u'q')
        map_to_a = (a_is_art and idf_to_a) or (not a_is_art and idf_to_q)
        # BUG FIX: the question branch previously tested idf_to_a, so
        # idfonly='q' never applied IDF weighting to the b side.
        map_to_b = (b_is_art and idf_to_a) or (not b_is_art and idf_to_q)

        map_a = dict.fromkeys(allterms, 0.0)
        map_a.update(dict.fromkeys(termset_a, 1.0))
        if map_to_a:
            comp_map_a, termset_a = single_idfmapper(termset_a, label=key_a)
            map_a.update(comp_map_a)

        map_b = dict.fromkeys(allterms, 0.0)
        map_b.update(dict.fromkeys(termset_b, 1.0))
        if map_to_b:
            comp_map_b, termset_b = single_idfmapper(termset_b, label=key_b)
            map_b.update(comp_map_b)
        return map_a, termset_a, map_b, termset_b
    return _jmap
def linkrater(nx_graph, allterms):
    """Factory for a per-term "link rate" factor map.

    For each term shared by base_terms and checked_terms that was produced by
    expansion, the factor is the fraction of its graph neighbours in
    checked_terms that are also in base_terms; all other terms get 1.0.
    """
    _graph = nx_graph.to_undirected()
    cache = {}

    def _neighbours(term):
        if term not in cache:
            if term not in _graph:
                linked = tuple()
            else:
                # BUG FIX: materialize the nx.all_neighbors() iterator.
                # Caching the raw iterator meant every lookup after the first
                # saw an exhausted iterator (i.e. no neighbours at all).
                linked = tuple(nx.all_neighbors(_graph, term))
            cache[term] = linked
        return cache[term]

    def _factor(base_terms, checked_terms, expanded):
        base_set = set(base_terms)
        checked_set = set(checked_terms)
        # Only score terms present on both sides AND produced by expansion.
        targets = base_set.intersection(checked_set).intersection(expanded)
        factor = {}
        for term in targets:
            neis = set(_neighbours(term))
            neis_in_base = base_set.intersection(neis)
            neis_in_checked = checked_set.intersection(neis)
            if not neis_in_checked:
                # No checked-side neighbours: neutral unless only base-side
                # links exist (then the term gets zeroed).
                factor[term] = 1.0 if not neis_in_base else 0.0
                continue
            overlap = neis_in_base.intersection(neis_in_checked)
            factor[term] = float(len(overlap)) / float(len(neis_in_checked))
            assert factor[term] <= 1.0
        return factor

    allone = dict.fromkeys(allterms, 1.0)

    def factor_maker(base_terms, checked_terms, expanded):
        if not expanded:
            return allone.copy()
        mapped = allone.copy()
        mapped.update(_factor(base_terms, checked_terms, expanded))
        return mapped
    return factor_maker
def fn_joint_bridgehierarchy(nx_graph, allterms, bridge='q', expandother=False):
    """Joint mapper that "bridges" one side's terms to the other side's terms
    via hierarchy (hyper/hyperx) paths in the concept graph.

    `bridge` selects which side is expanded: 'q' (questions), 'a' (articles)
    or 'qa' (both). Unless `expandother` is set, only hierarchy edges are
    followed when testing reachability.
    """
    answermap = data.answermap
    if bridge not in ('q', 'a', 'qa'):
        raise ValueError('pick from {q, a, qa}')
    bridge_from_art = 'a' in bridge
    bridge_from_q = 'q' in bridge

    hierarchy_graph = nx_graph.copy()
    if not expandother:
        # Keep only hierarchy edges so has_path() follows hyper(nym) links.
        for src, dest, edgedata in nx_graph.edges(data=True):
            if edgedata['label'] not in (u'hyper', u'hyperx'):
                hierarchy_graph.remove_edge(src, dest)

    cache = {}  # (from terms, to terms) -> tuple of bridged additions

    def _bridge(fromterms, toterms):
        fromterms = list(fromterms)
        ordered_fromterms = list(sorted(set(fromterms)))
        toterms = list(sorted(set(toterms).difference(fromterms)))
        from_as_tuple, to_as_tuple = tuple(ordered_fromterms), tuple(toterms)
        if (from_as_tuple, to_as_tuple) in cache:
            appends = list(cache[(from_as_tuple, to_as_tuple)])
        else:
            appends = []
            for fromterm in ordered_fromterms:
                for toterm in toterms:
                    if fromterm not in hierarchy_graph or toterm not in hierarchy_graph:
                        continue
                    if nx.has_path(hierarchy_graph, fromterm, toterm):
                        appends.append(toterm)
            cache[(from_as_tuple, to_as_tuple)] = tuple(appends)
        fromterms.extend(appends)
        return tuple(fromterms)

    def _jmap(key_a, termset_a, key_b, termset_b):
        a_is_q = key_a.startswith(u'q')
        b_is_q = key_b.startswith(u'q')
        bridge_from_a = (a_is_q and bridge_from_q) or (not a_is_q and bridge_from_art)
        bridge_from_b = (b_is_q and bridge_from_q) or (not b_is_q and bridge_from_art)

        # BUG FIX: initialize both expansions up front. Previously, when only
        # one side was bridged, the debug trace below raised NameError for
        # the other side's never-assigned expand_terms_* variable.
        expand_terms_a = tuple(termset_a)
        expand_terms_b = tuple(termset_b)

        map_a = dict.fromkeys(allterms, 0.0)
        if bridge_from_a:
            expand_terms_a = _bridge(termset_a, termset_b)
            map_a.update(dict.fromkeys(expand_terms_a, 1.0))
        map_b = dict.fromkeys(allterms, 0.0)
        if bridge_from_b:
            expand_terms_b = _bridge(termset_b, termset_a)
            map_b.update(dict.fromkeys(expand_terms_b, 1.0))

        # Debug trace for (question, correct-answer) pairs.
        if bridge_from_a or bridge_from_b:
            if a_is_q and key_b in answermap[key_a]:
                print(u'q-{} & a-{}'.format(key_a, key_b))
                print(u'====================')
                print(u','.join(termset_a) + u'+->' + u','.join(set(expand_terms_a).difference(set(termset_a))))
                print(u'====================')
                print(u','.join(termset_b) + u'+->' + u','.join(set(expand_terms_b).difference(set(termset_b))))
                print(u'')
            elif b_is_q and key_a in answermap[key_b]:
                print(u'q-{} & a-{}'.format(key_b, key_a))
                print(u'====================')
                print(u','.join(termset_a) + u'+->' + u','.join(set(expand_terms_a).difference(set(termset_a))))
                print(u'====================')
                print(u','.join(termset_b) + u'+->' + u','.join(set(expand_terms_b).difference(set(termset_b))))
                print(u'')
                print(u'')

        if bridge_from_a:
            termset_a = expand_terms_a
        if bridge_from_b:
            termset_b = expand_terms_b
        return map_a, termset_a, map_b, termset_b
    return _jmap
def fn_joint_hierarchyreduce(nx_graph, allterms, termsets):
answermap = data.answermap
hierarchy_graph = nx_graph.copy()
for src, dest, edgedata in nx_graph.edges(data=True):
if edgedata['label'] not in (u'hyper', u'hyperx'):#, u'antecedent_to'):
hierarchy_graph.remove_edge(src, dest)
cache = {}
def _upper(hyper, hypo):
key = (hyper, hypo)
if key not in cache:
cache[key] = nx.has_path(hierarchy_graph, hypo, hyper)
return cache[key]
def _hierarchy_reduce(termset, excepts=tuple()):
reduced_terms = list(termset)
reduced = False
for hyper, hypo in itertools.permutations(termset, 2):
if hyper in excepts:
continue
if hyper == hypo:
continue
if hyper not in hierarchy_graph or hypo not in hierarchy_graph:
continue
if _upper(hyper, hypo) and hyper in reduced_terms:
reduced_terms.remove(hyper)
reduced_terms.append(hypo)
reduced = True
return tuple(reduced_terms), | |
limit, function_name, arg_num=None, arg_name=None):
if(arg_num):
arg = arg_num;
msg = "Constraint Mismatch for argument number \"{}\" in function \"{}\".\n".format(arg, function_name);
if(arg_name):
arg = arg_name;
msg = "Constraint Mismatch for argument name \"{}\" in function \"{}\".\n".format(arg, function_name);
if(type(actual_value) == list):
if(sorted(actual_value, reverse=True) != actual_value):
msg += "List expected to be decremental, but is \"{}\"".format(actual_value);
raise ConstraintError(msg);
def check_name(actual_value, limit, function_name, arg_num=None, arg_name=None):
    # Validates that a string argument contains only characters allowed by
    # `limit`, a list mixing range shorthands ("a-z", "A-Z", "0-9") and
    # literal character sequences. Raises ConstraintError on the first
    # disallowed character.
    # NOTE(review): if neither arg_num nor arg_name is supplied (or arg_num
    # is 0, which is falsy), `msg` is never initialized and a violation would
    # raise UnboundLocalError instead — confirm callers always pass one.
    if(arg_num):
        arg = arg_num;
        msg = "Constraint Mismatch for argument number \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(arg_name):
        arg = arg_name;
        msg = "Constraint Mismatch for argument name \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(type(actual_value) == str):
        total_list = [];
        for i in range(len(limit)):
            if(limit[i] == "a-z"):
                total_list += list(string.ascii_lowercase)
            elif(limit[i] == "A-Z"):
                total_list += list(string.ascii_uppercase)
            elif(limit[i] == "0-9"):
                total_list += list(string.digits)
            else:
                # Literal allowed characters (any iterable of chars).
                total_list += limit[i];
        actual_value = list(actual_value)
        for j in range(len(actual_value)):
            if(actual_value[j] not in total_list):
                msg += "Character \"{}\" not allowed as per constrains \"{}\"".format(actual_value[j], limit);
                raise ConstraintError(msg);
def accept_decorator(validate_function):
    """Decorator that validates positional and keyword arguments against the
    constraint specs (`arg_constraints` / `kwargs_constraints`) captured from
    the enclosing factory. Each checker raises ConstraintError on violation.
    """
    # Constraint keyword -> checker; replaces the long if-chains below.
    checkers = {
        "gte": check_gte, "gt": check_gt, "lte": check_lte, "lt": check_lt,
        "eq": check_eq, "neq": check_neq, "in": check_in, "nin": check_nin,
        "folder": check_folder, "file": check_file,
        "inc": check_inc, "dec": check_dec, "name": check_name,
    }

    @functools.wraps(validate_function)
    def decorator_wrapper(*function_args, **function_args_dicts):
        # BUG FIX: was `len(...) is not len(...)`. Identity comparison of
        # ints only works by accident for small values; use !=.
        if len(arg_constraints) != len(function_args):
            raise InvalidArgumentNumberError(validate_function.__name__)
        if kwargs_constraints["post_trace"]:
            # NOTE(review): assumes a doubly-wrapped callable when tracing is
            # on — confirm against the post_trace decorator's structure.
            function_name = validate_function.function.function.__name__
        else:
            function_name = validate_function.__name__

        # Positional args: constraints come as a flat [kind, limit, ...] list.
        for arg_num, (actual_arg, arg_constraint) in enumerate(zip(function_args, arg_constraints)):
            if arg_constraint:
                for i in range(len(arg_constraint) // 2):
                    checker = checkers.get(arg_constraint[i * 2])
                    if checker:
                        checker(actual_arg, arg_constraint[i * 2 + 1],
                                function_name, arg_num=arg_num + 1)

        # Keyword args: looked up by name in kwargs_constraints.
        for key, actual_arg in function_args_dicts.items():
            arg_constraint = kwargs_constraints[key]
            if arg_constraint:
                for i in range(len(arg_constraint) // 2):
                    checker = checkers.get(arg_constraint[i * 2])
                    if checker:
                        checker(actual_arg, arg_constraint[i * 2 + 1],
                                function_name, arg_name=key)

        return validate_function(*function_args, **function_args_dicts)
    return decorator_wrapper
return accept_decorator
def warning_checks(*arg_constraints, **kwargs_constraints):
# Warning-emitting variants of the constraint checkers. Each builds a
# context message, then checks a numeric/collection/path argument.
# NOTE(review): `ConstraintWarning(msg)` only constructs the warning object;
# no warnings.warn() or raise is visible here — confirm this is intended.
# NOTE(review): `msg` is unbound when neither arg_num nor arg_name is passed
# (or arg_num is 0) — confirm callers always supply one of them.
def check_gte(actual_value, limit, function_name, arg_num=None, arg_name=None):
    # Warn when a numeric arg, or any element of a list/tuple arg, is < limit.
    if(arg_num):
        arg = arg_num;
        msg = "Constraint Mismatch for argument number \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(arg_name):
        arg = arg_name;
        msg = "Constraint Mismatch for argument name \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(type(actual_value) in [int, float]):
        if(actual_value < limit):
            msg += "Value expected to be greater than equal to \"{}\", but is \"{}\"".format(limit, actual_value);
            ConstraintWarning(msg);
    if(type(actual_value) in [list, tuple]):
        for i in range(len(actual_value)):
            if(actual_value[i] < limit):
                msg += "List's arg number \"{}\" expected to be greater than equal to \"{}\", but is \"{}\"".format(i+1, limit, actual_value[i]);
                ConstraintWarning(msg);
def check_gt(actual_value, limit, function_name, arg_num=None, arg_name=None):
    # Warn when a numeric arg, or any element of a list/tuple arg, is <= limit.
    if(arg_num):
        arg = arg_num;
        msg = "Constraint Mismatch for argument number \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(arg_name):
        arg = arg_name;
        msg = "Constraint Mismatch for argument name \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(type(actual_value) in [int, float]):
        if(actual_value <= limit):
            msg += "Value expected to be strictly greater than to \"{}\", but is \"{}\"".format(limit, actual_value);
            ConstraintWarning(msg);
    if(type(actual_value) in [list, tuple]):
        for i in range(len(actual_value)):
            if(actual_value[i] <= limit):
                msg += "List's arg number \"{}\" expected to be strictly greater than equal to \"{}\", but is \"{}\"".format(i+1, limit, actual_value[i]);
                ConstraintWarning(msg);
def check_lte(actual_value, limit, function_name, arg_num=None, arg_name=None):
    # Warn when a numeric arg, or any element of a list/tuple arg, is > limit.
    if(arg_num):
        arg = arg_num;
        msg = "Constraint Mismatch for argument number \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(arg_name):
        arg = arg_name;
        msg = "Constraint Mismatch for argument name \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(type(actual_value) in [int, float]):
        if(actual_value > limit):
            msg += "Value expected to be less than equal to \"{}\", but is \"{}\"".format(limit, actual_value);
            ConstraintWarning(msg);
    if(type(actual_value) in [list, tuple]):
        for i in range(len(actual_value)):
            if(actual_value[i] > limit):
                msg += "List's arg number \"{}\" expected to be less than equal to \"{}\", but is \"{}\"".format(i+1, limit, actual_value[i]);
                ConstraintWarning(msg);
def check_lt(actual_value, limit, function_name, arg_num=None, arg_name=None):
    # Warn when a numeric arg, or any element of a list/tuple arg, is >= limit.
    if(arg_num):
        arg = arg_num;
        msg = "Constraint Mismatch for argument number \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(arg_name):
        arg = arg_name;
        msg = "Constraint Mismatch for argument name \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(type(actual_value) in [int, float]):
        if(actual_value >= limit):
            msg += "Value expected to be strictly less than to \"{}\", but is \"{}\"".format(limit, actual_value);
            ConstraintWarning(msg);
    if(type(actual_value) in [list, tuple]):
        for i in range(len(actual_value)):
            if(actual_value[i] >= limit):
                msg += "List's arg number \"{}\" expected to be strictly less than equal to \"{}\", but is \"{}\"".format(i+1, limit, actual_value[i]);
                ConstraintWarning(msg);
def check_eq(actual_value, limit, function_name, arg_num=None, arg_name=None):
    # Warn when the arg is not equal to limit.
    if(arg_num):
        arg = arg_num;
        msg = "Constraint Mismatch for argument number \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(arg_name):
        arg = arg_name;
        msg = "Constraint Mismatch for argument name \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(type(actual_value) in [int, float, str, list, tuple]):
        if(actual_value != limit):
            msg += "Value expected to be strictly equal to \"{}\", but is \"{}\"".format(limit, actual_value);
            ConstraintWarning(msg);
def check_neq(actual_value, limit, function_name, arg_num=None, arg_name=None):
    # Warn when the arg is equal to limit.
    if(arg_num):
        arg = arg_num;
        msg = "Constraint Mismatch for argument number \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(arg_name):
        arg = arg_name;
        msg = "Constraint Mismatch for argument name \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(type(actual_value) in [int, float, str, list, tuple]):
        if(actual_value == limit):
            msg += "Value expected to be strictly not equal to \"{}\", but is \"{}\"".format(limit, actual_value);
            ConstraintWarning(msg);
def check_in(actual_value, limit, function_name, arg_num=None, arg_name=None):
    # Warn when the arg is not a member of the `limit` collection.
    if(arg_num):
        arg = arg_num;
        msg = "Constraint Mismatch for argument number \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(arg_name):
        arg = arg_name;
        msg = "Constraint Mismatch for argument name \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(type(actual_value) in list(map(type, limit))):
        if(actual_value not in limit):
            msg += "Value expected to be one among \"{}\", but is \"{}\"".format(limit, actual_value);
            ConstraintWarning(msg);
def check_nin(actual_value, limit, function_name, arg_num=None, arg_name=None):
    # Warn when the arg IS a member of the `limit` collection.
    if(arg_num):
        arg = arg_num;
        msg = "Constraint Mismatch for argument number \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(arg_name):
        arg = arg_name;
        msg = "Constraint Mismatch for argument name \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(type(actual_value) in list(map(type, limit))):
        if(actual_value in limit):
            msg += "Value expected to be anything except \"{}\", but is \"{}\"".format(limit, actual_value);
            ConstraintWarning(msg);
def check_folder(actual_value, limit, function_name, arg_num=None, arg_name=None):
    # Warn when a path (or each path in a list) is not an existing directory
    # or lacks the access mode requested via limit ("r" or "w").
    if(arg_num):
        arg = arg_num;
        msg = "Constraint Mismatch for argument number \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(arg_name):
        arg = arg_name;
        msg = "Constraint Mismatch for argument name \"{}\" in function \"{}\".\n".format(arg, function_name);
    if(type(actual_value) == str):
        if(not os.path.isdir(actual_value)):
            msg = "Folder \"{}\" not found".format(actual_value)
            ConstraintWarning(msg);
        if(limit == "r"):
            if(not os.access(actual_value, os.R_OK)):
                msg = "Folder \"{}\" has no read access".format(actual_value)
                ConstraintWarning(msg);
        if(limit == "w"):
            if(not os.access(actual_value, os.W_OK)):
                msg = "Folder \"{}\" has no write access".format(actual_value)
                ConstraintWarning(msg);
    if(type(actual_value) == list):
        for i in range(len(actual_value)):
            if(not os.path.isdir(actual_value[i])):
                msg = "Folder \"{}\" not found".format(actual_value[i])
                ConstraintWarning(msg);
            if(limit == "r"):
                if(not os.access(actual_value[i], os.R_OK)):
                    msg = "Folder \"{}\" has no read access".format(actual_value[i])
                    ConstraintWarning(msg);
            if(limit == "w"):
                if(not os.access(actual_value[i], os.W_OK)):
                    msg = "Folder \"{}\" has no write access".format(actual_value[i])
                    ConstraintWarning(msg);
def check_file(actual_value, limit, function_name, arg_num=None, arg_name=None):
    """Emit ConstraintWarnings when file path(s) are missing or lack the requested access.

    Args:
        actual_value: A file path (str) or a list of file paths; other types are ignored.
        limit: Access mode to verify: "r" for read access, "w" for write access.
        function_name: Name of the function being checked (kept for signature
            compatibility with the other check_* helpers; not used in the messages).
        arg_num: Optional positional index of the argument (unused; see above).
        arg_name: Optional name of the argument (unused; see above).
    """
    if type(actual_value) == str:
        files = [actual_value]
    elif type(actual_value) == list:
        files = actual_value
    else:
        return
    for path in files:
        # Bug fix: the original tested os.path.isdir here, so every existing
        # regular file was reported as "not found". Files need isfile.
        if not os.path.isfile(path):
            ConstraintWarning("File \"{}\" not found".format(path))
        # Access checks intentionally run even when the file is missing,
        # mirroring the original behavior (os.access is False for missing paths).
        if limit == "r" and not os.access(path, os.R_OK):
            ConstraintWarning("File \"{}\" has no read access".format(path))
        if limit == "w" and not os.access(path, os.W_OK):
            ConstraintWarning("File \"{}\" has no write access".format(path))
def check_inc(actual_value, limit, function_name, arg_num=None, arg_name=None):
if(arg_num):
arg = arg_num;
msg = "Constraint Mismatch for argument number \"{}\" in | |
<reponame>pombredanne/plyara-1
#!/usr/bin/env python
# Copyright 2014 <NAME>
# Copyright 2020 plyara Maintainers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parse YARA rules and operate over them more easily.
Plyara is a script and library that lexes and parses a file consisting of one or more YARA rules into a python
dictionary representation. The goal of this tool is to make it easier to perform bulk operations or transformations of
large sets of YARA rules, such as extracting indicators, updating attributes, and analyzing a corpus. Other applications
include linters and dependency checkers.
"""
import enum
import logging
import string
import tempfile
import re
import ply.lex as lex
import ply.yacc as yacc
from plyara.exceptions import ParseTypeError, ParseValueError
# Initialize the logger
logger = logging.getLogger(__name__)
class ElementTypes(enum.Enum):
    """Element types emitted by the parser to the interpreter.

    Members are numbered 1..11 in declaration order via enum.auto(), matching
    the values the interpreter dispatches on.
    """
    RULE_NAME = enum.auto()
    METADATA_KEY_VALUE = enum.auto()
    STRINGS_KEY_VALUE = enum.auto()
    STRINGS_MODIFIER = enum.auto()
    IMPORT = enum.auto()
    TERM = enum.auto()
    SCOPE = enum.auto()
    TAG = enum.auto()
    INCLUDE = enum.auto()
    COMMENT = enum.auto()
    MCOMMENT = enum.auto()
class StringTypes(enum.Enum):
    """The kinds of string definitions found in a YARA rule.

    Members are numbered 1..3 in declaration order via enum.auto().
    """
    TEXT = enum.auto()
    BYTE = enum.auto()
    REGEX = enum.auto()
class Parser:
    """Interpret the output of the parser and produce an alternative representation of YARA rules."""
    # String modifiers that the interpreter treats as mutually exclusive on a text string.
    EXCLUSIVE_TEXT_MODIFIERS = {'nocase', 'xor', 'base64'}
    COMPARISON_OPERATORS = {'==', '!=', '>', '<', '>=', '<='}
    # YARA modules accepted in an `import "..."` statement.
    IMPORT_OPTIONS = {'pe',
                      'elf',
                      'cuckoo',
                      'magic',
                      'hash',
                      'math',
                      'dotnet',
                      'androguard',
                      'time'}
    # Reserved words of the YARA language.
    KEYWORDS = {'all', 'and', 'any', 'ascii', 'at', 'condition',
                'contains', 'entrypoint', 'false', 'filesize',
                'fullword', 'for', 'global', 'in', 'import',
                'include', 'int8', 'int16', 'int32', 'int8be',
                'int16be', 'int32be', 'matches', 'meta', 'nocase',
                'not', 'or', 'of', 'private', 'rule', 'strings',
                'them', 'true', 'uint8', 'uint16', 'uint32', 'uint8be',
                'uint16be', 'uint32be', 'wide', 'xor', 'base64', 'base64wide'}
    # Keywords that are callable like functions in conditions.
    FUNCTION_KEYWORDS = {'uint8', 'uint16', 'uint32', 'uint8be', 'uint16be', 'uint32be'}
    def __init__(self, console_logging=False, store_raw_sections=True, meta_as_kv=False):
        """Initialize the parser object.
        Args:
            console_logging: Enable a stream handler if no handlers exist. (default False)
            store_raw_sections: Enable attribute storage of raw section input. (default True)
            meta_as_kv: Enable alternate structure for meta section as dictionary. (default False)
        """
        # Per-parse accumulators: rules collects finished rule dicts, the rest
        # gather pieces of the rule currently being interpreted.
        self.rules = list()
        self.current_rule = dict()
        self.string_modifiers = list()
        self.imports = set()
        self.includes = list()
        self.terms = list()
        self.scopes = list()
        self.tags = list()
        self.comments = list()
        if console_logging:
            self._set_logging()
        # adds functionality to track attributes containing raw section data
        # in case needed (ie modifying metadata and re-constructing a complete rule
        # while maintaining original comments and padding)
        self.store_raw_sections = store_raw_sections
        self._raw_input = None
        # Character offsets into _raw_input delimiting each section of the current rule.
        self._meta_start = None
        self._meta_end = None
        self._strings_start = None
        self._strings_end = None
        self._condition_start = None
        self._condition_end = None
        self._rule_comments = list()
        self._stringnames = set()
        # Adds a dictionary representation of the meta section of a rule
        self.meta_as_kv = meta_as_kv
        # Build the PLY lexer/parser from the token and grammar rules defined on
        # this object (the subclass supplies t_* and p_* members).
        self.lexer = lex.lex(module=self, debug=False)
        self.parser = yacc.yacc(module=self, debug=False, outputdir=tempfile.gettempdir())
    def clear(self):
        """Clear all information about previously parsed rules."""
        self.rules.clear()
        self.current_rule.clear()
        self.string_modifiers.clear()
        self.imports.clear()
        self.includes.clear()
        self.terms.clear()
        self.scopes.clear()
        self.tags.clear()
        self.comments.clear()
        self._raw_input = None
        self._meta_start = None
        self._meta_end = None
        self._strings_start = None
        self._strings_end = None
        self._condition_start = None
        self._condition_end = None
        self._rule_comments.clear()
        self._stringnames.clear()
        # lineno > 1 means a previous parse consumed input, so the PLY state
        # must be reset before the next parse.
        if self.lexer.lineno > 1:
            # Per https://ply.readthedocs.io/en/latest/ply.html#panic-mode-recovery
            # This discards the entire parsing stack and resets the parser to its
            # initial state.
            self.parser.restart()
            # Per https://ply.readthedocs.io/en/latest/ply.html#eof-handling
            # Be aware that setting more input with the self.lexer.input() method
            # does NOT reset the lexer state or the lineno attribute used for
            # position tracking.
            self.lexer.lineno = 1
    @staticmethod
    def _set_logging():
        """Set the console logger only if handler(s) aren't already set."""
        if not len(logger.handlers):
            logger.setLevel(logging.DEBUG)
            ch = logging.StreamHandler()
            ch.setLevel(logging.DEBUG)
            logger.addHandler(ch)
    def _add_element(self, element_type, element_value):
        """Accept elements from the parser and uses them to construct a representation of the YARA rule.
        Args:
            element_type: The element type determined by the parser. Input is one of ElementTypes.
            element_value: This is the contents of the element as parsed from the rule.
        """
        if element_type == ElementTypes.RULE_NAME:
            # RULE_NAME arrives last for a rule, so it finalizes current_rule.
            rule_name, start_line, stop_line = element_value
            self.current_rule['rule_name'] = rule_name
            self.current_rule['start_line'] = start_line
            self.current_rule['stop_line'] = stop_line
            if self.store_raw_sections:
                # Slice the original input so raw sections keep comments/padding.
                if self._meta_start:
                    self.current_rule['raw_meta'] = self._raw_input[self._meta_start:self._meta_end]
                if self._strings_start:
                    self.current_rule['raw_strings'] = self._raw_input[self._strings_start:self._strings_end]
                if self._condition_start:
                    self.current_rule['raw_condition'] = self._raw_input[self._condition_start:self._condition_end]
            self._flush_accumulators()
            self.rules.append(self.current_rule)
            logger.debug('Adding Rule: {}'.format(self.current_rule['rule_name']))
            self.current_rule = dict()
            self._stringnames.clear()
        elif element_type == ElementTypes.METADATA_KEY_VALUE:
            key, value = element_value
            # metadata is a list of single-entry dicts so duplicate keys survive;
            # metadata_kv is the optional flattened dict view.
            if 'metadata' not in self.current_rule:
                self.current_rule['metadata'] = [{key: value}]
                if self.meta_as_kv:
                    self.current_rule['metadata_kv'] = {key: value}
            else:
                self.current_rule['metadata'].append({key: value})
                if self.meta_as_kv:
                    self.current_rule['metadata_kv'][key] = value
        elif element_type == ElementTypes.STRINGS_KEY_VALUE:
            key, value, string_type = element_value
            string_dict = {'name': key, 'value': value, 'type': string_type.name.lower()}
            # Attach any modifiers accumulated since the previous string entry.
            if any(self.string_modifiers):
                string_dict['modifiers'] = self.string_modifiers
                self.string_modifiers = list()
            if 'strings' not in self.current_rule:
                self.current_rule['strings'] = [string_dict]
            else:
                self.current_rule['strings'].append(string_dict)
        elif element_type == ElementTypes.STRINGS_MODIFIER:
            self.string_modifiers.append(element_value)
        elif element_type == ElementTypes.IMPORT:
            self.imports.add(element_value)
        elif element_type == ElementTypes.INCLUDE:
            self.includes.append(element_value)
        elif element_type == ElementTypes.TERM:
            self.terms.append(element_value)
        elif element_type == ElementTypes.SCOPE:
            self.scopes.append(element_value)
        elif element_type == ElementTypes.TAG:
            self.tags.append(element_value)
        elif element_type == ElementTypes.COMMENT:
            self.comments.append(element_value)
        elif element_type == ElementTypes.MCOMMENT:
            self.comments.append(element_value)
    def _flush_accumulators(self):
        """Add accumulated elements to the current rule and resets the accumulators."""
        if any(self.terms):
            self.current_rule['condition_terms'] = self.terms
            self.terms = list()
        if any(self.scopes):
            self.current_rule['scopes'] = self.scopes
            self.scopes = list()
        if any(self.tags):
            self.current_rule['tags'] = self.tags
            self.tags = list()
        if any(self.comments):
            self.current_rule['comments'] = self.comments
            self.comments = list()
        # Reset section offsets so the next rule's slices start clean.
        self._meta_start = None
        self._meta_end = None
        self._strings_start = None
        self._strings_end = None
        self._condition_start = None
        self._condition_end = None
    def parse_string(self, input_string):
        """Take a string input expected to consist of YARA rules, and return list of dictionaries representing them.
        Args:
            input_string: String input expected to consist of YARA rules.
        Returns:
            dict: All the parsed components of a YARA rule.
        """
        self._raw_input = input_string
        self.parser.parse(input_string, lexer=self.lexer)
        # NOTE(review): imports/includes are file-scoped, so every parsed rule
        # receives the same combined lists.
        for rule in self.rules:
            if any(self.imports):
                rule['imports'] = list(self.imports)
            if any(self.includes):
                rule['includes'] = self.includes
        return self.rules
class Plyara(Parser):
"""Define the lexer and the parser rules."""
STRING_ESCAPE_CHARS = {'"', '\\', 't', 'n', 'x'}
tokens = [
'BYTESTRING',
'STRING',
'REXSTRING',
'EQUALS',
'STRINGNAME',
'STRINGNAME_ARRAY',
'STRINGNAME_COUNT',
'STRINGNAME_LENGTH',
'LPAREN',
'RPAREN',
'LBRACK',
'RBRACK',
'LBRACE',
'RBRACE',
'ID',
'BACKSLASH',
'FORWARDSLASH',
'PIPE',
'PLUS',
'SECTIONMETA',
'SECTIONSTRINGS',
'SECTIONCONDITION',
'COMMA',
'GREATERTHAN',
'LESSTHAN',
'GREATEREQUAL',
'LESSEQUAL',
'RIGHTBITSHIFT',
'LEFTBITSHIFT',
'MODULO',
'TILDE',
'XOR_OP', # XOR operator token (from conditions section)
'PERIOD',
'COLON',
'STAR',
'HYPHEN',
'AMPERSAND',
'NEQUALS',
'EQUIVALENT',
'DOTDOT',
'HEXNUM',
'FILESIZE_SIZE',
'NUM',
'COMMENT',
'MCOMMENT'
]
reserved = {
'all': 'ALL',
'and': 'AND',
'any': 'ANY',
'ascii': 'ASCII',
'at': 'AT',
'contains': 'CONTAINS',
'entrypoint': 'ENTRYPOINT',
'false': 'FALSE',
'filesize': 'FILESIZE',
'for': 'FOR',
'fullword': 'FULLWORD',
'global': 'GLOBAL',
'import': 'IMPORT',
'in': 'IN',
'include': 'INCLUDE',
'int8': 'INT8',
'int16': 'INT16',
'int32': 'INT32',
'int8be': 'INT8BE',
'int16be': 'INT16BE',
'int32be': 'INT32BE',
'matches': 'MATCHES',
'nocase': 'NOCASE',
'not': 'NOT',
'of': 'OF',
'or': 'OR',
'private': 'PRIVATE',
'rule': 'RULE',
'them': 'THEM',
'true': 'TRUE',
'wide': 'WIDE',
'uint8': 'UINT8',
'uint16': 'UINT16',
'uint32': 'UINT32',
'uint8be': 'UINT8BE',
'uint16be': 'UINT16BE',
'uint32be': 'UINT32BE',
'xor': 'XOR_MOD', # XOR string modifier token (from strings section)
'base64': 'BASE64',
'base64wide': 'BASE64WIDE'
}
tokens = tokens + list(reserved.values())
# Regular expression rules for simple tokens
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_EQUIVALENT = r'=='
t_NEQUALS = r'!='
t_EQUALS = r'='
t_LBRACE = r'{'
t_PLUS = r'\+'
t_PIPE = r'\|'
t_BACKSLASH = r'\\'
t_FORWARDSLASH = r'/'
t_COMMA = r','
t_GREATERTHAN = r'>'
t_LESSTHAN = r'<'
t_GREATEREQUAL = r'>='
t_LESSEQUAL = r'<='
t_RIGHTBITSHIFT = r'>>'
t_LEFTBITSHIFT = r'<<'
t_MODULO = r'%'
t_TILDE = r'~'
t_XOR_OP = r'\^'
t_PERIOD = r'\.'
t_COLON = r':'
t_STAR = r'\*'
t_LBRACK = r'\['
t_RBRACK = r'\]'
t_HYPHEN = r'\-'
t_AMPERSAND = r'&'
t_DOTDOT = r'\.\.'
states = (
('STRING', 'exclusive', ),
('BYTESTRING', 'exclusive', ),
('REXSTRING', 'exclusive', ),
)
# Complex token handling
    def t_RBRACE(self, t):
        r'}'
        # Closing brace of a rule body; PLY uses the docstring above as the token regex.
        t.value = t.value
        self._condition_end = t.lexpos  # offset used to slice raw_condition
        return t
    @staticmethod
    def t_NEWLINE(t):
        r'(\n|\r\n)+'
        # Track line numbers for position reporting; no return, so the token is discarded.
        # NOTE(review): len(t.value) counts '\r\n' as two characters, so CRLF input
        # appears to advance lineno twice per newline — confirm against t_MCOMMENT,
        # which counts '\r\n' occurrences instead.
        t.lexer.lineno += len(t.value)
        t.value = t.value
    @staticmethod
    def t_COMMENT(t):
        r'(//[^\n]*)'
        # Single-line comment; returned as a token so comments can be preserved.
        return t
    @staticmethod
    def t_MCOMMENT(t):
        r'/\*(.|\n|\r\n)*?\*/'
        # Multi-line comment; keep line numbering accurate across the comment span.
        # Count CRLF pairs first so each CRLF line advances lineno exactly once.
        if '\r\n' in t.value:
            t.lexer.lineno += t.value.count('\r\n')
        else:
            t.lexer.lineno += t.value.count('\n')
        return t
    @staticmethod
    def t_HEXNUM(t):
        r'0x[A-Fa-f0-9]+'
        # Hexadecimal literal; the value is kept as the raw source text.
        t.value = t.value
        return t
    def t_SECTIONMETA(self, t):
        r'meta\s*:'
        # 'meta:' section header; remember where the raw meta text starts.
        t.value = t.value
        self._meta_start = t.lexpos
        t.lexer.section = 'meta'  # lexer-level flag for which section is active
        return t
def t_SECTIONSTRINGS(self, t):
r'strings\s*:'
t.value = t.value
| |
no hard links and meets the criteria for ratio limit/seed limit for deletion
del_tor_cont = 0 # counter for the number of torrents that has no hard links and meets the criteria for ratio limit/seed limit for deletion including contents
num_untag = 0 # counter for number of torrents that previously had no hard links but now have hard links
if self.config.args['tag_nohardlinks']:
util.separator("Tagging Torrents with No Hardlinks", space=False, border=False)
nohardlinks = self.config.nohardlinks
tdel_dict = {} # dictionary to track the torrent names and content path that meet the deletion criteria
root_dir = self.config.root_dir
remote_dir = self.config.remote_dir
for category in nohardlinks:
torrent_list = self.get_torrents({'category': category, 'filter': 'completed'})
if len(torrent_list) == 0:
e = 'No torrents found in the category ('+category+') defined under nohardlinks attribute in the config. ' + \
'Please check if this matches with any category in qbittorrent and has 1 or more torrents.'
# self.config.notify(e, 'Tag No Hard Links', False)
logger.warning(e)
continue
for torrent in alive_it(torrent_list):
tracker = self.config.get_tags([x.url for x in torrent.trackers if x.url.startswith('http')])
if any(tag in torrent.tags for tag in nohardlinks[category]['exclude_tags']):
# Skip to the next torrent if we find any torrents that are in the exclude tag
continue
else:
# Checks for any hard links and not already tagged
if util.nohardlink(torrent['content_path'].replace(root_dir, remote_dir)):
# Will only tag new torrents that don't have noHL tag
if 'noHL' not in torrent.tags:
num_tags += 1
body = []
body += print_line(util.insert_space(f'Torrent Name: {torrent.name}', 3), loglevel)
body += print_line(util.insert_space('Added Tag: noHL', 6), loglevel)
body += print_line(util.insert_space(f'Tracker: {tracker["url"]}', 8), loglevel)
body.extend(self.set_tags_and_limits(torrent, nohardlinks[category]["max_ratio"],
nohardlinks[category]["max_seeding_time"], nohardlinks[category]["limit_upload_speed"], tags='noHL'))
attr = {
"function": "tag_nohardlinks",
"title": "Tagging Torrents with No Hardlinks",
"body": "\n".join(body),
"torrent_name": torrent.name,
"torrent_category": torrent.category,
"torrent_tag": 'noHL',
"torrent_tracker": tracker["url"],
"notifiarr_indexer": tracker["notifiarr"],
"torrent_max_ratio": nohardlinks[category]["max_ratio"],
"torrent_max_seeding_time": nohardlinks[category]["max_seeding_time"],
"torrent_limit_upload_speed": nohardlinks[category]["limit_upload_speed"]
}
self.config.send_notifications(attr)
# Cleans up previously tagged noHL torrents
else:
# Deletes torrent with data if cleanup is set to true and meets the ratio/seeding requirements
if (nohardlinks[category]['cleanup'] and torrent.state_enum.is_paused and len(nohardlinks[category]) > 0
and torrent.seeding_time > (nohardlinks[category]["min_seeding_time"]*60)):
tdel_dict[torrent.name] = torrent['content_path'].replace(root_dir, root_dir)
# Checks to see if previous noHL tagged torrents now have hard links.
if (not (util.nohardlink(torrent['content_path'].replace(root_dir, root_dir))) and ('noHL' in torrent.tags)):
num_untag += 1
body = []
body += print_line(f'Previous Tagged noHL Torrent Name: {torrent.name} has hard links found now.', loglevel)
body += print_line(util.insert_space('Removed Tag: noHL', 6), loglevel)
body += print_line(util.insert_space(f'Tracker: {tracker["url"]}', 8), loglevel)
body += print_line(f"{'Not Reverting' if dry_run else 'Reverting'} share limits.", loglevel)
restore_max_ratio = tracker["max_ratio"]
restore_max_seeding_time = tracker["max_seeding_time"]
restore_limit_upload_speed = tracker["limit_upload_speed"]
if restore_max_ratio is None: restore_max_ratio = -2
if restore_max_seeding_time is None: restore_max_seeding_time = -2
if restore_limit_upload_speed is None: restore_limit_upload_speed = -1
if not dry_run:
torrent.remove_tags(tags='noHL')
body.extend(self.set_tags_and_limits(torrent, restore_max_ratio, restore_max_seeding_time, restore_limit_upload_speed, restore=True))
if torrent.state == 'pausedUP': torrent.resume()
attr = {
"function": "untag_nohardlinks",
"title": "Untagging Previous Torrents that now have Hard Links",
"body": "\n".join(body),
"torrent_name": torrent.name,
"torrent_category": torrent.category,
"torrent_tag": 'noHL',
"torrent_tracker": tracker["url"],
"notifiarr_indexer": tracker["notifiarr"],
"torrent_max_ratio": restore_max_ratio,
"torrent_max_seeding_time": restore_max_seeding_time,
"torrent_limit_upload_speed": restore_limit_upload_speed
}
self.config.send_notifications(attr)
# loop through torrent list again for cleanup purposes
if (nohardlinks[category]['cleanup']):
for torrent in torrent_list:
t_name = torrent.name
if t_name in tdel_dict.keys() and 'noHL' in torrent.tags:
t_count = self.torrentinfo[t_name]['count']
t_msg = self.torrentinfo[t_name]['msg']
t_status = self.torrentinfo[t_name]['status']
# Double check that the content path is the same before we delete anything
if torrent['content_path'].replace(root_dir, root_dir) == tdel_dict[t_name]:
tracker = self.config.get_tags([x.url for x in torrent.trackers if x.url.startswith('http')])
body = []
body += print_line(util.insert_space(f'Torrent Name: {t_name}', 3), loglevel)
body += print_line(util.insert_space(f'Tracker: {tracker["url"]}', 8), loglevel)
body += print_line(util.insert_space("Cleanup: True [No hard links found and meets Share Limits.]", 8), loglevel)
attr = {
"function": "cleanup_tag_nohardlinks",
"title": "Removing NoHL Torrents and meets Share Limits",
"torrent_name": t_name,
"torrent_category": torrent.category,
"cleanup": 'True',
"torrent_tracker": tracker["url"],
"notifiarr_indexer": tracker["notifiarr"],
}
if (os.path.exists(torrent['content_path'].replace(root_dir, root_dir))):
# Checks if any of the original torrents are working
if t_count > 1 and ('' in t_msg or 2 in t_status):
del_tor += 1
attr["torrents_deleted_and_contents"] = False
if not dry_run: self.tor_delete_recycle(torrent, attr)
body += print_line(util.insert_space('Deleted .torrent but NOT content files.', 8), loglevel)
else:
del_tor_cont += 1
attr["torrents_deleted_and_contents"] = True
if not dry_run: self.tor_delete_recycle(torrent, attr)
body += print_line(util.insert_space('Deleted .torrent AND content files.', 8), loglevel)
else:
del_tor += 1
attr["torrents_deleted_and_contents"] = False
if not dry_run: self.tor_delete_recycle(torrent, attr)
body += print_line(util.insert_space('Deleted .torrent but NOT content files.', 8), loglevel)
attr["body"] = "\n".join(body)
self.config.send_notifications(attr)
self.torrentinfo[t_name]['count'] -= 1
if num_tags >= 1:
print_line(f"{'Did not Tag/set' if dry_run else 'Tag/set'} share limits for {num_tags} .torrent{'s.' if num_tags > 1 else '.'}", loglevel)
else:
print_line('No torrents to tag with no hard links.', loglevel)
if num_untag >= 1: print_line(f"{'Did not delete' if dry_run else 'Deleted'} noHL tags / share limits for {num_untag} .torrent{'s.' if num_untag > 1 else '.'}", loglevel)
if del_tor >= 1: print_line(f"{'Did not delete' if dry_run else 'Deleted'} {del_tor} .torrent{'s' if del_tor > 1 else ''} but not content files.", loglevel)
if del_tor_cont >= 1: print_line(f"{'Did not delete' if dry_run else 'Deleted'} {del_tor_cont} .torrent{'s' if del_tor_cont > 1 else ''} AND content files.", loglevel)
return num_tags, num_untag, del_tor, del_tor_cont
def rem_unregistered(self):
dry_run = self.config.args['dry_run']
loglevel = 'DRYRUN' if dry_run else 'INFO'
del_tor = 0
del_tor_cont = 0
num_tor_error = 0
num_untag = 0
tor_error_summary = ''
tag_error = self.config.settings['tracker_error_tag']
cfg_rem_unregistered = self.config.args['rem_unregistered']
cfg_tag_error = self.config.args['tag_tracker_error']
        def tag_tracker_error():
            # Tag the torrent from the enclosing loop with the configured
            # tracker-error tag, accumulate a summary line for reporting, and
            # send a notification. Reads/writes the enclosing method's state
            # via nonlocal; honors dry_run by skipping the actual tagging call.
            nonlocal dry_run, t_name, msg_up, tracker, t_cat, torrent, tag_error, tor_error_summary, num_tor_error
            tor_error = ''
            tor_error += (util.insert_space(f'Torrent Name: {t_name}', 3)+'\n')
            tor_error += (util.insert_space(f'Status: {msg_up}', 9)+'\n')
            tor_error += (util.insert_space(f'Tracker: {tracker["url"]}', 8)+'\n')
            tor_error += (util.insert_space(f"Added Tag: {tag_error}", 6)+'\n')
            tor_error_summary += tor_error
            num_tor_error += 1
            attr = {
                "function": "tag_tracker_error",
                "title": "Tag Tracker Error Torrents",
                "body": tor_error,
                "torrent_name": t_name,
                "torrent_category": t_cat,
                "torrent_tag": tag_error,
                "torrent_status": msg_up,
                "torrent_tracker": tracker["url"],
                "notifiarr_indexer": tracker["notifiarr"],
            }
            self.config.send_notifications(attr)
            if not dry_run: torrent.add_tags(tags=tag_error)
        def del_unregistered():
            # Delete the torrent from the enclosing loop, recycling it via
            # tor_delete_recycle. Content files are kept only when other copies
            # of the same torrent exist and at least one of them is healthy
            # ('' in t_msg or status 2); otherwise contents are deleted too.
            # NOTE(review): t_count is read through the closure without a
            # nonlocal declaration (read-only), unlike the other loop variables.
            nonlocal dry_run, loglevel, del_tor, del_tor_cont, t_name, msg_up, tracker, t_cat, t_msg, t_status, torrent
            body = []
            body += print_line(util.insert_space(f'Torrent Name: {t_name}', 3), loglevel)
            body += print_line(util.insert_space(f'Status: {msg_up}', 9), loglevel)
            body += print_line(util.insert_space(f'Tracker: {tracker["url"]}', 8), loglevel)
            attr = {
                "function": "rem_unregistered",
                "title": "Removing Unregistered Torrents",
                "torrent_name": t_name,
                "torrent_category": t_cat,
                "torrent_status": msg_up,
                "torrent_tracker": tracker["url"],
                "notifiarr_indexer": tracker["notifiarr"],
            }
            if t_count > 1:
                # Checks if any of the original torrents are working
                if '' in t_msg or 2 in t_status:
                    attr["torrents_deleted_and_contents"] = False
                    if not dry_run: self.tor_delete_recycle(torrent, attr)
                    body += print_line(util.insert_space('Deleted .torrent but NOT content files.', 8), loglevel)
                    del_tor += 1
                else:
                    attr["torrents_deleted_and_contents"] = True
                    if not dry_run: self.tor_delete_recycle(torrent, attr)
                    body += print_line(util.insert_space('Deleted .torrent AND content files.', 8), loglevel)
                    del_tor_cont += 1
            else:
                attr["torrents_deleted_and_contents"] = True
                if not dry_run: self.tor_delete_recycle(torrent, attr)
                body += print_line(util.insert_space('Deleted .torrent AND content files.', 8), loglevel)
                del_tor_cont += 1
            attr["body"] = "\n".join(body)
            self.config.send_notifications(attr)
            self.torrentinfo[t_name]['count'] -= 1
if cfg_rem_unregistered or cfg_tag_error:
if cfg_tag_error: separator("Tagging Torrents with Tracker Errors", space=False, border=False)
elif cfg_rem_unregistered: separator("Removing Unregistered Torrents", space=False, border=False)
unreg_msgs = [
'UNREGISTERED',
'TORRENT NOT FOUND',
'TORRENT IS NOT FOUND',
'NOT REGISTERED',
'NOT EXIST',
'UNKNOWN TORRENT',
'TRUMP',
'RETITLED',
'TRUNCATED',
'TORRENT IS NOT AUTHORIZED FOR USE ON THIS TRACKER'
]
ignore_msgs = [
'YOU HAVE REACHED THE CLIENT LIMIT FOR THIS TORRENT',
'MISSING PASSKEY',
'MISSING INFO_HASH',
'PASSKEY IS INVALID',
'INVALID PASSKEY',
'EXPECTED VALUE (LIST, DICT, INT OR STRING) IN BENCODED STRING',
'COULD NOT PARSE BENCODED DATA',
'STREAM TRUNCATED'
]
for torrent in self.torrentvalid:
check_tags = util.get_list(torrent.tags)
# Remove any error torrents Tags that are no longer unreachable.
if tag_error in check_tags:
tracker = self.config.get_tags([x.url for x in torrent.trackers if x.url.startswith('http')])
num_untag += 1
body = []
body += print_line(f'Previous Tagged {tag_error} torrent currently has a working tracker.', loglevel)
body += print_line(util.insert_space(f'Torrent Name: {torrent.name}', 3), loglevel)
body += print_line(util.insert_space(f'Removed Tag: {tag_error}', 4), loglevel)
body += print_line(util.insert_space(f'Tracker: {tracker["url"]}', 8), loglevel)
if not dry_run: torrent.remove_tags(tags=tag_error)
attr = {
"function": "untag_tracker_error",
"title": "Untagging Tracker Error Torrent",
"body": "\n".join(body),
"torrent_name": torrent.name,
"torrent_category": torrent.category,
"torrent_tag": tag_error,
"torrent_tracker": tracker["url"],
"notifiarr_indexer": tracker["notifiarr"]
}
self.config.send_notifications(attr)
for torrent in self.torrentissue:
t_name = torrent.name
t_cat = self.torrentinfo[t_name]['Category']
t_count = self.torrentinfo[t_name]['count']
t_msg = self.torrentinfo[t_name]['msg']
t_status = self.torrentinfo[t_name]['status']
check_tags = util.get_list(torrent.tags)
try:
for x in torrent.trackers:
if x.url.startswith('http'):
tracker = self.config.get_tags([x.url])
msg_up = x.msg.upper()
# Tag any error torrents
if cfg_tag_error:
if x.status == | |
'''
This file contains methods for generating calibration-related plots, e.g. reliability plots.
References:
[1] <NAME>, <NAME>, <NAME>, and <NAME>. On calibration of modern neural networks.
arXiv preprint arXiv:1706.04599, 2017.
'''
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import math
import torch
from torch.nn import functional as F
from scipy.interpolate import make_interp_spline
plt.rcParams.update({'font.size': 20})
# Some keys used for the following dictionaries
COUNT = 'count'
CONF = 'conf'
ACC = 'acc'
BIN_ACC = 'bin_acc'
BIN_CONF = 'bin_conf'
def _bin_initializer(bin_dict, num_bins=10):
for i in range(num_bins):
bin_dict[i][COUNT] = 0
bin_dict[i][CONF] = 0
bin_dict[i][ACC] = 0
bin_dict[i][BIN_ACC] = 0
bin_dict[i][BIN_CONF] = 0
def _populate_bins(confs, preds, labels, num_bins=10):
bin_dict = {}
for i in range(num_bins):
bin_dict[i] = {}
_bin_initializer(bin_dict, num_bins)
num_test_samples = len(confs)
for i in range(0, num_test_samples):
confidence = confs[i]
prediction = preds[i]
label = labels[i]
binn = int(math.ceil(((num_bins * confidence) - 1)))
bin_dict[binn][COUNT] = bin_dict[binn][COUNT] + 1
bin_dict[binn][CONF] = bin_dict[binn][CONF] + confidence
bin_dict[binn][ACC] = bin_dict[binn][ACC] + \
(1 if (label == prediction) else 0)
for binn in range(0, num_bins):
if (bin_dict[binn][COUNT] == 0):
bin_dict[binn][BIN_ACC] = 0
bin_dict[binn][BIN_CONF] = 0
else:
bin_dict[binn][BIN_ACC] = float(
bin_dict[binn][ACC]) / bin_dict[binn][COUNT]
bin_dict[binn][BIN_CONF] = bin_dict[binn][CONF] / \
float(bin_dict[binn][COUNT])
return bin_dict
def reliability_plot(confs, preds, labels, save_plots_loc, dataset, model, trained_loss, num_bins=15, scaling_related='before', save=False):
    '''
    Draw a reliability diagram: expected (diagonal) vs actual per-bin accuracy.
    Saves a PDF when save is True, otherwise shows the figure interactively.
    '''
    bin_dict = _populate_bins(confs, preds, labels, num_bins)
    bns = [i / float(num_bins) for i in range(num_bins)]
    actual = [bin_dict[i][BIN_ACC] for i in range(num_bins)]
    plt.figure(figsize=(10, 8))  # width:20, height:3
    plt.bar(bns, bns, align='edge', width=0.05, color='pink', label='Expected')
    plt.bar(bns, actual, align='edge', width=0.05, color='blue', alpha=0.5, label='Actual')
    plt.ylabel('Accuracy')
    plt.xlabel('Confidence')
    plt.legend()
    if save:
        out_path = os.path.join(save_plots_loc, '{}_{}'.format(dataset, model),
                                'reliability_plot_{}_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss))
        plt.savefig(out_path, dpi=40)
    else:
        plt.show()
def bin_strength_plot(confs, preds, labels, num_bins=15):
    '''
    Draw a bar plot of the percentage of samples that fall in each confidence bin.
    '''
    bin_dict = _populate_bins(confs, preds, labels, num_bins)
    bns = [i / float(num_bins) for i in range(num_bins)]
    num_samples = len(labels)
    percentages = [(bin_dict[i][COUNT] / float(num_samples)) * 100 for i in range(num_bins)]
    plt.figure(figsize=(10, 8))  # width:20, height:3
    plt.bar(bns, percentages, align='edge', width=0.05,
            color='blue', alpha=0.5, label='Percentage samples')
    plt.ylabel('Percentage of samples')
    plt.xlabel('Confidence')
    plt.show()
def pos_neg_ece_bins_plot(bins_vec, bins_ece_over, bins_ece_under, bins_ece_over_after, bins_ece_under_after, save_plots_loc, dataset, model, trained_loss,
                          acc_check=False, scaling_related='before', const_temp=False):
    """Scatter per-bin ECE for over- and under-confident classes and save as PDF.

    Args:
        bins_vec: Bin indices/positions for the x-axis.
        bins_ece_over / bins_ece_under: Per-bin ECE tensors (moved to CPU for plotting).
        bins_ece_over_after / bins_ece_under_after: Post-scaling variants (currently unplotted).
        save_plots_loc, dataset, model, trained_loss: Components of the output path/filename.
        acc_check, scaling_related, const_temp: Flags that select the filename suffix.
    """
    plt.figure(figsize=(10, 8))
    plt.scatter(bins_vec, bins_ece_over.cpu(), s=70)
    plt.scatter(bins_vec, bins_ece_under.cpu(), s=70)
    #plt.scatter(bins_vec, bins_ece_over_after.cpu())
    #plt.scatter(bins_vec, bins_ece_under_after.cpu())
    plt.xlabel('bins', fontsize=26)
    plt.xticks(fontsize=18)
    plt.ylabel('ECE', fontsize=26)
    plt.yticks(fontsize=18)
    #plt.legend(('over-confidence classes', 'under-confidence classes', 'over-confidence classes after scaling', 'under-confidence classes after scaling'), fontsize=10)
    plt.legend(('over-confidence classes', 'under-confidence classes'), fontsize=22)
    # Fix: the original's save branches were not mutually exclusive, so a figure
    # could be written twice (or skipped) depending on the flag combination.
    # Normalized to the if/elif/else pattern used by the sibling ece_acc_plot.
    if const_temp:
        fname = 'over_under_ece_bins_{}_scaling_{}_{}_{}_const_temp.pdf'.format(scaling_related, dataset, model, trained_loss)
    elif acc_check:
        fname = 'over_under_ece_bins_{}_scaling_{}_{}_{}_acc.pdf'.format(scaling_related, dataset, model, trained_loss)
    else:
        fname = 'over_under_ece_bins_{}_scaling_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)
    plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), fname), dpi=40)
    plt.close()
def pos_neg_ece_plot(acc, csece_pos, csece_neg, save_plots_loc, dataset, model, trained_loss, acc_check=False, scaling_related='before', const_temp=False):
    """Scatter positive and negative per-class ECE against accuracy, one PDF each.

    Args:
        acc: Per-class accuracies for the x-axis.
        csece_pos / csece_neg: Per-class positive / negative ECE tensors.
        save_plots_loc, dataset, model, trained_loss: Components of the output path/filename.
        acc_check, scaling_related, const_temp: Flags that select the filename suffix.
    """
    def _scatter_and_save(csece, prefix):
        # One figure per ECE sign; 'prefix' becomes the filename prefix ('pos'/'neg').
        plt.figure(figsize=(10, 8))
        plt.scatter(acc, csece.cpu(), s=70)
        plt.xlabel('accuracy', fontsize=26)
        plt.xticks(fontsize=18)
        plt.ylabel('ECE', fontsize=26)
        plt.yticks(fontsize=16)
        plt.ylim(0, 0.01)
        # Fix: the original's save branches were not mutually exclusive; the
        # suffix is now chosen once, matching the sibling ece_acc_plot logic.
        if const_temp:
            suffix = '_const_temp'
        elif acc_check:
            suffix = '_acc'
        else:
            suffix = ''
        fname = '{}_ece_acc_{}_scaling_{}_{}_{}{}.pdf'.format(prefix, scaling_related, dataset, model, trained_loss, suffix)
        plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), fname), dpi=40)
        plt.close()
    # The two halves of the original were copy-pasted; dedupe via the helper.
    _scatter_and_save(csece_pos, 'pos')
    _scatter_and_save(csece_neg, 'neg')
def ece_acc_plot(acc, csece, save_plots_loc, dataset, model, trained_loss, acc_check=False, scaling_related='before', const_temp=False, unc=False):
    """Scatter per-class ECE against per-class accuracy and save the figure as a PDF."""
    plt.figure(figsize=(10, 8))
    plt.scatter(acc, csece.cpu(), s=70)
    plt.xlabel('accuracy', fontsize=26)
    plt.xticks(fontsize=18)
    plt.ylabel('ECE', fontsize=26)
    plt.yticks(fontsize=16)
    #plt.ylim(0, 0.01)
    # Build the output filename and DPI from the flag combination; the
    # const_temp variant ignores acc_check/unc, mirroring the original logic.
    tag = '{}_scaling_{}_{}_{}'.format(scaling_related, dataset, model, trained_loss)
    if const_temp:
        fname = 'ece_acc_{}_const_temp.pdf'.format(tag)
        dpi = 40
    elif acc_check:
        fname = ('uncalibrated_ece_acc_{}_acc.pdf' if unc else 'ece_acc_{}_acc.pdf').format(tag)
        dpi = 100 if unc else 40
    else:
        fname = ('uncalibrated_ece_acc_{}.pdf' if unc else 'ece_acc_{}.pdf').format(tag)
        dpi = 40
    plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), fname), dpi=dpi)
    plt.close()
def ece_iters_plot(scaled_model, save_plots_loc, dataset, model, trained_loss, init_temp, acc_check=False):
    """Plot ECE per optimization iteration of class-based temperature scaling
    against the flat single-temperature ECE baseline, then save a PDF.
    """
    steps = range(scaled_model.iters + 1)
    baseline = scaled_model.ece * torch.ones((scaled_model.iters + 1))
    plt.figure()
    plt.plot(steps, scaled_model.ece_list)
    plt.plot(steps, baseline)
    plt.legend(('class-based temp scaling', 'single temp scaling'), fontsize=10)
    plt.xlabel('iterations', fontsize=10)
    plt.xticks(fontsize=10)
    plt.ylabel('ECE', fontsize=10)
    plt.yticks(fontsize=10)
    suffix = '_acc' if acc_check else ''
    fname = 'ece_iters_{}_{}_{}_{}{}.pdf'.format(init_temp, dataset, model, trained_loss, suffix)
    plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), fname), dpi=40)
    plt.close()
def temp_acc_plot(acc, temp, single_temp, save_plots_loc, dataset, model, trained_loss, acc_check=False, const_temp=False):
    """Scatter per-class temperatures versus accuracy, with the single
    temperature drawn as a horizontal red baseline, and save one PDF.

    FIX: the original never closed its figure, so repeated calls leaked
    open matplotlib figures (siblings in this module call plt.close()).
    """
    plt.figure()
    plt.scatter(acc, temp.cpu(), label='Class-based temperature')
    plt.plot(acc, single_temp * torch.ones(len(acc)), color='red', label='Single temperature')
    plt.xlabel('accuracy', fontsize=10)
    plt.xticks(fontsize=10)
    plt.ylabel('Temperature', fontsize=10)
    plt.yticks(fontsize=10)
    plt.legend(fontsize=10)
    if const_temp:
        fname = 'temp_acc_after_scaling_{}_{}_{}_const_temp.pdf'.format(dataset, model, trained_loss)
    elif acc_check:
        fname = 'temp_acc_after_scaling_{}_{}_{}_acc.pdf'.format(dataset, model, trained_loss)
    else:
        fname = 'temp_acc_after_scaling_{}_{}_{}.pdf'.format(dataset, model, trained_loss)
    plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), fname), dpi=40)
    plt.close()
def diff_ece_plot(acc, csece1, csece2, save_plots_loc, dataset, model, trained_loss, acc_check=False, scaling_type='class_based'):
    """Scatter the per-class ECE difference (csece1 - csece2) versus accuracy,
    with a red zero line, and save one PDF.

    FIX: the original never closed its figure, leaking an open matplotlib
    figure per call; plt.close() added for consistency with the siblings.
    """
    plt.figure()
    plt.scatter(acc, (csece1 - csece2).cpu())
    plt.xlabel('accuracy', fontsize=10)
    plt.xticks(fontsize=10)
    plt.ylabel('ECE difference', fontsize=10)
    plt.yticks(fontsize=10)
    plt.axhline(y=0, color='r')
    suffix = '_acc' if acc_check else ''
    fname = 'diff_{}_ece_acc_after_scaling_{}_{}_{}{}.pdf'.format(scaling_type, dataset, model, trained_loss, suffix)
    plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), fname), dpi=40)
    plt.close()
def bins_over_conf_plot(bins, diff, save_plots_loc, dataset, model, trained_loss, scaling_related='before'):
    """Plot per-bin over-confidence (confidence - accuracy) and save one PDF.

    FIX: plt.close() added — the original leaked an open figure per call.
    """
    plt.figure()
    plt.plot(bins, diff)
    plt.xlabel('bins', fontsize=10)
    plt.xticks(fontsize=10)
    plt.ylabel('confidence - accuracy', fontsize=10)
    plt.yticks(fontsize=10)
    plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'over_conf_bins_{}_scaling_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
    plt.close()
def temp_bins_plot(single_T, bins_T, bin_boundaries, save_plots_loc, dataset, model, trained_loss, acc_check=False, const_temp=False, divide='reg_divide', ds='val', version=1, cross_validate='ECE', y_name='Temperature'):
    """Plot per-bin values (one curve per iteration column of bins_T) and save.

    NOTE(review): the incoming ``bin_boundaries`` and ``single_T`` arguments
    are currently unused — the x-axis is a uniform 0..num_bins grid. Kept in
    the signature for caller compatibility.
    """
    edges = torch.linspace(0, bins_T.shape[0], bins_T.shape[0] + 1)
    lowers = edges[:-1]
    plt.figure()
    for it in range(bins_T.shape[1]):
        plt.plot(lowers, bins_T[:, it].cpu(), label='Iteration #{}'.format(it + 1))
    plt.xlabel('Bins', fontsize=16)
    plt.xticks(fontsize=10)
    plt.ylabel(y_name, fontsize=16)
    plt.yticks(fontsize=10)
    fname = 'temp_bins_{}_iters_{}_{}_{}_ver_{}_{}_{}_{}_smooth.pdf'.format(bins_T.shape[1], dataset, model, trained_loss, version, divide, ds, cross_validate)
    plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), fname), dpi=40)
def ece_bin_plot(ece_bin, single_ece_bin, origin_ece_bin, save_plots_loc, dataset, model, trained_loss, divide='reg_divide', ds='val', version=1):
    """Compare per-bin ECE (as percentages) before scaling, after single-temp
    scaling, and after per-bin scaling, then save one PDF.
    """
    xs = range(len(ece_bin))
    plt.figure()
    # Plot order fixed so legend entries and colors stay stable.
    for values, label in ((origin_ece_bin, 'ECE before scaling'),
                          (single_ece_bin, 'ECE after single temp scaling'),
                          (ece_bin, 'ECE after per bin temp scaling')):
        plt.plot(xs, [v * 100 for v in values], label=label)
    plt.xlabel('Bins', fontsize=16)
    plt.xticks(fontsize=10)
    plt.ylabel('ECE(%)', fontsize=16)
    plt.yticks(fontsize=10)
    plt.legend(fontsize=10)
    fname = 'ece_bins_{}_{}_{}_ver_{}_{}_{}_smooth.pdf'.format(dataset, model, trained_loss, version, divide, ds)
    plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), fname), dpi=40)
def logits_diff_bin_plot(logits_diff_bin, save_plots_loc, dataset, model, trained_loss, divide='reg_divide', ds='val', version=1):
    """Line-plot the per-bin logits difference and save a single PDF."""
    plt.figure()
    plt.plot(range(len(logits_diff_bin)), logits_diff_bin)
    plt.xlabel('Bins', fontsize=10)
    plt.xticks(fontsize=10)
    plt.ylabel('Logits difference', fontsize=10)
    plt.yticks(fontsize=10)
    target = os.path.join(
        save_plots_loc,
        '{}_{}'.format(dataset, model),
        'logits_diff_bins_{}_{}_{}_ver_{}_{}_{}.pdf'.format(dataset, model, trained_loss, version, divide, ds),
    )
    plt.savefig(target, dpi=40)
def temp_bins_plot2(single_T, single_T2, bins_T, bins_T2, bin_boundaries, bin_boundaries2, save_plots_loc, dataset, model, trained_loss, divide='reg_divide', ds='val', version=1, y_name='Temperature'):
    """Overlay per-bin weights (bins_T) with reciprocal temperatures
    (1/bins_T2), one pair of curves per column, and save one PDF.

    NOTE(review): ``single_T``/``single_T2`` and both boundary arguments are
    currently unused (the x-axis is a uniform grid); kept for compatibility.
    """
    edges = torch.linspace(0, bins_T.shape[0], bins_T.shape[0] + 1)
    lowers = edges[:-1]
    plt.figure()
    for col in range(bins_T.shape[1]):
        plt.plot(lowers, bins_T[:, col].cpu(), label='Weights')
        plt.plot(lowers, (1 / bins_T2[:, col]).cpu(), label=r'$1/Temperatures$')
    plt.xlabel('Bins', fontsize=16)
    plt.xticks(fontsize=10)
    plt.ylabel(y_name, fontsize=16)
    plt.yticks(fontsize=10)
    plt.legend(fontsize=10)
    fname = 'temp_bins_{}_iters_{}_{}_{}_ver_{}_{}_{}_smooth.pdf'.format(bins_T.shape[1], dataset, model, trained_loss, version, divide, ds)
    plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), fname), dpi=40)
def exp_value(confidences, diff):
    """Return the pair of root expressions evaluated at ``confidences`` and at
    ``confidences - diff``.

    Each element is the positive root of x^2 + x - (1-c)/c = 0, i.e.
    (sqrt(1 + 4*(1-c)/c) - 1) / 2, computed element-wise on tensors.
    """
    def _root(c):
        return (torch.sqrt(1 + 4 * (1 - c) / c) - 1) / 2

    return _root(confidences), _root(confidences - diff)
def plot_temp_different_bins(save_plots_loc):
    """Plot the optimal temperature as a function of confidence for several
    bin shifts (epsilon = 0.1, 0.05, 0.03) and save one PDF.
    """
    confidences = torch.linspace(0.61, 1, 40)
    plt.figure()
    # One curve per epsilon, in the same order (and colors) as before.
    for eps in (0.1, 0.05, 0.03):
        numerator, denominator = exp_value(confidences, eps)
        plt.plot(confidences, torch.log(numerator) / torch.log(denominator),
                 label='\u03B5={}'.format(eps))
    plt.xlabel('Confidence', fontsize=16)
    plt.xticks(fontsize=10)
    plt.ylabel('Temperature', fontsize=16)
    plt.yticks(fontsize=10)
    plt.legend(fontsize=14)
    plt.savefig(os.path.join(save_plots_loc, 'temp_movements_between_bins_3_classes.pdf'), dpi=40)
def ece_iters_plot2(single_ece, single_ece2, ece_list1, ece_list2, save_plots_loc, dataset, model, trained_loss, divide='reg_divide', ds='val', version=1):
if len(ece_list1) < len(ece_list2):
ece_list1 = ece_list1 + (len(ece_list2) - len(ece_list1)) * [ece_list1[-1]]
elif len(ece_list1) > len(ece_list2):
ece_list2 = ece_list2 + (len(ece_list1) - len(ece_list2)) * [ece_list2[-1]]
ece_list1 = [i * 100 for i in ece_list1]
ece_list2 = [i * 100 for i in ece_list2]
plt.figure()
plt.plot(range(len(ece_list1)), ece_list1, label='CBT ResNet-152')
plt.plot(range(len(ece_list2)), ece_list2, label='CBT DenseNet-161')
plt.plot(range(len(ece_list1)), torch.ones(len(ece_list1)) * single_ece, label='TS ResNet-152')
plt.plot(range(len(ece_list2)), torch.ones(len(ece_list2)) * single_ece2, label='TS DenseNet-161')
plt.xlabel('Iterations', fontsize=16)
| |
from math import exp
import os, sys # string, # noqa: E401
current_location = os.path.dirname(__file__)
#####################################################################################
# get parameter file name, with installed path
def getDreidingParamFile():
    """Return the path of the bundled DreidingX6 parameter file.

    The file lives in the package ``data`` directory, resolved relative to
    this module's installed location.
    """
    return os.path.join(current_location, "..", "..", "data", "DreidingX6parameters.txt")
#####################################################################################
# get atom mass
def getAtommass(atomtypes):
    """Look up atomic masses for the given atom types in the Dreiding file.

    Parameters
    ----------
    atomtypes : sequence of [atomtypeID, atomtype, ...] entries.

    Returns
    -------
    list of [atomtypeID, amass, atomtype]; types with no exact match fall
    back to the mass of an assigned type sharing the same 2-char element
    prefix (and are silently dropped if none matches).

    FIX: the file is now opened with ``with`` so the handle cannot leak if
    parsing raises; ``eval`` on file content replaced by ``float`` for the
    numeric mass field.
    """
    atommass = []
    flag = 0
    with open(getDreidingParamFile(), "r") as fin:
        dataline = fin.readline()
        # Scan until the ATOMTYPES section, then read rows until END.
        while dataline != "" and dataline != "\n" and flag == 0:
            words = dataline[0 : len(dataline) - 1]  # noqa: E203
            if str(words).upper() == "ATOMTYPES":
                flag = 1
                dataline = fin.readline()
                words = str.split(dataline[0 : len(dataline) - 1])  # noqa: E203
                while str(words[0]).upper() != "END":
                    atype = str(words[0])
                    amass = float(words[2])
                    for i in range(len(atomtypes)):
                        atomtypeID = atomtypes[i][0]
                        atomtype = atomtypes[i][1]
                        if atype == atomtype:
                            atommass.append([atomtypeID, amass, atomtype])
                    dataline = fin.readline()
                    words = str.split(dataline[0 : len(dataline) - 1])  # noqa: E203
            dataline = fin.readline()
    # Fallback: reuse the mass of any assigned type with the same element prefix.
    assigned = [entry[0] for entry in atommass]
    for i in range(len(atomtypes)):
        atomtypeID = atomtypes[i][0]
        atomtype = atomtypes[i][1]
        if atomtypeID not in assigned:
            atype = atomtype[0:2]
            for j in range(len(atommass)):
                amass = atommass[j][1]
                btype = atommass[j][2]
                if atype == btype[0:2]:
                    atommass.append([atomtypeID, amass, atomtype])
                    break
    return atommass
#####################################################################################
# get bond coeffs
def getBondCoeffs(bondtypes):
    """Read harmonic bond-stretch coefficients from the Dreiding file.

    Parameters
    ----------
    bondtypes : sequence of [bondtypeID, atom1type, atom2type].

    Returns
    -------
    (bondcoeffs, warning) : ``bondcoeffs`` is a sorted list of
    [bondtypeID, kr, r0, atom1type, atom2type]; ``warning`` accumulates a
    message for every type that had to be guessed.

    FIX: file opened with ``with`` (the original leaked the handle if
    parsing raised); ``eval`` on numeric file fields replaced by ``float``.
    """
    warning = ""
    bondcoeffs = []
    flag = 0
    with open(getDreidingParamFile(), "r") as fin:
        dataline = fin.readline()
        while dataline != "" and dataline != "\n" and flag == 0:
            words = dataline[0 : len(dataline) - 1]  # noqa: E203
            if str(words).upper() == "BOND_STRETCH":
                flag = 1
                dataline = fin.readline()
                words = str.split(dataline[0 : len(dataline) - 1])  # noqa: E203
                while str(words[0]).upper() != "END":
                    atype1 = str(words[0])
                    atype2 = str(words[1])
                    # File stores 1/2*kr*(r-r0)^2; LAMMPS expects kr*(r-r0)^2,
                    # hence the 0.5 factor.
                    kr = 0.5 * float(words[3])
                    r0 = float(words[4])
                    for i in range(len(bondtypes)):
                        bondtypeID = bondtypes[i][0]
                        atom1type = bondtypes[i][1]
                        atom2type = bondtypes[i][2]
                        # Normalize hydrogen-bond type spellings to the file's.
                        if atom1type == "H__HB":
                            atom1type = "H___b"
                        if atom2type == "H__HB":
                            atom2type = "H___b"
                        if atom1type == "H__HA":
                            atom1type = "H___A"
                        if atom2type == "H__HA":
                            atom2type = "H___A"
                        if (atype1 == atom1type and atype2 == atom2type) or (
                            atype2 == atom1type and atype1 == atom2type
                        ):
                            bondcoeffs.append([bondtypeID, kr, r0, atom1type, atom2type])
                    dataline = fin.readline()
                    words = str.split(dataline[0 : len(dataline) - 1])  # noqa: E203
            dataline = fin.readline()
    # Fallbacks for bond types with no exact match in the file.
    assigned = [entry[0] for entry in bondcoeffs]
    for i in range(len(bondtypes)):
        bondtypeID = bondtypes[i][0]
        atom1type = bondtypes[i][1]
        atom2type = bondtypes[i][2]
        if bondtypeID not in assigned:
            warning = (
                warning
                + " Bondtype "
                + str(bondtypeID)
                + " might be not assigned correctly."
            )
            flag = 0
            a1type = atom1type[0]
            a2type = atom2type[0]
            # Reuse coefficients of any pair matching on leading element letters.
            for j in range(len(bondcoeffs)):
                kr = bondcoeffs[j][1]
                r0 = bondcoeffs[j][2]
                b1type = bondcoeffs[j][3]
                b2type = bondcoeffs[j][4]
                if (a1type == b1type[0] and a2type == b2type[0]) or (
                    a1type == b2type[0] and a2type == b1type[0]
                ):
                    bondcoeffs.append([bondtypeID, kr, r0, atom1type, atom2type])
                    flag = 1
                    break
            if flag == 0:
                # Generic single-bond default when nothing matches at all.
                bondcoeffs.append([bondtypeID, 350.0, 1.40, atom1type, atom2type])
    bondcoeffs.sort()
    return bondcoeffs, warning
#####################################################################################
# get bond coeffs
def getAngleCoeffs(angletypes):
    """Read angle-bend coefficients from the Dreiding parameter file.

    Parameters
    ----------
    angletypes : sequence of [angletypeID, atomtype] (central atom type).

    Returns
    -------
    (anglecoeffs, warning) : ``anglecoeffs`` is a sorted list of
    [angletypeID, ksita, sita0, atomtype]; ``warning`` accumulates a message
    for every type that had to be guessed.

    FIX: file opened with ``with`` (handle could leak on exception);
    ``eval`` on numeric file fields replaced by ``float``.
    """
    warning = ""
    anglecoeffs = []
    flag = 0
    with open(getDreidingParamFile(), "r") as fin:
        dataline = fin.readline()
        while dataline != "" and dataline != "\n" and flag == 0:
            words = dataline[0 : len(dataline) - 1]  # noqa: E203
            if str(words).upper() == "ANGLE_BEND":
                flag = 1
                dataline = fin.readline()
                words = str.split(dataline[0 : len(dataline) - 1])  # noqa: E203
                while str(words[0]).upper() != "END":
                    atype = str(words[1])
                    # 0.5 factor: file stores 1/2*k*(theta-theta0)^2 form.
                    ksita = 0.5 * float(words[4])
                    sita0 = float(words[5])
                    for i in range(len(angletypes)):
                        angletypeID = angletypes[i][0]
                        atomtype = angletypes[i][1]
                        if atype == atomtype:
                            anglecoeffs.append([angletypeID, ksita, sita0, atomtype])
                    dataline = fin.readline()
                    words = str.split(dataline[0 : len(dataline) - 1])  # noqa: E203
            dataline = fin.readline()
    # Fallbacks for angle types with no exact match.
    assigned = [entry[0] for entry in anglecoeffs]
    for i in range(len(angletypes)):
        angletypeID = angletypes[i][0]
        atomtype = angletypes[i][1]
        if angletypeID not in assigned:
            warning = (
                warning
                + " Angletype "
                + str(angletypeID)
                + " might be not assigned correctly."
            )
            flag = 0
            atype = atomtype[0]
            # Reuse coefficients of any entry whose element letter matches.
            for j in range(len(anglecoeffs)):
                ksita = anglecoeffs[j][1]
                sita0 = anglecoeffs[j][2]
                btype = anglecoeffs[j][3]
                if atype == btype[0]:
                    anglecoeffs.append([angletypeID, ksita, sita0, atomtype])
                    flag = 1
                    break
            if flag == 0:
                # Tetrahedral default angle when nothing matches at all.
                anglecoeffs.append([angletypeID, 50.0, 109.4710, atomtype])
    anglecoeffs.sort()
    return anglecoeffs, warning
#####################################################################################
# get dihs coeffs: harmonic
def getDihsCoeffs(dihstypes):
    """Read torsion (dihedral) coefficients from the Dreiding parameter file.

    Parameters
    ----------
    dihstypes : sequence of [dihstypeID, atom1type, atom2type, atom3type,
        atom4type] describing each dihedral by its four atom types.

    Returns
    -------
    (dihscoeffs, warning) : ``dihscoeffs`` is a sorted list of
    [dihstypeID, kv, nv, dv, atom1type..atom4type]; ``warning`` accumulates
    a message for every dihedral type that had to be guessed.
    """
    warning = ""
    type_done = []   # dihstypeIDs already assigned (prevents duplicates)
    dihscoeffs = []
    flag = 0
    Forcefieldfile = getDreidingParamFile()
    fin = open(Forcefieldfile, "r")
    dataline = fin.readline()
    # Scan for the TORSIONS section, then read rows until END.
    while dataline != "" and dataline != "\n" and flag == 0:
        words = dataline[0 : len(dataline) - 1]  # noqa: E203
        if str(words).upper() == "TORSIONS":
            flag = 1
            dataline = fin.readline()
            words = str.split(dataline[0 : len(dataline) - 1])  # noqa: E203
            while str(words[0]).upper() != "END":
                atype1 = str(words[1])
                atype2 = str(words[2])
                atype0 = str(words[0])
                atype3 = str(words[3])
                # napirs = 1
                if len(atype1) > 2 and len(atype2) > 2:
                    # Third character of the central types encodes hybridization
                    # ('R' aromatic counts as sp2); npairs = product of bonded
                    # neighbor counts used to split the barrier per atom pair.
                    a1third = atype1[2]
                    a2third = atype2[2]
                    if a1third == "R":
                        a1third = "2"
                    if a2third == "R":
                        a2third = "2"
                    if (a1third in ["2", "3"]) and (a2third in ["2", "3"]):
                        npairs = eval(a1third) * eval(a2third)
                else:
                    npairs = 0
                    sys.exit("Error: Torsion bond has no torsion stiffness.")
                kv = 0.5 * 1.0 / npairs * eval(words[5])
                nv = eval(words[6])
                dv = (-1) * eval(
                    words[7]
                )  # LAMMPS uses a different torsion equation; d has the opposite sign.
                for i in range(len(dihstypes)):
                    dihstypeID = dihstypes[i][0]
                    atom1type = dihstypes[i][1]
                    atom2type = dihstypes[i][2]
                    atom3type = dihstypes[i][3]
                    atom4type = dihstypes[i][4]
                    # For these central-atom combinations the end atoms are
                    # wildcarded ("X") before matching against the file row.
                    # NOTE(review): the mixed pairs below (N_R with C_2,
                    # N_2 with C_2) look asymmetric — confirm against the
                    # intended Dreiding wildcard rules.
                    if atom2type == "O_3" or atom3type == "O_3":
                        atom1type = "X"
                        atom4type = "X"
                    if atom2type == "C_R" or atom3type == "C_R":
                        atom1type = "X"
                        atom4type = "X"
                    if atom2type == "N_R" or atom3type == "C_2":
                        atom1type = "X"
                        atom4type = "X"
                    if atom2type == "N_2" or atom3type == "C_2":
                        atom1type = "X"
                        atom4type = "X"
                    # Forward orientation match (first 3 chars for end atoms).
                    if (
                        atype0[0:3] == atom1type[0:3]
                        and atype1 == atom2type
                        and atype2 == atom3type
                        and atype3[0:3] == atom4type[0:3]
                    ):
                        if dihstypeID not in type_done:
                            type_done.append(dihstypeID)
                            dihscoeffs.append(
                                [
                                    dihstypeID,
                                    kv,
                                    nv,
                                    dv,
                                    atom1type,
                                    atom2type,
                                    atom3type,
                                    atom4type,
                                ]
                            )
                    # Reversed orientation match.
                    if (
                        atype0[0:3] == atom4type[0:3]
                        and atype2 == atom2type
                        and atype1 == atom3type
                        and atype3[0:3] == atom1type[0:3]
                    ):
                        if dihstypeID not in type_done:
                            type_done.append(dihstypeID)
                            dihscoeffs.append(
                                [
                                    dihstypeID,
                                    kv,
                                    nv,
                                    dv,
                                    atom1type,
                                    atom2type,
                                    atom3type,
                                    atom4type,
                                ]
                            )
                dataline = fin.readline()
                words = str.split(dataline[0 : len(dataline) - 1])  # noqa: E203
        dataline = fin.readline()
    fin.close()
    # if not assigned
    assigned = []
    for j in range(len(dihscoeffs)):
        dihstypeID = dihscoeffs[j][0]
        assigned.append(dihstypeID)
    for i in range(len(dihstypes)):
        dihstypeID = dihstypes[i][0]
        atom3type = dihstypes[i][3]
        atom2type = dihstypes[i][2]
        # NOTE(review): indices [4] and [1] are swapped relative to the main
        # loop above (atom1type comes from column 4 here) — confirm whether
        # this reversal of the end atoms is intentional.
        atom1type = dihstypes[i][4]
        atom4type = dihstypes[i][1]
        if dihstypeID not in assigned:
            warning = (
                warning
                + " Dihstype "
                + str(dihstypeID)
                + " might be not assigned correctly."
            )
            # Central atoms that carry no torsion barrier get kv = 0.
            zero_kv_atomtypes = [
                "H_",
                "C_1",
                "N_1",
                "O_1",
                "F_",
                "Cl",
                "Br",
                "I_",
                "Na",
                "Ca",
                "Fe",
                "Zn",
            ]
            if (atom2type in zero_kv_atomtypes) or (atom3type in zero_kv_atomtypes):
                dihscoeffs.append(
                    [
                        dihstypeID,
                        0,
                        3.0,
                        1.0,
                        atom1type,
                        atom2type,
                        atom3type,
                        atom4type,
                    ]
                )
            else:
                # Otherwise reuse coefficients of any assigned dihedral whose
                # central-atom element letters match (either orientation).
                flag = 0
                a3type = atom3type[0]
                a2type = atom2type[0]
                for j in range(len(dihscoeffs)):
                    # btypeID = dihscoeffs[j][0]
                    kv = dihscoeffs[j][1]
                    nv = dihscoeffs[j][2]
                    dv = dihscoeffs[j][3]
                    b3type = dihscoeffs[j][6]
                    b2type = dihscoeffs[j][5]
                    if (a3type == b3type[0] and a2type == b2type[0]) or (
                        a3type == b2type[0] and a2type == b3type[0]
                    ):
                        dihscoeffs.append(
                            [
                                dihstypeID,
                                kv,
                                nv,
                                dv,
                                atom1type,
                                atom2type,
                                atom3type,
                                atom4type,
                            ]
                        )
                        flag = 1
                        break
                if flag == 0:
                    # Last-resort generic torsion barrier.
                    dihscoeffs.append(
                        [
                            dihstypeID,
                            0.11111,
                            3.0,
                            1.0,
                            atom1type,
                            atom2type,
                            atom3type,
                            atom4type,
                        ]
                    )
    # sorting
    dihscoeffs.sort()
    return dihscoeffs, warning
#####################################################################################
# get bond coeffs
def getImpsCoeffs(impstypes):
warning = ""
impscoeffs = []
imps_nonzero = []
flag = 0
Forcefieldfile = getDreidingParamFile()
fin = open(Forcefieldfile, "r")
dataline = fin.readline()
while dataline != "" and dataline != "\n" and flag == 0:
words = dataline[0 : len(dataline) - 1] # noqa: E203
if str(words).upper() == | |
try:
self.state = 335
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [SygusParser.T__0, SygusParser.T__11, SygusParser.T__12, SygusParser.INTEGER, SygusParser.BVCONST, SygusParser.REALCONST, SygusParser.SYMBOL]:
self.enterOuterAlt(localctx, 1)
self.state = 331
self.gTerm()
self.state = 332
self.gTermPlusTail()
pass
elif token in [SygusParser.T__2]:
self.enterOuterAlt(localctx, 2)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CheckSynthCmdContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return SygusParser.RULE_checkSynthCmd
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCheckSynthCmd" ):
listener.enterCheckSynthCmd(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCheckSynthCmd" ):
listener.exitCheckSynthCmd(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCheckSynthCmd" ):
return visitor.visitCheckSynthCmd(self)
else:
return visitor.visitChildren(self)
    def checkSynthCmd(self):
        """Parse the ``checkSynthCmd`` rule: the fixed token sequence
        T__0 'check-synth' T__2 (the implicit open/close delimiter tokens
        used by every command rule in this parser).

        ANTLR-generated; ``self.state`` assignments index the ATN.
        """
        localctx = SygusParser.CheckSynthCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 82, self.RULE_checkSynthCmd)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 337
            self.match(SygusParser.T__0)
            self.state = 338
            self.match(SygusParser.T__18)
            self.state = 339
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            # Standard generated recovery: record, report, resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class ConstraintCmdContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def term(self):
return self.getTypedRuleContext(SygusParser.TermContext,0)
def getRuleIndex(self):
return SygusParser.RULE_constraintCmd
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterConstraintCmd" ):
listener.enterConstraintCmd(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitConstraintCmd" ):
listener.exitConstraintCmd(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitConstraintCmd" ):
return visitor.visitConstraintCmd(self)
else:
return visitor.visitChildren(self)
    def constraintCmd(self):
        """Parse the ``constraintCmd`` rule: T__0 'constraint' term T__2.

        ANTLR-generated; ``self.state`` assignments index the ATN.
        """
        localctx = SygusParser.ConstraintCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 84, self.RULE_constraintCmd)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 341
            self.match(SygusParser.T__0)
            self.state = 342
            self.match(SygusParser.T__19)
            self.state = 343
            self.term()
            self.state = 344
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            # Standard generated recovery: record, report, resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class SynthFunCmdContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def symbol(self):
return self.getTypedRuleContext(SygusParser.SymbolContext,0)
def argList(self):
return self.getTypedRuleContext(SygusParser.ArgListContext,0)
def sortExpr(self):
return self.getTypedRuleContext(SygusParser.SortExprContext,0)
def nTDefPlus(self):
return self.getTypedRuleContext(SygusParser.NTDefPlusContext,0)
def getRuleIndex(self):
return SygusParser.RULE_synthFunCmd
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSynthFunCmd" ):
listener.enterSynthFunCmd(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSynthFunCmd" ):
listener.exitSynthFunCmd(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSynthFunCmd" ):
return visitor.visitSynthFunCmd(self)
else:
return visitor.visitChildren(self)
    def synthFunCmd(self):
        """Parse the ``synthFunCmd`` rule.

        Two alternatives chosen by adaptive prediction: (1) a synth-fun with
        an explicit grammar block (nTDefPlus) and (2) one without.
        ANTLR-generated; ``self.state`` assignments index the ATN.
        """
        localctx = SygusParser.SynthFunCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 86, self.RULE_synthFunCmd)
        try:
            self.state = 363
            self._errHandler.sync(self)
            # Look ahead to decide whether a grammar block follows the sort.
            la_ = self._interp.adaptivePredict(self._input,15,self._ctx)
            if la_ == 1:
                # Alternative 1: with nested grammar definition.
                self.enterOuterAlt(localctx, 1)
                self.state = 346
                self.match(SygusParser.T__0)
                self.state = 347
                self.match(SygusParser.T__20)
                self.state = 348
                self.symbol()
                self.state = 349
                self.argList()
                self.state = 350
                self.sortExpr()
                self.state = 351
                self.match(SygusParser.T__0)
                self.state = 352
                self.nTDefPlus()
                self.state = 353
                self.match(SygusParser.T__2)
                self.state = 354
                self.match(SygusParser.T__2)
                pass

            elif la_ == 2:
                # Alternative 2: no grammar block.
                self.enterOuterAlt(localctx, 2)
                self.state = 356
                self.match(SygusParser.T__0)
                self.state = 357
                self.match(SygusParser.T__20)
                self.state = 358
                self.symbol()
                self.state = 359
                self.argList()
                self.state = 360
                self.sortExpr()
                self.state = 361
                self.match(SygusParser.T__2)
                pass

        except RecognitionException as re:
            # Standard generated recovery: record, report, resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class GTermContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def symbol(self):
return self.getTypedRuleContext(SygusParser.SymbolContext,0)
def literal(self):
return self.getTypedRuleContext(SygusParser.LiteralContext,0)
def gTermStar(self):
return self.getTypedRuleContext(SygusParser.GTermStarContext,0)
def sortExpr(self):
return self.getTypedRuleContext(SygusParser.SortExprContext,0)
def letGTerm(self):
return self.getTypedRuleContext(SygusParser.LetGTermContext,0)
def getRuleIndex(self):
return SygusParser.RULE_gTerm
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGTerm" ):
listener.enterGTerm(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGTerm" ):
listener.exitGTerm(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGTerm" ):
return visitor.visitGTerm(self)
else:
return visitor.visitChildren(self)
    def gTerm(self):
        """Parse one grammar term (rule ``gTerm``).

        Eight alternatives chosen by adaptive prediction: a bare symbol, a
        literal, an application ``(symbol gTerm*)``, four keyworded
        ``(<T__21..T__24> sortExpr)`` forms (implicit literal tokens — 
        presumably the Constant/Variable/InputVariable/LocalVariable
        keywords; confirm against the grammar), or a let-term.
        ANTLR-generated; ``self.state`` assignments index the ATN.
        """
        localctx = SygusParser.GTermContext(self, self._ctx, self.state)
        self.enterRule(localctx, 88, self.RULE_gTerm)
        try:
            self.state = 393
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,16,self._ctx)
            if la_ == 1:
                # Alternative 1: bare symbol.
                self.enterOuterAlt(localctx, 1)
                self.state = 365
                self.symbol()
                pass

            elif la_ == 2:
                # Alternative 2: literal constant.
                self.enterOuterAlt(localctx, 2)
                self.state = 366
                self.literal()
                pass

            elif la_ == 3:
                # Alternative 3: application '(' symbol gTermStar ')'.
                self.enterOuterAlt(localctx, 3)
                self.state = 367
                self.match(SygusParser.T__0)
                self.state = 368
                self.symbol()
                self.state = 369
                self.gTermStar()
                self.state = 370
                self.match(SygusParser.T__2)
                pass

            elif la_ == 4:
                # Alternative 4: '(' T__21 sortExpr ')'.
                self.enterOuterAlt(localctx, 4)
                self.state = 372
                self.match(SygusParser.T__0)
                self.state = 373
                self.match(SygusParser.T__21)
                self.state = 374
                self.sortExpr()
                self.state = 375
                self.match(SygusParser.T__2)
                pass

            elif la_ == 5:
                # Alternative 5: '(' T__22 sortExpr ')'.
                self.enterOuterAlt(localctx, 5)
                self.state = 377
                self.match(SygusParser.T__0)
                self.state = 378
                self.match(SygusParser.T__22)
                self.state = 379
                self.sortExpr()
                self.state = 380
                self.match(SygusParser.T__2)
                pass

            elif la_ == 6:
                # Alternative 6: '(' T__23 sortExpr ')'.
                self.enterOuterAlt(localctx, 6)
                self.state = 382
                self.match(SygusParser.T__0)
                self.state = 383
                self.match(SygusParser.T__23)
                self.state = 384
                self.sortExpr()
                self.state = 385
                self.match(SygusParser.T__2)
                pass

            elif la_ == 7:
                # Alternative 7: '(' T__24 sortExpr ')'.
                self.enterOuterAlt(localctx, 7)
                self.state = 387
                self.match(SygusParser.T__0)
                self.state = 388
                self.match(SygusParser.T__24)
                self.state = 389
                self.sortExpr()
                self.state = 390
                self.match(SygusParser.T__2)
                pass

            elif la_ == 8:
                # Alternative 8: let-binding term.
                self.enterOuterAlt(localctx, 8)
                self.state = 392
                self.letGTerm()
                pass

        except RecognitionException as re:
            # Standard generated recovery: record, report, resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class LetGTermContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def letBindingGTermPlus(self):
return self.getTypedRuleContext(SygusParser.LetBindingGTermPlusContext,0)
def gTerm(self):
return self.getTypedRuleContext(SygusParser.GTermContext,0)
def getRuleIndex(self):
return SygusParser.RULE_letGTerm
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLetGTerm" ):
listener.enterLetGTerm(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLetGTerm" ):
listener.exitLetGTerm(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLetGTerm" ):
return visitor.visitLetGTerm(self)
else:
return visitor.visitChildren(self)
    def letGTerm(self):
        """Parse the ``letGTerm`` rule:
        T__0 'let' T__0 letBindingGTermPlus T__2 gTerm T__2.

        ANTLR-generated; ``self.state`` assignments index the ATN.
        """
        localctx = SygusParser.LetGTermContext(self, self._ctx, self.state)
        self.enterRule(localctx, 90, self.RULE_letGTerm)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 395
            self.match(SygusParser.T__0)
            self.state = 396
            self.match(SygusParser.T__17)
            self.state = 397
            self.match(SygusParser.T__0)
            self.state = 398
            self.letBindingGTermPlus()
            self.state = 399
            self.match(SygusParser.T__2)
            self.state = 400
            self.gTerm()
            self.state = 401
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            # Standard generated recovery: record, report, resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class LetBindingGTermPlusContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def letBindingGTerm(self):
return self.getTypedRuleContext(SygusParser.LetBindingGTermContext,0)
def letBindingGTermPlusTail(self):
return self.getTypedRuleContext(SygusParser.LetBindingGTermPlusTailContext,0)
def getRuleIndex(self):
return SygusParser.RULE_letBindingGTermPlus
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLetBindingGTermPlus" ):
listener.enterLetBindingGTermPlus(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLetBindingGTermPlus" ):
listener.exitLetBindingGTermPlus(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLetBindingGTermPlus" ):
return visitor.visitLetBindingGTermPlus(self)
else:
return visitor.visitChildren(self)
    def letBindingGTermPlus(self):
        """Parse the ``letBindingGTermPlus`` rule: one letBindingGTerm
        followed by its (possibly empty) tail — a right-recursive encoding
        of a one-or-more list.

        ANTLR-generated; ``self.state`` assignments index the ATN.
        """
        localctx = SygusParser.LetBindingGTermPlusContext(self, self._ctx, self.state)
        self.enterRule(localctx, 92, self.RULE_letBindingGTermPlus)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 403
            self.letBindingGTerm()
            self.state = 404
            self.letBindingGTermPlusTail()
        except RecognitionException as re:
            # Standard generated recovery: record, report, resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class LetBindingGTermPlusTailContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def letBindingGTerm(self):
return self.getTypedRuleContext(SygusParser.LetBindingGTermContext,0)
def letBindingGTermPlusTail(self):
return self.getTypedRuleContext(SygusParser.LetBindingGTermPlusTailContext,0)
def getRuleIndex(self):
return SygusParser.RULE_letBindingGTermPlusTail
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLetBindingGTermPlusTail" ):
listener.enterLetBindingGTermPlusTail(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLetBindingGTermPlusTail" ):
listener.exitLetBindingGTermPlusTail(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLetBindingGTermPlusTail" ):
return visitor.visitLetBindingGTermPlusTail(self)
else:
return visitor.visitChildren(self)
def letBindingGTermPlusTail(self):
    """Parse rule letBindingGTermPlusTail: either another binding + tail, or empty.

    ANTLR-generated; rule index 94, ATN states 406-410. Do not edit by hand.
    """
    localctx = SygusParser.LetBindingGTermPlusTailContext(self, self._ctx, self.state)
    self.enterRule(localctx, 94, self.RULE_letBindingGTermPlusTail)
    try:
        self.state = 410
        self._errHandler.sync(self)
        # One token of lookahead decides between the two alternatives.
        token = self._input.LA(1)
        if token in [SygusParser.T__0]:
            # Alt 1: '(' starts another letBindingGTerm, then recurse on the tail.
            self.enterOuterAlt(localctx, 1)
            self.state = 406
            self.letBindingGTerm()
            self.state = 407
            self.letBindingGTermPlusTail()
            pass
        elif token in [SygusParser.T__2]:
            # Alt 2: closing token ends the (+)-repetition (empty tail).
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LetBindingGTermContext(ParserRuleContext):
    """Parse-tree node for rule letBindingGTerm (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        # Bound symbol child context.
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def sortExpr(self):
        # Sort (type) annotation child context.
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def gTerm(self):
        # Grammar-term child context bound to the symbol.
        return self.getTypedRuleContext(SygusParser.GTermContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_letBindingGTerm

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLetBindingGTerm" ):
            listener.enterLetBindingGTerm(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLetBindingGTerm" ):
            listener.exitLetBindingGTerm(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLetBindingGTerm" ):
            return visitor.visitLetBindingGTerm(self)
        else:
            return visitor.visitChildren(self)
def letBindingGTerm(self):
    """Parse rule letBindingGTerm: '(' symbol sortExpr gTerm ')'.

    ANTLR-generated; rule index 96, ATN states 412-416. Do not edit by hand.
    """
    localctx = SygusParser.LetBindingGTermContext(self, self._ctx, self.state)
    self.enterRule(localctx, 96, self.RULE_letBindingGTerm)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 412
        self.match(SygusParser.T__0)
        self.state = 413
        self.symbol()
        self.state = 414
        self.sortExpr()
        self.state = 415
        self.gTerm()
        self.state = 416
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class GTermStarContext(ParserRuleContext):
    """Parse-tree node for rule gTermStar (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def gTerm(self):
        # Head grammar-term child context, if present.
        return self.getTypedRuleContext(SygusParser.GTermContext,0)

    def gTermStar(self):
        # Recursive tail child context, if present.
        return self.getTypedRuleContext(SygusParser.GTermStarContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_gTermStar

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterGTermStar" ):
            listener.enterGTermStar(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitGTermStar" ):
            listener.exitGTermStar(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitGTermStar" ):
            return visitor.visitGTermStar(self)
        else:
            return visitor.visitChildren(self)
def gTermStar(self):
    """Parse rule gTermStar: zero or more gTerm occurrences (right recursion).

    ANTLR-generated; rule index 98, ATN states 418-422. Do not edit by hand.
    """
    localctx = SygusParser.GTermStarContext(self, self._ctx, self.state)
    self.enterRule(localctx, 98, self.RULE_gTermStar)
    try:
        self.state = 422
        self._errHandler.sync(self)
        # One token of lookahead decides between 'another gTerm' and 'done'.
        token = self._input.LA(1)
        if token in [SygusParser.T__0, SygusParser.T__11, SygusParser.T__12, SygusParser.INTEGER, SygusParser.BVCONST, SygusParser.REALCONST, SygusParser.SYMBOL]:
            # Alt 1: any token that can start a gTerm -> parse it and recurse.
            self.enterOuterAlt(localctx, 1)
            self.state = 418
            self.gTerm()
            self.state = 419
            self.gTermStar()
            pass
        elif token in [SygusParser.T__2]:
            # Alt 2: closing token ends the (*)-repetition (empty).
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SynthInvCmdContext(ParserRuleContext):
    """Parse-tree node for rule synthInvCmd (synth-inv command; ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        # Name of the invariant to synthesize.
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def argList(self):
        # Argument list child context.
        return self.getTypedRuleContext(SygusParser.ArgListContext,0)

    def nTDefPlus(self):
        # Optional grammar (non-terminal definitions) child context.
        return self.getTypedRuleContext(SygusParser.NTDefPlusContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_synthInvCmd

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSynthInvCmd" ):
            listener.enterSynthInvCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSynthInvCmd" ):
            listener.exitSynthInvCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSynthInvCmd" ):
            return visitor.visitSynthInvCmd(self)
        else:
            return visitor.visitChildren(self)
def synthInvCmd(self):
localctx = SygusParser.SynthInvCmdContext(self, self._ctx, self.state)
self.enterRule(localctx, 100, self.RULE_synthInvCmd)
try:
self.state = 439
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,19,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = | |
# Version 3.1; <NAME>; Polar Geospatial Center, University of Minnesota; 2018
from __future__ import division
import inspect
import os
import re
import sys
import warnings
from glob import glob
from warnings import warn
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
import ogr, osr
import numpy as np
from PIL import Image
from scipy.misc import imread as scipy_imread
from tifffile import imread, imsave
from batch_scenes2strips import getDemSuffix
import lib.raster_array_tools as rat
from testing import TESTDIR, PREFIX_RUNNUM
warnings.simplefilter('always', UserWarning)
class InvalidArgumentError(Exception):
    """Raised when a caller passes an argument that cannot be used."""
    def __init__(self, msg=""):
        # Name our own class in super(): the original wrote
        # `super(Exception, self)`, which starts the MRO lookup one level
        # too high (at BaseException) instead of at this class's parent.
        super(InvalidArgumentError, self).__init__(msg)
class TestingError(Exception):
    """Raised when the testing harness itself is in an inconsistent state."""
    def __init__(self, msg=""):
        # Name our own class in super() (the original skipped an MRO level
        # by writing `super(Exception, self)`).
        super(TestingError, self).__init__(msg)
def stringifyThisFunctionForExec(*args):
    """Return the caller's post-`return` source code as a string for exec().

    Reads this module's own source file, locates the *calling* function's
    definition, then captures every line that follows the caller's
    `return stringifyThisFunctionForExec(...)` statement (up to the next
    top-level `def` or EOF), with one level of indentation removed.
    Placeholders `__arg0__`, `__arg1__`, ... in the captured text are
    replaced with the corresponding entries of `args`.
    """
    exec_script = ''
    # Names used to locate this helper and its caller in the source text.
    this_funcName = inspect.stack()[0][3]
    this_funcReturn = 'return {}('.format(this_funcName)
    caller_funcName = inspect.stack()[1][3]
    caller_funcDef = 'def {}('.format(caller_funcName)
    # Read our own source, mapping a compiled .pyc path back to the .py file.
    this_file_fp = open(__file__.replace('.pyc', '.py'), 'r')
    this_file_txt = this_file_fp.read()
    this_file_fp.close()
    this_file_fp = StringIO(this_file_txt)
    line = this_file_fp.readline()
    indent = ''
    # Find the function definition in this file.
    found = False
    while not found and line != '':
        if line.startswith(caller_funcDef):
            found = True
        line = this_file_fp.readline()
    if not found:
        raise TestingError("Could not find function definition matching '{}'".format(caller_funcDef))
    # Find the return statement that called this function.
    found = False
    while not found and line != '':
        if line.lstrip().startswith(this_funcReturn):
            found = True
            # Assuming the return statement is indented once beyond the function definition,
            # capture the indentation schema so that one indent may be removed from every line of
            # the string of code that is returned.
            indent = line[:line.find(this_funcReturn)]
        line = this_file_fp.readline()
    if not found:
        raise TestingError("Could not find return statement matching '{}' within function '{}'".format(
            this_funcReturn, this_funcName))
    # Add all code that follows that first return statement to a string variable,
    # stopping when the next function definition is read or EOF is reached.
    done = False
    while not done and line != '':
        if line.startswith('def '):
            done = True
        else:
            exec_script += line.replace(indent, '', 1)
        line = this_file_fp.readline()
    this_file_fp.close()
    # Place all arguments into their proper places in the script.
    # NOTE: Arguments must be evaluated to perform these substitutions, SO BE CAREFUL!!
    for i, arg in enumerate(args):
        exec_script = exec_script.replace('__arg{}__'.format(i), arg)
    return exec_script
def cv():
    """
    Check Vars
    *** to be executed while debugging ***
    """
    # Everything below the return is deliberately unreachable here: it is the
    # payload that stringifyThisFunctionForExec() captures as text so a
    # debugger can exec() it inside the frame being inspected.
    return stringifyThisFunctionForExec()
    cv_test_vars = None
    cv_test_var = None
    cv_test_expr = None
    cv_test_var_shape = None
    # Variable names to report on; '-' entries print a separator line.
    cv_test_vars = (
        'x', 'y', 'z', 'm', 'o', 'md', '-',
        'X', 'Y', 'Z', 'M', 'O', '-',
        'Xsub', 'Ysub', 'Zsub', 'Msub', 'Osub'
    )
    print('')
    for cv_test_var in cv_test_vars:
        if cv_test_var in vars():
            # Report dtype, shape (1-D shown as a 1xN row; the trailing 'L'
            # of Python 2 longs is stripped), then nan-aware min and max.
            cv_test_expr = 'str({}.dtype)'.format(cv_test_var)
            print('> {}.dtype = {}'.format(cv_test_var, eval(cv_test_expr)))
            cv_test_expr = '{}.shape'.format(cv_test_var)
            cv_test_var_shape = eval(cv_test_expr)
            if len(cv_test_var_shape) == 1:
                cv_test_var_shape = (1, cv_test_var_shape[0])
            print('  shape = {}'.format(str(cv_test_var_shape).replace('L', '')))
            cv_test_expr = 'np.nanmin({})'.format(cv_test_var)
            print('  min = {}'.format(eval(cv_test_expr)))
            cv_test_expr = 'np.nanmax({})'.format(cv_test_var)
            print('  max = {}'.format(eval(cv_test_expr)))
        elif cv_test_var == '-':
            print('------------------')
    print('')
    del cv_test_vars, cv_test_var, cv_test_var_shape, cv_test_expr
def sg(varNames_csv):
    """
    Set Globals
    *** to be executed while debugging ***
    ::
        varNames_csv must be a comma-delimited string of variable names
        accessible in the current namespace.
    """
    if not isinstance(varNames_csv, str):
        raise InvalidArgumentError("`varNames_csv` must be a string")
    # Everything below the return is deliberately unreachable here: it is the
    # payload captured by stringifyThisFunctionForExec(), with '__arg0__'
    # replaced by the quoted varNames_csv, to be exec()'d while debugging.
    return stringifyThisFunctionForExec('"{}"'.format(varNames_csv))
    sg_varNames_list = None
    sg_testVname = None
    sg_i = None
    sg_v = None
    # Drop any test globals created by a previous sg() invocation.
    if 'sg_testVnames_list' in vars():
        for sg_v in sg_testVnames_list:
            exec('del {}'.format(sg_v))
    sg_testVnames_list = []
    sg_varNames_list = __arg0__.split(',')
    for sg_i, sg_v in enumerate(sg_varNames_list):
        # Global name encodes creation order and the original variable name,
        # matching the pattern getTestVarsFromGlobals() looks for.
        sg_testVname = '{}{}_{}'.format('sg_testVar_', sg_i, sg_v.strip())
        exec('global {}'.format(sg_testVname))
        exec('{} = {}'.format(sg_testVname, sg_v))
        sg_testVnames_list.append(sg_testVname)
    del sg_varNames_list, sg_testVname, sg_i, sg_v
def getTestVarsFromGlobals(debug_globals):
    """Collect test variables stashed in a globals dict by the sg() helper.

    Scans `debug_globals` for names matching 'sg_testVar_<i>_<name>' and
    returns two parallel lists: the extracted <name> parts and the
    corresponding values, ordered by the full global name (lexicographic,
    so indices >= 10 sort before 2 -- unchanged from the original).
    """
    testVname_pattern_str = r"{}\d+_(.+)".format('sg_testVar_')
    testVname_pattern = re.compile(testVname_pattern_str)
    testVar_names = []
    testVar_values = []
    # dict.keys() returns a view object on Python 3, which has no .sort()
    # method; sorted() works on both Python 2 and 3.
    g_keys = sorted(debug_globals.keys())
    for varName in g_keys:
        m = re.match(testVname_pattern, varName)
        if m is not None:
            testVar_names.append(m.group(1))
            testVar_values.append(debug_globals[varName])
    return testVar_names, testVar_values
def splitTupleString(tup_string):
    """Split the contents of the first parenthesized group in `tup_string`.

    Returns a tuple of the comma-separated pieces inside '(...)', each
    stripped of surrounding whitespace, or None when no parenthesized
    group is present.
    """
    match = re.search(r"\((.*)\)", tup_string)
    if match is None:
        return None
    inner = match.group(1)
    return tuple(piece.strip() for piece in inner.split(','))
def splitArgsString(args_string):
    """Split a textual argument list on top-level commas.

    Commas nested inside (), [], {} or inside single/double-quoted strings
    do not split; each resulting piece is stripped.  Returns a tuple.
    """
    openers = '([{'
    closers = ')]}'
    open_counts = [0, 0, 0]
    close_counts = [0, 0, 0]
    quote_chars = ("'", '"')
    in_quote = -1  # index into quote_chars while inside a string, else -1
    pieces = []
    start = 0
    pos = 0
    for ch in args_string:
        if in_quote == -1:
            if ch == ',':
                # Only split when every bracket type is balanced so far.
                if open_counts == close_counts:
                    pieces.append(args_string[start:pos])
                    start = pos + 1
            elif ch in openers:
                open_counts[openers.find(ch)] += 1
            elif ch in closers:
                close_counts[closers.find(ch)] += 1
            elif ch in quote_chars:
                in_quote = quote_chars.index(ch)
        elif ch == quote_chars[in_quote]:
            # We've reached the end of a string.
            in_quote = -1
        pos += 1
    # Flush the trailing argument, if any.
    if start < pos:
        pieces.append(args_string[start:pos])
    return tuple(piece.strip() for piece in pieces)
# Doesn't work correctly in newest release of Python2.7... :'(
def getCalledFunctionArgs(depth=1, funcName=None):
    """Return the textual argument expressions of a call found in the stack.

    Locates the stack frame that invoked `funcName` (searching bottom-up
    when depth == 0, top-down when depth == float('inf'); both require
    `funcName`), or indexes the stack directly with `depth`, then parses
    the source text of that call site and returns its argument expressions
    as a tuple of strings (via splitArgsString).
    """
    stack = inspect.stack()
    func_frame_record = None
    try:
        if depth == 0 or depth == float('inf'):
            if funcName is None:
                raise InvalidArgumentError("`funcName` must be provided when depth is not certain")
            stack_iterable = range(len(stack))
            if depth != 0:
                stack_iterable = reversed(stack_iterable)
            for i in stack_iterable:
                fr_funcName = stack[i][3]
                if fr_funcName == funcName:
                    # The *caller* of funcName sits one frame further out.
                    func_frame_record = stack[i+1]
                    break
            if func_frame_record is None:
                raise InvalidArgumentError("`funcName` '{}' could not be found in the stack".format(funcName))
        else:
            try:
                func_frame_record = stack[depth+1]
            except IndexError:
                raise InvalidArgumentError("Invalid `depth` index for stack: {}".format(depth))
            if funcName is not None and stack[depth][3] != funcName:
                raise InvalidArgumentError("`funcName` '{}' could not be found in the stack "
                                           "at index {}".format(funcName, depth))
        # Join the call site's source context lines into a single string.
        funcCall = ''.join([str(line).strip() for line in func_frame_record[4]])
    except:
        # Dump the stack before re-raising to aid debugging of the debugger.
        print("STACK AT ERROR:")
        for fr in stack:
            print(fr)
        raise
    # Capture everything between the call's parentheses.
    args_pattern_str = "\w" if funcName is None else funcName
    args_pattern_str += "\s*\((.+)\)"
    args_pattern = re.compile(args_pattern_str)
    search_result = re.search(args_pattern, funcCall)
    if search_result is None:
        return ()
    else:
        args_string = search_result.group(1)
        return splitArgsString(args_string)
def findTestFile(fname_or_file):
    """Resolve `fname_or_file` to an existing test-file path.

    Candidates are tried in order: the path as given, the path with '.tif'
    appended, then the same two forms inside TESTDIR.  Raises
    InvalidArgumentError when none of the candidates exists.
    """
    if os.path.isfile(fname_or_file):
        return fname_or_file
    with_ext = fname_or_file + '.tif'
    if os.path.isfile(with_ext):
        return with_ext
    in_testdir = os.path.join(TESTDIR, fname_or_file)
    if os.path.isfile(in_testdir):
        return in_testdir
    if os.path.isfile(in_testdir + '.tif'):
        return in_testdir + '.tif'
    raise InvalidArgumentError("Cannot find `testFile`: '{}'".format(fname_or_file))
def getRunnum():
    """Return the current run number, read from the single
    'CURRENT_RUNNUM_###' marker file in TESTDIR (creating one via
    setRunnum() when none exists; erroring when there are several)."""
    runnum = -1
    runnumFiles = glob(os.path.join(TESTDIR, PREFIX_RUNNUM+'*'))
    try:
        if len(runnumFiles) == 0:
            runnum = setRunnum()
        elif len(runnumFiles) == 1:
            runnum_fname = os.path.basename(runnumFiles[0])
            # Characters 15:18 hold the zero-padded run number
            # (assumes len(PREFIX_RUNNUM) == 15 -- TODO confirm).
            runnum = int(runnum_fname[15:18])
        else:
            raise ValueError
    except ValueError:
        raise TestingError(
            "One dummy file must exist in the test directory"
            " with a name indicating the current runnum for comparison!"
            " e.g. 'CURRENT_RUNNUM_001'"
        )
    return runnum
def getLastRunnum():
    """Return the highest run number among 'run###*' files in TESTDIR,
    or None when no such files exist."""
    run_files = glob(os.path.join(TESTDIR, 'run*'))
    if not run_files:
        return None
    # Characters 3:6 of the basename hold the zero-padded run number.
    return max(int(os.path.basename(f)[3:6]) for f in run_files)
def setRunnum(new_runnum=None, increment=False, concurrent=False):
    """Record `new_runnum` in the 'CURRENT_RUNNUM_###' marker file and return it.

    new_runnum -- run number to record; defaults to the last run number on
        disk (getLastRunnum()), or 0 when there is none.
    increment -- bump the number by one before recording.
    concurrent -- tag the marker filename with '_CC'; when the existing
        marker is already tagged, the tag is removed instead
        (NOTE(review): this toggle-like behavior looks intentional for
        concurrent runs -- confirm).
    """
    if new_runnum is None:
        new_runnum = getLastRunnum()
        if new_runnum is None:
            new_runnum = 0
    if increment:
        new_runnum += 1
    runnumFile_new = os.path.join(TESTDIR, PREFIX_RUNNUM+'{:03d}{}'.format(new_runnum, '_CC'*concurrent))
    runnumFiles = glob(os.path.join(TESTDIR, PREFIX_RUNNUM+'*'))
    if len(runnumFiles) == 0:
        # No marker yet; create an empty dummy file.
        runnumFile_fp = open(runnumFile_new, 'w')
        runnumFile_fp.close()
    elif len(runnumFiles) == 1:
        runnumFile_current = runnumFiles[0]
        if concurrent:
            runnumFname_current = os.path.basename(runnumFile_current)
            if '_CC' in runnumFname_current:
                # Already tagged: strip the '_CC' suffix instead of adding it.
                runnumFile_new = os.path.join(TESTDIR, runnumFname_current.replace('_CC', ''))
        os.rename(runnumFile_current, runnumFile_new)
    else:
        getRunnum()  # Get error message from this function.
    return new_runnum
def incRunnum(concurrent=False):
    """Increment the stored run number by one and return the new value."""
    return setRunnum(increment=True, concurrent=concurrent)
def getNextImgnum(runnum=None, compare=False, concurrent=False):
    """Return the next image number for the given run (current run when
    `runnum` is None).

    compare=True returns the *latest* existing image number instead of the
    next one (None when the run has no images yet).  concurrent=True only
    considers files containing '_py_' in their name.
    """
    if runnum is None:
        runnum = getRunnum()
    matches = glob(os.path.join(TESTDIR, 'run{:03d}_*'.format(runnum)))
    if concurrent:
        matches = [f for f in matches if '_py_' in f]
    if not matches:
        return 1 if not compare else None
    # Characters 7:10 of the basename hold the zero-padded image number.
    latest = max(int(os.path.basename(f)[7:10]) for f in matches)
    return latest + 1 if not compare else latest
def validateTestFileSave(path, allow_existing=False):
    """Resolve `path` to a save location (bare filenames go under TESTDIR,
    creating the directory if needed) and interactively guard overwrites.

    Returns the validated full path -- possibly with a user-supplied
    description appended before the extension -- or None when the user
    cancels at the prompt.
    """
    if os.path.basename(path) == path:
        # Bare filename: save it inside the test directory.
        path_full = os.path.join(TESTDIR, path)
        if not os.path.isdir(TESTDIR):
            print("Creating 'testFiles' directory: {}".format(TESTDIR))
            print("Modify `testing` module init file to change directory location");
            os.makedirs(TESTDIR)
    else:
        path_full = path
    if not allow_existing:
        # NOTE(review): `input()` evaluates its argument on Python 2, and
        # this module otherwise supports Py2 (see the StringIO import at the
        # top of the file) -- confirm raw_input is not needed here.
        while os.path.isfile(path_full):
            opt = input("Test file '{}' already exists. "
                        "Overwrite/append? (y/n): ".format(path_full.replace(TESTDIR, '{TESTDIR}')))
            if opt.strip().lower() == 'y':
                break
            else:
                opt = input("Append description to filename (or press [ENTER] to cancel): ")
                if opt == '':
                    return None
                else:
                    # Append '~description' (spaces become dashes) before the
                    # extension and retry the existence check.
                    path_fname_root, path_ext = os.path.splitext(path_full)
                    path_full = '{}~{}{}'.format(path_fname_root, opt.replace(' ', '-'), path_ext)
    return path_full
def interpretImageRasterFlavor(flavor):
flavor_name = ''
image_PILmode = None
raster_format = None
raster_nodata = None
if flavor is not None and flavor != '':
if flavor in ('dem', 'dem_array', 'z', 'Z', 'Zsub'):
flavor_name = 'dem'
image_PILmode = 'F'
raster_format = 'float32'
raster_nodata = -9999
elif flavor in ('match', 'match_array', 'm', 'M', 'Msub'):
flavor_name = 'match'
image_PILmode = 'L'
raster_format = 'uint8'
raster_nodata = 0
elif flavor in ('ortho', 'ortho_array', 'o', 'or', 'O', 'Osub'):
flavor_name = 'ortho'
image_PILmode = 'I'
raster_format = 'int16'
raster_nodata = 0
elif flavor in ('mask', 'mask_array'):
flavor_name = 'mask'
image_PILmode = 'L'
raster_format = 'uint8'
raster_nodata = 0
| |
<filename>examples/Efficient Cavity Control with SNAP Gates.py
#!/usr/bin/env python
# coding: utf-8
# # Introduction
#
# This tutorial reproduces part of [<NAME> et al. (2020)](https://arxiv.org/abs/2004.14256),
# titled **Efficient cavity control with SNAP gates**. The general
# idea is to start in an initial state and apply a sequence of
# operators to reach the desired target state.
#
# In the paper, the authors use a vacuum state, $|0 \rangle$,
# with a Hilbert space cutoff of 10.
#
# Authors apply sequence of operators as blocks, $\hat{B}$,
# where
#
# \begin{equation}
# \hat B = D(\alpha) \hat S(\vec \theta) D(\alpha)^{\dagger}
# \end{equation}
#
#
# In the equation above, $D(\alpha)$ is a Displace operation
# given by
#
# \begin{equation}
# D(\alpha) = e^{\alpha a^{\dagger} - \alpha^* \hat a}
# \end{equation}
#
# where $\alpha$ is the complex displacement parameter,
# and $a^{\dagger}$ and $\hat a$ are the bosonic creation
# and annihilation operators respectively.
#
# $S(\vec \theta)$ in the block definition of $\hat{B}$
# above is the selective number-dependent-snap arbitrary
# phase (SNAP) gate defined as
#
# \begin{equation}\label{snap-gate}
# \hat S(\vec{\theta}) = \sum_{0}^{n} e^{i \theta^{(n)}} |n\rangle \langle n|
# \end{equation}
#
# where $\vec{\theta}$ is the real vector containing SNAP
# parameters, and $|n\rangle$ is the number state.
#
#
# With the operator definitions laid out, the idea now is to
# apply this sequence of $D(\alpha)$, $S(\vec{\theta})$, and
# $D(\alpha)^{\dagger}$ operations to $|0 \rangle$, in order
# to reach the target binomial state, which in this case is
#
# \begin{equation}
# b_{1} = \frac{\sqrt 3 |3\rangle + |9\rangle}{2}
# \end{equation}
#
# **Note:** We do not follow the _exact_ same scheme as used in
# [<NAME> et al. (2020)](https://arxiv.org/abs/2004.14256),
# in that we use a simpler cost function (without regularization)
# and do not add blocks in a Breadth-First manner.
import numpy as onp # this is ordinary numpy that we will
# use with QuTiP for visualization only.
# onp should not used for gradient
# calculations. Instead jax.numpy
# should be used.
import jax.numpy as jnp
from jax import grad, jit
from jax.experimental import optimizers
# Visualization
import matplotlib.pyplot as plt
from qutip.visualization import plot_wigner, hinton
from qutip import (
Qobj,
) # imported purely for visualization purposes due to QuTiP-JAX non-compatibility
from qgrad.qgrad_qutip import basis, to_dm, dag, Displace, fidelity
from tqdm.auto import tqdm
def pad_thetas(hilbert_size, thetas):
    """
    Pads zeros to the end of a theta vector to
    fill it upto the Hilbert space cutoff.

    Args:
        hilbert_size (int): Size of the hilbert space
        thetas (:obj:`jnp.ndarray`): List of angles thetas

    Returns:
        :obj:`jnp.ndarray`: List of angles padded with zeros
            in place of Hilbert space cutoff

    Raises:
        ValueError: if `thetas` already has more entries than
            `hilbert_size` (previously this surfaced as a cryptic
            negative-pad-width error from jnp.pad).
    """
    if len(thetas) > hilbert_size:
        raise ValueError(
            "thetas has {} entries, more than the Hilbert space cutoff {}".format(
                len(thetas), hilbert_size))
    if len(thetas) < hilbert_size:
        # Right-pad with zeros up to the cutoff; equal lengths pass through.
        thetas = jnp.pad(thetas, (0, hilbert_size - len(thetas)), mode="constant")
    return thetas
def snap(hilbert_size, thetas):
    """
    Constructs the matrix for a SNAP gate operation
    that can be applied to a state.

    Args:
        hilbert_size (int): Hilbert space cuttoff
        thetas (:obj:`jnp.ndarray`): A vector of theta values to
            apply SNAP operation

    Returns:
        :obj:`jnp.ndarray`: matrix representing the SNAP gate
    """
    # Sum of e^{i theta_n} |n><n| projectors, accumulated level by level.
    gate = 0 * jnp.eye(hilbert_size)
    for level, angle in enumerate(thetas):
        projector = to_dm(basis(hilbert_size, level))
        gate = gate + jnp.exp(1j * angle) * projector
    return gate
# # Insertion of Blocks
#
# $T$ number of displace-snap-displace blocks,
# $\hat B = D(\alpha) \hat S(\vec \theta) D(\alpha)^{\dagger}$
# are applied on the initial vacuum state, $|0 \rangle$
#
# In this reconstruction we choose $T = 3$, so $3$
# $\hat B$'s are applied to the initial state. The
# aim is to tune $\alpha$'s and $\vec{\theta}$'s
# such that three repeated applications of
# $\hat B$ on $|0 \rangle$
#
# \begin{equation}
# D(\alpha) \hat S(\vec \theta) D(\alpha)^{\dagger}
# D(\alpha) \hat S(\vec \theta) D(\alpha)^{\dagger}
# D(\alpha) \hat S(\vec \theta) D(\alpha)^{\dagger} |0 \rangle
# \end{equation}
#
# lead to the desired
# binomial state $b_{1}$
#
# **Note on Displace operator**: We provide a fast implementation
# of the displacement operation using a diagonal decomposition
# such that a `Displace` class initialises the operator which
# can then be applied for different $\alpha$ repeatedly. This
# construct also supports autodiff with JAX.
#
def apply_blocks(alphas, thetas, initial_state):
    """Applies blocks of displace-snap-displace
    operators to the initial state.

    Args:
        alphas (list): list of alpha paramters
            for the Displace operation
        thetas (list): vector of thetas for the
            SNAP operation
        initial_state(:obj:`jnp.array`): initial
            state to apply the blocks on

    Returns:
        :obj:`jnp.array`: evolved state after
            applying T blocks to the initial
            state
    """
    if len(alphas) != len(thetas):
        raise ValueError("The number of alphas and theta vectors should be same")
    dim = initial_state.shape[0]
    displace = Displace(dim)
    state = initial_state
    for alpha, theta_vec in zip(alphas, thetas):
        # One block B = D(alpha) S(theta) D(alpha)^dagger, applied right-to-left;
        # D(alpha)^dagger = D(-alpha).
        state = jnp.dot(displace(alpha), state)
        state = jnp.dot(snap(dim, theta_vec), state)
        state = jnp.dot(displace(-alpha), state)
    return state
# # Visualizing the state evolution under $\hat B$
#
# Before we move on to make learning routines, it might be a
# good idea to see what `apply_blocks` does to the initial
# state, $|0 \rangle$ with randomly initialized parameters
# $\alpha$ and $\vec{\theta}$
def show_state(state):
    """Shows the Hinton plot and Wigner function for the state"""
    fig, ax = plt.subplots(1, 2, figsize=(11, 5))
    if state.shape[1] == 1:  # State is a ket
        # Ket -> density matrix -> QuTiP Qobj for plotting; the ordinary-numpy
        # round trip is needed because QuTiP is not JAX-compatible.
        dm = Qobj(onp.array(jnp.dot(state, dag(state))))
        hinton(dm, ax=ax[0])
        plot_wigner(dm, ax=ax[1])
    plt.show()
# --- Demo: evolve the vacuum under hand-picked block parameters, then show
# --- the target binomial state the optimization should reach.
N = 10  # Hilbert space cutoff
initial_state = basis(N, 0)  # initial vacuum state
show_state(initial_state)
alphas = jnp.array([1.0, 0.5, 1.0])  # Displace parameters
theta1, theta2, theta3 = [0.5], [0.5, 1.5, 0.5], [0.5, 1.5, 0.5, 1.3]  # SNAP parameters
# NOTE: No input values to JAX differentiable functions should be int
thetas = jnp.array([pad_thetas(N, p) for p in [theta1, theta2, theta3]])
evolved_state = apply_blocks(alphas, thetas, initial_state)
show_state(evolved_state)
# The target state that we aim to reach is visualized below.
# The aim is to act `apply_blocks` function to the initial
# state with the optimized parameters, say $\alpha_{opt}$
# and $\theta_{opt}$ such that we land extremely close to the
# desired binomial state $b_{1}$ as defined above.
target_state = (
    jnp.sqrt(3) * basis(N, 3) + basis(N, 9)
) / 2.0  # target state b1 as shown above|
show_state(target_state)
def cost(params, initial, target):
    """
    Calculates the cost between the target state and
    the one evolved by the action of three blocks.

    Args:
    -----
        params (jnp.array): alpha and theta params of Displace and SNAP respectively
        initial (jnp.array): initial state to apply the blocks on
        target (jnp.array): desired state

    Returns:
    --------
        cost (float): cost at a particular parameter vector
    """
    alphas = params[0]
    thetas = params[1]
    evolved = apply_blocks(alphas, thetas, initial)
    # Infidelity: 0 when the evolved state matches the target exactly.
    overlap = fidelity(target, evolved)[0][0]
    return 1 - overlap
# # Optimization using Adam -- case in point for `qgrad`
#
# This is where the power of `qgrad` comes in.
# Since qgrad's functions used in this notebook -- basis,
# to_dm, dag, Displace, fidelity -- support JAX, we can
# evaluate the gradient of the cost function in one line
# using JAX's `grad`. This saves us painstakingly evaluating
# the derivative of the cost function analytically.
#
# Initial (unoptimized) parameters for the three blocks.
# NOTE(review): the first alpha is written as the int 1 despite the note
# below; jnp.array promotes the whole vector to complex here -- confirm.
alphas = jnp.array([1, 0.2, 0.1 - 1j])  # Displace parameters
theta1, theta2, theta3 = ([0.5], [0.0, 0.5], [0.1, 0.0, 0.0, 0.1])
# NOTE: No input values to JAX differentiable functions should be int
thetas = jnp.array([pad_thetas(N, p) for p in [theta1, theta2, theta3]])
init_params = [alphas, thetas]
# Adam optimizer (jax.experimental.optimizers) with learning rate 1e-2.
opt_init, opt_update, get_params = optimizers.adam(step_size=1e-2)
opt_state = opt_init([alphas, thetas])
def step(i, opt_state, opt_update):
    """Run one Adam update: differentiate `cost` at the current parameters
    (against the module-level initial_state/target_state) and apply it."""
    params = get_params(opt_state)
    g = grad(cost)(params, initial_state, target_state)
    return opt_update(i, g, opt_state)
# --- Training loop: optimize the block parameters for `epochs` Adam steps,
# --- recording fidelity and parameter snapshots for later visualization.
epochs = 150
pbar = tqdm(range(epochs))
fidel_hist = []   # fidelity after each epoch
params_hist = []  # parameter snapshots, consumed by display_evolution below
for i in pbar:
    opt_state = step(i, opt_state, opt_update)
    params = get_params(opt_state)
    params_hist.append(params)
    f = 1 - cost(params, initial_state, target_state)  # fidelity = 1 - cost
    fidel_hist.append(f)
    pbar.set_description("Fidelity {}".format(f))
def display_evolution(parameters, num_plots=4):
    """
    Displays the intermediate states during the learning schedule.

    Args:
        parameters (list): List of device arrays of parameters
            `alpha` and `theta`
        num_plots (int): Number of plots to make generate (except
            the initial and final state plots)

    Returns:
        Evolution of the state at `num_plots` equidistant
        times during the training

    Note:
        Reads the module-level `epochs` and `initial_state`.
        NOTE(review): if num_plots > epochs, `diff` becomes 0 and the
        range() below raises ValueError -- confirm acceptable for a demo.
    """
    show_state(initial_state)
    diff = epochs // int(num_plots)
    for i in range(0, epochs, diff):
        alphas, thetas = parameters[i]
        x = apply_blocks(alphas, thetas, initial_state)
        show_state(x)
    # final parameters after last epoch
    alphas, thetas = parameters[-1]
    x = apply_blocks(alphas, thetas, initial_state)
    show_state(x)
display_evolution(params_hist)
# # Conclusion
#
# We see that starting from a vacuum state $|0 \rangle$,
# we efficiently learn the target state
# $b_{1} = \frac{\sqrt 3 |3> + |9>}{2}$, as is corroborated
# by the fidelity plot below.
#
# The desired target state's Hinton plot and Wigner function
# are shown before the learning scedule starts. It can be
# seen that the last row above is _almost_ the same as the
# target state, implying | |
<gh_stars>0
"""
(B^t F^t Cov^-1 d)^a(z) (D dxi_unl/da D^t B^t F^t Cov^-1 d)_a(z)
Only lib_skys enter this. Sign is correct for pot. estimate, not gradient.
This can written as (D_f (Res lms))(z) (D_f P_a Res lms)(z) * |M_f|(z)
Similarly the mean field can be written as the diagonal
|M_f|(z) (i k_a P D^t B^t Covi B D)(f(z),f(z))
= |M_f|(z) (i k_a (Pi + D^tB^tNiBD)^{-1}P^{-1})(f(z),f(z))
"""
from __future__ import print_function
import numpy as np
from lensit.misc.misc_utils import timer
from lensit.ffs_deflect.ffs_deflect import ffs_id_displacement
from lensit.ffs_covs import ffs_specmat as SM
verbose = False
typs = ['T', 'QU', 'TQU']
def get_qlms_wl(typ, lib_sky, TQU_Mlik, ResTQU_Mlik, lib_qlm, f=None, lib_sky2=None, subtract_zeromode=False, use_Pool=0, **kwargs):
    """
    Stand alone qlm estimator starting from lib_sky and unlensed Cls

    Likelihood gradient (from the quadratic part).
    (B^t F^t Cov^-1 d)^a(z) (D dxi_unl/da D^t B^t F^t Cov^-1 d)_a(z)
    Only lib_skys enter this.  Sign is correct for pot. estimate, not gradient.
    This can written as (D_f (Res lms))(z) (D_f P_a Res lms)(z) * |M_f|(z)
    Only forward displacement is needed.
    Res lms is here D^t B^t Cov^-1 data; P_a Res lms are the max. likelihood modes.
    N0 * output is normalized qest for MV estimates.

    `lib_sky2` optionally provides a distinct alm library for the
    Wiener-filtered (gradient) leg; it defaults to `lib_sky`.
    """
    # Bug fix: the original read `else lib_sky`, which silently discarded a
    # caller-supplied lib_sky2 and always aliased it back to lib_sky.
    lib_sky2 = lib_sky if lib_sky2 is None else lib_sky2
    if typ in ['EE', 'EB', 'BE', 'BB']:
        # Polarization-only estimators: zero the unwanted E or B component on
        # each leg, then re-enter as a plain 'QU' estimate.
        TEB_Mlik = lib_sky.QUlms2EBalms(TQU_Mlik)
        TEB_Res = lib_sky.QUlms2EBalms(ResTQU_Mlik)
        TEB_Mlik[{'E': 1, 'B': 0}[typ[0]]] *= 0.
        TEB_Res[{'E': 1, 'B': 0}[typ[1]]] *= 0.
        return get_qlms_wl('QU', lib_sky, lib_sky.EBlms2QUalms(TEB_Mlik), lib_sky2.EBlms2QUalms(TEB_Res), lib_qlm,
                           f=f, use_Pool=use_Pool, lib_sky2=lib_sky2)
    assert len(TQU_Mlik) == len(typ) and len(ResTQU_Mlik) == len(typ)
    t = timer(verbose, prefix=__name__)
    if f is None: f = ffs_id_displacement(lib_sky.shape, lib_sky.lsides)

    def left(id):
        # Residual leg in real space.
        assert id in range(len(typ)), (id, typ)
        return lib_sky.alm2map(ResTQU_Mlik[id])

    def Right(S_id, axis):
        # Lensed map of the Wiener-filtered leg's derivative (axis 1 -> d/dx,
        # axis 0 -> d/dy), built on lib_sky2 and displaced by f.
        assert S_id in range(len(typ)), (S_id, typ)
        assert axis in [0, 1]
        kfunc = lib_sky2.get_ikx if axis == 1 else lib_sky2.get_iky
        return f.alm2lenmap(lib_sky2, TQU_Mlik[S_id] * kfunc(), use_Pool=use_Pool)

    # Accumulate the Cartesian gradient components over all Stokes fields.
    retdx = left(0) * Right(0, 1)
    for _i in range(1, len(typ)): retdx += left(_i) * Right(_i, 1)
    t.checkpoint("get_likgrad::Cart. gr. x done. (%s map(s) lensed, %s fft(s)) " % (len(typ), 2 * len(typ) + 1))
    retdy = left(0) * Right(0, 0)
    for _i in range(1, len(typ)): retdy += left(_i) * Right(_i, 0)
    t.checkpoint("get_likgrad::Cart. gr. y done. (%s map(s) lensed, %s fft(s)) " % (len(typ), 2 * len(typ) + 1))
    if subtract_zeromode:
        # Remove the map mean from each gradient component.
        zro_x = np.sum(retdx)
        zro_y = np.sum(retdy)
        print('zero mode:', zro_x, zro_y)
        retdx[0, 0] -= zro_x
        retdy[0, 0] -= zro_y
    retdx = lib_qlm.map2alm(retdx)
    retdy = lib_qlm.map2alm(retdy)
    # Rotate the Cartesian gradient into (gradient, curl) qlm components.
    return np.array([- retdx * lib_qlm.get_ikx() - retdy * lib_qlm.get_iky(),
                     retdx * lib_qlm.get_iky() - retdy * lib_qlm.get_ikx()])  # N0 * output is normalized qest
def _Mlik2ResTQUMlik_diag(field, ninv_filt, TQUMlik, data, f, fi):
    """
    Produces B^t Ni (data - B D Mlik) in TQU space,
    that is fed into the qlm estimator.
    """
    assert field in ['T', 'Q', 'U']
    f_id = ffs_id_displacement(ninv_filt.lib_skyalm.shape, ninv_filt.lib_skyalm.lsides)
    # Temporarily install the (forward, inverse) displacements on the filter.
    ninv_filt.set_ffi(f, fi)
    _map = data - ninv_filt.apply_R(field, TQUMlik)
    # NOTE(review): `f` (the displacement) is passed as the first argument of
    # apply_map, where the other filter calls pass the field label -- confirm
    # apply_map's expected signature.
    ninv_filt.apply_map(f, _map, inplace=True)
    # Restore the identity displacement before applying the transpose operator.
    ninv_filt.set_ffi(f_id, f_id)
    return ninv_filt.apply_Rt(field, _map)
def get_response(typ,lib_datalm,cls_len,NlevT_uKamin,NlevP_uKamin,cl_transf,
                wAl = None,wBl = None,fAl = None,fBl = None,lib_qlm = None):
    """ Q. estimator response

    Flat-sky response of the typ[0]-typ[1] quadratic estimator, returned as
    (gradient, curl) alms on lib_qlm (defaulting to lib_datalm).  wAl/wBl are
    the legs' weight spectra and fAl/fBl their isotropic inverse-variance
    filters; any left to None is built from cls_len, the noise levels
    NlevT_uKamin/NlevP_uKamin (uK.arcmin) and the transfer function cl_transf.
    Only E/B legs are implemented.
    """
    assert typ[0] in ['T','E','B'] and typ[1] in ['T','E','B']
    assert typ[0] in ['E','B'] and typ[1] in ['E','B'], "T not implemented"
    assert 'eb' not in cls_len.keys() and 'be' not in cls_len.keys()
    assert 'tb' not in cls_len.keys() and 'bt' not in cls_len.keys()
    lmax = lib_datalm.ellmax
    if wAl is None: wAl = np.ones(lmax + 1,dtype = float)
    if wBl is None: wBl = cls_len[(typ[1] + typ[1]).lower()][:lmax + 1]
    if fAl is None:
        # Default filter for leg A: 1 / (C_l + (N_lev / b_l)^2) on multipoles
        # where the transfer function b_l is nonzero; 0 elsewhere.
        Nlev = NlevT_uKamin if typ[0] == 'T' else NlevP_uKamin
        ii = np.where(cl_transf[:lmax + 1] > 0.)
        fAl = np.zeros(lmax + 1,dtype = float)
        fAl[ii] = 1./ (cls_len[(typ[0] + typ[0]).lower()][ii] + ( (Nlev / 60. /180. * np.pi)/ cl_transf[ii]) ** 2)
    if fBl is None:
        # Same construction for leg B.
        Nlev = NlevT_uKamin if typ[1] == 'T' else NlevP_uKamin
        ii = np.where(cl_transf[:lmax + 1] > 0.)
        fBl = np.zeros(lmax + 1,dtype = float)
        fBl[ii] = 1./ (cls_len[(typ[1] + typ[1]).lower()][ii] + ( (Nlev / 60. /180. * np.pi)/ cl_transf[ii]) ** 2)
    if lib_qlm is None: lib_qlm = lib_datalm

    def get_pmat(A, i, j, clA):
        # (i, j) Stokes-space component of the spectrum clA projected onto
        # field A, using cos/sin of 2*phi_k for the E/B geometry.
        if A == 'T':
            if i == 0 and j == 0:
                return clA[lib_datalm.reduced_ellmat()]
            else:
                assert 0,('zero',i,j)
        elif A == 'E':
            cos, sin = lib_datalm.get_cossin_2iphi()
            if i == 1 and j == 1:
                return clA[lib_datalm.reduced_ellmat()] * cos ** 2
            elif i == 2 and j == 2:
                return clA[lib_datalm.reduced_ellmat()] * sin ** 2
            elif i == 2 and j == 1:
                return clA[lib_datalm.reduced_ellmat()] * cos * sin
            elif i == 1 and j == 2:
                return clA[lib_datalm.reduced_ellmat()] * cos * sin
            else:
                assert 0,('zero',i,j)
        elif A == 'B':
            cos, sin = lib_datalm.get_cossin_2iphi()
            if i == 1 and j == 1:
                return clA[lib_datalm.reduced_ellmat()] * sin ** 2
            elif i == 2 and j == 2:
                return clA[lib_datalm.reduced_ellmat()] * cos ** 2
            elif i == 1 and j == 2:
                return -clA[lib_datalm.reduced_ellmat()] * cos * sin
            elif i == 2 and j == 1:
                return -clA[lib_datalm.reduced_ellmat()] * cos * sin
            else:
                assert 0,('zero',i,j)
        else:
            assert 0,(A,['T','E','B'])
    # Real-space accumulation of the four Cartesian response components.
    retxx = np.zeros(lib_datalm.shape,dtype = float)
    retyy = np.zeros(lib_datalm.shape,dtype = float)
    retxy = np.zeros(lib_datalm.shape,dtype = float)
    retyx = np.zeros(lib_datalm.shape,dtype = float)
    _2map = lambda alm : lib_datalm.alm2map(alm)
    ikx = lambda : lib_datalm.get_ikx()
    iky = lambda: lib_datalm.get_iky()
    # First term: derivatives acting on the weighted leg B.
    clB = wBl * fBl * cls_len[(typ[1] + typ[1]).lower()][:lmax + 1]
    clA = wAl * fAl
    for i, j in [(1, 1),(1, 2),(2, 1),(2, 2)]:
        retxx += _2map(get_pmat(typ[0],i,j, clA)) * _2map(ikx() ** 2 * get_pmat(typ[1],j,i,clB ))
        retyy += _2map(get_pmat(typ[0],i,j, clA)) * _2map(iky() ** 2 * get_pmat(typ[1],j,i,clB ))
        retxy += _2map(get_pmat(typ[0],i,j, clA)) * _2map(ikx() * iky() * get_pmat(typ[1],j,i,clB ))
        retyx += _2map(get_pmat(typ[0],i,j, clA)) * _2map(ikx() * iky() * get_pmat(typ[1],j,i,clB ))
    # Second term: one derivative on each leg, spectrum moved to leg A.
    clB = wBl * fBl
    clA = wAl * fAl * cls_len[(typ[0] + typ[0]).lower()][:lmax + 1]
    for i, j in [(1, 1), (1, 2), (2, 1), (2, 2)]:
        retxx += _2map(ikx() * get_pmat(typ[0], i, j, clA)) * _2map(ikx() * get_pmat(typ[1], j, i, clB))
        retyy += _2map(iky() * get_pmat(typ[0], i, j, clA)) * _2map(iky() * get_pmat(typ[1], j, i, clB))
        retxy += _2map(ikx() * get_pmat(typ[0], i, j, clA)) * _2map(iky() * get_pmat(typ[1], j, i, clB))
        retyx += _2map(iky() * get_pmat(typ[0], i, j, clA)) * _2map(ikx() * get_pmat(typ[1], j, i, clB))
    # Back to harmonic space on the qlm grid; rotate Cartesian second
    # derivatives into (gradient, curl) responses.
    fac = 1. / np.sqrt(np.prod(lib_datalm.lsides))
    _2alm = lambda _map : lib_qlm.map2alm(_map)
    retxx = _2alm(retxx)
    retyy = _2alm(retyy)
    retxy = _2alm(retxy)
    retyx = _2alm(retyx)
    ikx = lambda : lib_qlm.get_ikx()
    iky = lambda : lib_qlm.get_iky()
    return np.array([fac * (retxx * ikx() ** 2 + retyy * iky() ** 2 + (retxy + retyx) * ikx() * iky()),
                     fac * (retxx * iky() ** 2 + retyy * ikx() ** 2 - (retxy + retyx) * ikx() * iky()) ])
class MFestimator:
    def __init__(self, ninv_filt, opfilt, mchain, lib_qlm, pix_pha=None, cmb_pha=None, use_Pool=0):
        """Mean-field estimator.

        ninv_filt: inverse-noise filter instance; provides lib_datalm / lib_skyalm,
                   get_mask, and optionally a displacement field 'f' (see get_MFqlms).
        opfilt:    operator-filter module used by the solver (typ, TEBlen, soltn2TQUMlik).
        mchain:    solver chain whose .solve fills the Wiener-filter solution.
        lib_qlm:   alm library for the output quadratic-estimator multipoles.
        pix_pha:   optional pixel-phase simulation library (provides get_sim).
        cmb_pha:   optional CMB-phase simulation library.
        use_Pool:  parallelization switch (0 = serial).
        """
        self.ninv_filt = ninv_filt
        self.opfilt = opfilt
        self.mchain = mchain
        self.lib_qlm = lib_qlm
        self.pix_pha = pix_pha
        self.cmb_pha = cmb_pha
        self.use_Pool = use_Pool
    def npix(self):
        """Number of data pixels, as reported by the inverse-noise filter."""
        return self.ninv_filt.npix
def get_MFqlms(self, typ, MFkey, idx, soltn=None):
lib_sky = self.ninv_filt.lib_skyalm
lib_dat = self.ninv_filt.lib_datalm
assert lib_sky.lsides == lib_dat.lsides
self.opfilt.typ = typ
if hasattr(self.ninv_filt, 'f'):
print("******* I am using displacement for ninvfilt in MFest")
else:
print("******* Using id displacement in MFest")
f = getattr(self.ninv_filt, 'f', ffs_id_displacement(lib_sky.shape, lib_sky.lsides))
if MFkey == 12:
# B^t M^t X (x) (D ika P D^t B^t Covi X )(x). Second term are just the deflected gradients of the recontructed
assert self.pix_pha is not None
if soltn is None:
soltn = np.zeros((self.opfilt.TEBlen(typ), self.ninv_filt.lib_skyalm.alm_size), dtype=complex)
phas = self.pix_pha.get_sim(idx)[0:len(typ)]
for i, _f in enumerate(typ): phas[i] *= self.ninv_filt.get_mask(_f.lower())
self.mchain.solve(soltn, phas, finiop='MLIK')
TQUMlik = self.opfilt.soltn2TQUMlik(soltn, self.ninv_filt)
norm = np.prod(lib_dat.shape) / (np.prod(lib_dat.lsides))
def Left(id):
_alm = lib_sky.udgrade(lib_dat, lib_dat.map2alm(phas[id]))
return lib_sky.alm2map(lib_sky.almxfl(_alm, norm * | |
<reponame>lucidworks/solr-scale-tk<gh_stars>10-100
from fabric.api import *
from fabric.exceptions import NetworkError as _NetworkError
from fabric.colors import green as _green, blue as _blue, red as _red, yellow as _yellow
from fabric.contrib.files import append as _fab_append, exists as _fab_exists
from fabric.contrib.console import confirm
from StringIO import StringIO as _strio
import boto, boto3
from boto.s3.key import Key as _S3Key
import boto.emr
from boto.emr.step import InstallPigStep as _InstallPigStep
from boto.emr.step import PigStep as _PigStep
from random import shuffle as _shuffle
from urllib import urlretrieve as _urlretrieve
import boto.ec2
import time
import sys
import os
import urllib2
import getpass
import json
import pysolr
import os.path
import fnmatch
import datetime
import dateutil.parser
import shutil
import socket
import boto.vpc
import ntpath
from distutils.version import StrictVersion
# Global constants used in this module; only change this if you know what you're doing ;-)
# EC2 tag names used to mark instances owned by this tool.
CLUSTER_TAG = 'cluster'
USERNAME_TAG = 'username'
INSTANCE_STORES_TAG = 'numInstanceStores'
# Default AMI / placement / sizing for newly launched instances.
AWS_HVM_AMI_ID = 'ami-4d767836'
AWS_AZ = 'us-west-2b'
AWS_INSTANCE_TYPE = 'r3.large'
AWS_SECURITY_GROUP = 'solr-scale-tk'
AWS_KEY_NAME = 'solr-scale-tk'
# Remote SSH account and filesystem layout on the launched instances.
ssh_user = 'ec2-user'
user_home = '/home/' + ssh_user
ssh_keyfile_path_on_local = '~/.ssh/solr-scale-tk.pem'
zk_data_dir = '/vol0/data'
CTL_SCRIPT = 'solr-ctl.sh'
ENV_SCRIPT = 'solr-ctl-env.sh'
# default config settings if not specifically overridden in the user's ~/.sstk file
# (${...} placeholders are expanded lazily by _expand_config_var / _env)
_config = {}
_config['provider'] = 'ec2'
_config['user_home'] = user_home
_config['ssh_keyfile_path_on_local'] = ssh_keyfile_path_on_local
_config['ssh_user'] = ssh_user
_config['solr_java_home'] = '${user_home}/jdk1.8.0_172'
_config['solr_tip'] = '${user_home}/solr-7.3.1'
_config['zk_home'] = '${user_home}/zookeeper-3.4.10'
_config['zk_data_dir'] = zk_data_dir
_config['sstk_cloud_dir'] = '${user_home}/cloud'
_config['SSTK_ENV'] = '${sstk_cloud_dir}/' + ENV_SCRIPT
_config['SSTK'] = '${sstk_cloud_dir}/' + CTL_SCRIPT
_config['AWS_HVM_AMI_ID'] = AWS_HVM_AMI_ID
_config['AWS_AZ'] = AWS_AZ
_config['AWS_SECURITY_GROUP'] = AWS_SECURITY_GROUP
_config['AWS_INSTANCE_TYPE'] = AWS_INSTANCE_TYPE
_config['AWS_KEY_NAME'] = AWS_KEY_NAME
_config['fusion_home'] = '${user_home}/fusion/4.0.0'
_config['fusion_vers'] = '4.0.0'
_config['connector_memory_in_gb'] = '1'
_config['owner'] = getpass.getuser()
# Number of ephemeral instance-store volumes per EC2 instance type; used to
# decide how many local disks can be mounted on launch.
instanceStoresByType = {'m2.small':0, 't2.medium':0, 't2.large':0, 't2.xlarge':0,
                        'm3.medium':1, 'm3.large':1, 'm3.xlarge':2, 'm3.2xlarge':2,
                        'i2.4xlarge':4,'i2.2xlarge':2, 'i2.8xlarge':8,
                        'r3.large':1, 'r3.xlarge':1, 'r3.2xlarge':1, 'r3.4xlarge':1, 'c3.2xlarge':2,
                        'r4.large':0, 'r4.xlarge':0, 'r4.2xlarge':0, 'r4.4xlarge':0, 'r4.8xlarge':0,
                        'm4.large':0, 'm4.xlarge':0, 'm4.2xlarge':0, 'm4.4xlarge':0, 'm4.8xlarge':0 }
class _HeadRequest(urllib2.Request):
    """urllib2 Request that issues an HTTP HEAD instead of GET (liveness probes)."""
    def get_method(self):
        return 'HEAD'
def _status(msg):
    # progress/status messages render yellow in the fab console
    print(_yellow(msg))
def _info(msg):
    # informational messages render green in the fab console
    print(_green(msg))
def _warn(msg):
    # warnings render blue with a WARN: prefix
    print(_blue('WARN: ' + msg))
def _error(msg):
    # errors go to stderr in red, framed for visibility; does NOT exit (see _fatal)
    sys.stderr.write(_red('\n\t************************'))
    sys.stderr.write(_red('\n\tERROR: %s\n' % str(msg)))
    sys.stderr.write(_red('\t************************\n\n'))
# Helper to log a message and kill the application after a fatal error occurs.
def _fatal(msg):
    _error(msg)
    exit(1)
def _copy_dir(src, dest):
try:
shutil.copytree(src, dest)
# Directories are the same
except shutil.Error as e:
print('Directory not copied. Error: %s' % e)
# Any error saying that the directory doesn't exist
except OSError as e:
print('Directory not copied. Error: %s' % e)
def _runbg( command, out_file="/dev/null", err_file=None, shell=True, pty=False ):
    # Launch `command` on the remote host under nohup so it survives the SSH
    # session ending; stdout goes to out_file and stderr to err_file (folded
    # into stdout when err_file is None).
    _info('_runbg: nohup %s >%s 2>%s </dev/null &' % (command, out_file, err_file or '&1'))
    run('nohup %s >%s 2>%s </dev/null &' % (command, out_file, err_file or '&1'), shell, pty)
def _save_config():
    """Persist the in-memory sstk config to ~/.sstk as pretty-printed JSON."""
    sstk_cfg = _get_config()
    sstkCfg = os.path.expanduser('~/.sstk')
    # 'with' guarantees the handle is closed even if serialization fails
    with open(sstkCfg, 'w') as sstkCfgFile:
        sstkCfgFile.write(json.dumps(sstk_cfg, indent=2))
def _expand_config_var(cluster, val):
    """Expand every ${name} placeholder in `val` via _env until none remain.

    _env fatals on unresolvable names, so the loop cannot spin forever.
    """
    if len(val) > 0:
        start = val.find('${')
        while start != -1:
            end = val.find('}', start + 1)
            token = val[start:end + 1]
            resolved = _env(cluster, token[2:-1])
            val = val.replace(token, resolved)
            start = val.find('${')
    return val
def _get_config():
    """Lazily load and cache the user's ~/.sstk JSON config.

    Returns the cached dict on subsequent calls; an absent config file
    yields (and caches) an empty dict.
    """
    # 'in' instead of dict.has_key(): works on both Python 2 and 3
    if 'sstk_cfg' in _config:
        return _config['sstk_cfg']
    sstkCfg = os.path.expanduser('~/.sstk')
    if os.path.isfile(sstkCfg) is False:
        _config['sstk_cfg'] = {}
    else:
        # 'with' closes the handle even if the JSON is malformed
        with open(sstkCfg) as sstkCfgFile:
            _config['sstk_cfg'] = json.load(sstkCfgFile)
    return _config['sstk_cfg']
# resolves an environment property by first checking the cluster specific settings,
# then user specific settings, then global settings
def _env(cluster, key, defaultValue=None):
    """Resolve config property `key`.

    Lookup order: cluster-specific settings -> user ~/.sstk settings ->
    built-in _config defaults -> defaultValue. Fatals when the property
    cannot be resolved at all. ${...} placeholders in the value are expanded.

    BUG FIX: the original file defined `_env` twice; the first (2-arg)
    definition was immediately shadowed by this one and was dead code, so
    the two have been merged into a single definition with a default.
    """
    if key is None:
        _fatal('Property key is required!')
    val = None
    sstk_cfg = _get_config()
    # 'in' instead of dict.has_key(): works on both Python 2 and 3
    if cluster is not None:
        clusters = sstk_cfg.get('clusters', {})
        if cluster in clusters and key in clusters[cluster]:
            val = clusters[cluster][key]
    if val is None and key in sstk_cfg:
        val = sstk_cfg[key]
    if val is None and key in _config:
        val = _config[key]
    if val is None:
        val = defaultValue
    if val is None:
        _fatal('Unable to resolve required property setting: ' + key)
    return _expand_config_var(cluster, val)
def _get_ec2_provider(region):
    # Connect to an explicit EC2 region when configured; otherwise fall back
    # to boto's default region resolution (env vars / ~/.boto).
    _info("Region used: " + str(region))
    if region is not None:
        return boto.ec2.connect_to_region(region)
    else:
        return boto.connect_ec2()
class _LocalProvider:
"""Local provider (instead of EC2)"""
def type(self):
return 'local'
def get_all_instances(self,filters=None):
return []
def close(self):
return
# Get a handle to a Cloud Provider API (or mock for local mode)
def _provider_api(cluster ='ec2'):
    """Return a provider handle for `cluster`: a boto EC2 connection or a
    _LocalProvider. The resolved provider name is cached in the config dict.
    """
    sstk_cfg = _get_config()
    # 'in' instead of dict.has_key(): works on both Python 2 and 3
    clusters = sstk_cfg.get('clusters', {})
    if cluster in clusters:
        if 'provider' not in clusters[cluster]:
            sstk_cfg['provider'] = 'ec2' # default
        else:
            sstk_cfg['provider'] = clusters[cluster]['provider']
    else:
        sstk_cfg['provider'] = 'ec2'
    provider = sstk_cfg['provider']
    if provider == 'ec2':
        # None region means boto picks its own default
        region = sstk_cfg.get('region')
        return _get_ec2_provider(region)
    elif provider == 'local':
        return _LocalProvider()
    else:
        _fatal(provider+' not supported! Please correct your ~/.sstk configuration file.')
    return None
# Polls until instances are running, up to a max wait
def _poll_for_running_status(rsrv, maxWait=180):
    """Poll the given list of boto instances until all report 'running' or
    `maxWait` seconds elapse. Returns the number of instances seen running.
    """
    _info('Waiting for ' + str(len(rsrv)) + ' instances to start (will wait for a max of 3 minutes) ...')
    startedAt = time.time()
    waitTime = 0
    sleepInterval = 15
    runningSet = set([])   # instance ids already confirmed running
    allRunning = False
    while allRunning is False and waitTime < maxWait:
        allRunning = True
        for inst in rsrv:
            if inst.id in runningSet:
                continue
            status = inst.update()  # refresh state from the EC2 API
            _status('Instance %s has status %s after %d seconds' % (inst.id, status, (time.time() - startedAt)))
            if status == 'running':
                runningSet.add(inst.id)
            else:
                allRunning = False
        if allRunning is False:
            time.sleep(sleepInterval)
        waitTime = round(time.time() - startedAt)
    if allRunning:
        _info('Took %d seconds to launch %d instances.' % (time.time() - startedAt, len(runningSet)))
    else:
        # BUG FIX: `rsrv` is a plain list of instances (it is iterated
        # directly above), so the original `len(rsrv.instances)` raised
        # AttributeError exactly on the timeout path.
        _warn('Only %d of %d instances running after waiting %d seconds' % (len(runningSet), len(rsrv), waitTime))
    return len(runningSet)
def _find_instances_in_cluster(cloud, cluster, onlyIfRunning=True):
    # Map instance-id -> address for all instances carrying the cluster tag.
    # Public DNS is preferred; falls back to private IP for instances in a
    # private subnet. Instances with neither address are silently skipped.
    tagged = {}
    byTag = cloud.get_all_instances(filters={'tag:' + CLUSTER_TAG:cluster})
    _info("find_instance by tag: {0} for cloud: {1} and cluster: {2}".format(byTag, cloud, cluster))
    for rsrv in byTag:
        for inst in rsrv.instances:
            _info("Checking instance: {0}".format(inst))
            if (onlyIfRunning and inst.state == 'running') or onlyIfRunning is False:
                if inst.public_dns_name:
                    tagged[inst.id] = inst.public_dns_name
                elif inst.private_ip_address: #we may be launching in a private subnet
                    tagged[inst.id] = inst.private_ip_address
    return tagged
def _find_all_instances(cloud, onlyIfRunning=True):
    # Map instance-id -> boto instance object for every tagged instance.
    # NOTE(review): the filters dict literal repeats the 'tag-key' key, so
    # only the second entry ('username') survives -- the 'cluster' filter is
    # dead. If instances must carry BOTH tags, the filter needs to be
    # expressed differently in boto; confirm intent before changing.
    tagged = {}
    byTag = cloud.get_all_instances(filters={'tag-key':'cluster','tag-key':'username'})
    for rsrv in byTag:
        for inst in rsrv.instances:
            if (onlyIfRunning and inst.state == 'running') or onlyIfRunning is False:
                tagged[inst.id] = inst
    return tagged
def _find_user_instances(cloud, username, onlyIfRunning=True):
    """Map instance-id -> boto instance for all instances tagged with
    `username`. The AWS tag query is retried once because the API sometimes
    returns an empty result spuriously.
    """
    userFilter = {'tag:' + USERNAME_TAG: username}
    reservations = cloud.get_all_instances(filters=userFilter)
    if len(reservations) == 0:
        # one retry for flaky AWS tag queries
        time.sleep(1)
        reservations = cloud.get_all_instances(filters=userFilter)
        if len(reservations) > 0:
            _warn('AWS API is acting flakey! First call to find instances for '+username+' found 0, now it found: '+str(len(reservations)))
    tagged = {}
    for rsrv in reservations:
        for inst in rsrv.instances:
            if (onlyIfRunning and inst.state == 'running') or onlyIfRunning is False:
                tagged[inst.id] = inst
    return tagged
def _is_solr_up(hostAndPort):
    # Probe the Solr admin UI with an HTTP HEAD request. Any successful
    # response counts as "up"; every exception is deliberately swallowed
    # because this is only a liveness poll used while waiting for startup.
    isSolrUp = False
    try:
        urllib2.urlopen(_HeadRequest('http://%s/solr/#/' % hostAndPort))
        # if no exception on the ping, assume the HTTP listener is up
        _info('Solr at ' + hostAndPort + ' is online.')
        isSolrUp = True
    except:
        # ignore it as we're just checking if the HTTP listener is up
        # print "Unexpected error:", sys.exc_info()[0]
        isSolrUp = False
    return isSolrUp
# Test for SSH connectivity to an instance
def _ssh_to_new_instance(host):
sshOk = False
with settings(host_string=host), hide('output', 'running', 'warnings'):
try:
run('whoami')
sshOk = True
except _NetworkError as e:
print e
sskOk = False
except:
print "Unexpected error:", sys.exc_info()[0]
sshOk = False
return sshOk
def _cluster_hosts(cloud, cluster):
    """Resolve the sorted list of SSH-able hosts for `cluster` and prime
    Fabric's env (user, key file) for subsequent remote commands.

    Hosts cached in ~/.sstk are preferred; otherwise the cloud provider API
    is queried. Exits fatally when no running hosts can be found.
    """
    clusterHosts = None
    sstkCfg = _get_config()
    # 'in' instead of dict.has_key(): works on both Python 2 and 3
    if 'clusters' in sstkCfg and cluster in sstkCfg['clusters']:
        if 'hosts' in sstkCfg['clusters'][cluster]:
            clusterHosts = sstkCfg['clusters'][cluster]['hosts']
    _info("Cluster Hosts: {0}".format(clusterHosts))
    if clusterHosts is None:
        # not cached locally ... must hit provider API
        clusterHosts = []
        taggedInstances = _find_instances_in_cluster(cloud, cluster)
        for key in taggedInstances.keys():
            clusterHosts.append(taggedInstances[key])
        if len(clusterHosts) == 0:
            _fatal('No active hosts found for cluster ' + cluster + '! Check your command line args and re-try')
    # use a predictable order each time
    clusterHosts.sort()
    # setup the Fabric env for SSH'ing to this cluster
    ssh_user = _env(cluster, 'ssh_user')
    ssh_keyfile = _env(cluster, 'ssh_keyfile_path_on_local')
    if len(ssh_keyfile) > 0 and os.path.isfile(os.path.expanduser(ssh_keyfile)) is False:
        _fatal('SSH key file %s not found!' % ssh_keyfile)
    env.hosts = []
    env.user = ssh_user
    env.key_filename = ssh_keyfile
    return clusterHosts
def _verify_ssh_connectivity(hosts, maxWait=120):
# if using localhost for this cluster, no need to SSH
if len(hosts) == 1 and hosts[0] == 'localhost':
return
_status('Verifying SSH connectivity to %d hosts (will wait up to %d secs) ... please be patient as this can take a few minutes if EC2 is being cranky!' % (len(hosts), maxWait))
waitTime = 0
startedAt = time.time()
hasConn = False
sshSet = set([])
while hasConn is False and waitTime < maxWait:
hasConn = True # assume true and prove false with SSH failure
for host in hosts:
_info("Trying to connect to " + host)
if (host in sshSet) is False:
if _ssh_to_new_instance(host):
sshSet.add(host)
else:
hasConn = False
if hasConn is False:
time.sleep(5)
waitTime = round(time.time() - startedAt)
_status('Waited %d seconds so far to verify SSH connectivity to %d hosts' % (waitTime, len(hosts)))
if hasConn:
_info('Verified SSH connectivity to %d hosts.' % len(sshSet))
else:
_warn('SSH connectivity verification timed out after %d seconds! Verified %d | |
<reponame>qtl-bodc/COAsT<filename>coast/general_utils.py
from dask import delayed
from dask import array
import xarray as xr
import numpy as np
from dask.distributed import Client
from warnings import warn
import copy
import scipy as sp
from .logging_util import get_slug, debug, info, warn, error
import sklearn.neighbors as nb
def subset_indices_by_distance_BT(longitude, latitude, centre_lon, centre_lat,
                                  radius: float, mask=None
                                  ):
    """
    Returns the indices of points that lie within a specified radius (km) of
    central latitude and longitudes. This makes use of BallTree.query_radius.
    Parameters
    ----------
    longitude : (numpy.ndarray) longitudes in degrees
    latitude : (numpy.ndarray) latitudes in degrees
    centre_lon : Central longitude. Can be single value or array of values
    centre_lat : Central latitude. Can be single value or array of values
    radius : (float) Radius in km within which to find indices
    mask : (numpy.ndarray) of same dimension as longitude and latitude.
    If specified, will mask out points from the routine.
    Returns
    -------
    Returns an array of indices corresponding to points within radius.
    If more than one central location is specified, this will be a list
    of index arrays. Each element of which corresponds to one centre.
    If longitude is 1D:
        Returns one array of indices per central location
    If longitude is 2D:
        Returns arrays of x and y indices per central location.
        ind_y corresponds to row indices of the original input arrays.
    """
    # change inputs to numpy
    longitude = np.array(longitude)
    latitude = np.array(latitude)
    centre_lon = np.array(centre_lon)
    centre_lat = np.array(centre_lat)
    # Calculate radius in radians (great-circle angle = arc length / Earth radius)
    earth_radius = 6371
    r_rad = radius/earth_radius
    # For reshaping indices at the end
    original_shape = longitude.shape
    # Check if radius centres are numpy arrays. If not, make them into ndarrays
    if not isinstance(centre_lon, (np.ndarray)):
        centre_lat = np.array(centre_lat)
        centre_lon = np.array(centre_lon)
    # Determine number of centres provided
    n_pts = 1 if centre_lat.shape==() else len(centre_lat)
    # If a mask is supplied, remove indices from arrays. Flatten input ready
    # for BallTree
    # NOTE(review): masked points are set to NaN rather than removed, so they
    # remain in the tree -- confirm BallTree/haversine excludes them reliably.
    if mask is None:
        longitude = longitude.flatten()
        latitude = latitude.flatten()
    else:
        longitude[mask] = np.nan
        latitude[mask] = np.nan
        longitude = longitude.flatten()
        latitude = latitude.flatten()
    # Put lons and lats into 2D location arrays for BallTree: [lat, lon]
    locs = np.vstack((latitude, longitude)).transpose()
    locs = np.radians(locs)
    # Construct central input to BallTree.query_radius
    if n_pts==1:
        centre = np.array([[centre_lat, centre_lon]])
    else:
        centre = np.vstack((centre_lat, centre_lon)).transpose()
    centre = np.radians(centre)
    # Do nearest neighbour interpolation using BallTree (gets indices)
    tree = nb.BallTree(locs, leaf_size=2, metric='haversine')
    ind_1d = tree.query_radius(centre, r = r_rad)
    if len(original_shape) == 1:
        return ind_1d
    else:
        # Get 2D indices from 1D index output from BallTree
        ind_y = []
        ind_x = []
        for ii in np.arange(0,n_pts):
            # NOTE(review): np.unravel_index returns (row, col); rows land in
            # x_tmp / ind_x here, which is the opposite of nearest_indices_2D's
            # (ind_y, ind_x) unpacking -- confirm which axis callers expect.
            x_tmp, y_tmp = np.unravel_index(ind_1d[ii], original_shape)
            ind_x.append(x_tmp.squeeze())
            ind_y.append(y_tmp.squeeze())
        if n_pts==1:
            return ind_x[0], ind_y[0]
        else:
            return ind_x, ind_y
def subset_indices_by_distance(
    longitude, latitude, centre_lon: float, centre_lat: float,
    radius: float
):
    """
    Return the indices of all points lying within `radius` (km, haversine)
    of the central lon/lat point.

    :param centre_lon: The longitude of the users central point
    :param centre_lat: The latitude of the users central point
    :param radius: The haversine distance (in km) from the central point
    :return: indices as xr.DataArray(s): one array for 1D input coordinates,
        a (rows, cols) pair for 2D input coordinates
    """
    # Distance from every input point to the centre (km), then threshold.
    distances = calculate_haversine_distance(centre_lon, centre_lat,
                                             longitude, latitude)
    within = np.where(distances < radius)
    if len(longitude.shape) == 1:
        return xr.DataArray(within[0])
    return xr.DataArray(within[0]), xr.DataArray(within[1])
def compare_angles(a1,a2,degrees=True):
    '''
    # Compares the difference between two angles. e.g. it is 2 degrees between
    # 359 and 1 degree. If degrees = False then angles are treated as radians
    # and the returned difference is also in radians.
    '''
    if not degrees:
        a1 = np.degrees(a1)
        a2 = np.degrees(a2)
    # Smallest separation on the circle, computed in degrees
    diff = 180 - np.abs(np.abs(a1-a2)-180)
    if not degrees:
        # BUG FIX: the original converted a1/a2 back to radians (dead local
        # assignments) but returned `diff` in degrees even for radian input.
        diff = np.radians(diff)
    return diff
def cart2polar(x, y, degrees=True):
    '''
    Convert cartesian (x, y) to polar (r, theta).
    theta is returned in degrees when `degrees` is True, otherwise radians.
    '''
    radius = np.sqrt(x ** 2 + y ** 2)
    angle = np.arctan2(y, x)
    if degrees:
        return (radius, np.rad2deg(angle))
    return (radius, angle)
def polar2cart(r, theta, degrees=True):
    '''
    Convert polar (r, theta) to cartesian (x, y).
    theta is interpreted in degrees when `degrees` is True, otherwise radians.
    '''
    angle = np.deg2rad(theta) if degrees else theta
    return (r * np.cos(angle), r * np.sin(angle))
def subset_indices_lonlat_box(array_lon, array_lat, lonmin, lonmax,
                              latmin, latmax):
    """Return indices of points inside the lon/lat box (bounds inclusive)."""
    in_lon = (array_lon >= lonmin) & (array_lon <= lonmax)
    in_lat = (array_lat >= latmin) & (array_lat <= latmax)
    return np.where(in_lon & in_lat)
def calculate_haversine_distance(lon1, lat1, lon2, lat2):
    '''
    # Estimation of geographical distance using the Haversine function.
    # Input can be single values or 1D arrays of locations. This
    # does NOT create a distance matrix but outputs another 1D array.
    # This works for either location vectors of equal length OR a single loc
    # and an arbitrary length location vector.
    #
    # lon1, lat1 :: Location(s) 1 (degrees).
    # lon2, lat2 :: Location(s) 2 (degrees).
    # Returns distance in km.
    '''
    # FIX: xr.ufuncs was deprecated and removed from modern xarray; plain
    # numpy ufuncs dispatch correctly on both numpy arrays and DataArrays.
    # Convert to radians for calculations
    lon1 = np.deg2rad(lon1)
    lat1 = np.deg2rad(lat1)
    lon2 = np.deg2rad(lon2)
    lat2 = np.deg2rad(lat2)
    # Latitude and longitude differences
    dlat = (lat2 - lat1) / 2
    dlon = (lon2 - lon1) / 2
    # Haversine function.
    distance = np.sin(dlat) ** 2 + np.cos(lat1) * \
               np.cos(lat2) * np.sin(dlon) ** 2
    # 6371.007176 km: mean Earth radius used throughout this module
    distance = 2 * 6371.007176 * np.arcsin(np.sqrt(distance))
    return distance
def remove_indices_by_mask(A, mask):
    '''
    Flatten A and drop the elements where mask is True. A and mask should
    have the same shape; the result is always 1-dimensional.
    '''
    flat_values = np.array(A).flatten()
    keep = ~np.array(mask, dtype=bool).flatten()
    return flat_values[keep]
def reinstate_indices_by_mask(array_removed, mask, fill_value=np.nan):
    '''
    Inverse of remove_indices_by_mask(): rebuild an array shaped like `mask`
    from the 1D `array_removed`. False elements of mask take values from
    array_removed (in order); masked (True) elements get `fill_value`.
    '''
    values = np.array(array_removed)
    target_shape = mask.shape
    flat_mask = np.array(mask, dtype=bool).flatten()
    out = np.zeros(flat_mask.shape)
    out[~flat_mask] = values
    out[flat_mask] = fill_value
    return out.reshape(target_shape)
def nearest_indices_2D(mod_lon, mod_lat, new_lon, new_lat,
mask = None):
'''
Obtains the 2 dimensional indices of the nearest model points to specified
lists of longitudes and latitudes. Makes use of sklearn.neighbours
and its BallTree haversine method. Ensure there are no NaNs in
input longitude/latitude arrays (or mask them using "mask"")
Example Useage
----------
# Get indices of model points closest to altimetry points
ind_x, ind_y = nemo.nearest_indices(altimetry.dataset.longitude,
altimetry.dataset.latitude)
# Nearest neighbour interpolation of model dataset to these points
interpolated = nemo.dataset.isel(x_dim = ind_x, y_dim = ind_y)
Parameters
----------
mod_lon (2D array): Model longitude (degrees) array (2-dimensional)
mod_lat (2D array): Model latitude (degrees) array (2-dimensions)
new_lon (1D array): Array of longitudes (degrees) to compare with model
new_lat (1D array): Array of latitudes (degrees) to compare with model
mask (2D array): Mask array. Where True (or 1), elements of array will
not be included. For example, use to mask out land in
case it ends up as the nearest point.
Returns
-------
Array of x indices, Array of y indices
'''
# Cast lat/lon to numpy arrays in case xarray things
new_lon = np.array(new_lon)
new_lat = np.array(new_lat)
mod_lon = np.array(mod_lon)
mod_lat = np.array(mod_lat)
original_shape = mod_lon.shape
# If a mask is supplied, remove indices from arrays.
if mask is None:
mod_lon = mod_lon.flatten()
mod_lat = mod_lat.flatten()
else:
mod_lon = remove_indices_by_mask(mod_lon, mask)
mod_lat = remove_indices_by_mask(mod_lat, mask)
# If we are masking, we want to preserve the original indices so that
# we can get them back at the end (since masked points are removed).
cc, rr = np.meshgrid( np.arange(0,original_shape[1]),
np.arange(0,original_shape[0]))
cc = remove_indices_by_mask(cc, mask)
rr = remove_indices_by_mask(rr, mask)
# Put lons and lats into 2D location arrays for BallTree: [lat, lon]
mod_loc = np.vstack((mod_lat, mod_lon)).transpose()
new_loc = np.vstack((new_lat, new_lon)).transpose()
# Convert lat/lon to radians for BallTree
mod_loc = np.radians(mod_loc)
new_loc = np.radians(new_loc)
# Do nearest neighbour interpolation using BallTree (gets indices)
tree = nb.BallTree(mod_loc, leaf_size=5, metric='haversine')
_, ind_1d = tree.query(new_loc, k=1)
if mask is None:
# Get 2D indices from 1D index output from BallTree
ind_y, ind_x = np.unravel_index(ind_1d, original_shape)
else:
| |
<gh_stars>1-10
"""Copyright (c) 2018, <NAME>
2021, <NAME>"""
import warnings
import numpy as np
import pandas as pd
from bites.utils import ipcw
from bites.utils import utils, admin
from bites.utils.concordance import concordance_td
class EvalSurv:
"""Class for evaluating predictions.
Arguments:
surv {pd.DataFrame} -- Survival predictions.
durations {np.array} -- Durations of test set.
events {np.array} -- Events of test set.
Keyword Arguments:
censor_surv {str, pd.DataFrame, EvalSurv} -- Censoring distribution.
If provided data frame (survival function for censoring) or EvalSurv object,
this will be used.
If 'km', we will fit a Kaplan-Meier to the dataset.
(default: {None})
censor_durations {np.array}: -- Administrative censoring times. (default: {None})
steps {str} -- For durations between values of `surv.index` choose the higher index 'pre'
or lower index 'post'. For a visualization see `help(EvalSurv.steps)`. (default: {'post'})
"""
def __init__(self, surv, durations, events, censor_surv=None, censor_durations=None, steps='post'):
assert (type(durations) == type(events) == np.ndarray), 'Need `durations` and `events` to be arrays'
self.surv = surv
self.durations = durations
self.events = events
self.censor_surv = censor_surv
self.censor_durations = censor_durations
self.steps = steps
assert pd.Series(self.index_surv).is_monotonic
    @property
    def censor_surv(self):
        """Estimated survival for censorings.
        Also an EvalSurv object.
        """
        # Backing attribute is populated by the censor_surv setter, which
        # accepts an EvalSurv, a survival DataFrame, the string 'km', or None.
        return self._censor_surv
@censor_surv.setter
def censor_surv(self, censor_surv):
if isinstance(censor_surv, EvalSurv):
self._censor_surv = censor_surv
elif type(censor_surv) is str:
if censor_surv == 'km':
self.add_km_censor()
else:
raise ValueError(f"censor_surv cannot be {censor_surv}. Use e.g. 'km'")
elif censor_surv is not None:
self.add_censor_est(censor_surv)
else:
self._censor_surv = None
    @property
    def index_surv(self):
        # Time grid of the survival predictions (the DataFrame's row index).
        return self.surv.index.values
    @property
    def steps(self):
        """How to handle predictions that are between two indexes in `index_surv`.
        For a visualization, run the following:
            ev = EvalSurv(pd.DataFrame(np.linspace(1, 0, 7)), np.empty(7), np.ones(7), steps='pre')
            ax = ev[0].plot_surv()
            ev.steps = 'post'
            ev[0].plot_surv(ax=ax, style='--')
            ax.legend(['pre', 'post'])
        """
        # Validated by the steps setter; always 'post' or 'pre'.
        return self._steps
@steps.setter
def steps(self, steps):
vals = ['post', 'pre']
if steps not in vals:
raise ValueError(f"`steps` needs to be {vals}, got {steps}")
self._steps = steps
    def add_censor_est(self, censor_surv, steps='post'):
        """Add censoring estimates so one can use inverse censoring weighting.
        `censor_surv` are the survival estimates trained on (durations, 1-events),
        Arguments:
            censor_surv {pd.DataFrame} -- Censor survival curves.
        Keyword Arguments:
            steps {str} -- For durations between values of `surv.index` choose the higher index 'pre'
                or lower index 'post'. (default: {'post'})
        Returns:
            self, for chaining.
        """
        if not isinstance(censor_surv, EvalSurv):
            # Wrap raw curves in an EvalSurv with flipped event indicators
            # (1 - events), so censorings are treated as the "event".
            censor_surv = self._constructor(censor_surv, self.durations, 1 - self.events, None,
                                            steps=steps)
        self.censor_surv = censor_surv
        return self
    def add_km_censor(self, steps='post'):
        """Add censoring estimates obtained by Kaplan-Meier on the test set
        (durations, 1-events).
        """
        km = utils.kaplan_meier(self.durations, 1 - self.events)
        # Broadcast the single KM curve to one identical column per individual.
        surv = pd.DataFrame(np.repeat(km.values.reshape(-1, 1), len(self.durations), axis=1),
                            index=km.index)
        return self.add_censor_est(surv, steps)
    @property
    def censor_durations(self):
        """Administrative censoring times."""
        # Validated by the censor_durations setter; may be None.
        return self._censor_durations
@censor_durations.setter
def censor_durations(self, val):
if val is not None:
assert (self.durations[self.events == 0] == val[self.events == 0]).all(), \
'Censored observations need same `durations` and `censor_durations`'
assert (self.durations[self.events == 1] <= val[self.events == 1]).all(), \
'`durations` cannot be larger than `censor_durations`'
if (self.durations == val).all():
warnings.warn("`censor_durations` are equal to `durations`." +
" `censor_durations` are likely wrong!")
self._censor_durations = val
else:
self._censor_durations = val
    @property
    def _constructor(self):
        # Subclasses can override so slicing (__getitem__) returns their type.
        return EvalSurv
    def __getitem__(self, index):
        """Slice by individual (column) index; scalars, iterables, and slices
        are accepted. Returns a new EvalSurv over the selected individuals,
        with the censoring estimate (if any) sliced consistently.
        """
        if not (hasattr(index, '__iter__') or type(index) is slice):
            index = [index]  # allow scalar indexing like ev[3]
        surv = self.surv.iloc[:, index]
        durations = self.durations[index]
        events = self.events[index]
        new = self._constructor(surv, durations, events, None, steps=self.steps)
        if self.censor_surv is not None:
            new.censor_surv = self.censor_surv[index]
        return new
    def plot_surv(self, **kwargs):
        """Plot survival estimates.
        kwargs are passed to `self.surv.plot` (pandas/matplotlib).
        Raises RuntimeError for more than 50 individuals or an explicit
        `drawstyle`, which is controlled by `self.steps`.
        """
        if len(self.durations) > 50:
            raise RuntimeError("We don't allow to plot more than 50 lines. Use e.g. `ev[1:5].plot()`")
        if 'drawstyle' in kwargs:
            raise RuntimeError(f"`drawstyle` is set by `self.steps`. Remove from **kwargs")
        return self.surv.plot(drawstyle=f"steps-{self.steps}", **kwargs)
    def idx_at_times(self, times):
        """Get the index (iloc) of the `surv.index` closest to `times`.
        I.e. surv.loc[tims] (almost)= surv.iloc[idx_at_times(times)].
        Useful for finding predictions at given durations.
        Tie-breaking follows `self.steps` ('pre' or 'post').
        """
        return utils.idx_at_times(self.index_surv, times, self.steps)
    def _duration_idx(self):
        # Indices into `index_surv` closest to each individual's duration.
        return self.idx_at_times(self.durations)
    def surv_at_times(self, times):
        """Survival estimates at the index values closest to `times`."""
        idx = self.idx_at_times(times)
        return self.surv.iloc[idx]
# def prob_alive(self, time_grid):
# return self.surv_at_times(time_grid).values
    def concordance_td(self, method='adj_antolini'):
        """Time dependent concordance index from
        <NAME>.; <NAME>.; and <NAME>. 2005. A time-dependent discrimination
        index for survival data. Statistics in Medicine 24:3927–3944.
        If 'method' is 'antolini', the concordance from Antolini et al. is computed.
        If 'method' is 'adj_antolini' (default) we have made a small modifications
        for ties in predictions and event times.
        We have followed step 3. in Sec 5.1. in Random Survival Forests paper, except for the last
        point with "T_i = T_j, but not both are deaths", as that doesn't make much sense.
        See 'metrics._is_concordant'.
        Keyword Arguments:
            method {str} -- Type of c-index 'antolini' or 'adj_antolini' (default {'adj_antolini'}).
        Returns:
            float -- Time dependent concordance index.
        """
        return concordance_td(self.durations, self.events, self.surv.values,
                              self._duration_idx(), method)
    def brier_score(self, time_grid, max_weight=np.inf):
        """Brier score weighted by the inverse censoring distribution.
        See Section 3.1.2 or [1] for details of the weighting scheme.
        Arguments:
            time_grid {np.array} -- Durations where the brier score should be calculated.
        Keyword Arguments:
            max_weight {float} -- Max weight value (max number of individuals an individual
                can represent (default {np.inf}).
        Raises:
            ValueError -- If no censoring estimate has been added.
        References:
            [1] <NAME> and <NAME>. The Brier Score under Administrative Censoring: Problems
                and Solutions. arXiv preprint arXiv:1912.08581, 2019.
                https://arxiv.org/pdf/1912.08581.pdf
        """
        if self.censor_surv is None:
            raise ValueError("""Need to add censor_surv to compute Brier score. Use 'add_censor_est'
            or 'add_km_censor' for Kaplan-Meier""")
        bs = ipcw.brier_score(time_grid, self.durations, self.events, self.surv.values,
                              self.censor_surv.surv.values, self.index_surv,
                              self.censor_surv.index_surv, max_weight, True, self.steps,
                              self.censor_surv.steps)
        return pd.Series(bs, index=time_grid).rename('brier_score')
    def nbll(self, time_grid, max_weight=np.inf):
        """Negative binomial log-likelihood weighted by the inverse censoring distribution.
        See Section 3.1.2 or [1] for details of the weighting scheme.
        Arguments:
            time_grid {np.array} -- Durations where the score should be calculated.
        Keyword Arguments:
            max_weight {float} -- Max weight value (max number of individuals an individual
                can represent (default {np.inf}).
        Raises:
            ValueError -- If no censoring estimate has been added.
        References:
            [1] <NAME> and <NAME>. The Brier Score under Administrative Censoring: Problems
                and Solutions. arXiv preprint arXiv:1912.08581, 2019.
                https://arxiv.org/pdf/1912.08581.pdf
        """
        if self.censor_surv is None:
            raise ValueError("""Need to add censor_surv to compute the score. Use 'add_censor_est'
            or 'add_km_censor' for Kaplan-Meier""")
        bll = ipcw.binomial_log_likelihood(time_grid, self.durations, self.events, self.surv.values,
                                           self.censor_surv.surv.values, self.index_surv,
                                           self.censor_surv.index_surv, max_weight, True, self.steps,
                                           self.censor_surv.steps)
        # negate: the helper returns the (positive) log-likelihood
        return pd.Series(-bll, index=time_grid).rename('nbll')
    def integrated_brier_score(self, time_grid, max_weight=np.inf):
        """Integrated Brier score weighted by the inverse censoring distribution.
        Essentially an integral over values obtained from `brier_score(time_grid, max_weight)`.
        Arguments:
            time_grid {np.array} -- Durations where the brier score should be calculated.
        Keyword Arguments:
            max_weight {float} -- Max weight value (max number of individuals an individual
                can represent (default {np.inf}).
        Raises:
            ValueError -- If no censoring estimate has been added.
        """
        if self.censor_surv is None:
            raise ValueError("Need to add censor_surv to compute briser score. Use 'add_censor_est'")
        return ipcw.integrated_brier_score(time_grid, self.durations, self.events, self.surv.values,
                                           self.censor_surv.surv.values, self.index_surv,
                                           self.censor_surv.index_surv, max_weight, self.steps,
                                           self.censor_surv.steps)
def integrated_nbll(self, time_grid, max_weight=np.inf):
"""Integrated negative binomial log-likelihood weighted by the inverse censoring distribution.
Essentially an integral over values obtained from `nbll(time_grid, max_weight)`.
Arguments:
time_grid {np.array} -- Durations where the brier score should be calculated.
Keyword Arguments:
max_weight {float} -- Max weight value (max number of individuals an individual
can represent (default {np.inf}).
"""
if self.censor_surv is None:
raise ValueError("Need to add censor_surv to compute the score. Use 'add_censor_est'")
ibll = ipcw.integrated_binomial_log_likelihood(time_grid, self.durations, self.events, self.surv.values,
self.censor_surv.surv.values, self.index_surv,
self.censor_surv.index_surv, max_weight, self.steps,
self.censor_surv.steps)
return -ibll
    def brier_score_admin(self, time_grid):
        """The Administrative Brier score proposed by [1].
        Removes individuals as they are administratively censored, even if they have experienced an
        event.
        Arguments:
            time_grid {np.array} -- Durations where the brier score should be calculated.
        Returns:
            pd.Series -- Administrative Brier score at each time in `time_grid`.
        References:
            [1] <NAME> and <NAME>. The Brier Score under Administrative Censoring: Problems
            and Solutions. arXiv preprint arXiv:1912.08581, 2019.
            https://arxiv.org/pdf/1912.08581.pdf
        """
        if self.censor_durations is None:
            raise ValueError("Need to provide `censor_durations` (censoring durations) to use this method")
        # NOTE(review): the positional `True` flag matches the IPCW variants above — confirm
        # its meaning against the `admin` module's signature.
        bs = admin.brier_score(time_grid, self.durations, self.censor_durations, self.events,
                               self.surv.values, self.index_surv, True, self.steps)
        return pd.Series(bs, index=time_grid).rename('brier_score')
def integrated_brier_score_admin(self, time_grid):
"""The Integrated administrative Brier score proposed by [1].
Removes individuals as they are administratively censored, event if they have experienced an
event.
Arguments:
time_grid {np.array} -- Durations where the brier score should be calculated.
References:
[1] <NAME> and <NAME>. The Brier Score under Administrative Censoring: Problems
and Solutions. arXiv preprint arXiv:1912.08581, 2019.
https://arxiv.org/pdf/1912.08581.pdf
"""
if self.censor_durations is None:
raise ValueError("Need to provide `censor_durations` (censoring durations) to use this method")
ibs = admin.integrated_brier_score(time_grid, self.durations, self.censor_durations, self.events,
self.surv.values, self.index_surv, self.steps)
return ibs
def nbll_admin(self, time_grid):
"""The negative administrative binomial log-likelihood proposed by [1].
Removes individuals as they are administratively censored, event if they | |
sample in out:
sample.input = tuple(reversed(sample.input))
return out
def sample_inputs_std_var(op_info, device, dtype, requires_grad):
    """Sample inputs for std/var: a 3-D and a 1-D tensor crossed with dim/unbiased/keepdim combos."""
    t3 = make_tensor((S, S, S), device=device, dtype=dtype,
                     low=None, high=None, requires_grad=requires_grad)
    t1 = make_tensor((S,), device=device, dtype=dtype,
                     low=None, high=None, requires_grad=requires_grad)
    samples = [
        SampleInput(t3),
        SampleInput(t3, kwargs={'dim': 1}),
        SampleInput(t3, kwargs={'dim': 1, 'unbiased': True, 'keepdim': True}),
        SampleInput(t1, kwargs={'dim': 0, 'unbiased': True, 'keepdim': True}),
        SampleInput(t1, kwargs={'dim': 0, 'unbiased': False, 'keepdim': False}),
    ]
    return samples
def _sample_inputs_svd(op_info, device, dtype, requires_grad=False, is_linalg_svd=False):
    """
    This function generates input for torch.svd with distinct singular values so that autograd is always stable.
    Matrices of different size:
    square matrix - S x S size
    tall matrix - S x (S-2)
    wide matrix - (S-2) x S
    and batched variants of above are generated.
    Each SampleInput has a function 'output_process_fn_grad' attached to it that is applied on the output of torch.svd
    It is needed for autograd checks, because backward of svd doesn't work for an arbitrary loss function.
    """
    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
    # svd and linalg.svd returns V and V.conj().T, respectively. So we need to slice
    # along different dimensions when needed (this is used by
    # test_cases2:wide_all and wide_all_batched below)
    if is_linalg_svd:
        def slice_V(v):
            # linalg.svd returns V^H, so the "extra" columns live along the rows.
            return v[..., :(S - 2), :]
        def uv_loss(usv):
            # V^H is already conjugated, so no extra .conj() is needed here.
            u00 = usv[0][0, 0]
            v00_conj = usv[2][0, 0]
            return u00 * v00_conj
    else:
        def slice_V(v):
            # torch.svd returns V, so slice its columns directly.
            return v[..., :, :(S - 2)]
        def uv_loss(usv):
            u00 = usv[0][0, 0]
            v00_conj = usv[2][0, 0].conj()
            return u00 * v00_conj
    test_cases1 = (  # some=True (default)
        # loss functions for complex-valued svd have to be "gauge invariant",
        # i.e. loss functions shouldn't change when the sign of the singular vectors changes.
        # the simplest choice to satisfy this requirement is to apply 'abs'.
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
            lambda usv: usv[1]),  # 'check_grad_s'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
            lambda usv: abs(usv[0])),  # 'check_grad_u'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
            lambda usv: abs(usv[2])),  # 'check_grad_v'
        # this test is important as it checks the additional term that is non-zero only for complex-valued inputs
        # and when the loss function depends both on 'u' and 'v'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
            uv_loss),  # 'check_grad_uv'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:(S - 2)],
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2][..., :, :(S - 2)]))),  # 'wide'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:, :(S - 2)],
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'tall'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device),
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'batched'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :(S - 2), :],
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'wide_batched'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :, :(S - 2)],
            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'tall_batched'
    )
    test_cases2 = (  # some=False
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:(S - 2)],
            lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))),  # 'wide_all'
        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:, :(S - 2)],
            lambda usv: (abs(usv[0][:, :(S - 2)]), usv[1], abs(usv[2]))),  # 'tall_all'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :(S - 2), :],
            lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))),  # 'wide_all_batched'
        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :, :(S - 2)],
            lambda usv: (abs(usv[0][..., :, :(S - 2)]), usv[1], abs(usv[2]))),  # 'tall_all_batched'
    )
    out = []
    # some=True / full_matrices=False variants.
    for a, out_fn in test_cases1:
        a.requires_grad = requires_grad
        if is_linalg_svd:
            kwargs = {'full_matrices': False}
        else:
            kwargs = {'some': True}
        out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))
    # some=False / full_matrices=True variants.
    for a, out_fn in test_cases2:
        a.requires_grad = requires_grad
        if is_linalg_svd:
            kwargs = {'full_matrices': True}
        else:
            kwargs = {'some': False}
        out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))
    return out
def sample_inputs_svd(op_info, device, dtype, requires_grad=False):
    # Delegate to the shared SVD sample generator, in torch.svd mode.
    return _sample_inputs_svd(op_info, device, dtype, requires_grad=requires_grad, is_linalg_svd=False)
def sample_inputs_linalg_svd(op_info, device, dtype, requires_grad=False):
    # Delegate to the shared SVD sample generator, in torch.linalg.svd mode.
    return _sample_inputs_svd(op_info, device, dtype, requires_grad=requires_grad, is_linalg_svd=True)
def sample_inputs_pinverse(op_info, device, dtype, requires_grad=False):
    """
    This function generates input for torch.pinverse with distinct singular values so that autograd is always stable.
    Implementation of torch.pinverse depends on torch.svd, therefore it's sufficient to check only square S x S matrix
    and the batched (3 x S x S) input.
    """
    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
    single = random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)          # pinverse
    batched = random_fullrank_matrix_distinct_singular_value(S, 3, dtype=dtype).to(device)      # pinverse 'batched'
    samples = []
    for matrix in (single, batched):
        matrix.requires_grad = requires_grad
        samples.append(SampleInput(matrix))
    return samples
def sample_inputs_flip(op_info, device, dtype, requires_grad):
    """Sample inputs for torch.flip: a regular and an empty-dim tensor, crossed with several dim tuples."""
    tensors = [make_tensor(shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
               for shape in ((S, M, S), (S, 0, M))]
    dim_tuples = ((0, 1, 2), (0,), (0, 2), (-1,), ())
    return [SampleInput(t, kwargs={'dims': dims}) for t, dims in product(tensors, dim_tuples)]
def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
    """Sample inputs for torch.fliplr/flipud: a regular tensor and one with an empty dimension."""
    return [SampleInput(make_tensor(shape, device, dtype, low=None, high=None,
                                    requires_grad=requires_grad))
            for shape in ((S, M, S), (S, 0, M))]
def sample_inputs_diag(op_info, device, dtype, requires_grad):
    """Sample inputs for torch.diag: square and rectangular matrices with several diagonal
    offsets, plus a plain vector input."""
    matrices = [make_tensor(shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
                for shape in ((M, M), (3, 5), (5, 3))]
    # NOTE: the offset list intentionally mirrors the original, including the repeated (2,).
    offsets = ((), (2,), (-2,), (1,), (2,))
    samples = [SampleInput(matrix, args=offset) for matrix, offset in product(matrices, offsets)]
    samples.append(SampleInput(make_tensor((M, ), device, dtype, low=None, high=None,
                                           requires_grad=requires_grad)))
    return samples
def sample_inputs_logit(op_info, device, dtype, requires_grad):
    """Sample inputs for torch.logit, shrinking the domain so values stay numerically stable."""
    low, high = op_info.domain
    # The operator is very sensitive near the domain endpoints; float16 needs a much
    # larger margin than the default eps to avoid NaN.
    eps = op_info._domain_eps if dtype != torch.float16 else 3e-2
    low, high = low + eps, high - eps
    def _t(shape):
        return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
    return (
        SampleInput(_t((S, S, S))),
        SampleInput(_t((S, S, S)), args=(0.2,)),
        SampleInput(_t(())),
        SampleInput(_t(()), args=(0.2,)),
    )
def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad):
    """Sample inputs for masked_scatter: full 2-D mask, broadcastable 1-D mask, and scalar mask."""
    def _src():
        # Fresh (M, M) tensor for each slot, matching the original's independent tensors.
        return make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad)
    return (
        SampleInput(_src(), args=(torch.randn(M, M, device=device) > 0, _src())),
        SampleInput(_src(), args=(torch.randn((M,), device=device) > 0, _src())),
        SampleInput(_src(), args=(bernoulli_scalar().to(device), _src())),
    )
def sample_inputs_masked_select(op_info, device, dtype, requires_grad):
    """Sample inputs for masked_select covering broadcasting between the input and the mask."""
    def _t(shape):
        return make_tensor(shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
    return (
        SampleInput(_t((M, M)), args=(torch.randn(M, M, device=device) > 0,)),
        SampleInput(_t((M, M)), args=(torch.randn((M,), device=device) > 0,)),
        SampleInput(_t((M,)), args=(torch.randn((M, M), device=device) > 0,)),
        SampleInput(_t((M, 1, M)), args=(torch.randn((M, M), device=device) > 0,)),
        SampleInput(_t(()), args=(torch.tensor(1, device=device, dtype=torch.bool),)),
        SampleInput(_t((M, M)), args=(torch.tensor(1, device=device, dtype=torch.bool),)),
        SampleInput(_t(()), args=(torch.randn((M, M), device=device) > 0,)),
    )
# Operator database (sorted alphabetically)
op_db: List[OpInfo] = [
UnaryUfuncInfo('abs',
aliases=('absolute', ),
ref=np.abs,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/issues/49224
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.int8], active_if=TEST_WITH_ASAN),
SkipInfo('TestUnaryUfuncs', 'test_variant_consistency',
dtypes=[torch.cfloat, torch.cdouble]),
# TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input)
# We can break the logic of the loop over all possible types but it is OK.
# https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449
SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes',
dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestCommon', 'test_variant_consistency_eager',
dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=[torch.cfloat, torch.cdouble, torch.bfloat16]),
SkipInfo('TestCommon', 'test_jit_alias_remapping',
dtypes=[torch.cfloat, torch.cdouble, torch.bfloat16]),
),
test_inplace_grad=False,
assert_autodiffed=True),
# NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952)
UnaryUfuncInfo('acos',
aliases=('arccos', ),
ref=np.arccos,
domain=(-1, 1),
handles_complex_extremals=False,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
default_test_dtypes=[torch.long, torch.half, torch.bfloat16, torch.float32, torch.cfloat],
skip_bfloat16_grad=True,
assert_autodiffed=True,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-1,
torch.complex64: 1e-2}),),
safe_casts_outputs=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_fn_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_method_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_inplace_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
)),
# NOTE: the derivative for inplace acosh is not implemented
UnaryUfuncInfo('acosh',
ref=np.arccosh,
domain=(1, float('inf')),
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
test_inplace_grad=False,
skips=(
# RuntimeError: "rsqrt_cuda" not implemented for 'BFloat16'
SkipInfo('TestCommon', 'test_variant_consistency_jit',
device_type='cuda', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
# Reference: https://github.com/pytorch/pytorch/issues/50692
SkipInfo('TestGradients', 'test_fn_grad',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_method_grad',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
)),
OpInfo('addmm',
dtypes=floating_types(),
dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),
# BFloat16 support on CUDA requires CUDA 11 and SM53
dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,
*[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_types_and(torch.half),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::add', 'aten::mm'],
skips=(
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=[torch.bfloat16, torch.float16, torch.cfloat, torch.cdouble]),),
sample_inputs_func=sample_inputs_addmm),
OpInfo('addr',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
# Reference: https://github.com/pytorch/pytorch/issues/50747
test_inplace_grad=False,
skips=(
SkipInfo('TestCommon', 'test_variant_consistency_jit',
dtypes=[torch.float16, torch.cfloat, torch.cdouble, torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/issues/50747
SkipInfo('TestCommon', 'test_variant_consistency_eager',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),),
sample_inputs_func=sample_inputs_addr),
UnaryUfuncInfo('asin',
aliases=('arcsin', ),
ref=np.arcsin,
domain=(-1, 1),
supports_sparse=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
safe_casts_outputs=True,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
assert_autodiffed=True,
skip_bfloat16_grad=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS)
)),
# NOTE: derivative for inplace asinh is not implemented
UnaryUfuncInfo('asinh',
ref=np.arcsinh,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
test_inplace_grad=False,
skips=(
# RuntimeError: "rsqrt_cuda" | |
#En cmd se ejecuta con los siguientes comandos
#py -3.1 "C:\Users\juanz\Google Drive\Semestre 6\Laboratorio Electronica Digital\ProyectoTurnero\Clases.py"
import sys
import time
import serial
import pygame
#--------------------------------------------------------------
#---- Inicia --- ClassTurnosDisponibles -----------------------
class ClassTurnosDisponibles():
    """Administers the pool of turn (ticket) numbers used by the project.

    Turns are issued sequentially up to ``LimiteTurnos``; turns handed back via
    ``DevolverTurno`` are reused before new numbers are issued.  Numbers whose
    last two digits are "00" are skipped — presumably because only the last two
    digits are displayed (see ``ValueTo100``), so they would read as turn 0.
    """
    # Class-level defaults; mutated on the instance as turns are issued.
    ContadorTurnos = 0
    LimiteTurnos = 0

    def __init__(self):
        # Stack of returned turn numbers available for reuse (LIFO).
        self.ListaTurnosDisponibles = []

    def UtilizarTurno(self):
        """Hand out a turn number, or 0 when no turn is available."""
        if self.ListaTurnosDisponibles:
            # Reuse the most recently returned turn first.
            return self.ListaTurnosDisponibles.pop(-1)
        if self.ContadorTurnos >= self.LimiteTurnos:
            return 0
        self.ContadorTurnos += 1
        # Skip values ending in "00" (100, 200, ...).  The second check is only
        # reached for numbers >= 10, so the [-2] index is always valid.
        if str(self.ContadorTurnos)[-1] == "0" and str(self.ContadorTurnos)[-2] == "0":
            self.ContadorTurnos += 1
        return self.ContadorTurnos

    def DevolverTurno(self, Turno):
        """Put a turn number back into the reusable pool (0 means 'no turn' and is ignored)."""
        if Turno != 0:
            self.ListaTurnosDisponibles.append(Turno)

    def PrintEstado(self):
        """Print the internal state (debug helper)."""
        print("ContadorTurnos = %d \nLimiteTurnos = %d \nListaTurnosDisponibles = %s" % (self.ContadorTurnos,self.LimiteTurnos,str(self.ListaTurnosDisponibles)))
#---- Termina --- ClassTurnosDisponibles ----------------------
#--------------------------------------------------------------
#--------------------------------------------------------------
# Keeps only the last two decimal digits: 1 -> 1, 101 -> 1, 1001 -> 1, 10001 -> 1 (and 100 -> 0)
def ValueTo100(valor):
    """Return the last two decimal digits of `valor` as an int (e.g. 12345 -> 45, 7 -> 7)."""
    return int(str(valor)[-2:])
#--------------------------------------------------------------
#--------------------------------------------------------------
def IntTo8Bytes(valor):
    """Return the 8-bit, MSB-first binary representation of `valor` as a bytearray.

    e.g. IntTo8Bytes(85) -> bytearray([0, 1, 0, 1, 0, 1, 0, 1])

    Valid inputs are 1..128 (the original validation).  For 0 or values above
    128 an error is printed and an all-zero bytearray is returned; negative
    values silently yield the all-zero result — both exactly as the original
    eight-level nested-if implementation behaved.
    """
    VectorBytes = bytearray(8)
    if valor == 0 or valor > 128:
        # Runtime error message preserved verbatim (Spanish).
        print("ERROR - El valor ingresado no es correcto " + str(valor))
    elif valor > 0:
        # Extract bits from least to most significant; index 7 holds the LSB.
        for bit in range(8):
            VectorBytes[7 - bit] = (valor >> bit) & 1
    return(VectorBytes)
#--------------------------------------------------------------
def IntToHexa(valor):
    """Return `valor` (0-255) as a single-byte bytes object, e.g. 10 -> b'\\x0a'.

    Replaces the original hand-written if/elif ladder (one branch per value,
    with a duplicated `valor == 0` branch, and truncated partway through the
    range) with the direct, equivalent conversion.  Raises ValueError for
    values outside 0-255, which the ladder could never map to a byte anyway.
    """
    return bytes([valor])
<filename>source/tomopy/util/extern/recon.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2015-2019, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2015-2019. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
"""
Module for recon library wrappers.
"""
import numpy as np
import tomopy.util.dtype as dtype
from . import c_shared_lib
from .accel import c_accel_mlem
from .accel import c_accel_sirt
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2015, UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
# Public API: thin ctypes wrappers around the compiled reconstruction kernels.
__all__ = ['c_project',
           'c_project2',
           'c_project3',
           'c_art',
           'c_bart',
           'c_fbp',
           'c_mlem',
           'c_osem',
           'c_ospml_hybrid',
           'c_ospml_quad',
           'c_pml_hybrid',
           'c_pml_quad',
           'c_sirt',
           'c_tv',
           'c_grad',
           'c_tikh',
           'c_vector',
           'c_vector2',
           'c_vector3']
# Handle to the compiled recon shared library; every wrapper below calls into it.
LIB_TOMOPY_RECON = c_shared_lib("libtomopy-recon")
def c_project(obj, center, tomo, theta):
    """Forward-project `obj` via the C `project` routine; results are written into `tomo`."""
    def _as_3d(shape):
        # A 2-D array is treated as a single slice: prepend a unit y-axis.
        return (1,) + tuple(shape) if len(shape) == 2 else tuple(shape)

    # TODO: we should fix this elsewhere...
    # The C function requires a contiguous tomogram buffer.
    tomo_c = np.require(tomo, requirements="AC")
    oy, ox, oz = _as_3d(obj.shape)
    dy, dt, dx = _as_3d(tomo.shape)
    LIB_TOMOPY_RECON.project.restype = dtype.as_c_void_p()
    LIB_TOMOPY_RECON.project(
        dtype.as_c_float_p(obj),
        dtype.as_c_int(oy),
        dtype.as_c_int(ox),
        dtype.as_c_int(oz),
        dtype.as_c_float_p(tomo_c),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta))
    # Copy the results back into the caller's (possibly non-contiguous) array.
    tomo[:] = tomo_c[:]
def c_project2(objx, objy, center, tomo, theta):
    """Vector forward-projection (x/y components) via the C `project2` routine; writes into `tomo`."""
    def _as_3d(shape):
        # A 2-D array is treated as a single slice: prepend a unit y-axis.
        return (1,) + tuple(shape) if len(shape) == 2 else tuple(shape)

    # TODO: we should fix this elsewhere...
    # The C function requires a contiguous tomogram buffer.
    tomo_c = np.require(tomo, requirements="AC")
    oy, ox, oz = _as_3d(objx.shape)
    dy, dt, dx = _as_3d(tomo.shape)
    LIB_TOMOPY_RECON.project2.restype = dtype.as_c_void_p()
    LIB_TOMOPY_RECON.project2(
        dtype.as_c_float_p(objx),
        dtype.as_c_float_p(objy),
        dtype.as_c_int(oy),
        dtype.as_c_int(ox),
        dtype.as_c_int(oz),
        dtype.as_c_float_p(tomo_c),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta))
    # Copy the results back into the caller's (possibly non-contiguous) array.
    tomo[:] = tomo_c[:]
def c_project3(objx, objy, objz, center, tomo, theta, axis):
    """Vector forward-projection (x/y/z components) via the C `project3` routine; writes into `tomo`."""
    def _as_3d(shape):
        # A 2-D array is treated as a single slice: prepend a unit y-axis.
        return (1,) + tuple(shape) if len(shape) == 2 else tuple(shape)

    # TODO: we should fix this elsewhere...
    # The C function requires a contiguous tomogram buffer.
    tomo_c = np.require(tomo, requirements="AC")
    oy, ox, oz = _as_3d(objx.shape)
    dy, dt, dx = _as_3d(tomo.shape)
    LIB_TOMOPY_RECON.project3.restype = dtype.as_c_void_p()
    LIB_TOMOPY_RECON.project3(
        dtype.as_c_float_p(objx),
        dtype.as_c_float_p(objy),
        dtype.as_c_float_p(objz),
        dtype.as_c_int(oy),
        dtype.as_c_int(ox),
        dtype.as_c_int(oz),
        dtype.as_c_float_p(tomo_c),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta),
        dtype.as_c_int(axis))
    # Copy the results back into the caller's (possibly non-contiguous) array.
    tomo[:] = tomo_c[:]
def c_art(tomo, center, recon, theta, **kwargs):
    """Run the ART reconstruction kernel from the C extension.

    *recon* is updated in place. Required kwargs: num_gridx, num_gridy,
    num_iter.
    """
    shape = tomo.shape
    # A 2-D sinogram is a single slice: treat the missing y-axis as size 1.
    dy, dt, dx = ((1,) + shape) if len(shape) == 2 else shape
    LIB_TOMOPY_RECON.art.restype = dtype.as_c_void_p()
    c_args = [
        dtype.as_c_float_p(tomo),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta),
        dtype.as_c_float_p(recon),
        dtype.as_c_int(kwargs['num_gridx']),
        dtype.as_c_int(kwargs['num_gridy']),
        dtype.as_c_int(kwargs['num_iter']),
    ]
    return LIB_TOMOPY_RECON.art(*c_args)
def c_bart(tomo, center, recon, theta, **kwargs):
    """Run the block-ART (BART) reconstruction kernel from the C extension.

    *recon* is updated in place. Required kwargs: num_gridx, num_gridy,
    num_iter, num_block, ind_block.
    """
    shape = tomo.shape
    # A 2-D sinogram is a single slice: treat the missing y-axis as size 1.
    dy, dt, dx = ((1,) + shape) if len(shape) == 2 else shape
    LIB_TOMOPY_RECON.bart.restype = dtype.as_c_void_p()
    c_args = [
        dtype.as_c_float_p(tomo),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta),
        dtype.as_c_float_p(recon),
        dtype.as_c_int(kwargs['num_gridx']),
        dtype.as_c_int(kwargs['num_gridy']),
        dtype.as_c_int(kwargs['num_iter']),
        dtype.as_c_int(kwargs['num_block']),
        # TODO: I think this should be int_p (kept as float_p to match the C side).
        dtype.as_c_float_p(kwargs['ind_block']),
    ]
    return LIB_TOMOPY_RECON.bart(*c_args)
def c_fbp(tomo, center, recon, theta, **kwargs):
    """Run the filtered back-projection (FBP) kernel from the C extension.

    *recon* is updated in place. Required kwargs: num_gridx, num_gridy,
    filter_name (bytes), filter_par.
    """
    shape = tomo.shape
    # A 2-D sinogram is a single slice: treat the missing y-axis as size 1.
    dy, dt, dx = ((1,) + shape) if len(shape) == 2 else shape
    LIB_TOMOPY_RECON.fbp.restype = dtype.as_c_void_p()
    c_args = [
        dtype.as_c_float_p(tomo),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta),
        dtype.as_c_float_p(recon),
        dtype.as_c_int(kwargs['num_gridx']),
        dtype.as_c_int(kwargs['num_gridy']),
        dtype.as_c_char_p(kwargs['filter_name']),
        dtype.as_c_float_p(kwargs['filter_par']),
    ]
    return LIB_TOMOPY_RECON.fbp(*c_args)
def c_mlem(tomo, center, recon, theta, **kwargs):
    """Run the MLEM reconstruction kernel from the C extension.

    Dispatches to the accelerated variant when kwargs['accelerated'] is
    truthy. *recon* is updated in place. Required kwargs: num_gridx,
    num_gridy, num_iter, accelerated.
    """
    # Guard clause instead of if/else nesting: hand off to the fast path early.
    if kwargs['accelerated']:
        return c_accel_mlem(tomo, center, recon, theta, **kwargs)
    shape = tomo.shape
    # A 2-D sinogram is a single slice: treat the missing y-axis as size 1.
    dy, dt, dx = ((1,) + shape) if len(shape) == 2 else shape
    LIB_TOMOPY_RECON.mlem.restype = dtype.as_c_void_p()
    c_args = [
        dtype.as_c_float_p(tomo),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta),
        dtype.as_c_float_p(recon),
        dtype.as_c_int(kwargs['num_gridx']),
        dtype.as_c_int(kwargs['num_gridy']),
        dtype.as_c_int(kwargs['num_iter']),
    ]
    return LIB_TOMOPY_RECON.mlem(*c_args)
def c_osem(tomo, center, recon, theta, **kwargs):
    """Run the ordered-subset EM (OSEM) kernel from the C extension.

    *recon* is updated in place. Required kwargs: num_gridx, num_gridy,
    num_iter, num_block, ind_block.
    """
    shape = tomo.shape
    # A 2-D sinogram is a single slice: treat the missing y-axis as size 1.
    dy, dt, dx = ((1,) + shape) if len(shape) == 2 else shape
    LIB_TOMOPY_RECON.osem.restype = dtype.as_c_void_p()
    c_args = [
        dtype.as_c_float_p(tomo),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta),
        dtype.as_c_float_p(recon),
        dtype.as_c_int(kwargs['num_gridx']),
        dtype.as_c_int(kwargs['num_gridy']),
        dtype.as_c_int(kwargs['num_iter']),
        dtype.as_c_int(kwargs['num_block']),
        # TODO: should be int? (kept as float_p to match the C side)
        dtype.as_c_float_p(kwargs['ind_block']),
    ]
    return LIB_TOMOPY_RECON.osem(*c_args)
def c_ospml_hybrid(tomo, center, recon, theta, **kwargs):
    """Run the ordered-subset PML hybrid-regularized kernel from the C extension.

    *recon* is updated in place. Required kwargs: num_gridx, num_gridy,
    num_iter, reg_par, num_block, ind_block.
    """
    shape = tomo.shape
    # A 2-D sinogram is a single slice: treat the missing y-axis as size 1.
    dy, dt, dx = ((1,) + shape) if len(shape) == 2 else shape
    LIB_TOMOPY_RECON.ospml_hybrid.restype = dtype.as_c_void_p()
    c_args = [
        dtype.as_c_float_p(tomo),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta),
        dtype.as_c_float_p(recon),
        dtype.as_c_int(kwargs['num_gridx']),
        dtype.as_c_int(kwargs['num_gridy']),
        dtype.as_c_int(kwargs['num_iter']),
        dtype.as_c_float_p(kwargs['reg_par']),
        dtype.as_c_int(kwargs['num_block']),
        # TODO: should be int? (kept as float_p to match the C side)
        dtype.as_c_float_p(kwargs['ind_block']),
    ]
    return LIB_TOMOPY_RECON.ospml_hybrid(*c_args)
def c_ospml_quad(tomo, center, recon, theta, **kwargs):
    """Run the ordered-subset PML quadratic-regularized kernel from the C extension.

    *recon* is updated in place. Required kwargs: num_gridx, num_gridy,
    num_iter, reg_par, num_block, ind_block.
    """
    shape = tomo.shape
    # A 2-D sinogram is a single slice: treat the missing y-axis as size 1.
    dy, dt, dx = ((1,) + shape) if len(shape) == 2 else shape
    LIB_TOMOPY_RECON.ospml_quad.restype = dtype.as_c_void_p()
    c_args = [
        dtype.as_c_float_p(tomo),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta),
        dtype.as_c_float_p(recon),
        dtype.as_c_int(kwargs['num_gridx']),
        dtype.as_c_int(kwargs['num_gridy']),
        dtype.as_c_int(kwargs['num_iter']),
        dtype.as_c_float_p(kwargs['reg_par']),
        dtype.as_c_int(kwargs['num_block']),
        # TODO: should be int? (kept as float_p to match the C side)
        dtype.as_c_float_p(kwargs['ind_block']),
    ]
    return LIB_TOMOPY_RECON.ospml_quad(*c_args)
def c_pml_hybrid(tomo, center, recon, theta, **kwargs):
    """Run the PML hybrid-regularized kernel from the C extension.

    *recon* is updated in place. Required kwargs: num_gridx, num_gridy,
    num_iter, reg_par.
    """
    shape = tomo.shape
    # A 2-D sinogram is a single slice: treat the missing y-axis as size 1.
    dy, dt, dx = ((1,) + shape) if len(shape) == 2 else shape
    LIB_TOMOPY_RECON.pml_hybrid.restype = dtype.as_c_void_p()
    c_args = [
        dtype.as_c_float_p(tomo),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta),
        dtype.as_c_float_p(recon),
        dtype.as_c_int(kwargs['num_gridx']),
        dtype.as_c_int(kwargs['num_gridy']),
        dtype.as_c_int(kwargs['num_iter']),
        dtype.as_c_float_p(kwargs['reg_par']),
    ]
    return LIB_TOMOPY_RECON.pml_hybrid(*c_args)
def c_pml_quad(tomo, center, recon, theta, **kwargs):
    """Run the PML quadratic-regularized kernel from the C extension.

    *recon* is updated in place. Required kwargs: num_gridx, num_gridy,
    num_iter, reg_par.
    """
    shape = tomo.shape
    # A 2-D sinogram is a single slice: treat the missing y-axis as size 1.
    dy, dt, dx = ((1,) + shape) if len(shape) == 2 else shape
    LIB_TOMOPY_RECON.pml_quad.restype = dtype.as_c_void_p()
    c_args = [
        dtype.as_c_float_p(tomo),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta),
        dtype.as_c_float_p(recon),
        dtype.as_c_int(kwargs['num_gridx']),
        dtype.as_c_int(kwargs['num_gridy']),
        dtype.as_c_int(kwargs['num_iter']),
        dtype.as_c_float_p(kwargs['reg_par']),
    ]
    return LIB_TOMOPY_RECON.pml_quad(*c_args)
def c_sirt(tomo, center, recon, theta, **kwargs):
    """Run the SIRT reconstruction kernel from the C extension.

    Dispatches to the accelerated variant when kwargs['accelerated'] is
    truthy. *recon* is updated in place. Required kwargs: num_gridx,
    num_gridy, num_iter, accelerated.
    """
    # Guard clause instead of if/else nesting: hand off to the fast path early.
    if kwargs['accelerated']:
        return c_accel_sirt(tomo, center, recon, theta, **kwargs)
    shape = tomo.shape
    # A 2-D sinogram is a single slice: treat the missing y-axis as size 1.
    dy, dt, dx = ((1,) + shape) if len(shape) == 2 else shape
    LIB_TOMOPY_RECON.sirt.restype = dtype.as_c_void_p()
    c_args = [
        dtype.as_c_float_p(tomo),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta),
        dtype.as_c_float_p(recon),
        dtype.as_c_int(kwargs['num_gridx']),
        dtype.as_c_int(kwargs['num_gridy']),
        dtype.as_c_int(kwargs['num_iter']),
    ]
    return LIB_TOMOPY_RECON.sirt(*c_args)
def c_tv(tomo, center, recon, theta, **kwargs):
    """Run the total-variation (TV) regularized kernel from the C extension.

    *recon* is updated in place. Required kwargs: num_gridx, num_gridy,
    num_iter, reg_par.
    """
    shape = tomo.shape
    # A 2-D sinogram is a single slice: treat the missing y-axis as size 1.
    dy, dt, dx = ((1,) + shape) if len(shape) == 2 else shape
    LIB_TOMOPY_RECON.tv.restype = dtype.as_c_void_p()
    c_args = [
        dtype.as_c_float_p(tomo),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta),
        dtype.as_c_float_p(recon),
        dtype.as_c_int(kwargs['num_gridx']),
        dtype.as_c_int(kwargs['num_gridy']),
        dtype.as_c_int(kwargs['num_iter']),
        dtype.as_c_float_p(kwargs['reg_par']),
    ]
    return LIB_TOMOPY_RECON.tv(*c_args)
def c_grad(tomo, center, recon, theta, **kwargs):
    """Run the gradient-descent reconstruction kernel from the C extension.

    *recon* is updated in place. Required kwargs: num_gridx, num_gridy,
    num_iter, reg_par.
    """
    shape = tomo.shape
    # A 2-D sinogram is a single slice: treat the missing y-axis as size 1.
    dy, dt, dx = ((1,) + shape) if len(shape) == 2 else shape
    LIB_TOMOPY_RECON.grad.restype = dtype.as_c_void_p()
    c_args = [
        dtype.as_c_float_p(tomo),
        dtype.as_c_int(dy),
        dtype.as_c_int(dt),
        dtype.as_c_int(dx),
        dtype.as_c_float_p(center),
        dtype.as_c_float_p(theta),
        dtype.as_c_float_p(recon),
        dtype.as_c_int(kwargs['num_gridx']),
        dtype.as_c_int(kwargs['num_gridy']),
        dtype.as_c_int(kwargs['num_iter']),
        dtype.as_c_float_p(kwargs['reg_par']),
    ]
    return LIB_TOMOPY_RECON.grad(*c_args)
def c_tikh(tomo, | |
start = time.time()
self.mode = mode
self.sampling_mode = sampling_mode
self.negative_size = negative_size
self.max_pos_size = max_pos_size
self.expand_factor = expand_factor
self.cache_refresh_time = cache_refresh_time
self.normalize_embed = normalize_embed
self.test_topk = test_topk
self.node_features = graph_dataset.g_full.ndata['x']
full_graph = graph_dataset.g_full.to_networkx()
train_node_ids = graph_dataset.train_node_ids
roots = [node for node in full_graph.nodes() if full_graph.in_degree(node) == 0]
if len(roots) > 1:
self.root = len(full_graph.nodes)
for r in roots:
full_graph.add_edge(self.root, r)
root_vector = torch.mean(self.node_features[roots], dim=0, keepdim=True)
self.node_features = torch.cat((self.node_features, root_vector), 0)
self.vocab = graph_dataset.vocab + ['root', 'leaf']
train_node_ids.append(self.root)
else:
self.root = roots[0]
self.vocab = graph_dataset.vocab + ['leaf']
self.full_graph = full_graph
if mode == 'train':
# add pseudo leaf node to core graph
self.core_subgraph = self._get_holdout_subgraph(train_node_ids)
self.pseudo_leaf_node = len(full_graph.nodes)
for node in list(self.core_subgraph.nodes()):
self.core_subgraph.add_edge(node, self.pseudo_leaf_node)
self.leaf_nodes = [node for node in self.core_subgraph.nodes() if self.core_subgraph.out_degree(node) == 1]
# for pseudo leaf node
leaf_vector = torch.zeros((1, self.node_features.size(1))) # zero vector works best
self.node_features = torch.cat((self.node_features, leaf_vector), 0)
if self.normalize_embed:
self.node_features = F.normalize(self.node_features, p=2, dim=1)
# add interested node list and subgraph
# remove supersource nodes (i.e., nodes without in-degree 0)
interested_node_set = set(train_node_ids) - set([self.root])
self.node_list = list(interested_node_set)
# build node2pos, node2nbs, node2edge
self.node2pos, self.node2edge = {}, {}
self.node2parents, self.node2children, self.node2nbs = {}, {}, {self.pseudo_leaf_node:[]}
for node in interested_node_set:
parents = set(self.core_subgraph.predecessors(node))
children = set(self.core_subgraph.successors(node))
if len(children) > 1:
children = [i for i in children if i != self.pseudo_leaf_node]
node_pos_edges = [(pre, suc) for pre in parents for suc in children if pre!=suc]
self.node2edge[node] = set(self.core_subgraph.in_edges(node)).union(set(self.core_subgraph.out_edges(node)))
self.node2pos[node] = node_pos_edges
self.node2parents[node] = parents
self.node2children[node] = children
self.node2nbs[node] = parents.union(children)
self.node2nbs[self.root] = set([n for n in self.core_subgraph.successors(self.root) if n != self.pseudo_leaf_node])
self.valid_node_list = graph_dataset.validation_node_ids
holdout_subgraph = self._get_holdout_subgraph(graph_dataset.train_node_ids + graph_dataset.validation_node_ids)
self.valid_node2pos = self._find_insert_posistion(graph_dataset.validation_node_ids, holdout_subgraph)
self.test_node_list = graph_dataset.test_node_ids
holdout_subgraph = self._get_holdout_subgraph(graph_dataset.train_node_ids + graph_dataset.test_node_ids)
self.test_node2pos = self._find_insert_posistion(graph_dataset.test_node_ids, holdout_subgraph)
# used for sampling negative positions during train/validation stage
self.pointer = 0
self.all_edges = list(self._get_candidate_positions(self.core_subgraph))
self.edge2dist = {(u, v): nx.shortest_path_length(self.core_subgraph, u, v) for (u, v) in self.all_edges}
random.shuffle(self.all_edges)
elif mode == 'test':
# add pseudo leaf node to core graph
self.core_subgraph = self.full_graph
self.pseudo_leaf_node = len(full_graph.nodes)
self.node_list = list(self.core_subgraph.nodes())
for node in self.node_list:
self.core_subgraph.add_edge(node, self.pseudo_leaf_node)
self.leaf_nodes = [node for node in self.core_subgraph.nodes() if
self.core_subgraph.out_degree(node) == 1]
# for pseudo leaf node
leaf_vector = torch.zeros((1, self.node_features.size(1))) # zero vector works best
self.node_features = torch.cat((self.node_features, leaf_vector), 0)
if self.normalize_embed:
self.node_features = F.normalize(self.node_features, p=2, dim=1)
# used for sampling negative positions during train/validation stage
self.all_edges = list(self._get_candidate_positions(self.core_subgraph))
end = time.time()
print(f"Finish loading dataset ({end - start} seconds)")
def __str__(self):
return f"{self.__class__.__name__} mode:{self.mode}"
def __len__(self):
return len(self.node_list)
    def __getitem__(self, idx):
        """Generate a data instance based on train/validation/test mode.

        One data instance is a tuple of [u, v, query_node, flags] entries
        where flags = (label, u_flag, v_flag, e_flag).
        If self.sampling_mode == 0:
            This list may contain more than one triplets with label = 1
        If self.sampling_mode == 1:
            This list contain one and ONLY one triplet with label = 1, others have label = 0
        """
        res = []
        query_node = self.node_list[idx]
        # generate positive triplet(s)
        if self.sampling_mode == 0:
            pos_positions = self.node2pos[query_node]
            # cap the number of positives during training to bound instance size
            if len(pos_positions) > self.max_pos_size and self.mode == 'train':
                pos_positions = random.sample(pos_positions, k=self.max_pos_size)
            for u, v in pos_positions:
                res.append([u, v, query_node, (1, 1, 1, 1)])
        elif self.sampling_mode > 0:
            u, v = random.choice(self.node2pos[query_node])
            res.append([u, v, query_node, (1, 1, 1, 1)])
        # select negative parents
        negative_size = len(res) if self.negative_size == -1 else self.negative_size
        negative_anchors = self._get_negative_anchors(query_node, negative_size)
        # generate negative triplets; the extra flags give partial credit:
        # u_flag: the parent anchor is correct, v_flag: the child anchor is
        # correct, e_flag: the anchor edge lies within distance 2 in the core graph
        for u, v in negative_anchors:
            u_flag = int(u in self.node2parents[query_node])
            v_flag = int(v in self.node2children[query_node])
            e_flag = int(self.edge2dist[(u, v)] <= 2)
            res.append([u, v, query_node, (0, u_flag, v_flag, e_flag)])
        return tuple(res)
    def _get_holdout_subgraph(self, node_ids):
        """Induce a DAG on *node_ids* that preserves reachability.

        Paths through removed nodes are reconnected by bridging each nearest
        kept ancestor to each nearest kept descendant, then transitive
        ("jump") edges are pruned so only the minimal edges remain.
        """
        node_to_remove = [n for n in self.full_graph.nodes if n not in node_ids]
        subgraph = self.full_graph.subgraph(node_ids).copy()
        for node in node_to_remove:
            parents = set()
            children = set()
            # BFS up/down from the removed node until reaching kept nodes
            ps = deque(self.full_graph.predecessors(node))
            cs = deque(self.full_graph.successors(node))
            while ps:
                p = ps.popleft()
                if p in subgraph:
                    parents.add(p)
                else:
                    ps += list(self.full_graph.predecessors(p))
            while cs:
                c = cs.popleft()
                if c in subgraph:
                    children.add(c)
                else:
                    cs += list(self.full_graph.successors(c))
            # bridge every nearest kept ancestor to every nearest kept descendant
            for p in parents:
                for c in children:
                    subgraph.add_edge(p, c)
        # remove jump edges: a direct edge that is also implied transitively
        node2descendants = {n: set(descendants(subgraph, n)) for n in subgraph.nodes}
        for node in subgraph.nodes():
            if subgraph.out_degree(node) > 1:
                successors1 = set(subgraph.successors(node))
                successors2 = set(chain.from_iterable([node2descendants[n] for n in successors1]))
                checkset = successors1.intersection(successors2)
                if checkset:
                    for s in checkset:
                        subgraph.remove_edge(node, s)
        return subgraph
def _get_candidate_positions(self, graph):
node2descendants = {n: set(descendants(graph, n)) for n in graph.nodes}
candidates = set(chain.from_iterable([[(n, d) for d in ds] for n, ds in node2descendants.items()]))
return candidates
    def _find_insert_posistion(self, node_ids, holdout_graph, ignore=[]):
        """For each held-out node, compute its gold (parent, child) positions.

        BFS walks through other held-out nodes until hitting nodes present in
        the core subgraph; childless nodes attach above the pseudo leaf node.
        `ignore` is read-only here, so the mutable default is safe.
        """
        node2pos = {}
        subgraph = self.core_subgraph
        for node in node_ids:
            if node in ignore:
                continue
            parents = set()
            children = set()
            ps = deque(holdout_graph.predecessors(node))
            cs = deque(holdout_graph.successors(node))
            while ps:
                p = ps.popleft()
                if p in subgraph:
                    parents.add(p)
                else:
                    ps += list(holdout_graph.predecessors(p))
            while cs:
                c = cs.popleft()
                if c in subgraph:
                    children.add(c)
                else:
                    cs += list(holdout_graph.successors(c))
            if not children:
                children.add(self.pseudo_leaf_node)
            position = [(p, c) for p in parents for c in children if p!=c]
            node2pos[node] = position
        return node2pos
def _get_negative_anchors(self, query_node, negative_size):
if self.sampling_mode == 0:
return self._get_at_most_k_negatives(query_node, negative_size)
elif self.sampling_mode == 1:
return self._get_exactly_k_negatives(query_node, negative_size)
    def _get_at_most_k_negatives(self, query_node, negative_size):
        """Generate AT MOST negative_size samples for the query node.

        Consumes successive slices of the pre-shuffled self.all_edges via
        self.pointer, dropping positions that are true positions or edges of
        the query node.
        """
        # reshuffle each time the pointer wraps back to the start
        if self.pointer == 0:
            random.shuffle(self.all_edges)
        # NOTE(review): if every slice filters to empty this loop keeps
        # scanning forever; it assumes some valid negative exists in all_edges.
        while True:
            negatives = [ele for ele in self.all_edges[self.pointer: self.pointer + negative_size] if
                         ele not in self.node2pos[query_node] and ele not in self.node2edge[query_node]]
            if len(negatives) > 0:
                break
            self.pointer += negative_size
            if self.pointer >= len(self.all_edges):
                self.pointer = 0
        return negatives
    def _get_exactly_k_negatives(self, query_node, negative_size, ignore=[]):
        """Generate EXACTLY negative_size samples for the query node.

        Keeps pulling filtered slices from the shuffled self.all_edges until
        enough negatives accumulate, reshuffling on wrap-around. `ignore` is
        read-only here, so the mutable default is safe.
        """
        if self.pointer == 0:
            random.shuffle(self.all_edges)
        negatives = []
        while len(negatives) != negative_size:
            n_lack = negative_size - len(negatives)
            negatives.extend([ele for ele in self.all_edges[self.pointer: self.pointer + n_lack] if
                              ele not in self.node2pos[query_node] and ele not in self.node2edge[query_node] and ele not in ignore])
            self.pointer += n_lack
            if self.pointer >= len(self.all_edges):
                self.pointer = 0
                random.shuffle(self.all_edges)
        # the last extend may overshoot; trim to the exact size
        if len(negatives) > negative_size:
            negatives = negatives[:negative_size]
        return negatives
class GraphDataset(RawDataset):
def __init__(self, graph_dataset, mode="train", sampling_mode=1, negative_size=32, max_pos_size=100,
expand_factor=64, cache_refresh_time=128, normalize_embed=False, test_topk=-1):
super(GraphDataset, self).__init__(graph_dataset, mode, sampling_mode, negative_size, max_pos_size,
expand_factor, cache_refresh_time, normalize_embed, test_topk)
# used for caching local subgraphs
self.cache = {} # if g = self.cache[anchor_node], then g is the egonet centered on the anchor_node
self.cache_counter = {} # if n = self.cache[anchor_node], then n is the number of times you used this cache
lg = dgl.DGLGraph()
lg.add_nodes(1, {"_id": torch.tensor([self.pseudo_leaf_node]), "pos": torch.tensor([1])})
lg.add_edges(lg.nodes(), lg.nodes())
self.cache[self.pseudo_leaf_node] = lg
    def __getitem__(self, idx):
        """Generate a data instance based on train/validation/test mode.

        One data instance is a tuple of [u, v, u_egonet, v_egonet, query_node,
        flags] entries where flags = (label, u_flag, v_flag, e_flag).
        If self.sampling_mode == 0:
            This list may contain more than one triplets with label = 1
        If self.sampling_mode == 1:
            This list contain one and ONLY one triplet with label = 1, others have label = 0
        """
        res = []
        query_node = self.node_list[idx]
        # generate positive triplet(s)
        if self.sampling_mode == 0:
            pos_positions = self.node2pos[query_node]
            # cap the number of positives during training to bound instance size
            if len(pos_positions) > self.max_pos_size and self.mode == 'train':
                pos_positions = random.sample(pos_positions, k=self.max_pos_size)
            for u, v in pos_positions:
                u_egonet, v_egonet = self._get_subgraph_and_node_pair(query_node, u, v)
                res.append([u, v, u_egonet, v_egonet, query_node, (1, 1, 1, 1)])
        elif self.sampling_mode > 0:
            u, v = random.choice(self.node2pos[query_node])
            u_egonet, v_egonet = self._get_subgraph_and_node_pair(query_node, u, v)
            res.append([u, v, u_egonet, v_egonet, query_node, (1, 1, 1, 1)])
        # select negative parents
        negative_size = len(res) if self.negative_size == -1 else self.negative_size
        negative_anchors = self._get_negative_anchors(query_node, negative_size)
        # generate negative triplets; u_flag/v_flag/e_flag mark near-misses
        # (correct parent only, correct child only, anchor edge within distance 2)
        for u, v in negative_anchors:
            u_egonet, v_egonet = self._get_subgraph_and_node_pair(query_node, u, v)
            u_flag = int(u in self.node2parents[query_node])
            v_flag = int(v in self.node2children[query_node])
            e_flag = int(self.edge2dist[(u, v)] <= 2)
            res.append([u, v, u_egonet, v_egonet, query_node, (0, u_flag, v_flag, e_flag)])
        return tuple(res)
def _check_cache_flag(self, node):
return (node in self.cache) and (self.cache_counter[node] < self.cache_refresh_time)
def _get_subgraph_and_node_pair(self, query_node, anchor_node_u, anchor_node_v):
""" Generate anchor_egonet and obtain query_node feature
instance_mode: 0 means negative example, 1 means positive example
"""
# [IMPORTANT]
# if anchor_node_u == self.pseudo_leaf_node:
# return self.cache[anchor_node_u]
if anchor_node_u == self.pseudo_leaf_node:
g_u = self.cache[anchor_node_u]
else:
u_cache_flag = self._check_cache_flag(anchor_node_u)
u_flag = ((query_node < 0) or (anchor_node_u not | |
#!/usr/bin/env python
'''
This scripts uses python 3 and the following libraries need to be installed 'pandas', 'ete3' and 'argparse' installed.
The blastn file needs NO modification. As long as blastn format 6 output with options
"query.id", "query.length", "pident", "subject.id", "subject.GBid", "evalue", "bit.score","staxids", "sscinames", "sblastnames", "qcovs", "qcovhsp"
are used
If you use the taxonly option because you do not want to or do not have the ASV/OTU table, the output taxonomy file may be missing some entries compared to the number of ASVs/OTUs you are expecting. This is because some ASVs/OTUs will either have no hits with blastn or that no taxonomy could be returned (e.g. minimum coverage not respected, conflicting taxonomy at Kingdom level, etc.).
'''
import pandas as pd
import csv
from ete3 import NCBITaxa
import re
import argparse
import numpy as np
import sys
import time
from tqdm import tqdm
# Parser program
def handle_program_options():
    """Parse the command-line options.

    Returns the argparse Namespace with: btbl (required blastn file), ftbl,
    output_ftbl (required output path), similarity/coverage thresholds
    (minSim, minCov, and the per-rank pident cut-offs), plus the update,
    pident, taxonly and verbose switches.
    """
    parser = argparse.ArgumentParser(
        description='Takes blastn multiple hits, trim uncultured or unidentified hits or environmental samples, assign taxo to feature based on percent similarity (for species) and Last Common Ancestor method')
    # input/output files
    parser.add_argument('-b', "--btbl", required=True,
                        help='blastn format 6 output with options "query.id", "query.length", "pident", "subject.id", "subject.GBid", "evalue", "bit.score","staxids", "sscinames", "sblastnames", "qcovs", "qcovhsp", [REQUIRED]')
    parser.add_argument("-f", "--ftbl", required=False,
                        help="Feature id table in txt format [REQUIRED]")
    parser.add_argument("-o", "--output_ftbl", required=True,
                        help="Output path for the transformed feature_id table. [REQUIRED]")
    # global similarity/coverage filters
    parser.add_argument('--minSim', default=97, type=int,
                        help='Minimum similarity to assign species hits (default: 97)')
    parser.add_argument('--minCov', default=80, type=int,
                        help='Minimum coverage to keep hits (default: 80)')
    parser.add_argument('--update', default="False", type=str,
                        help='Should the taxonomy database be updated')
    # per-rank percent-identity thresholds (used when --pident is enabled)
    parser.add_argument('--pident', default='no', type=str,
                        help='To reduce taxonomy assingment according to default percent identity thresholds. Options are: before or after LCA assingment')
    parser.add_argument('--pgenus', default=95, type=int,
                        help='Minimum similarity to assign genus (default: 95)')
    parser.add_argument('--pfamily', default=87, type=int,
                        help='Minimum similarity to assign family (default: 87)')
    parser.add_argument('--porder', default=83, type=int,
                        help='Minimum similarity to assign order (default: 83)')
    parser.add_argument('--pclass', default=81, type=int,
                        help='Minimum similarity to assign class (default: 81)')
    parser.add_argument('--pphylum', default=79, type=int,
                        help='Minimum similarity to assign phylum (default: 79)')
    parser.add_argument('--pkingdom', default=71, type=int,
                        help='Minimum similarity to assign kingdom (default: 71)')
    parser.add_argument('--taxonly', default="False", type=str,
                        help='Do not require the ASV/OTU table')
    parser.add_argument('-v', '--verbose', action='store_true')
    return parser.parse_args()
# Progress bar
def progress(count, total, status=''):
    """Render a 60-character in-place progress bar on stdout."""
    width = 60
    filled = int(round(width * count / float(total)))
    pct = round(100.0 * count / float(total), 1)
    bar = '=' * filled + '-' * (width - filled)
    # \r rewinds to the line start so successive calls overwrite the bar
    sys.stdout.write(f'[{bar}] {pct}% ...{status}\r')
    sys.stdout.flush()
# Function to retrieve taxonomy
def get_desired_ranks(ncbi, taxid, desired_ranks):
lineage = ncbi.get_lineage(taxid)
names = ncbi.get_taxid_translator(lineage)
lineage2ranks = ncbi.get_rank(names)
ranks2lineage = dict((rank, taxid)
for (taxid, rank) in lineage2ranks.items())
taxo = []
for rank in desired_ranks:
if rank in ranks2lineage:
taxo.append(names[ranks2lineage[rank]])
else:
taxo.append("NA")
return ";".join(taxo)
# Insert taxonomy into blast table
def taxo_assignment(tabl, dict_blast):
    """Map each row's 'staxids' value through *dict_blast*, preserving row order."""
    print('\nAssigning full taxonomy to blast table')
    taxo = [dict_blast[staxid] for staxid in tabl['staxids']]
    print('\nAssigning full taxonomy to blast table is completed')
    return taxo
# PIDENT
# Function to reduce taxonomy assignment depending on pident value
def pidentThresholds(row):
    """Blank out taxonomic ranks that row['pident'] is too low to support.

    Walks the rank-specific cut-offs from kingdom down to species and sets
    every rank below the supported depth to "NA".

    NOTE(review): relies on module-level names (pkingdom, pphylum, pclass,
    porder, pfamily, pgenus, minSim) — presumably bound from the CLI
    arguments before this is applied; confirm they are set as globals.
    """
    # print(row.taxonomy)
    if row['pident'] < pkingdom:
        # below the kingdom cut-off: no rank can be trusted
        row[["kingdom", 'phylum', 'class', 'order', 'family', 'genus',
             'species']] = "NA", "NA", "NA", "NA", "NA", "NA", "NA"
        return row
    elif row['pident'] < pphylum:
        row[["kingdom", 'phylum', 'class', 'order', 'family', 'genus',
             'species']] = row["kingdom"], "NA", "NA", "NA", "NA", "NA", "NA"
        return row
    elif row['pident'] < pclass:
        row[["kingdom", 'phylum', 'class', 'order', 'family', 'genus', 'species']
            ] = row["kingdom"], row['phylum'], "NA", "NA", "NA", "NA", "NA"
        return row
    elif row['pident'] < porder:
        row[["kingdom", 'phylum', 'class', 'order', 'family', 'genus', 'species']
            ] = row["kingdom"], row['phylum'], row['class'], "NA", "NA", "NA", "NA"
        return row
    elif row['pident'] < pfamily:
        row[["kingdom", 'phylum', 'class', 'order', 'family', 'genus', 'species']
            ] = row["kingdom"], row['phylum'], row['class'], row['order'], "NA", "NA", "NA"
        return row
    elif row['pident'] < pgenus:
        row[["kingdom", 'phylum', 'class', 'order', 'family', 'genus', 'species']
            ] = row["kingdom"], row['phylum'], row['class'], row['order'], row['family'], "NA", "NA"
        return row
    elif row['pident'] < minSim:
        row[["kingdom", 'phylum', 'class', 'order', 'family', 'genus', 'species']
            ] = row["kingdom"], row['phylum'], row['class'], row['order'], row['family'], row['genus'], "NA"
        return row
    else:
        # fully supported: keep every rank, species included
        row[["kingdom", 'phylum', 'class', 'order', 'family', 'genus', 'species']
            ] = row["kingdom"], row['phylum'], row['class'], row['order'], row['family'], row['genus'], row['species']
        return row
# LCA
# If similarity of best hit => 97%, assign to species level, otherwise assign to last common ancestor
def taxo_consensus(tabl, tabl2, minSim):
    """LCA consensus: keep species for best hits with pident >= minSim,
    otherwise walk up the ranks blanking any rank where the hits disagree.

    tabl: one best hit per query (indexed by query id); tabl2: all hits.
    Returns tabl with per-rank columns resolved and a joined 'taxonomy'
    column.
    """
    print('\nKeeping species name if %similarity is >= minSim (default 97%) otherwise find LCA\n')
    new = tabl
    #new['species'] = ["" if new[new.index == ind]["pident"].iat[0] < minSim else new[new.index == ind]["species"].iat[0] for ind in new.index]
    def Remove(sets):
        # drop the empty-string placeholder so it never counts as a taxon
        sets.discard("")
        return(sets)
    rankLevel = 0
    listRanks = ['species', 'genus', 'family', 'order',
                 'class', 'phylum', 'kingdom', 'superkingdom']
    #t0 = time.time()
    for i in tqdm(range(len(listRanks))):
        #t1 = time.time()
        #progress(rankLevel,len(listRanks[i]), status = "Progress")
        for query, row in tqdm(new.iterrows()):
            # all distinct names reported at this rank across the query's hits
            setTaxo = set(tabl2[tabl2['query.id'] == query][listRanks[i]])
            setTaxo = Remove(setTaxo)
            if row['pident'] < minSim and len(setTaxo) > 1:
                # conflicting names: blank this rank and every rank below it
                new.loc[query, listRanks[i]] = ""
                x = rankLevel
                while x > 0:
                    new.loc[query, listRanks[i-x]] = ""
                    x -= 1
            elif row['pident'] < minSim:
                # unanimous (or empty) at this rank: keep the single name
                s = list(setTaxo)
                s = ['' if v is None else v for v in s]
                s = ''.join(str(s))
                new.loc[query, listRanks[i]] = s
        rankLevel += 1
        #t2 = time.time()
        #print(" {}s (estimated remaining time: {}m,s)".format(t2-t1, t2-t0))
    # join the rank columns into a single ';'-separated taxonomy string
    for query, row in new.iterrows():
        a = new.columns.get_loc('superkingdom')
        b = new.columns.get_loc('species')
        c = str(row[a:b + 1].str.cat(sep=';'))
        # strip the list/set formatting artifacts introduced above
        c = c.replace("{", "").replace("}", "").replace("[", "").replace("]", "").replace("'", "").replace(
            " ,", "").replace(", ", "").replace(",", "").replace("NA", "").replace("nan", "")
        c = re.sub(' sp\..*', ' sp.', c)
        # need to correspond to number of ranks...
        c = re.sub('^;;;;;;;', 'Unknown', c)
        new.loc[query, 'taxonomy'] = c
    return new
# Functions for taxo assignment based on any of the 3 options
def pident_bef_LCA(b_trimmed, mS):
    """Apply per-rank pident thresholds first, then LCA consensus (mS = minSim)."""
    kept = b_trimmed[b_trimmed.taxonomy != "NA"]
    # trim ranks unsupported by each hit's percent identity
    kept = kept.apply(pidentThresholds, axis=1)
    kept = kept.replace(r'NA', "", regex=True)
    # one best hit (lowest e-value) per query for the consensus pass
    best_hits = kept.groupby('query.id', group_keys=False).apply(
        lambda grp: grp.loc[grp.evalue.idxmin()])
    consensus = taxo_consensus(best_hits, kept, mS)
    print('\nPident trimming and LCA completed')
    return consensus
def LCA_bef_pident(b_trimmed, mS):
    """Run LCA consensus first, then reduce ranks by pident thresholds.

    mS is the minimum similarity for species-level assignment. Returns the
    per-query table with a joined 'taxonomy' column.

    Fix: the NaN -> 'NA' normalization previously called `tt.replace(...)`
    on the undefined name `tt`, raising NameError at runtime; it now
    operates on `f_btbl` as intended.
    """
    # LCA assingment. If similarity of best hit => 97%, assign to species level, otherwise assign to last common ancestor
    b_trimmed = b_trimmed.replace(r'NA', np.nan, regex=True)
    # one best hit (lowest e-value) per query for the consensus pass
    dummy2 = b_trimmed.groupby('query.id', group_keys=False).apply(
        lambda x: x.loc[x.evalue.idxmin()])
    f_btbl = taxo_consensus(dummy2, b_trimmed, mS)
    # Pident thresholds
    f_btbl = f_btbl[f_btbl.taxonomy != "NA"]
    f_btbl = f_btbl.apply(pidentThresholds, axis=1)
    # normalize empty strings and NaN to 'NA' before joining the ranks
    f_btbl = f_btbl.replace(r'^\s*$', 'NA', regex=True)
    f_btbl = f_btbl.replace(np.nan, 'NA', regex=True)  # was: tt.replace(...) -> NameError
    f_btbl['taxonomy'] = f_btbl[['superkingdom', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']].apply(lambda x: ';'.join(x), axis=1)
    f_btbl['taxonomy'] = f_btbl.taxonomy.str.replace("[", "").str.replace("]", "").str.replace("'", "").str.replace(" ,", "").str.replace(", ", "").str.replace(",", "").str.replace("nan", "").str.replace("NA", "").str.replace("^;;;;;;;", "Unknown")
    print('\nLCA and Pident trimming completed')
    return f_btbl
def LCA_only(b_trimmed, mS):
    """LCA consensus assignment only, with no pident-based rank trimming (mS = minSim)."""
    cleaned = b_trimmed.replace(r'NA', np.nan, regex=True)
    # one best hit (lowest e-value) per query for the consensus pass
    best_hits = cleaned.groupby('query.id', group_keys=False).apply(
        lambda grp: grp.loc[grp.evalue.idxmin()])
    consensus = taxo_consensus(best_hits, cleaned, mS)
    print('\nLCA completed')
    return consensus
# Assign taxonomy to each feature-id
def blast_to_feature_tbl(btbl, ftbl):
    """Attach taxonomy / pident / coverage from *btbl* to each '#OTU ID' of *ftbl*.

    Features without a blast hit get taxonomy "No hit". Returns only the
    assignment columns. Note: *ftbl* is modified in place (columns added).
    """
    print('\nAssign full taxonomy to each feature-id')
    out = ftbl
    out['taxonomy'] = ""
    out["Percent_Identity"] = ""
    out["Sequence_coverage"] = ""
    # hoist the membership lookup out of the loop
    hit_ids = set(btbl['query.id'])
    for idx, row in out.iterrows():
        otu = row['#OTU ID']
        if otu in hit_ids:
            hits = btbl[btbl['query.id'] == otu]
            out.at[idx, 'taxonomy'] = hits['taxonomy'].iat[0]
            out.at[idx, 'Percent_Identity'] = round(hits['pident'].iat[0])
            out.at[idx, 'Sequence_coverage'] = round(hits['qcovs'].iat[0])
        else:
            out.at[idx, 'taxonomy'] = "No hit"
    # To return only taxo assingment information
    return out[["#OTU ID", "taxonomy", "Percent_Identity", "Sequence_coverage"]]
# Create taxonomy table only
def blast_to_taxonomy_tbl(btbl, ftbl):
    """Build a taxonomy-only table keyed by the 'ASVs' column of *ftbl*.

    Same assignment logic as blast_to_feature_tbl, but starts from just the
    ASV identifiers; unmatched ASVs get taxonomy "No hit".
    """
    print('\nAssign full taxonomy to each feature-id')
    #new_ftbl = ftbl["ASVs"].to_frame(name=None)
    out = ftbl["ASVs"].to_frame()
    out['taxonomy'] = ""
    out["Percent_Identity"] = ""
    out["Sequence_coverage"] = ""
    # hoist the membership lookup out of the loop
    hit_ids = set(btbl['query.id'])
    for idx, row in out.iterrows():
        asv = row['ASVs']
        if asv in hit_ids:
            hits = btbl[btbl['query.id'] == asv]
            out.at[idx, 'taxonomy'] = hits['taxonomy'].iat[0]
            out.at[idx, 'Percent_Identity'] = round(hits['pident'].iat[0])
            out.at[idx, 'Sequence_coverage'] = round(hits['qcovs'].iat[0])
        else:
            out.at[idx, 'taxonomy'] = "No hit"
    # To return only taxo assingment information
    return out[["ASVs", "taxonomy", "Percent_Identity", "Sequence_coverage"]]
############################## MAIN #################################################
def main():
args = handle_program_options()
minSim = args.minSim
minCov = args.minCov
taxonly = args.taxonly.lower()
update = args.update.lower()
blast = pd.read_table(args.btbl,
names=["query.id", "query.length", "pident", "subject.id", "subject.GBid", "evalue",
"bit.score", "staxids", "sscinames", "sblastnames", "qcovs", | |
dwCookie: The connection cookie previously returned from
System.Runtime.InteropServices.UCOMIConnectionPoint.Advise(System.Object,System.
Int32@).
"""
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
class UCOMIConnectionPointContainer:
    """Deprecated COM-interop stub.

    Use System.Runtime.InteropServices.ComTypes.IConnectionPointContainer
    instead.
    """
    # NOTE: generated interop stub — method bodies are placeholders; the real
    # implementation is supplied by the underlying COM object at runtime.
    def EnumConnectionPoints(self, ppEnum):
        """EnumConnectionPoints(self: UCOMIConnectionPointContainer) -> UCOMIEnumConnectionPoints

        Creates an enumerator of all the connection points supported in the
        connectable object, one connection point per IID.
        """
        pass
    def FindConnectionPoint(self, riid, ppCP):
        """FindConnectionPoint(self: UCOMIConnectionPointContainer, riid: Guid) -> (Guid, UCOMIConnectionPoint)

        Asks the connectable object if it has a connection point for a
        particular IID, and if so, returns the IConnectionPoint interface
        pointer to that connection point.

        riid: A reference to the outgoing interface IID whose connection point
        is being requested.
        """
        pass
    def __init__(self, *args): #cannot find CLR method
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
        pass
class UCOMIEnumConnectionPoints:
    """ Use System.Runtime.InteropServices.ComTypes.IEnumConnectionPoints instead. """
    # Deprecated CLR interop stub: the real implementation is supplied by the
    # runtime, so the Python method bodies below are placeholders only.
    def Clone(self, ppenum):
        """
        Clone(self: UCOMIEnumConnectionPoints) -> UCOMIEnumConnectionPoints
        Creates another enumerator that contains the same enumeration state as the
        current one.
        """
        pass
    def Next(self, celt, rgelt, pceltFetched):
        """
        Next(self: UCOMIEnumConnectionPoints, celt: int) -> (int, Array[UCOMIConnectionPoint], int)
        Retrieves a specified number of items in the enumeration sequence.
        celt: The number of IConnectionPoint references to return in rgelt.
        Returns: S_OK if the pceltFetched parameter equals the celt parameter; otherwise,
        S_FALSE.
        """
        pass
    def Reset(self):
        """
        Reset(self: UCOMIEnumConnectionPoints) -> int
        Resets the enumeration sequence to the beginning.
        Returns: An HRESULT with the value S_OK.
        """
        pass
    def Skip(self, celt):
        """
        Skip(self: UCOMIEnumConnectionPoints, celt: int) -> int
        Skips over a specified number of items in the enumeration sequence.
        celt: The number of elements to skip in the enumeration.
        Returns: S_OK if the number of elements skipped equals the celt parameter; otherwise,
        S_FALSE.
        """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class UCOMIEnumConnections:
    """ Use System.Runtime.InteropServices.ComTypes.IEnumConnections instead. """
    # Deprecated CLR interop stub: the real implementation is supplied by the
    # runtime, so the Python method bodies below are placeholders only.
    def Clone(self, ppenum):
        """
        Clone(self: UCOMIEnumConnections) -> UCOMIEnumConnections
        Creates another enumerator that contains the same enumeration state as the
        current one.
        """
        pass
    def Next(self, celt, rgelt, pceltFetched):
        """
        Next(self: UCOMIEnumConnections, celt: int) -> (int, Array[CONNECTDATA], int)
        Retrieves a specified number of items in the enumeration sequence.
        celt: The number of System.Runtime.InteropServices.CONNECTDATA structures to return
        in rgelt.
        Returns: S_OK if the pceltFetched parameter equals the celt parameter; otherwise,
        S_FALSE.
        """
        pass
    def Reset(self):
        """
        Reset(self: UCOMIEnumConnections)
        Resets the enumeration sequence to the beginning.
        """
        pass
    def Skip(self, celt):
        """
        Skip(self: UCOMIEnumConnections, celt: int) -> int
        Skips over a specified number of items in the enumeration sequence.
        celt: The number of elements to skip in the enumeration.
        Returns: S_OK if the number of elements skipped equals the celt parameter; otherwise,
        S_FALSE.
        """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class UCOMIEnumMoniker:
    """ Use System.Runtime.InteropServices.ComTypes.IEnumMoniker instead. """
    # Deprecated CLR interop stub: the real implementation is supplied by the
    # runtime, so the Python method bodies below are placeholders only.
    def Clone(self, ppenum):
        """
        Clone(self: UCOMIEnumMoniker) -> UCOMIEnumMoniker
        Creates another enumerator that contains the same enumeration state as the
        current one.
        """
        pass
    def Next(self, celt, rgelt, pceltFetched):
        """
        Next(self: UCOMIEnumMoniker, celt: int) -> (int, Array[UCOMIMoniker], int)
        Retrieves a specified number of items in the enumeration sequence.
        celt: The number of monikers to return in rgelt.
        Returns: S_OK if the pceltFetched parameter equals the celt parameter; otherwise,
        S_FALSE.
        """
        pass
    def Reset(self):
        """
        Reset(self: UCOMIEnumMoniker) -> int
        Resets the enumeration sequence to the beginning.
        Returns: An HRESULT with the value S_OK.
        """
        pass
    def Skip(self, celt):
        """
        Skip(self: UCOMIEnumMoniker, celt: int) -> int
        Skips over a specified number of items in the enumeration sequence.
        celt: The number of elements to skip in the enumeration.
        Returns: S_OK if the number of elements skipped equals the celt parameter; otherwise,
        S_FALSE.
        """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class UCOMIEnumString:
    """ Use System.Runtime.InteropServices.ComTypes.IEnumString instead. """
    # Deprecated CLR interop stub: the real implementation is supplied by the
    # runtime, so the Python method bodies below are placeholders only.
    def Clone(self, ppenum):
        """
        Clone(self: UCOMIEnumString) -> UCOMIEnumString
        Creates another enumerator that contains the same enumeration state as the
        current one.
        """
        pass
    def Next(self, celt, rgelt, pceltFetched):
        """
        Next(self: UCOMIEnumString, celt: int) -> (int, Array[str], int)
        Retrieves a specified number of items in the enumeration sequence.
        celt: The number of strings to return in rgelt.
        Returns: S_OK if the pceltFetched parameter equals the celt parameter; otherwise,
        S_FALSE.
        """
        pass
    def Reset(self):
        """
        Reset(self: UCOMIEnumString) -> int
        Resets the enumeration sequence to the beginning.
        Returns: An HRESULT with the value S_OK.
        """
        pass
    def Skip(self, celt):
        """
        Skip(self: UCOMIEnumString, celt: int) -> int
        Skips over a specified number of items in the enumeration sequence.
        celt: The number of elements to skip in the enumeration.
        Returns: S_OK if the number of elements skipped equals the celt parameter; otherwise,
        S_FALSE.
        """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class UCOMIEnumVARIANT:
    """ Use System.Runtime.InteropServices.ComTypes.IEnumVARIANT instead. """
    # Deprecated CLR interop stub: the real implementation is supplied by the
    # runtime, so the Python method bodies below are placeholders only.
    def Clone(self, ppenum):
        """
        Clone(self: UCOMIEnumVARIANT, ppenum: int)
        Creates another enumerator that contains the same enumeration state as the
        current one.
        ppenum: On successful return, a reference to the newly created enumerator.
        """
        pass
    def Next(self, celt, rgvar, pceltFetched):
        """
        Next(self: UCOMIEnumVARIANT, celt: int, rgvar: int, pceltFetched: int) -> int
        Retrieves a specified number of items in the enumeration sequence.
        celt: The number of elements to return in rgelt.
        rgvar: On successful return, a reference to the enumerated elements.
        pceltFetched: On successful return, a reference to the actual number of elements enumerated
        in rgelt.
        Returns: S_OK if the pceltFetched parameter equals the celt parameter; otherwise,
        S_FALSE.
        """
        pass
    def Reset(self):
        """
        Reset(self: UCOMIEnumVARIANT) -> int
        Resets the enumeration sequence to the beginning.
        Returns: An HRESULT with the value S_OK.
        """
        pass
    def Skip(self, celt):
        """
        Skip(self: UCOMIEnumVARIANT, celt: int) -> int
        Skips over a specified number of items in the enumeration sequence.
        celt: The number of elements to skip in the enumeration.
        Returns: S_OK if the number of elements skipped equals the celt parameter; otherwise,
        S_FALSE.
        """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class UCOMIMoniker:
""" Use System.Runtime.InteropServices.ComTypes.IMoniker instead. """
def BindToObject(self, pbc, pmkToLeft, riidResult, ppvResult):
"""
BindToObject(self: UCOMIMoniker, pbc: UCOMIBindCtx, pmkToLeft: UCOMIMoniker, riidResult: Guid) -> (Guid, object)
Uses the moniker to bind to the object it identifies.
pbc: A reference to the IBindCtx interface on the bind context object used in this
binding operation.
pmkToLeft: A reference to the moniker to the left of this moniker, if the moniker is part
of a composite moniker.
riidResult: The interface identifier (IID) of the interface the client intends to use to
communicate with the object that the moniker identifies.
"""
pass
def BindToStorage(self, pbc, pmkToLeft, riid, ppvObj):
"""
BindToStorage(self: UCOMIMoniker, pbc: UCOMIBindCtx, pmkToLeft: UCOMIMoniker, riid: Guid) -> (Guid, object)
Retrieves an interface pointer to the storage that contains the object
identified by the moniker.
pbc: A reference to the IBindCtx interface on the bind context object used during
this binding operation.
pmkToLeft: A reference to the moniker to the left of this moniker, if the moniker is part
of a composite moniker.
riid: The interface identifier (IID) of the storage interface requested.
"""
pass
def CommonPrefixWith(self, pmkOther, ppmkPrefix):
"""
CommonPrefixWith(self: UCOMIMoniker, | |
to the resource (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'name', 'path']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method proxy_options_namespaced_pod_12" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `proxy_options_namespaced_pod_12`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `proxy_options_namespaced_pod_12`")
# verify the required parameter 'path' is set
if ('path' not in params) or (params['path'] is None):
raise ValueError("Missing the required parameter `path` when calling `proxy_options_namespaced_pod_12`")
resource_path = '/api/v1/proxy/namespaces/{namespace}/pods/{name}/{path}'.replace('{format}', 'json')
method = 'OPTIONS'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
if 'name' in params:
path_params['name'] = params['name']
if 'path' in params:
path_params['path'] = params['path']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
    def proxy_get_namespaced_service(self, namespace, name, **kwargs):
        """
        proxy GET requests to Service
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.proxy_get_namespaced_service(namespace, name, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str name: name of the Service (required)
        :return: str
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['namespace', 'name']
        all_params.append('callback')
        # locals() snapshot holds self, namespace, name, all_params and the raw
        # kwargs dict; mutating `params` below does not affect the real locals.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method proxy_get_namespaced_service" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `proxy_get_namespaced_service`")
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `proxy_get_namespaced_service`")
        resource_path = '/api/v1/proxy/namespaces/{namespace}/services/{name}'.replace('{format}', 'json')
        method = 'GET'
        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        if 'name' in params:
            path_params['name'] = params['name']
        query_params = {}
        header_params = {}
        form_params = {}
        files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['*/*'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = []
        response = self.api_client.call_api(resource_path, method,
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=files,
                                            response_type='str',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
    def proxy_head_namespaced_service(self, namespace, name, **kwargs):
        """
        proxy HEAD requests to Service
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.proxy_head_namespaced_service(namespace, name, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str name: name of the Service (required)
        :return: str
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['namespace', 'name']
        all_params.append('callback')
        # locals() snapshot holds self, namespace, name, all_params and the raw
        # kwargs dict; mutating `params` below does not affect the real locals.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method proxy_head_namespaced_service" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `proxy_head_namespaced_service`")
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `proxy_head_namespaced_service`")
        resource_path = '/api/v1/proxy/namespaces/{namespace}/services/{name}'.replace('{format}', 'json')
        method = 'HEAD'
        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        if 'name' in params:
            path_params['name'] = params['name']
        query_params = {}
        header_params = {}
        form_params = {}
        files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['*/*'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = []
        response = self.api_client.call_api(resource_path, method,
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=files,
                                            response_type='str',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
    def proxy_put_namespaced_service(self, namespace, name, **kwargs):
        """
        proxy PUT requests to Service
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.proxy_put_namespaced_service(namespace, name, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str name: name of the Service (required)
        :return: str
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['namespace', 'name']
        all_params.append('callback')
        # locals() snapshot holds self, namespace, name, all_params and the raw
        # kwargs dict; mutating `params` below does not affect the real locals.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method proxy_put_namespaced_service" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `proxy_put_namespaced_service`")
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `proxy_put_namespaced_service`")
        resource_path = '/api/v1/proxy/namespaces/{namespace}/services/{name}'.replace('{format}', 'json')
        method = 'PUT'
        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        if 'name' in params:
            path_params['name'] = params['name']
        query_params = {}
        header_params = {}
        form_params = {}
        files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['*/*'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = []
        response = self.api_client.call_api(resource_path, method,
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=files,
                                            response_type='str',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
def proxy_post_namespaced_service(self, namespace, name, **kwargs):
"""
proxy POST requests to Service
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.proxy_post_namespaced_service(namespace, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str name: name of the Service (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method proxy_post_namespaced_service" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `proxy_post_namespaced_service`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `proxy_post_namespaced_service`")
resource_path = '/api/v1/proxy/namespaces/{namespace}/services/{name}'.replace('{format}', 'json')
method = 'POST'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
if 'name' in params:
path_params['name'] = params['name']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response | |
"""Test CZT package.
To run:
pytest test_czt.py -v
To run (with coverage):
pytest --cov . --cov-report html test_czt.py
"""
import numpy as np
import matplotlib.pyplot as plt
import pytest
import scipy
import czt
def test_compare_different_czt_methods(debug=False):
    """All CZT time/frequency evaluation methods must agree to ~12 decimals."""
    print("Compare different CZT calculation methods")
    # Create time-domain signal
    t = np.arange(0, 20e-3, 1e-4)
    x = _signal_model(t)
    # Calculate CZT using different methods
    X_czt0 = _czt(x)
    X_czt1 = czt.czt(x, simple=True)
    X_czt2 = czt.czt(x, t_method='ce')
    X_czt3 = czt.czt(x, t_method='pd')
    X_czt4 = czt.czt(x, t_method='mm')
    X_czt5 = czt.czt(x, t_method='scipy')
    X_czt6 = czt.czt(x, t_method='ce', f_method='recursive')
    X_czt7 = czt.czt(x, t_method='pd', f_method='recursive')
    # Try unsupported t_method
    with pytest.raises(ValueError):
        czt.czt(x, t_method='unsupported_t_method')
    # Try unsupported f_method
    with pytest.raises(ValueError):
        czt.czt(x, t_method='ce', f_method='unsupported_f_method')
    # Plot for debugging purposes
    if debug:
        plt.figure()
        plt.title("Imaginary component")
        plt.plot(X_czt1.imag, label="simple")
        plt.plot(X_czt2.imag, label="ce")
        plt.plot(X_czt3.imag, label="pd")
        plt.plot(X_czt4.imag, label="mm")
        plt.plot(X_czt5.imag, label="scipy")
        plt.plot(X_czt6.imag, label="ce / recursive")
        plt.plot(X_czt7.imag, label="pd / recursive")
        plt.legend()
        plt.figure()
        plt.title("Real component")
        plt.plot(X_czt1.real, label="simple")
        plt.plot(X_czt2.real, label="ce")
        plt.plot(X_czt3.real, label="pd")
        plt.plot(X_czt4.real, label="mm")
        plt.plot(X_czt5.real, label="scipy")
        plt.plot(X_czt6.real, label="ce / recursive")
        plt.plot(X_czt7.real, label="pd / recursive")
        plt.legend()
        plt.figure()
        plt.title("Absolute value")
        plt.plot(np.abs(X_czt1), label="simple")
        plt.plot(np.abs(X_czt2), label="ce")
        plt.plot(np.abs(X_czt3), label="pd")
        plt.plot(np.abs(X_czt4), label="mm")
        plt.plot(np.abs(X_czt5), label="scipy")
        plt.plot(np.abs(X_czt6), label="ce / recursive")
        plt.plot(np.abs(X_czt7), label="pd / recursive")
        plt.legend()
        plt.show()
    # Compare Toeplitz matrix multiplication methods
    np.testing.assert_almost_equal(X_czt0, X_czt1, decimal=12)
    np.testing.assert_almost_equal(X_czt0, X_czt2, decimal=12)
    np.testing.assert_almost_equal(X_czt0, X_czt3, decimal=12)
    np.testing.assert_almost_equal(X_czt0, X_czt4, decimal=12)
    np.testing.assert_almost_equal(X_czt0, X_czt5, decimal=12)
    # Compare FFT methods
    np.testing.assert_almost_equal(X_czt1, X_czt6, decimal=12)
    np.testing.assert_almost_equal(X_czt1, X_czt7, decimal=12)
def test_compare_czt_fft_dft(debug=False):
    """With default parameters the CZT and DFT must reduce to numpy's FFT."""
    print("Compare CZT, FFT and DFT")
    # Create time-domain signal
    t = np.arange(0, 20e-3 + 1e-10, 1e-4)
    x = _signal_model(t)
    dt = t[1] - t[0]
    fs = 1 / dt
    # Frequency sweep
    f = np.fft.fftshift(np.fft.fftfreq(len(t)) * fs)
    # CZT (defaults to FFT settings)
    X_czt = np.fft.fftshift(czt.czt(x))
    # FFT
    X_fft = np.fft.fftshift(np.fft.fft(x))
    # DFT (defaults to FFT settings)
    _, X_dft = czt.dft(t, x)
    # Plot for debugging purposes
    if debug:
        plt.figure()
        plt.title("Imaginary")
        plt.plot(f, X_czt.imag, label='CZT')
        plt.plot(f, X_fft.imag, label='FFT', ls='--')
        plt.plot(f, X_dft.imag, label='DFT', ls='--')
        plt.legend()
        plt.figure()
        plt.title("Real")
        plt.plot(f, X_czt.real, label='CZT')
        plt.plot(f, X_fft.real, label='FFT', ls='--')
        plt.plot(f, X_dft.real, label='DFT', ls='--')
        plt.legend()
        plt.figure()
        plt.title("Absolute")
        plt.plot(f, np.abs(X_czt), label='CZT')
        plt.plot(f, np.abs(X_fft), label='FFT', ls='--')
        plt.plot(f, np.abs(X_dft), label='DFT', ls='--')
        plt.legend()
        plt.show()
    # Compare
    np.testing.assert_almost_equal(X_czt, X_fft, decimal=12)
    np.testing.assert_almost_equal(X_czt, X_dft, decimal=12)
def test_czt_to_iczt(debug=False):
    """ICZT (simple and full) must invert the CZT back to the input signal."""
    print("Test CZT -> ICZT")
    # Create time-domain signal
    t = np.arange(0, 20e-3, 1e-4)
    x = _signal_model(t)
    # CZT (defaults to FFT)
    X_czt = czt.czt(x)
    # ICZT
    x_iczt1 = czt.iczt(X_czt)
    x_iczt2 = czt.iczt(X_czt, simple=False)
    # Try unsupported t_method
    with pytest.raises(ValueError):
        czt.iczt(X_czt, simple=False, t_method='unsupported_t_method')
    # Try M != N
    with pytest.raises(ValueError):
        czt.iczt(X_czt, simple=False, N=len(X_czt)+1)
    # Plot for debugging purposes
    if debug:
        plt.figure()
        plt.title("Imaginary")
        plt.plot(t*1e3, x.imag)
        plt.plot(t*1e3, x_iczt1.imag)
        plt.plot(t*1e3, x_iczt2.imag)
        plt.figure()
        plt.title("Real")
        plt.plot(t*1e3, x.real)
        plt.plot(t*1e3, x_iczt1.real)
        plt.plot(t*1e3, x_iczt2.real)
        plt.show()
    # Compare
    np.testing.assert_almost_equal(x, x_iczt1, decimal=12)
    np.testing.assert_almost_equal(x, x_iczt2, decimal=12)
def test_time_to_freq_to_time(debug=False):
    """Round-tripping time -> frequency -> time must recover the signal."""
    print("Test time -> freq -> time")
    # Create time-domain data
    t1 = np.arange(0, 20e-3, 1e-4)
    x1 = _signal_model(t1)
    # Frequency domain
    f, X = czt.time2freq(t1, x1)
    # Back to time domain
    t2, x2 = czt.freq2time(f, X, t=t1)
    # Plot for debugging purposes
    if debug:
        plt.figure()
        plt.title("Imaginary")
        plt.plot(t1, x1.imag, 'k', label='Original')
        plt.plot(t2, x2.imag, 'r', label='Recovered')
        plt.legend()
        plt.figure()
        plt.title("Real")
        plt.plot(t1, x1.real, 'k', label='Original')
        plt.plot(t2, x2.real, 'r', label='Recovered')
        plt.legend()
        plt.show()
    # Compare
    np.testing.assert_almost_equal(x1, x2, decimal=12)
def test_compare_iczt_idft(debug=False):
    """ICZT and IDFT must both recover the original time-domain signal."""
    print("Compare ICZT and IDFT")
    # Create time-domain signal
    t = np.arange(0, 20e-3, 1e-4)
    x = _signal_model(t)
    # Frequency domain using DFT
    f, X = czt.dft(t, x)
    # Get time-domain using ICZT
    _, x_iczt = czt.freq2time(f, X, t)
    # Get time-domain using IDFT
    _, x_idft = czt.idft(f, X, t)
    # Plot for debugging purposes
    if debug:
        plt.figure()
        plt.title("Imaginary")
        plt.plot(t, x.imag, 'k', label="Original")
        plt.plot(t, x_iczt.imag, 'g:', label="ICZT")
        plt.plot(t, x_idft.imag, 'r--', label="IDFT")
        plt.legend()
        plt.figure()
        plt.title("Real")
        plt.plot(t, x.real, 'k', label="Original")
        plt.plot(t, x_iczt.real, 'g:', label="ICZT")
        plt.plot(t, x_idft.real, 'r--', label="IDFT")
        plt.legend()
        plt.figure()
        plt.title("Real: error")
        plt.plot(t, x_iczt.real - x.real, 'k', label="Original")
        plt.show()
    # Compare
    np.testing.assert_almost_equal(x_iczt, x, decimal=12)
    np.testing.assert_almost_equal(x_idft, x, decimal=12)
    np.testing.assert_almost_equal(x_iczt, x_idft, decimal=12)
def test_frequency_zoom(debug=False):
    """Zoomed CZT/DFT over a frequency window must match the truncated full transforms."""
    print("Test frequency-domain zoom")
    # Create time-domain signal
    t = np.arange(0, 20e-3 + 1e-10, 1e-4)
    x = _signal_model(t)
    dt = t[1] - t[0]
    # Standard FFT frequency range
    f = np.fft.fftshift(np.fft.fftfreq(len(t), dt))
    # DFT
    f, X_dft1 = czt.dft(t, x, f=f)
    # CZT
    f, X_czt1 = czt.time2freq(t, x, f=f)
    # Truncate
    idx1, idx2 = 110, 180
    f_zoom = f[idx1:idx2]
    X_czt1, X_dft1 = X_czt1[idx1:idx2], X_dft1[idx1:idx2]
    # Zoom DFT
    _, X_dft2 = czt.dft(t, x, f_zoom)
    # Zoom CZT
    _, X_czt2 = czt.time2freq(t, x, f_zoom)
    # Plot for debugging purposes
    if debug:
        plt.figure()
        plt.title("Imaginary")
        plt.plot(f_zoom, np.imag(X_czt1), 'c', label='CZT')
        plt.plot(f_zoom, np.imag(X_dft1), 'k--', label='DFT')
        plt.plot(f_zoom, np.imag(X_czt2), 'r--', label='CZT (zoom)')
        plt.plot(f_zoom, np.imag(X_dft2), 'b:', label='DFT (zoom)')
        plt.legend()
        plt.figure()
        plt.title("Real")
        plt.plot(f_zoom, np.real(X_czt1), 'c', label='CZT')
        plt.plot(f_zoom, np.real(X_dft1), 'k--', label='DFT')
        plt.plot(f_zoom, np.real(X_czt2), 'r--', label='CZT (zoom)')
        plt.plot(f_zoom, np.real(X_dft2), 'b:', label='DFT (zoom)')
        plt.legend()
        plt.figure()
        plt.title("Absolute")
        plt.plot(f_zoom, np.abs(X_czt1), 'c', label='CZT')
        plt.plot(f_zoom, np.abs(X_dft1), 'k--', label='DFT')
        plt.plot(f_zoom, np.abs(X_czt2), 'r--', label='CZT (zoom)')
        plt.plot(f_zoom, np.abs(X_dft2), 'b:', label='DFT (zoom)')
        plt.legend()
        plt.show()
    # Compare
    np.testing.assert_almost_equal(X_czt1, X_czt2, decimal=12)
    np.testing.assert_almost_equal(X_czt1, X_dft1, decimal=12)
    np.testing.assert_almost_equal(X_czt1, X_dft2, decimal=12)
def test_time_zoom(debug=False):
    """freq2time over a time-window must match the masked full reconstruction."""
    print("Test time-domain zoom")
    # Create time-domain data
    t = np.arange(0, 20e-3 + 1e-10, 1e-4)
    x = _signal_model(t)
    # dt = t[1] - t[0]
    # Generate frequency-domain signal using DFT
    f, X = czt.dft(t, x)
    # Time domain
    t1, x1 = czt.freq2time(f, X, t=t)
    # Time domain: zoom
    mask = (0.001 <= t1) & (t1 <= 0.002)
    t2, x2 = czt.freq2time(f, X, t=t1[mask])
    # Plot for debugging purposes
    if debug:
        plt.figure()
        plt.title("Imaginary")
        plt.plot(t, np.imag(x), 'k-', label='Original')
        plt.plot(t1, np.imag(x1), 'ro:', label='freq2time: full')
        plt.plot(t2, np.imag(x2), 'bv-', label='freq2time: zoom')
        plt.xlim([0, 0.003])
        plt.legend()
        plt.figure()
        plt.title("Real")
        plt.plot(t, np.real(x), 'k-', label='Original')
        plt.plot(t1, np.real(x1), 'ro:', label='freq2time: full')
        plt.plot(t2, np.real(x2), 'bv-', label='freq2time: zoom')
        plt.xlim([0, 0.003])
        plt.legend()
        plt.show()
    # Compare
    np.testing.assert_almost_equal(x, x1, decimal=12)
    np.testing.assert_almost_equal(x[mask], x2, decimal=12)
def test_compare_czt_to_analytic_expression(debug=False):
    """The CZT spectrum must match the analytic spectrum of the test signal."""
    print("Compare CZT to analytic expression")
    # Create time-domain data
    t = np.linspace(0, 50, 10001) * 1e-3
    x = _signal_model(t)
    # CZT
    f, X_czt = czt.time2freq(t, x)
    # Build frequency domain signal
    X = _signal_model_f(f, len(t))
    # Transform back to time-domain
    _, x_iczt = czt.freq2time(f, X_czt, t=t)
    # Truncate
    mask = (0 < f) & (f < 5e3)
    f, X, X_czt = f[mask], X[mask], X_czt[mask]
    # Plot for debugging purposes
    if debug:
        plt.figure()
        plt.title("Freq-Domain: Imaginary")
        plt.plot(f/1e3, X_czt.imag, label="CZT")
        plt.plot(f/1e3, X.imag, 'r--', label="Analytic")
        plt.legend()
        plt.figure()
        plt.title("Freq-Domain: Real")
        plt.plot(f / 1e3, X_czt.real, label="CZT")
        plt.plot(f / 1e3, X.real, 'r--', label="Analytic")
        plt.legend()
        plt.figure()
        plt.title("Freq-Domain: Absolute")
        plt.plot(f / 1e3, np.abs(X_czt), label="CZT")
        plt.plot(f / 1e3, np.abs(X), 'r--', label="Analytic")
        plt.legend()
        plt.show()
    # Compare
    np.testing.assert_allclose(X, X_czt, atol=0.1)
    np.testing.assert_almost_equal(x, x_iczt, decimal=12)
def _signal_model(tt):
"""Generate time-domain signal for tests.
Exponentially decaying sine wave with distortion from higher-order
frequencies.
Args:
tt (np.ndarray): time sweep
Returns:
np.ndarray: time-domain signal
"""
output = (1.0 * np.sin(2 * np.pi * 1e3 * tt) +
0.3 * np.sin(2 * np.pi * 2e3 * tt) +
0.1 * np.sin(2 * np.pi * 3e3 * tt)) * np.exp(-1e3 * tt)
return output
def _signal_model_f(ff, t_npts):
"""Generate frequency-domain response for tests.
Build frequency-domain signal by convolving frequency components.
This is the frequency-domain response of _signal_model
Args:
ff (np.ndarray): frequency sweep
t_npts (int): number of points in time-domain signal
Returns:
np.ndarray: frequency-domain signal
"""
X1 = np.zeros_like(ff, dtype=complex)
idx = np.abs(ff - 1e3).argmin()
X1[idx] = 1 / 2j
idx = np.abs(ff + 1e3).argmin()
X1[idx] = -1 / 2j
idx = np.abs(ff - 2e3).argmin()
X1[idx] = 0.3 / 2j
idx = np.abs(ff + 2e3).argmin()
X1[idx] = -0.3 / 2j
idx = np.abs(ff - 3e3).argmin()
X1[idx] = 0.1 / 2j
idx = np.abs(ff + 3e3).argmin()
X1[idx] = -0.1 / 2j
X2 = 1 / (1e3 + 2j * np.pi * ff)
X = np.convolve(X1, X2)
X = X[len(X) // 4:-len(X) // 4 + 1]
X *= (ff[1] - ff[0]) * t_npts
return X
def _czt(x, M=None, W=None, A=1.0):
"""Calculate CZT (Stripped down to the basics)."""
# Unpack arguments
N = len(x)
if M is None:
M = N
if W is None:
W = np.exp(-2j * np.pi / M)
A = np.complex128(A)
W = np.complex128(W)
# CZT algorithm
k = np.arange(max(M, N))
Wk22 = W ** (-(k ** 2) / 2)
r = Wk22[:N]
c = Wk22[:M]
X = A ** -k[:N] * x / r
X = scipy.linalg.matmul_toeplitz((c, r), X)
X /= c
return X
if __name__ == "__main__":
    # Manual entry point: run one test with plots enabled for visual
    # inspection; use pytest (see module docstring) for the full suite.
    test_compare_different_czt_methods(debug=True)
    # test_compare_czt_fft_dft(debug=True)
    # test_czt_to_iczt(debug=True)
| |
<reponame>benjamindeleener/brainhack_sc_detection
#!/usr/bin/env python
# check if needed Python libraries are already installed or not
import os
import getopt
import commands
import math
import sys
import scipy
import scipy.signal
import scipy.fftpack
import pylab as pl
import sct_utils as sct
from sct_nurbs import *
from sct_utils import fsloutput
try:
import nibabel
except ImportError:
print '--- nibabel not installed! Exit program. ---'
sys.exit(2)
try:
import numpy as np
except ImportError:
print '--- numpy not installed! Exit program. ---'
sys.exit(2)
#=======================================================================================================================
# class definition
#=======================================================================================================================
class label_class:
    """Configuration holder for the vertebral labelling pipeline.

    Bundles the hard-coded input/output paths together with the
    geometric parameters (in mm) used when sampling intensity around
    the spinal cord centerline.
    """

    def __init__(self, contrast):
        # --- anatomical input data (paths and file names) ---
        self.input_path = '/Users/taduv_admin/data/Vertebralabeling/t1/errsm_03_C1-T12/'
        self.input_anat = 'errsm_03_t1.nii.gz'
        self.input_centerline = 'segmentation_centerline_binary'
        self.input_surface = 'segmentation_binary'  # optional
        # --- output location ---
        self.output_path = '/Users/taduv_admin/data/Vertebralabeling/t1/errsm_03_C1-T12/labelling/'
        self.output_labled_centerline = ''
        # --- sampling parameters (mm) ---
        self.shift_AP = 17  # shift of the centerline onto the spine (default: 17 mm)
        self.size_AP = 6    # averaging window around the centerline, anterior-posterior
        self.size_RL = 5    # averaging window around the centerline, right-left
        self.verbose = 1    # 1 -> display figures
#=======================================================================================================================
# main
#=======================================================================================================================
def main():
    """Parse command-line options and dispatch the vertebral labelling.

    Options: -h (usage), -i <anat image>, -c <output labelled
    centerline>, -s (surface; see NOTE below).
    """
    # Contrast is hard-coded, so the T1 branch below always runs.
    contrast = 'T1'
    label = label_class(contrast)
    try:
        opts, args = getopt.getopt(sys.argv[1:],'hi:c:s')
    except getopt.GetoptError as err:
        print str(err)
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        # NOTE(review): ('-i') is a plain string, not a one-element tuple,
        # so `in` performs a substring test here. It happens to work for
        # these single flags, but ('-i',) would be the intended form.
        elif opt in ('-i'):
            label.input_anat = arg
        elif opt in ('-c'):
            label.output_labled_centerline = arg
        # NOTE(review): 's' has no ':' in the getopt spec above, so -s
        # takes no value and `arg` is '' here — confirm whether a
        # filename argument ('s:') was intended.
        elif opt in ('-s'):
            label.output_labled_surface = arg
    # Display usage if a mandatory argument is not provided
    if label.input_anat == '' or label.output_labled_centerline == '':
        print '\n \n All mandatory arguments are not provided \n \n'
        usage()
    if contrast == 'T1':
        labeling_vertebrae_T1(label)
    else:
        labeling_vertebrae_T2(label)
#=======================================================================================================================
# labeling_vertebrae_T1 function
#=======================================================================================================================
def labeling_vertebrae_T1(label):
input_anat = label.input_path + label.input_anat + '.nii'
if label.segmentation_do==1:
input_centerline = label.output_path + label.segmentation_centerline + '.nii'
input_surface = label.output_path + label.segmentation_surface + '.nii'
else:
input_centerline = label.input_path + label.input_centerline + '.nii'
input_surface = label.input_path + label.input_surface + '.nii'
output_centerline_vertebra = label.output_path + label.output_labled_centerline
output_surface_vertebra = label.output_path + label.output_labled_surface
surface_do = label.surface_do
# check existence of input files
sct.check_file_exist(input_anat)
# extract path/file/extension
path_anat, file_anat, ext_anat = sct.extract_fname(input_anat)
path_centerline, file_centerline, ext_centerline = sct.extract_fname(input_centerline)
# convert to nii
#print '\nCopy input data...'
#sct.run('cp ' + input_anat + ' tmp.anat' + ext_anat)
#sct.run('fslchfiletype NIFTI tmp.anat')
#sct.run('cp ' + input_centerline + ' tmp.centerline' + ext_centerline)
#sct.run('fslchfiletype NIFTI tmp.centerline')
#==================================================
# Reorientation of the data if needed
#==================================================
command = 'fslhd ' + input_anat
result = commands.getoutput(command)
orientation = result[result.find('qform_xorient')+15] + result[result.find('qform_yorient')+15] + result[result.find('qform_zorient')+15]
if orientation!='ASR':
print '\nReorient input volume to AP SI RL orientation...'
sct.run(sct.fsloutput + 'fslswapdim tmp.anat AP SI RL tmp.anat_orient')
sct.run(sct.fsloutput + 'fslswapdim tmp.centerline AP SI RL tmp.centerline_orient')
#load_images
anat_file = nibabel.load('tmp.anat_orient.nii')
anat = anat_file.get_data()
hdr = anat_file.get_header()
dims = hdr['dim']
scales = hdr['pixdim']
#if surface_do==1:
#surface_file = nibabel.load(input_surface_reorient)
#surface = surface_file.get_data()
centerline_file = nibabel.load('tmp.centerline_orient.nii')
centerline = centerline_file.get_data()
else:
# loading images
anat_file = nibabel.load(input_anat)
anat = anat_file.get_data()
hdr = anat_file.get_header()
dims = hdr['dim']
scales = hdr['pixdim']
#if surface_do==1:
#surface_file = nibabel.load(input_surface)
#surface = surface_file.get_data()
centerline_file = nibabel.load(input_centerline)
centerline = centerline_file.get_data()
#==================================================
# Calculation of the profile intensity
#==================================================
shift_AP = label.shift_AP*scales[1]
size_AP = label.size_AP*scales[1]
size_RL = label.size_RL*scales[3]
np.uint16(anat)
X,Y,Z = np.where(centerline>0)
#centerline = [anat[X[i]][Y[i]][Z[i]] for i in range(len(X))]
j = np.argsort(Y)
y = Y[j]
x = X[j]
z = Z[j]
#eliminating double in y
index=0
for i in range(len(y)-1):
if y[i]==y[i+1]:
if index==0:
index_double = i
else:
index_double = np.resize(index_double,index+1)
index_double[index] = i
index = index + 1
mask = np.ones(len(y), dtype=bool)
mask[index_double] = False
y = y[mask]
x = x[mask]
z = z[mask]
#shift the centerline to the spine of shift_AP
x1 = np.round(x-shift_AP/scales[1])
#build intensity profile along the centerline
I = np.zeros((len(y),1))
for index in range(len(y)):
lim_plus = index + 5
lim_minus = index - 5
if lim_minus<0:
lim_minus = 0
if lim_plus>=len(x1):
lim_plus = len(x1) - 1
# normal vector of the orthogonal plane to the centerline i.e tangent vector to the centerline
Vx = x1[lim_plus] - x1[lim_minus]
Vz = z[lim_plus] - z[lim_minus]
Vy = y[lim_plus] - y[lim_minus]
d = Vx*x1[index] + Vy*y[index] + Vz*z[index]
for i_slice_RL in range(2*np.int(round(size_RL/scales[3]))):
for i_slice_AP in range(2*np.int(round(size_AP/scales[1]))):
result = (d - Vx*(x1[index] + i_slice_AP - size_AP - 1) - Vz*z[index])/Vy
if result > anat.shape[1]:
result = anat.shape[1]
I[index] = I[index] + anat[np.int(round(x1[index]+i_slice_AP - size_AP - 1)),np.int(round(result)),np.int(round(z[index] + i_slice_RL - size_RL - 1))]
# Detrending Intensity
start_centerline_y = y[0]
X = np.where(I==0)
mask2 = np.ones((len(y),1), dtype=bool)
mask2[X,0] = False
#I = I[mask2]
if label.verbose==1:
pl.plot(I)
pl.xlabel('direction superior-inferior')
pl.ylabel('intensity')
pl.title('Intensity profile along the shifted spinal cord centerline')
pl.show()
#from scipy.interpolate import UnivariateSpline
#fit_detrend = UnivariateSpline(np.arange(len(I[:,0])),I[:,0])
#P_detrend = fit_detrend(np.arange(len(I[:,0])))
#popt, pcov = scipy.optimize.curve_fit(func,np.arange(len(I[:,0])),I[:,0],p0=None)
#P_fit = func(np.arange(len(I[:,0])), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5])
#popt = np.polyfit(np.arange(len(I[:,0])),I[:,0],9)
#P_fit = np.poly1d(popt)
#a = np.arange(len(I[:,0]))
#b = np.zeros(len(I[:,0]))
#print a ,I[:,0]
#nurbs = NURBS(3,len(a)+100,[[a[n],I[n,0],b[n]] for n in range(len(I[:,0]))])
#P = nurbs.getCourbe3D()
#I_detrend = np.zeros((len(I[:,0]),1))
#I_detrend[:,0] = I[:,0] - P[0]
#I_detrend[:,0] = I[:,0] - P_fit(np.arange(len(I[:,0])))
#I_detrend = scipy.signal.detrend(I,axis=0)
#if len(I)*scales[1]<(300/scales[1]):
#I_detrend = j_detrend_new_v2(I.T,5,'cos',1)
#else:
#I_detrend = j_detrend_new_v2(I.T,20,'cos',1)
# index_maxima = 0
# count = 0
# for i in range(len(I[:,0])):
# if i==0:
# if I[i,0]>I[i+1,0]:
# index_maxima = i
# count = count + 1
# elif i==(len(I[:,0])-1):
# if I[i,0]<I[i-1,0]:
# index_maxima = np.resize(index_maxima,count+1)
# index_maxima[len(index_maxima)-1] = i
# else:
# if I[i,0]>I[i+1,0]:
# if I[i,0]>I[i-1,0]:
# index_maxima = np.resize(index_maxima,count+1)
# index_maxima[len(index_maxima)-1] = i
# count = count + 1
#
# mean_maxima = np.mean(I[index_maxima,0])
# threshold = np.amin(I[index_maxima,0]) + (np.amax(I[index_maxima,0]) - np.amin(I[index_maxima,0]))/2
# indices = np.array(np.where(I[index_maxima,0]>threshold))
#
# weights = np.ones(len(I[:,0]))*float(1/float(len(I[:,0])-(len(indices.T))))
# weights[index_maxima] = 0
# #weights[index_maxima+1] = 0
# #weights[index_maxima-1] = 0
#
# tck = scipy.interpolate.splrep(np.arange(len(I[:,0])),I[:,0],w = weights ,xb=None, xe=None, k=3, task=0, s=60000, t=None, full_output=0, per=0, quiet=1)
# P_fit = scipy.interpolate.splev(np.arange(len(I[:,0])),tck,der=0,ext=0)
# frequency = scipy.fftpack.fftfreq(len(I[:,0]), d=1)
# Fc = 20
# Fs = 2*np.amax(frequency)
# h = scipy.signal.firwin(numtaps=N, cutoff=np.amax(frequency)/10, window='hann',pass_zero=True, nyq=Fs/2)
# P_fit=scipy.signal.lfilter(h, 1.0, I[:,0])
frequency = scipy.fftpack.fftfreq(len(I[:,0]), d=1)
z = np.abs(scipy.fftpack.fft(I[:,0], n=None, axis=-1, overwrite_x=False))
# print z.shape,frequency.shape
# pl.plot(frequency,z)
# pl.show()
# N, Wn = scipy.signal.buttord(wp = np.amax(frequency)/10, ws = (np.amax(frequency)/10)+ 0.2, gpass = 0.1, gstop = 50, analog=False)
# print N, Wn
# b, a = scipy.signal.cheby2(N, 20, Wn, btype='low', analog=False, output='ba')
Wn = np.amax(frequency)/10
N = 5 #Order of the filter
# b, a = scipy.signal.butter(N, Wn, btype='low', analog=False, output='ba')
b, a = scipy.signal.iirfilter(N, Wn, rp=None, rs=None, btype='low', analog=False, ftype='bessel', output='ba')
I_fit = scipy.signal.filtfilt(b, a, I[:,0], axis=-1, padtype='constant', padlen=None)
pl.plot(I[:,0])
pl.plot(I_fit)
pl.show()
I_detrend = np.zeros((len(I[:,0]),1))
I_detrend[:,0] = I[:,0] - I_fit
I_detrend = I_detrend/(np.amax(I_detrend))
if label.verbose==1:
pl.plot(I_detrend[:,0])
pl.xlabel('direction superior-inferior')
pl.ylabel('intensity')
pl.title('Intensity profile along the shifted spinal cord centerline after detrending and basic normalization')
pl.show()
info_1 = input('Is the more rostral vertebrae the C1 or C2 one? if yes, enter 1 otherwise 0:')
if info_1==0:
level_start = input('enter the level of the more rostral vertebra - choice of the more rostral vertebral level of the field of view:')
else:
level_start = 2
mean_distance_dict = scipy.io.loadmat('/home/django/kraju/code/spinalcordtoolbox_dev/src/vertebral_labeling/mean_distance.mat')
mean_distance = (mean_distance_dict.values()[2]).T
C1C2_distance = mean_distance[0:2]
mean_distance = mean_distance[level_start-1:len(mean_distance)-1]
space = np.linspace(-5/scales[2], 5/scales[2], round(11/scales[2]), endpoint=True)
pattern = (np.sinc((space*scales[2])/15))**(20)
xmax_pattern = np.argmax(pattern)
#==================================================
# step 1 : Find the First Peak
#==================================================
#correlation between the pattern and intensity profile
#corr_all = scipy.signal.correlate(pattern,I_detrend[:,0])
#corr_all = matplotlib.pyplot.xcorr(pattern,I_detrend[:,0])
pattern1 = np.concatenate((pattern,np.zeros(len(I_detrend[:,0])-len(pattern))))
corr_all = scipy.signal.correlate(I_detrend[:,0],pattern1)
loc_corr = np.arange(-np.round((len(corr_all)/2)),np.round(len(corr_all)/2)+2)
index_fp = 0
count = 0
for i in range(len(corr_all)):
if corr_all[i]>0.1:
if i==0:
if corr_all[i]<corr_all[i+1]:
index_fp = i
count = count + 1
elif i==(len(corr_all)-1):
if corr_all[i]<corr_all[i-1]:
index_fp = np.resize(index_fp,count+1)
index_fp[len(index_fp)-1] = i
else:
if corr_all[i]<corr_all[i+1]:
index_fp = np.resize(index_fp,count+1)
index_fp[len(index_fp)-1] = i
count = count + 1
elif corr_all[i]<corr_all[i-1]:
index_fp = np.resize(index_fp,count+1)
index_fp[len(index_fp)-1] = i
count = count + 1
else:
if i==0:
index_fp = i
count = count + 1
else:
index_fp = np.resize(index_fp,count+1)
index_fp[len(index_fp)-1] = i
count = count + 1
mask_fp = np.ones(len(corr_all), dtype=bool)
mask_fp[index_fp] = False
value = corr_all[mask_fp]
loc_corr = loc_corr[mask_fp]
loc_corr = loc_corr - I_detrend.shape[0]
loc_first_peak = xmax_pattern - loc_corr[np.amax(np.where(value>1))]
Mcorr1 = value[np.amax(np.where(value>1))]
#building the pattern that has to be added at each iteration in step 2
if xmax_pattern<loc_first_peak:
template_truncated = np.concatenate((np.zeros((loc_first_peak-xmax_pattern)),pattern))
else:
template_truncated = pattern[(xmax_pattern-loc_first_peak-1):]
xend = np.amax(np.where(template_truncated>0.02))
pixend = xend - loc_first_peak
if label.verbose==1:
pl.plot(template_truncated)
pl.plot(I_detrend)
pl.title('Detection of First Peak')
pl.xlabel('direction anterior-posterior (mm)')
pl.ylabel('intensity')
pl.show()
| |
#! /usr/bin/env python
#-*- coding: utf-8 -*-
#from __future__ import print_function
############################################## standard libs
import sys
import os
import time
from datetime import datetime
from copy import deepcopy
from math import degrees, radians, floor, ceil
############################################## numpy & argparse
import numpy
import argparse
############################################## pystrain
from pystrain.strain import *
from pystrain.geodesy.utm import *
from pystrain.iotools.iparser import *
import pystrain.grid
from pystrain.station import Station
############################################## ploting
from scipy.spatial import Delaunay
#from math import sqrt, radians, sin, cos, atan2, pi, asin
############################################## Flask
from flask import Flask, flash, request, redirect, url_for, render_template, send_file, session
from flask_restful import reqparse
## START application
app = Flask(__name__, template_folder="templates", static_folder="static")
## NOTE(review): placeholder secret key and debug=True should be
##+ overridden before any production deployment — confirm.
app.secret_key = '<KEY>'
app.debug = True
## disable cache for data files
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
## root URL prefix: set when served via the WSGI module under apache,
##+ or '' when running the local development server
ROOTURL_FOLDER=''
## Application's root folder: os.getcwd() for a local server;
##+ for the WSGI module use an absolute path to the folder
ROOT_FOLDER=os.getcwd()
## Upload files, not in use yet
# UPLOAD_FOLDER = '/uploads'
# ALLOWED_EXTENSIONS = set(['txt', 'vel'])
## paths to the intermediate/result data files produced per run
f_temp = os.path.join(ROOT_FOLDER, 'temp.dat')
f_strain = os.path.join(ROOT_FOLDER,'strain_info.dat')
f_stats = os.path.join(ROOT_FOLDER,'strain_stats.dat')
f_deltr = os.path.join(ROOT_FOLDER,'delaunay_info.dat')
f_station = os.path.join(ROOT_FOLDER,'station_info.dat')
## Version string of StrainTensor reported in output headers
Version = 'StrainTensor.py Version: 1.0'
@app.route('/website')
def website():
    """Serve the static project website landing page."""
    template = 'website/index.html'
    return render_template(template)
@app.route('/')
def webtool():
    """Serve the web-tool input page at the application root."""
    return render_template(
        'webtool/tmpl_inputs.html',
        rooturl_folder=ROOTURL_FOLDER,
    )
@app.route('/inputs')
def webtool_inputs():
    """Reset the tool and serve the input page.

    Deletes any intermediate/result files left over from a previous run
    so a new session starts clean, then renders the input form.

    Returns:
        Rendered input-form template.
    """
    # Same files, same removal order as before; the loop replaces five
    # copy-pasted `if os.path.isfile(p) == True:` blocks.
    for stale_file in (f_temp, f_strain, f_stats, f_station, f_deltr):
        if os.path.isfile(stale_file):
            os.remove(stale_file)
    return render_template('webtool/tmpl_inputs.html', rooturl_folder=ROOTURL_FOLDER)
@app.route('/parameters', methods=['GET', 'POST'])
def webtool_params():
    """Parse an uploaded station/velocity file and render the parameters page.

    On POST the uploaded file is read into Station objects, cached as
    formatted text in ``f_temp`` for the later processing steps, and a
    default 0.5 x 0.5 degree grid centred on the network is computed.

    NOTE(review): on a plain GET request ``file``, ``sta_list_ell_tmpl``
    and ``grd`` are never assigned, so the final render_template call
    would raise NameError — confirm this route is only reached via POST.
    """
    sta_list_ell = []
    x_mean = 0
    y_mean = 0
    NoSta = 0
    input_filename = ""  # NOTE(review): unused; file.filename is used below instead
    if request.method == 'POST':
        file = request.files['file']
        stations = []
        for line in file.readlines():
            stations.append(Station(line))
        for sta in stations:
            sta_list_ell.append(sta)
        # Cache the parsed stations as text. Mode must be 'w' (text):
        # the formatted records are str, and 'wb' raises TypeError on
        # Python 3 when writing str.
        with open(f_temp, 'w') as fout:
            for idx, sta in enumerate(sta_list_ell):
                fout.write('{:10s} {:+10.5f} {:10.5f} {:+7.2f} {:+7.2f} {:+7.3f} {:+7.3f} {:+7.3f} {:+7.3f} \n'.format(sta.name, degrees(sta.lon), degrees(sta.lat), sta.ve*1e03, sta.vn*1e03, sta.se*1e03, sta.sn*1e03, sta.rho*1e03, sta.t ))
        # Display copy, rounded to human-friendly units (degrees, mm/yr).
        sta_list_ell_tmpl = deepcopy(sta_list_ell)
        for idx, sta in enumerate(sta_list_ell_tmpl):
            sta_list_ell_tmpl[idx].lon = round(degrees(sta.lon), 3)
            sta_list_ell_tmpl[idx].lat = round(degrees(sta.lat), 3)
            sta_list_ell_tmpl[idx].vn = round(sta.vn*1.e3, 1)
            sta_list_ell_tmpl[idx].ve = round(sta.ve*1.e3, 1)
            sta_list_ell_tmpl[idx].sn = round(sta.sn*1.e3, 1)
            sta_list_ell_tmpl[idx].se = round(sta.se*1.e3, 1)
            sta_list_ell_tmpl[idx].rho = round(sta.rho*1.e3, 1)
            sta_list_ell_tmpl[idx].t = round(sta.t, 2)
        session['input_filename'] = file.filename
        NoSta = format(len(stations))
        session['NoSta'] = format(len(stations))
        grd = pystrain.grid.generate_grid(sta_list_ell, 0.5 , 0.5, True)
        x_mean = (grd.x_min + grd.x_max)/2.
        y_mean = (grd.y_min + grd.y_max)/2.
        #print('[DEBUG] Number of stations parsed: {}'.format(len(stations)))
    return render_template('webtool/tmpl_params.html', rooturl_folder=ROOTURL_FOLDER, content = sta_list_ell_tmpl, input_file=file.filename, NoSta = NoSta, clon = x_mean, clat = y_mean, grd = grd)
def cut_rectangle(xmin, xmax, ymin, ymax, sta_lst, sta_list_to_degrees=False):
    """Return the stations of *sta_lst* inside the given rectangle.

    Bounds are inclusive and in decimal degrees; pass
    ``sta_list_to_degrees=True`` when the station coordinates are stored
    in radians so they are converted before the comparison.
    """
    def _coords(sta):
        # Convert on the fly if the list stores radians.
        if sta_list_to_degrees:
            return degrees(sta.lon), degrees(sta.lat)
        return sta.lon, sta.lat

    kept = []
    for sta in sta_lst:
        lon, lat = _coords(sta)
        if xmin <= lon <= xmax and ymin <= lat <= ymax:
            kept.append(sta)
    return kept
def write_station_info(sta_lst, filename=f_station):
    """Write station coordinates and velocities to an ASCII table.

    Args:
        sta_lst (list): Station objects providing name, lon/lat (radians)
            and ve/vn/se/sn velocity components (m/yr, written as mm/yr).
        filename (str): output path; defaults to the module-level
            ``f_station`` file.
    """
    # Text mode ('w'): the formatted records are str, so binary mode
    # ('wb') would raise TypeError on Python 3.
    with open(filename, 'w') as fout:
        fout.write('{:^10s} {:^10s} {:^10s} {:7s} {:7s} {:7s} {:7s} \n'.format('Station', 'Longtitude', 'Latitude', 'Ve', 'Vn', 'sVe', 'sVn'))
        fout.write('{:^10s} {:^10s} {:^10s} {:7s} {:7s} {:7s} {:7s} \n'.format('', 'deg.', 'deg', 'mm/yr', 'mm/yr', 'mm/yr', 'mm/yr'))
        for sta in sta_lst:
            fout.write('{:10s} {:+10.5f} {:10.5f} {:+7.2f} {:+7.2f} {:+7.3f} {:+7.3f} \n'.format(sta.name, degrees(sta.lon), degrees(sta.lat), sta.ve*1e03, sta.vn*1e03, sta.se*1e03, sta.sn*1e03))
    return
def print_model_info(fout, cmd, clargs):
    """Write a run header (version, command line, timestamp, parsed
    options) to the open file object *fout*."""
    header = [
        '{:} \n'.format(Version),
        'Module used:\n\t{:}\n'.format(' '.join(cmd)),
        'Run at: {:}\n'.format(datetime.now().strftime('%c')),
        'Command line switches/options parsed:\n',
    ]
    for text in header:
        fout.write(text)
    # One indented "key -> value" line per parsed option.
    for key in clargs:
        fout.write('\t{:20s} -> {:}\n'.format(key, clargs[key]))
class get_strain_param:
    """One strain-tensor result record, as read from the strain info file.

    Value columns (lat, lon, vx, ...) always parse as floats; the
    matching uncertainty columns (dvx, dw, dexx, ...) fall back to their
    raw string when the file contains a non-numeric placeholder.
    """

    # Column order of a record; also the attribute names, in file order.
    strain_param_names = ['lat', 'lon', 'vx', 'dvx', 'vy', 'dvy', 'w', 'dw', 'exx', 'dexx', 'exy', 'dexy', 'eyy', 'deyy', 'emax', 'demax', 'emin', 'demin', 'shr', 'dshr', 'azi', 'dazi', 'dilat', 'ddilat', 'secinv', 'dsecinv' ]

    def __init__(self, *args, **kargs):
        """Construct from an optional ASCII line (first positional arg)
        and/or keyword overrides matching ``strain_param_names``."""
        self.set_none()
        # was: `if len(args) is not 0:` — identity comparison against an
        # int literal; plain truthiness is the correct check.
        if args:
            self.init_from_ascii_line(args[0])
        for key, val in kargs.items():
            # was: `key in station_member_names` — an undefined name that
            # raised NameError whenever keyword arguments were used; the
            # intended list is the class's own strain_param_names.
            if key in self.strain_param_names:
                setattr(self, key, val)

    def init_from_ascii_line(self, input_line):
        """Parse one whitespace-separated record into attributes.

        Uncertainty columns (odd indices from 3 on: dvx, dvy, dw, dexx,
        ...) tolerate non-numeric tokens and keep them as strings; all
        other columns must parse as float. On any other failure the
        record is left partially filled and a debug message is printed,
        matching the original best-effort behaviour.
        """
        tokens = input_line.split()
        try:
            for i, name in enumerate(self.strain_param_names):
                if i >= 3 and i % 2 == 1:
                    # uncertainty column: fall back to the raw string
                    try:
                        value = float(tokens[i])
                    except ValueError:
                        value = str(tokens[i])
                else:
                    value = float(tokens[i])
                setattr(self, name, value)
        except Exception:
            # was a bare `except:`; Exception still covers short lines
            # (IndexError) and bad floats (ValueError) without swallowing
            # KeyboardInterrupt/SystemExit.
            print('[DEBUG] Invalid Station instance constrution.')
            print('[DEBUG] Input line \"{}\"'.format(input_line.strip()))
            #raise RuntimeError

    def set_none(self):
        """Reset every known attribute to None."""
        for name in self.strain_param_names:
            setattr(self, name, None)
@app.route('/results', methods=['GET', 'POST'])
def webtool_results():
NoSta = session.get('NoSta')
input_filename = session.get('input_filename')
sta_list_ell = []
with open(f_temp, 'r') as file:
stations = []
for line in file.readlines():
stations.append(Station(line))
for sta in stations:
sta_list_ell.append(sta)
if request.method == 'POST':
lonmin = 0#request.form['lonmin']
lonmax = 0#request.form['lonmax']
latmin = 0#request.form['latmin']
latmax = 0#request.form['latmax']
parser = reqparse.RequestParser()
if request.form.get('shen'):
parser.add_argument('shen',
location='form',
default='shen',
dest='method',
choices=['shen', 'veis'],
required=False,
help='Choose a method for strain estimation. If \'shen\' is passed in, the estimation will follow the algorithm described in Shen et al, 2015, using a weighted least squares approach. If \'veis\' is passed in, then the region is going to be split into delaneuy triangles and a strain estimated in each barycenter.')
if request.form.get('veis'):
parser.add_argument('veis',
location='form',
default='veis',
dest='method',
choices=['shen', 'veis'],
required=False,
help='Choose a method for strain estimation. If \'shen\' is passed in, the estimation will follow the algorithm described in Shen et al, 2015, using a weighted least squares approach. If \'veis\' is passed in, then the region is going to be split into delaneuy triangles and a strain estimated in each barycenter.')
if request.form.get('barycenter'):
parser.add_argument('barycenter',
location='form',
dest='one_tensor',
action='store_true',
help='Only estimate one strain tensor, at the region\'s barycentre.')
parser.add_argument('region',
location='form',
default='18/23/32/43', #reqparse.SUPPRESS,
dest='region',
help='Specify a region; any station (in the input file) falling outside will be ommited. The region should be given as a rectangle, specifying min/max values in longtitude and latitude (using decimal degrees). E.g. \"[...] --region=21.0/23.5/36.0/38.5 [...]\"',
required=False)
parser.add_argument('x-grid-step',
location='form',
default=0.5,
dest='x_grid_step',
type=float,
required=False,
help='The x-axis grid step size in degrees. This option is only relevant if the program computes more than one strain tensors.')
parser.add_argument('y-grid-step',
location='form',
default=0.5,
dest='y_grid_step',
type=float,
required=False,
help='The y-axis grid step size in degrees. This option is only relevant if the program computes more than one strain tensors.')
parser.add_argument('Wt',
location='form',
default=24,
dest='Wt',
type=int,
required=False,
help='Only relevant for \'--mehod=shen\' and if \'d-param\' is not passed in. Let W=Σ_i*G_i, the total reweighting coefficients of the data, and let Wt be the threshold of W. For a given Wt, the smoothing constant D is determined by Wd=Wt . It should be noted that W is a function of the interpolation coordinate, therefore for the same Wt assigned, D varies spatially based on the in situ data strength; that is, the denser the local data array is, the smaller is D, and vice versa.')
parser.add_argument('dmin',
location='form',
default=1,
dest='dmin',
type=int,
required=False,
help='Only relevant for \'--mehod=shen\' and if \'d-param\' is not passed in. This is the lower limit for searching for an optimal D-parameter value. Unit is km.')
parser.add_argument('dmax',
location='form',
default=500,
dest='dmax',
type=int,
required=False,
help='Only relevant for \'--mehod=shen\' and if \'d-param\' is not passed in. This is the upper limit for searching for an optimal d-param value. Unit is km.')
parser.add_argument('dstep',
location='form',
default=2,
dest='dstep',
type=int,
required=False,
help='Only relevant for \'--mehod=shen\' and if \'d-param\' is not passed in. This is the step size for searching for an optimal d-param value. Unit is km.')
parser.add_argument('d-param',
default=None,
dest='d_coef',
type=float,
required=False,
help='Only relevant for | |
"array"):
raise ValueError('invalid action')
num_existing_items = len(sub_data)
if action_type == 'add':
if 'maxItems' not in sub_schema or num_existing_items < sub_schema["maxItems"]:
sub_data.append(generate_placeholder(sub_schema["items"]))
elif action_type == 'delete':
action_index = int(action_index)
if ('minItems' not in sub_schema or num_existing_items > sub_schema["minItems"]) and action_index < num_existing_items:
del sub_data[action_index]
else:
num_existing_columns = sub_schema["items"].get("minItems", 1)
for row in sub_data:
num_existing_columns = max(num_existing_columns, len(row))
if action_type == 'addcolumn':
if 'maxItems' not in sub_schema["items"] or num_existing_columns < sub_schema["items"]["maxItems"]:
num_existing_columns += 1
for row in sub_data:
while len(row) < num_existing_columns:
row.append(generate_placeholder(sub_schema["items"]["items"]))
elif action_type == 'deletecolumn':
if num_existing_columns > sub_schema.get("minItems", 1):
num_existing_columns -= 1
for row in sub_data:
while len(row) > num_existing_columns:
del row[-1]
def show_object_form(object, action, previous_object=None, should_upgrade_schema=False, placeholder_data=None):
    """Render and process the object create/edit form.

    Entry modes: create a new object (``object is None``), create one
    pre-filled from ``previous_object``'s data, or edit/upgrade an
    existing ``object``. On a valid POST carrying "action_submit" the
    object (or a whole batch of objects) is created or updated and the
    user is redirected; form-manipulation actions (add/delete rows or
    columns) re-render the form with the action applied.

    Args:
        object: existing object to edit, or None when creating.
        action: the action whose schema drives the form.
        previous_object: object whose data pre-fills a new object.
        should_upgrade_schema: convert the object's data to the action's
            current schema before editing (requires ``object``).
        placeholder_data: mapping of data paths to values used to
            pre-fill a freshly generated placeholder; invalid entries
            are silently ignored.
    """
    # --- pick the initial form data ---
    if object is None and previous_object is None:
        data = generate_placeholder(action.schema)
        if placeholder_data:
            for path, value in placeholder_data.items():
                try:
                    sub_data = data
                    for step in path[:-1]:
                        sub_data = sub_data[step]
                    sub_data[path[-1]] = value
                except Exception:
                    # Ignore invalid placeholder data
                    pass
    elif object is None and previous_object is not None:
        data = previous_object.data
    else:
        data = object.data
    # --- pick the schema (and convert data when upgrading) ---
    previous_object_schema = None
    mode = 'edit'
    if should_upgrade_schema:
        mode = 'upgrade'
        assert object is not None
        schema = action.schema
        data, upgrade_warnings = logic.schemas.convert_to_schema(object.data, object.schema, action.schema)
        for upgrade_warning in upgrade_warnings:
            flask.flash(upgrade_warning, 'warning')
    elif object is not None:
        schema = object.schema
    elif previous_object is not None:
        schema = previous_object.schema
        previous_object_schema = schema
    else:
        schema = action.schema
    if previous_object is not None:
        action_id = previous_object.action_id
        previous_object_id = previous_object.id
    else:
        action_id = action.id
        previous_object_id = None
    errors = []
    object_errors = {}
    form_data = {}
    previous_actions = []
    # Signed serializer protects the client-held list of form actions.
    serializer = itsdangerous.URLSafeSerializer(flask.current_app.config['SECRET_KEY'])
    form = ObjectForm()
    if flask.request.method != 'GET' and form.validate_on_submit():
        raw_form_data = {key: flask.request.form.getlist(key) for key in flask.request.form}
        form_data = {k: v[0] for k, v in raw_form_data.items()}
        # --- batch size: accept int, or a float that is a whole number ---
        if 'input_num_batch_objects' in form_data:
            try:
                num_objects_in_batch = int(form_data['input_num_batch_objects'])
            except ValueError:
                try:
                    # The form allows notations like '1.2e1' for '12', however
                    # Python can only parse these as floats
                    num_objects_in_batch = float(form_data['input_num_batch_objects'])
                    if num_objects_in_batch == int(num_objects_in_batch):
                        num_objects_in_batch = int(num_objects_in_batch)
                    else:
                        raise
                except ValueError:
                    errors.append('input_num_batch_objects')
                    num_objects_in_batch = None
                else:
                    form_data['input_num_batch_objects'] = str(num_objects_in_batch)
            else:
                form_data['input_num_batch_objects'] = str(num_objects_in_batch)
        else:
            num_objects_in_batch = None
        # Restore the client-held action history (reject tampering).
        if 'previous_actions' in flask.request.form:
            try:
                previous_actions = serializer.loads(flask.request.form['previous_actions'])
            except itsdangerous.BadData:
                flask.abort(400)
        if "action_submit" in form_data:
            # The object name might need the batch number to match the pattern
            if schema.get('batch', False) and num_objects_in_batch is not None:
                batch_base_name = form_data['object__name__text']
                name_suffix_format = schema.get('batch_name_format', '{:d}')
                try:
                    name_suffix_format.format(1)
                except (ValueError, KeyError):
                    name_suffix_format = '{:d}'
                if name_suffix_format:
                    example_name_suffix = name_suffix_format.format(1)
                else:
                    example_name_suffix = ''
                raw_form_data['object__name__text'] = [batch_base_name + example_name_suffix]
            else:
                batch_base_name = None
                name_suffix_format = None
            object_data, object_errors = parse_form_data(raw_form_data, schema)
            errors += object_errors
            if object_data is not None and not errors:
                try:
                    validate(object_data, schema)
                except ValidationError:
                    # TODO: proper logging
                    print('object schema validation failed')
                    # TODO: handle error
                    flask.abort(400)
                if object is None:
                    if schema.get('batch', False) and num_objects_in_batch is not None:
                        # Batch creation: one data copy per object, with
                        # per-object name suffix when a format is known.
                        if 'name' in object_data and 'text' in object_data['name'] and name_suffix_format is not None and batch_base_name is not None:
                            data_sequence = []
                            for i in range(1, num_objects_in_batch + 1):
                                if name_suffix_format:
                                    name_suffix = name_suffix_format.format(i)
                                else:
                                    name_suffix = ''
                                object_data['name']['text'] = batch_base_name + name_suffix
                                data_sequence.append(deepcopy(object_data))
                        else:
                            data_sequence = [object_data] * num_objects_in_batch
                        objects = create_object_batch(action_id=action.id, data_sequence=data_sequence, user_id=flask_login.current_user.id)
                        object_ids = [object.id for object in objects]
                        flask.flash('The objects were created successfully.', 'success')
                        return flask.redirect(flask.url_for('.objects', ids=','.join([str(object_id) for object_id in object_ids])))
                    else:
                        object = create_object(action_id=action.id, data=object_data, user_id=flask_login.current_user.id, previous_object_id=previous_object_id, schema=previous_object_schema)
                        flask.flash('The object was created successfully.', 'success')
                else:
                    update_object(object_id=object.id, user_id=flask_login.current_user.id, data=object_data, schema=schema)
                    flask.flash('The object was updated successfully.', 'success')
                return flask.redirect(flask.url_for('.object', object_id=object.id))
        elif any(name.startswith('action_object__') and (name.endswith('__delete') or name.endswith('__add') or name.endswith('__addcolumn') or name.endswith('__deletecolumn')) for name in form_data):
            # A form-manipulation button (add/delete row or column) was
            # pressed: record it and fall through to re-render the form.
            action = [name for name in form_data if name.startswith('action_')][0]
            previous_actions.append(action)
    # Replay the accumulated form actions onto data and form state.
    if previous_actions:
        try:
            for action in previous_actions:
                apply_action_to_data(action, data, schema)
            form_data = apply_action_to_form_data(previous_actions[-1], form_data)
        except ValueError:
            flask.abort(400)
    # TODO: make this search more narrow
    samples = get_objects_with_permissions(
        user_id=flask_login.current_user.id,
        permissions=Permissions.READ,
        action_type=ActionType.SAMPLE_CREATION
    )
    measurements = get_objects_with_permissions(
        user_id=flask_login.current_user.id,
        permissions=Permissions.READ,
        action_type=ActionType.MEASUREMENT
    )
    # An object must not reference itself in sample/measurement fields.
    if object is not None:
        samples = [
            sample
            for sample in samples
            if sample.object_id != object.object_id
        ]
        measurements = [
            measurement
            for measurement in measurements
            if measurement.object_id != object.object_id
        ]
    tags = [{'name': tag.name, 'uses': tag.uses} for tag in logic.tags.get_tags()]
    if object is None:
        return flask.render_template(
            'objects/forms/form_create.html',
            action_id=action_id,
            schema=schema,
            data=data,
            errors=errors,
            object_errors=object_errors,
            form_data=form_data,
            previous_actions=serializer.dumps(previous_actions),
            form=form,
            samples=samples,
            measurements=measurements,
            datetime=datetime,
            tags=tags,
            previous_object_id=previous_object_id
        )
    else:
        return flask.render_template(
            'objects/forms/form_edit.html',
            schema=schema,
            data=data,
            object_id=object.object_id,
            errors=errors,
            object_errors=object_errors,
            form_data=form_data,
            previous_actions=serializer.dumps(previous_actions),
            form=form,
            samples=samples,
            measurements=measurements,
            datetime=datetime,
            tags=tags,
            mode=mode
        )
@frontend.route('/objects/<int:object_id>', methods=['GET', 'POST'])
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def object(object_id):
    """
    Display a single object, or hand off to the object form.

    A plain GET (no ``mode`` query parameter) renders the read-only object
    view together with everything the template needs: log entries, comments,
    files, locations, publications, notebook templates and QR codes.
    ``?mode=edit`` / ``?mode=upgrade`` (WRITE permission required) and any
    POST fall through to show_object_form() at the bottom instead.

    :param object_id: ID of the object to display (from the URL rule)
    """
    # NOTE(review): the local name `object` shadows the builtin; kept to match
    # the existing code, since the route endpoint name depends on this function.
    object = get_object(object_id=object_id)
    related_objects_tree = logic.object_relationships.build_related_objects_tree(object_id, flask_login.current_user.id)
    user_permissions = get_user_object_permissions(object_id=object_id, user_id=flask_login.current_user.id)
    user_may_edit = Permissions.WRITE in user_permissions
    user_may_grant = Permissions.GRANT in user_permissions
    action = get_action(object.action_id)
    # The action's schema may have changed since this object version was
    # saved; if so, the view can offer a schema upgrade.
    if action.schema != object.schema:
        new_schema_available = True
    else:
        new_schema_available = False
    # Both edit and upgrade mode require WRITE permission.
    if not user_may_edit and flask.request.args.get('mode', '') == 'edit':
        return flask.abort(403)
    if not user_may_edit and flask.request.args.get('mode', '') == 'upgrade':
        return flask.abort(403)
    if flask.request.method == 'GET' and flask.request.args.get('mode', '') not in ('edit', 'upgrade'):
        # --- read-only object view ---
        # TODO: make this search more narrow
        samples = get_objects_with_permissions(
            user_id=flask_login.current_user.id,
            permissions=Permissions.READ,
            action_type=ActionType.SAMPLE_CREATION
        )
        measurements = get_objects_with_permissions(
            user_id=flask_login.current_user.id,
            permissions=Permissions.READ,
            action_type=ActionType.MEASUREMENT
        )
        instrument = action.instrument
        # Human-readable label for the object's action type.
        object_type = {
            ActionType.SAMPLE_CREATION: "Sample",
            ActionType.MEASUREMENT: "Measurement",
            ActionType.SIMULATION: "Simulation"
        }.get(action.type, "Object")
        object_log_entries = object_log.get_object_log_entries(object_id=object_id, user_id=flask_login.current_user.id)
        if user_may_edit:
            # Signed token embedding (user, object) so files can be uploaded
            # from a mobile device by scanning the QR code.
            serializer = itsdangerous.URLSafeTimedSerializer(flask.current_app.config['SECRET_KEY'], salt='mobile-upload')
            token = serializer.dumps([flask_login.current_user.id, object_id])
            mobile_upload_url = flask.url_for('.mobile_file_upload', object_id=object_id, token=token, _external=True)
            mobile_upload_qrcode = generate_qrcode(mobile_upload_url, should_cache=False)
        else:
            mobile_upload_url = None
            mobile_upload_qrcode = None
        object_url = flask.url_for('.object', object_id=object_id, _external=True)
        object_qrcode = generate_qrcode(object_url, should_cache=True)
        location_form = ObjectLocationAssignmentForm()
        # Flatten the location tree into (id, "parent / child (#id)") choices
        # using an iterative depth-first traversal; '-1' means "no location".
        locations_map, locations_tree = get_locations_tree()
        locations = [('-1', '—')]
        unvisited_location_ids_prefixes_and_subtrees = [(location_id, '', locations_tree[location_id]) for location_id in locations_tree]
        while unvisited_location_ids_prefixes_and_subtrees:
            location_id, prefix, subtree = unvisited_location_ids_prefixes_and_subtrees.pop(0)
            location = locations_map[location_id]
            locations.append((str(location_id), '{}{} (#{})'.format(prefix, location.name, location.id)))
            # Push children in reverse name order so they are popped (and thus
            # listed) in ascending name order.
            for location_id in sorted(subtree, key=lambda location_id: locations_map[location_id].name, reverse=True):
                unvisited_location_ids_prefixes_and_subtrees.insert(0, (location_id, '{}{} / '.format(prefix, location.name), subtree[location_id]))
        location_form.location.choices = locations
        # '-1' again acts as the "no responsible user" choice.
        possible_responsible_users = [('-1', '—')]
        for user in logic.users.get_users(exclude_hidden=True):
            possible_responsible_users.append((str(user.id), '{} (#{})'.format(user.name, user.id)))
        location_form.responsible_user.choices = possible_responsible_users
        measurement_actions = logic.actions.get_actions(logic.actions.ActionType.MEASUREMENT)
        favorite_action_ids = logic.favorites.get_user_favorite_action_ids(flask_login.current_user.id)
        favorite_measurement_actions = [
            action
            for action in measurement_actions
            if action.id in favorite_action_ids
        ]
        # Sort by: user name, then instrument name (independent actions first), then action name
        favorite_measurement_actions.sort(key=lambda action: (
            action.user.name.lower() if action.user else '',
            action.instrument.name.lower() if action.instrument else '',
            action.name.lower()
        ))
        publication_form = ObjectPublicationForm()
        object_publications = logic.publications.get_publications_for_object(object_id=object.id)
        user_may_link_publication = Permissions.WRITE in user_permissions
        # Resolve notebook template parameters in place: the literal string
        # 'object_id' becomes this object's ID, a list is treated as a path of
        # dict keys / list indices into object.data (None if the path does not
        # resolve), and any other value becomes None.
        notebook_templates = object.schema.get('notebookTemplates', [])
        for notebook_template in notebook_templates:
            for parameter, parameter_value in notebook_template['params'].items():
                if parameter_value == 'object_id':
                    notebook_template['params'][parameter] = object_id
                elif isinstance(parameter_value, list):
                    parameter_data = object.data
                    for step in parameter_value:
                        if isinstance(step, str) and isinstance(parameter_data, dict) and step in parameter_data:
                            parameter_data = parameter_data[step]
                        elif isinstance(step, int) and isinstance(parameter_data, list) and 0 <= step < len(parameter_data):
                            parameter_data = parameter_data[step]
                        else:
                            parameter_data = None
                    notebook_template['params'][parameter] = parameter_data
                else:
                    notebook_template['params'][parameter] = None

        def build_object_location_assignment_confirmation_url(object_location_assignment_id: int) -> str:
            # Tokenized URL with which the designated responsible user can
            # confirm taking over responsibility for the object.
            confirmation_url = flask.url_for(
                'frontend.accept_responsibility_for_object',
                t=logic.security_tokens.generate_token(
                    object_location_assignment_id,
                    salt='confirm_responsibility',
                    secret_key=flask.current_app.config['SECRET_KEY']
                ),
                _external=True
            )
            return confirmation_url

        return flask.render_template(
            'objects/view/base.html',
            object_type=object_type,
            action=action,
            instrument=instrument,
            schema=object.schema,
            data=object.data,
            object_log_entries=object_log_entries,
            ObjectLogEntryType=ObjectLogEntryType,
            last_edit_datetime=object.utc_datetime,
            last_edit_user=get_user(object.user_id),
            object_id=object_id,
            user_may_edit=user_may_edit,
            user_may_comment=user_may_edit,
            comments=comments.get_comments_for_object(object_id),
            comment_form=CommentForm(),
            files=logic.files.get_files_for_object(object_id),
            file_source_instrument_exists=False,
            file_source_jupyterhub_exists=False,
            file_form=FileForm(),
            external_link_form=ExternalLinkForm(),
            external_link_invalid='invalid_link' in flask.request.args,
            mobile_upload_url=mobile_upload_url,
            mobile_upload_qrcode=mobile_upload_qrcode,
            notebook_templates=notebook_templates,
            object_qrcode=object_qrcode,
            object_url=object_url,
            restore_form=None,
            version_id=object.version_id,
            user_may_grant=user_may_grant,
            samples=samples,
            measurements=measurements,
            favorite_measurement_actions=favorite_measurement_actions,
            FileLogEntryType=FileLogEntryType,
            file_information_form=FileInformationForm(),
            file_hiding_form=FileHidingForm(),
            new_schema_available=new_schema_available,
            related_objects_tree=related_objects_tree,
            object_publications=object_publications,
            user_may_link_publication=user_may_link_publication,
            publication_form=publication_form,
            get_object=get_object,
            get_object_location_assignment=get_object_location_assignment,
            get_user=get_user,
            get_location=get_location,
            object_location_assignments=get_object_location_assignments(object_id),
            build_object_location_assignment_confirmation_url=build_object_location_assignment_confirmation_url,
            user_may_assign_location=user_may_edit,
            location_form=location_form
        )
    # POST, ?mode=edit or ?mode=upgrade: show the editable object form.
    check_current_user_is_not_readonly()
    if flask.request.args.get('mode', '') == 'upgrade':
        should_upgrade_schema = True
    else:
        should_upgrade_schema = False
    return show_object_form(object, action=get_action(object.action_id), should_upgrade_schema=should_upgrade_schema)
@frontend.route('/objects/<int:object_id>/label')
@object_permissions_required(Permissions.READ, on_unauthorized=on_unauthorized)
def print_object_label(object_id):
    """
    Generate and serve a PDF label for an object.

    The label carries the object's name, URL, GHS hazard pictograms and the
    creation date and user (taken from the object log, overridden by a
    'created' datetime property in the object data when present).

    :param object_id: ID of the object to print a label for (from the URL rule)
    """
    obj = get_object(object_id=object_id)

    def _typed_entry(key, type_name):
        # Return obj.data[key] if it is tagged with the given _type, else None.
        if key in obj.data and '_type' in obj.data[key] and obj.data[key]['_type'] == type_name:
            return obj.data[key]
        return None

    # Fall back to 'Unknown' when no creation log entry can be found.
    creation_date = 'Unknown'
    creation_user = 'Unknown'
    log_entries = object_log.get_object_log_entries(object_id=object_id, user_id=flask_login.current_user.id)
    for log_entry in log_entries:
        if log_entry.type in (ObjectLogEntryType.CREATE_OBJECT, ObjectLogEntryType.CREATE_BATCH):
            creation_date = log_entry.utc_datetime.strftime('%Y-%m-%d')
            creation_user = get_user(log_entry.user_id).name
            break

    # A 'created' datetime property in the object data takes precedence over
    # the log entry's timestamp.
    created_entry = _typed_entry('created', 'datetime')
    if created_entry is not None:
        creation_date = created_entry['utc_datetime'].split(' ')[0]

    name_entry = _typed_entry('name', 'text')
    object_name = name_entry['text'] if name_entry is not None else 'Unknown Sample'

    hazards_entry = _typed_entry('hazards', 'hazards')
    hazards = hazards_entry['hazards'] if hazards_entry is not None else []

    object_url = flask.url_for('.object', object_id=object_id, _external=True)
    pdf_data = create_labels(
        object_id=object_id,
        object_name=object_name,
        object_url=object_url,
        creation_user=creation_user,
        creation_date=creation_date,
        ghs_classes=hazards
    )
    return flask.send_file(
        io.BytesIO(pdf_data),
        mimetype='application/pdf',
        cache_timeout=-1
    )
@frontend.route('/objects/<int:object_id>/comments/', methods=['POST'])
@object_permissions_required(Permissions.WRITE)
def post_object_comments(object_id):
    """
    Create a comment on an object from the submitted comment form.

    Always redirects back to the object view, flashing a success or error
    message depending on form validation.

    :param object_id: ID of the commented object (from the URL rule)
    """
    check_current_user_is_not_readonly()
    form = CommentForm()
    if not form.validate_on_submit():
        # No (valid) comment text was submitted.
        flask.flash('Please enter a comment text.', 'error')
    else:
        comments.create_comment(
            object_id=object_id,
            user_id=flask_login.current_user.id,
            content=form.content.data
        )
        flask.flash('Successfully posted a comment.', 'success')
    return flask.redirect(flask.url_for('.object', object_id=object_id))
@frontend.route('/objects/search/')
@flask_login.login_required
def search():
    """Render the object search page (the actual query runs client-side / via the API)."""
    return flask.render_template('search.html')
@frontend.route('/objects/<int:object_id>/permissions/request', methods=['POST'])
@flask_login.login_required
def object_permissions_request(object_id):
current_permissions = get_user_object_permissions(object_id=object_id, user_id=flask_login.current_user.id)
if Permissions.READ in current_permissions:
flask.flash('You already have permissions to access this |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.